Dataset columns (each record below lists its values in this order: patch, y, oldf, idx, id, msg, proj, lang):
- patch: string, lengths 17 to 31.2k
- y: int64, values 1 to 1
- oldf: string, lengths 0 to 2.21M
- idx: int64, values 1 to 1
- id: int64, values 4.29k to 68.4k
- msg: string, lengths 8 to 843
- proj: string, 212 classes
- lang: string, 9 classes
@@ -387,7 +387,7 @@ import browser from './browser'; videoAudioCodecs.push('mp2'); } - let supportsDts = browser.tizen || browser.web0s || options.supportsDts || videoTestElement.canPlayType('video/mp4; codecs="dts-"').replace(/no/, '') || videoTestElement.canPlayType('video/mp4; codecs="dts+"').replace(/no/, ''); + let supportsDts = browser.tizen || videoTestElement.canPlayType('video/mp4; codecs="dts-"').replace(/no/, '') || videoTestElement.canPlayType('video/mp4; codecs="dts+"').replace(/no/, ''); // DTS audio not supported in 2018 models (Tizen 4.0) if (browser.tizenVersion >= 4) {
1
import appSettings from './settings/appSettings'; import * as userSettings from './settings/userSettings'; import browser from './browser'; /* eslint-disable indent */ function canPlayH264(videoTestElement) { return !!(videoTestElement.canPlayType && videoTestElement.canPlayType('video/mp4; codecs="avc1.42E01E, mp4a.40.2"').replace(/no/, '')); } function canPlayHevc(videoTestElement, options) { if (browser.tizen || browser.xboxOne || browser.web0s || options.supportsHevc) { return true; } if (browser.ps4) { return false; } // hevc main level 4.0 return !!videoTestElement.canPlayType && (videoTestElement.canPlayType('video/mp4; codecs="hvc1.1.L120"').replace(/no/, '') || videoTestElement.canPlayType('video/mp4; codecs="hev1.1.L120"').replace(/no/, '') || videoTestElement.canPlayType('video/mp4; codecs="hvc1.1.0.L120"').replace(/no/, '') || videoTestElement.canPlayType('video/mp4; codecs="hev1.1.0.L120"').replace(/no/, '')); } let _supportsTextTracks; function supportsTextTracks() { if (browser.tizen) { return true; } if (_supportsTextTracks == null) { _supportsTextTracks = document.createElement('video').textTracks != null; } // For now, until ready return _supportsTextTracks; } let _canPlayHls; function canPlayHls() { if (_canPlayHls == null) { _canPlayHls = canPlayNativeHls() || canPlayHlsWithMSE(); } return _canPlayHls; } function canPlayNativeHls() { if (browser.tizen) { return true; } const media = document.createElement('video'); if (media.canPlayType('application/x-mpegURL').replace(/no/, '') || media.canPlayType('application/vnd.apple.mpegURL').replace(/no/, '')) { return true; } return false; } function canPlayHlsWithMSE() { // text tracks don’t work with this in firefox return window.MediaSource != null; /* eslint-disable-line compat/compat */ } function supportsAc3(videoTestElement) { if (browser.edgeUwp || browser.tizen || browser.web0s) { return true; } // iPhones 5c and older and old model iPads do not support AC-3/E-AC-3 // These models can only run iOS 10.x or lower if (browser.iOS && browser.iOSVersion < 11) { return false; } return videoTestElement.canPlayType('audio/mp4; codecs="ac-3"').replace(/no/, ''); } function supportsEac3(videoTestElement) { if (browser.tizen || browser.web0s) { return true; } // iPhones 5c and older and old model iPads do not support AC-3/E-AC-3 // These models can only run iOS 10.x or lower if (browser.iOS && browser.iOSVersion < 11) { return false; } return videoTestElement.canPlayType('audio/mp4; codecs="ec-3"').replace(/no/, ''); } function supportsAc3InHls(videoTestElement) { if (browser.tizen || browser.web0s) { return true; } if (videoTestElement.canPlayType) { return videoTestElement.canPlayType('application/x-mpegurl; codecs="avc1.42E01E, ac-3"').replace(/no/, '') || videoTestElement.canPlayType('application/vnd.apple.mpegURL; codecs="avc1.42E01E, ac-3"').replace(/no/, ''); } return false; } function canPlayAudioFormat(format) { let typeString; if (format === 'flac') { if (browser.tizen || browser.web0s || browser.edgeUwp) { return true; } } else if (format === 'wma') { if (browser.tizen || browser.edgeUwp) { return true; } } else if (format === 'asf') { if (browser.tizen || browser.web0s || browser.edgeUwp) { return true; } } else if (format === 'opus') { if (!browser.web0s) { typeString = 'audio/ogg; codecs="opus"'; return !!document.createElement('audio').canPlayType(typeString).replace(/no/, ''); } return false; } else if (format === 'alac') { if (browser.iOS || browser.osx) { return true; } } else if (format === 'mp2') { // For now 
return false; } if (format === 'webma') { typeString = 'audio/webm'; } else if (format === 'mp2') { typeString = 'audio/mpeg'; } else { typeString = 'audio/' + format; } return !!document.createElement('audio').canPlayType(typeString).replace(/no/, ''); } function testCanPlayMkv(videoTestElement) { if (browser.tizen || browser.web0s) { return true; } if (videoTestElement.canPlayType('video/x-matroska').replace(/no/, '') || videoTestElement.canPlayType('video/mkv').replace(/no/, '')) { return true; } if (browser.edgeChromium && browser.windows) { return true; } if (browser.edgeUwp) { return true; } return false; } function testCanPlayAv1(videoTestElement) { if (browser.tizenVersion >= 5.5) { return true; } else if (browser.web0sVersion >= 5 && window.outerHeight >= 2160) { return true; } return videoTestElement.canPlayType('video/webm; codecs="av01.0.15M.10"').replace(/no/, ''); } function testCanPlayTs() { return browser.tizen || browser.web0s || browser.edgeUwp; } function supportsMpeg2Video() { return browser.tizen || browser.web0s || browser.edgeUwp; } function supportsVc1(videoTestElement) { return browser.tizen || browser.web0s || browser.edgeUwp || videoTestElement.canPlayType('video/mp4; codecs="vc-1"').replace(/no/, ''); } function getDirectPlayProfileForVideoContainer(container, videoAudioCodecs, videoTestElement, options) { let supported = false; let profileContainer = container; const videoCodecs = []; switch (container) { case 'asf': supported = browser.tizen || browser.web0s || browser.edgeUwp; videoAudioCodecs = []; break; case 'avi': supported = browser.tizen || browser.web0s || browser.edgeUwp; // New Samsung TV don't support XviD/DivX // Explicitly add supported codecs to make other codecs be transcoded if (browser.tizenVersion >= 4) { videoCodecs.push('h264'); if (canPlayHevc(videoTestElement, options)) { videoCodecs.push('hevc'); } } break; case 'mpg': case 'mpeg': supported = browser.tizen || browser.web0s || browser.edgeUwp; break; case 'flv': supported = browser.tizen; break; case '3gp': case 'mts': case 'trp': case 'vob': case 'vro': supported = browser.tizen; break; case 'mov': supported = browser.safari || browser.tizen || browser.web0s || browser.chrome || browser.edgeChromium || browser.edgeUwp; videoCodecs.push('h264'); break; case 'm2ts': supported = browser.tizen || browser.web0s || browser.edgeUwp; videoCodecs.push('h264'); if (supportsVc1(videoTestElement)) { videoCodecs.push('vc1'); } if (supportsMpeg2Video()) { videoCodecs.push('mpeg2video'); } break; case 'wmv': supported = browser.tizen || browser.web0s || browser.edgeUwp; videoAudioCodecs = []; break; case 'ts': supported = testCanPlayTs(); videoCodecs.push('h264'); // safari doesn't support hevc in TS-HLS if ((browser.tizen || browser.web0s) && canPlayHevc(videoTestElement, options)) { videoCodecs.push('hevc'); } if (supportsVc1(videoTestElement)) { videoCodecs.push('vc1'); } if (supportsMpeg2Video()) { videoCodecs.push('mpeg2video'); } profileContainer = 'ts,mpegts'; break; default: break; } return supported ? 
{ Container: profileContainer, Type: 'Video', VideoCodec: videoCodecs.join(','), AudioCodec: videoAudioCodecs.join(',') } : null; } function getMaxBitrate() { return 120000000; } function getGlobalMaxVideoBitrate() { let isTizenFhd = false; if (browser.tizen) { try { const isTizenUhd = webapis.productinfo.isUdPanelSupported(); isTizenFhd = !isTizenUhd; console.debug('isTizenFhd = ' + isTizenFhd); } catch (error) { console.error('isUdPanelSupported() error code = ' + error.code); } } return browser.ps4 ? 8000000 : (browser.xboxOne ? 12000000 : (browser.edgeUwp ? null : (browser.tizen && isTizenFhd ? 20000000 : null))); } export default function (options) { options = options || {}; const isSurroundSoundSupportedBrowser = browser.safari || browser.chrome || browser.edgeChromium || browser.firefox; const allowedAudioChannels = parseInt(userSettings.allowedAudioChannels() || '-1'); const physicalAudioChannels = (allowedAudioChannels > 0 ? allowedAudioChannels : null) || options.audioChannels || (isSurroundSoundSupportedBrowser || browser.tv || browser.ps4 || browser.xboxOne ? 6 : 2); const bitrateSetting = getMaxBitrate(); const videoTestElement = document.createElement('video'); const canPlayVp8 = videoTestElement.canPlayType('video/webm; codecs="vp8"').replace(/no/, ''); const canPlayVp9 = videoTestElement.canPlayType('video/webm; codecs="vp9"').replace(/no/, ''); const webmAudioCodecs = ['vorbis']; const canPlayMkv = testCanPlayMkv(videoTestElement); const profile = {}; profile.MaxStreamingBitrate = bitrateSetting; profile.MaxStaticBitrate = 100000000; profile.MusicStreamingTranscodingBitrate = Math.min(bitrateSetting, 384000); profile.DirectPlayProfiles = []; let videoAudioCodecs = []; let hlsInTsVideoAudioCodecs = []; let hlsInFmp4VideoAudioCodecs = []; const supportsMp3VideoAudio = videoTestElement.canPlayType('video/mp4; codecs="avc1.640029, mp4a.69"').replace(/no/, '') || videoTestElement.canPlayType('video/mp4; codecs="avc1.640029, mp4a.6B"').replace(/no/, '') || videoTestElement.canPlayType('video/mp4; codecs="avc1.640029, mp3"').replace(/no/, ''); // Not sure how to test for this const supportsMp2VideoAudio = browser.edgeUwp || browser.tizen || browser.web0s; /* eslint-disable compat/compat */ let maxVideoWidth = browser.xboxOne ? (window.screen ? window.screen.width : null) : null; /* eslint-enable compat/compat */ if (options.maxVideoWidth) { maxVideoWidth = options.maxVideoWidth; } const canPlayAacVideoAudio = videoTestElement.canPlayType('video/mp4; codecs="avc1.640029, mp4a.40.2"').replace(/no/, ''); const canPlayAc3VideoAudio = supportsAc3(videoTestElement); const canPlayEac3VideoAudio = supportsEac3(videoTestElement); const canPlayAc3VideoAudioInHls = supportsAc3InHls(videoTestElement); // Transcoding codec is the first in hlsVideoAudioCodecs. // Prefer AAC, MP3 to other codecs when audio transcoding. if (canPlayAacVideoAudio) { videoAudioCodecs.push('aac'); hlsInTsVideoAudioCodecs.push('aac'); hlsInFmp4VideoAudioCodecs.push('aac'); } if (supportsMp3VideoAudio) { videoAudioCodecs.push('mp3'); // PS4 fails to load HLS with mp3 audio if (!browser.ps4) { hlsInTsVideoAudioCodecs.push('mp3'); } hlsInFmp4VideoAudioCodecs.push('mp3'); } // For AC3/EAC3 remuxing. // Do not use AC3 for audio transcoding unless AAC and MP3 are not supported. 
if (canPlayAc3VideoAudio) { videoAudioCodecs.push('ac3'); if (canPlayEac3VideoAudio) { videoAudioCodecs.push('eac3'); } if (canPlayAc3VideoAudioInHls) { hlsInTsVideoAudioCodecs.push('ac3'); hlsInFmp4VideoAudioCodecs.push('ac3'); if (canPlayEac3VideoAudio) { hlsInTsVideoAudioCodecs.push('eac3'); hlsInFmp4VideoAudioCodecs.push('eac3'); } } } if (supportsMp2VideoAudio) { videoAudioCodecs.push('mp2'); } let supportsDts = browser.tizen || browser.web0s || options.supportsDts || videoTestElement.canPlayType('video/mp4; codecs="dts-"').replace(/no/, '') || videoTestElement.canPlayType('video/mp4; codecs="dts+"').replace(/no/, ''); // DTS audio not supported in 2018 models (Tizen 4.0) if (browser.tizenVersion >= 4) { supportsDts = false; } if (supportsDts) { videoAudioCodecs.push('dca'); videoAudioCodecs.push('dts'); } if (browser.tizen || browser.web0s) { videoAudioCodecs.push('pcm_s16le'); videoAudioCodecs.push('pcm_s24le'); } if (options.supportsTrueHd) { videoAudioCodecs.push('truehd'); } if (browser.tizen) { videoAudioCodecs.push('aac_latm'); } if (canPlayAudioFormat('opus')) { videoAudioCodecs.push('opus'); webmAudioCodecs.push('opus'); if (browser.tizen) { hlsInTsVideoAudioCodecs.push('opus'); } } if (canPlayAudioFormat('flac')) { videoAudioCodecs.push('flac'); hlsInFmp4VideoAudioCodecs.push('flac'); } if (canPlayAudioFormat('alac')) { videoAudioCodecs.push('alac'); hlsInFmp4VideoAudioCodecs.push('alac'); } videoAudioCodecs = videoAudioCodecs.filter(function (c) { return (options.disableVideoAudioCodecs || []).indexOf(c) === -1; }); hlsInTsVideoAudioCodecs = hlsInTsVideoAudioCodecs.filter(function (c) { return (options.disableHlsVideoAudioCodecs || []).indexOf(c) === -1; }); hlsInFmp4VideoAudioCodecs = hlsInFmp4VideoAudioCodecs.filter(function (c) { return (options.disableHlsVideoAudioCodecs || []).indexOf(c) === -1; }); const mp4VideoCodecs = []; const webmVideoCodecs = []; const hlsInTsVideoCodecs = []; const hlsInFmp4VideoCodecs = []; if ((browser.safari || browser.tizen || browser.web0s) && canPlayHevc(videoTestElement, options)) { hlsInFmp4VideoCodecs.push('hevc'); } if (canPlayH264(videoTestElement)) { mp4VideoCodecs.push('h264'); hlsInTsVideoCodecs.push('h264'); if (browser.safari || browser.tizen || browser.web0s) { hlsInFmp4VideoCodecs.push('h264'); } } if (canPlayHevc(videoTestElement, options)) { // safari is lying on HDR and 60fps videos, use fMP4 instead if (!browser.safari) { mp4VideoCodecs.push('hevc'); } if (browser.tizen || browser.web0s) { hlsInTsVideoCodecs.push('hevc'); } } if (supportsMpeg2Video()) { mp4VideoCodecs.push('mpeg2video'); } if (supportsVc1(videoTestElement)) { mp4VideoCodecs.push('vc1'); } if (browser.tizen) { mp4VideoCodecs.push('msmpeg4v2'); } if (canPlayVp8) { mp4VideoCodecs.push('vp8'); webmVideoCodecs.push('vp8'); } if (canPlayVp9) { mp4VideoCodecs.push('vp9'); webmVideoCodecs.push('vp9'); } if (testCanPlayAv1(videoTestElement)) { mp4VideoCodecs.push('av1'); webmVideoCodecs.push('av1'); } if (canPlayVp8 || browser.tizen) { videoAudioCodecs.push('vorbis'); } if (webmVideoCodecs.length) { profile.DirectPlayProfiles.push({ Container: 'webm', Type: 'Video', VideoCodec: webmVideoCodecs.join(','), AudioCodec: webmAudioCodecs.join(',') }); } if (mp4VideoCodecs.length) { profile.DirectPlayProfiles.push({ Container: 'mp4,m4v', Type: 'Video', VideoCodec: mp4VideoCodecs.join(','), AudioCodec: videoAudioCodecs.join(',') }); } if (canPlayMkv && mp4VideoCodecs.length) { profile.DirectPlayProfiles.push({ Container: 'mkv', Type: 'Video', VideoCodec: 
mp4VideoCodecs.join(','), AudioCodec: videoAudioCodecs.join(',') }); } // These are formats we can't test for but some devices will support ['m2ts', 'wmv', 'ts', 'asf', 'avi', 'mpg', 'mpeg', 'flv', '3gp', 'mts', 'trp', 'vob', 'vro', 'mov'].map(function (container) { return getDirectPlayProfileForVideoContainer(container, videoAudioCodecs, videoTestElement, options); }).filter(function (i) { return i != null; }).forEach(function (i) { profile.DirectPlayProfiles.push(i); }); ['opus', 'mp3', 'mp2', 'aac', 'flac', 'alac', 'webma', 'wma', 'wav', 'ogg', 'oga'].filter(canPlayAudioFormat).forEach(function (audioFormat) { profile.DirectPlayProfiles.push({ Container: audioFormat, Type: 'Audio' }); // https://www.webmproject.org/about/faq/ if (audioFormat === 'opus' || audioFormat === 'webma') { profile.DirectPlayProfiles.push({ Container: 'webm', AudioCodec: audioFormat, Type: 'Audio' }); } // aac also appears in the m4a and m4b container // m4a/alac only works when using safari if (audioFormat === 'aac' || audioFormat === 'alac') { profile.DirectPlayProfiles.push({ Container: 'm4a', AudioCodec: audioFormat, Type: 'Audio' }); profile.DirectPlayProfiles.push({ Container: 'm4b', AudioCodec: audioFormat, Type: 'Audio' }); } }); profile.TranscodingProfiles = []; const hlsBreakOnNonKeyFrames = browser.iOS || browser.osx || browser.edge || !canPlayNativeHls() ? true : false; if (canPlayHls() && browser.enableHlsAudio !== false) { profile.TranscodingProfiles.push({ // hlsjs, edge, and android all seem to require ts container Container: !canPlayNativeHls() || browser.edge || browser.android ? 'ts' : 'aac', Type: 'Audio', AudioCodec: 'aac', Context: 'Streaming', Protocol: 'hls', MaxAudioChannels: physicalAudioChannels.toString(), MinSegments: browser.iOS || browser.osx ? '2' : '1', BreakOnNonKeyFrames: hlsBreakOnNonKeyFrames }); } // For streaming, prioritize opus transcoding after mp3/aac. It is too problematic with random failures // But for static (offline sync), it will be just fine. 
// Prioritize aac higher because the encoder can accept more channels than mp3 ['aac', 'mp3', 'opus', 'wav'].filter(canPlayAudioFormat).forEach(function (audioFormat) { profile.TranscodingProfiles.push({ Container: audioFormat, Type: 'Audio', AudioCodec: audioFormat, Context: 'Streaming', Protocol: 'http', MaxAudioChannels: physicalAudioChannels.toString() }); }); ['opus', 'mp3', 'aac', 'wav'].filter(canPlayAudioFormat).forEach(function (audioFormat) { profile.TranscodingProfiles.push({ Container: audioFormat, Type: 'Audio', AudioCodec: audioFormat, Context: 'Static', Protocol: 'http', MaxAudioChannels: physicalAudioChannels.toString() }); }); if (canPlayMkv && !browser.tizen && options.enableMkvProgressive !== false) { profile.TranscodingProfiles.push({ Container: 'mkv', Type: 'Video', AudioCodec: videoAudioCodecs.join(','), VideoCodec: mp4VideoCodecs.join(','), Context: 'Streaming', MaxAudioChannels: physicalAudioChannels.toString(), CopyTimestamps: true }); } if (canPlayMkv) { profile.TranscodingProfiles.push({ Container: 'mkv', Type: 'Video', AudioCodec: videoAudioCodecs.join(','), VideoCodec: mp4VideoCodecs.join(','), Context: 'Static', MaxAudioChannels: physicalAudioChannels.toString(), CopyTimestamps: true }); } if (canPlayHls() && options.enableHls !== false) { if (hlsInFmp4VideoCodecs.length && hlsInFmp4VideoAudioCodecs.length && userSettings.preferFmp4HlsContainer() && (browser.safari || browser.tizen || browser.web0s)) { profile.TranscodingProfiles.push({ Container: 'mp4', Type: 'Video', AudioCodec: hlsInFmp4VideoAudioCodecs.join(','), VideoCodec: hlsInFmp4VideoCodecs.join(','), Context: 'Streaming', Protocol: 'hls', MaxAudioChannels: physicalAudioChannels.toString(), MinSegments: browser.iOS || browser.osx ? '2' : '1', BreakOnNonKeyFrames: hlsBreakOnNonKeyFrames }); } if (hlsInTsVideoCodecs.length && hlsInTsVideoAudioCodecs.length) { profile.TranscodingProfiles.push({ Container: 'ts', Type: 'Video', AudioCodec: hlsInTsVideoAudioCodecs.join(','), VideoCodec: hlsInTsVideoCodecs.join(','), Context: 'Streaming', Protocol: 'hls', MaxAudioChannels: physicalAudioChannels.toString(), MinSegments: browser.iOS || browser.osx ? '2' : '1', BreakOnNonKeyFrames: hlsBreakOnNonKeyFrames }); } } if (webmAudioCodecs.length && webmVideoCodecs.length) { profile.TranscodingProfiles.push({ Container: 'webm', Type: 'Video', AudioCodec: webmAudioCodecs.join(','), // TODO: Remove workaround when servers migrate away from 'vpx' for transcoding profiles. VideoCodec: (canPlayVp8 ? 
webmVideoCodecs.concat('vpx') : webmVideoCodecs).join(','), Context: 'Streaming', Protocol: 'http', // If audio transcoding is needed, limit channels to number of physical audio channels // Trying to transcode to 5 channels when there are only 2 speakers generally does not sound good MaxAudioChannels: physicalAudioChannels.toString() }); } profile.TranscodingProfiles.push({ Container: 'mp4', Type: 'Video', AudioCodec: videoAudioCodecs.join(','), VideoCodec: 'h264', Context: 'Static', Protocol: 'http' }); profile.ContainerProfiles = []; profile.CodecProfiles = []; const supportsSecondaryAudio = browser.tizen || videoTestElement.audioTracks; const aacCodecProfileConditions = []; // Handle he-aac not supported if (!videoTestElement.canPlayType('video/mp4; codecs="avc1.640029, mp4a.40.5"').replace(/no/, '')) { // TODO: This needs to become part of the stream url in order to prevent stream copy aacCodecProfileConditions.push({ Condition: 'NotEquals', Property: 'AudioProfile', Value: 'HE-AAC' }); } if (!supportsSecondaryAudio) { aacCodecProfileConditions.push({ Condition: 'Equals', Property: 'IsSecondaryAudio', Value: 'false', IsRequired: false }); } if (aacCodecProfileConditions.length) { profile.CodecProfiles.push({ Type: 'VideoAudio', Codec: 'aac', Conditions: aacCodecProfileConditions }); } if (!supportsSecondaryAudio) { profile.CodecProfiles.push({ Type: 'VideoAudio', Conditions: [ { Condition: 'Equals', Property: 'IsSecondaryAudio', Value: 'false', IsRequired: false } ] }); } let maxH264Level = 42; let h264Profiles = 'high|main|baseline|constrained baseline'; if (browser.tizen || browser.web0s || videoTestElement.canPlayType('video/mp4; codecs="avc1.640833"').replace(/no/, '')) { maxH264Level = 51; } // Support H264 Level 52 (Tizen 5.0) - app only if (browser.tizenVersion >= 5 && window.NativeShell) { maxH264Level = 52; } if (browser.tizen || videoTestElement.canPlayType('video/mp4; codecs="avc1.6e0033"').replace(/no/, '')) { // These tests are passing in safari, but playback is failing if (!browser.safari && !browser.iOS && !browser.web0s && !browser.edge && !browser.mobile) { h264Profiles += '|high 10'; } } let maxHevcLevel = 120; let hevcProfiles = 'main'; // hevc main level 4.1 if (videoTestElement.canPlayType('video/mp4; codecs="hvc1.1.4.L123"').replace(/no/, '') || videoTestElement.canPlayType('video/mp4; codecs="hev1.1.4.L123"').replace(/no/, '')) { maxHevcLevel = 123; } // hevc main10 level 4.1 if (videoTestElement.canPlayType('video/mp4; codecs="hvc1.2.4.L123"').replace(/no/, '') || videoTestElement.canPlayType('video/mp4; codecs="hev1.2.4.L123"').replace(/no/, '')) { maxHevcLevel = 123; hevcProfiles = 'main|main 10'; } // hevc main10 level 5.1 if (videoTestElement.canPlayType('video/mp4; codecs="hvc1.2.4.L153"').replace(/no/, '') || videoTestElement.canPlayType('video/mp4; codecs="hev1.2.4.L153"').replace(/no/, '')) { maxHevcLevel = 153; hevcProfiles = 'main|main 10'; } // hevc main10 level 6.1 if (videoTestElement.canPlayType('video/mp4; codecs="hvc1.2.4.L183"').replace(/no/, '') || videoTestElement.canPlayType('video/mp4; codecs="hev1.2.4.L183"').replace(/no/, '')) { maxHevcLevel = 183; hevcProfiles = 'main|main 10'; } const h264CodecProfileConditions = [ { Condition: 'NotEquals', Property: 'IsAnamorphic', Value: 'true', IsRequired: false }, { Condition: 'EqualsAny', Property: 'VideoProfile', Value: h264Profiles, IsRequired: false }, { Condition: 'LessThanEqual', Property: 'VideoLevel', Value: maxH264Level.toString(), IsRequired: false } ]; const hevcCodecProfileConditions = [ 
{ Condition: 'NotEquals', Property: 'IsAnamorphic', Value: 'true', IsRequired: false }, { Condition: 'EqualsAny', Property: 'VideoProfile', Value: hevcProfiles, IsRequired: false }, { Condition: 'LessThanEqual', Property: 'VideoLevel', Value: maxHevcLevel.toString(), IsRequired: false } ]; if (!browser.edgeUwp && !browser.tizen && !browser.web0s) { h264CodecProfileConditions.push({ Condition: 'NotEquals', Property: 'IsInterlaced', Value: 'true', IsRequired: false }); hevcCodecProfileConditions.push({ Condition: 'NotEquals', Property: 'IsInterlaced', Value: 'true', IsRequired: false }); } if (maxVideoWidth) { h264CodecProfileConditions.push({ Condition: 'LessThanEqual', Property: 'Width', Value: maxVideoWidth.toString(), IsRequired: false }); hevcCodecProfileConditions.push({ Condition: 'LessThanEqual', Property: 'Width', Value: maxVideoWidth.toString(), IsRequired: false }); } const globalMaxVideoBitrate = (getGlobalMaxVideoBitrate() || '').toString(); const h264MaxVideoBitrate = globalMaxVideoBitrate; const hevcMaxVideoBitrate = globalMaxVideoBitrate; if (h264MaxVideoBitrate) { h264CodecProfileConditions.push({ Condition: 'LessThanEqual', Property: 'VideoBitrate', Value: h264MaxVideoBitrate, IsRequired: true }); } if (hevcMaxVideoBitrate) { hevcCodecProfileConditions.push({ Condition: 'LessThanEqual', Property: 'VideoBitrate', Value: hevcMaxVideoBitrate, IsRequired: true }); } // On iOS 12.x, for TS container max h264 level is 4.2 if (browser.iOS && browser.iOSVersion < 13) { const codecProfile = { Type: 'Video', Codec: 'h264', Container: 'ts', Conditions: h264CodecProfileConditions.filter((condition) => { return condition.Property !== 'VideoLevel'; }) }; codecProfile.Conditions.push({ Condition: 'LessThanEqual', Property: 'VideoLevel', Value: '42', IsRequired: false }); profile.CodecProfiles.push(codecProfile); } profile.CodecProfiles.push({ Type: 'Video', Codec: 'h264', Conditions: h264CodecProfileConditions }); profile.CodecProfiles.push({ Type: 'Video', Codec: 'hevc', Conditions: hevcCodecProfileConditions }); const globalVideoConditions = []; if (globalMaxVideoBitrate) { globalVideoConditions.push({ Condition: 'LessThanEqual', Property: 'VideoBitrate', Value: globalMaxVideoBitrate }); } if (maxVideoWidth) { globalVideoConditions.push({ Condition: 'LessThanEqual', Property: 'Width', Value: maxVideoWidth.toString(), IsRequired: false }); } if (globalVideoConditions.length) { profile.CodecProfiles.push({ Type: 'Video', Conditions: globalVideoConditions }); } // Subtitle profiles // External vtt or burn in profile.SubtitleProfiles = []; const subtitleBurninSetting = appSettings.get('subtitleburnin'); if (subtitleBurninSetting !== 'all') { if (supportsTextTracks()) { profile.SubtitleProfiles.push({ Format: 'vtt', Method: 'External' }); } if (options.enableSsaRender !== false && !options.isRetry && subtitleBurninSetting !== 'allcomplexformats') { profile.SubtitleProfiles.push({ Format: 'ass', Method: 'External' }); profile.SubtitleProfiles.push({ Format: 'ssa', Method: 'External' }); } } profile.ResponseProfiles = []; profile.ResponseProfiles.push({ Type: 'Video', Container: 'm4v', MimeType: 'video/mp4' }); return profile; } /* eslint-enable indent */
1
19,587
It is not a "perfect" test, but: the webOS 1.2 emulator says `probably`, while the webOS 3 and webOS 4 emulators return an empty string. Why did you remove `options.supportsDts`?
jellyfin-jellyfin-web
js
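For context, here is a minimal sketch of the check the reviewer is defending, taken from the pre-patch code in oldf above. `detectDtsSupport` is a hypothetical wrapper name (the original lives inline in the profile builder); `browser`, `options`, and `videoTestElement` come straight from the file. Because the `canPlayType` probes return an empty string on the webOS emulators, the explicit `browser.web0s` and `options.supportsDts` escape hatches are what keep DTS enabled there:

```js
// Sketch of the pre-patch DTS detection: the canPlayType() probes alone
// report '' on the webOS emulators, so the platform flags are kept.
function detectDtsSupport(videoTestElement, options) {
    let supportsDts = browser.tizen
        || browser.web0s               // webOS: canPlayType gives '' even though DTS plays
        || options.supportsDts         // explicit override from the hosting app
        || videoTestElement.canPlayType('video/mp4; codecs="dts-"').replace(/no/, '')
        || videoTestElement.canPlayType('video/mp4; codecs="dts+"').replace(/no/, '');

    // DTS audio not supported in 2018 models (Tizen 4.0)
    if (browser.tizenVersion >= 4) {
        supportsDts = false;
    }
    return supportsDts;
}
```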
@@ -152,6 +152,8 @@ hipError_t ihipEnablePeerAccess(hipCtx_t peerCtx, unsigned int flags) { //--- hipError_t hipMemcpyPeer(void* dst, hipCtx_t dstCtx, const void* src, hipCtx_t srcCtx, size_t sizeBytes) { + if(dstCtx == srcCtx) return hipErrorInvalidValue; + HIP_INIT_API(NONE, dst, dstCtx, src, srcCtx, sizeBytes); // TODO - move to ihip memory copy implementaion.
1
/* Copyright (c) 2015 - present Advanced Micro Devices, Inc. All rights reserved. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include <hc_am.hpp> #include "hip/hip_runtime.h" #include "hip_hcc_internal.h" #include "trace_helper.h" // Peer access functions. // There are two flavors: // - one where contexts are specified with hipCtx_t type. // - one where contexts are specified with integer deviceIds, that are mapped to the primary // context for that device. // The implementation contains a set of internal ihip* functions which operate on contexts. Then // the public APIs are thin wrappers which call into this internal implementations. // TODO - actually not yet - currently the integer deviceId flavors just call the context APIs. need // to fix. hipError_t ihipDeviceCanAccessPeer(int* canAccessPeer, hipCtx_t thisCtx, hipCtx_t peerCtx) { hipError_t err = hipSuccess; if(canAccessPeer == NULL) { err = hipErrorInvalidValue; } else if ((thisCtx != NULL) && (peerCtx != NULL)) { if (thisCtx == peerCtx) { *canAccessPeer = 0; tprintf(DB_MEM, "Can't be peer to self. (this=%s, peer=%s)\n", thisCtx->toString().c_str(), peerCtx->toString().c_str()); } else if (HIP_FORCE_P2P_HOST & 0x2) { *canAccessPeer = false; tprintf(DB_MEM, "HIP_FORCE_P2P_HOST denies peer access this=%s peer=%s canAccessPeer=%d\n", thisCtx->toString().c_str(), peerCtx->toString().c_str(), *canAccessPeer); } else { *canAccessPeer = peerCtx->getDevice()->_acc.get_is_peer(thisCtx->getDevice()->_acc); tprintf(DB_MEM, "deviceCanAccessPeer this=%s peer=%s canAccessPeer=%d\n", thisCtx->toString().c_str(), peerCtx->toString().c_str(), *canAccessPeer); } } else { *canAccessPeer = 0; err = hipErrorInvalidDevice; } return err; } /** * HCC returns 0 in *canAccessPeer ; Need to update this function when RT supports P2P */ //--- hipError_t hipDeviceCanAccessPeer(int* canAccessPeer, hipCtx_t thisCtx, hipCtx_t peerCtx) { HIP_INIT_API(NONE, canAccessPeer, thisCtx, peerCtx); return ihipLogStatus(ihipDeviceCanAccessPeer(canAccessPeer, thisCtx, peerCtx)); } //--- // Disable visibility of this device into memory allocated on peer device. // Remove this device from peer device peerlist. hipError_t ihipDisablePeerAccess(hipCtx_t peerCtx) { hipError_t err = hipSuccess; auto thisCtx = ihipGetTlsDefaultCtx(); if ((thisCtx != NULL) && (peerCtx != NULL)) { bool canAccessPeer = peerCtx->getDevice()->_acc.get_is_peer(thisCtx->getDevice()->_acc); if (!canAccessPeer) { err = hipErrorInvalidDevice; // P2P not allowed between these devices. 
} else if (thisCtx == peerCtx) { err = hipErrorInvalidDevice; // Can't disable peer access to self. } else { LockedAccessor_CtxCrit_t peerCrit(peerCtx->criticalData()); bool changed = peerCrit->removePeerWatcher(peerCtx, thisCtx); if (changed) { tprintf(DB_MEM, "device %s disable access to memory allocated on peer:%s\n", thisCtx->toString().c_str(), peerCtx->toString().c_str()); // Update the peers for all memory already saved in the tracker: am_memtracker_update_peers(peerCtx->getDevice()->_acc, peerCrit->peerCnt(), peerCrit->peerAgents()); } else { err = hipErrorPeerAccessNotEnabled; // never enabled P2P access. } } } else { err = hipErrorInvalidDevice; } return err; }; //--- // Allow the current device to see all memory allocated on peerCtx. // This should add this device to the peer-device peer list. hipError_t ihipEnablePeerAccess(hipCtx_t peerCtx, unsigned int flags) { hipError_t err = hipSuccess; if (flags != 0) { err = hipErrorInvalidValue; } else { auto thisCtx = ihipGetTlsDefaultCtx(); if (thisCtx == peerCtx) { err = hipErrorInvalidDevice; // Can't enable peer access to self. } else if ((thisCtx != NULL) && (peerCtx != NULL)) { LockedAccessor_CtxCrit_t peerCrit(peerCtx->criticalData()); // Add thisCtx to peerCtx's access list so that new allocations on peer will be made // visible to this device: bool isNewPeer = peerCrit->addPeerWatcher(peerCtx, thisCtx); if (isNewPeer) { tprintf(DB_MEM, "device=%s can now see all memory allocated on peer=%s\n", thisCtx->toString().c_str(), peerCtx->toString().c_str()); am_memtracker_update_peers(peerCtx->getDevice()->_acc, peerCrit->peerCnt(), peerCrit->peerAgents()); } else { err = hipErrorPeerAccessAlreadyEnabled; } } else { err = hipErrorInvalidDevice; } } return err; } //--- hipError_t hipMemcpyPeer(void* dst, hipCtx_t dstCtx, const void* src, hipCtx_t srcCtx, size_t sizeBytes) { HIP_INIT_API(NONE, dst, dstCtx, src, srcCtx, sizeBytes); // TODO - move to ihip memory copy implementaion. // HCC has a unified memory architecture so device specifiers are not required. return ihipLogStatus(hipMemcpy(dst, src, sizeBytes, hipMemcpyDefault)); }; //--- hipError_t hipMemcpyPeerAsync(void* dst, hipCtx_t dstDevice, const void* src, hipCtx_t srcDevice, size_t sizeBytes, hipStream_t stream) { HIP_INIT_API(NONE, dst, dstDevice, src, srcDevice, sizeBytes, stream); // TODO - move to ihip memory copy implementaion. // HCC has a unified memory architecture so device specifiers are not required. return ihipLogStatus(hip_internal::memcpyAsync(dst, src, sizeBytes, hipMemcpyDefault, stream)); }; //============================================================================= // These are the flavors that accept integer deviceIDs. // Implementations map these to primary contexts and call the internal functions above. 
//============================================================================= hipError_t hipDeviceCanAccessPeer(int* canAccessPeer, int deviceId, int peerDeviceId) { HIP_INIT_API(hipDeviceCanAccessPeer, canAccessPeer, deviceId, peerDeviceId); return ihipLogStatus(ihipDeviceCanAccessPeer(canAccessPeer, ihipGetPrimaryCtx(deviceId), ihipGetPrimaryCtx(peerDeviceId))); } hipError_t hipDeviceDisablePeerAccess(int peerDeviceId) { HIP_INIT_API(hipDeviceDisablePeerAccess, peerDeviceId); return ihipLogStatus(ihipDisablePeerAccess(ihipGetPrimaryCtx(peerDeviceId))); } hipError_t hipDeviceEnablePeerAccess(int peerDeviceId, unsigned int flags) { HIP_INIT_API(hipDeviceEnablePeerAccess, peerDeviceId, flags); return ihipLogStatus(ihipEnablePeerAccess(ihipGetPrimaryCtx(peerDeviceId), flags)); } hipError_t hipMemcpyPeer(void* dst, int dstDevice, const void* src, int srcDevice, size_t sizeBytes) { HIP_INIT_API(hipMemcpyPeer, dst, dstDevice, src, srcDevice, sizeBytes); return ihipLogStatus(hipMemcpyPeer(dst, ihipGetPrimaryCtx(dstDevice), src, ihipGetPrimaryCtx(srcDevice), sizeBytes)); } hipError_t hipMemcpyPeerAsync(void* dst, int dstDevice, const void* src, int srcDevice, size_t sizeBytes, hipStream_t stream) { HIP_INIT_API(hipMemcpyPeerAsync, dst, dstDevice, src, srcDevice, sizeBytes, stream); return ihipLogStatus(hip_internal::memcpyAsync(dst, src, sizeBytes, hipMemcpyDefault, stream)); } hipError_t hipCtxEnablePeerAccess(hipCtx_t peerCtx, unsigned int flags) { HIP_INIT_API(hipCtxEnablePeerAccess, peerCtx, flags); return ihipLogStatus(ihipEnablePeerAccess(peerCtx, flags)); } hipError_t hipCtxDisablePeerAccess(hipCtx_t peerCtx) { HIP_INIT_API(hipCtxDisablePeerAccess, peerCtx); return ihipLogStatus(ihipDisablePeerAccess(peerCtx)); }
1
7,839
This check should come after HIP_INIT_API to ensure HIP tracing and lazy init work correctly. Also, the return should be wrapped in ihipLogStatus() so that logging works correctly.
ROCm-Developer-Tools-HIP
cpp
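A minimal sketch of the ordering the review asks for, assuming the macros behave as in the file above: HIP_INIT_API runs first so tracing and lazy initialization happen, and the early return is wrapped in ihipLogStatus() so the error is recorded. This is the suggested shape, not the merged code:

```cpp
hipError_t hipMemcpyPeer(void* dst, hipCtx_t dstCtx, const void* src, hipCtx_t srcCtx,
                         size_t sizeBytes) {
    // Tracing and lazy initialization must run before any early return.
    HIP_INIT_API(NONE, dst, dstCtx, src, srcCtx, sizeBytes);

    if (dstCtx == srcCtx) {
        // Wrap the status so the HIP logging machinery records the error.
        return ihipLogStatus(hipErrorInvalidValue);
    }

    // HCC has a unified memory architecture so device specifiers are not required.
    return ihipLogStatus(hipMemcpy(dst, src, sizeBytes, hipMemcpyDefault));
}
```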
@@ -54,7 +54,14 @@ class ReIndexTask extends AbstractTask * * @var array */ - protected $indexingConfigurationsToReIndex = array(); + protected $indexingConfigurationsToReIndex = []; + + /** + * Clear Index for selected sites and record types + * + * @var boolean + */ + protected $clearSearchIndex = false; /**
1
<?php namespace ApacheSolrForTypo3\Solr\Task; /*************************************************************** * Copyright notice * * (c) 2011-2015 Christoph Moeller <[email protected]> * (c) 2012-2015 Ingo Renner <[email protected]> * * All rights reserved * * This script is part of the TYPO3 project. The TYPO3 project is * free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * The GNU General Public License can be found at * http://www.gnu.org/copyleft/gpl.html. * * This script is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * This copyright notice MUST APPEAR in all copies of the script! ***************************************************************/ use ApacheSolrForTypo3\Solr\IndexQueue\Queue; use ApacheSolrForTypo3\Solr\Site; use TYPO3\CMS\Core\Utility\GeneralUtility; use TYPO3\CMS\Scheduler\Task\AbstractTask; /** * Scheduler task to empty the indexes of a site and re-initialize the * Solr Index Queue thus making the indexer re-index the site. * * @author Christoph Moeller <[email protected]> * @package TYPO3 * @subpackage solr */ class ReIndexTask extends AbstractTask { /** * The site this task is supposed to initialize the index queue for. * * @var Site */ protected $site; /** * Indexing configurations to re-initialize. * * @var array */ protected $indexingConfigurationsToReIndex = array(); /** * Purges/commits all Solr indexes, initializes the Index Queue * and returns TRUE if the execution was successful * * @return boolean Returns TRUE on success, FALSE on failure. */ public function execute() { // clean up $cleanUpResult = $this->cleanUpIndex(); // initialize for re-indexing $indexQueue = GeneralUtility::makeInstance('ApacheSolrForTypo3\\Solr\\IndexQueue\\Queue'); $indexQueueInitializationResults = array(); foreach ($this->indexingConfigurationsToReIndex as $indexingConfigurationName) { $indexQueueInitializationResults = $indexQueue->initialize($this->site, $indexingConfigurationName); } return ($cleanUpResult && !in_array(false, $indexQueueInitializationResults)); } /** * Removes documents of the selected types from the index. * * @return bool TRUE if clean up was successful, FALSE on error */ protected function cleanUpIndex() { $cleanUpResult = true; $solrConfiguration = $this->site->getSolrConfiguration(); $solrServers = GeneralUtility::makeInstance('ApacheSolrForTypo3\\Solr\\ConnectionManager')->getConnectionsBySite($this->site); $typesToCleanUp = array(); foreach ($this->indexingConfigurationsToReIndex as $indexingConfigurationName) { $type = Queue::getTableToIndexByIndexingConfigurationName( $solrConfiguration, $indexingConfigurationName ); $typesToCleanUp[] = $type; } foreach ($solrServers as $solrServer) { // make sure not-yet committed documents are removed, too $solrServer->commit(); $deleteQuery = 'type:(' . implode(' OR ', $typesToCleanUp) . ')' . ' AND siteHash:' . $this->site->getSiteHash(); $solrServer->deleteByQuery($deleteQuery); $response = $solrServer->commit(false, false, false); if ($response->getHttpStatus() != 200) { $cleanUpResult = false; break; } } return $cleanUpResult; } /** * Gets the site / the site's root page uid this task is running on. 
* * @return Site The site's root page uid this task is optimizing */ public function getSite() { return $this->site; } /** * Sets the task's site. * * @param Site $site The site to be handled by this task */ public function setSite(Site $site) { $this->site = $site; } /** * Gets the indexing configurations to re-index. * * @return array */ public function getIndexingConfigurationsToReIndex() { return $this->indexingConfigurationsToReIndex; } /** * Sets the indexing configurations to re-index. * * @param array $indexingConfigurationsToReIndex */ public function setIndexingConfigurationsToReIndex( array $indexingConfigurationsToReIndex ) { $this->indexingConfigurationsToReIndex = $indexingConfigurationsToReIndex; } /** * This method is designed to return some additional information about the task, * that may help to set it apart from other tasks from the same class * This additional information is used - for example - in the Scheduler's BE module * This method should be implemented in most task classes * * @return string Information to display */ public function getAdditionalInformation() { $information = ''; if ($this->site) { $information = 'Site: ' . $this->site->getLabel(); } if (!empty($this->indexingConfigurationsToReIndex)) { $information .= ', Indexing Configurations: ' . implode(', ', $this->indexingConfigurationsToReIndex); } return $information; } }
1
6,074
Can't use short array syntax for the 3.1 release branch.
TYPO3-Solr-ext-solr
php
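Per the review, a sketch of how the same hunk could look on the 3.1 release branch, keeping the long array() syntax already used in the class while still adding the new $clearSearchIndex flag from the patch:

```php
    /**
     * Indexing configurations to re-initialize.
     *
     * @var array
     */
    protected $indexingConfigurationsToReIndex = array();

    /**
     * Clear Index for selected sites and record types
     *
     * @var boolean
     */
    protected $clearSearchIndex = false;
```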
@@ -2103,7 +2103,8 @@ static pmix_status_t server_switchyard(pmix_peer_t *peer, uint32_t tag, PMIX_ERROR_LOG(rc); return rc; } - pmix_output_verbose(2, pmix_globals.debug_output, + // pmix_output_verbose(2, pmix_globals.debug_output, + pmix_output(0, "recvd pmix cmd %d from %s:%u", cmd, peer->info->pname.nspace, peer->info->pname.rank);
1
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */ /* * Copyright (c) 2014-2017 Intel, Inc. All rights reserved. * Copyright (c) 2014-2017 Research Organization for Information Science * and Technology (RIST). All rights reserved. * Copyright (c) 2014-2015 Artem Y. Polyakov <[email protected]>. * All rights reserved. * Copyright (c) 2016 Mellanox Technologies, Inc. * All rights reserved. * Copyright (c) 2016 IBM Corporation. All rights reserved. * $COPYRIGHT$ * * Additional copyrights may follow * * $HEADER$ */ #include <src/include/pmix_config.h> #include <src/include/types.h> #include <src/include/pmix_stdint.h> #include <src/include/pmix_socket_errno.h> #include <pmix_server.h> #include <pmix_common.h> #include <pmix_rename.h> #include "src/include/pmix_globals.h" #ifdef HAVE_STRING_H #include <string.h> #endif #include <fcntl.h> #ifdef HAVE_UNISTD_H #include <unistd.h> #endif #ifdef HAVE_SYS_SOCKET_H #include <sys/socket.h> #endif #ifdef HAVE_SYS_UN_H #include <sys/un.h> #endif #ifdef HAVE_SYS_UIO_H #include <sys/uio.h> #endif #ifdef HAVE_SYS_TYPES_H #include <sys/types.h> #endif #include <ctype.h> #include <sys/stat.h> #include PMIX_EVENT_HEADER #include PMIX_EVENT2_THREAD_HEADER #include "src/util/argv.h" #include "src/util/error.h" #include "src/util/output.h" #include "src/util/pmix_environ.h" #include "src/util/show_help.h" #include "src/mca/base/base.h" #include "src/mca/base/pmix_mca_base_var.h" #include "src/mca/pinstalldirs/base/base.h" #include "src/mca/pnet/pnet.h" #include "src/runtime/pmix_progress_threads.h" #include "src/runtime/pmix_rte.h" #include "src/mca/bfrops/base/base.h" #include "src/mca/gds/base/base.h" #include "src/mca/preg/preg.h" #include "src/mca/ptl/base/base.h" /* the server also needs access to client operations * as it can, and often does, behave as a client */ #include "src/client/pmix_client_ops.h" #include "pmix_server_ops.h" // global variables pmix_server_globals_t pmix_server_globals = {{{0}}}; // local variables static char *security_mode = NULL; static char *ptl_mode = NULL; static char *bfrops_mode = NULL; static char *gds_mode = NULL; static pid_t mypid; // local functions for connection support static void server_message_handler(struct pmix_peer_t *pr, pmix_ptl_hdr_t *hdr, pmix_buffer_t *buf, void *cbdata); static inline int _my_client(const char *nspace, pmix_rank_t rank); PMIX_EXPORT pmix_status_t PMIx_server_init(pmix_server_module_t *module, pmix_info_t info[], size_t ninfo) { pmix_ptl_posted_recv_t *req; pmix_status_t rc; size_t n, m; pmix_kval_t *kv; bool protect, nspace_given = false, rank_given = false; pmix_info_t ginfo; char *protected[] = { PMIX_USERID, PMIX_GRPID, PMIX_SOCKET_MODE, PMIX_SERVER_TOOL_SUPPORT, PMIX_SERVER_SYSTEM_SUPPORT, NULL }; char *evar; pmix_rank_info_t *rinfo; PMIX_ACQUIRE_THREAD(&pmix_global_lock); pmix_output_verbose(2, pmix_globals.debug_output, "pmix:server init called"); /* setup the runtime - this init's the globals, * opens and initializes the required frameworks */ if (PMIX_SUCCESS != (rc = pmix_rte_init(PMIX_PROC_SERVER, info, ninfo, NULL))) { PMIX_ERROR_LOG(rc); PMIX_RELEASE_THREAD(&pmix_global_lock); return rc; } /* setup the server-specific globals */ PMIX_CONSTRUCT(&pmix_server_globals.clients, pmix_pointer_array_t); pmix_pointer_array_init(&pmix_server_globals.clients, 1, INT_MAX, 1); PMIX_CONSTRUCT(&pmix_server_globals.collectives, pmix_list_t); PMIX_CONSTRUCT(&pmix_server_globals.remote_pnd, pmix_list_t); PMIX_CONSTRUCT(&pmix_server_globals.gdata, pmix_list_t); 
PMIX_CONSTRUCT(&pmix_server_globals.events, pmix_list_t); PMIX_CONSTRUCT(&pmix_server_globals.local_reqs, pmix_list_t); PMIX_CONSTRUCT(&pmix_server_globals.nspaces, pmix_list_t); pmix_output_verbose(2, pmix_globals.debug_output, "pmix:server init called"); /* setup the function pointers */ memset(&pmix_host_server, 0, sizeof(pmix_server_module_t)); pmix_host_server = *module; /* assign our internal bfrops module */ pmix_globals.mypeer->nptr->compat.bfrops = pmix_bfrops_base_assign_module(NULL); if (NULL == pmix_globals.mypeer->nptr->compat.bfrops) { PMIX_ERROR_LOG(rc); PMIX_RELEASE_THREAD(&pmix_global_lock); return rc; } /* and set our buffer type */ pmix_globals.mypeer->nptr->compat.type = pmix_bfrops_globals.default_type; /* assign our internal security module */ pmix_globals.mypeer->nptr->compat.psec = pmix_psec_base_assign_module(NULL); if (NULL == pmix_globals.mypeer->nptr->compat.psec) { PMIX_ERROR_LOG(rc); PMIX_RELEASE_THREAD(&pmix_global_lock); return rc; } /* assign our internal ptl module */ pmix_globals.mypeer->nptr->compat.ptl = pmix_ptl_base_assign_module(); if (NULL == pmix_globals.mypeer->nptr->compat.ptl) { PMIX_ERROR_LOG(rc); PMIX_RELEASE_THREAD(&pmix_global_lock); return rc; } /* assign our internal gds module */ PMIX_INFO_LOAD(&ginfo, PMIX_GDS_MODULE, "hash", PMIX_STRING); pmix_globals.mypeer->nptr->compat.gds = pmix_gds_base_assign_module(&ginfo, 1); if (NULL == pmix_globals.mypeer->nptr->compat.gds) { PMIX_ERROR_LOG(rc); PMIX_RELEASE_THREAD(&pmix_global_lock); return rc; } /* copy need parts over to the client_globals.myserver field * so that calls into client-side functions will use our peer */ pmix_client_globals.myserver = PMIX_NEW(pmix_peer_t); PMIX_RETAIN(pmix_globals.mypeer->nptr); pmix_client_globals.myserver->nptr = pmix_globals.mypeer->nptr; /* construct the global notification ring buffer */ PMIX_CONSTRUCT(&pmix_globals.notifications, pmix_ring_buffer_t); pmix_ring_buffer_init(&pmix_globals.notifications, 256); /* get our available security modules */ security_mode = pmix_psec_base_get_available_modules(); /* get our available ptl modules */ ptl_mode = pmix_ptl_base_get_available_modules(); /* get our available bfrop modules */ bfrops_mode = pmix_bfrops_base_get_available_modules(); /* get available gds modules */ gds_mode = pmix_gds_base_get_available_modules(); /* check the info keys for info we * need to provide to every client and * directives aimed at us */ if (NULL != info) { for (n=0; n < ninfo; n++) { if (0 == strncmp(info[n].key, PMIX_SERVER_NSPACE, PMIX_MAX_KEYLEN)) { (void)strncpy(pmix_globals.myid.nspace, info[n].value.data.string, PMIX_MAX_NSLEN); nspace_given = true; continue; } if (0 == strncmp(info[n].key, PMIX_SERVER_RANK, PMIX_MAX_KEYLEN)) { pmix_globals.myid.rank = info[n].value.data.rank; rank_given = true; continue; } /* check the list of protected keys */ protect = false; for (m=0; NULL != protected[m]; m++) { if (0 == strcmp(info[n].key, protected[m])) { protect = true; break; } } if (protect) { continue; } /* store and pass along to every client */ kv = PMIX_NEW(pmix_kval_t); kv->key = strdup(info[n].key); PMIX_VALUE_CREATE(kv->value, 1); PMIX_BFROPS_VALUE_XFER(rc, pmix_globals.mypeer, kv->value, &info[n].value); if (PMIX_SUCCESS != rc) { PMIX_RELEASE(kv); PMIX_ERROR_LOG(rc); PMIX_RELEASE_THREAD(&pmix_global_lock); return rc; } pmix_list_append(&pmix_server_globals.gdata, &kv->super); } } if (!nspace_given) { /* look for our namespace, if one was given */ if (NULL == (evar = getenv("PMIX_SERVER_NAMESPACE"))) { /* use a fake namespace 
*/ (void)strncpy(pmix_globals.myid.nspace, "pmix-server", PMIX_MAX_NSLEN); } else { (void)strncpy(pmix_globals.myid.nspace, evar, PMIX_MAX_NSLEN); } } if (!rank_given) { /* look for our rank, if one was given */ mypid = getpid(); if (NULL == (evar = getenv("PMIX_SERVER_RANK"))) { /* use our pid */ pmix_globals.myid.rank = mypid; } else { pmix_globals.myid.rank = strtol(evar, NULL, 10); } } /* copy it into mypeer entries */ if (NULL == pmix_globals.mypeer->info) { rinfo = PMIX_NEW(pmix_rank_info_t); pmix_globals.mypeer->info = rinfo; } else { rinfo = pmix_globals.mypeer->info; } if (NULL == pmix_globals.mypeer->nptr) { pmix_globals.mypeer->nptr = PMIX_NEW(pmix_nspace_t); /* ensure our own nspace is first on the list */ PMIX_RETAIN(pmix_globals.mypeer->nptr); pmix_list_prepend(&pmix_server_globals.nspaces, &pmix_globals.mypeer->nptr->super); } pmix_globals.mypeer->nptr->nspace = strdup(pmix_globals.myid.nspace); rinfo->pname.nspace = strdup(pmix_globals.mypeer->nptr->nspace); rinfo->pname.rank = pmix_globals.myid.rank; rinfo->uid = pmix_globals.uid; rinfo->gid = pmix_globals.gid; PMIX_RETAIN(pmix_globals.mypeer->info); pmix_client_globals.myserver->info = pmix_globals.mypeer->info; /* setup the wildcard recv for inbound messages from clients */ req = PMIX_NEW(pmix_ptl_posted_recv_t); req->tag = UINT32_MAX; req->cbfunc = server_message_handler; /* add it to the end of the list of recvs */ pmix_list_append(&pmix_ptl_globals.posted_recvs, &req->super); /* start listening for connections */ if (PMIX_SUCCESS != pmix_ptl_base_start_listening(info, ninfo)) { pmix_show_help("help-pmix-server.txt", "listener-thread-start", true); PMIx_server_finalize(); PMIX_RELEASE_THREAD(&pmix_global_lock); return PMIX_ERR_INIT; } ++pmix_globals.init_cntr; PMIX_RELEASE_THREAD(&pmix_global_lock); return PMIX_SUCCESS; } PMIX_EXPORT pmix_status_t PMIx_server_finalize(void) { int i; pmix_peer_t *peer; PMIX_ACQUIRE_THREAD(&pmix_global_lock); if (pmix_globals.init_cntr <= 0) { PMIX_RELEASE_THREAD(&pmix_global_lock); return PMIX_ERR_INIT; } if (1 != pmix_globals.init_cntr) { --pmix_globals.init_cntr; PMIX_RELEASE_THREAD(&pmix_global_lock); return PMIX_SUCCESS; } pmix_globals.init_cntr = 0; PMIX_RELEASE_THREAD(&pmix_global_lock); pmix_output_verbose(2, pmix_globals.debug_output, "pmix:server finalize called"); if (!pmix_globals.external_evbase) { /* stop the progress thread, but leave the event base * still constructed. 
This will allow us to safely * tear down the infrastructure, including removal * of any events objects may be holding */ (void)pmix_progress_thread_pause(NULL); } pmix_ptl_base_stop_listening(); for (i=0; i < pmix_server_globals.clients.size; i++) { if (NULL != (peer = (pmix_peer_t*)pmix_pointer_array_get_item(&pmix_server_globals.clients, i))) { PMIX_RELEASE(peer); } } PMIX_DESTRUCT(&pmix_server_globals.clients); PMIX_LIST_DESTRUCT(&pmix_server_globals.collectives); PMIX_LIST_DESTRUCT(&pmix_server_globals.remote_pnd); PMIX_LIST_DESTRUCT(&pmix_server_globals.local_reqs); PMIX_LIST_DESTRUCT(&pmix_server_globals.gdata); PMIX_LIST_DESTRUCT(&pmix_server_globals.events); PMIX_LIST_DESTRUCT(&pmix_server_globals.nspaces); if (NULL != security_mode) { free(security_mode); } if (NULL != ptl_mode) { free(ptl_mode); } if (NULL != bfrops_mode) { free(bfrops_mode); } if (NULL != gds_mode) { free(gds_mode); } pmix_rte_finalize(); pmix_output_verbose(2, pmix_globals.debug_output, "pmix:server finalize complete"); return PMIX_SUCCESS; } static void _register_nspace(int sd, short args, void *cbdata) { pmix_setup_caddy_t *cd = (pmix_setup_caddy_t*)cbdata; pmix_nspace_t *nptr, *tmp; pmix_status_t rc; size_t i; PMIX_ACQUIRE_OBJECT(caddy); pmix_output_verbose(2, pmix_globals.debug_output, "pmix:server _register_nspace %s", cd->proc.nspace); /* see if we already have this nspace */ nptr = NULL; PMIX_LIST_FOREACH(tmp, &pmix_server_globals.nspaces, pmix_nspace_t) { if (0 == strcmp(tmp->nspace, cd->proc.nspace)) { nptr = tmp; break; } } if (NULL == nptr) { nptr = PMIX_NEW(pmix_nspace_t); if (NULL == nptr) { rc = PMIX_ERR_NOMEM; goto release; } nptr->nspace = strdup(cd->proc.nspace); pmix_list_append(&pmix_server_globals.nspaces, &nptr->super); } nptr->nlocalprocs = cd->nlocalprocs; /* see if we have everyone */ if (nptr->nlocalprocs == pmix_list_get_size(&nptr->ranks)) { nptr->all_registered = true; } /* check info directives to see if we want to store this info */ for (i=0; i < cd->ninfo; i++) { if (0 == strcmp(cd->info[i].key, PMIX_REGISTER_NODATA)) { /* nope - so we are done */ rc = PMIX_SUCCESS; goto release; } } /* register nspace for each activate components */ PMIX_GDS_ADD_NSPACE(rc, nptr->nspace, cd->info, cd->ninfo); if (PMIX_SUCCESS != rc) { goto release; } /* store this data in our own GDS module - we will retrieve * it later so it can be passed down to the launched procs * once they connect to us and we know what GDS module they * are using */ PMIX_GDS_CACHE_JOB_INFO(rc, pmix_globals.mypeer, nptr, cd->info, cd->ninfo); release: if (NULL != cd->opcbfunc) { cd->opcbfunc(rc, cd->cbdata); } PMIX_RELEASE(cd); } /* setup the data for a job */ PMIX_EXPORT pmix_status_t PMIx_server_register_nspace(const char nspace[], int nlocalprocs, pmix_info_t info[], size_t ninfo, pmix_op_cbfunc_t cbfunc, void *cbdata) { pmix_setup_caddy_t *cd; PMIX_ACQUIRE_THREAD(&pmix_global_lock); if (pmix_globals.init_cntr <= 0) { PMIX_RELEASE_THREAD(&pmix_global_lock); return PMIX_ERR_INIT; } PMIX_RELEASE_THREAD(&pmix_global_lock); cd = PMIX_NEW(pmix_setup_caddy_t); (void)strncpy(cd->proc.nspace, nspace, PMIX_MAX_NSLEN); cd->nlocalprocs = nlocalprocs; cd->opcbfunc = cbfunc; cd->cbdata = cbdata; /* copy across the info array, if given */ if (0 < ninfo) { cd->ninfo = ninfo; cd->info = info; } /* we have to push this into our event library to avoid * potential threading issues */ PMIX_THREADSHIFT(cd, _register_nspace); return PMIX_SUCCESS; } static void _deregister_nspace(int sd, short args, void *cbdata) { pmix_setup_caddy_t *cd = 
(pmix_setup_caddy_t*)cbdata; pmix_nspace_t *tmp; pmix_status_t rc; PMIX_ACQUIRE_OBJECT(cd); pmix_output_verbose(2, pmix_globals.debug_output, "pmix:server _deregister_nspace %s", cd->proc.nspace); /* see if we already have this nspace */ PMIX_LIST_FOREACH(tmp, &pmix_server_globals.nspaces, pmix_nspace_t) { if (0 == strcmp(tmp->nspace, cd->proc.nspace)) { pmix_list_remove_item(&pmix_server_globals.nspaces, &tmp->super); PMIX_RELEASE(tmp); break; } } /* let our local storage clean up */ PMIX_GDS_DEL_NSPACE(rc, cd->proc.nspace); if (PMIX_SUCCESS != rc) { PMIX_ERROR_LOG(rc); } /* release any job-level messaging resources */ pmix_pnet.local_app_finalized(cd->proc.nspace); /* release the caller */ if (NULL != cd->opcbfunc) { cd->opcbfunc(rc, cd->cbdata); } PMIX_RELEASE(cd); } PMIX_EXPORT void PMIx_server_deregister_nspace(const char nspace[], pmix_op_cbfunc_t cbfunc, void *cbdata) { pmix_setup_caddy_t *cd; pmix_output_verbose(2, pmix_globals.debug_output, "pmix:server deregister nspace %s", nspace); PMIX_ACQUIRE_THREAD(&pmix_global_lock); if (pmix_globals.init_cntr <= 0) { PMIX_RELEASE_THREAD(&pmix_global_lock); if (NULL != cbfunc) { cbfunc(PMIX_ERR_INIT, cbdata); } return; } PMIX_RELEASE_THREAD(&pmix_global_lock); cd = PMIX_NEW(pmix_setup_caddy_t); (void)strncpy(cd->proc.nspace, nspace, PMIX_MAX_NSLEN); cd->opcbfunc = cbfunc; cd->cbdata = cbdata; /* we have to push this into our event library to avoid * potential threading issues */ PMIX_THREADSHIFT(cd, _deregister_nspace); } void pmix_server_execute_collective(int sd, short args, void *cbdata) { pmix_trkr_caddy_t *tcd = (pmix_trkr_caddy_t*)cbdata; pmix_server_trkr_t *trk = tcd->trk; pmix_server_caddy_t *cd; pmix_peer_t *peer; char *data = NULL; size_t sz = 0; pmix_byte_object_t bo; pmix_buffer_t bucket, pbkt; pmix_kval_t *kv; pmix_proc_t proc; bool first; pmix_status_t rc; pmix_list_t pnames; pmix_namelist_t *pn; bool found; pmix_cb_t cb; PMIX_ACQUIRE_OBJECT(tcd); /* we don't need to check for non-NULL APIs here as * that was already done when the tracker was created */ if (PMIX_FENCENB_CMD == trk->type) { /* if the user asked us to collect data, then we have * to provide any locally collected data to the host * server so they can circulate it - only take data * from the specified procs as not everyone is necessarily * participating! And only take data intended for remote * distribution as local data will be added when we send * the result to our local clients */ if (trk->hybrid) { /* if this is a hybrid, then we pack everything using * the daemon-level bfrops module as each daemon is * going to have to unpack it, and then repack it for * each participant. 
*/ peer = pmix_globals.mypeer; } else { /* since all procs are the same, just use the first proc's module */ cd = (pmix_server_caddy_t*)pmix_list_get_first(&trk->local_cbs); peer = cd->peer; } PMIX_CONSTRUCT(&bucket, pmix_buffer_t); unsigned char tmp = (unsigned char)trk->collect_type; PMIX_BFROPS_PACK(rc, peer, &bucket, &tmp, 1, PMIX_BYTE); if (PMIX_COLLECT_YES == trk->collect_type) { pmix_output_verbose(2, pmix_globals.debug_output, "fence - assembling data"); first = true; PMIX_CONSTRUCT(&pnames, pmix_list_t); PMIX_LIST_FOREACH(cd, &trk->local_cbs, pmix_server_caddy_t) { /* see if we have already gotten the contribution from * this proc */ found = false; PMIX_LIST_FOREACH(pn, &pnames, pmix_namelist_t) { if (pn->pname == &cd->peer->info->pname) { /* got it */ found = true; break; } } if (found) { continue; } else { pn = PMIX_NEW(pmix_namelist_t); pn->pname = &cd->peer->info->pname; } if (trk->hybrid || first) { /* setup the nspace */ (void)strncpy(proc.nspace, cd->peer->info->pname.nspace, PMIX_MAX_NSLEN); first = false; } proc.rank = cd->peer->info->pname.rank; /* get any remote contribution - note that there * may not be a contribution */ PMIX_CONSTRUCT(&cb, pmix_cb_t); cb.proc = &proc; cb.scope = PMIX_REMOTE; cb.copy = true; PMIX_GDS_FETCH_KV(rc, peer, &cb); if (PMIX_SUCCESS == rc) { /* pack the returned kvals */ PMIX_CONSTRUCT(&pbkt, pmix_buffer_t); /* start with the proc id */ PMIX_BFROPS_PACK(rc, peer, &pbkt, &proc, 1, PMIX_PROC); if (PMIX_SUCCESS != rc) { PMIX_ERROR_LOG(rc); PMIX_DESTRUCT(&cb); PMIX_DESTRUCT(&pbkt); PMIX_DESTRUCT(&bucket); return; } PMIX_LIST_FOREACH(kv, &cb.kvs, pmix_kval_t) { PMIX_BFROPS_PACK(rc, peer, &pbkt, kv, 1, PMIX_KVAL); if (PMIX_SUCCESS != rc) { PMIX_ERROR_LOG(rc); PMIX_DESTRUCT(&cb); PMIX_DESTRUCT(&pbkt); PMIX_DESTRUCT(&bucket); return; } } /* extract the resulting byte object */ PMIX_UNLOAD_BUFFER(&pbkt, bo.bytes, bo.size); PMIX_DESTRUCT(&pbkt); /* now pack that into the bucket for return */ PMIX_BFROPS_PACK(rc, peer, &bucket, &bo, 1, PMIX_BYTE_OBJECT); if (PMIX_SUCCESS != rc) { PMIX_ERROR_LOG(rc); PMIX_DESTRUCT(&cb); PMIX_BYTE_OBJECT_DESTRUCT(&bo); PMIX_DESTRUCT(&bucket); PMIX_RELEASE(tcd); return; } } PMIX_DESTRUCT(&cb); } PMIX_LIST_DESTRUCT(&pnames); } PMIX_UNLOAD_BUFFER(&bucket, data, sz); PMIX_DESTRUCT(&bucket); pmix_host_server.fence_nb(trk->pcs, trk->npcs, trk->info, trk->ninfo, data, sz, trk->modexcbfunc, trk); } else if (PMIX_CONNECTNB_CMD == trk->type) { pmix_host_server.connect(trk->pcs, trk->npcs, trk->info, trk->ninfo, trk->op_cbfunc, trk); } else if (PMIX_DISCONNECTNB_CMD == trk->type) { pmix_host_server.disconnect(trk->pcs, trk->npcs, trk->info, trk->ninfo, trk->op_cbfunc, trk); } else { /* unknown type */ PMIX_ERROR_LOG(PMIX_ERR_NOT_FOUND); pmix_list_remove_item(&pmix_server_globals.collectives, &trk->super); PMIX_RELEASE(trk); } PMIX_RELEASE(tcd); } static void _register_client(int sd, short args, void *cbdata) { pmix_setup_caddy_t *cd = (pmix_setup_caddy_t*)cbdata; pmix_rank_info_t *info, *iptr; pmix_nspace_t *nptr, *ns; pmix_server_trkr_t *trk; pmix_trkr_caddy_t *tcd; bool all_def; size_t i; pmix_status_t rc; PMIX_ACQUIRE_OBJECT(cd); pmix_output_verbose(2, pmix_globals.debug_output, "pmix:server _register_client for nspace %s rank %d", cd->proc.nspace, cd->proc.rank); /* see if we already have this nspace */ nptr = NULL; PMIX_LIST_FOREACH(ns, &pmix_server_globals.nspaces, pmix_nspace_t) { if (0 == strcmp(ns->nspace, cd->proc.nspace)) { nptr = ns; break; } } if (NULL == nptr) { nptr = PMIX_NEW(pmix_nspace_t); if (NULL == nptr) { rc = 
PMIX_ERR_NOMEM; goto cleanup; } nptr->nspace = strdup(cd->proc.nspace); pmix_list_append(&pmix_server_globals.nspaces, &nptr->super); } /* setup a peer object for this client - since the host server * only deals with the original processes and not any clones, * we know this function will be called only once per rank */ info = PMIX_NEW(pmix_rank_info_t); if (NULL == info) { rc = PMIX_ERR_NOMEM; goto cleanup; } info->pname.nspace = strdup(nptr->nspace); info->pname.rank = cd->proc.rank; info->uid = cd->uid; info->gid = cd->gid; info->server_object = cd->server_object; pmix_list_append(&nptr->ranks, &info->super); /* see if we have everyone */ if (nptr->nlocalprocs == pmix_list_get_size(&nptr->ranks)) { nptr->all_registered = true; /* check any pending trackers to see if they are * waiting for us. There is a slight race condition whereby * the host server could have spawned the local client and * it called back into the collective -before- our local event * would fire the register_client callback. Deal with that here. */ all_def = true; PMIX_LIST_FOREACH(trk, &pmix_server_globals.collectives, pmix_server_trkr_t) { /* if this tracker is already complete, then we * don't need to update it */ if (trk->def_complete) { continue; } /* see if any of our procs from this nspace are involved - the tracker will * have been created because a callback was received, but * we may or may not have received _all_ callbacks by this * time. So check and see if any procs from this nspace are * involved, and add them to the count of local participants */ for (i=0; i < trk->npcs; i++) { /* since we have to do this search, let's see * if the nspaces are all defined */ if (all_def) { /* so far, they have all been defined - check this one */ PMIX_LIST_FOREACH(ns, &pmix_server_globals.nspaces, pmix_nspace_t) { if (0 < ns->nlocalprocs && 0 == strcmp(trk->pcs[i].nspace, ns->nspace)) { all_def = ns->all_registered; break; } } } /* now see if this proc is local to us */ if (0 != strncmp(trk->pcs[i].nspace, nptr->nspace, PMIX_MAX_NSLEN)) { continue; } /* need to check if this rank is one of mine */ PMIX_LIST_FOREACH(iptr, &nptr->ranks, pmix_rank_info_t) { if (PMIX_RANK_WILDCARD == trk->pcs[i].rank || iptr->pname.rank == trk->pcs[i].rank) { /* this is one of mine - track the count */ ++trk->nlocal; break; } } } /* update this tracker's status */ trk->def_complete = all_def; /* is this now locally completed? 
*/ if (trk->def_complete && pmix_list_get_size(&trk->local_cbs) == trk->nlocal) { /* it did, so now we need to process it * we don't want to block someone * here, so kick any completed trackers into a * new event for processing */ PMIX_EXECUTE_COLLECTIVE(tcd, trk, pmix_server_execute_collective); } } /* also check any pending local modex requests to see if * someone has been waiting for a request on a remote proc * in one of our nspaces, but we didn't know all the local procs * and so couldn't determine the proc was remote */ pmix_pending_nspace_requests(nptr); } rc = PMIX_SUCCESS; cleanup: /* let the caller know we are done */ if (NULL != cd->opcbfunc) { cd->opcbfunc(rc, cd->cbdata); } PMIX_RELEASE(cd); } PMIX_EXPORT pmix_status_t PMIx_server_register_client(const pmix_proc_t *proc, uid_t uid, gid_t gid, void *server_object, pmix_op_cbfunc_t cbfunc, void *cbdata) { pmix_setup_caddy_t *cd; PMIX_ACQUIRE_THREAD(&pmix_global_lock); if (pmix_globals.init_cntr <= 0) { PMIX_RELEASE_THREAD(&pmix_global_lock); return PMIX_ERR_INIT; } PMIX_RELEASE_THREAD(&pmix_global_lock); pmix_output_verbose(2, pmix_globals.debug_output, "pmix:server register client %s:%d", proc->nspace, proc->rank); cd = PMIX_NEW(pmix_setup_caddy_t); if (NULL == cd) { return PMIX_ERR_NOMEM; } (void)strncpy(cd->proc.nspace, proc->nspace, PMIX_MAX_NSLEN); cd->proc.rank = proc->rank; cd->uid = uid; cd->gid = gid; cd->server_object = server_object; cd->opcbfunc = cbfunc; cd->cbdata = cbdata; /* we have to push this into our event library to avoid * potential threading issues */ PMIX_THREADSHIFT(cd, _register_client); return PMIX_SUCCESS; } static void _deregister_client(int sd, short args, void *cbdata) { pmix_setup_caddy_t *cd = (pmix_setup_caddy_t*)cbdata; pmix_rank_info_t *info; pmix_nspace_t *nptr, *tmp; PMIX_ACQUIRE_OBJECT(cd); pmix_output_verbose(2, pmix_globals.debug_output, "pmix:server _deregister_client for nspace %s rank %d", cd->proc.nspace, cd->proc.rank); /* see if we already have this nspace */ nptr = NULL; PMIX_LIST_FOREACH(tmp, &pmix_server_globals.nspaces, pmix_nspace_t) { if (0 == strcmp(tmp->nspace, cd->proc.nspace)) { nptr = tmp; break; } } if (NULL == nptr) { /* nothing to do */ goto cleanup; } /* find and remove this client */ PMIX_LIST_FOREACH(info, &nptr->ranks, pmix_rank_info_t) { if (info->pname.rank == cd->proc.rank) { pmix_list_remove_item(&nptr->ranks, &info->super); PMIX_RELEASE(info); break; } } cleanup: if (NULL != cd->opcbfunc) { cd->opcbfunc(PMIX_SUCCESS, cd->cbdata); } PMIX_RELEASE(cd); } PMIX_EXPORT void PMIx_server_deregister_client(const pmix_proc_t *proc, pmix_op_cbfunc_t cbfunc, void *cbdata) { pmix_setup_caddy_t *cd; PMIX_ACQUIRE_THREAD(&pmix_global_lock); if (pmix_globals.init_cntr <= 0) { PMIX_RELEASE_THREAD(&pmix_global_lock); if (NULL != cbfunc) { cbfunc(PMIX_ERR_INIT, cbdata); } return; } PMIX_RELEASE_THREAD(&pmix_global_lock); pmix_output_verbose(2, pmix_globals.debug_output, "pmix:server deregister client %s:%d", proc->nspace, proc->rank); cd = PMIX_NEW(pmix_setup_caddy_t); if (NULL == cd) { if (NULL != cbfunc) { cbfunc(PMIX_ERR_NOMEM, cbdata); } return; } (void)strncpy(cd->proc.nspace, proc->nspace, PMIX_MAX_NSLEN); cd->proc.rank = proc->rank; cd->opcbfunc = cbfunc; cd->cbdata = cbdata; /* we have to push this into our event library to avoid * potential threading issues */ PMIX_THREADSHIFT(cd, _deregister_client); } /* setup the envars for a child process */ PMIX_EXPORT pmix_status_t PMIx_server_setup_fork(const pmix_proc_t *proc, char ***env) { char rankstr[128]; pmix_listener_t 
*lt; pmix_status_t rc; PMIX_ACQUIRE_THREAD(&pmix_global_lock); if (pmix_globals.init_cntr <= 0) { PMIX_RELEASE_THREAD(&pmix_global_lock); return PMIX_ERR_INIT; } PMIX_RELEASE_THREAD(&pmix_global_lock); pmix_output_verbose(2, pmix_globals.debug_output, "pmix:server setup_fork for nspace %s rank %d", proc->nspace, proc->rank); /* pass the nspace */ pmix_setenv("PMIX_NAMESPACE", proc->nspace, true, env); /* pass the rank */ (void)snprintf(rankstr, 127, "%d", proc->rank); pmix_setenv("PMIX_RANK", rankstr, true, env); /* pass our rendezvous info */ PMIX_LIST_FOREACH(lt, &pmix_ptl_globals.listeners, pmix_listener_t) { if (NULL != lt->uri && NULL != lt->varname) { pmix_setenv(lt->varname, lt->uri, true, env); } } /* pass our active security modules */ pmix_setenv("PMIX_SECURITY_MODE", security_mode, true, env); /* pass our available ptl modules */ pmix_setenv("PMIX_PTL_MODULE", ptl_mode, true, env); /* pass our available bfrop modes */ pmix_setenv("PMIX_BFROP_MODULE", bfrops_mode, true, env); /* pass the type of buffer we are using */ if (PMIX_BFROP_BUFFER_FULLY_DESC == pmix_globals.mypeer->nptr->compat.type) { pmix_setenv("PMIX_BFROP_BUFFER_TYPE", "PMIX_BFROP_BUFFER_FULLY_DESC", true, env); } else { pmix_setenv("PMIX_BFROP_BUFFER_TYPE", "PMIX_BFROP_BUFFER_NON_DESC", true, env); } /* pass our available gds modules */ pmix_setenv("PMIX_GDS_MODULE", gds_mode, true, env); /* get any network contribution */ if (PMIX_SUCCESS != (rc = pmix_pnet.setup_fork(proc, env))) { PMIX_ERROR_LOG(rc); return rc; } /* get any GDS contributions */ if (PMIX_SUCCESS != (rc = pmix_gds_base_setup_fork(proc, env))) { PMIX_ERROR_LOG(rc); return rc; } return PMIX_SUCCESS; } /*************************************************************************************************** * Support calls from the host server down to us requesting direct modex data provided by one * * of our local clients * ***************************************************************************************************/ static void _dmodex_req(int sd, short args, void *cbdata) { pmix_setup_caddy_t *cd = (pmix_setup_caddy_t*)cbdata; pmix_rank_info_t *info, *iptr; pmix_nspace_t *nptr, *ns; char *data = NULL; size_t sz = 0; pmix_dmdx_remote_t *dcd; pmix_status_t rc; pmix_buffer_t pbkt; pmix_kval_t *kv; pmix_cb_t cb; PMIX_ACQUIRE_OBJECT(cd); pmix_output_verbose(2, pmix_globals.debug_output, "DMODX LOOKING FOR %s:%d", cd->proc.nspace, cd->proc.rank); /* this should be one of my clients, but a race condition * could cause this request to arrive prior to us having * been informed of it - so first check to see if we know * about this nspace yet */ nptr = NULL; PMIX_LIST_FOREACH(ns, &pmix_server_globals.nspaces, pmix_nspace_t) { if (0 == strcmp(ns->nspace, cd->proc.nspace)) { nptr = ns; break; } } if (NULL == nptr) { /* we don't know this namespace yet, and so we obviously * haven't received the data from this proc yet - defer * the request until we do */ dcd = PMIX_NEW(pmix_dmdx_remote_t); if (NULL == dcd) { rc = PMIX_ERR_NOMEM; goto cleanup; } PMIX_RETAIN(cd); dcd->cd = cd; pmix_list_append(&pmix_server_globals.remote_pnd, &dcd->super); PMIX_WAKEUP_THREAD(&cd->lock); // ensure the request doesn't hang return; } /* They are asking for job level data for this process */ if (cd->proc.rank == PMIX_RANK_WILDCARD) { /* fetch the job-level info for this nspace */ /* this is going to a remote peer, so inform the gds * that we need an actual copy of the data */ PMIX_CONSTRUCT(&cb, pmix_cb_t); cb.proc = &cd->proc; cb.scope = PMIX_REMOTE; cb.copy = true; 
PMIX_CONSTRUCT(&pbkt, pmix_buffer_t); PMIX_GDS_FETCH_KV(rc, pmix_globals.mypeer, &cb); if (PMIX_SUCCESS == rc) { /* assemble the provided data into a byte object */ PMIX_LIST_FOREACH(kv, &cb.kvs, pmix_kval_t) { PMIX_BFROPS_PACK(rc, pmix_globals.mypeer, &pbkt, kv, 1, PMIX_KVAL); if (PMIX_SUCCESS != rc) { PMIX_DESTRUCT(&pbkt); PMIX_DESTRUCT(&cb); goto cleanup; } } } PMIX_DESTRUCT(&cb); PMIX_UNLOAD_BUFFER(&pbkt, data, sz); PMIX_DESTRUCT(&pbkt); /* execute the callback */ cd->cbfunc(rc, data, sz, cd->cbdata); PMIX_WAKEUP_THREAD(&cd->lock); // ensure the request doesn't hang if (NULL != data) { free(data); } return; } /* see if we have this peer in our list */ info = NULL; PMIX_LIST_FOREACH(iptr, &nptr->ranks, pmix_rank_info_t) { if (iptr->pname.rank == cd->proc.rank) { info = iptr; break; } } if (NULL == info) { /* rank isn't known yet - defer * the request until we do */ dcd = PMIX_NEW(pmix_dmdx_remote_t); PMIX_RETAIN(cd); dcd->cd = cd; pmix_list_append(&pmix_server_globals.remote_pnd, &dcd->super); PMIX_WAKEUP_THREAD(&cd->lock); // ensure the request doesn't hang return; } /* have we received the modex from this proc yet - if * not, then defer */ if (!info->modex_recvd) { /* track the request so we can fulfill it once * data is recvd */ dcd = PMIX_NEW(pmix_dmdx_remote_t); PMIX_RETAIN(cd); dcd->cd = cd; pmix_list_append(&pmix_server_globals.remote_pnd, &dcd->super); PMIX_WAKEUP_THREAD(&cd->lock); // ensure the request doesn't hang return; } /* collect the remote/global data from this proc */ PMIX_CONSTRUCT(&cb, pmix_cb_t); cb.proc = &cd->proc; cb.scope = PMIX_REMOTE; cb.copy = true; PMIX_GDS_FETCH_KV(rc, pmix_globals.mypeer, &cb); if (PMIX_SUCCESS == rc) { /* assemble the provided data into a byte object */ PMIX_CONSTRUCT(&pbkt, pmix_buffer_t); PMIX_LIST_FOREACH(kv, &cb.kvs, pmix_kval_t) { PMIX_BFROPS_PACK(rc, pmix_globals.mypeer, &pbkt, kv, 1, PMIX_KVAL); if (PMIX_SUCCESS != rc) { PMIX_DESTRUCT(&pbkt); PMIX_DESTRUCT(&cb); goto cleanup; } } PMIX_UNLOAD_BUFFER(&pbkt, data, sz); PMIX_DESTRUCT(&pbkt); } PMIX_DESTRUCT(&cb); cleanup: /* execute the callback */ cd->cbfunc(rc, data, sz, cd->cbdata); if (NULL != data) { free(data); } PMIX_WAKEUP_THREAD(&cd->lock); } PMIX_EXPORT pmix_status_t PMIx_server_dmodex_request(const pmix_proc_t *proc, pmix_dmodex_response_fn_t cbfunc, void *cbdata) { pmix_setup_caddy_t *cd; PMIX_ACQUIRE_THREAD(&pmix_global_lock); if (pmix_globals.init_cntr <= 0) { PMIX_RELEASE_THREAD(&pmix_global_lock); return PMIX_ERR_INIT; } PMIX_RELEASE_THREAD(&pmix_global_lock); /* protect against bozo */ if (NULL == cbfunc || NULL == proc) { return PMIX_ERR_BAD_PARAM; } pmix_output_verbose(2, pmix_globals.debug_output, "pmix:server dmodex request%s:%d", proc->nspace, proc->rank); cd = PMIX_NEW(pmix_setup_caddy_t); (void)strncpy(cd->proc.nspace, proc->nspace, PMIX_MAX_NSLEN); cd->proc.rank = proc->rank; cd->cbfunc = cbfunc; cd->cbdata = cbdata; /* we have to push this into our event library to avoid * potential threading issues */ PMIX_THREADSHIFT(cd, _dmodex_req); PMIX_WAIT_THREAD(&cd->lock); PMIX_RELEASE(cd); return PMIX_SUCCESS; } static void _store_internal(int sd, short args, void *cbdata) { pmix_shift_caddy_t *cd = (pmix_shift_caddy_t*)cbdata; pmix_proc_t proc; PMIX_ACQUIRE_OBJECT(cd); (void)strncpy(proc.nspace, cd->pname.nspace, PMIX_MAX_NSLEN); proc.rank = cd->pname.rank; PMIX_GDS_STORE_KV(cd->status, pmix_globals.mypeer, &proc, PMIX_INTERNAL, cd->kv); if (cd->lock.active) { PMIX_WAKEUP_THREAD(&cd->lock); } } PMIX_EXPORT pmix_status_t PMIx_Store_internal(const pmix_proc_t *proc, 
const char *key, pmix_value_t *val) { pmix_shift_caddy_t *cd; pmix_status_t rc; PMIX_ACQUIRE_THREAD(&pmix_global_lock); if (pmix_globals.init_cntr <= 0) { PMIX_RELEASE_THREAD(&pmix_global_lock); return PMIX_ERR_INIT; } PMIX_RELEASE_THREAD(&pmix_global_lock); /* setup to thread shift this request */ cd = PMIX_NEW(pmix_shift_caddy_t); if (NULL == cd) { return PMIX_ERR_NOMEM; } cd->pname.nspace = strdup(proc->nspace); cd->pname.rank = proc->rank; cd->kv = PMIX_NEW(pmix_kval_t); if (NULL == cd->kv) { PMIX_RELEASE(cd); return PMIX_ERR_NOMEM; } cd->kv->key = strdup((char*)key); cd->kv->value = (pmix_value_t*)malloc(sizeof(pmix_value_t)); PMIX_BFROPS_VALUE_XFER(rc, pmix_globals.mypeer, cd->kv->value, val); if (PMIX_SUCCESS != rc) { PMIX_ERROR_LOG(rc); PMIX_RELEASE(cd); return rc; } PMIX_THREADSHIFT(cd, _store_internal); PMIX_WAIT_THREAD(&cd->lock); rc = cd->status; PMIX_RELEASE(cd); return rc; } PMIX_EXPORT pmix_status_t PMIx_generate_regex(const char *input, char **regexp) { PMIX_ACQUIRE_THREAD(&pmix_global_lock); if (pmix_globals.init_cntr <= 0) { PMIX_RELEASE_THREAD(&pmix_global_lock); return PMIX_ERR_INIT; } PMIX_RELEASE_THREAD(&pmix_global_lock); return pmix_preg.generate_node_regex(input, regexp); } PMIX_EXPORT pmix_status_t PMIx_generate_ppn(const char *input, char **regexp) { PMIX_ACQUIRE_THREAD(&pmix_global_lock); if (pmix_globals.init_cntr <= 0) { PMIX_RELEASE_THREAD(&pmix_global_lock); return PMIX_ERR_INIT; } PMIX_RELEASE_THREAD(&pmix_global_lock); return pmix_preg.generate_ppn(input, regexp); } static void _setup_op(pmix_status_t rc, void *cbdata) { pmix_setup_caddy_t *fcd = (pmix_setup_caddy_t*)cbdata; if (NULL != fcd->info) { PMIX_INFO_FREE(fcd->info, fcd->ninfo); } PMIX_RELEASE(fcd); } static void _setup_app(int sd, short args, void *cbdata) { pmix_setup_caddy_t *cd = (pmix_setup_caddy_t*)cbdata; pmix_setup_caddy_t *fcd = NULL; pmix_status_t rc; pmix_list_t ilist; pmix_kval_t *kv; size_t n; PMIX_ACQUIRE_OBJECT(cd); PMIX_CONSTRUCT(&ilist, pmix_list_t); /* pass to the network libraries */ if (PMIX_SUCCESS != (rc = pmix_pnet.setup_app(cd->nspace, &ilist))) { goto depart; } /* setup the return callback */ fcd = PMIX_NEW(pmix_setup_caddy_t); if (NULL == fcd) { rc = PMIX_ERR_NOMEM; PMIX_ERROR_LOG(PMIX_ERR_NOMEM); goto depart; } /* if anything came back, construct the info array */ if (0 < (fcd->ninfo = pmix_list_get_size(&ilist))) { PMIX_INFO_CREATE(fcd->info, fcd->ninfo); n = 0; PMIX_LIST_FOREACH(kv, &ilist, pmix_kval_t) { (void)strncpy(fcd->info[n].key, kv->key, PMIX_MAX_KEYLEN); PMIX_BFROPS_VALUE_XFER(rc, pmix_globals.mypeer, &fcd->info[n].value, kv->value); if (PMIX_SUCCESS != rc) { PMIX_INFO_FREE(fcd->info, fcd->ninfo); PMIX_RELEASE(fcd); fcd = NULL; goto depart; } } } depart: /* always execute the callback to avoid hanging */ if (NULL != cd->setupcbfunc) { if (NULL == fcd) { cd->setupcbfunc(rc, NULL, 0, cd->cbdata, NULL, NULL); } else { cd->setupcbfunc(rc, fcd->info, fcd->ninfo, cd->cbdata, _setup_op, fcd); } } /* cleanup memory */ PMIX_LIST_DESTRUCT(&ilist); if (NULL != cd->nspace) { free(cd->nspace); } PMIX_RELEASE(cd); } pmix_status_t PMIx_server_setup_application(const char nspace[], pmix_info_t info[], size_t ninfo, pmix_setup_application_cbfunc_t cbfunc, void *cbdata) { pmix_setup_caddy_t *cd; PMIX_ACQUIRE_THREAD(&pmix_global_lock); if (pmix_globals.init_cntr <= 0) { PMIX_RELEASE_THREAD(&pmix_global_lock); return PMIX_ERR_INIT; } PMIX_RELEASE_THREAD(&pmix_global_lock); /* need to threadshift this request */ cd = PMIX_NEW(pmix_setup_caddy_t); if (NULL == cd) { return 
PMIX_ERR_NOMEM; } if (NULL != nspace) { cd->nspace = strdup(nspace); } cd->info = info; cd->ninfo = ninfo; cd->setupcbfunc = cbfunc; cd->cbdata = cbdata; PMIX_THREADSHIFT(cd, _setup_app); return PMIX_SUCCESS; } static void _setup_local_support(int sd, short args, void *cbdata) { pmix_setup_caddy_t *cd = (pmix_setup_caddy_t*)cbdata; pmix_status_t rc; PMIX_ACQUIRE_OBJECT(cd); /* pass to the network libraries */ rc = pmix_pnet.setup_local_network(cd->nspace, cd->info, cd->ninfo); /* pass the info back */ if (NULL != cd->opcbfunc) { cd->opcbfunc(rc, cd->cbdata); } /* cleanup memory */ if (NULL != cd->nspace) { free(cd->nspace); } PMIX_RELEASE(cd); } pmix_status_t PMIx_server_setup_local_support(const char nspace[], pmix_info_t info[], size_t ninfo, pmix_op_cbfunc_t cbfunc, void *cbdata) { pmix_setup_caddy_t *cd; PMIX_ACQUIRE_THREAD(&pmix_global_lock); if (pmix_globals.init_cntr <= 0) { PMIX_RELEASE_THREAD(&pmix_global_lock); return PMIX_ERR_INIT; } PMIX_RELEASE_THREAD(&pmix_global_lock); /* need to threadshift this request */ cd = PMIX_NEW(pmix_setup_caddy_t); if (NULL == cd) { return PMIX_ERR_NOMEM; } if (NULL != nspace) { cd->nspace = strdup(nspace); } cd->info = info; cd->ninfo = ninfo; cd->opcbfunc = cbfunc; cd->cbdata = cbdata; PMIX_THREADSHIFT(cd, _setup_local_support); return PMIX_SUCCESS; } /**** THE FOLLOWING CALLBACK FUNCTIONS ARE USED BY THE HOST SERVER **** **** THEY THEREFORE CAN OCCUR IN EITHER THE HOST SERVER'S THREAD **** **** CONTEXT, OR IN OUR OWN THREAD CONTEXT IF THE CALLBACK OCCURS **** **** IMMEDIATELY. THUS ANYTHING THAT ACCESSES A GLOBAL ENTITY **** **** MUST BE PUSHED INTO AN EVENT FOR PROTECTION ****/ static void op_cbfunc(pmix_status_t status, void *cbdata) { pmix_server_caddy_t *cd = (pmix_server_caddy_t*)cbdata; pmix_buffer_t *reply; pmix_status_t rc; /* no need to thread-shift here as no global data is * being accessed */ /* setup the reply with the returned status */ if (NULL == (reply = PMIX_NEW(pmix_buffer_t))) { PMIX_ERROR_LOG(PMIX_ERR_OUT_OF_RESOURCE); PMIX_RELEASE(cd); return; } PMIX_BFROPS_PACK(rc, cd->peer, reply, &status, 1, PMIX_STATUS); if (PMIX_SUCCESS != rc) { PMIX_ERROR_LOG(rc); PMIX_RELEASE(reply); PMIX_RELEASE(cd); return; } /* the function that created the server_caddy did a * retain on the peer, so we don't have to worry about * it still being present - send a copy to the originator */ PMIX_PTL_SEND_ONEWAY(rc, cd->peer, reply, cd->hdr.tag); if (PMIX_SUCCESS != rc) { PMIX_ERROR_LOG(rc); PMIX_RELEASE(reply); } /* cleanup */ PMIX_RELEASE(cd); } static void _spcb(int sd, short args, void *cbdata) { pmix_shift_caddy_t *cd = (pmix_shift_caddy_t*)cbdata; pmix_buffer_t *reply; pmix_status_t rc; pmix_proc_t proc; pmix_cb_t cb; pmix_kval_t *kv; PMIX_ACQUIRE_OBJECT(cd); /* setup the reply with the returned status */ if (NULL == (reply = PMIX_NEW(pmix_buffer_t))) { PMIX_ERROR_LOG(PMIX_ERR_OUT_OF_RESOURCE); PMIX_RELEASE(cd->cd); PMIX_WAKEUP_THREAD(&cd->lock); return; } PMIX_BFROPS_PACK(rc, cd->cd->peer, reply, &cd->status, 1, PMIX_STATUS); if (PMIX_SUCCESS != rc) { PMIX_ERROR_LOG(rc); PMIX_RELEASE(cd->cd); PMIX_WAKEUP_THREAD(&cd->lock); return; } if (PMIX_SUCCESS == cd->status) { /* pass back the name of the nspace */ PMIX_BFROPS_PACK(rc, cd->cd->peer, reply, &cd->pname.nspace, 1, PMIX_STRING); /* add the job-level info, if we have it */ (void)strncpy(proc.nspace, cd->pname.nspace, PMIX_MAX_NSLEN); proc.rank = PMIX_RANK_WILDCARD; /* this is going to a local client, so let the gds * have the option of returning a copy of the data, * or a pointer to local 
storage */ PMIX_CONSTRUCT(&cb, pmix_cb_t); cb.proc = &proc; cb.scope = PMIX_SCOPE_UNDEF; cb.copy = false; PMIX_GDS_FETCH_KV(rc, pmix_globals.mypeer, &cb); if (PMIX_SUCCESS == rc) { PMIX_LIST_FOREACH(kv, &cb.kvs, pmix_kval_t) { PMIX_BFROPS_PACK(rc, cd->cd->peer, reply, kv, 1, PMIX_KVAL); if (PMIX_SUCCESS != rc) { PMIX_ERROR_LOG(rc); PMIX_RELEASE(cd->cd); PMIX_RELEASE(reply); PMIX_DESTRUCT(&cb); PMIX_WAKEUP_THREAD(&cd->lock); return; } } PMIX_DESTRUCT(&cb); } } /* the function that created the server_caddy did a * retain on the peer, so we don't have to worry about * it still being present - tell the originator the result */ PMIX_SERVER_QUEUE_REPLY(cd->cd->peer, cd->cd->hdr.tag, reply); /* cleanup */ PMIX_RELEASE(cd->cd); PMIX_WAKEUP_THREAD(&cd->lock); } static void spawn_cbfunc(pmix_status_t status, char *nspace, void *cbdata) { pmix_shift_caddy_t *cd; /* need to thread-shift this request */ cd = PMIX_NEW(pmix_shift_caddy_t); cd->status = status; cd->pname.nspace = strdup(nspace); cd->cd = (pmix_server_caddy_t*)cbdata;; PMIX_THREADSHIFT(cd, _spcb); PMIX_WAIT_THREAD(&cd->lock); PMIX_RELEASE(cd); } static void lookup_cbfunc(pmix_status_t status, pmix_pdata_t pdata[], size_t ndata, void *cbdata) { pmix_server_caddy_t *cd = (pmix_server_caddy_t*)cbdata; pmix_buffer_t *reply; pmix_status_t rc; /* no need to thread-shift as no global data is accessed */ /* setup the reply with the returned status */ if (NULL == (reply = PMIX_NEW(pmix_buffer_t))) { PMIX_ERROR_LOG(PMIX_ERR_OUT_OF_RESOURCE); PMIX_RELEASE(cd); return; } PMIX_BFROPS_PACK(rc, cd->peer, reply, &status, 1, PMIX_STATUS); if (PMIX_SUCCESS != rc) { PMIX_ERROR_LOG(rc); PMIX_RELEASE(reply); return; } if (PMIX_SUCCESS == status) { /* pack the returned data objects */ PMIX_BFROPS_PACK(rc, cd->peer, reply, &ndata, 1, PMIX_SIZE); if (PMIX_SUCCESS != rc) { PMIX_ERROR_LOG(rc); PMIX_RELEASE(reply); return; } PMIX_BFROPS_PACK(rc, cd->peer, reply, pdata, ndata, PMIX_PDATA); if (PMIX_SUCCESS != rc) { PMIX_ERROR_LOG(rc); PMIX_RELEASE(reply); return; } } /* the function that created the server_caddy did a * retain on the peer, so we don't have to worry about * it still being present - tell the originator the result */ PMIX_SERVER_QUEUE_REPLY(cd->peer, cd->hdr.tag, reply); /* cleanup */ PMIX_RELEASE(cd); } /* fence modex calls return here when the host RM has completed * the operation - any enclosed data is provided to us as a blob * which contains byte objects, one for each set of data. 
Our * peer servers will have packed the blobs using our common * GDS module, so use the mypeer one to unpack them */ static void _mdxcbfunc(int sd, short argc, void *cbdata) { pmix_shift_caddy_t *scd = (pmix_shift_caddy_t*)cbdata; pmix_server_trkr_t *tracker = scd->tracker; pmix_buffer_t xfer, *reply, bkt; pmix_byte_object_t bo, bo2; pmix_server_caddy_t *cd; pmix_status_t rc = PMIX_SUCCESS, ret; pmix_nspace_caddy_t *nptr; pmix_list_t nslist; int32_t cnt = 1; char byte; bool found; pmix_collect_t ctype; PMIX_ACQUIRE_OBJECT(scd); /* pass the blobs being returned */ PMIX_CONSTRUCT(&xfer, pmix_buffer_t); PMIX_LOAD_BUFFER(pmix_globals.mypeer, &xfer, scd->data, scd->ndata); PMIX_CONSTRUCT(&nslist, pmix_list_t); if (PMIX_SUCCESS != scd->status) { rc = scd->status; goto finish_collective; } if (PMIX_COLLECT_INVALID == tracker->collect_type) { rc = PMIX_ERR_INVALID_ARG; goto finish_collective; } // Skip the data if we didn't collect it if (PMIX_COLLECT_YES != tracker->collect_type) { rc = PMIX_SUCCESS; goto finish_collective; } // collect the pmix_nspace_t's of all local participants PMIX_LIST_FOREACH(cd, &tracker->local_cbs, pmix_server_caddy_t) { // see if we already have this nspace found = false; PMIX_LIST_FOREACH(nptr, &nslist, pmix_nspace_caddy_t) { if (nptr->ns == cd->peer->nptr) { found = true; break; } } if (!found) { // add it nptr = PMIX_NEW(pmix_nspace_caddy_t); PMIX_RETAIN(cd->peer->nptr); nptr->ns = cd->peer->nptr; pmix_list_append(&nslist, &nptr->super); } } /* Loop over the enclosed byte object envelopes and * store them in our GDS module */ cnt = 1; PMIX_BFROPS_UNPACK(rc, pmix_globals.mypeer, &xfer, &bo, &cnt, PMIX_BYTE_OBJECT); while (PMIX_SUCCESS == rc) { PMIX_LOAD_BUFFER(pmix_globals.mypeer, &bkt, bo.bytes, bo.size); /* unpack the data collection flag */ cnt = 1; PMIX_BFROPS_UNPACK(rc, pmix_globals.mypeer, &bkt, &byte, &cnt, PMIX_BYTE); if (PMIX_ERR_UNPACK_READ_PAST_END_OF_BUFFER == rc) { /* no data was returned, so we are done with this blob */ break; } if (PMIX_SUCCESS != rc) { /* we have an error */ break; } // Check that this blob was accumulated with the same data collection setting ctype = (pmix_collect_t)byte; if (ctype != tracker->collect_type) { rc = PMIX_ERR_INVALID_ARG; break; } /* unpack the enclosed blobs from the various peers */ cnt = 1; PMIX_BFROPS_UNPACK(rc, pmix_globals.mypeer, &bkt, &bo2, &cnt, PMIX_BYTE_OBJECT); while (PMIX_SUCCESS == rc) { /* unpack all the kval's from this peer and store them in * our GDS. Note that PMIx by design holds all data at * the server level until requested. If our GDS is a * shared memory region, then the data may be available * right away - but the client still has to be notified * of its presence. 
*/ PMIX_LIST_FOREACH(nptr, &nslist, pmix_nspace_caddy_t) { PMIX_GDS_STORE_MODEX(rc, nptr->ns, &tracker->local_cbs, &bo2); if (PMIX_SUCCESS != rc) { PMIX_ERROR_LOG(rc); break; } } PMIX_BYTE_OBJECT_DESTRUCT(&bo2); /* get the next blob */ cnt = 1; PMIX_BFROPS_UNPACK(rc, pmix_globals.mypeer, &bkt, &bo2, &cnt, PMIX_BYTE_OBJECT); } if (PMIX_ERR_UNPACK_READ_PAST_END_OF_BUFFER == rc) { rc = PMIX_SUCCESS; } else if (PMIX_SUCCESS != rc) { PMIX_ERROR_LOG(rc); goto finish_collective; } /* unpack and process the next blob */ cnt = 1; PMIX_BFROPS_UNPACK(rc, pmix_globals.mypeer, &xfer, &bo, &cnt, PMIX_BYTE_OBJECT); } if (PMIX_ERR_UNPACK_READ_PAST_END_OF_BUFFER == rc) { rc = PMIX_SUCCESS; } else if (PMIX_SUCCESS != rc) { PMIX_ERROR_LOG(rc); } finish_collective: /* loop across all procs in the tracker, sending them the reply */ PMIX_LIST_FOREACH(cd, &tracker->local_cbs, pmix_server_caddy_t) { reply = PMIX_NEW(pmix_buffer_t); if (NULL == reply) { rc = PMIX_ERR_NOMEM; break; } /* setup the reply, starting with the returned status */ PMIX_BFROPS_PACK(ret, cd->peer, reply, &rc, 1, PMIX_STATUS); if (PMIX_SUCCESS != ret) { PMIX_ERROR_LOG(ret); goto cleanup; } pmix_output_verbose(2, pmix_globals.debug_output, "server:modex_cbfunc reply being sent to %s:%u", cd->peer->info->pname.nspace, cd->peer->info->pname.rank); PMIX_SERVER_QUEUE_REPLY(cd->peer, cd->hdr.tag, reply); } cleanup: /* Protect data from being free'd because RM pass * the pointer that is set to the middle of some * buffer (the case with SLURM). * RM is responsible on the release of the buffer */ xfer.base_ptr = NULL; xfer.bytes_used = 0; PMIX_DESTRUCT(&xfer); pmix_list_remove_item(&pmix_server_globals.collectives, &tracker->super); PMIX_RELEASE(tracker); PMIX_LIST_DESTRUCT(&nslist); /* we are done */ if (NULL != scd->cbfunc.relfn) { scd->cbfunc.relfn(scd->cbdata); } PMIX_RELEASE(scd); } static void modex_cbfunc(pmix_status_t status, const char *data, size_t ndata, void *cbdata, pmix_release_cbfunc_t relfn, void *relcbd) { pmix_server_trkr_t *tracker = (pmix_server_trkr_t*)cbdata; pmix_shift_caddy_t *scd; pmix_output_verbose(2, pmix_globals.debug_output, "server:modex_cbfunc called with %d bytes", (int)ndata); if (NULL == tracker) { /* nothing to do - but be sure to give them * a release if they want it */ if (NULL != relfn) { relfn(relcbd); } return; } /* need to thread-shift this callback as it accesses global data */ scd = PMIX_NEW(pmix_shift_caddy_t); if (NULL == scd) { /* nothing we can do */ if (NULL != relfn) { relfn(cbdata); } return; } scd->status = status; scd->data = data; scd->ndata = ndata; scd->tracker = tracker; scd->cbfunc.relfn = relfn; scd->cbdata = relcbd; PMIX_THREADSHIFT(scd, _mdxcbfunc); } static void get_cbfunc(pmix_status_t status, const char *data, size_t ndata, void *cbdata, pmix_release_cbfunc_t relfn, void *relcbd) { pmix_server_caddy_t *cd = (pmix_server_caddy_t*)cbdata; pmix_buffer_t *reply, buf; pmix_status_t rc; pmix_output_verbose(2, pmix_globals.debug_output, "server:get_cbfunc called with %d elements", (int)ndata); /* no need to thread-shift here as no global data is accessed */ if (NULL == cd) { /* nothing to do - but be sure to give them * a release if they want it */ if (NULL != relfn) { relfn(relcbd); } return; } /* setup the reply, starting with the returned status */ reply = PMIX_NEW(pmix_buffer_t); if (NULL == reply) { rc = PMIX_ERR_NOMEM; goto cleanup; } PMIX_BFROPS_PACK(rc, cd->peer, reply, &status, 1, PMIX_STATUS); if (PMIX_SUCCESS != rc) { PMIX_ERROR_LOG(rc); goto cleanup; } /* pack the blob being returned 
*/ PMIX_CONSTRUCT(&buf, pmix_buffer_t); PMIX_LOAD_BUFFER(cd->peer, &buf, data, ndata); PMIX_BFROPS_COPY_PAYLOAD(rc, cd->peer, reply, &buf); buf.base_ptr = NULL; buf.bytes_used = 0; PMIX_DESTRUCT(&buf); /* send the data to the requestor */ pmix_output_verbose(2, pmix_globals.debug_output, "server:get_cbfunc reply being sent to %s:%u", cd->peer->info->pname.nspace, cd->peer->info->pname.rank); pmix_output_hexdump(5, pmix_globals.debug_output, reply->base_ptr, (reply->bytes_used < 256 ? reply->bytes_used : 256)); PMIX_SERVER_QUEUE_REPLY(cd->peer, cd->hdr.tag, reply); cleanup: /* if someone wants a release, give it to them */ if (NULL != relfn) { relfn(relcbd); } PMIX_RELEASE(cd); } static void _cnct(int sd, short args, void *cbdata) { pmix_shift_caddy_t *scd = (pmix_shift_caddy_t*)cbdata; pmix_server_trkr_t *tracker = scd->tracker; pmix_buffer_t *reply, pbkt; pmix_byte_object_t bo; pmix_status_t rc; int i; pmix_server_caddy_t *cd; char **nspaces=NULL; bool found; pmix_proc_t proc; pmix_cb_t cb; pmix_kval_t *kptr; PMIX_ACQUIRE_OBJECT(scd); if (PMIX_CONNECTNB_CMD == tracker->type) { /* find the unique nspaces that are participating */ PMIX_LIST_FOREACH(cd, &tracker->local_cbs, pmix_server_caddy_t) { if (NULL == nspaces) { pmix_argv_append_nosize(&nspaces, cd->peer->info->pname.nspace); } else { found = false; for (i=0; NULL != nspaces[i]; i++) { if (0 == strcmp(nspaces[i], cd->peer->info->pname.nspace)) { found = true; break; } } if (!found) { pmix_argv_append_nosize(&nspaces, cd->peer->info->pname.nspace); } } } } /* loop across all local procs in the tracker, sending them the reply */ PMIX_LIST_FOREACH(cd, &tracker->local_cbs, pmix_server_caddy_t) { /* setup the reply, starting with the returned status */ reply = PMIX_NEW(pmix_buffer_t); if (NULL == reply) { PMIX_ERROR_LOG(PMIX_ERR_NOMEM); rc = PMIX_ERR_NOMEM; goto cleanup; } PMIX_BFROPS_PACK(rc, cd->peer, reply, &scd->status, 1, PMIX_STATUS); if (PMIX_SUCCESS != rc) { PMIX_ERROR_LOG(rc); PMIX_RELEASE(reply); goto cleanup; } if (PMIX_CONNECTNB_CMD == tracker->type) { /* loop across all participating nspaces and include their * job-related info */ for (i=0; NULL != nspaces[i]; i++) { /* if this is the local proc's own nspace, then * ignore it - it already has this info */ if (0 == strncmp(nspaces[i], cd->peer->info->pname.nspace, PMIX_MAX_NSLEN)) { continue; } /* this is a local request, so give the gds the option * of returning a copy of the data, or a pointer to * local storage */ /* add the job-level info, if necessary */ proc.rank = PMIX_RANK_WILDCARD; (void)strncpy(proc.nspace, nspaces[i], PMIX_MAX_NSLEN); PMIX_CONSTRUCT(&cb, pmix_cb_t); /* this is for a local client, so give the gds the * option of returning a complete copy of the data, * or returning a pointer to local storage */ cb.proc = &proc; cb.scope = PMIX_SCOPE_UNDEF; cb.copy = false; PMIX_GDS_FETCH_KV(rc, cd->peer, &cb); if (PMIX_SUCCESS != rc) { PMIX_ERROR_LOG(rc); PMIX_RELEASE(reply); PMIX_DESTRUCT(&cb); goto cleanup; } PMIX_CONSTRUCT(&pbkt, pmix_buffer_t); /* pack the nspace name */ PMIX_BFROPS_PACK(rc, cd->peer, &pbkt, &nspaces[i], 1, PMIX_STRING); if (PMIX_SUCCESS != rc) { PMIX_ERROR_LOG(rc); PMIX_RELEASE(reply); PMIX_DESTRUCT(&cb); goto cleanup; } PMIX_LIST_FOREACH(kptr, &cb.kvs, pmix_kval_t) { PMIX_BFROPS_PACK(rc, cd->peer, &pbkt, kptr, 1, PMIX_KVAL); if (PMIX_SUCCESS != rc) { PMIX_ERROR_LOG(rc); PMIX_RELEASE(reply); PMIX_DESTRUCT(&cb); goto cleanup; } } PMIX_DESTRUCT(&cb); PMIX_UNLOAD_BUFFER(&pbkt, bo.bytes, bo.size); PMIX_BFROPS_PACK(rc, cd->peer, reply, &bo, 1, 
PMIX_BYTE_OBJECT); if (PMIX_SUCCESS != rc) { PMIX_ERROR_LOG(rc); PMIX_RELEASE(reply); PMIX_DESTRUCT(&pbkt); goto cleanup; } PMIX_DESTRUCT(&pbkt); } } pmix_output_verbose(2, pmix_globals.debug_output, "server:cnct_cbfunc reply being sent to %s:%u", cd->peer->info->pname.nspace, cd->peer->info->pname.rank); PMIX_SERVER_QUEUE_REPLY(cd->peer, cd->hdr.tag, reply); } cleanup: if (NULL != nspaces) { pmix_argv_free(nspaces); } pmix_list_remove_item(&pmix_server_globals.collectives, &tracker->super); PMIX_RELEASE(tracker); /* we are done */ PMIX_RELEASE(scd); } static void cnct_cbfunc(pmix_status_t status, void *cbdata) { pmix_server_trkr_t *tracker = (pmix_server_trkr_t*)cbdata; pmix_shift_caddy_t *scd; pmix_output_verbose(2, pmix_globals.debug_output, "server:cnct_cbfunc called"); if (NULL == tracker) { /* nothing to do */ return; } /* need to thread-shift this callback as it accesses global data */ scd = PMIX_NEW(pmix_shift_caddy_t); if (NULL == scd) { /* nothing we can do */ return; } scd->status = status; scd->tracker = tracker; PMIX_THREADSHIFT(scd, _cnct); } static void regevents_cbfunc(pmix_status_t status, void *cbdata) { pmix_status_t rc; pmix_server_caddy_t *cd = (pmix_server_caddy_t*) cbdata; pmix_buffer_t *reply; pmix_output_verbose(2, pmix_globals.debug_output, "server:regevents_cbfunc called status = %d", status); reply = PMIX_NEW(pmix_buffer_t); if (NULL == reply) { PMIX_ERROR_LOG(PMIX_ERR_NOMEM); PMIX_RELEASE(cd); return; } PMIX_BFROPS_PACK(rc, cd->peer, reply, &status, 1, PMIX_STATUS); if (PMIX_SUCCESS != rc) { PMIX_ERROR_LOG(rc); } // send reply PMIX_SERVER_QUEUE_REPLY(cd->peer, cd->hdr.tag, reply); PMIX_RELEASE(cd); } static void notifyerror_cbfunc (pmix_status_t status, void *cbdata) { pmix_status_t rc; pmix_server_caddy_t *cd = (pmix_server_caddy_t*) cbdata; pmix_buffer_t *reply; pmix_output_verbose(2, pmix_globals.debug_output, "server:notifyerror_cbfunc called status = %d", status); reply = PMIX_NEW(pmix_buffer_t); if (NULL == reply) { PMIX_ERROR_LOG(PMIX_ERR_NOMEM); PMIX_RELEASE(cd); return; } PMIX_BFROPS_PACK(rc, cd->peer, reply, &status, 1, PMIX_STATUS); if (PMIX_SUCCESS != rc) { PMIX_ERROR_LOG(rc); } // send reply PMIX_SERVER_QUEUE_REPLY(cd->peer, cd->hdr.tag, reply); PMIX_RELEASE(cd); } static void query_cbfunc(pmix_status_t status, pmix_info_t *info, size_t ninfo, void *cbdata, pmix_release_cbfunc_t release_fn, void *release_cbdata) { pmix_query_caddy_t *qcd = (pmix_query_caddy_t*)cbdata; pmix_server_caddy_t *cd = (pmix_server_caddy_t*)qcd->cbdata; pmix_buffer_t *reply; pmix_status_t rc; pmix_output_verbose(2, pmix_globals.debug_output, "pmix:query callback with status %d", status); reply = PMIX_NEW(pmix_buffer_t); if (NULL == reply) { PMIX_ERROR_LOG(PMIX_ERR_NOMEM); PMIX_RELEASE(cd); return; } PMIX_BFROPS_PACK(rc, cd->peer, reply, &status, 1, PMIX_STATUS); if (PMIX_SUCCESS != rc) { PMIX_ERROR_LOG(rc); goto complete; } /* pack the returned data */ PMIX_BFROPS_PACK(rc, cd->peer, reply, &ninfo, 1, PMIX_SIZE); if (PMIX_SUCCESS != rc) { PMIX_ERROR_LOG(rc); goto complete; } if (0 < ninfo) { PMIX_BFROPS_PACK(rc, cd->peer, reply, info, ninfo, PMIX_INFO); if (PMIX_SUCCESS != rc) { PMIX_ERROR_LOG(rc); } } complete: // send reply PMIX_SERVER_QUEUE_REPLY(cd->peer, cd->hdr.tag, reply); // cleanup if (NULL != qcd->queries) { PMIX_QUERY_FREE(qcd->queries, qcd->nqueries); } if (NULL != qcd->info) { PMIX_INFO_FREE(qcd->info, qcd->ninfo); } PMIX_RELEASE(qcd); PMIX_RELEASE(cd); } /* the switchyard is the primary message handling function. 
It's purpose * is to take incoming commands (packed into a buffer), unpack them, * and then call the corresponding host server's function to execute * them. Some commands involve only a single proc (i.e., the one * sending the command) and can be executed while we wait. In these cases, * the switchyard will construct and pack a reply buffer to be returned * to the sender. * * Other cases (either multi-process collective or cmds that require * an async reply) cannot generate an immediate reply. In these cases, * the reply buffer will be NULL. An appropriate callback function will * be called that will be responsible for eventually replying to the * calling processes. * * Should an error be encountered at any time within the switchyard, an * error reply buffer will be returned so that the caller can be notified, * thereby preventing the process from hanging. */ static pmix_status_t server_switchyard(pmix_peer_t *peer, uint32_t tag, pmix_buffer_t *buf) { pmix_status_t rc=PMIX_ERR_NOT_SUPPORTED; int32_t cnt; pmix_cmd_t cmd; pmix_server_caddy_t *cd; pmix_proc_t proc; pmix_buffer_t *reply; pmix_regevents_info_t *reginfo; pmix_peer_events_info_t *prev; /* retrieve the cmd */ cnt = 1; PMIX_BFROPS_UNPACK(rc, peer, buf, &cmd, &cnt, PMIX_COMMAND); if (PMIX_SUCCESS != rc) { PMIX_ERROR_LOG(rc); return rc; } pmix_output_verbose(2, pmix_globals.debug_output, "recvd pmix cmd %d from %s:%u", cmd, peer->info->pname.nspace, peer->info->pname.rank); if (PMIX_REQ_CMD == cmd) { reply = PMIX_NEW(pmix_buffer_t); if (NULL == reply) { PMIX_ERROR_LOG(PMIX_ERR_NOMEM); return PMIX_ERR_NOMEM; } PMIX_GDS_REGISTER_JOB_INFO(rc, peer, reply); if (PMIX_SUCCESS != rc) { PMIX_ERROR_LOG(rc); return rc; } PMIX_SERVER_QUEUE_REPLY(peer, tag, reply); peer->nptr->ndelivered++; return PMIX_SUCCESS; } if (PMIX_ABORT_CMD == cmd) { PMIX_GDS_CADDY(cd, peer, tag); if (PMIX_SUCCESS != (rc = pmix_server_abort(peer, buf, op_cbfunc, cd))) { PMIX_RELEASE(cd); } return rc; } if (PMIX_COMMIT_CMD == cmd) { rc = pmix_server_commit(peer, buf); reply = PMIX_NEW(pmix_buffer_t); if (NULL == reply) { PMIX_ERROR_LOG(PMIX_ERR_NOMEM); return PMIX_ERR_NOMEM; } PMIX_BFROPS_PACK(rc, peer, reply, &rc, 1, PMIX_STATUS); if (PMIX_SUCCESS != rc) { PMIX_ERROR_LOG(rc); } PMIX_SERVER_QUEUE_REPLY(peer, tag, reply); return PMIX_SUCCESS; // don't reply twice } if (PMIX_FENCENB_CMD == cmd) { PMIX_GDS_CADDY(cd, peer, tag); if (PMIX_SUCCESS != (rc = pmix_server_fence(cd, buf, modex_cbfunc, op_cbfunc))) { PMIX_RELEASE(cd); } return rc; } if (PMIX_GETNB_CMD == cmd) { PMIX_GDS_CADDY(cd, peer, tag); if (PMIX_SUCCESS != (rc = pmix_server_get(buf, get_cbfunc, cd))) { PMIX_RELEASE(cd); } return rc; } if (PMIX_FINALIZE_CMD == cmd) { pmix_output_verbose(2, pmix_globals.debug_output, "recvd FINALIZE"); /* mark that this peer called finalize */ peer->finalized = true; /* call the local server, if supported */ if (NULL != pmix_host_server.client_finalized) { PMIX_GDS_CADDY(cd, peer, tag); (void)strncpy(proc.nspace, peer->info->pname.nspace, PMIX_MAX_NSLEN); proc.rank = peer->info->pname.rank; /* since the client is finalizing, remove them from any event * registrations they may still have on our list */ PMIX_LIST_FOREACH(reginfo, &pmix_server_globals.events, pmix_regevents_info_t) { PMIX_LIST_FOREACH(prev, &reginfo->peers, pmix_peer_events_info_t) { if (prev->peer == peer) { pmix_list_remove_item(&reginfo->peers, &prev->super); PMIX_RELEASE(prev); break; } } } /* now tell the host server */ if (PMIX_SUCCESS != (rc = pmix_host_server.client_finalized(&proc, peer->info->server_object, 
op_cbfunc, cd))) { PMIX_RELEASE(cd); } else { /* don't reply to them ourselves - we will do so when the host * server calls us back */ return rc; } } /* turn off the recv event - we shouldn't hear anything * more from this proc */ if (peer->recv_ev_active) { pmix_event_del(&peer->recv_event); peer->recv_ev_active = false; } /* let the network libraries cleanup */ pmix_pnet.child_finalized(peer); return rc; } if (PMIX_PUBLISHNB_CMD == cmd) { PMIX_GDS_CADDY(cd, peer, tag); if (PMIX_SUCCESS != (rc = pmix_server_publish(peer, buf, op_cbfunc, cd))) { PMIX_RELEASE(cd); } return rc; } if (PMIX_LOOKUPNB_CMD == cmd) { PMIX_GDS_CADDY(cd, peer, tag); if (PMIX_SUCCESS != (rc = pmix_server_lookup(peer, buf, lookup_cbfunc, cd))) { PMIX_RELEASE(cd); } return rc; } if (PMIX_UNPUBLISHNB_CMD == cmd) { PMIX_GDS_CADDY(cd, peer, tag); if (PMIX_SUCCESS != (rc = pmix_server_unpublish(peer, buf, op_cbfunc, cd))) { PMIX_RELEASE(cd); } return rc; } if (PMIX_SPAWNNB_CMD == cmd) { PMIX_GDS_CADDY(cd, peer, tag); if (PMIX_SUCCESS != (rc = pmix_server_spawn(peer, buf, spawn_cbfunc, cd))) { PMIX_RELEASE(cd); } return rc; } if (PMIX_CONNECTNB_CMD == cmd) { PMIX_GDS_CADDY(cd, peer, tag); rc = pmix_server_connect(cd, buf, false, cnct_cbfunc); PMIX_RELEASE(cd); return rc; } if (PMIX_DISCONNECTNB_CMD == cmd) { PMIX_GDS_CADDY(cd, peer, tag); rc = pmix_server_connect(cd, buf, true, cnct_cbfunc); PMIX_RELEASE(cd); return rc; } if (PMIX_REGEVENTS_CMD == cmd) { PMIX_GDS_CADDY(cd, peer, tag); if (PMIX_SUCCESS != (rc = pmix_server_register_events(peer, buf, regevents_cbfunc, cd))) { PMIX_RELEASE(cd); } return rc; } if (PMIX_DEREGEVENTS_CMD == cmd) { pmix_server_deregister_events(peer, buf); return PMIX_SUCCESS; } if (PMIX_NOTIFY_CMD == cmd) { PMIX_GDS_CADDY(cd, peer, tag); rc = pmix_server_event_recvd_from_client(peer, buf, notifyerror_cbfunc, cd); return rc; } if (PMIX_QUERY_CMD == cmd) { PMIX_GDS_CADDY(cd, peer, tag); rc = pmix_server_query(peer, buf, query_cbfunc, cd); return rc; } if (PMIX_LOG_CMD == cmd) { PMIX_GDS_CADDY(cd, peer, tag); rc = pmix_server_log(peer, buf, op_cbfunc, cd); return rc; } if (PMIX_ALLOC_CMD == cmd) { PMIX_GDS_CADDY(cd, peer, tag); rc = pmix_server_alloc(peer, buf, query_cbfunc, cd); return rc; } if (PMIX_JOB_CONTROL_CMD == cmd) { PMIX_GDS_CADDY(cd, peer, tag); rc = pmix_server_job_ctrl(peer, buf, query_cbfunc, cd); return rc; } if (PMIX_MONITOR_CMD == cmd) { PMIX_GDS_CADDY(cd, peer, tag); rc = pmix_server_monitor(peer, buf, query_cbfunc, cd); return rc; } return PMIX_ERR_NOT_SUPPORTED; } static void server_message_handler(struct pmix_peer_t *pr, pmix_ptl_hdr_t *hdr, pmix_buffer_t *buf, void *cbdata) { pmix_peer_t *peer = (pmix_peer_t*)pr; pmix_buffer_t *reply; pmix_status_t rc, ret; pmix_output_verbose(2, pmix_globals.debug_output, "SWITCHYARD for %s:%u:%d", peer->info->pname.nspace, peer->info->pname.rank, peer->sd); ret = server_switchyard(peer, hdr->tag, buf); /* send the return, if there was an error returned */ if (PMIX_SUCCESS != ret) { reply = PMIX_NEW(pmix_buffer_t); if (NULL == reply) { PMIX_ERROR_LOG(PMIX_ERR_NOMEM); return; } PMIX_BFROPS_PACK(rc, pr, reply, &ret, 1, PMIX_STATUS); if (PMIX_SUCCESS != rc) { PMIX_ERROR_LOG(rc); } PMIX_SERVER_QUEUE_REPLY(peer, hdr->tag, reply); } } static inline int _my_client(const char *nspace, pmix_rank_t rank) { pmix_peer_t *peer; int i; int local = 0; for (i = 0; i < pmix_server_globals.clients.size; i++) { if (NULL != (peer = (pmix_peer_t *)pmix_pointer_array_get_item(&pmix_server_globals.clients, i))) { if (0 == strcmp(peer->info->pname.nspace, nspace) && 
peer->info->pname.rank == rank) { local = 1; break; } } } return local; }
1
7,402
@rhc54 Is this intentional or for debug purposes?
openpmix-openpmix
c
@@ -12,7 +12,14 @@ def includeme(config): # Activate end-points. config.scan('kinto.plugins.history.views') + # If StatsD is enabled, monitor execution time of listener. + if config.registry.statsd: + key = 'listeners.history' + listener = config.registry.statsd.timer(key)(on_resource_changed) + else: + listener = on_resource_changed + # Listen to every resources (except history) - config.add_subscriber(on_resource_changed, ResourceChanged, + config.add_subscriber(listener, ResourceChanged, for_resources=('bucket', 'group', 'collection', 'record'))
1
from kinto.core.events import ResourceChanged from .listener import on_resource_changed def includeme(config): config.add_api_capability( 'history', description='Track changes on data.', url='http://kinto.readthedocs.io/en/latest/api/1.x/history.html') # Activate end-points. config.scan('kinto.plugins.history.views') # Listen to every resources (except history) config.add_subscriber(on_resource_changed, ResourceChanged, for_resources=('bucket', 'group', 'collection', 'record'))
1
9,802
Maybe it should be `plugins.history` here instead of `listeners.X`, since we use those for listeners configured via `.ini` files?
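As a rough illustration of that suggestion, here is how the patched `includeme` could read with the timer keyed under the plugin name — a minimal sketch only; the `plugins.history` key is the reviewer's proposal rather than the merged change, and everything else mirrors the existing module and patch:

from kinto.core.events import ResourceChanged

from .listener import on_resource_changed


def includeme(config):
    config.add_api_capability(
        'history',
        description='Track changes on data.',
        url='http://kinto.readthedocs.io/en/latest/api/1.x/history.html')

    # Activate end-points.
    config.scan('kinto.plugins.history.views')

    # If StatsD is enabled, monitor execution time of the listener,
    # keyed under the plugin name rather than the listeners.* namespace.
    if config.registry.statsd:
        key = 'plugins.history'  # reviewer's suggested key, not the merged value
        listener = config.registry.statsd.timer(key)(on_resource_changed)
    else:
        listener = on_resource_changed

    # Listen to every resource (except history).
    config.add_subscriber(listener, ResourceChanged,
                          for_resources=('bucket', 'group', 'collection', 'record'))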
Kinto-kinto
py
@@ -6,6 +6,10 @@ module TeamPlansHelper end end + def team_page? + @team_page.present? + end + private def team_plan_quantity_select_attributes(plan)
1
module TeamPlansHelper def team_plan_quantity_select(form, checkout, plan) content_tag :li, team_plan_quantity_select_attributes(plan) do form.label(:quantity, 'Number of team members') + form.select(:quantity, options_for_team_quantity(checkout, plan)) end end private def team_plan_quantity_select_attributes(plan) { :class => "string input required", :id => "checkout_quantity_input", "data-price" => plan.price_in_dollars, "data-interval" => plan.subscription_interval } end def options_for_team_quantity(checkout, plan) options_for_select (plan.minimum_quantity...50), selected_team_plan_quantity(checkout, plan) end def selected_team_plan_quantity(checkout, plan) [checkout.quantity, plan.minimum_quantity].max end end
1
16,647
Hmm, I don't have a better idea here either. As a small thing, could you make this `@team_page.present?` or similar, to make the intent slightly clearer?
thoughtbot-upcase
rb
@@ -45,7 +45,12 @@ public class DecrypterInputStream extends InputStream implements WatchableStream public DecrypterInputStream(FileInputStream inputStream, String encryptionKey) throws GeneralSecurityException, IOException { + // First byte should be iv length int ivLength = inputStream.read(); + if (ivLength != 16 && ivLength != 32) { + throw new IOException("Can't decrypt file: incorrect iv length found in file: " + ivLength); + } + // Next bytes should be iv byte[] iv = new byte[ivLength]; inputStream.read(iv); Cipher cipher = Encryptor.getDecryptingCipher(encryptionKey, iv);
1
/* * Copyright (c) 2020-present, salesforce.com, inc. * All rights reserved. * Redistribution and use of this software in source and binary forms, with or * without modification, are permitted provided that the following conditions * are met: * - Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * - Neither the name of salesforce.com, inc. nor the names of its contributors * may be used to endorse or promote products derived from this software without * specific prior written permission of salesforce.com, inc. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ package com.salesforce.androidsdk.analytics.security; import com.salesforce.androidsdk.analytics.util.WatchableStream; import java.io.FileInputStream; import java.io.IOException; import java.io.InputStream; import java.security.GeneralSecurityException; import java.util.ArrayList; import java.util.List; import javax.crypto.Cipher; import javax.crypto.CipherInputStream; /** Input stream that decrypts content written with a EncrypterOutputStream */ public class DecrypterInputStream extends InputStream implements WatchableStream { private InputStream cipherInputStream; private List<Watcher> watchers; public DecrypterInputStream(FileInputStream inputStream, String encryptionKey) throws GeneralSecurityException, IOException { int ivLength = inputStream.read(); byte[] iv = new byte[ivLength]; inputStream.read(iv); Cipher cipher = Encryptor.getDecryptingCipher(encryptionKey, iv); cipherInputStream = new CipherInputStream(inputStream, cipher); watchers = new ArrayList<>(); } public DecrypterInputStream() { throw new IllegalArgumentException("Constructor not supported"); } @Override public int read(byte[] b) throws IOException { return cipherInputStream.read(b); } @Override public int read(byte[] b, int off, int len) throws IOException { return cipherInputStream.read(b, off, len); } @Override public long skip(long n) throws IOException { return cipherInputStream.skip(n); } @Override public int available() throws IOException { return cipherInputStream.available(); } @Override public void close() throws IOException { if (!watchers.isEmpty()) { for (Watcher watcher : watchers) { watcher.onClose(); } } cipherInputStream.close(); } @Override public synchronized void mark(int readlimit) { cipherInputStream.mark(readlimit); } @Override public synchronized void reset() throws IOException { cipherInputStream.reset(); } @Override public boolean markSupported() { return cipherInputStream.markSupported(); } @Override public int read() throws IOException { 
return cipherInputStream.read(); } public void addWatcher(Watcher watcher) { this.watchers.add(watcher); } }
1
17,743
Without that check it would still fail later (probably in the getDecryptingCipher method), but the resulting error could be hard to make sense of.
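For context, this is roughly what the constructor looks like with the patch applied — a sketch, not new behavior; the fields and the Encryptor.getDecryptingCipher(...) call come from the original file, and only the explanatory comments are added here:

    public DecrypterInputStream(FileInputStream inputStream, String encryptionKey)
            throws GeneralSecurityException, IOException {
        // First byte of the file is the IV length; the patch only accepts 16 or 32.
        int ivLength = inputStream.read();
        if (ivLength != 16 && ivLength != 32) {
            // Fail fast with a message naming the bad value, instead of an opaque
            // crypto error surfacing later in getDecryptingCipher().
            throw new IOException("Can't decrypt file: incorrect iv length found in file: " + ivLength);
        }
        // The next ivLength bytes are the IV itself.
        byte[] iv = new byte[ivLength];
        inputStream.read(iv);
        Cipher cipher = Encryptor.getDecryptingCipher(encryptionKey, iv);
        cipherInputStream = new CipherInputStream(inputStream, cipher);
        watchers = new ArrayList<>();
    }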
forcedotcom-SalesforceMobileSDK-Android
java
@@ -379,6 +379,13 @@ module Beaker return result end + def do_take_snapshot snapshot_name + self[:hypervisor].take_snapshot(name, snapshot_name) + end + + def do_restore_snapshot snapshot_name + self[:hypervisor].restore_snapshot(name, snapshot_name) + end end [ 'windows', 'unix', 'aix', 'mac' ].each do |lib|
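The new helpers simply delegate to the hypervisor object stored on the host. A hypothetical usage sketch, assuming the host's :hypervisor entry responds to take_snapshot/restore_snapshot as the diff requires (the snapshot name is illustrative):

    host = Beaker::Host.create('agent1', options)
    host.do_take_snapshot('pre-upgrade')    # checkpoint the VM via the hypervisor
    # ... exercise the risky steps ...
    host.do_restore_snapshot('pre-upgrade') # roll the VM back to the checkpoint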
1
require 'socket' require 'timeout' require 'benchmark' [ 'command', 'ssh_connection' ].each do |lib| require "beaker/#{lib}" end module Beaker class Host SELECT_TIMEOUT = 30 class CommandFailure < StandardError; end # This class provides array syntax for using puppet --configprint on a host class PuppetConfigReader def initialize(host, command) @host = host @command = command end def [](k) cmd = PuppetCommand.new(@command, "--configprint #{k.to_s}") @host.exec(cmd).stdout.strip end end def self.create name, options case options['HOSTS'][name]['platform'] when /windows/ Windows::Host.new name, options when /aix/ Aix::Host.new name, options when /osx/ Mac::Host.new name, options else Unix::Host.new name, options end end attr_accessor :logger attr_reader :name, :defaults def initialize name, options @logger = options[:logger] @name, @options = name.to_s, options.dup # This is annoying and its because of drift/lack of enforcement/lack of having # a explict relationship between our defaults, our setup steps and how they're # related through 'type' and the differences between the assumption of our two # configurations we have for many of our products type = @options.get_type type = :foss if type == :aio && !@options['HOSTS'][@name]['roles'].include?('agent') @defaults = merge_defaults_for_type @options, type pkg_initialize end def pkg_initialize # This method should be overridden by platform-specific code to # handle whatever packaging-related initialization is necessary. end def merge_defaults_for_type options, type defaults = self.class.send "#{type}_defaults".to_sym defaults.merge(options.merge((options['HOSTS'][name]))) end def node_name # TODO: might want to consider caching here; not doing it for now because # I haven't thought through all of the possible scenarios that could # cause the value to change after it had been cached. result = puppet['node_name_value'].strip end def port_open? port begin Timeout.timeout SELECT_TIMEOUT do TCPSocket.new(reachable_name, port).close return true end rescue Errno::ECONNREFUSED, Timeout::Error return false end end def up? begin Socket.getaddrinfo( reachable_name, nil ) return true rescue SocketError return false end end # Return the preferred method to reach the host, will use IP is available and then default to {#hostname}. def reachable_name self['ip'] || hostname end # Returning our PuppetConfigReader here allows users of the Host # class to do things like `host.puppet['vardir']` to query the # 'main' section or, if they want the configuration for a # particular run type, `host.puppet('agent')['vardir']` def puppet(command='agent') PuppetConfigReader.new(self, command) end def []= k, v @defaults[k] = v end def [] k @defaults[k] end def has_key? k @defaults.has_key?(k) end # The {#hostname} of this host. def to_str hostname end # The {#hostname} of this host. def to_s hostname end # Return the public name of the particular host, which may be different then the name of the host provided in # the configuration file as some provisioners create random, unique hostnames. def hostname @defaults['vmhostname'] || @name end def + other @name + other end def is_pe? @options.is_pe? end # True if this is a pe run, or if the host has had a 'use-service' property set. def use_service_scripts? is_pe? || self['use-service'] end # Mirrors the true/false value of the host's 'graceful-restarts' property, # or falls back to the value of +is_using_passenger?+ if # 'graceful-restarts' is nil, but only if this is not a PE run (foss only). def graceful_restarts? 
graceful = if !self['graceful-restarts'].nil? self['graceful-restarts'] else !is_pe? && is_using_passenger? end graceful end # Modifies the host settings to indicate that it will be using passenger service scripts, # (apache2) by default. Does nothing if this is a PE host, since it is already using # passenger. # @param [String] puppetservice Name of the service script that should be # called to stop/startPuppet on this host. Defaults to 'apache2'. def uses_passenger!(puppetservice = 'apache2') if !is_pe? self['passenger'] = true self['puppetservice'] = puppetservice self['use-service'] = true end return true end # True if this is a PE run, or if the host's 'passenger' property has been set. def is_using_passenger? is_pe? || self['passenger'] end def log_prefix if @defaults['vmhostname'] "#{self} (#{@name})" else self.to_s end end #Determine the ip address of this host def get_ip @logger.warn("Uh oh, this should be handled by sub-classes but hasn't been") end #Return the ip address of this host def ip self[:ip] ||= get_ip end #Examine the host system to determine the architecture #@return [Boolean] true if x86_64, false otherwise def determine_if_x86_64 result = exec(Beaker::Command.new("arch | grep x86_64"), :acceptable_exit_codes => (0...127)) result.exit_code == 0 end #@return [Boolean] true if x86_64, false otherwise def is_x86_64? @x86_64 ||= determine_if_x86_64 end #Add the provided key/val to the current ssh environment #@param [String] key The key to add the value to #@param [String] val The value for the key #@example # host.add_env_var('PATH', '/usr/bin:PATH') def add_env_var key, val key = key.to_s.upcase escaped_val = Regexp.escape(val).gsub('/', '\/').gsub(';', '\;') env_file = self[:ssh_env_file] #see if the key/value pair already exists if exec(Beaker::Command.new("grep -e #{key}=.*#{escaped_val} #{env_file}"), :acceptable_exit_codes => (0..255) ).exit_code == 0 return #nothing to do here, key value pair already exists #see if the key already exists elsif exec(Beaker::Command.new("grep #{key} #{env_file}"), :acceptable_exit_codes => (0..255) ).exit_code == 0 exec(Beaker::SedCommand.new(self['HOSTS'][name]['platform'], "s/#{key}=/#{key}=#{escaped_val}:/", env_file)) else exec(Beaker::Command.new("echo \"#{key}=#{val}\" >> #{env_file}")) end end #Delete the provided key/val from the current ssh environment #@param [String] key The key to delete the value from #@param [String] val The value to delete for the key #@example # host.delete_env_var('PATH', '/usr/bin:PATH') def delete_env_var key, val val = Regexp.escape(val).gsub('/', '\/').gsub(';', '\;') #if the key only has that single value remove the entire line exec(Beaker::SedCommand.new(self['HOSTS'][name]['platform'], "/#{key}=#{val}$/d", self[:ssh_env_file])) #if the key has multiple values and we only need to remove the provided val exec(Beaker::SedCommand.new(self['HOSTS'][name]['platform'], "s/#{key}=\\(.*[:;]*\\)#{val}[:;]*/#{key}=\\1/", self[:ssh_env_file])) end def connection @connection ||= SshConnection.connect( reachable_name, self['user'], self['ssh'], { :logger => @logger } ) end def close @connection.close if @connection @connection = nil end def exec command, options={} # I've always found this confusing cmdline = command.cmd_line(self) if options[:silent] output_callback = nil else @logger.debug "\n#{log_prefix} #{Time.new.strftime('%H:%M:%S')}$ #{cmdline}" output_callback = logger.method(:host_output) end unless $dry_run # is this returning a result object? 
# the options should come at the end of the method signature (rubyism) # and they shouldn't be ssh specific result = nil seconds = Benchmark.realtime { result = connection.execute(cmdline, options, output_callback) } if not options[:silent] @logger.debug "\n#{log_prefix} executed in %0.2f seconds" % seconds end unless options[:silent] # What? result.log(@logger) # No, TestCase has the knowledge about whether its failed, checking acceptable # exit codes at the host level and then raising... # is it necessary to break execution?? if !options[:accept_all_exit_codes] && !result.exit_code_in?(Array(options[:acceptable_exit_codes] || 0)) raise CommandFailure, "Host '#{self}' exited with #{result.exit_code} running:\n #{cmdline}\nLast #{@options[:trace_limit]} lines of output were:\n#{result.formatted_output(@options[:trace_limit])}" end end # Danger, so we have to return this result? result end end # Create the provided directory structure on the host # @param [String] dir The directory structure to create on the host # @return [Boolean] True, if directory construction succeeded, otherwise False def mkdir_p dir result = exec(Beaker::Command.new("mkdir -p #{dir}"), :acceptable_exit_codes => [0, 1]) result.exit_code == 0 end # scp files from the localhost to this test host, if a directory is provided it is recursively copied # @param source [String] The path to the file/dir to upload # @param target [String] The destination path on the host # @param options [Hash{Symbol=>String}] Options to alter execution # @option options [Array<String>] :ignore An array of file/dir paths that will not be copied to the host def do_scp_to source, target, options @logger.notify "localhost $ scp #{source} #{@name}:#{target} {:ignore => #{options[:ignore]}}" result = Result.new(@name, [source, target]) has_ignore = options[:ignore] and not options[:ignore].empty? # construct the regex for matching ignored files/dirs ignore_re = nil if has_ignore ignore_arr = Array(options[:ignore]).map do |entry| "((\/|\\A)#{entry}(\/|\\z))".gsub(/\./, '\.') end ignore_re = Regexp.new(ignore_arr.join('|')) end # either a single file, or a directory with no ignores if not File.file?(source) and not File.directory?(source) raise IOError, "No such file or directory - #{source}" end if File.file?(source) or (File.directory?(source) and not has_ignore) source_file = source if has_ignore and (source =~ ignore_re) @logger.trace "After rejecting ignored files/dirs, there is no file to copy" source_file = nil result.stdout = "No files to copy" result.exit_code = 1 end if source_file result = connection.scp_to(source_file, target, options, $dry_run) @logger.trace result.stdout end else # a directory with ignores dir_source = Dir.glob("#{source}/**/*").reject do |f| f =~ ignore_re end @logger.trace "After rejecting ignored files/dirs, going to scp [#{dir_source.join(", ")}]" # create necessary directory structure on host # run this quietly (no STDOUT) @logger.quiet(true) required_dirs = (dir_source.map{ | dir | File.dirname(dir) }).uniq require 'pathname' required_dirs.each do |dir| dir_path = Pathname.new(dir) if dir_path.absolute? mkdir_p(File.join(target, dir.gsub(source, ''))) else mkdir_p( File.join(target, dir) ) end end @logger.quiet(false) # copy each file to the host dir_source.each do |s| s_path = Pathname.new(s) if s_path.absolute? 
file_path = File.join(target, File.dirname(s).gsub(source,'')) else file_path = File.join(target, File.dirname(s)) end result = connection.scp_to(s, file_path, options, $dry_run) @logger.trace result.stdout end end return result end def do_scp_from source, target, options @logger.debug "localhost $ scp #{@name}:#{source} #{target}" result = connection.scp_from(source, target, options, $dry_run) @logger.debug result.stdout return result end end [ 'windows', 'unix', 'aix', 'mac' ].each do |lib| require "beaker/host/#{lib}" end end
1
8,534
I don't think this belongs in the host code. A host is pretty much unaware of which hypervisor is running it, and I don't want hosts to be that tightly coupled to their hypervisor.
voxpupuli-beaker
rb
@@ -23,5 +23,6 @@ import setuptools setuptools.setup( packages=setuptools.find_packages(), pbr=True, - setup_requires=['pbr'] + setup_requires=['pbr', 'setuptools>=12'] + # setuptools due https://github.com/ansible/molecule/issues/1859 )
1
# Copyright (c) 2015-2018 Cisco Systems, Inc. # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to # deal in the Software without restriction, including without limitation the # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or # sell copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. import setuptools setuptools.setup( packages=setuptools.find_packages(), pbr=True, setup_requires=['pbr'] )
1
8,898
I'm here to tell you that this doesn't make sense because of the way it works. `setuptools.setup()` installs these requirements only for setup time and then discards them. The problem is that it will not pick up a newer setuptools version, because the older version is already loaded at runtime and a module cannot substitute itself. The proper place for declaring build dependencies nowadays is `pyproject.toml` -> `[build-system]` -> `requires`.
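For reference, a minimal `pyproject.toml` sketch of the PEP 518 approach the reviewer is pointing at; the exact version pins are illustrative and not taken from the molecule project:

```toml
# Build-time dependencies declared where the build front-end (e.g. pip) reads them,
# instead of setup_requires inside setup.py.
[build-system]
requires = ["setuptools>=42", "wheel", "pbr"]
build-backend = "setuptools.build_meta"
```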
ansible-community-molecule
py
@@ -34,7 +34,7 @@ import ( "github.com/stretchr/testify/assert" ) -type fakeManager struct { +type fakeConnectionManager struct { onConnectReturn error onDisconnectReturn error onStatusReturn connection.Status
1
/* * Copyright (C) 2017 The "MysteriumNetwork/node" Authors. * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ package endpoints import ( "errors" "net/http" "net/http/httptest" "strings" "testing" "time" "github.com/julienschmidt/httprouter" "github.com/mysteriumnetwork/node/consumer" "github.com/mysteriumnetwork/node/core/connection" "github.com/mysteriumnetwork/node/core/ip" "github.com/mysteriumnetwork/node/identity" "github.com/mysteriumnetwork/node/market" "github.com/stretchr/testify/assert" ) type fakeManager struct { onConnectReturn error onDisconnectReturn error onStatusReturn connection.Status disconnectCount int requestedConsumerID identity.Identity requestedProvider identity.Identity requestedServiceType string } func (fm *fakeManager) Connect(consumerID identity.Identity, proposal market.ServiceProposal, options connection.ConnectParams) error { fm.requestedConsumerID = consumerID fm.requestedProvider = identity.FromAddress(proposal.ProviderID) fm.requestedServiceType = proposal.ServiceType return fm.onConnectReturn } func (fm *fakeManager) Status() connection.Status { return fm.onStatusReturn } func (fm *fakeManager) Disconnect() error { fm.disconnectCount++ return fm.onDisconnectReturn } func (fm *fakeManager) Wait() error { return nil } type StubStatisticsTracker struct { duration time.Duration stats consumer.SessionStatistics } func (ssk *StubStatisticsTracker) Retrieve() consumer.SessionStatistics { return ssk.stats } func (ssk *StubStatisticsTracker) GetSessionDuration() time.Duration { return ssk.duration } func getMockProposalProviderWithSpecifiedProposal(providerID, serviceType string) ProposalProvider { sampleProposal := market.ServiceProposal{ ID: 1, ServiceType: serviceType, ServiceDefinition: TestServiceDefinition{}, ProviderID: providerID, } return &mockProposalProvider{ proposals: []market.ServiceProposal{sampleProposal}, } } func TestAddRoutesForConnectionAddsRoutes(t *testing.T) { router := httprouter.New() fakeManager := fakeManager{} statsKeeper := &StubStatisticsTracker{ duration: time.Minute, } ipResolver := ip.NewResolverFake("123.123.123.123") mockedProposalProvider := getMockProposalProviderWithSpecifiedProposal("node1", "noop") AddRoutesForConnection(router, &fakeManager, ipResolver, statsKeeper, mockedProposalProvider) tests := []struct { method string path string body string expectedStatus int expectedJSON string }{ { http.MethodGet, "/connection", "", http.StatusOK, `{"status": ""}`, }, { http.MethodPut, "/connection", `{"consumerId": "me", "providerId": "node1", "serviceType": "noop"}`, http.StatusCreated, `{"status": ""}`, }, { http.MethodDelete, "/connection", "", http.StatusAccepted, "", }, { http.MethodGet, "/connection/ip", "", http.StatusOK, `{"ip": "123.123.123.123"}`, }, { http.MethodGet, "/connection/statistics", "", http.StatusOK, `{ "bytesSent": 0, "bytesReceived": 0, "duration": 60 }`, }, } for _, test := range tests { resp := httptest.NewRecorder() req := 
httptest.NewRequest(test.method, test.path, strings.NewReader(test.body)) router.ServeHTTP(resp, req) assert.Equal(t, test.expectedStatus, resp.Code) if test.expectedJSON != "" { assert.JSONEq(t, test.expectedJSON, resp.Body.String()) } else { assert.Equal(t, "", resp.Body.String()) } } } func TestDisconnectingState(t *testing.T) { var fakeManager = fakeManager{} fakeManager.onStatusReturn = connection.Status{ State: connection.Disconnecting, SessionID: "", } connEndpoint := NewConnectionEndpoint(&fakeManager, nil, nil, &mockProposalProvider{}) req := httptest.NewRequest(http.MethodGet, "/irrelevant", nil) resp := httptest.NewRecorder() connEndpoint.Status(resp, req, nil) assert.Equal(t, http.StatusOK, resp.Code) assert.JSONEq( t, `{ "status" : "Disconnecting" }`, resp.Body.String()) } func TestNotConnectedStateIsReturnedWhenNoConnection(t *testing.T) { var fakeManager = fakeManager{} fakeManager.onStatusReturn = connection.Status{ State: connection.NotConnected, SessionID: "", } connEndpoint := NewConnectionEndpoint(&fakeManager, nil, nil, &mockProposalProvider{}) req := httptest.NewRequest(http.MethodGet, "/irrelevant", nil) resp := httptest.NewRecorder() connEndpoint.Status(resp, req, httprouter.Params{}) assert.Equal(t, http.StatusOK, resp.Code) assert.JSONEq( t, `{ "status" : "NotConnected" }`, resp.Body.String(), ) } func TestStateConnectingIsReturnedWhenIsConnectionInProgress(t *testing.T) { var fakeManager = fakeManager{} fakeManager.onStatusReturn = connection.Status{ State: connection.Connecting, } connEndpoint := NewConnectionEndpoint(&fakeManager, nil, nil, &mockProposalProvider{}) req := httptest.NewRequest(http.MethodGet, "/irrelevant", nil) resp := httptest.NewRecorder() connEndpoint.Status(resp, req, httprouter.Params{}) assert.Equal(t, http.StatusOK, resp.Code) assert.JSONEq( t, `{ "status" : "Connecting" }`, resp.Body.String(), ) } func TestConnectedStateAndSessionIdIsReturnedWhenIsConnected(t *testing.T) { var fakeManager = fakeManager{} fakeManager.onStatusReturn = connection.Status{ State: connection.Connected, SessionID: "My-super-session", } connEndpoint := NewConnectionEndpoint(&fakeManager, nil, nil, &mockProposalProvider{}) req := httptest.NewRequest(http.MethodGet, "/irrelevant", nil) resp := httptest.NewRecorder() connEndpoint.Status(resp, req, httprouter.Params{}) assert.Equal(t, http.StatusOK, resp.Code) assert.JSONEq( t, `{ "status" : "Connected", "sessionId" : "My-super-session" }`, resp.Body.String()) } func TestPutReturns400ErrorIfRequestBodyIsNotJSON(t *testing.T) { fakeManager := fakeManager{} connEndpoint := NewConnectionEndpoint(&fakeManager, nil, nil, &mockProposalProvider{}) req := httptest.NewRequest(http.MethodPut, "/irrelevant", strings.NewReader("a")) resp := httptest.NewRecorder() connEndpoint.Create(resp, req, httprouter.Params{}) assert.Equal(t, http.StatusBadRequest, resp.Code) assert.JSONEq( t, `{ "message" : "invalid character 'a' looking for beginning of value" }`, resp.Body.String()) } func TestPutReturns422ErrorIfRequestBodyIsMissingFieldValues(t *testing.T) { fakeManager := fakeManager{} connEndpoint := NewConnectionEndpoint(&fakeManager, nil, nil, &mockProposalProvider{}) req := httptest.NewRequest(http.MethodPut, "/irrelevant", strings.NewReader("{}")) resp := httptest.NewRecorder() connEndpoint.Create(resp, req, httprouter.Params{}) assert.Equal(t, http.StatusUnprocessableEntity, resp.Code) assert.JSONEq( t, `{ "message" : "validation_error", "errors" : { "consumerId" : [ { "code" : "required" , "message" : "Field is required" } ], 
"providerId" : [ {"code" : "required" , "message" : "Field is required" } ] } }`, resp.Body.String()) } func TestPutWithValidBodyCreatesConnection(t *testing.T) { fakeManager := fakeManager{} proposalProvider := getMockProposalProviderWithSpecifiedProposal("required-node", "openvpn") connEndpoint := NewConnectionEndpoint(&fakeManager, nil, nil, proposalProvider) req := httptest.NewRequest( http.MethodPut, "/irrelevant", strings.NewReader( `{ "consumerId" : "my-identity", "providerId" : "required-node" }`)) resp := httptest.NewRecorder() connEndpoint.Create(resp, req, httprouter.Params{}) assert.Equal(t, http.StatusCreated, resp.Code) assert.Equal(t, identity.FromAddress("my-identity"), fakeManager.requestedConsumerID) assert.Equal(t, identity.FromAddress("required-node"), fakeManager.requestedProvider) assert.Equal(t, "openvpn", fakeManager.requestedServiceType) } func TestPutWithServiceTypeOverridesDefault(t *testing.T) { fakeManager := fakeManager{} mystAPI := getMockProposalProviderWithSpecifiedProposal("required-node", "noop") connEndpoint := NewConnectionEndpoint(&fakeManager, nil, nil, mystAPI) req := httptest.NewRequest( http.MethodPut, "/irrelevant", strings.NewReader( `{ "consumerId" : "my-identity", "providerId" : "required-node", "serviceType": "noop" }`)) resp := httptest.NewRecorder() connEndpoint.Create(resp, req, httprouter.Params{}) assert.Equal(t, http.StatusCreated, resp.Code) assert.Equal(t, identity.FromAddress("my-identity"), fakeManager.requestedConsumerID) assert.Equal(t, identity.FromAddress("required-node"), fakeManager.requestedProvider) assert.Equal(t, "noop", fakeManager.requestedServiceType) } func TestDeleteCallsDisconnect(t *testing.T) { fakeManager := fakeManager{} connEndpoint := NewConnectionEndpoint(&fakeManager, nil, nil, &mockProposalProvider{}) req := httptest.NewRequest(http.MethodDelete, "/irrelevant", nil) resp := httptest.NewRecorder() connEndpoint.Kill(resp, req, httprouter.Params{}) assert.Equal(t, http.StatusAccepted, resp.Code) assert.Equal(t, fakeManager.disconnectCount, 1) } func TestGetIPEndpointSucceeds(t *testing.T) { manager := fakeManager{} ipResolver := ip.NewResolverFake("123.123.123.123") connEndpoint := NewConnectionEndpoint(&manager, ipResolver, nil, &mockProposalProvider{}) resp := httptest.NewRecorder() connEndpoint.GetIP(resp, nil, nil) assert.Equal(t, http.StatusOK, resp.Code) assert.JSONEq( t, `{ "ip": "123.123.123.123" }`, resp.Body.String(), ) } func TestGetIPEndpointReturnsErrorWhenIPDetectionFails(t *testing.T) { manager := fakeManager{} ipResolver := ip.NewResolverFakeFailing(errors.New("fake error")) connEndpoint := NewConnectionEndpoint(&manager, ipResolver, nil, &mockProposalProvider{}) resp := httptest.NewRecorder() connEndpoint.GetIP(resp, nil, nil) assert.Equal(t, http.StatusServiceUnavailable, resp.Code) assert.JSONEq( t, `{ "message": "fake error" }`, resp.Body.String(), ) } func TestGetStatisticsEndpointReturnsStatistics(t *testing.T) { statsKeeper := &StubStatisticsTracker{ duration: time.Minute, stats: consumer.SessionStatistics{BytesSent: 1, BytesReceived: 2}, } manager := fakeManager{} connEndpoint := NewConnectionEndpoint(&manager, nil, statsKeeper, &mockProposalProvider{}) resp := httptest.NewRecorder() connEndpoint.GetStatistics(resp, nil, nil) assert.JSONEq( t, `{ "bytesSent": 1, "bytesReceived": 2, "duration": 60 }`, resp.Body.String(), ) } func TestGetStatisticsEndpointReturnsStatisticsWhenSessionIsNotStarted(t *testing.T) { statsKeeper := &StubStatisticsTracker{ stats: 
consumer.SessionStatistics{BytesSent: 1, BytesReceived: 2}, } manager := fakeManager{} connEndpoint := NewConnectionEndpoint(&manager, nil, statsKeeper, &mockProposalProvider{}) resp := httptest.NewRecorder() connEndpoint.GetStatistics(resp, nil, nil) assert.JSONEq( t, `{ "bytesSent": 1, "bytesReceived": 2, "duration": 0 }`, resp.Body.String(), ) } func TestEndpointReturnsConflictStatusIfConnectionAlreadyExists(t *testing.T) { manager := fakeManager{} manager.onConnectReturn = connection.ErrAlreadyExists mystAPI := getMockProposalProviderWithSpecifiedProposal("required-node", "openvpn") connectionEndpoint := NewConnectionEndpoint(&manager, nil, nil, mystAPI) req := httptest.NewRequest( http.MethodPut, "/irrelevant", strings.NewReader( `{ "consumerId" : "my-identity", "providerId" : "required-node" }`)) resp := httptest.NewRecorder() connectionEndpoint.Create(resp, req, nil) assert.Equal(t, http.StatusConflict, resp.Code) assert.JSONEq( t, `{ "message" : "connection already exists" }`, resp.Body.String(), ) } func TestDisconnectReturnsConflictStatusIfConnectionDoesNotExist(t *testing.T) { manager := fakeManager{} manager.onDisconnectReturn = connection.ErrNoConnection connectionEndpoint := NewConnectionEndpoint(&manager, nil, nil, &mockProposalProvider{}) req := httptest.NewRequest( http.MethodDelete, "/irrelevant", nil, ) resp := httptest.NewRecorder() connectionEndpoint.Kill(resp, req, nil) assert.Equal(t, http.StatusConflict, resp.Code) assert.JSONEq( t, `{ "message" : "no connection exists" }`, resp.Body.String(), ) } func TestConnectReturnsConnectCancelledStatusWhenErrConnectionCancelledIsEncountered(t *testing.T) { manager := fakeManager{} manager.onConnectReturn = connection.ErrConnectionCancelled mockProposalProvider := getMockProposalProviderWithSpecifiedProposal("required-node", "openvpn") connectionEndpoint := NewConnectionEndpoint(&manager, nil, nil, mockProposalProvider) req := httptest.NewRequest( http.MethodPut, "/irrelevant", strings.NewReader( `{ "consumerId" : "my-identity", "providerId" : "required-node" }`)) resp := httptest.NewRecorder() connectionEndpoint.Create(resp, req, nil) assert.Equal(t, statusConnectCancelled, resp.Code) assert.JSONEq( t, `{ "message" : "connection was cancelled" }`, resp.Body.String(), ) } func TestConnectReturnsErrorIfNoProposals(t *testing.T) { manager := fakeManager{} manager.onConnectReturn = connection.ErrConnectionCancelled connectionEndpoint := NewConnectionEndpoint(&manager, nil, nil, &mockProposalProvider{proposals: make([]market.ServiceProposal, 0)}) req := httptest.NewRequest( http.MethodPut, "/irrelevant", strings.NewReader( `{ "consumerId" : "my-identity", "providerId" : "required-node" }`)) resp := httptest.NewRecorder() connectionEndpoint.Create(resp, req, nil) assert.Equal(t, http.StatusBadRequest, resp.Code) assert.JSONEq( t, `{ "message" : "provider has no service proposals" }`, resp.Body.String(), ) }
1
13,540
fakeConnectionManager -> mockConnectionManager. We probably should not use the `fake` prefix anymore.
mysteriumnetwork-node
go
@@ -364,6 +364,7 @@ class JUnitTester(AbstractTestRunner): self.base_class_path = [self.selenium_server_jar_path, self.junit_path, self.junit_listener_path, self.hamcrest_path, self.json_jar_path] self.base_class_path.extend(self.scenario.get("additional-classpath", [])) + self.base_class_path=[os.path.abspath(x) for x in self.base_class_path] def prepare(self): """
1
""" Copyright 2015 BlazeMeter Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import json import os import re import shutil import subprocess import sys import time from abc import abstractmethod from urwid import Text, Pile from bzt.engine import ScenarioExecutor, Scenario, FileLister from bzt.modules.aggregator import ConsolidatingAggregator, ResultsReader from bzt.modules.console import WidgetProvider, PrioritizedWidget from bzt.modules.functional import FunctionalResultsReader, FunctionalAggregator, FunctionalSample from bzt.six import string_types, text_type, etree, parse from bzt.utils import RequiredTool, shell_exec, shutdown_process, JavaVM, TclLibrary, get_files_recursive from bzt.utils import dehumanize_time, MirrorsManager, is_windows, BetterDict, get_full_path try: from pyvirtualdisplay.smartdisplay import SmartDisplay as Display except ImportError: from pyvirtualdisplay import Display class SeleniumExecutor(ScenarioExecutor, WidgetProvider, FileLister): """ Selenium executor :type virtual_display: Display :type runner: AbstractTestRunner """ SELENIUM_DOWNLOAD_LINK = "http://selenium-release.storage.googleapis.com/{version}/" \ "selenium-server-standalone-{version}.0.jar" SELENIUM_VERSION = "2.53" JUNIT_DOWNLOAD_LINK = "http://search.maven.org/remotecontent?filepath=junit/junit/" \ "{version}/junit-{version}.jar" JUNIT_VERSION = "4.12" JUNIT_MIRRORS_SOURCE = "http://search.maven.org/solrsearch/select?q=g%3A%22junit%22%20AND%20a%3A%22" \ "junit%22%20AND%20v%3A%22{version}%22&rows=20&wt=json".format(version=JUNIT_VERSION) HAMCREST_DOWNLOAD_LINK = "http://search.maven.org/remotecontent?filepath=org/hamcrest/hamcrest-core" \ "/1.3/hamcrest-core-1.3.jar" JSON_JAR_DOWNLOAD_LINK = "http://search.maven.org/remotecontent?filepath=org/json/json/20160810/json-20160810.jar" SUPPORTED_TYPES = [".py", ".jar", ".java"] SHARED_VIRTUAL_DISPLAY = {} def __init__(self): super(SeleniumExecutor, self).__init__() self.additional_env = {} self.virtual_display = None self.end_time = None self.runner = None self.reader = None self.report_file = None self.runner_working_dir = None self.scenario = None self.script = None self.self_generated_script = False self.generated_methods = BetterDict() def set_virtual_display(self): display_conf = self.settings.get("virtual-display") if display_conf: if is_windows(): self.log.warning("Cannot have virtual display on Windows, ignoring") else: if self.engine in SeleniumExecutor.SHARED_VIRTUAL_DISPLAY: self.virtual_display = SeleniumExecutor.SHARED_VIRTUAL_DISPLAY[self.engine] else: width = display_conf.get("width", 1024) height = display_conf.get("height", 768) self.virtual_display = Display(size=(width, height)) msg = "Starting virtual display[%s]: %s" self.log.info(msg, self.virtual_display.size, self.virtual_display.new_display_var) self.virtual_display.start() SeleniumExecutor.SHARED_VIRTUAL_DISPLAY[self.engine] = self.virtual_display def free_virtual_display(self): if self.virtual_display and self.virtual_display.is_alive(): self.virtual_display.stop() if self.engine in 
SeleniumExecutor.SHARED_VIRTUAL_DISPLAY: del SeleniumExecutor.SHARED_VIRTUAL_DISPLAY[self.engine] def get_script_path(self, scenario=None): if scenario: return super(SeleniumExecutor, self).get_script_path(scenario) else: return self.engine.find_file(self.script) def _create_runner(self, working_dir, report_file): script_path = self.get_script_path() script_type = self.detect_script_type(script_path) runner_config = BetterDict() if script_type == ".py": runner_class = NoseTester runner_config.merge(self.settings.get("selenium-tools").get("nose")) else: # script_type == ".jar" or script_type == ".java": runner_class = JUnitTester runner_config.merge(self.settings.get("selenium-tools").get("junit")) runner_config['props-file'] = self.engine.create_artifact("customrunner", ".properties") runner_config["script-type"] = script_type runner_config["working-dir"] = working_dir runner_config.get("artifacts-dir", self.engine.artifacts_dir) runner_config.get("report-file", report_file) runner_config.get("stdout", self.engine.create_artifact("junit", ".out")) runner_config.get("stderr", self.engine.create_artifact("junit", ".err")) return runner_class(runner_config, self) def _register_reader(self, report_file): if self.engine.is_functional_mode(): reader = FuncSamplesReader(report_file, self.log, self.generated_methods) if isinstance(self.engine.aggregator, FunctionalAggregator): self.engine.aggregator.add_underling(reader) else: reader = LoadSamplesReader(report_file, self.log, self.generated_methods) if isinstance(self.engine.aggregator, ConsolidatingAggregator): self.engine.aggregator.add_underling(reader) return reader def prepare(self): self.set_virtual_display() self.scenario = self.get_scenario() self._verify_script() self.runner_working_dir = self.engine.create_artifact("classes", "") self.report_file = self.engine.create_artifact("selenium_tests_report", ".ldjson") self.runner = self._create_runner(self.runner_working_dir, self.report_file) self._cp_resource_files(self.runner_working_dir) self.runner.prepare() self.reader = self._register_reader(self.report_file) def _verify_script(self): if Scenario.SCRIPT in self.scenario and self.scenario.get(Scenario.SCRIPT): self.script = self.scenario.get(Scenario.SCRIPT) elif "requests" in self.scenario: self.script = self.__tests_from_requests() self.self_generated_script = True else: raise ValueError("Nothing to test, no requests were provided in scenario") def _cp_resource_files(self, runner_working_dir): script = self.get_script_path() if os.path.isdir(script): shutil.copytree(script, runner_working_dir) else: os.makedirs(runner_working_dir) if self.self_generated_script: shutil.move(script, runner_working_dir) else: script_type = self.detect_script_type(script) script_name = os.path.basename(script) if script_type == ".py" and not script_name.lower().startswith('test'): target_name = 'test_' + script_name msg = "Script '%s' won't be discovered by nosetests, renaming script to %s" self.log.warning(msg, script_name, target_name) else: target_name = script_name target_path = os.path.join(runner_working_dir, target_name) shutil.copy2(script, target_path) @staticmethod def detect_script_type(script_path): if not isinstance(script_path, string_types) and not isinstance(script_path, text_type): raise ValueError("Nothing to test, no files were provided in scenario") if not os.path.exists(script_path): raise ValueError("Script %s doesn't exist" % script_path) file_types = set() if os.path.isfile(script_path): # regular file received 
file_types.add(os.path.splitext(script_path)[1].lower()) else: # dir received: check contained files for file_name in get_files_recursive(script_path): file_types.add(os.path.splitext(file_name)[1].lower()) if '.java' in file_types: file_ext = '.java' elif '.py' in file_types: file_ext = '.py' elif '.jar' in file_types: file_ext = '.jar' else: raise ValueError("Unsupported script type: %s" % script_path) return file_ext def startup(self): """ Start runner :return: """ self.start_time = time.time() self.runner.env = self.additional_env self.runner.run_tests() def check_virtual_display(self): if self.virtual_display: if not self.virtual_display.is_alive(): self.log.info("Virtual display out: %s", self.virtual_display.stdout) self.log.warning("Virtual display err: %s", self.virtual_display.stderr) raise RuntimeError("Virtual display failed: %s" % self.virtual_display.return_code) def check(self): """ check if test completed :return: """ if self.widget: self.widget.update() self.check_virtual_display() return self.runner.is_finished() def report_test_duration(self): if self.start_time: self.end_time = time.time() self.log.debug("Selenium tests ran for %s seconds", self.end_time - self.start_time) def shutdown(self): """ shutdown test_runner :return: """ self.runner.shutdown() self.report_test_duration() def post_process(self): self.free_virtual_display() if self.reader and not self.reader.read_records: raise RuntimeWarning("Empty results, most likely Selenium failed") def get_widget(self): if not self.widget: self.widget = SeleniumWidget(self.script, self.runner.settings.get("stdout")) return self.widget def resource_files(self): self.scenario = self.get_scenario() self._verify_script() script_path = self.get_script_path() resources = [] if script_path is not None: resources.append(script_path) return resources def __tests_from_requests(self): filename = self.engine.create_artifact("test_requests", ".py") wdlog = self.engine.create_artifact('webdriver', '.log') nose_test = SeleniumScriptBuilder(self.scenario, self.log, wdlog) if self.virtual_display: nose_test.window_size = self.virtual_display.size self.generated_methods.merge(nose_test.gen_test_case()) nose_test.save(filename) return filename class AbstractTestRunner(object): """ Abstract test runner """ def __init__(self, settings, executor): self.process = None self.settings = settings self.required_tools = [] self.executor = executor self.scenario = executor.scenario self.load = executor.get_load() self.artifacts_dir = self.settings.get("artifacts-dir") self.working_dir = self.settings.get("working-dir") self.log = executor.log.getChild(self.__class__.__name__) self.opened_descriptors = [] self.is_failed = False self.env = {} @abstractmethod def prepare(self): pass @abstractmethod def run_checklist(self): pass @abstractmethod def run_tests(self): pass def is_finished(self): ret_code = self.process.poll() if ret_code is not None: if ret_code != 0: self.log.debug("Test runner exit code: %s", ret_code) with open(self.settings.get("stderr")) as fds: std_err = fds.read() self.is_failed = True raise RuntimeError("Test runner %s has failed: %s" % (self.__class__.__name__, std_err.strip())) return True return False def check_tools(self): for tool in self.required_tools: if not tool.check_if_installed(): self.log.info("Installing %s", tool.tool_name) tool.install() def shutdown(self): shutdown_process(self.process, self.log) for desc in self.opened_descriptors: desc.close() self.opened_descriptors = [] class JUnitTester(AbstractTestRunner): """ 
Allows to test java and jar files """ def __init__(self, junit_config, executor): """ :type junit_config: BetterDict """ super(JUnitTester, self).__init__(junit_config, executor) self.props_file = junit_config['props-file'] path_lambda = lambda key, val: get_full_path(self.settings.get(key, val)) self.junit_path = path_lambda("path", "~/.bzt/selenium-taurus/tools/junit/junit.jar") self.hamcrest_path = path_lambda("hamcrest-core", "~/.bzt/selenium-taurus/tools/junit/hamcrest-core.jar") self.json_jar_path = path_lambda("json-jar", "~/.bzt/selenium-taurus/tools/junit/json.jar") self.selenium_server_jar_path = path_lambda("selenium-server", "~/.bzt/selenium-taurus/selenium-server.jar") self.junit_listener_path = os.path.join(get_full_path(__file__, step_up=1), os.pardir, "resources", "taurus-junit-1.0.jar") self.target_java = str(junit_config.get("compile-target-java", "1.7")) self.base_class_path = [self.selenium_server_jar_path, self.junit_path, self.junit_listener_path, self.hamcrest_path, self.json_jar_path] self.base_class_path.extend(self.scenario.get("additional-classpath", [])) def prepare(self): """ run checklist, make jar. """ self.run_checklist() if self.settings.get("script-type", None) == ".java": self.compile_scripts() def run_checklist(self): """ java javac selenium-server.jar junit.jar junit_listener.jar """ # only check javac if we need to compile. if we have JAR as script - we don't need javac if self.settings.get("script-type", None) == ".java": self.required_tools.append(JavaC("", "", self.log)) self.required_tools.append(TclLibrary(self.log)) self.required_tools.append(JavaVM("", "", self.log)) link = SeleniumExecutor.SELENIUM_DOWNLOAD_LINK.format(version=SeleniumExecutor.SELENIUM_VERSION) self.required_tools.append(SeleniumServerJar(self.selenium_server_jar_path, link, self.log)) self.required_tools.append(JUnitJar(self.junit_path, self.log, SeleniumExecutor.JUNIT_VERSION)) self.required_tools.append(HamcrestJar(self.hamcrest_path, SeleniumExecutor.HAMCREST_DOWNLOAD_LINK)) self.required_tools.append(JsonJar(self.json_jar_path, SeleniumExecutor.JSON_JAR_DOWNLOAD_LINK)) self.required_tools.append(JUnitListenerJar(self.junit_listener_path, "")) self.check_tools() def compile_scripts(self): """ Compile .java files """ self.log.debug("Compiling .java files started") jar_path = os.path.join(self.executor.engine.artifacts_dir, self.working_dir, self.settings.get("jar-name", "compiled.jar")) if os.path.exists(jar_path): self.log.debug(".java files are already compiled, skipping") return java_files = [] for dir_entry in os.walk(self.working_dir): if dir_entry[2]: for test_file in dir_entry[2]: if os.path.splitext(test_file)[1].lower() == ".java": java_files.append(os.path.join(dir_entry[0], test_file)) compile_cl = ["javac", "-source", self.target_java, "-target", self.target_java, ] compile_cl.extend(["-cp", os.pathsep.join(self.base_class_path)]) compile_cl.extend(java_files) with open(os.path.join(self.artifacts_dir, "javac.out"), 'ab') as javac_out: with open(os.path.join(self.artifacts_dir, "javac.err"), 'ab') as javac_err: self.log.debug("running javac: %s", compile_cl) self.process = shell_exec(compile_cl, cwd=self.working_dir, stdout=javac_out, stderr=javac_err) ret_code = self.process.poll() while ret_code is None: self.log.debug("Compiling .java files...") time.sleep(1) ret_code = self.process.poll() if ret_code != 0: self.log.debug("javac exit code: %s", ret_code) with open(javac_err.name) as err_file: out = err_file.read() raise RuntimeError("Javac exited with error:\n 
%s" % out.strip()) self.log.info("Compiling .java files completed") self.make_jar() def make_jar(self): """ move all .class files to compiled.jar """ self.log.debug("Making .jar started") with open(os.path.join(self.artifacts_dir, "jar.out"), 'ab') as jar_out: with open(os.path.join(self.artifacts_dir, "jar.err"), 'ab') as jar_err: class_files = [java_file for java_file in os.listdir(self.working_dir) if java_file.endswith(".class")] jar_name = self.settings.get("jar-name", "compiled.jar") if class_files: compile_jar_cl = ["jar", "-cf", jar_name] compile_jar_cl.extend(class_files) else: package_dir = os.listdir(self.working_dir)[0] compile_jar_cl = ["jar", "-cf", jar_name, "-C", package_dir, "."] self.log.debug("running jar: %s", compile_jar_cl) self.process = shell_exec(compile_jar_cl, cwd=self.working_dir, stdout=jar_out, stderr=jar_err) ret_code = self.process.poll() while ret_code is None: self.log.debug("Making jar file...") time.sleep(1) ret_code = self.process.poll() if ret_code != 0: with open(jar_err.name) as err_file: out = err_file.read() self.log.info("Making jar failed with code %s", ret_code) self.log.info("jar output: %s", out) raise RuntimeError("Jar exited with non-zero code") self.log.info("Making .jar file completed") def run_tests(self): # java -cp junit.jar:selenium-test-small.jar: # selenium-2.46.0/selenium-java-2.46.0.jar:./../selenium-server.jar # org.junit.runner.JUnitCore TestBlazemeterPass jar_list = [os.path.join(self.working_dir, jar) for jar in os.listdir(self.working_dir) if jar.endswith(".jar")] self.base_class_path.extend(jar_list) with open(self.props_file, 'wt') as props: props.write("report_file=%s\n" % self.settings.get("report-file").replace(os.path.sep, '/')) if self.load.iterations: props.write("iterations=%s\n" % self.load.iterations) if self.load.hold: props.write("hold_for=%s\n" % self.load.hold) for index, item in enumerate(jar_list): props.write("target_%s=%s\n" % (index, item.replace(os.path.sep, '/'))) std_out = open(self.settings.get("stdout"), "wt") self.opened_descriptors.append(std_out) std_err = open(self.settings.get("stderr"), "wt") self.opened_descriptors.append(std_err) env = BetterDict() env.merge(dict(os.environ)) env.merge(self.env) junit_command_line = ["java", "-cp", os.pathsep.join(self.base_class_path), "taurusjunit.CustomRunner", self.props_file] self.process = self.executor.execute(junit_command_line, cwd=self.artifacts_dir, stdout=std_out, stderr=std_err, env=env) class NoseTester(AbstractTestRunner): """ Python selenium tests runner """ def __init__(self, nose_config, executor): super(NoseTester, self).__init__(nose_config, executor) self.plugin_path = os.path.join(get_full_path(__file__, step_up=1), os.pardir, "resources", "nose_plugin.py") def prepare(self): self.run_checklist() def run_checklist(self): """ we need installed nose plugin """ if sys.version >= '3': self.log.warn("You are using python3, make sure that your scripts are able to run in python3!") self.required_tools.append(TclLibrary(self.log)) self.required_tools.append(TaurusNosePlugin(self.plugin_path, "")) self.check_tools() def run_tests(self): """ run python tests """ executable = self.settings.get("interpreter", sys.executable) nose_command_line = [executable, self.plugin_path, '--report-file', self.settings.get("report-file")] if self.load.iterations: nose_command_line += ['-i', str(self.load.iterations)] if self.load.hold: nose_command_line += ['-d', str(self.load.hold)] nose_command_line += [self.working_dir] std_out = 
open(self.settings.get("stdout"), "wt") self.opened_descriptors.append(std_out) std_err = open(self.settings.get("stderr"), "wt") self.opened_descriptors.append(std_err) env = BetterDict() env.merge(dict(os.environ)) env.merge(self.env) self.process = self.executor.execute(nose_command_line, cwd=self.artifacts_dir, stdout=std_out, stderr=std_err, env=env) class SeleniumWidget(Pile, PrioritizedWidget): def __init__(self, script, runner_output): widgets = [] self.script_name = Text("Selenium: %s" % os.path.basename(script)) self.summary_stats = Text("Delayed...") self.runner_output = runner_output widgets.append(self.script_name) widgets.append(self.summary_stats) super(SeleniumWidget, self).__init__(widgets) PrioritizedWidget.__init__(self, priority=10) def update(self): reader_summary = '' if os.path.exists(self.runner_output): with open(self.runner_output, "rt") as fds: lines = fds.readlines() if lines: line = lines[-1] if not line.endswith("\n") and len(lines) > 1: line = lines[-2] if line and "," in line: reader_summary = line.split(",")[-1] if reader_summary: self.summary_stats.set_text(reader_summary) else: self.summary_stats.set_text('In progress...') self._invalidate() class SeleniumServerJar(RequiredTool): def __init__(self, tool_path, download_link, parent_logger): super(SeleniumServerJar, self).__init__("Selenium server", tool_path, download_link) self.log = parent_logger.getChild(self.__class__.__name__) def check_if_installed(self): self.log.debug("%s path: %s", self.tool_name, self.tool_path) selenium_launch_command = ["java", "-jar", self.tool_path, "-help"] selenium_subproc = shell_exec(selenium_launch_command, stderr=subprocess.STDOUT) output = selenium_subproc.communicate() self.log.debug("%s output: %s", self.tool_name, output) if selenium_subproc.returncode == 0: self.already_installed = True return True else: return False class JUnitJar(RequiredTool): def __init__(self, tool_path, parent_logger, junit_version): super(JUnitJar, self).__init__("JUnit", tool_path) self.log = parent_logger.getChild(self.__class__.__name__) self.version = junit_version self.mirror_manager = JUnitMirrorsManager(self.log, self.version) def install(self): dest = get_full_path(self.tool_path, step_up=1) dest = os.path.abspath(dest) junit_dist = super(JUnitJar, self).install_with_mirrors(dest, ".jar") self.log.info("Installing %s into %s", self.tool_name, dest) junit_dist.close() if not os.path.exists(dest): os.makedirs(dest) shutil.move(junit_dist.name, self.tool_path) self.log.info("Installed JUnit successfully") if not self.check_if_installed(): raise RuntimeError("Unable to run %s after installation!" % self.tool_name) class HamcrestJar(RequiredTool): def __init__(self, tool_path, download_link): super(HamcrestJar, self).__init__("HamcrestJar", tool_path, download_link) class JsonJar(RequiredTool): def __init__(self, tool_path, download_link): super(JsonJar, self).__init__("JsonJar", tool_path, download_link) class JavaC(RequiredTool): def __init__(self, tool_path, download_link, parent_logger): super(JavaC, self).__init__("JavaC", tool_path, download_link) self.log = parent_logger.getChild(self.__class__.__name__) def check_if_installed(self): try: output = subprocess.check_output(["javac", '-version'], stderr=subprocess.STDOUT) self.log.debug("%s output: %s", self.tool_name, output) return True except BaseException: raise RuntimeError("The %s is not operable or not available. 
Consider installing it" % self.tool_name) def install(self): raise NotImplementedError() class JUnitListenerJar(RequiredTool): def __init__(self, tool_path, download_link): super(JUnitListenerJar, self).__init__("JUnitListener", tool_path, download_link) def install(self): raise NotImplementedError() class TaurusNosePlugin(RequiredTool): def __init__(self, tool_path, download_link): super(TaurusNosePlugin, self).__init__("TaurusNosePlugin", tool_path, download_link) def install(self): raise NotImplementedError() class NoseTest(object): IMPORTS = """import unittest import re from time import sleep from selenium import webdriver from selenium.common.exceptions import NoSuchElementException from selenium.common.exceptions import NoAlertPresentException """ def __init__(self): self.root = etree.Element("NoseTest") self.tree = etree.ElementTree(self.root) def add_imports(self): imports = etree.Element("imports") imports.text = NoseTest.IMPORTS return imports def gen_class_definition(self, class_name, inherits_from, indent="0"): def_tmpl = "class {class_name}({inherits_from}):" class_def_element = etree.Element("class_definition", indent=indent) class_def_element.text = def_tmpl.format(class_name=class_name, inherits_from="".join(inherits_from)) return class_def_element def gen_method_definition(self, method_name, params, indent="4"): def_tmpl = "def {method_name}({params}):" method_def_element = etree.Element("method_definition", indent=indent) method_def_element.text = def_tmpl.format(method_name=method_name, params=",".join(params)) return method_def_element def gen_decorator_statement(self, decorator_name, indent="4"): def_tmpl = "@{decorator_name}" decorator_element = etree.Element("decorator_statement", indent=indent) decorator_element.text = def_tmpl.format(decorator_name=decorator_name) return decorator_element def gen_method_statement(self, statement, indent="8"): statement_elem = etree.Element("statement", indent=indent) statement_elem.text = statement return statement_elem class SeleniumScriptBuilder(NoseTest): def __init__(self, scenario, parent_logger, wdlog): super(SeleniumScriptBuilder, self).__init__() self.window_size = None self.log = parent_logger.getChild(self.__class__.__name__) self.scenario = scenario self.wdlog = wdlog def gen_test_case(self): self.log.debug("Generating Test Case test method") imports = self.add_imports() self.root.append(imports) test_class = self.gen_class_definition("TestRequests", ["unittest.TestCase"]) self.root.append(test_class) test_class.append(self.gen_setupclass_method()) test_class.append(self.gen_teardownclass_method()) counter = 0 methods = {} requests = self.scenario.get_requests() scenario_timeout = self.scenario.get("timeout", 30) default_address = self.scenario.get("default-address", None) for req in requests: if req.label: label = req.label else: label = req.url mod_label = re.sub('[^0-9a-zA-Z]+', '_', label[:30]) method_name = 'test_%05d_%s' % (counter, mod_label) test_method = self.gen_test_method(method_name) methods[method_name] = label counter += 1 test_class.append(test_method) parsed_url = parse.urlparse(req.url) if default_address is not None and not parsed_url.netloc: url = default_address + req.url else: url = req.url test_method.append(self.gen_comment("start request: %s" % url)) if req.timeout is not None: test_method.append(self.gen_impl_wait(req.timeout)) test_method.append(self.gen_method_statement("self.driver.get('%s')" % url)) think_time = req.think_time if req.think_time else self.scenario.get("think-time", None) if 
think_time is not None: test_method.append(self.gen_method_statement("sleep(%s)" % dehumanize_time(think_time))) if "assert" in req.config: test_method.append(self.__gen_assert_page()) for assert_config in req.config.get("assert"): test_method.extend(self.gen_assertion(assert_config)) if req.timeout is not None: test_method.append(self.gen_impl_wait(scenario_timeout)) test_method.append(self.gen_comment("end request: %s" % url)) test_method.append(self.__gen_new_line()) return methods def gen_setupclass_method(self): self.log.debug("Generating setUp test method") browsers = ["Firefox", "Chrome", "Ie", "Opera"] browser = self.scenario.get("browser", "Firefox") if browser not in browsers: raise ValueError("Unsupported browser name: %s" % browser) setup_method_def = self.gen_decorator_statement('classmethod') setup_method_def.append(self.gen_method_definition("setUpClass", ["cls"])) if browser == 'Firefox': setup_method_def.append(self.gen_method_statement("profile = webdriver.FirefoxProfile()")) statement = "profile.set_preference('webdriver.log.file', %s)" % repr(self.wdlog) log_set = self.gen_method_statement(statement) setup_method_def.append(log_set) setup_method_def.append(self.gen_method_statement("cls.driver = webdriver.Firefox(profile)")) else: setup_method_def.append(self.gen_method_statement("cls.driver = webdriver.%s()" % browser)) scenario_timeout = self.scenario.get("timeout", 30) setup_method_def.append(self.gen_impl_wait(scenario_timeout, target='cls')) if self.window_size: statement = self.gen_method_statement("cls.driver.set_window_size(%s, %s)" % self.window_size) setup_method_def.append(statement) else: setup_method_def.append(self.gen_method_statement("cls.driver.maximize_window()")) setup_method_def.append(self.__gen_new_line()) return setup_method_def def gen_impl_wait(self, timeout, target='self'): return self.gen_method_statement("%s.driver.implicitly_wait(%s)" % (target, dehumanize_time(timeout))) def gen_comment(self, comment): return self.gen_method_statement("# %s" % comment) def gen_test_method(self, name): self.log.debug("Generating test method %s", name) test_method = self.gen_method_definition(name, ["self"]) return test_method def gen_teardownclass_method(self): self.log.debug("Generating tearDown test method") tear_down_method_def = self.gen_decorator_statement('classmethod') tear_down_method_def.append(self.gen_method_definition("tearDownClass", ["cls"])) tear_down_method_def.append(self.gen_method_statement("cls.driver.quit()")) tear_down_method_def.append(self.__gen_new_line()) return tear_down_method_def def gen_assertion(self, assertion_config): self.log.debug("Generating assertion, config: %s", assertion_config) assertion_elements = [] if isinstance(assertion_config, string_types): assertion_config = {"contains": [assertion_config]} for val in assertion_config["contains"]: regexp = assertion_config.get("regexp", True) reverse = assertion_config.get("not", False) subject = assertion_config.get("subject", "body") if subject != "body": raise ValueError("Only 'body' subject supported ") if regexp: assert_method = "self.assertEqual" if reverse else "self.assertNotEqual" assertion_elements.append(self.gen_method_statement('re_pattern = re.compile("%s")' % val)) method = '%s(0, len(re.findall(re_pattern, body)))' % assert_method assertion_elements.append(self.gen_method_statement(method)) else: assert_method = "self.assertNotIn" if reverse else "self.assertIn" assertion_elements.append(self.gen_method_statement('%s("%s", body)' % (assert_method, val))) return 
assertion_elements def __gen_new_line(self, indent="8"): return self.gen_method_statement("", indent=indent) def __gen_assert_page(self): return self.gen_method_statement("body = self.driver.page_source") def save(self, filename): with open(filename, 'wt') as fds: for child in self.root.iter(): if child.text is not None: indent = int(child.get('indent', "0")) fds.write(" " * indent + child.text + "\n") class JUnitMirrorsManager(MirrorsManager): def __init__(self, parent_logger, junit_version): self.junit_version = junit_version super(JUnitMirrorsManager, self).__init__(SeleniumExecutor.JUNIT_MIRRORS_SOURCE, parent_logger) def _parse_mirrors(self): links = [] if self.page_source is not None: self.log.debug('Parsing mirrors...') try: resp = json.loads(self.page_source) objects = resp.get("response", {}).get("docs", []) if objects: obj = objects[0] group = obj.get("g") artifact = obj.get("a") version = obj.get("v") ext = obj.get("p") link_template = "http://search.maven.org/remotecontent?filepath={group}/{artifact}/" \ "{version}/{artifact}-{version}.{ext}" link = link_template.format(group=group, artifact=artifact, version=version, ext=ext) links.append(link) except BaseException as exc: self.log.error("Error while parsing mirrors %s", exc) default_link = SeleniumExecutor.JUNIT_DOWNLOAD_LINK.format(version=self.junit_version) if default_link not in links: links.append(default_link) self.log.debug('Total mirrors: %d', len(links)) return links class LDJSONReader(object): def __init__(self, filename, parent_log): self.log = parent_log.getChild(self.__class__.__name__) self.filename = filename self.fds = None self.partial_buffer = "" self.offset = 0 def read(self, last_pass=False): if not self.fds and not self.__open_fds(): self.log.debug("No data to start reading yet") return self.fds.seek(self.offset) if last_pass: lines = self.fds.readlines() # unlimited else: lines = self.fds.readlines(1024 * 1024) self.offset = self.fds.tell() for line in lines: if not line.endswith("\n"): self.partial_buffer += line continue line = "%s%s" % (self.partial_buffer, line) self.partial_buffer = "" yield json.loads(line) def __open_fds(self): if not os.path.isfile(self.filename): return False fsize = os.path.getsize(self.filename) if not fsize: return False self.fds = open(self.filename, 'rt', buffering=1) return True def __del__(self): if self.fds is not None: self.fds.close() class SeleniumReportReader(object): REPORT_ITEM_KEYS = ["test_case", "test_suite", "status", "start_time", "duration", "error_msg", "error_trace", "extras"] TEST_STATUSES = ("PASSED", "FAILED", "BROKEN", "SKIPPED") FAILING_TESTS_STATUSES = ("FAILED", "BROKEN") def __init__(self, filename, parent_logger, translation_table=None): super(SeleniumReportReader, self).__init__() self.log = parent_logger.getChild(self.__class__.__name__) self.json_reader = LDJSONReader(filename, self.log) self.translation_table = translation_table or {} def process_label(self, label): if label in self.translation_table: return self.translation_table[label] if isinstance(label, string_types): if label.startswith('test_') and label[5:10].isdigit(): return label[11:] return label def read(self, last_pass=False): for row in self.json_reader.read(last_pass): #if any(key not in row for key in self.REPORT_ITEM_KEYS): for key in self.REPORT_ITEM_KEYS: if key not in row: self.log.debug("Unexpected test record: %s", row) self.log.warning("Test record doesn't conform to schema, skipping, %s", key) continue row["test_case"] = self.process_label(row["test_case"]) yield row 
class LoadSamplesReader(ResultsReader): STATUS_TO_CODE = { "PASSED": "200", "SKIPPED": "300", "FAILED": "400", "BROKEN": "500", } def __init__(self, filename, parent_logger, translation_table): super(LoadSamplesReader, self).__init__() self.report_reader = SeleniumReportReader(filename, parent_logger, translation_table) self.read_records = 0 def extract_sample(self, item): tstmp = int(item["start_time"]) label = item["test_case"] concur = 1 rtm = item["duration"] cnn = 0 ltc = 0 rcd = self.STATUS_TO_CODE.get(item["status"], "UNKNOWN") error = item["error_msg"] if item["status"] in SeleniumReportReader.FAILING_TESTS_STATUSES else None trname = "" return tstmp, label, concur, rtm, cnn, ltc, rcd, error, trname def _read(self, last_pass=False): for row in self.report_reader.read(last_pass): self.read_records += 1 sample = self.extract_sample(row) yield sample class FuncSamplesReader(FunctionalResultsReader): def __init__(self, filename, parent_logger, translation_table): self.report_reader = SeleniumReportReader(filename, parent_logger, translation_table) self.read_records = 0 def read(self, last_pass=False): for row in self.report_reader.read(last_pass): self.read_records += 1 sample = FunctionalSample(test_case=row["test_case"], test_suite=row["test_suite"], status=row["status"], start_time=row["start_time"], duration=row["duration"], error_msg=row["error_msg"], error_trace=row["error_trace"], extras=row.get("extras", {})) yield sample
1
13,934
Code style: missing spaces around the `=`. By the way, it's weird that Codacy didn't catch that.
Blazemeter-taurus
py
@@ -72,6 +72,14 @@ func (m *EthAddress) UnmarshalYAML(unmarshal func(interface{}) error) error { return nil } +func (m *EthAddress) IsZero() bool { + return NewBigInt(m.Unwrap().Big()).IsZero() +} + +func NewEthAddress(addr common.Address) *EthAddress { + return &EthAddress{Address: addr.Bytes()} +} + func (m *DataSize) Unwrap() datasize.ByteSize { return datasize.NewByteSize(m.Bytes) }
1
package sonm import ( "errors" "fmt" "math/big" "strings" "time" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/params" "github.com/sonm-io/core/util/datasize" ) var bigEther = big.NewFloat(params.Ether).SetPrec(256) var priceSuffixes = map[string]big.Float{ "SNM/s": *bigEther, "SNM/h": *big.NewFloat(0).SetPrec(256).Quo(bigEther, big.NewFloat(3600)), } var possiblePriceSuffixxes = func() string { keys := make([]string, 0, len(priceSuffixes)) for k := range priceSuffixes { keys = append(keys, k) } return strings.Join(keys, ", ") }() func (m *Duration) Unwrap() time.Duration { return time.Nanosecond * time.Duration(m.GetNanoseconds()) } func (m *Duration) MarshalYAML() (interface{}, error) { return m.Unwrap().String(), nil } func (m *Duration) UnmarshalYAML(unmarshal func(interface{}) error) error { var v string if err := unmarshal(&v); err != nil { return err } d, err := time.ParseDuration(v) if err != nil { return err } m.Nanoseconds = d.Nanoseconds() return nil } func (m *EthAddress) Unwrap() common.Address { return common.BytesToAddress(m.Address) } func (m *EthAddress) MarshalYAML() (interface{}, error) { return m.Unwrap().Hex(), nil } func (m *EthAddress) UnmarshalYAML(unmarshal func(interface{}) error) error { var v string if err := unmarshal(&v); err != nil { return err } if !common.IsHexAddress(v) { return errors.New("invalid ethereum address format") } m.Address = common.HexToAddress(v).Bytes() return nil } func (m *DataSize) Unwrap() datasize.ByteSize { return datasize.NewByteSize(m.Bytes) } func (m *DataSize) UnmarshalYAML(unmarshal func(interface{}) error) error { var v string if err := unmarshal(&v); err != nil { return err } var byteSize datasize.ByteSize if err := byteSize.UnmarshalText([]byte(v)); err != nil { return err } m.Bytes = byteSize.Bytes() return nil } func (m *DataSize) MarshalYAML() (interface{}, error) { text, err := m.Unwrap().MarshalText() return string(text), err } func (m *DataSizeRate) Unwrap() datasize.BitRate { return datasize.NewBitRate(m.BitsPerSecond) } func (m *DataSizeRate) UnmarshalYAML(unmarshal func(interface{}) error) error { var v string if err := unmarshal(&v); err != nil { return err } var bitRate datasize.BitRate if err := bitRate.UnmarshalText([]byte(v)); err != nil { return err } m.BitsPerSecond = bitRate.Bits() return nil } func (m *DataSizeRate) MarshalYAML() (interface{}, error) { text, err := m.Unwrap().MarshalText() return string(text), err } func (m *Price) MarshalYAML() (interface{}, error) { v := big.NewFloat(0).SetPrec(256).SetInt(m.PerSecond.Unwrap()) div := big.NewFloat(params.Ether).SetPrec(256) div.Quo(div, big.NewFloat(3600.)) r := big.NewFloat(0).Quo(v, div) return r.Text('g', 10) + " SNM/h", nil } func (m *Price) UnmarshalYAML(unmarshal func(interface{}) error) error { var v string if err := unmarshal(&v); err != nil { return err } if err := m.LoadFromString(v); err != nil { return err } return nil } func (m *Price) LoadFromString(v string) error { parts := strings.FieldsFunc(v, func(c rune) bool { return c == ' ' }) if len(parts) != 2 { return fmt.Errorf("could not load price - %s can not be split to numeric and dimension parts", v) } dimensionMultiplier, ok := priceSuffixes[parts[1]] if !ok { return fmt.Errorf("could not load price - unknown dimension %s, possible values are - %s", parts[1], possiblePriceSuffixxes) } fractPrice, _, err := big.ParseFloat(parts[0], 10, 256, big.ToNearestEven) if err != nil { return fmt.Errorf("could not load price - failed to parse numeric part %s to big float: 
%s", parts[0], err) } price, _ := fractPrice.Mul(fractPrice, &dimensionMultiplier).Int(nil) m.PerSecond = NewBigInt(price) return nil }
1
6,772
No need for the BigInt wrapper here; just use m.Unwrap().Big().BitLen() == 0. (A sketch follows this record.)
sonm-io-core
go
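A minimal Go sketch of the reviewer's suggestion above. The EthAddress struct and its Unwrap method mirror the shape shown in the record (the real type is protobuf-generated and lives in the sonm package); the point is that a *big.Int is zero exactly when BitLen() returns 0, so the intermediate BigInt wrapper drops out.

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
)

// EthAddress stands in for the generated protobuf message from the record above.
type EthAddress struct {
	Address []byte
}

// Unwrap converts the raw bytes back into a go-ethereum common.Address.
func (m *EthAddress) Unwrap() common.Address {
	return common.BytesToAddress(m.Address)
}

// IsZero implements the suggested simplification: no intermediate BigInt,
// just check whether the address interpreted as a big integer has zero bits.
func (m *EthAddress) IsZero() bool {
	return m.Unwrap().Big().BitLen() == 0
}

func main() {
	zero := &EthAddress{Address: make([]byte, common.AddressLength)}
	one := &EthAddress{Address: big.NewInt(1).Bytes()}
	fmt.Println(zero.IsZero(), one.IsZero()) // true false
}

The same check works for any fixed-width byte address, since BytesToAddress left-pads short inputs with zero bytes.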
@@ -197,7 +197,7 @@ func (ctx *conjMatchFlowContext) installOrUpdateFlow(actions []*conjunctiveActio // Build the Openflow entry. actions here should not be empty for either add or update case. flow := ctx.client.conjunctiveMatchFlow(ctx.tableID, ctx.matchKey, ctx.matchValue, actions...) - if err := flow.Add(); err != nil { + if err := ctx.client.flowOperations.Add(flow); err != nil { return err } ctx.flow = flow
1
// Copyright 2019 Antrea Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package openflow import ( "fmt" "net" coreV1 "k8s.io/api/core/v1" v1 "k8s.io/api/networking/v1" "k8s.io/klog" "github.com/vmware-tanzu/antrea/pkg/agent/types" binding "github.com/vmware-tanzu/antrea/pkg/ovs/openflow" ) const ( MatchDstIP int = iota MatchSrcIP MatchDstIPNet MatchSrcIPNet MatchDstOFPort MatchSrcOFPort MatchTCPDstPort MatchUDPDstPort MatchSCTPDstPort Unsupported ) // IP address calculated from Pod's address. type IPAddress net.IP func (a *IPAddress) GetMatchKey(addrType types.AddressType) int { switch addrType { case types.SrcAddress: return MatchSrcIP case types.DstAddress: return MatchDstIP default: klog.Errorf("Unknown AddressType %d in IPAddress", addrType) return Unsupported } } func (a *IPAddress) GetMatchValue() string { addr := net.IP(*a) return addr.String() } func (a *IPAddress) GetValue() interface{} { return net.IP(*a) } func NewIPAddress(addr net.IP) *IPAddress { ia := IPAddress(addr) return &ia } // IP block calculated from Pod's address. type IPNetAddress net.IPNet func (a *IPNetAddress) GetMatchKey(addrType types.AddressType) int { switch addrType { case types.SrcAddress: return MatchSrcIPNet case types.DstAddress: return MatchDstIPNet default: klog.Errorf("Unknown AddressType %d in IPNetAddress", addrType) return Unsupported } } func (a *IPNetAddress) GetMatchValue() string { addr := net.IPNet(*a) return addr.String() } func (a *IPNetAddress) GetValue() interface{} { return net.IPNet(*a) } func NewIPNetAddress(addr net.IPNet) *IPNetAddress { ia := IPNetAddress(addr) return &ia } // OFPortAddress is the Openflow port of an interface. type OFPortAddress int32 func (a *OFPortAddress) GetMatchKey(addrType types.AddressType) int { switch addrType { case types.SrcAddress: // in_port is used in egress rule to match packets sent from local Pod. Service traffic is not covered by this // match, and source IP will be matched instead. return MatchSrcOFPort case types.DstAddress: return MatchDstOFPort default: klog.Errorf("Unknown AddressType %d in OFPortAddress", addrType) return Unsupported } } func (a *OFPortAddress) GetMatchValue() string { return fmt.Sprintf("%d", int32(*a)) } func (a *OFPortAddress) GetValue() interface{} { return int32(*a) } func NewOFPortAddress(addr int32) *OFPortAddress { a := OFPortAddress(addr) return &a } // ConjunctionNotFound is an error response when the specified policyRuleConjunction is not found from the local cache. type ConjunctionNotFound uint32 func (e *ConjunctionNotFound) Error() string { return fmt.Sprintf("policyRuleConjunction with ID %d not found", uint32(*e)) } func newConjunctionNotFound(conjunctionID uint32) *ConjunctionNotFound { err := ConjunctionNotFound(conjunctionID) return &err } // conjunctiveMatch generates match conditions for conjunctive match flow entry, including source or destination // IP address, ofport number of OVS interface, or Service port. 
When conjunctiveMatch is used to match IP // address or ofport number, matchProtocol is "ip". When conjunctiveMatch is used to match Service // port, matchProtocol is Service protocol. If Service protocol is not set, "tcp" is used by default. type conjunctiveMatch struct { tableID binding.TableIDType matchKey int matchValue interface{} } func (m *conjunctiveMatch) generateGlobalMapKey() string { return fmt.Sprintf("table:%d,type:%d,value:%s", m.tableID, m.matchKey, m.matchValue) } // conjunctiveAction generates the policyRuleConjunction action in Openflow entry. The flow action is like // policyRuleConjunction(conjID,clauseID/nClause) when it has been realized on the switch. type conjunctiveAction struct { conjID uint32 clauseID uint8 nClause uint8 } // conjMatchFlowContext generates conjunctive match flow entries for conjunctions share the same match conditions. // One conjMatchFlowContext is responsible for one specific conjunctive match flow entry. As the match condition // of the flow entry can be shared by different conjunctions, the realized Openflow entry might have multiple // conjunctive actions. If the dropTable is not nil, conjMatchFlowContext also installs a drop flow in the dropTable. type conjMatchFlowContext struct { // conjunctiveMatch describes the match condition of conjunctive match flow entry. *conjunctiveMatch // actions is a map from policyRuleConjunction ID to conjunctiveAction. It records all the conjunctive actions in // the conjunctive match flow. When the number of actions is reduced to 0, the conjMatchFlowContext.flow is // uninstalled from the switch. actions map[uint32]*conjunctiveAction // denyAllRules is a set to cache the "DENY-ALL" rules that is applied to the matching address in this context. denyAllRules map[uint32]bool client *client // flow is the conjunctive match flow built from this context. flow needs to be updated if actions are changed. flow binding.Flow // dropflow is the default drop flow built from this context to drop packets in the AppliedToGroup but not pass the // NetworkPolicy rule. dropFlow is installed on the switch as long as either actions or denyAllRules is not // empty, and uninstalled when both two are empty. When the dropFlow is uninstalled from the switch, the // conjMatchFlowContext is removed from the cache. dropFlow binding.Flow } // installOrUpdateFlow installs or updates conjunctive match entries of the rule table. func (ctx *conjMatchFlowContext) installOrUpdateFlow(actions []*conjunctiveAction) error { // Check if flow is already installed. If not, add new flow on the switch. if ctx.flow == nil { // Check then number of valid conjunctiveAction, no need to install openflow if it is 0. It happens when the match // condition is used only for matching AppliedToGroup, but no From or To is defined in the NetworkPolicy rule. if len(actions) == 0 { return nil } // Build the Openflow entry. actions here should not be empty for either add or update case. flow := ctx.client.conjunctiveMatchFlow(ctx.tableID, ctx.matchKey, ctx.matchValue, actions...) if err := flow.Add(); err != nil { return err } ctx.flow = flow return nil } // Modify existing Openflow entry with latest actions. 
flowBuilder := ctx.flow.CopyToBuilder() for _, act := range actions { flowBuilder.Action().Conjunction(act.conjID, act.clauseID, act.nClause) } newFlow := flowBuilder.Done() if err := newFlow.Modify(); err != nil { return err } ctx.flow = newFlow return nil } // deleteAction deletes the specified policyRuleConjunction from conjunctiveMatchFlow's actions, and then updates the // conjunctive match flow entry on the switch. func (ctx *conjMatchFlowContext) deleteAction(conjID uint32) error { // If the specified conjunctive action is the last one in actions, delete the conjunctive match flow entry from the // switch. No need to check if the conjunction ID of the only conjunctive action is the specified ID or not, as it // has been checked in the caller. if len(ctx.actions) == 1 && ctx.flow != nil { if err := ctx.flow.Delete(); err != nil { return err } ctx.flow = nil } else { // Update Openflow entry with the left conjunctive actions. var actions []*conjunctiveAction for _, act := range ctx.actions { if act.conjID != conjID { actions = append(actions, act) } } err := ctx.installOrUpdateFlow(actions) if err != nil { return err } } delete(ctx.actions, conjID) return nil } // addAction adds the specified conjunction into conjunctiveMatchFlow's actions, and then updates the conjunctive // match flow on the switch. It also installs default drop flow if dropTable is not nil, and the dropFlow is not // installed before. func (ctx *conjMatchFlowContext) addAction(action *conjunctiveAction) error { // Check if the conjunction exists in conjMatchFlowContext actions or not. If yes, return nil directly. // Otherwise, add the new action, and update the Openflow entry. _, found := ctx.actions[action.conjID] if found { return nil } // Install or update Openflow entry for the new conjunctiveAction. actions := make([]*conjunctiveAction, 0, len(ctx.actions)+1) for _, act := range ctx.actions { actions = append(actions, act) } actions = append(actions, action) err := ctx.installOrUpdateFlow(actions) if err != nil { return err } ctx.actions[action.conjID] = action return nil } func (ctx *conjMatchFlowContext) addDenyAllRule(ruleID uint32) error { if ctx.denyAllRules == nil { ctx.denyAllRules = make(map[uint32]bool) } ctx.denyAllRules[ruleID] = true return nil } // policyRuleConjunction is responsible to build Openflow entries for Pods that are in a NetworkPolicy rule's AppliedToGroup. // The Openflow entries include conjunction action flows, conjunctive match flows, and default drop flows in the dropTable. // NetworkPolicyController will make sure only one goroutine operates on a policyRuleConjunction. // 1) Conjunction action flows use policyRuleConjunction ID as match condition. policyRuleConjunction ID is the single // match condition for conjunction action flows to allow packets. If the NetworkPolicy rule has also configured excepts // in From or To, extra Openflow entries are installed to drop packets using the addresses in the excepts and // policyRuleConjunction ID as the match conditions, and these flows have a higher priority than the one only matching // policyRuleConjunction ID. // 2) Conjunctive match flows adds conjunctive actions in Openflow entry, and they are grouped by clauses. The match // condition in one clause is one of these three types: from address(for fromClause), or to address(for toClause), or // service ports(for serviceClause) configured in the NetworkPolicy rule. 
Each conjunctive match flow entry is // maintained by one specific conjMatchFlowContext which is stored in globalConjMatchFlowCache, and shared by clauses // if they have the same match conditions. clause adds or deletes conjunctive action to conjMatchFlowContext actions. // A clause is hit if the packet matches any conjunctive match flow that are grouped by this clause. Conjunction // action flow is hit only if all clauses in the policyRuleConjunction are hit. // 3) Default drop flows are also maintained by conjMatchFlowContext. It is used to drop packets sent from or to the // AppliedToGroup but not pass the Network Policy rule. type policyRuleConjunction struct { id uint32 fromClause *clause toClause *clause serviceClause *clause actionFlows []binding.Flow } // clause groups conjunctive match flows. Matches in a clause represent source addresses(for fromClause), or destination // addresses(for toClause) or service ports(for serviceClause) in a NetworkPolicy rule. When the new address or service // port is added into the clause, it adds a new conjMatchFlowContext into globalConjMatchFlowCache (or finds the // existing one from globalConjMatchFlowCache), and then update the key of the conjunctiveMatch into its own matches. // When address is deleted from the clause, it deletes the conjunctive action from the conjMatchFlowContext, // and then deletes the key of conjunctiveMatch from its own matches. type clause struct { action *conjunctiveAction // matches is a map from the unique string generated from the conjunctiveMatch to conjMatchFlowContext. It is used // to cache conjunctive match conditions in the same clause. matches map[string]*conjMatchFlowContext // ruleTable is where to install conjunctive match flows. ruleTable binding.Table // dropTable is where to install Openflow entries to drop the packet sent to or from the AppliedToGroup but does not // satisfy any conjunctive match conditions. It should be nil, if the clause is used for matching service port. dropTable binding.Table } func (c *clause) addConjunctiveMatchFlow(client *client, match *conjunctiveMatch) error { matcherKey := match.generateGlobalMapKey() _, found := c.matches[matcherKey] if found { klog.V(2).Infof("Conjunctive match flow with matcher %s is already added in rule: %d", matcherKey, c.action.conjID) return nil } client.conjMatchFlowLock.Lock() defer client.conjMatchFlowLock.Unlock() // Get conjMatchFlowContext from globalConjMatchFlowCache. If it doesn't exist, create a new one and add into the cache. context, found := client.globalConjMatchFlowCache[matcherKey] if !found { context = &conjMatchFlowContext{ conjunctiveMatch: match, actions: make(map[uint32]*conjunctiveAction), client: client, } // Install the default drop flow entry if dropTable is not nil. if c.dropTable != nil && context.dropFlow == nil { dropFlow := context.client.defaultDropFlow(c.dropTable.GetID(), match.matchKey, match.matchValue) if err := dropFlow.Add(); err != nil { return err } context.dropFlow = dropFlow } client.globalConjMatchFlowCache[matcherKey] = context } if c.action.nClause > 1 { // Add the conjunction into conjunctiveFlowContext's actions, and update the flow entry on the switch. err := context.addAction(c.action) if err != nil { return err } } else { // Add the DENY-ALL rule into conjunctiveFlowContext's denyAllRules. 
err := context.addDenyAllRule(c.action.conjID) if err != nil { return err } } c.matches[matcherKey] = context return nil } func (c *clause) generateAddressConjMatch(addr types.Address, addrType types.AddressType) *conjunctiveMatch { matchKey := addr.GetMatchKey(addrType) matchValue := addr.GetValue() match := &conjunctiveMatch{ tableID: c.ruleTable.GetID(), matchKey: matchKey, matchValue: matchValue, } return match } func getServiceMatchType(protocol *coreV1.Protocol) int { switch *protocol { case coreV1.ProtocolTCP: return MatchTCPDstPort case coreV1.ProtocolUDP: return MatchUDPDstPort case coreV1.ProtocolSCTP: return MatchSCTPDstPort default: return MatchTCPDstPort } } func (c *clause) generateServicePortConjMatch(port *v1.NetworkPolicyPort) *conjunctiveMatch { matchKey := getServiceMatchType(port.Protocol) matchValue := uint16(port.Port.IntVal) match := &conjunctiveMatch{ tableID: c.ruleTable.GetID(), matchKey: matchKey, matchValue: matchValue, } return match } // addAddrFlows translates the specified addresses to conjunctiveMatchFlow, and installs corresponding Openflow entry. func (c *clause) addAddrFlows(client *client, addrType types.AddressType, addresses []types.Address) error { for _, addr := range addresses { match := c.generateAddressConjMatch(addr, addrType) err := c.addConjunctiveMatchFlow(client, match) if err != nil { return err } } return nil } // addServiceFlows translates the specified NetworkPolicyPorts to conjunctiveMatchFlow, and installs corresponding Openflow entry. func (c *clause) addServiceFlows(client *client, ports []*v1.NetworkPolicyPort) error { for _, port := range ports { match := c.generateServicePortConjMatch(port) err := c.addConjunctiveMatchFlow(client, match) if err != nil { return err } } return nil } // deleteConjunctiveMatchFlow deletes the specific conjunctiveAction from existing flow. func (c *clause) deleteConjunctiveMatchFlow(flowContextKey string) error { context, found := c.matches[flowContextKey] // Match is not located in clause cache. It happens if the conjMatchFlowContext is already deleted from clause local cache. if !found { return nil } conjID := c.action.conjID context.client.conjMatchFlowLock.Lock() defer context.client.conjMatchFlowLock.Unlock() if c.action.nClause > 1 { // Delete the conjunctive action if it is in context actions. _, found = context.actions[conjID] if found { err := context.deleteAction(conjID) if err != nil { return err } } } else { // Delete the DENY-ALL rule if it is in context denyAllRules. _, found := context.denyAllRules[conjID] if found { delete(context.denyAllRules, conjID) } } // Uninstall default drop flow if both actions and denyAllRules are empty. if len(context.actions) == 0 && len(context.denyAllRules) == 0 { if context.dropFlow != nil { if err := context.dropFlow.Delete(); err != nil { return err } context.dropFlow = nil } // Remove the context from global cache after both the conjunctive match flow and the default drop // flow are uninstalled from the switch. delete(context.client.globalConjMatchFlowCache, context.generateGlobalMapKey()) } // Delete the key of conjMatchFlowContext from clause matches. delete(c.matches, flowContextKey) return nil } // deleteAddrFlows deletes conjunctiveMatchFlow relevant to the specified addresses from local cache, // and uninstalls Openflow entry. 
func (c *clause) deleteAddrFlows(addrType types.AddressType, addresses []types.Address) error { for _, addr := range addresses { match := c.generateAddressConjMatch(addr, addrType) contextKey := match.generateGlobalMapKey() err := c.deleteConjunctiveMatchFlow(contextKey) if err != nil { return err } } return nil } // deleteAllMatches deletes all conjunctiveMatchFlow in the clause, and removes Openflow entry. deleteAllMatches // is always invoked when NetworkPolicy rule is deleted. func (c *clause) deleteAllMatches() error { for key := range c.matches { err := c.deleteConjunctiveMatchFlow(key) if err != nil { return err } } return nil } func (c *policyRuleConjunction) getAddressClause(addrType types.AddressType) *clause { switch addrType { case types.SrcAddress: return c.fromClause case types.DstAddress: return c.toClause default: klog.Errorf("no address clause use AddressType %d", addrType) return nil } } // InstallPolicyRuleFlows installs flows for a new NetworkPolicy rule. Rule should include all fields in the // NetworkPolicy rule. Each ingress/egress policy rule installs Openflow entries on two tables, one for ruleTable and // the other for dropTable. If a packet does not pass the ruleTable, it will be dropped by the dropTable. // NetworkPolicyController will make sure only one goroutine operates on a PolicyRule and addresses in the rule. // For a normal NetworkPolicy rule, these Openflow entries are installed: 1) 1 conjunction action flow, and 0 or multiple // conjunction except flows, the number of conjunction excepts flows is decided by the addresses in rule.ExceptFrom and // rule.ExceptTo is configured; 2) multiple conjunctive match flows, the flow number depends on addresses in rule.From // and rule.To, and service ports in rule.Service; and 3) multiple default drop flows, the number is dependent on // on the addresses in rule.From for an egress rule, and addresses in rule.To for an ingress rule. // For ALLOW-ALL rule, the Openflow entries installed on the switch are similar to a normal rule. The differences include, // 1) rule.Service is nil; and 2) rule.To has only one address "0.0.0.0/0" for egress rule, and rule.From is "0.0.0.0/0" // for ingress rule. // For DENY-ALL rule, only the default drop flow is installed for the addresses in rule.From for egress rule, or // addresses in rule.To for ingress rule. No conjunctive match flow or conjunction action except flows are installed. // A DENY-ALL rule is configured with rule.ID, rule.Direction, and either rule.From(egress rule) or rule.To(ingress rule). // Other fields in the rule should be nil. // If there is an error in any clause's addAddrFlows or addServiceFlows, the conjunction action flow will never be hit. // If the default drop flow is already installed before this error, all packets will be dropped by the default drop flow, // Otherwise all packets will be allowed. func (c *client) InstallPolicyRuleFlows(rule *types.PolicyRule) error { c.replayMutex.RLock() defer c.replayMutex.RUnlock() // Check if the policyRuleConjunction is added into cache or not. If yes, return nil. 
conj := c.getPolicyRuleConjunction(rule.ID) if conj != nil { klog.V(2).Infof("PolicyRuleConjunction %d is already added in cache", rule.ID) return nil } var ruleTable, dropTable binding.Table var isEgressRule = false switch rule.Direction { case v1.PolicyTypeEgress: ruleTable = c.pipeline[egressRuleTable] dropTable = c.pipeline[egressDefaultTable] isEgressRule = true default: ruleTable = c.pipeline[ingressRuleTable] dropTable = c.pipeline[ingressDefaultTable] } conj = &policyRuleConjunction{ id: rule.ID, } var fromID, toID, serviceID, nClause uint8 // Calculate clause ID if rule.From != nil { nClause += 1 fromID = nClause } if rule.To != nil { nClause += 1 toID = nClause } if rule.Service != nil { nClause += 1 serviceID = nClause } // Conjunction action flows are installed only if the number of clauses in the conjunction is > 1. It should be a rule // to drop all packets. If the number is 1, no conjunctive match flows or conjunction action flows are installed, // but the default drop flow is installed. if nClause > 1 { // Install action flows. var actionFlows = []binding.Flow{ c.conjunctionActionFlow(rule.ID, ruleTable.GetID(), dropTable.GetNext()), } if rule.ExceptFrom != nil { for _, addr := range rule.ExceptFrom { flow := c.conjunctionExceptionFlow(rule.ID, ruleTable.GetID(), dropTable.GetID(), addr.GetMatchKey(types.SrcAddress), addr.GetValue()) actionFlows = append(actionFlows, flow) } } if rule.ExceptTo != nil { for _, addr := range rule.ExceptTo { flow := c.conjunctionExceptionFlow(rule.ID, ruleTable.GetID(), dropTable.GetID(), addr.GetMatchKey(types.DstAddress), addr.GetValue()) actionFlows = append(actionFlows, flow) } } for _, flow := range actionFlows { err := flow.Add() if err != nil { return err } } conj.actionFlows = actionFlows } // Install conjunctive match flows if exists in rule.Form/To/Service var defaultTable binding.Table if rule.From != nil { if isEgressRule { defaultTable = dropTable } else { defaultTable = nil } conj.fromClause = conj.newClause(fromID, nClause, ruleTable, defaultTable) if err := conj.fromClause.addAddrFlows(c, types.SrcAddress, rule.From); err != nil { return err } } if rule.To != nil { if !isEgressRule { defaultTable = dropTable } else { defaultTable = nil } conj.toClause = conj.newClause(toID, nClause, ruleTable, defaultTable) if err := conj.toClause.addAddrFlows(c, types.DstAddress, rule.To); err != nil { return err } } if rule.Service != nil { conj.serviceClause = conj.newClause(serviceID, nClause, ruleTable, nil) if err := conj.serviceClause.addServiceFlows(c, rule.Service); err != nil { return err } } c.policyCache.Store(rule.ID, conj) return nil } func (c *policyRuleConjunction) newClause(clauseID uint8, nClause uint8, ruleTable, dropTable binding.Table) *clause { return &clause{ ruleTable: ruleTable, dropTable: dropTable, matches: make(map[string]*conjMatchFlowContext, 0), action: &conjunctiveAction{ conjID: c.id, clauseID: clauseID, nClause: nClause, }, } } func (c *client) getPolicyRuleConjunction(ruleID uint32) *policyRuleConjunction { conj, found := c.policyCache.Load(ruleID) if !found { return nil } return conj.(*policyRuleConjunction) } // UninstallPolicyRuleFlows removes the Openflow entry relevant to the specified NetworkPolicy rule. // UninstallPolicyRuleFlows will do nothing if no Openflow entry for the rule is installed. 
func (c *client) UninstallPolicyRuleFlows(ruleID uint32) error { c.replayMutex.RLock() defer c.replayMutex.RUnlock() conj := c.getPolicyRuleConjunction(ruleID) if conj == nil { klog.V(2).Infof("policyRuleConjunction with ID %d not found", ruleID) return nil } // Delete action flows for _, flow := range conj.actionFlows { err := flow.Delete() if err != nil { return err } } // Remove conjunctive match flows grouped by this PolicyRuleConjunction's clauses. if conj.fromClause != nil { err := conj.fromClause.deleteAllMatches() if err != nil { return err } } if conj.toClause != nil { err := conj.toClause.deleteAllMatches() if err != nil { return err } } if conj.serviceClause != nil { err := conj.serviceClause.deleteAllMatches() if err != nil { return err } } // Remove policyRuleConjunction from client's policyCache. c.policyCache.Delete(ruleID) return nil } func (c *client) replayPolicyFlows() { addActionFlows := func(conj *policyRuleConjunction) { for _, flow := range conj.actionFlows { if err := flow.Add(); err != nil { klog.Errorf("Error when replaying flow: %v", err) } } } c.policyCache.Range(func(key, value interface{}) bool { addActionFlows(value.(*policyRuleConjunction)) return true }) for _, ctx := range c.globalConjMatchFlowCache { if ctx.dropFlow != nil { if err := ctx.dropFlow.Add(); err != nil { klog.Errorf("Error when replaying flow: %v", err) } } if ctx.flow != nil { if err := ctx.flow.Add(); err != nil { klog.Errorf("Error when replaying flow: %v", err) } } } } // AddPolicyRuleAddress adds one or multiple addresses to the specified NetworkPolicy rule. If addrType is srcAddress, the // addresses are added to PolicyRule.From, else to PolicyRule.To. func (c *client) AddPolicyRuleAddress(ruleID uint32, addrType types.AddressType, addresses []types.Address) error { c.replayMutex.RLock() defer c.replayMutex.RUnlock() conj := c.getPolicyRuleConjunction(ruleID) // If policyRuleConjunction doesn't exist in client's policyCache return not found error. It should not happen, since // NetworkPolicyController will guarantee the policyRuleConjunction is created before this method is called. The check // here is for safety. if conj == nil { return newConjunctionNotFound(ruleID) } var clause = conj.getAddressClause(addrType) // Check if the clause is nil or not. The clause is nil if the addrType is an unsupported type. if clause == nil { return fmt.Errorf("no clause is using addrType %d", addrType) } return clause.addAddrFlows(c, addrType, addresses) } // DeletePolicyRuleAddress removes addresses from the specified NetworkPolicy rule. If addrType is srcAddress, the addresses // are removed from PolicyRule.From, else from PolicyRule.To. func (c *client) DeletePolicyRuleAddress(ruleID uint32, addrType types.AddressType, addresses []types.Address) error { c.replayMutex.RLock() defer c.replayMutex.RUnlock() conj := c.getPolicyRuleConjunction(ruleID) // If policyRuleConjunction doesn't exist in client's policyCache return not found error. It should not happen, since // NetworkPolicyController will guarantee the policyRuleConjunction is created before this method is called. The check // here is for safety. if conj == nil { return newConjunctionNotFound(ruleID) } var clause = conj.getAddressClause(addrType) // Check if the clause is nil or not. The clause is nil if the addrType is an unsupported type. if clause == nil { return fmt.Errorf("no clause is using addrType %d", addrType) } // Remove policyRuleConjunction to actions of conjunctive match using specific address. 
return clause.deleteAddrFlows(addrType, addresses) }
1
10,880
I assume you want to change NetworkPolicy to use bundles in a separate PR later? (A sketch of the operations seam follows this record.)
antrea-io-antrea
go
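A self-contained Go sketch of the indirection the patch above introduces: flows are no longer installed via flow.Add() directly but through an operations value owned by the client, so a later change (such as the bundle-based installation hinted at in the review comment) only has to swap the implementation. Flow, flowOperations, directOperations, and fakeFlow are illustrative stand-ins here, not Antrea's actual types.

package main

import "fmt"

// Flow models just enough of binding.Flow for this sketch.
type Flow interface {
	Add() error
}

// flowOperations is the seam the patch routes installations through; a
// bundle/batching implementation could replace directOperations later.
type flowOperations interface {
	Add(flow Flow) error
}

// directOperations mirrors today's behaviour: delegate straight to the flow.
type directOperations struct{}

func (directOperations) Add(flow Flow) error { return flow.Add() }

// fakeFlow is a trivial Flow used to exercise the seam.
type fakeFlow struct{ name string }

func (f fakeFlow) Add() error {
	fmt.Println("installed flow:", f.name)
	return nil
}

func main() {
	var ops flowOperations = directOperations{}
	if err := ops.Add(fakeFlow{name: "conjunctiveMatchFlow"}); err != nil {
		fmt.Println("install failed:", err)
	}
}

Callers such as installOrUpdateFlow then depend only on the interface, which is what would make a later switch to bundles mechanical.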
@@ -77,7 +77,7 @@ MARKER_FILE_LIGHT_VERSION = "%s/.light-version" % dirs.static_libs IMAGE_NAME_SFN_LOCAL = "amazon/aws-stepfunctions-local" ARTIFACTS_REPO = "https://github.com/localstack/localstack-artifacts" SFN_PATCH_URL_PREFIX = ( - f"{ARTIFACTS_REPO}/raw/047cc6dcd2e31f5ff3ec52d293c61b875f606958/stepfunctions-local-patch" + f"{ARTIFACTS_REPO}/raw/a4adc8f4da9c7ec0d93b50ca5b73dd14df791c0e/stepfunctions-local-patch" ) SFN_PATCH_CLASS1 = "com/amazonaws/stepfunctions/local/runtime/Config.class" SFN_PATCH_CLASS2 = (
1
#!/usr/bin/env python import functools import glob import logging import os import platform import re import shutil import stat import sys import tempfile import time from pathlib import Path from typing import Callable, Dict, List, Tuple import requests from plugin import Plugin, PluginManager from localstack import config from localstack.config import dirs, is_env_true from localstack.constants import ( DEFAULT_SERVICE_PORTS, DYNAMODB_JAR_URL, ELASTICMQ_JAR_URL, ELASTICSEARCH_DEFAULT_VERSION, ELASTICSEARCH_DELETE_MODULES, ELASTICSEARCH_PLUGIN_LIST, KMS_URL_PATTERN, LOCALSTACK_MAVEN_VERSION, MODULE_MAIN_PATH, OPENSEARCH_DEFAULT_VERSION, STS_JAR_URL, ) from localstack.runtime import hooks from localstack.utils.common import ( chmod_r, download, file_exists_not_empty, get_arch, is_windows, load_file, mkdir, new_tmp_file, parallelize, retry, rm_rf, run, safe_run, save_file, untar, unzip, ) from localstack.utils.docker_utils import DOCKER_CLIENT LOG = logging.getLogger(__name__) INSTALL_DIR_NPM = "%s/node_modules" % MODULE_MAIN_PATH # FIXME: migrate to infra INSTALL_DIR_DDB = "%s/dynamodb" % dirs.static_libs INSTALL_DIR_KCL = "%s/amazon-kinesis-client" % dirs.static_libs INSTALL_DIR_STEPFUNCTIONS = "%s/stepfunctions" % dirs.static_libs INSTALL_DIR_KMS = "%s/kms" % dirs.static_libs INSTALL_DIR_ELASTICMQ = "%s/elasticmq" % dirs.static_libs INSTALL_PATH_LOCALSTACK_FAT_JAR = "%s/localstack-utils-fat.jar" % dirs.static_libs INSTALL_PATH_DDB_JAR = os.path.join(INSTALL_DIR_DDB, "DynamoDBLocal.jar") INSTALL_PATH_KCL_JAR = os.path.join(INSTALL_DIR_KCL, "aws-java-sdk-sts.jar") INSTALL_PATH_STEPFUNCTIONS_JAR = os.path.join(INSTALL_DIR_STEPFUNCTIONS, "StepFunctionsLocal.jar") INSTALL_PATH_KMS_BINARY_PATTERN = os.path.join(INSTALL_DIR_KMS, "local-kms.<arch>.bin") INSTALL_PATH_ELASTICMQ_JAR = os.path.join(INSTALL_DIR_ELASTICMQ, "elasticmq-server.jar") INSTALL_PATH_KINESALITE_CLI = os.path.join(INSTALL_DIR_NPM, "kinesalite", "cli.js") INSTALL_PATH_KINESIS_MOCK = os.path.join(dirs.static_libs, "kinesis-mock") URL_LOCALSTACK_FAT_JAR = ( "https://repo1.maven.org/maven2/" + "cloud/localstack/localstack-utils/{v}/localstack-utils-{v}-fat.jar" ).format(v=LOCALSTACK_MAVEN_VERSION) MARKER_FILE_LIGHT_VERSION = "%s/.light-version" % dirs.static_libs IMAGE_NAME_SFN_LOCAL = "amazon/aws-stepfunctions-local" ARTIFACTS_REPO = "https://github.com/localstack/localstack-artifacts" SFN_PATCH_URL_PREFIX = ( f"{ARTIFACTS_REPO}/raw/047cc6dcd2e31f5ff3ec52d293c61b875f606958/stepfunctions-local-patch" ) SFN_PATCH_CLASS1 = "com/amazonaws/stepfunctions/local/runtime/Config.class" SFN_PATCH_CLASS2 = ( "com/amazonaws/stepfunctions/local/runtime/executors/task/LambdaTaskStateExecutor.class" ) SFN_PATCH_CLASS_STARTER = "cloud/localstack/StepFunctionsStarter.class" SFN_PATCH_CLASS_REGION = "cloud/localstack/RegionAspect.class" SFN_PATCH_FILE_METAINF = "META-INF/aop.xml" # additional JAR libs required for multi-region and persistence (PRO only) support MAVEN_REPO = "https://repo1.maven.org/maven2" URL_ASPECTJRT = f"{MAVEN_REPO}/org/aspectj/aspectjrt/1.9.7/aspectjrt-1.9.7.jar" URL_ASPECTJWEAVER = f"{MAVEN_REPO}/org/aspectj/aspectjweaver/1.9.7/aspectjweaver-1.9.7.jar" JAR_URLS = [URL_ASPECTJRT, URL_ASPECTJWEAVER] # kinesis-mock version KINESIS_MOCK_VERSION = os.environ.get("KINESIS_MOCK_VERSION") or "0.2.2" KINESIS_MOCK_RELEASE_URL = ( "https://api.github.com/repos/etspaceman/kinesis-mock/releases/tags/" + KINESIS_MOCK_VERSION ) # debugpy module DEBUGPY_MODULE = "debugpy" DEBUGPY_DEPENDENCIES = ["gcc", "python3-dev", "musl-dev"] # Target 
version for javac, to ensure compatibility with earlier JREs JAVAC_TARGET_VERSION = "1.8" # SQS backend implementation provider - either "moto" or "elasticmq" SQS_BACKEND_IMPL = os.environ.get("SQS_PROVIDER") or "moto" # GO Lambda runtime GO_RUNTIME_VERSION = "0.4.0" GO_RUNTIME_DOWNLOAD_URL_TEMPLATE = "https://github.com/localstack/awslamba-go-runtime/releases/download/v{version}/awslamba-go-runtime-{version}-{os}-{arch}.tar.gz" GO_INSTALL_FOLDER = os.path.join(config.dirs.var_libs, "awslamba-go-runtime") GO_LAMBDA_RUNTIME = os.path.join(GO_INSTALL_FOLDER, "aws-lambda-mock") GO_LAMBDA_MOCKSERVER = os.path.join(GO_INSTALL_FOLDER, "mockserver") # Terraform (used for tests) TERRAFORM_VERSION = "1.1.3" TERRAFORM_URL_TEMPLATE = ( "https://releases.hashicorp.com/terraform/{version}/terraform_{version}_{os}_{arch}.zip" ) TERRAFORM_BIN = os.path.join(dirs.static_libs, f"terraform-{TERRAFORM_VERSION}", "terraform") # Java Test Jar Download (used for tests) TEST_LAMBDA_JAVA = os.path.join(config.dirs.var_libs, "localstack-utils-tests.jar") MAVEN_BASE_URL = "https://repo.maven.apache.org/maven2" TEST_LAMBDA_JAR_URL = "{url}/cloud/localstack/{name}/{version}/{name}-{version}-tests.jar".format( version=LOCALSTACK_MAVEN_VERSION, url=MAVEN_BASE_URL, name="localstack-utils" ) def get_elasticsearch_install_version(version: str) -> str: from localstack.services.es import versions if config.SKIP_INFRA_DOWNLOADS: return ELASTICSEARCH_DEFAULT_VERSION return versions.get_install_version(version) def get_elasticsearch_install_dir(version: str) -> str: version = get_elasticsearch_install_version(version) if version == ELASTICSEARCH_DEFAULT_VERSION and not os.path.exists(MARKER_FILE_LIGHT_VERSION): # install the default version into a subfolder of the code base install_dir = os.path.join(dirs.static_libs, "elasticsearch") else: # put all other versions into the TMP_FOLDER install_dir = os.path.join(config.dirs.tmp, "elasticsearch", version) return install_dir def install_elasticsearch(version=None): from localstack.services.es import versions if not version: version = ELASTICSEARCH_DEFAULT_VERSION version = get_elasticsearch_install_version(version) install_dir = get_elasticsearch_install_dir(version) installed_executable = os.path.join(install_dir, "bin", "elasticsearch") if not os.path.exists(installed_executable): log_install_msg("Elasticsearch (%s)" % version) es_url = versions.get_download_url(version) install_dir_parent = os.path.dirname(install_dir) mkdir(install_dir_parent) # download and extract archive tmp_archive = os.path.join(config.dirs.tmp, "localstack.%s" % os.path.basename(es_url)) download_and_extract_with_retry(es_url, tmp_archive, install_dir_parent) elasticsearch_dir = glob.glob(os.path.join(install_dir_parent, "elasticsearch*")) if not elasticsearch_dir: raise Exception("Unable to find Elasticsearch folder in %s" % install_dir_parent) shutil.move(elasticsearch_dir[0], install_dir) for dir_name in ("data", "logs", "modules", "plugins", "config/scripts"): dir_path = os.path.join(install_dir, dir_name) mkdir(dir_path) chmod_r(dir_path, 0o777) # install default plugins for plugin in ELASTICSEARCH_PLUGIN_LIST: plugin_binary = os.path.join(install_dir, "bin", "elasticsearch-plugin") plugin_dir = os.path.join(install_dir, "plugins", plugin) if not os.path.exists(plugin_dir): LOG.info("Installing Elasticsearch plugin %s", plugin) def try_install(): safe_run([plugin_binary, "install", "-b", plugin]) # We're occasionally seeing javax.net.ssl.SSLHandshakeException -> add download retries 
download_attempts = 3 try: retry(try_install, retries=download_attempts - 1, sleep=2) except Exception: LOG.warning( "Unable to download Elasticsearch plugin '%s' after %s attempts", plugin, download_attempts, ) if not os.environ.get("IGNORE_ES_DOWNLOAD_ERRORS"): raise # delete some plugins to free up space for plugin in ELASTICSEARCH_DELETE_MODULES: module_dir = os.path.join(install_dir, "modules", plugin) rm_rf(module_dir) # disable x-pack-ml plugin (not working on Alpine) xpack_dir = os.path.join(install_dir, "modules", "x-pack-ml", "platform") rm_rf(xpack_dir) # patch JVM options file - replace hardcoded heap size settings jvm_options_file = os.path.join(install_dir, "config", "jvm.options") if os.path.exists(jvm_options_file): jvm_options = load_file(jvm_options_file) jvm_options_replaced = re.sub( r"(^-Xm[sx][a-zA-Z0-9\.]+$)", r"# \1", jvm_options, flags=re.MULTILINE ) if jvm_options != jvm_options_replaced: save_file(jvm_options_file, jvm_options_replaced) def get_opensearch_install_version(version: str) -> str: from localstack.services.opensearch import versions if config.SKIP_INFRA_DOWNLOADS: return OPENSEARCH_DEFAULT_VERSION return versions.get_install_version(version) def get_opensearch_install_dir(version: str) -> str: version = get_opensearch_install_version(version) return os.path.join(config.dirs.var_libs, "opensearch", version) def install_opensearch(version=None): from localstack.services.opensearch import versions if not version: version = OPENSEARCH_DEFAULT_VERSION version = get_opensearch_install_version(version) install_dir = get_opensearch_install_dir(version) installed_executable = os.path.join(install_dir, "bin", "opensearch") if not os.path.exists(installed_executable): log_install_msg("OpenSearch (%s)" % version) opensearch_url = versions.get_download_url(version) install_dir_parent = os.path.dirname(install_dir) mkdir(install_dir_parent) # download and extract archive tmp_archive = os.path.join( config.dirs.tmp, "localstack.%s" % os.path.basename(opensearch_url) ) download_and_extract_with_retry(opensearch_url, tmp_archive, install_dir_parent) opensearch_dir = glob.glob(os.path.join(install_dir_parent, "opensearch*")) if not opensearch_dir: raise Exception("Unable to find OpenSearch folder in %s" % install_dir_parent) shutil.move(opensearch_dir[0], install_dir) for dir_name in ("data", "logs", "modules", "plugins", "config/scripts"): dir_path = os.path.join(install_dir, dir_name) mkdir(dir_path) chmod_r(dir_path, 0o777) # patch JVM options file - replace hardcoded heap size settings jvm_options_file = os.path.join(install_dir, "config", "jvm.options") if os.path.exists(jvm_options_file): jvm_options = load_file(jvm_options_file) jvm_options_replaced = re.sub( r"(^-Xm[sx][a-zA-Z0-9\.]+$)", r"# \1", jvm_options, flags=re.MULTILINE ) if jvm_options != jvm_options_replaced: save_file(jvm_options_file, jvm_options_replaced) def install_sqs_provider(): if SQS_BACKEND_IMPL == "elasticmq": install_elasticmq() def install_elasticmq(): # TODO remove this function if we stop using ElasticMQ entirely if not os.path.exists(INSTALL_PATH_ELASTICMQ_JAR): log_install_msg("ElasticMQ") mkdir(INSTALL_DIR_ELASTICMQ) # download archive tmp_archive = os.path.join(config.dirs.tmp, "elasticmq-server.jar") if not os.path.exists(tmp_archive): download(ELASTICMQ_JAR_URL, tmp_archive) shutil.copy(tmp_archive, INSTALL_DIR_ELASTICMQ) def install_kinesis(): if config.KINESIS_PROVIDER == "kinesalite": return install_kinesalite() elif config.KINESIS_PROVIDER == "kinesis-mock": return 
install_kinesis_mock() else: raise ValueError("unknown kinesis provider %s" % config.KINESIS_PROVIDER) def install_kinesalite(): if not os.path.exists(INSTALL_PATH_KINESALITE_CLI): log_install_msg("Kinesis") run('cd "%s" && npm install' % MODULE_MAIN_PATH) def install_kinesis_mock(): target_dir = INSTALL_PATH_KINESIS_MOCK machine = platform.machine().lower() system = platform.system().lower() version = platform.version().lower() is_probably_m1 = system == "darwin" and ("arm64" in version or "arm32" in version) LOG.debug("getting kinesis-mock for %s %s", system, machine) if is_env_true("KINESIS_MOCK_FORCE_JAVA"): # sometimes the static binaries may have problems, and we want to fal back to Java bin_file = "kinesis-mock.jar" elif (machine == "x86_64" or machine == "amd64") and not is_probably_m1: if system == "windows": bin_file = "kinesis-mock-mostly-static.exe" elif system == "linux": bin_file = "kinesis-mock-linux-amd64-static" elif system == "darwin": bin_file = "kinesis-mock-macos-amd64-dynamic" else: bin_file = "kinesis-mock.jar" else: bin_file = "kinesis-mock.jar" bin_file_path = os.path.join(target_dir, bin_file) if os.path.exists(bin_file_path): LOG.debug("kinesis-mock found at %s", bin_file_path) return bin_file_path response = requests.get(KINESIS_MOCK_RELEASE_URL) if not response.ok: raise ValueError( "Could not get list of releases from %s: %s" % (KINESIS_MOCK_RELEASE_URL, response.text) ) github_release = response.json() download_url = None for asset in github_release.get("assets", []): # find the correct binary in the release if asset["name"] == bin_file: download_url = asset["browser_download_url"] break if download_url is None: raise ValueError( "could not find required binary %s in release %s" % (bin_file, KINESIS_MOCK_RELEASE_URL) ) mkdir(target_dir) LOG.info("downloading kinesis-mock binary from %s", download_url) download(download_url, bin_file_path) chmod_r(bin_file_path, 0o777) return bin_file_path def install_local_kms(): local_arch = f"{platform.system().lower()}-{get_arch()}" binary_path = INSTALL_PATH_KMS_BINARY_PATTERN.replace("<arch>", local_arch) if not os.path.exists(binary_path): log_install_msg("KMS") mkdir(INSTALL_DIR_KMS) kms_url = KMS_URL_PATTERN.replace("<arch>", local_arch) download(kms_url, binary_path) chmod_r(binary_path, 0o777) def install_stepfunctions_local(): if not os.path.exists(INSTALL_PATH_STEPFUNCTIONS_JAR): # pull the JAR file from the Docker image, which is more up-to-date than the downloadable JAR file # TODO: works only when running on the host, outside of Docker -> add a fallback if running in Docker? 
log_install_msg("Step Functions") mkdir(INSTALL_DIR_STEPFUNCTIONS) DOCKER_CLIENT.pull_image(IMAGE_NAME_SFN_LOCAL) docker_name = "tmp-ls-sfn" DOCKER_CLIENT.run_container( IMAGE_NAME_SFN_LOCAL, remove=True, entrypoint="", name=docker_name, detach=True, command=["sleep", "15"], ) time.sleep(5) DOCKER_CLIENT.copy_from_container( docker_name, local_path=dirs.static_libs, container_path="/home/stepfunctionslocal/" ) path = Path(f"{dirs.static_libs}/stepfunctionslocal/") for file in path.glob("*.jar"): file.rename(Path(INSTALL_DIR_STEPFUNCTIONS) / file.name) rm_rf("%s/stepfunctionslocal" % dirs.static_libs) classes = [ SFN_PATCH_CLASS1, SFN_PATCH_CLASS2, SFN_PATCH_CLASS_REGION, SFN_PATCH_CLASS_STARTER, SFN_PATCH_FILE_METAINF, ] for patch_class in classes: patch_url = f"{SFN_PATCH_URL_PREFIX}/{patch_class}" add_file_to_jar(patch_class, patch_url, target_jar=INSTALL_PATH_STEPFUNCTIONS_JAR) # special case for Manifest file - extract first, replace content, then update in JAR file manifest_file = os.path.join(INSTALL_DIR_STEPFUNCTIONS, "META-INF", "MANIFEST.MF") if not os.path.exists(manifest_file): content = run(["unzip", "-p", INSTALL_PATH_STEPFUNCTIONS_JAR, "META-INF/MANIFEST.MF"]) content = re.sub( "Main-Class: .+", "Main-Class: cloud.localstack.StepFunctionsStarter", content ) classpath = " ".join([os.path.basename(jar) for jar in JAR_URLS]) content = re.sub(r"Class-Path: \. ", f"Class-Path: {classpath} . ", content) save_file(manifest_file, content) run( ["zip", INSTALL_PATH_STEPFUNCTIONS_JAR, "META-INF/MANIFEST.MF"], cwd=INSTALL_DIR_STEPFUNCTIONS, ) # download additional jar libs for jar_url in JAR_URLS: target = os.path.join(INSTALL_DIR_STEPFUNCTIONS, os.path.basename(jar_url)) if not file_exists_not_empty(target): download(jar_url, target) def add_file_to_jar(class_file, class_url, target_jar, base_dir=None): base_dir = base_dir or os.path.dirname(target_jar) patch_class_file = os.path.join(base_dir, class_file) if not os.path.exists(patch_class_file): download(class_url, patch_class_file) run(["zip", target_jar, class_file], cwd=base_dir) def install_dynamodb_local(): if not os.path.exists(INSTALL_PATH_DDB_JAR): log_install_msg("DynamoDB") # download and extract archive tmp_archive = os.path.join(tempfile.gettempdir(), "localstack.ddb.zip") download_and_extract_with_retry(DYNAMODB_JAR_URL, tmp_archive, INSTALL_DIR_DDB) # fix logging configuration for DynamoDBLocal log4j2_config = """<Configuration status="WARN"> <Appenders> <Console name="Console" target="SYSTEM_OUT"> <PatternLayout pattern="%d{HH:mm:ss.SSS} [%t] %-5level %logger{36} - %msg%n"/> </Console> </Appenders> <Loggers> <Root level="WARN"><AppenderRef ref="Console"/></Root> </Loggers> </Configuration>""" log4j2_file = os.path.join(INSTALL_DIR_DDB, "log4j2.xml") save_file(log4j2_file, log4j2_config) run('cd "%s" && zip -u DynamoDBLocal.jar log4j2.xml || true' % INSTALL_DIR_DDB) def install_amazon_kinesis_client_libs(): # install KCL/STS JAR files if not os.path.exists(INSTALL_PATH_KCL_JAR): mkdir(INSTALL_DIR_KCL) tmp_archive = os.path.join(tempfile.gettempdir(), "aws-java-sdk-sts.jar") if not os.path.exists(tmp_archive): download(STS_JAR_URL, tmp_archive) shutil.copy(tmp_archive, INSTALL_DIR_KCL) # Compile Java files from localstack.utils.kinesis import kclipy_helper classpath = kclipy_helper.get_kcl_classpath() if is_windows(): classpath = re.sub(r":([^\\])", r";\1", classpath) java_files = "%s/utils/kinesis/java/cloud/localstack/*.java" % MODULE_MAIN_PATH class_files = "%s/utils/kinesis/java/cloud/localstack/*.class" % 
MODULE_MAIN_PATH if not glob.glob(class_files): run( 'javac -source %s -target %s -cp "%s" %s' % (JAVAC_TARGET_VERSION, JAVAC_TARGET_VERSION, classpath, java_files) ) def install_lambda_java_libs(): # install LocalStack "fat" JAR file (contains all dependencies) if not os.path.exists(INSTALL_PATH_LOCALSTACK_FAT_JAR): log_install_msg("LocalStack Java libraries", verbatim=True) download(URL_LOCALSTACK_FAT_JAR, INSTALL_PATH_LOCALSTACK_FAT_JAR) def install_lambda_java_testlibs(): # Download the LocalStack Utils Test jar file from the maven repo if not os.path.exists(TEST_LAMBDA_JAVA): mkdir(os.path.dirname(TEST_LAMBDA_JAVA)) download(TEST_LAMBDA_JAR_URL, TEST_LAMBDA_JAVA) def install_go_lambda_runtime(): if os.path.isfile(GO_LAMBDA_RUNTIME): return log_install_msg("Installing golang runtime") system = platform.system().lower() arch = get_arch() if system not in ["linux"]: raise ValueError("unsupported os %s for awslambda-go-runtime" % system) if arch not in ["amd64", "arm64"]: raise ValueError("unsupported arch %s for awslambda-go-runtime" % arch) url = GO_RUNTIME_DOWNLOAD_URL_TEMPLATE.format( version=GO_RUNTIME_VERSION, os=system, arch=arch, ) download_and_extract(url, GO_INSTALL_FOLDER) st = os.stat(GO_LAMBDA_RUNTIME) os.chmod(GO_LAMBDA_RUNTIME, st.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH) st = os.stat(GO_LAMBDA_MOCKSERVER) os.chmod(GO_LAMBDA_MOCKSERVER, st.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH) def install_cloudformation_libs(): from localstack.services.cloudformation import deployment_utils # trigger download of CF module file deployment_utils.get_cfn_response_mod_file() def install_terraform() -> str: if os.path.isfile(TERRAFORM_BIN): return TERRAFORM_BIN log_install_msg(f"Installing terraform {TERRAFORM_VERSION}") system = platform.system().lower() arch = get_arch() url = TERRAFORM_URL_TEMPLATE.format(version=TERRAFORM_VERSION, os=system, arch=arch) download_and_extract(url, os.path.dirname(TERRAFORM_BIN)) chmod_r(TERRAFORM_BIN, 0o777) return TERRAFORM_BIN def get_terraform_binary() -> str: if not os.path.isfile(TERRAFORM_BIN): install_terraform() return TERRAFORM_BIN def install_component(name): installer = installers.get(name) if installer: installer() def install_components(names): parallelize(install_component, names) install_lambda_java_libs() def install_all_components(): # install dependencies - make sure that install_components(..) is called before hooks.install below! install_components(DEFAULT_SERVICE_PORTS.keys()) hooks.install.run() def install_debugpy_and_dependencies(): try: import debugpy assert debugpy logging.debug("Debugpy module already Installed") except ModuleNotFoundError: logging.debug("Installing Debugpy module") import pip if hasattr(pip, "main"): pip.main(["install", DEBUGPY_MODULE]) else: pip._internal.main(["install", DEBUGPY_MODULE]) # ----------------- # HELPER FUNCTIONS # ----------------- def log_install_msg(component, verbatim=False): component = component if verbatim else "local %s server" % component LOG.info("Downloading and installing %s. 
This may take some time.", component) def download_and_extract(archive_url, target_dir, retries=0, sleep=3, tmp_archive=None): mkdir(target_dir) if tmp_archive: _, ext = os.path.splitext(tmp_archive) else: _, ext = os.path.splitext(archive_url) tmp_archive = tmp_archive or new_tmp_file() if not os.path.exists(tmp_archive) or os.path.getsize(tmp_archive) <= 0: # create temporary placeholder file, to avoid duplicate parallel downloads save_file(tmp_archive, "") for i in range(retries + 1): try: download(archive_url, tmp_archive) break except Exception: time.sleep(sleep) if ext == ".zip": unzip(tmp_archive, target_dir) elif ext == ".gz" or ext == ".bz2": untar(tmp_archive, target_dir) else: raise Exception("Unsupported archive format: %s" % ext) def download_and_extract_with_retry(archive_url, tmp_archive, target_dir): try: download_and_extract(archive_url, target_dir, tmp_archive=tmp_archive) except Exception as e: # try deleting and re-downloading the zip file LOG.info("Unable to extract file, re-downloading ZIP archive %s: %s", tmp_archive, e) rm_rf(tmp_archive) download_and_extract(archive_url, target_dir, tmp_archive=tmp_archive) # kept here for backwards compatibility (installed on "make init" - TODO should be removed) installers = { "cloudformation": install_cloudformation_libs, "dynamodb": install_dynamodb_local, "kinesis": install_kinesis, "kms": install_local_kms, "sqs": install_sqs_provider, "stepfunctions": install_stepfunctions_local, } Installer = Tuple[str, Callable] class InstallerRepository(Plugin): namespace = "localstack.installer" def get_installer(self) -> List[Installer]: raise NotImplementedError class CommunityInstallerRepository(InstallerRepository): name = "community" def get_installer(self) -> List[Installer]: return [ ("awslamba-go-runtime", install_go_lambda_runtime), ("cloudformation-libs", install_cloudformation_libs), ("dynamodb-local", install_dynamodb_local), ("elasticmq", install_elasticmq), ("elasticsearch", install_elasticsearch), ("opensearch", install_opensearch), ("kinesalite", install_kinesalite), ("kinesis-client-libs", install_amazon_kinesis_client_libs), ("kinesis-mock", install_kinesis_mock), ("lambda-java-libs", install_lambda_java_libs), ("local-kms", install_local_kms), ("stepfunctions-local", install_stepfunctions_local), ("terraform", install_terraform), ] class InstallerManager: def __init__(self): self.repositories: PluginManager[InstallerRepository] = PluginManager( InstallerRepository.namespace ) @functools.lru_cache() def get_installers(self) -> Dict[str, Callable]: installer: List[Installer] = [] for repo in self.repositories.load_all(): installer.extend(repo.get_installer()) return dict(installer) def install(self, package: str, *args, **kwargs): installer = self.get_installers().get(package) if not installer: raise ValueError("no installer for package %s" % package) return installer(*args, **kwargs) def main(): if len(sys.argv) > 1: # set test API key so pro install hooks are called os.environ["LOCALSTACK_API_KEY"] = os.environ.get("LOCALSTACK_API_KEY") or "test" if sys.argv[1] == "libs": print("Initializing installation.") logging.basicConfig(level=logging.INFO) logging.getLogger("requests").setLevel(logging.WARNING) install_all_components() if sys.argv[1] in ("libs", "testlibs"): # Install additional libraries for testing install_amazon_kinesis_client_libs() install_lambda_java_testlibs() print("Done.") if __name__ == "__main__": main()
1
14,245
nit: We could parameterize the commit hash, as it's used in multiple places.
localstack-localstack
py
@@ -2,6 +2,7 @@ // Licensed under the MIT License. See License.txt in the project root for license information. var Constants = require('./constants'); +var _ = require('underscore'); /** * Checks if a parsed URL is HTTPS
1
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. var Constants = require('./constants'); /** * Checks if a parsed URL is HTTPS * * @param {object} urlToCheck The url to check * @return {bool} True if the URL is HTTPS; false otherwise. */ exports.urlIsHTTPS = function (urlToCheck) { return urlToCheck.protocol.toLowerCase() === Constants.HTTPS; }; /** * Provides the version of nodejs on the system. * * @return {object} An object specifying the major, minor and patch version of nodejs on the system. */ exports.getNodeVersion = function () { var parsedVersion = process.version.split('.'); return { major: parseInt(parsedVersion[0].substr(1), 10), minor: parseInt(parsedVersion[1], 10), patch: parseInt(parsedVersion[2], 10) }; }; /** * Encodes an URI. * * @param {string} uri The URI to be encoded. * @return {string} The encoded URI. */ exports.encodeUri = function (uri) { return encodeURIComponent(uri) .replace(/!/g, '%21') .replace(/'/g, '%27') .replace(/\(/g, '%28') .replace(/\)/g, '%29') .replace(/\*/g, '%2A'); }; exports = module.exports;
1
20,700
Move this require above the "Constants" one, since third-party modules should be listed before local ones.
Azure-autorest
java
@@ -12,10 +12,16 @@ class AuthenticationPoliciesTest(BaseWebTest, unittest.TestCase): def test_basic_auth_is_accepted_by_default(self): self.app.get(self.collection_url, headers=self.headers, status=200) + # Check that the capability is exposed on the homepage. + resp = self.app.get('/') + assert 'basicauth' in resp.json['capabilities'] def test_basic_auth_is_accepted_if_enabled_in_settings(self): app = self.make_app({'multiauth.policies': 'basicauth'}) app.get(self.collection_url, headers=self.headers, status=200) + # Check that the capability is exposed on the homepage. + resp = self.app.get('/') + assert 'basicauth' in resp.json['capabilities'] def test_basic_auth_is_declined_if_disabled_in_settings(self): app = self.make_app({
1
import mock import uuid from kinto.core import authentication from kinto.core import utils from kinto.core.testing import DummyRequest, unittest from .support import BaseWebTest class AuthenticationPoliciesTest(BaseWebTest, unittest.TestCase): def test_basic_auth_is_accepted_by_default(self): self.app.get(self.collection_url, headers=self.headers, status=200) def test_basic_auth_is_accepted_if_enabled_in_settings(self): app = self.make_app({'multiauth.policies': 'basicauth'}) app.get(self.collection_url, headers=self.headers, status=200) def test_basic_auth_is_declined_if_disabled_in_settings(self): app = self.make_app({ 'multiauth.policies': 'dummy', 'multiauth.policy.dummy.use': ('pyramid.authentication.' 'RepozeWho1AuthenticationPolicy')}) app.get(self.collection_url, headers=self.headers, status=401) def test_views_are_forbidden_if_unknown_auth_method(self): app = self.make_app({'multiauth.policies': 'basicauth'}) self.headers['Authorization'] = 'Carrier' app.get(self.collection_url, headers=self.headers, status=401) self.headers['Authorization'] = 'Carrier pigeon' app.get(self.collection_url, headers=self.headers, status=401) def test_principals_are_fetched_from_permission_backend(self): patch = mock.patch(('tests.core.support.' 'AllowAuthorizationPolicy.permits')) self.addCleanup(patch.stop) mocked = patch.start() self.permission.add_user_principal(self.principal, 'group:admin') self.app.get(self.collection_url, headers=self.headers) _, principals, _ = mocked.call_args[0] self.assertIn('group:admin', principals) def test_user_principals_are_cached_per_user(self): patch = mock.patch.object(self.permission, 'get_user_principals', wraps=self.permission.get_user_principals) self.addCleanup(patch.stop) mocked = patch.start() batch = { "defaults": { "headers": self.headers, "path": "/mushrooms" }, "requests": [ {}, {}, {}, {"headers": {"Authorization": "Basic Ym9iOg=="}}, {"headers": {"Authorization": "Basic bWF0Og=="}}, ] } self.app.post_json('/batch', batch) self.assertEqual(mocked.call_count, 3) def test_basicauth_hash_is_computed_only_once(self): # hmac_digest() is used in Basic Authentication only. 
patch = mock.patch('kinto.core.utils.hmac_digest', return_value='abcdef') self.addCleanup(patch.stop) mocked = patch.start() body = {"data": {"name": "haha"}} record_url = self.get_item_url(uuid.uuid4()) self.app.put_json(record_url, body, headers=self.headers) self.assertEqual(mocked.call_count, 1) class BasicAuthenticationPolicyTest(unittest.TestCase): def setUp(self): self.policy = authentication.BasicAuthAuthenticationPolicy() self.request = DummyRequest() self.request.headers['Authorization'] = 'Basic bWF0Og==' @mock.patch('kinto.core.utils.hmac_digest') def test_userid_is_hashed(self, mocked): mocked.return_value = 'yeah' user_id = self.policy.unauthenticated_userid(self.request) self.assertIn('yeah', user_id) def test_userid_is_built_using_password(self): auth_password = utils.encode64('user:secret1', encoding='ascii') self.request.headers['Authorization'] = 'Basic %s' % auth_password user_id1 = self.policy.unauthenticated_userid(self.request) auth_password = utils.encode64('user:secret2', encoding='ascii') self.request.headers['Authorization'] = 'Basic %s' % auth_password user_id2 = self.policy.unauthenticated_userid(self.request) self.assertNotEqual(user_id1, user_id2) def test_views_are_forbidden_if_basic_is_wrong(self): self.request.headers['Authorization'] = 'Basic abc' user_id = self.policy.unauthenticated_userid(self.request) self.assertIsNone(user_id) def test_returns_none_if_username_is_empty(self): auth_password = utils.encode64(':secret', encoding='ascii') self.request.headers['Authorization'] = 'Basic %s' % auth_password user_id = self.policy.unauthenticated_userid(self.request) self.assertIsNone(user_id) def test_providing_empty_password_is_supported(self): auth_password = utils.encode64('secret:', encoding='ascii') self.request.headers['Authorization'] = 'Basic %s' % auth_password user_id = self.policy.unauthenticated_userid(self.request) self.assertIsNotNone(user_id)
1
10,238
`assert not in`
Kinto-kinto
py
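The review message recorded for this row, `assert not in`, is terse; read against the unittest-style assertions in the file above, it most plausibly points at `assertNotIn`, the negative counterpart of the `assertIn` calls already used there. The patch the reviewer commented on is not shown here, so the following is a hypothetical illustration of the difference, not an excerpt from it:

import unittest


class NegativeMembershipExample(unittest.TestCase):
    def test_prefers_assert_not_in(self):
        principals = ['system.Everyone', 'basicauth:abc123']
        # Bare assert: on failure it only reports "AssertionError".
        assert 'group:admin' not in principals
        # unittest helper: on failure it prints the value and the container.
        self.assertNotIn('group:admin', principals)


if __name__ == '__main__':
    unittest.main()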
@@ -310,13 +310,15 @@ func (engine *DockerTaskEngine) monitorExecAgentRunning(ctx context.Context,
 	// to finish monitoring.
 	// This is inspired from containers streaming stats from Docker.
 	time.Sleep(retry.AddJitter(time.Nanosecond, engine.monitorExecAgentsInterval/2))
-	_, err = engine.execCmdMgr.RestartAgentIfStopped(ctx, engine.client, task, c, dockerID)
+	status, err := engine.execCmdMgr.RestartAgentIfStopped(ctx, engine.client, task, c, dockerID)
 	if err != nil {
 		seelog.Errorf("Task engine [%s]: Failed to restart ExecCommandAgent Process for container [%s]: %v", task.Arn, dockerID, err)
+		mTask.emitContainerEvent(mTask.Task, c, "")
 	}
-	// whether we restarted or failed to restart, we'll want to emit a state change event
-	// redundant state change events like RUNNING->RUNNING are allowed
-	mTask.emitContainerEvent(mTask.Task, c, "")
+	if status == execcmd.Restarted {
+		mTask.emitContainerEvent(mTask.Task, c, "")
+	}
+
 }
 
 // MustInit blocks and retries until an engine can be initialized.
1
// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"). You may // not use this file except in compliance with the License. A copy of the // License is located at // // http://aws.amazon.com/apache2.0/ // // or in the "license" file accompanying this file. This file is distributed // on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either // express or implied. See the License for the specific language governing // permissions and limitations under the License. // Package engine contains the core logic for managing tasks package engine import ( "context" "fmt" "os" "path/filepath" "regexp" "strconv" "strings" "sync" "time" "github.com/aws/amazon-ecs-agent/agent/api" apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" apicontainerstatus "github.com/aws/amazon-ecs-agent/agent/api/container/status" apierrors "github.com/aws/amazon-ecs-agent/agent/api/errors" apitask "github.com/aws/amazon-ecs-agent/agent/api/task" apitaskstatus "github.com/aws/amazon-ecs-agent/agent/api/task/status" "github.com/aws/amazon-ecs-agent/agent/config" "github.com/aws/amazon-ecs-agent/agent/containermetadata" "github.com/aws/amazon-ecs-agent/agent/credentials" "github.com/aws/amazon-ecs-agent/agent/data" "github.com/aws/amazon-ecs-agent/agent/dockerclient" "github.com/aws/amazon-ecs-agent/agent/dockerclient/dockerapi" "github.com/aws/amazon-ecs-agent/agent/ecscni" "github.com/aws/amazon-ecs-agent/agent/engine/dependencygraph" "github.com/aws/amazon-ecs-agent/agent/engine/dockerstate" "github.com/aws/amazon-ecs-agent/agent/engine/execcmd" "github.com/aws/amazon-ecs-agent/agent/eventstream" "github.com/aws/amazon-ecs-agent/agent/metrics" "github.com/aws/amazon-ecs-agent/agent/statechange" "github.com/aws/amazon-ecs-agent/agent/taskresource" "github.com/aws/amazon-ecs-agent/agent/taskresource/credentialspec" "github.com/aws/amazon-ecs-agent/agent/taskresource/firelens" "github.com/aws/amazon-ecs-agent/agent/utils" "github.com/aws/amazon-ecs-agent/agent/utils/retry" utilsync "github.com/aws/amazon-ecs-agent/agent/utils/sync" "github.com/aws/amazon-ecs-agent/agent/utils/ttime" dockercontainer "github.com/docker/docker/api/types/container" "github.com/cihub/seelog" "github.com/docker/docker/api/types" "github.com/pkg/errors" ) const ( //DockerEndpointEnvVariable is the environment variable that can override the Docker endpoint DockerEndpointEnvVariable = "DOCKER_HOST" // DockerDefaultEndpoint is the default value for the Docker endpoint DockerDefaultEndpoint = "unix:///var/run/docker.sock" labelPrefix = "com.amazonaws.ecs." labelTaskARN = labelPrefix + "task-arn" labelContainerName = labelPrefix + "container-name" labelTaskDefinitionFamily = labelPrefix + "task-definition-family" labelTaskDefinitionVersion = labelPrefix + "task-definition-version" labelCluster = labelPrefix + "cluster" cniSetupTimeout = 1 * time.Minute cniCleanupTimeout = 30 * time.Second minGetIPBridgeTimeout = time.Second maxGetIPBridgeTimeout = 10 * time.Second getIPBridgeRetryJitterMultiplier = 0.2 getIPBridgeRetryDelayMultiplier = 2 ipamCleanupTmeout = 5 * time.Second minEngineConnectRetryDelay = 200 * time.Second maxEngineConnectRetryDelay = 2 * time.Second engineConnectRetryJitterMultiplier = 0.20 engineConnectRetryDelayMultiplier = 1.5 // logDriverTypeFirelens is the log driver type for containers that want to use the firelens container to send logs. 
logDriverTypeFirelens = "awsfirelens" logDriverTypeFluentd = "fluentd" logDriverTag = "tag" logDriverFluentdAddress = "fluentd-address" dataLogDriverPath = "/data/firelens/" logDriverAsyncConnect = "fluentd-async-connect" logDriverSubSecondPrecision = "fluentd-sub-second-precision" dataLogDriverSocketPath = "/socket/fluent.sock" socketPathPrefix = "unix://" // fluentTagDockerFormat is the format for the log tag, which is "containerName-firelens-taskID" fluentTagDockerFormat = "%s-firelens-%s" // Environment variables are needed for firelens fluentNetworkHost = "FLUENT_HOST" fluentNetworkPort = "FLUENT_PORT" FluentNetworkPortValue = "24224" FluentAWSVPCHostValue = "127.0.0.1" defaultMonitorExecAgentsInterval = 15 * time.Minute ) // DockerTaskEngine is a state machine for managing a task and its containers // in ECS. // // DockerTaskEngine implements an abstraction over the DockerGoClient so that // it does not have to know about tasks, only containers // The DockerTaskEngine interacts with Docker to implement a TaskEngine type DockerTaskEngine struct { // implements TaskEngine cfg *config.Config ctx context.Context initialized bool mustInitLock sync.Mutex // state stores all tasks this task engine is aware of, including their // current state and mappings to/from dockerId and name. // This is used to checkpoint state to disk so tasks may survive agent // failures or updates state dockerstate.TaskEngineState managedTasks map[string]*managedTask taskStopGroup *utilsync.SequentialWaitGroup events <-chan dockerapi.DockerContainerChangeEvent stateChangeEvents chan statechange.Event client dockerapi.DockerClient dataClient data.Client cniClient ecscni.CNIClient containerChangeEventStream *eventstream.EventStream stopEngine context.CancelFunc // tasksLock is a mutex that the task engine must acquire before changing // any task's state which it manages. Since this is a lock that encompasses // all tasks, it must not acquire it for any significant duration // The write mutex should be taken when adding and removing tasks from managedTasks. tasksLock sync.RWMutex credentialsManager credentials.Manager _time ttime.Time _timeOnce sync.Once imageManager ImageManager containerStatusToTransitionFunction map[apicontainerstatus.ContainerStatus]transitionApplyFunc metadataManager containermetadata.Manager // taskSteadyStatePollInterval is the duration that a managed task waits // once the task gets into steady state before polling the state of all of // the task's containers to re-evaluate if the task is still in steady state // This is set to defaultTaskSteadyStatePollInterval in production code. // This can be used by tests that are looking to ensure that the steady state // verification logic gets executed to set it to a low interval taskSteadyStatePollInterval time.Duration taskSteadyStatePollIntervalJitter time.Duration resourceFields *taskresource.ResourceFields // handleDelay is a function used to delay cleanup. Implementation is // swappable for testing handleDelay func(duration time.Duration) monitorExecAgentsTicker *time.Ticker execCmdMgr execcmd.Manager monitorExecAgentsInterval time.Duration } // NewDockerTaskEngine returns a created, but uninitialized, DockerTaskEngine. // The distinction between created and initialized is that when created it may // be serialized/deserialized, but it will not communicate with docker until it // is also initialized. 
func NewDockerTaskEngine(cfg *config.Config, client dockerapi.DockerClient, credentialsManager credentials.Manager, containerChangeEventStream *eventstream.EventStream, imageManager ImageManager, state dockerstate.TaskEngineState, metadataManager containermetadata.Manager, resourceFields *taskresource.ResourceFields, execCmdMgr execcmd.Manager) *DockerTaskEngine { dockerTaskEngine := &DockerTaskEngine{ cfg: cfg, client: client, dataClient: data.NewNoopClient(), state: state, managedTasks: make(map[string]*managedTask), taskStopGroup: utilsync.NewSequentialWaitGroup(), stateChangeEvents: make(chan statechange.Event), credentialsManager: credentialsManager, containerChangeEventStream: containerChangeEventStream, imageManager: imageManager, cniClient: ecscni.NewClient(cfg.CNIPluginsPath), metadataManager: metadataManager, taskSteadyStatePollInterval: defaultTaskSteadyStatePollInterval, taskSteadyStatePollIntervalJitter: defaultTaskSteadyStatePollIntervalJitter, resourceFields: resourceFields, handleDelay: time.Sleep, execCmdMgr: execCmdMgr, monitorExecAgentsInterval: defaultMonitorExecAgentsInterval, } dockerTaskEngine.initializeContainerStatusToTransitionFunction() return dockerTaskEngine } func (engine *DockerTaskEngine) initializeContainerStatusToTransitionFunction() { containerStatusToTransitionFunction := map[apicontainerstatus.ContainerStatus]transitionApplyFunc{ apicontainerstatus.ContainerPulled: engine.pullContainer, apicontainerstatus.ContainerCreated: engine.createContainer, apicontainerstatus.ContainerRunning: engine.startContainer, apicontainerstatus.ContainerResourcesProvisioned: engine.provisionContainerResources, apicontainerstatus.ContainerStopped: engine.stopContainer, } engine.containerStatusToTransitionFunction = containerStatusToTransitionFunction } // ImagePullDeleteLock ensures that pulls and deletes do not run at the same time and pulls can be run at the same time for docker >= 1.11.1 // Pulls are serialized as a temporary workaround for a devicemapper issue. (see https://github.com/docker/docker/issues/9718) // Deletes must not run at the same time as pulls to prevent deletion of images that are being used to launch new tasks. var ImagePullDeleteLock sync.RWMutex // UnmarshalJSON restores a previously marshaled task-engine state from json func (engine *DockerTaskEngine) UnmarshalJSON(data []byte) error { return engine.state.UnmarshalJSON(data) } // MarshalJSON marshals into state directly func (engine *DockerTaskEngine) MarshalJSON() ([]byte, error) { return engine.state.MarshalJSON() } // Init initializes a DockerTaskEngine such that it may communicate with docker // and operate normally. // This function must be called before any other function, except serializing and deserializing, can succeed without error. func (engine *DockerTaskEngine) Init(ctx context.Context) error { derivedCtx, cancel := context.WithCancel(ctx) engine.stopEngine = cancel engine.ctx = derivedCtx // Open the event stream before we sync state so that e.g. if a container // goes from running to stopped after we sync with it as "running" we still // have the "went to stopped" event pending so we can be up to date. 
err := engine.openEventstream(derivedCtx) if err != nil { return err } engine.synchronizeState() // Now catch up and start processing new events per normal go engine.handleDockerEvents(derivedCtx) engine.initialized = true go engine.startPeriodicExecAgentsMonitoring(derivedCtx) return nil } func (engine *DockerTaskEngine) startPeriodicExecAgentsMonitoring(ctx context.Context) { engine.monitorExecAgentsTicker = time.NewTicker(engine.monitorExecAgentsInterval) for { select { case <-engine.monitorExecAgentsTicker.C: go engine.monitorExecAgentProcesses(ctx) case <-ctx.Done(): engine.monitorExecAgentsTicker.Stop() return } } } func (engine *DockerTaskEngine) monitorExecAgentProcesses(ctx context.Context) { // TODO: [ecs-exec]add jitter between containers to not overload docker with top calls engine.tasksLock.RLock() defer engine.tasksLock.RUnlock() for _, mTask := range engine.managedTasks { task := mTask.Task if task.GetKnownStatus() != apitaskstatus.TaskRunning { continue } for _, c := range task.Containers { if execcmd.IsExecEnabledContainer(c) { go engine.monitorExecAgentRunning(ctx, mTask, c) } } } } func (engine *DockerTaskEngine) monitorExecAgentRunning(ctx context.Context, mTask *managedTask, c *apicontainer.Container) { if !c.IsRunning() { return } task := mTask.Task dockerID, err := engine.getDockerID(task, c) if err != nil { seelog.Errorf("Task engine [%s]: Could not retrieve docker id for container", task.Arn) return } // Sleeping here so that all the containers do not call inspect/start exec agent process // at the same time. // The max sleep is 50% of the monitor interval to allow enough buffer time // to finish monitoring. // This is inspired from containers streaming stats from Docker. time.Sleep(retry.AddJitter(time.Nanosecond, engine.monitorExecAgentsInterval/2)) _, err = engine.execCmdMgr.RestartAgentIfStopped(ctx, engine.client, task, c, dockerID) if err != nil { seelog.Errorf("Task engine [%s]: Failed to restart ExecCommandAgent Process for container [%s]: %v", task.Arn, dockerID, err) } // whether we restarted or failed to restart, we'll want to emit a state change event // redundant state change events like RUNNING->RUNNING are allowed mTask.emitContainerEvent(mTask.Task, c, "") } // MustInit blocks and retries until an engine can be initialized. func (engine *DockerTaskEngine) MustInit(ctx context.Context) { if engine.initialized { return } engine.mustInitLock.Lock() defer engine.mustInitLock.Unlock() errorOnce := sync.Once{} taskEngineConnectBackoff := retry.NewExponentialBackoff(minEngineConnectRetryDelay, maxEngineConnectRetryDelay, engineConnectRetryJitterMultiplier, engineConnectRetryDelayMultiplier) retry.RetryWithBackoff(taskEngineConnectBackoff, func() error { if engine.initialized { return nil } err := engine.Init(ctx) if err != nil { errorOnce.Do(func() { seelog.Errorf("Task engine: could not connect to docker daemon: %v", err) }) } return err }) } // SetDataClient sets the saver that is used by the DockerTaskEngine. func (engine *DockerTaskEngine) SetDataClient(client data.Client) { engine.dataClient = client } // Shutdown makes a best-effort attempt to cleanup after the task engine. // This should not be relied on for anything more complicated than testing. func (engine *DockerTaskEngine) Shutdown() { engine.stopEngine() engine.Disable() } // Disable prevents this engine from managing any additional tasks. 
func (engine *DockerTaskEngine) Disable() { engine.tasksLock.Lock() } // isTaskManaged checks if task for the corresponding arn is present func (engine *DockerTaskEngine) isTaskManaged(arn string) bool { engine.tasksLock.RLock() defer engine.tasksLock.RUnlock() _, ok := engine.managedTasks[arn] return ok } // synchronizeState explicitly goes through each docker container stored in // "state" and updates its KnownStatus appropriately, as well as queueing up // events to push upstream. It also initializes some fields of task resources and eni attachments that won't be populated // from loading state file. func (engine *DockerTaskEngine) synchronizeState() { engine.tasksLock.Lock() defer engine.tasksLock.Unlock() imageStates := engine.state.AllImageStates() if len(imageStates) != 0 { engine.imageManager.AddAllImageStates(imageStates) } eniAttachments := engine.state.AllENIAttachments() for _, eniAttachment := range eniAttachments { timeoutFunc := func() { eniAttachment, ok := engine.state.ENIByMac(eniAttachment.MACAddress) if !ok { seelog.Warnf("Ignoring unmanaged ENI attachment with MAC address: %s", eniAttachment.MACAddress) return } if !eniAttachment.IsSent() { seelog.Warnf("Timed out waiting for ENI ack; removing ENI attachment record with MAC address: %s", eniAttachment.MACAddress) engine.removeENIAttachmentData(eniAttachment.MACAddress) engine.state.RemoveENIAttachment(eniAttachment.MACAddress) } } err := eniAttachment.Initialize(timeoutFunc) if err != nil { // The only case where we get an error from Initialize is that the attachment has expired. In that case, remove the expired // attachment from state. seelog.Warnf("ENI attachment with mac address %s has expired. Removing it from state.", eniAttachment.MACAddress) engine.removeENIAttachmentData(eniAttachment.MACAddress) engine.state.RemoveENIAttachment(eniAttachment.MACAddress) } } tasks := engine.state.AllTasks() tasksToStart := engine.filterTasksToStartUnsafe(tasks) for _, task := range tasks { task.InitializeResources(engine.resourceFields) engine.saveTaskData(task) } for _, task := range tasksToStart { engine.startTask(task) } } // filterTasksToStartUnsafe filters only the tasks that need to be started after // the agent has been restarted. It also synchronizes states of all of the containers // in tasks that need to be started. func (engine *DockerTaskEngine) filterTasksToStartUnsafe(tasks []*apitask.Task) []*apitask.Task { var tasksToStart []*apitask.Task for _, task := range tasks { conts, ok := engine.state.ContainerMapByArn(task.Arn) if !ok { // task hasn't started processing, no need to check container status tasksToStart = append(tasksToStart, task) continue } for _, cont := range conts { engine.synchronizeContainerStatus(cont, task) engine.saveDockerContainerData(cont) // persist the container with the updated information. 
} tasksToStart = append(tasksToStart, task) // Put tasks that are stopped by acs but hasn't been stopped in wait group if task.GetDesiredStatus().Terminal() && task.GetStopSequenceNumber() != 0 { engine.taskStopGroup.Add(task.GetStopSequenceNumber(), 1) } } return tasksToStart } // updateContainerMetadata sets the container metadata from the docker inspect func updateContainerMetadata(metadata *dockerapi.DockerContainerMetadata, container *apicontainer.Container, task *apitask.Task) { container.SetCreatedAt(metadata.CreatedAt) container.SetStartedAt(metadata.StartedAt) container.SetFinishedAt(metadata.FinishedAt) // Set the labels if it's not set if len(metadata.Labels) != 0 && len(container.GetLabels()) == 0 { container.SetLabels(metadata.Labels) } // Update volume for empty volume container if metadata.Volumes != nil { if container.IsInternal() { task.UpdateMountPoints(container, metadata.Volumes) } else { container.SetVolumes(metadata.Volumes) } } // Set Exitcode if it's not set if metadata.ExitCode != nil { container.SetKnownExitCode(metadata.ExitCode) } // Set port mappings if len(metadata.PortBindings) != 0 && len(container.GetKnownPortBindings()) == 0 { container.SetKnownPortBindings(metadata.PortBindings) } // update the container health information if container.HealthStatusShouldBeReported() { container.SetHealthStatus(metadata.Health) } container.SetNetworkMode(metadata.NetworkMode) container.SetNetworkSettings(metadata.NetworkSettings) } // synchronizeContainerStatus checks and updates the container status with docker func (engine *DockerTaskEngine) synchronizeContainerStatus(container *apicontainer.DockerContainer, task *apitask.Task) { if container.DockerID == "" { seelog.Debugf("Task engine [%s]: found container potentially created while we were down: %s", task.Arn, container.DockerName) // Figure out the dockerid describedContainer, err := engine.client.InspectContainer(engine.ctx, container.DockerName, dockerclient.InspectContainerTimeout) if err != nil { seelog.Warnf("Task engine [%s]: could not find matching container for expected name [%s]: %v", task.Arn, container.DockerName, err) } else { // update the container metadata in case the container was created during agent restart metadata := dockerapi.MetadataFromContainer(describedContainer) updateContainerMetadata(&metadata, container.Container, task) container.DockerID = describedContainer.ID container.Container.SetKnownStatus(dockerapi.DockerStateToState(describedContainer.State)) // update mappings that need dockerid engine.state.AddContainer(container, task) err := engine.imageManager.RecordContainerReference(container.Container) if err != nil { seelog.Warnf("Task engine [%s]: unable to add container reference to image state: %v", task.Arn, err) } } return } currentState, metadata := engine.client.DescribeContainer(engine.ctx, container.DockerID) if metadata.Error != nil { currentState = apicontainerstatus.ContainerStopped // If this is a Docker API error if metadata.Error.ErrorName() == dockerapi.CannotDescribeContainerErrorName { seelog.Warnf("Task engine [%s]: could not describe previously known container [id=%s; name=%s]; assuming dead: %v", task.Arn, container.DockerID, container.DockerName, metadata.Error) if !container.Container.KnownTerminal() { container.Container.ApplyingError = apierrors.NewNamedError(&ContainerVanishedError{}) err := engine.imageManager.RemoveContainerReferenceFromImageState(container.Container) if err != nil { seelog.Warnf("Task engine [%s]: could not remove container reference for image 
state %s: %v", container.Container.Image, err) } } } else { // If this is a container state error updateContainerMetadata(&metadata, container.Container, task) container.Container.ApplyingError = apierrors.NewNamedError(metadata.Error) } } else { // update the container metadata in case the container status/metadata changed during agent restart updateContainerMetadata(&metadata, container.Container, task) err := engine.imageManager.RecordContainerReference(container.Container) if err != nil { seelog.Warnf("Task engine [%s]: unable to add container reference to image state: %v", task.Arn, err) } if engine.cfg.ContainerMetadataEnabled.Enabled() && !container.Container.IsMetadataFileUpdated() { go engine.updateMetadataFile(task, container) } } if currentState > container.Container.GetKnownStatus() { // update the container known status container.Container.SetKnownStatus(currentState) } // Update task ExecutionStoppedAt timestamp task.RecordExecutionStoppedAt(container.Container) } // checkTaskState inspects the state of all containers within a task and writes // their state to the managed task's container channel. func (engine *DockerTaskEngine) checkTaskState(task *apitask.Task) { defer metrics.MetricsEngineGlobal.RecordTaskEngineMetric("CHECK_TASK_STATE")() for _, container := range task.Containers { dockerID, err := engine.getDockerID(task, container) if err != nil { continue } status, metadata := engine.client.DescribeContainer(engine.ctx, dockerID) engine.tasksLock.RLock() managedTask, ok := engine.managedTasks[task.Arn] engine.tasksLock.RUnlock() if ok { managedTask.emitDockerContainerChange(dockerContainerChange{ container: container, event: dockerapi.DockerContainerChangeEvent{ Status: status, DockerContainerMetadata: metadata, }, }) } } } // sweepTask deletes all the containers associated with a task func (engine *DockerTaskEngine) sweepTask(task *apitask.Task) { for _, cont := range task.Containers { err := engine.removeContainer(task, cont) if err != nil { seelog.Infof("Task engine [%s]: unable to remove old container [%s]: %v", task.Arn, cont.Name, err) } // Internal container(created by ecs-agent) state isn't recorded if cont.IsInternal() { continue } err = engine.imageManager.RemoveContainerReferenceFromImageState(cont) if err != nil { seelog.Errorf("Task engine [%s]: Unable to remove container [%s] reference from image state: %v", task.Arn, cont.Name, err) } } // Clean metadata directory for task if engine.cfg.ContainerMetadataEnabled.Enabled() { err := engine.metadataManager.Clean(task.Arn) if err != nil { seelog.Warnf("Task engine [%s]: clean task metadata failed: %v", task.Arn, err) } } } var removeAll = os.RemoveAll func (engine *DockerTaskEngine) deleteTask(task *apitask.Task) { for _, resource := range task.GetResources() { err := resource.Cleanup() if err != nil { seelog.Warnf("Task engine [%s]: unable to cleanup resource %s: %v", task.Arn, resource.GetName(), err) } else { seelog.Infof("Task engine [%s]: resource %s cleanup complete", task.Arn, resource.GetName()) } } if execcmd.IsExecEnabledTask(task) { // cleanup host exec agent log dirs if tID, err := task.GetID(); err != nil { seelog.Warnf("Task Engine[%s]: error getting task ID for ExecAgent logs cleanup: %v", task.Arn, err) } else { if err := removeAll(filepath.Join(execcmd.ECSAgentExecLogDir, tID)); err != nil { seelog.Warnf("Task Engine[%s]: unable to remove ExecAgent host logs for task: %v", task.Arn, err) } } } // Now remove ourselves from the global state and cleanup channels engine.tasksLock.Lock() 
engine.state.RemoveTask(task) taskENIs := task.GetTaskENIs() for _, taskENI := range taskENIs { // ENIs that exist only as logical associations on another interface do not have // attachments that need to be removed. if taskENI.IsStandardENI() { seelog.Debugf("Task engine [%s]: removing eni %s from agent state", task.Arn, taskENI.ID) engine.removeENIAttachmentData(taskENI.MacAddress) engine.state.RemoveENIAttachment(taskENI.MacAddress) } else { seelog.Debugf("Task engine [%s]: skipping removing logical eni %s from agent state", task.Arn, taskENI.ID) } } // Remove task and container data from database. engine.removeTaskData(task) seelog.Infof("Task engine [%s]: finished removing task data, removing task from managed tasks", task.Arn) delete(engine.managedTasks, task.Arn) engine.tasksLock.Unlock() } func (engine *DockerTaskEngine) emitTaskEvent(task *apitask.Task, reason string) { event, err := api.NewTaskStateChangeEvent(task, reason) if err != nil { seelog.Infof("Task engine [%s]: unable to create task state change event: %v", task.Arn, err) return } seelog.Infof("Task engine [%s]: Task engine: sending change event [%s]", task.Arn, event.String()) engine.stateChangeEvents <- event } // startTask creates a managedTask construct to track the task and then begins // pushing it towards its desired state when allowed startTask is protected by // the tasksLock lock of 'AddTask'. It should not be called from anywhere // else and should exit quickly to allow AddTask to do more work. func (engine *DockerTaskEngine) startTask(task *apitask.Task) { // Create a channel that may be used to communicate with this task, survey // what tasks need to be waited for for this one to start, and then spin off // a goroutine to oversee this task thisTask := engine.newManagedTask(task) thisTask._time = engine.time() go thisTask.overseeTask() } func (engine *DockerTaskEngine) time() ttime.Time { engine._timeOnce.Do(func() { if engine._time == nil { engine._time = &ttime.DefaultTime{} } }) return engine._time } // openEventstream opens, but does not consume, the docker event stream func (engine *DockerTaskEngine) openEventstream(ctx context.Context) error { events, err := engine.client.ContainerEvents(ctx) if err != nil { return err } engine.events = events return nil } // handleDockerEvents must be called after openEventstream; it processes each // event that it reads from the docker eventstream func (engine *DockerTaskEngine) handleDockerEvents(ctx context.Context) { for { select { case <-ctx.Done(): return case event := <-engine.events: engine.handleDockerEvent(event) } } } // handleDockerEvent is the entrypoint for task modifications originating with // events occurring through Docker, outside the task engine itself. // handleDockerEvent is responsible for taking an event that correlates to a // container and placing it in the context of the task to which that container // belongs. 
func (engine *DockerTaskEngine) handleDockerEvent(event dockerapi.DockerContainerChangeEvent) { seelog.Debugf("Task engine: handling a docker event: %s", event.String()) task, ok := engine.state.TaskByID(event.DockerID) if !ok { seelog.Debugf("Task engine: event for container [%s] not managed, unable to map container id to task", event.DockerID) return } cont, ok := engine.state.ContainerByID(event.DockerID) if !ok { seelog.Debugf("Task engine: event for container [%s] not managed, unable to map container id to container", event.DockerID) return } // Container health status change does not affect the container status // no need to process this in task manager if event.Type == apicontainer.ContainerHealthEvent { if cont.Container.HealthStatusShouldBeReported() { seelog.Debugf("Task engine: updating container [%s(%s)] health status: %v", cont.Container.Name, cont.DockerID, event.DockerContainerMetadata.Health) cont.Container.SetHealthStatus(event.DockerContainerMetadata.Health) } return } engine.tasksLock.RLock() managedTask, ok := engine.managedTasks[task.Arn] engine.tasksLock.RUnlock() if !ok { seelog.Criticalf("Task engine: could not find managed task [%s] corresponding to a docker event: %s", task.Arn, event.String()) return } seelog.Debugf("Task engine [%s]: writing docker event to the task: %s", task.Arn, event.String()) managedTask.emitDockerContainerChange(dockerContainerChange{container: cont.Container, event: event}) seelog.Debugf("Task engine [%s]: wrote docker event to the task: %s", task.Arn, event.String()) } // StateChangeEvents returns channels to read task and container state changes. These // changes should be read as soon as possible as them not being read will block // processing the task referenced by the event. func (engine *DockerTaskEngine) StateChangeEvents() chan statechange.Event { return engine.stateChangeEvents } // AddTask starts tracking a task func (engine *DockerTaskEngine) AddTask(task *apitask.Task) { defer metrics.MetricsEngineGlobal.RecordTaskEngineMetric("ADD_TASK")() err := task.PostUnmarshalTask(engine.cfg, engine.credentialsManager, engine.resourceFields, engine.client, engine.ctx) if err != nil { seelog.Errorf("Task engine [%s]: unable to add task to the engine: %v", task.Arn, err) task.SetKnownStatus(apitaskstatus.TaskStopped) task.SetDesiredStatus(apitaskstatus.TaskStopped) engine.emitTaskEvent(task, err.Error()) return } engine.tasksLock.Lock() defer engine.tasksLock.Unlock() existingTask, exists := engine.state.TaskByArn(task.Arn) if !exists { // This will update the container desired status task.UpdateDesiredStatus() engine.state.AddTask(task) if dependencygraph.ValidDependencies(task) { engine.startTask(task) } else { seelog.Errorf("Task engine [%s]: unable to progress task with circular dependencies", task.Arn) task.SetKnownStatus(apitaskstatus.TaskStopped) task.SetDesiredStatus(apitaskstatus.TaskStopped) err := TaskDependencyError{task.Arn} engine.emitTaskEvent(task, err.Error()) } return } // Update task engine.updateTaskUnsafe(existingTask, task) } // ListTasks returns the tasks currently managed by the DockerTaskEngine func (engine *DockerTaskEngine) ListTasks() ([]*apitask.Task, error) { return engine.state.AllTasks(), nil } // GetTaskByArn returns the task identified by that ARN func (engine *DockerTaskEngine) GetTaskByArn(arn string) (*apitask.Task, bool) { return engine.state.TaskByArn(arn) } func (engine *DockerTaskEngine) pullContainer(task *apitask.Task, container *apicontainer.Container) dockerapi.DockerContainerMetadata { switch 
container.Type { case apicontainer.ContainerCNIPause, apicontainer.ContainerNamespacePause: // pause images are managed at startup return dockerapi.DockerContainerMetadata{} } if engine.imagePullRequired(engine.cfg.ImagePullBehavior, container, task.Arn) { // Record the pullStoppedAt timestamp defer func() { timestamp := engine.time().Now() task.SetPullStoppedAt(timestamp) }() seelog.Infof("Task engine [%s]: pulling image %s for container %s concurrently", task.Arn, container.Image, container.Name) return engine.concurrentPull(task, container) } // No pull image is required, just update container reference and use cached image. engine.updateContainerReference(false, container, task.Arn) // Return the metadata without any error return dockerapi.DockerContainerMetadata{Error: nil} } // imagePullRequired returns true if pulling image is required, or return false if local image cache // should be used, by inspecting the agent pull behavior variable defined in config. The caller has // to make sure the container passed in is not an internal container. func (engine *DockerTaskEngine) imagePullRequired(imagePullBehavior config.ImagePullBehaviorType, container *apicontainer.Container, taskArn string) bool { switch imagePullBehavior { case config.ImagePullOnceBehavior: // If this image has been pulled successfully before, don't pull the image, // otherwise pull the image as usual, regardless whether the image exists or not // (the image can be prepopulated with the AMI and never be pulled). imageState, ok := engine.imageManager.GetImageStateFromImageName(container.Image) if ok && imageState.GetPullSucceeded() { seelog.Infof("Task engine [%s]: image %s for container %s has been pulled once, not pulling it again", taskArn, container.Image, container.Name) return false } return true case config.ImagePullPreferCachedBehavior: // If the behavior is prefer cached, don't pull if we found cached image // by inspecting the image. 
_, err := engine.client.InspectImage(container.Image) if err != nil { return true } seelog.Infof("Task engine [%s]: found cached image %s, use it directly for container %s", taskArn, container.Image, container.Name) return false default: // Need to pull the image for always and default agent pull behavior return true } } func (engine *DockerTaskEngine) concurrentPull(task *apitask.Task, container *apicontainer.Container) dockerapi.DockerContainerMetadata { seelog.Debugf("Task engine [%s]: attempting to obtain ImagePullDeleteLock to pull image %s for container %s", task.Arn, container.Image, container.Name) ImagePullDeleteLock.RLock() seelog.Debugf("Task engine [%s]: acquired ImagePullDeleteLock, start pulling image %s for container %s", task.Arn, container.Image, container.Name) defer seelog.Debugf("Task engine [%s]: released ImagePullDeleteLock after pulling image %s for container %s", task.Arn, container.Image, container.Name) defer ImagePullDeleteLock.RUnlock() // Record the task pull_started_at timestamp pullStart := engine.time().Now() ok := task.SetPullStartedAt(pullStart) if ok { seelog.Infof("Task engine [%s]: recording timestamp for starting image pulltime: %s", task.Arn, pullStart) } metadata := engine.pullAndUpdateContainerReference(task, container) if metadata.Error == nil { seelog.Infof("Task engine [%s]: finished pulling image %s for container %s in %s", task.Arn, container.Image, container.Name, time.Since(pullStart).String()) } else { seelog.Errorf("Task engine [%s]: failed to pull image %s for container %s: %v", task.Arn, container.Image, container.Name, metadata.Error) } return metadata } func (engine *DockerTaskEngine) pullAndUpdateContainerReference(task *apitask.Task, container *apicontainer.Container) dockerapi.DockerContainerMetadata { // If a task is blocked here for some time, and before it starts pulling image, // the task's desired status is set to stopped, then don't pull the image if task.GetDesiredStatus() == apitaskstatus.TaskStopped { seelog.Infof("Task engine [%s]: task's desired status is stopped, skipping pulling image %s for container %s", task.Arn, container.Image, container.Name) container.SetDesiredStatus(apicontainerstatus.ContainerStopped) return dockerapi.DockerContainerMetadata{Error: TaskStoppedBeforePullBeginError{task.Arn}} } // Set the credentials for pull from ECR if necessary if container.ShouldPullWithExecutionRole() { executionCredentials, ok := engine.credentialsManager.GetTaskCredentials(task.GetExecutionCredentialsID()) if !ok { seelog.Errorf("Task engine [%s]: unable to acquire ECR credentials for image %s for container %s", task.Arn, container.Image, container.Name) return dockerapi.DockerContainerMetadata{ Error: dockerapi.CannotPullECRContainerError{ FromError: errors.New("engine ecr credentials: not found"), }, } } iamCredentials := executionCredentials.GetIAMRoleCredentials() container.SetRegistryAuthCredentials(iamCredentials) // Clean up the ECR pull credentials after pulling defer container.SetRegistryAuthCredentials(credentials.IAMRoleCredentials{}) } // Apply registry auth data from ASM if required if container.ShouldPullWithASMAuth() { if err := task.PopulateASMAuthData(container); err != nil { seelog.Errorf("Task engine [%s]: unable to acquire Docker registry credentials for image %s for container %s", task.Arn, container.Image, container.Name) return dockerapi.DockerContainerMetadata{ Error: dockerapi.CannotPullContainerAuthError{ FromError: errors.New("engine docker private registry credentials: not found"), }, } } defer 
container.SetASMDockerAuthConfig(types.AuthConfig{}) } metadata := engine.client.PullImage(engine.ctx, container.Image, container.RegistryAuthentication, engine.cfg.ImagePullTimeout) // Don't add internal images(created by ecs-agent) into imagemanger state if container.IsInternal() { return metadata } pullSucceeded := metadata.Error == nil engine.updateContainerReference(pullSucceeded, container, task.Arn) return metadata } func (engine *DockerTaskEngine) updateContainerReference(pullSucceeded bool, container *apicontainer.Container, taskArn string) { err := engine.imageManager.RecordContainerReference(container) if err != nil { seelog.Errorf("Task engine [%s]: unable to add container reference to image state: %v", taskArn, err) } imageState, ok := engine.imageManager.GetImageStateFromImageName(container.Image) if ok && pullSucceeded { // Only need to update the pullSucceeded flag of the image state when its not yet set to true. if !imageState.GetPullSucceeded() { imageState.SetPullSucceeded(true) err = engine.dataClient.SaveImageState(imageState) if err != nil { seelog.Warnf("Task engine [%s]: unable to save image state: %v", taskArn, err) } } } engine.state.AddImageState(imageState) } func (engine *DockerTaskEngine) createContainer(task *apitask.Task, container *apicontainer.Container) dockerapi.DockerContainerMetadata { seelog.Infof("Task engine [%s]: creating container: %s", task.Arn, container.Name) client := engine.client if container.DockerConfig.Version != nil { client = client.WithVersion(dockerclient.DockerVersion(*container.DockerConfig.Version)) } dockerContainerName := "" containerMap, ok := engine.state.ContainerMapByArn(task.Arn) if !ok { containerMap = make(map[string]*apicontainer.DockerContainer) } else { // looking for container that has docker name but not created for _, v := range containerMap { if v.Container.Name == container.Name { dockerContainerName = v.DockerName break } } } // Resolve HostConfig // we have to do this in create, not start, because docker no longer handles // merging create config with start hostconfig the same; e.g. memory limits // get lost dockerClientVersion, versionErr := client.APIVersion() if versionErr != nil { return dockerapi.DockerContainerMetadata{Error: CannotGetDockerClientVersionError{versionErr}} } hostConfig, hcerr := task.DockerHostConfig(container, containerMap, dockerClientVersion, engine.cfg) if hcerr != nil { return dockerapi.DockerContainerMetadata{Error: apierrors.NamedError(hcerr)} } if container.AWSLogAuthExecutionRole() { err := task.ApplyExecutionRoleLogsAuth(hostConfig, engine.credentialsManager) if err != nil { return dockerapi.DockerContainerMetadata{Error: apierrors.NamedError(err)} } } firelensConfig := container.GetFirelensConfig() if firelensConfig != nil { err := task.AddFirelensContainerBindMounts(firelensConfig, hostConfig, engine.cfg) if err != nil { return dockerapi.DockerContainerMetadata{Error: apierrors.NamedError(err)} } cerr := task.PopulateSecretLogOptionsToFirelensContainer(container) if cerr != nil { return dockerapi.DockerContainerMetadata{Error: apierrors.NamedError(cerr)} } if firelensConfig.Type == firelens.FirelensConfigTypeFluentd { // For fluentd router, needs to specify FLUENT_UID to root in order for the fluentd process to access // the socket created by Docker. container.MergeEnvironmentVariables(map[string]string{ "FLUENT_UID": "0", }) } } // If the container is using a special log driver type "awsfirelens", it means the container wants to use // the firelens container to send logs. 
In this case, override the log driver type to be fluentd // and specify appropriate tag and fluentd-address, so that the logs are sent to and routed by the firelens container. // Update the environment variables FLUENT_HOST and FLUENT_PORT depending on the supported network modes - bridge // and awsvpc. For reference - https://docs.docker.com/config/containers/logging/fluentd/. if hostConfig.LogConfig.Type == logDriverTypeFirelens { hostConfig.LogConfig = getFirelensLogConfig(task, container, hostConfig, engine.cfg) if task.IsNetworkModeAWSVPC() { container.MergeEnvironmentVariables(map[string]string{ fluentNetworkHost: FluentAWSVPCHostValue, fluentNetworkPort: FluentNetworkPortValue, }) } else if container.GetNetworkModeFromHostConfig() == "" || container.GetNetworkModeFromHostConfig() == apitask.BridgeNetworkMode { ipAddress, ok := getContainerHostIP(task.GetFirelensContainer().GetNetworkSettings()) if !ok { err := apierrors.DockerClientConfigError{Msg: "unable to get BridgeIP for task in bridge mode"} return dockerapi.DockerContainerMetadata{Error: apierrors.NamedError(&err)} } container.MergeEnvironmentVariables(map[string]string{ fluentNetworkHost: ipAddress, fluentNetworkPort: FluentNetworkPortValue, }) } } //Apply the log driver secret into container's LogConfig and Env secrets to container.Environment hasSecretAsEnvOrLogDriver := func(s apicontainer.Secret) bool { return s.Type == apicontainer.SecretTypeEnv || s.Target == apicontainer.SecretTargetLogDriver } if container.HasSecret(hasSecretAsEnvOrLogDriver) { err := task.PopulateSecrets(hostConfig, container) if err != nil { return dockerapi.DockerContainerMetadata{Error: apierrors.NamedError(err)} } } // Populate credentialspec resource if container.RequiresCredentialSpec() { seelog.Debugf("Obtained container %s with credentialspec resource requirement for task %s.", container.Name, task.Arn) var credSpecResource *credentialspec.CredentialSpecResource resource, ok := task.GetCredentialSpecResource() if !ok || len(resource) <= 0 { resMissingErr := &apierrors.DockerClientConfigError{Msg: "unable to fetch task resource credentialspec"} return dockerapi.DockerContainerMetadata{Error: apierrors.NamedError(resMissingErr)} } credSpecResource = resource[0].(*credentialspec.CredentialSpecResource) containerCredSpec, err := container.GetCredentialSpec() if err == nil && containerCredSpec != "" { // CredentialSpec mapping: input := credentialspec:file://test.json, output := credentialspec=file://test.json desiredCredSpecInjection, err := credSpecResource.GetTargetMapping(containerCredSpec) if err != nil || desiredCredSpecInjection == "" { missingErr := &apierrors.DockerClientConfigError{Msg: "unable to fetch valid credentialspec mapping"} return dockerapi.DockerContainerMetadata{Error: apierrors.NamedError(missingErr)} } // Inject containers' hostConfig.SecurityOpt with the credentialspec resource seelog.Infof("Injecting container %s with credentialspec %s.", container.Name, desiredCredSpecInjection) if len(hostConfig.SecurityOpt) == 0 { hostConfig.SecurityOpt = []string{desiredCredSpecInjection} } else { for idx, opt := range hostConfig.SecurityOpt { if strings.HasPrefix(opt, "credentialspec:") { hostConfig.SecurityOpt[idx] = desiredCredSpecInjection } } } } else { emptyErr := &apierrors.DockerClientConfigError{Msg: "unable to fetch valid credentialspec: " + err.Error()} return dockerapi.DockerContainerMetadata{Error: apierrors.NamedError(emptyErr)} } } if container.ShouldCreateWithEnvFiles() { err := 
task.MergeEnvVarsFromEnvfiles(container) if err != nil { seelog.Errorf("Error populating environment variables from specified files into container %s", container.Name) return dockerapi.DockerContainerMetadata{Error: apierrors.NamedError(err)} } } if execcmd.IsExecEnabledContainer(container) { tId, err := task.GetID() if err != nil { herr := &apierrors.HostConfigError{Msg: err.Error()} return dockerapi.DockerContainerMetadata{Error: apierrors.NamedError(herr)} } err = engine.execCmdMgr.InitializeContainer(tId, container, hostConfig) if err != nil { herr := &apierrors.HostConfigError{Msg: err.Error()} return dockerapi.DockerContainerMetadata{Error: apierrors.NamedError(herr)} } } config, err := task.DockerConfig(container, dockerClientVersion) if err != nil { return dockerapi.DockerContainerMetadata{Error: apierrors.NamedError(err)} } // Augment labels with some metadata from the agent. Explicitly do this last // such that it will always override duplicates in the provided raw config // data. config.Labels[labelTaskARN] = task.Arn config.Labels[labelContainerName] = container.Name config.Labels[labelTaskDefinitionFamily] = task.Family config.Labels[labelTaskDefinitionVersion] = task.Version config.Labels[labelCluster] = engine.cfg.Cluster if dockerContainerName == "" { // only alphanumeric and hyphen characters are allowed reInvalidChars := regexp.MustCompile("[^A-Za-z0-9-]+") name := reInvalidChars.ReplaceAllString(container.Name, "") dockerContainerName = "ecs-" + task.Family + "-" + task.Version + "-" + name + "-" + utils.RandHex() // Pre-add the container in case we stop before the next, more useful, // AddContainer call. This ensures we have a way to get the container if // we die before 'createContainer' returns because we can inspect by // name engine.state.AddContainer(&apicontainer.DockerContainer{ DockerName: dockerContainerName, Container: container, }, task) seelog.Infof("Task engine [%s]: created container name mapping for task: %s -> %s", task.Arn, container.Name, dockerContainerName) } // Create metadata directory and file then populate it with common metadata of all containers of this task // Afterwards add this directory to the container's mounts if file creation was successful if engine.cfg.ContainerMetadataEnabled.Enabled() && !container.IsInternal() { info, infoErr := engine.client.Info(engine.ctx, dockerclient.InfoTimeout) if infoErr != nil { seelog.Warnf("Task engine [%s]: unable to get docker info : %v", task.Arn, infoErr) } mderr := engine.metadataManager.Create(config, hostConfig, task, container.Name, info.SecurityOptions) if mderr != nil { seelog.Warnf("Task engine [%s]: unable to create metadata for container %s: %v", task.Arn, container.Name, mderr) } } createContainerBegin := time.Now() metadata := client.CreateContainer(engine.ctx, config, hostConfig, dockerContainerName, dockerclient.CreateContainerTimeout) if metadata.DockerID != "" { seelog.Infof("Task engine [%s]: created docker container for task: %s -> %s", task.Arn, container.Name, metadata.DockerID) dockerContainer := &apicontainer.DockerContainer{DockerID: metadata.DockerID, DockerName: dockerContainerName, Container: container} engine.state.AddContainer(dockerContainer, task) engine.saveDockerContainerData(dockerContainer) } container.SetLabels(config.Labels) seelog.Infof("Task engine [%s]: created docker container for task: %s -> %s, took %s", task.Arn, container.Name, metadata.DockerID, time.Since(createContainerBegin)) container.SetRuntimeID(metadata.DockerID) return metadata } func 
getFirelensLogConfig(task *apitask.Task, container *apicontainer.Container, hostConfig *dockercontainer.HostConfig, cfg *config.Config) dockercontainer.LogConfig { fields := strings.Split(task.Arn, "/") taskID := fields[len(fields)-1] tag := fmt.Sprintf(fluentTagDockerFormat, container.Name, taskID) fluentd := socketPathPrefix + filepath.Join(cfg.DataDirOnHost, dataLogDriverPath, taskID, dataLogDriverSocketPath) logConfig := hostConfig.LogConfig logConfig.Type = logDriverTypeFluentd logConfig.Config = make(map[string]string) logConfig.Config[logDriverTag] = tag logConfig.Config[logDriverFluentdAddress] = fluentd logConfig.Config[logDriverAsyncConnect] = strconv.FormatBool(true) logConfig.Config[logDriverSubSecondPrecision] = strconv.FormatBool(true) seelog.Debugf("Applying firelens log config for container %s: %v", container.Name, logConfig) return logConfig } func (engine *DockerTaskEngine) startContainer(task *apitask.Task, container *apicontainer.Container) dockerapi.DockerContainerMetadata { seelog.Infof("Task engine [%s]: starting container: %s (Runtime ID: %s)", task.Arn, container.Name, container.GetRuntimeID()) client := engine.client if container.DockerConfig.Version != nil { client = client.WithVersion(dockerclient.DockerVersion(*container.DockerConfig.Version)) } dockerID, err := engine.getDockerID(task, container) if err != nil { return dockerapi.DockerContainerMetadata{ Error: dockerapi.CannotStartContainerError{ FromError: err, }, } } startContainerBegin := time.Now() dockerContainerMD := client.StartContainer(engine.ctx, dockerID, engine.cfg.ContainerStartTimeout) if dockerContainerMD.Error != nil { return dockerContainerMD } seelog.Infof("Task engine [%s]: started docker container for task: %s -> %s, took %s", task.Arn, container.Name, dockerContainerMD.DockerID, time.Since(startContainerBegin)) // Get metadata through container inspection and available task information then write this to the metadata file // Performs this in the background to avoid delaying container start // TODO: Add a state to the apicontainer.Container for the status of the metadata file (Whether it needs update) and // add logic to engine state restoration to do a metadata update for containers that are running after the agent was restarted if engine.cfg.ContainerMetadataEnabled.Enabled() && !container.IsInternal() { go func() { err := engine.metadataManager.Update(engine.ctx, dockerID, task, container.Name) if err != nil { seelog.Warnf("Task engine [%s]: failed to update metadata file for container %s: %v", task.Arn, container.Name, err) return } container.SetMetadataFileUpdated() seelog.Debugf("Task engine [%s]: updated metadata file for container %s", task.Arn, container.Name) }() } // If container is a firelens container, fluent host is needed to be added to the environment variable for the task. // For the supported network mode - bridge and awsvpc, the awsvpc take the host 127.0.0.1 but in bridge mode, // there is a need to wait for the IP to be present before the container using the firelens can be created. 
if container.GetFirelensConfig() != nil { if !task.IsNetworkModeAWSVPC() && (container.GetNetworkModeFromHostConfig() == "" || container.GetNetworkModeFromHostConfig() == apitask.BridgeNetworkMode) { _, gotContainerIP := getContainerHostIP(dockerContainerMD.NetworkSettings) if !gotContainerIP { getIPBridgeBackoff := retry.NewExponentialBackoff(minGetIPBridgeTimeout, maxGetIPBridgeTimeout, getIPBridgeRetryJitterMultiplier, getIPBridgeRetryDelayMultiplier) contextWithTimeout, cancel := context.WithTimeout(engine.ctx, time.Minute) defer cancel() err := retry.RetryWithBackoffCtx(contextWithTimeout, getIPBridgeBackoff, func() error { inspectOutput, err := engine.client.InspectContainer(engine.ctx, dockerContainerMD.DockerID, dockerclient.InspectContainerTimeout) if err != nil { return err } _, gotIPBridge := getContainerHostIP(inspectOutput.NetworkSettings) if gotIPBridge { dockerContainerMD.NetworkSettings = inspectOutput.NetworkSettings return nil } else { return errors.New("Bridge IP not available to use for firelens") } }) if err != nil { return dockerapi.DockerContainerMetadata{ Error: dockerapi.CannotStartContainerError{FromError: err}, } } } } } if execcmd.IsExecEnabledContainer(container) { if err := engine.execCmdMgr.StartAgent(engine.ctx, engine.client, task, container, dockerID); err != nil { seelog.Errorf("Task engine [%s]: Failed to start ExecCommandAgent Process for container [%s]: %v", task.Arn, container.Name, err) } // whether we started or failed to start, we'll want to emit a state change event // redundant state change events like RUNNING->RUNNING are allowed mTask, ok := engine.managedTasks[task.Arn] if ok { mTask.emitContainerEvent(mTask.Task, container, "") } else { seelog.Errorf("Task engine [%s]: Failed to update status of ExecCommandAgent Process for container [%s]: managed task not found", task.Arn, container.Name) } } return dockerContainerMD } func (engine *DockerTaskEngine) provisionContainerResources(task *apitask.Task, container *apicontainer.Container) dockerapi.DockerContainerMetadata { seelog.Infof("Task engine [%s]: setting up container resources for container [%s]", task.Arn, container.Name) containerInspectOutput, err := engine.inspectContainer(task, container) if err != nil { return dockerapi.DockerContainerMetadata{ Error: ContainerNetworkingError{ fromError: errors.Wrap(err, "container resource provisioning: cannot setup task network namespace due to error inspecting pause container"), }, } } task.SetPausePIDInVolumeResources(strconv.Itoa(containerInspectOutput.State.Pid)) cniConfig, err := engine.buildCNIConfigFromTaskContainer(task, containerInspectOutput, true) if err != nil { return dockerapi.DockerContainerMetadata{ Error: ContainerNetworkingError{ fromError: errors.Wrap(err, "container resource provisioning: unable to build cni configuration"), }, } } // Invoke the libcni to config the network namespace for the container result, err := engine.cniClient.SetupNS(engine.ctx, cniConfig, cniSetupTimeout) if err != nil { seelog.Errorf("Task engine [%s]: unable to configure pause container namespace: %v", task.Arn, err) return dockerapi.DockerContainerMetadata{ DockerID: cniConfig.ContainerID, Error: ContainerNetworkingError{errors.Wrap(err, "container resource provisioning: failed to setup network namespace")}, } } taskIP := result.IPs[0].Address.IP.String() seelog.Infof("Task engine [%s]: associated with ip address '%s'", task.Arn, taskIP) engine.state.AddTaskIPAddress(taskIP, task.Arn) task.SetLocalIPAddress(taskIP) engine.saveTaskData(task) return 
dockerapi.DockerContainerMetadata{ DockerID: cniConfig.ContainerID, } } // cleanupPauseContainerNetwork will clean up the network namespace of pause container func (engine *DockerTaskEngine) cleanupPauseContainerNetwork(task *apitask.Task, container *apicontainer.Container) error { delay := time.Duration(engine.cfg.ENIPauseContainerCleanupDelaySeconds) * time.Second if engine.handleDelay != nil && delay > 0 { seelog.Infof("Task engine [%s]: waiting %s before cleaning up pause container.", task.Arn, delay) engine.handleDelay(delay) } containerInspectOutput, err := engine.inspectContainer(task, container) if err != nil { return errors.Wrap(err, "engine: cannot cleanup task network namespace due to error inspecting pause container") } seelog.Infof("Task engine [%s]: cleaning up the network namespace", task.Arn) cniConfig, err := engine.buildCNIConfigFromTaskContainer(task, containerInspectOutput, false) if err != nil { return errors.Wrapf(err, "engine: failed cleanup task network namespace, task: %s", task.String()) } return engine.cniClient.CleanupNS(engine.ctx, cniConfig, cniCleanupTimeout) } // buildCNIConfigFromTaskContainer builds a CNI config for the task and container. func (engine *DockerTaskEngine) buildCNIConfigFromTaskContainer( task *apitask.Task, containerInspectOutput *types.ContainerJSON, includeIPAMConfig bool) (*ecscni.Config, error) { cniConfig := &ecscni.Config{ BlockInstanceMetadata: engine.cfg.AWSVPCBlockInstanceMetdata.Enabled(), MinSupportedCNIVersion: config.DefaultMinSupportedCNIVersion, } if engine.cfg.OverrideAWSVPCLocalIPv4Address != nil && len(engine.cfg.OverrideAWSVPCLocalIPv4Address.IP) != 0 && len(engine.cfg.OverrideAWSVPCLocalIPv4Address.Mask) != 0 { cniConfig.IPAMV4Address = engine.cfg.OverrideAWSVPCLocalIPv4Address } if len(engine.cfg.AWSVPCAdditionalLocalRoutes) != 0 { cniConfig.AdditionalLocalRoutes = engine.cfg.AWSVPCAdditionalLocalRoutes } cniConfig.ContainerPID = strconv.Itoa(containerInspectOutput.State.Pid) cniConfig.ContainerID = containerInspectOutput.ID cniConfig, err := task.BuildCNIConfig(includeIPAMConfig, cniConfig) if err != nil { return nil, errors.Wrapf(err, "engine: failed to build cni configuration from task") } return cniConfig, nil } func (engine *DockerTaskEngine) inspectContainer(task *apitask.Task, container *apicontainer.Container) (*types.ContainerJSON, error) { dockerID, err := engine.getDockerID(task, container) if err != nil { return nil, err } return engine.client.InspectContainer(engine.ctx, dockerID, dockerclient.InspectContainerTimeout) } func (engine *DockerTaskEngine) stopContainer(task *apitask.Task, container *apicontainer.Container) dockerapi.DockerContainerMetadata { seelog.Infof("Task engine [%s]: stopping container [%s]", task.Arn, container.Name) dockerID, err := engine.getDockerID(task, container) if err != nil { return dockerapi.DockerContainerMetadata{ Error: dockerapi.CannotStopContainerError{ FromError: err, }, } } // Cleanup the pause container network namespace before stop the container if container.Type == apicontainer.ContainerCNIPause { err := engine.cleanupPauseContainerNetwork(task, container) if err != nil { seelog.Errorf("Task engine [%s]: unable to cleanup pause container network namespace: %v", task.Arn, err) } seelog.Infof("Task engine [%s]: cleaned pause container network namespace", task.Arn) } apiTimeoutStopContainer := container.GetStopTimeout() if apiTimeoutStopContainer <= 0 { apiTimeoutStopContainer = engine.cfg.DockerStopTimeout } return engine.client.StopContainer(engine.ctx, dockerID, 
apiTimeoutStopContainer) } func (engine *DockerTaskEngine) removeContainer(task *apitask.Task, container *apicontainer.Container) error { seelog.Infof("Task engine [%s]: removing container: %s", task.Arn, container.Name) dockerID, err := engine.getDockerID(task, container) if err != nil { return err } return engine.client.RemoveContainer(engine.ctx, dockerID, dockerclient.RemoveContainerTimeout) } // updateTaskUnsafe determines if a new transition needs to be applied to the // referenced task, and if needed applies it. It should not be called anywhere // but from 'AddTask' and is protected by the tasksLock lock there. func (engine *DockerTaskEngine) updateTaskUnsafe(task *apitask.Task, update *apitask.Task) { managedTask, ok := engine.managedTasks[task.Arn] if !ok { seelog.Criticalf("Task engine [%s]: ACS message for a task we thought we managed, but don't! Aborting.", task.Arn) return } // Keep the lock because sequence numbers cannot be correct unless they are // also read in the order addtask was called // This does block the engine's ability to ingest any new events (including // stops for past tasks, ack!), but this is necessary for correctness updateDesiredStatus := update.GetDesiredStatus() seelog.Debugf("Task engine [%s]: putting update on the acs channel: [%s] with seqnum [%d]", task.Arn, updateDesiredStatus.String(), update.StopSequenceNumber) managedTask.emitACSTransition(acsTransition{ desiredStatus: updateDesiredStatus, seqnum: update.StopSequenceNumber, }) seelog.Debugf("Task engine [%s]: update taken off the acs channel: [%s] with seqnum [%d]", task.Arn, updateDesiredStatus.String(), update.StopSequenceNumber) } // transitionContainer calls applyContainerState, and then notifies the managed // task of the change. transitionContainer is called by progressTask and // by handleStoppedToRunningContainerTransition. 
func (engine *DockerTaskEngine) transitionContainer(task *apitask.Task, container *apicontainer.Container, to apicontainerstatus.ContainerStatus) { // Let docker events operate async so that we can continue to handle ACS / other requests // This is safe because 'applyContainerState' will not mutate the task metadata := engine.applyContainerState(task, container, to) engine.tasksLock.RLock() managedTask, ok := engine.managedTasks[task.Arn] engine.tasksLock.RUnlock() if ok { managedTask.emitDockerContainerChange(dockerContainerChange{ container: container, event: dockerapi.DockerContainerChangeEvent{ Status: to, DockerContainerMetadata: metadata, }, }) } } // applyContainerState moves the container to the given state by calling the // function defined in the transitionFunctionMap for the state func (engine *DockerTaskEngine) applyContainerState(task *apitask.Task, container *apicontainer.Container, nextState apicontainerstatus.ContainerStatus) dockerapi.DockerContainerMetadata { transitionFunction, ok := engine.transitionFunctionMap()[nextState] if !ok { seelog.Criticalf("Task engine [%s]: unsupported desired state transition for container [%s]: %s", task.Arn, container.Name, nextState.String()) return dockerapi.DockerContainerMetadata{Error: &impossibleTransitionError{nextState}} } metadata := transitionFunction(task, container) if metadata.Error != nil { seelog.Infof("Task engine [%s]: error transitioning container [%s (Runtime ID: %s)] to [%s]: %v", task.Arn, container.Name, container.GetRuntimeID(), nextState.String(), metadata.Error) } else { seelog.Debugf("Task engine [%s]: transitioned container [%s (Runtime ID: %s)] to [%s]", task.Arn, container.Name, container.GetRuntimeID(), nextState.String()) } return metadata } // transitionFunctionMap provides the logic for the simple state machine of the // DockerTaskEngine. Each desired state maps to a function that can be called // to try and move the task to that desired state. func (engine *DockerTaskEngine) transitionFunctionMap() map[apicontainerstatus.ContainerStatus]transitionApplyFunc { return engine.containerStatusToTransitionFunction } type transitionApplyFunc (func(*apitask.Task, *apicontainer.Container) dockerapi.DockerContainerMetadata) // State is a function primarily meant for testing usage; it is explicitly not // part of the TaskEngine interface and should not be relied upon. // It returns an internal representation of the state of this DockerTaskEngine. func (engine *DockerTaskEngine) State() dockerstate.TaskEngineState { return engine.state } // Version returns the underlying docker version. 
func (engine *DockerTaskEngine) Version() (string, error) { return engine.client.Version(engine.ctx, dockerclient.VersionTimeout) } func (engine *DockerTaskEngine) updateMetadataFile(task *apitask.Task, cont *apicontainer.DockerContainer) { err := engine.metadataManager.Update(engine.ctx, cont.DockerID, task, cont.Container.Name) if err != nil { seelog.Errorf("Task engine [%s]: failed to update metadata file for container %s: %v", task.Arn, cont.Container.Name, err) } else { cont.Container.SetMetadataFileUpdated() seelog.Debugf("Task engine [%s]: updated metadata file for container %s", task.Arn, cont.Container.Name) } } func getContainerHostIP(networkSettings *types.NetworkSettings) (string, bool) { if networkSettings == nil { return "", false } else if networkSettings.IPAddress != "" { return networkSettings.IPAddress, true } else if len(networkSettings.Networks) > 0 { for mode, network := range networkSettings.Networks { if mode == apitask.BridgeNetworkMode && network.IPAddress != "" { return network.IPAddress, true } } } return "", false } func (engine *DockerTaskEngine) getDockerID(task *apitask.Task, container *apicontainer.Container) (string, error) { runtimeID := container.GetRuntimeID() if runtimeID != "" { return runtimeID, nil } containerMap, ok := engine.state.ContainerMapByArn(task.Arn) if !ok { return "", errors.Errorf("container name=%s belongs to unrecognized task taskArn=%s", container.Name, task.Arn) } dockerContainer, ok := containerMap[container.Name] if !ok { return "", errors.Errorf("container name=%s not recognized by agent", container.Name) } if dockerContainer.DockerID == "" { return dockerContainer.DockerName, nil } return dockerContainer.DockerID, nil }
idx: 1
id: 25,565
msg: just curious, why was this modified?
proj: aws-amazon-ecs-agent
lang: go
@@ -1302,6 +1302,7 @@ void ComTdbExeUtilFastDelete::displayContents(Space * space,
 ComTdbExeUtilHiveTruncate::ComTdbExeUtilHiveTruncate(
      char * tableName, ULng32 tableNameLen,
+     char * hiveTableName,
      char * tableLocation,
      char * partnLocation,
      char * hostName,
y: 1
/********************************************************************** // @@@ START COPYRIGHT @@@ // // Licensed to the Apache Software Foundation (ASF) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The ASF licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. // // @@@ END COPYRIGHT @@@ **********************************************************************/ /* -*-C++-*- **************************************************************************** * * File: ComTdbExeUtil.cpp * Description: * * Created: 5/6/98 * Language: C++ * * * * **************************************************************************** */ #include "ComTdbExeUtil.h" #include "ComTdbCommon.h" #include "ComSmallDefs.h" /////////////////////////////////////////////////////////////////////////// // // Methods for class ComTdbExeUtil // /////////////////////////////////////////////////////////////////////////// ComTdbExeUtil::ComTdbExeUtil(Lng32 type, char * query, ULng32 querylen, Int16 querycharset, char * tableName, ULng32 tableNameLen, ex_expr * input_expr, ULng32 input_rowlen, ex_expr * output_expr, ULng32 output_rowlen, ex_expr_base * scan_expr, ex_cri_desc * work_cri_desc, const unsigned short work_atp_index, ex_cri_desc * given_cri_desc, ex_cri_desc * returned_cri_desc, queue_index down, queue_index up, Lng32 num_buffers, ULng32 buffer_size) : ComTdbGenericUtil(query, querylen, querycharset, tableName, tableNameLen, input_expr, input_rowlen, output_expr, output_rowlen, work_cri_desc, work_atp_index, given_cri_desc, returned_cri_desc, down, up, num_buffers, buffer_size), type_(type), child_(NULL), scanExpr_(scan_expr), flags_(0), explOptionsStr_(NULL) { setNodeType(ComTdb::ex_EXE_UTIL); } Long ComTdbExeUtil::pack(void * space) { child_.pack(space); scanExpr_.pack(space); if (explOptionsStr_) explOptionsStr_.pack(space); if (NEOCatalogName_) NEOCatalogName_.pack(space); return ComTdbGenericUtil::pack(space); } Lng32 ComTdbExeUtil::unpack(void * base, void * reallocator) { if(child_.unpack(base, reallocator)) return -1; if(scanExpr_.unpack(base, reallocator)) return -1; if(explOptionsStr_.unpack(base)) return -1; if(NEOCatalogName_.unpack(base)) return -1; return ComTdbGenericUtil::unpack(base, reallocator); } const ComTdb* ComTdbExeUtil::getChild(Int32 pos) const { if (pos == 0) return child_; else return NULL; } /////////////////////////////////////////////////////////////////////////// // // Methods for class ComTdbExeUtilDisplayExplain // /////////////////////////////////////////////////////////////////////////// ComTdbExeUtilDisplayExplain::ComTdbExeUtilDisplayExplain (char * query, ULng32 querylen, Int16 querycharset, char * moduleName, char * stmtName, char optionX, ex_expr * input_expr, ULng32 input_rowlen, ex_expr * output_expr, ULng32 output_rowlen, ex_cri_desc * work_cri_desc, const unsigned short work_atp_index, Lng32 colDescSize, Lng32 outputRowSize, ex_cri_desc * given_cri_desc, 
ex_cri_desc * returned_cri_desc, queue_index down, queue_index up, Lng32 num_buffers, ULng32 buffer_size) : ComTdbExeUtil(ComTdbExeUtil::DISPLAY_EXPLAIN_, query, querylen, querycharset, NULL, 0, input_expr, input_rowlen, output_expr, output_rowlen, NULL, work_cri_desc, work_atp_index, given_cri_desc, returned_cri_desc, down, up, num_buffers, buffer_size), moduleName_(moduleName), stmtName_(stmtName), colDescSize_(colDescSize), outputRowSize_(outputRowSize), flags_(0) { setNodeType(ComTdb::ex_DISPLAY_EXPLAIN); setOptionX(optionX); } void ComTdbExeUtilDisplayExplain::setOptionX(char c) // move from char to mask_ { flags_ &= OPTION_OFF; // clear in case reused switch(c) { case 'e' : flags_ |= OPTION_E; break; // expert mode case 'f' : flags_ |= OPTION_F; break; // summary mode case 'm' : flags_ |= OPTION_M; break; // machine readable mode case 'n' : flags_ |= OPTION_N; break; // normal mode default : assert(c == 'n'); // always fail, input not supported } return; } Long ComTdbExeUtilDisplayExplain::pack(void * space) { if (moduleName_) moduleName_.pack(space); if (stmtName_) stmtName_.pack(space); return ComTdbExeUtil::pack(space); } Lng32 ComTdbExeUtilDisplayExplain::unpack(void * base, void * reallocator) { if(moduleName_.unpack(base)) return -1; if(stmtName_.unpack(base)) return -1; return ComTdbExeUtil::unpack(base, reallocator); } void ComTdbExeUtilDisplayExplain::displayContents(Space * space,ULng32 flag) { ComTdb::displayContents(space,flag & 0xFFFFFFFE); if(flag & 0x00000008) { char buf[100]; str_sprintf(buf, "\nFor ComTdbExeUtilDisplayExplain :"); space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short)); char c = 'm'; if (isOptionN()) {c = 'n';} else if (isOptionF()) {c = 'f';} else if (isOptionE()) {c = 'e';} str_sprintf(buf,"optionX_ = %c", c); space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short)); } if (flag & 0x00000001) { displayExpression(space,flag); displayChildren(space,flag); } } /////////////////////////////////////////////////////////////////////////// // // Methods for class ComTdbExeUtilDisplayExplainComplex // /////////////////////////////////////////////////////////////////////////// ComTdbExeUtilDisplayExplainComplex::ComTdbExeUtilDisplayExplainComplex (Lng32 explainType, char * qry1, char * qry2, char * qry3, char * qry4, char * objectName, Lng32 objectNameLen, ex_expr * input_expr, ULng32 input_rowlen, ex_expr * output_expr, ULng32 output_rowlen, ex_cri_desc * work_cri_desc, const unsigned short work_atp_index, ex_cri_desc * given_cri_desc, ex_cri_desc * returned_cri_desc, queue_index down, queue_index up, Lng32 num_buffers, ULng32 buffer_size) : ComTdbExeUtil(ComTdbExeUtil::DISPLAY_EXPLAIN_COMPLEX_, NULL, 0, (Int16)SQLCHARSETCODE_UNKNOWN, objectName, objectNameLen, input_expr, input_rowlen, output_expr, output_rowlen, NULL, work_cri_desc, work_atp_index, given_cri_desc, returned_cri_desc, down, up, num_buffers, buffer_size), explainType_(explainType), qry1_(qry1), qry2_(qry2), qry3_(qry3), qry4_(qry4), flags_(0) { setNodeType(ComTdb::ex_DISPLAY_EXPLAIN_COMPLEX); } Long ComTdbExeUtilDisplayExplainComplex::pack(void * space) { if (qry1_) qry1_.pack(space); if (qry2_) qry2_.pack(space); if (qry3_) qry3_.pack(space); if (qry4_) qry4_.pack(space); return ComTdbExeUtil::pack(space); } Lng32 ComTdbExeUtilDisplayExplainComplex::unpack(void * base, void * reallocator) { if (qry1_.unpack(base)) return -1; if (qry2_.unpack(base)) return -1; if (qry3_.unpack(base)) return -1; if (qry4_.unpack(base)) return -1; return ComTdbExeUtil::unpack(base, 
reallocator); } void ComTdbExeUtilDisplayExplainComplex::displayContents(Space * space,ULng32 flag) { ComTdb::displayContents(space,flag & 0xFFFFFFFE); if(flag & 0x00000008) { char buf[1000]; str_sprintf(buf, "\nFor ComTdbExeUtilDisplayExplainComplex :"); space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short)); if (qry1_) { char query[400]; if (strlen(qry1_) > 390) { strncpy(query, qry1_, 390); query[390] = 0; strcat(query, "..."); } else strcpy(query, qry1_); str_sprintf(buf,"Qry1 = %s ",query); space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short)); } if (qry2_) { char query[400]; if (strlen(qry2_) > 390) { strncpy(query, qry2_, 390); query[390] = 0; strcat(query, "..."); } else strcpy(query, qry2_); str_sprintf(buf,"Qry2 = %s ",query); space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short)); } if (qry3_) { char query[400]; if (strlen(qry3_) > 390) { strncpy(query, qry3_, 390); query[390] = 0; strcat(query, "..."); } else strcpy(query, qry3_); str_sprintf(buf,"Qry3 = %s ",query); space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short)); } if (qry4_) { char query[400]; if (strlen(qry4_) > 390) { strncpy(query, qry4_, 390); query[390] = 0; strcat(query, "..."); } else strcpy(query, qry4_); str_sprintf(buf,"Qry4 = %s ",query); space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short)); } if (objectName_) { str_sprintf(buf,"ObjectName = %s ",getObjectName()); space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short)); } } if (flag & 0x00000001) { displayExpression(space,flag); displayChildren(space,flag); } } ////////////////////////////////////////////////////////////////////////// // // Methods for class ComTdbExeUtilMaintainObject // ////////////////////////////////////////////////////////////////////////// ComTdbExeUtilMaintainObject::ComTdbExeUtilMaintainObject( char * objectName, ULng32 objectNameLen, char * schemaName, ULng32 schemaNameLen, UInt16 ot, char * parentTableName, ULng32 parentTableNameLen, ex_expr * input_expr, ULng32 input_rowlen, ex_expr * output_expr, ULng32 output_rowlen, ex_cri_desc * work_cri_desc, const unsigned short work_atp_index, ex_cri_desc * given_cri_desc, ex_cri_desc * returned_cri_desc, queue_index down, queue_index up, Lng32 num_buffers, ULng32 buffer_size) : ComTdbExeUtil(ComTdbExeUtil::MAINTAIN_OBJECT_, NULL, 0, (Int16)SQLCHARSETCODE_UNKNOWN, objectName, objectNameLen, input_expr, input_rowlen, output_expr, output_rowlen, NULL, work_cri_desc, work_atp_index, given_cri_desc, returned_cri_desc, down, up, num_buffers, buffer_size), ot_(ot), schemaName_(schemaName), schemaNameLen_(schemaNameLen), parentTableName_(parentTableName), parentTableNameLen_(parentTableNameLen), flags_(0), controlFlags_(0), controlFlags2_(0), formatFlags_(0), maintainedTableCreateTime_(0), parentTableObjectUID_(0), from_(0), to_(0), flags2_(0) { setNodeType(ComTdb::ex_MAINTAIN_OBJECT); } Long ComTdbExeUtilMaintainObject::pack(void * space) { reorgTableOptions_.pack(space); reorgIndexOptions_.pack(space); updStatsTableOptions_.pack(space); updStatsMvlogOptions_.pack(space); updStatsMvsOptions_.pack(space); updStatsMvgroupOptions_.pack(space); refreshMvgroupOptions_.pack(space); refreshMvsOptions_.pack(space); reorgMvgroupOptions_.pack(space); reorgMvsOptions_.pack(space); reorgMvsIndexOptions_.pack(space); cleanMaintainCITOptions_.pack(space); indexList_.pack(space); refreshMvgroupList_.pack(space); refreshMvsList_.pack(space); reorgMvgroupList_.pack(space); reorgMvsList_.pack(space); 
reorgMvsIndexList_.pack(space); updStatsMvgroupList_.pack(space); updStatsMvsList_.pack(space); multiTablesNamesList_.pack(space); multiTablesCreateTimeList_.pack(space); skippedMultiTablesNamesList_.pack(space); if (parentTableName_) parentTableName_.pack(space); if (schemaName_) schemaName_.pack(space); return ComTdbExeUtil::pack(space); } Lng32 ComTdbExeUtilMaintainObject::unpack(void * base, void * reallocator) { if(reorgTableOptions_.unpack(base)) return -1; if(reorgIndexOptions_.unpack(base)) return -1; if(updStatsTableOptions_.unpack(base)) return -1; if(updStatsMvlogOptions_.unpack(base)) return -1; if(updStatsMvsOptions_.unpack(base)) return -1; if(updStatsMvgroupOptions_.unpack(base)) return -1; if(refreshMvgroupOptions_.unpack(base)) return -1; if(refreshMvsOptions_.unpack(base)) return -1; if(reorgMvgroupOptions_.unpack(base)) return -1; if(reorgMvsOptions_.unpack(base)) return -1; if(reorgMvsIndexOptions_.unpack(base)) return -1; if(cleanMaintainCITOptions_.unpack(base)) return -1; if(indexList_.unpack(base, reallocator)) return -1; if(refreshMvgroupList_.unpack(base, reallocator)) return -1; if(refreshMvsList_.unpack(base, reallocator)) return -1; if(reorgMvgroupList_.unpack(base, reallocator)) return -1; if(reorgMvsList_.unpack(base, reallocator)) return -1; if(reorgMvsIndexList_.unpack(base, reallocator)) return -1; if(updStatsMvgroupList_.unpack(base, reallocator)) return -1; if(updStatsMvsList_.unpack(base, reallocator)) return -1; if(multiTablesNamesList_.unpack(base, reallocator)) return -1; if(multiTablesCreateTimeList_.unpack(base, reallocator)) return -1; if(skippedMultiTablesNamesList_.unpack(base, reallocator)) return -1; if(parentTableName_.unpack(base)) return -1; if(schemaName_.unpack(base)) return -1; return ComTdbExeUtil::unpack(base, reallocator); } void ComTdbExeUtilMaintainObject::setParams(NABoolean reorgTable, NABoolean reorgIndex, NABoolean updStatsTable, NABoolean updStatsMvlog, NABoolean updStatsMvs, NABoolean updStatsMvgroup, NABoolean refreshMvgroup, NABoolean refreshMvs, NABoolean reorgMvgroup, NABoolean reorgMvs, NABoolean reorgMvsIndex, NABoolean continueOnError, NABoolean cleanMaintainCIT, NABoolean getSchemaLabelStats, NABoolean getLabelStats, NABoolean getTableLabelStats, NABoolean getIndexLabelStats, NABoolean getLabelStatsIncIndexes, NABoolean getLabelStatsIncInternal, NABoolean getLabelStatsIncRelated ) { setReorgTable(reorgTable); setReorgIndex(reorgIndex); setUpdStatsTable(updStatsTable); setUpdStatsMvlog(updStatsMvlog); setUpdStatsMvs(updStatsMvs); setUpdStatsMvgroup(updStatsMvgroup); setRefreshMvgroup(refreshMvgroup); setRefreshMvs(refreshMvs); setReorgMvgroup(reorgMvgroup); setReorgMvs(reorgMvs); setReorgMvsIndex(reorgMvsIndex); setContinueOnError(continueOnError); setCleanMaintainCIT(cleanMaintainCIT); setSchemaLabelStats(getSchemaLabelStats); setTableLabelStats(getTableLabelStats); setIndexLabelStats(getIndexLabelStats); setLabelStatsIncIndexes(getLabelStatsIncIndexes); setLabelStatsIncInternal(getLabelStatsIncInternal); setLabelStatsIncRelated(getLabelStatsIncRelated); } void ComTdbExeUtilMaintainObject::setOptionsParams (char* reorgTableOptions, char* reorgIndexOptions, char* updStatsTableOptions, char* updStatsMvlogOptions, char* updStatsMvsOptions, char* updStatsMvgroupOptions, char* refreshMvgroupOptions, char* refreshMvsOptions, char* reorgMvgroupOptions, char* reorgMvsOptions, char* reorgMvsIndexOptions, char* cleanMaintainCITOptions) { reorgTableOptions_ = reorgTableOptions; reorgIndexOptions_ = reorgIndexOptions; 
updStatsTableOptions_ = updStatsTableOptions; updStatsMvlogOptions_ = updStatsMvlogOptions; updStatsMvsOptions_ = updStatsMvsOptions; updStatsMvgroupOptions_ = updStatsMvgroupOptions; refreshMvgroupOptions_ = refreshMvgroupOptions; refreshMvsOptions_ = refreshMvsOptions; reorgMvgroupOptions_ = reorgMvgroupOptions; reorgMvsOptions_ = reorgMvsOptions; reorgMvsIndexOptions_ = reorgMvsIndexOptions; cleanMaintainCITOptions_ = cleanMaintainCITOptions; } void ComTdbExeUtilMaintainObject::setLists(Queue* indexList, Queue* refreshMvgroupList, Queue* refreshMvsList, Queue* reorgMvgroupList, Queue* reorgMvsList, Queue* reorgMvsIndexList, Queue* updStatsMvgroupList, Queue* updStatsMvsList, Queue* multiTablesNamesList, Queue* skippedMultiTablesNamesList) { indexList_ = indexList; refreshMvgroupList_ = refreshMvgroupList; refreshMvsList_ = refreshMvsList; reorgMvgroupList_ = reorgMvgroupList; reorgMvsList_ = reorgMvsList; reorgMvsIndexList_ = reorgMvsIndexList; updStatsMvgroupList_ = updStatsMvgroupList; updStatsMvsList_ = updStatsMvsList; multiTablesNamesList_ = multiTablesNamesList; skippedMultiTablesNamesList_ = skippedMultiTablesNamesList; } void ComTdbExeUtilMaintainObject::setControlParams (NABoolean disableReorgTable, NABoolean enableReorgTable, NABoolean disableReorgIndex, NABoolean enableReorgIndex, NABoolean disableUpdStatsTable, NABoolean enableUpdStatsTable, NABoolean disableUpdStatsMvs, NABoolean enableUpdStatsMvs, NABoolean disableRefreshMvs, NABoolean enableRefreshMvs, NABoolean disableReorgMvs, NABoolean enableReorgMvs, NABoolean resetReorgTable, NABoolean resetUpdStatsTable, NABoolean resetUpdStatsMvs, NABoolean resetRefreshMvs, NABoolean resetReorgMvs, NABoolean resetReorgIndex, NABoolean enableUpdStatsMvlog, NABoolean disableUpdStatsMvlog, NABoolean resetUpdStatsMvlog, NABoolean enableReorgMvsIndex, NABoolean disableReorgMvsIndex, NABoolean resetReorgMvsIndex, NABoolean enableRefreshMvgroup, NABoolean disableRefreshMvgroup, NABoolean resetRefreshMvgroup, NABoolean enableReorgMvgroup, NABoolean disableReorgMvgroup, NABoolean resetReorgMvgroup, NABoolean enableUpdStatsMvgroup, NABoolean disableUpdStatsMvgroup, NABoolean resetUpdStatsMvgroup, NABoolean enableTableLabelStats, NABoolean disableTableLabelStats, NABoolean resetTableLabelStats, NABoolean enableIndexLabelStats, NABoolean disableIndexLabelStats, NABoolean resetIndexLabelStats ) { setDisableReorgTable(disableReorgTable); setDisableReorgIndex(disableReorgIndex); setDisableUpdStatsTable(disableUpdStatsTable); setDisableUpdStatsMvs(disableUpdStatsMvs); setDisableRefreshMvs(disableRefreshMvs); setDisableReorgMvs(disableReorgMvs); setEnableReorgTable(enableReorgTable); setEnableReorgIndex(enableReorgIndex); setEnableUpdStatsTable(enableUpdStatsTable); setEnableUpdStatsMvs(enableUpdStatsMvs); setEnableRefreshMvs(enableRefreshMvs); setEnableReorgMvs(enableReorgMvs); setResetReorgTable(resetReorgTable); setResetUpdStatsTable(resetUpdStatsTable); setResetUpdStatsMvs(resetUpdStatsMvs); setResetRefreshMvs(resetRefreshMvs); setResetReorgMvs(resetReorgMvs); setResetReorgIndex(resetReorgIndex); setEnableUpdStatsMvlog(enableUpdStatsMvlog); setDisableUpdStatsMvlog(disableUpdStatsMvlog); setResetUpdStatsMvlog(resetUpdStatsMvlog); setEnableReorgMvsIndex(enableReorgMvsIndex); setDisableReorgMvsIndex(disableReorgMvsIndex); setResetReorgMvsIndex(resetReorgMvsIndex); setEnableRefreshMvgroup(enableRefreshMvgroup); setDisableRefreshMvgroup(disableRefreshMvgroup); setResetRefreshMvgroup(resetRefreshMvgroup); setEnableReorgMvgroup(enableReorgMvgroup); 
setDisableReorgMvgroup(disableReorgMvgroup); setResetReorgMvgroup(resetReorgMvgroup); setEnableUpdStatsMvgroup(enableUpdStatsMvgroup); setDisableUpdStatsMvgroup(disableUpdStatsMvgroup); setResetUpdStatsMvgroup(resetUpdStatsMvgroup); setEnableTableLabelStats(enableTableLabelStats); setDisableTableLabelStats(disableTableLabelStats); setResetTableLabelStats(resetTableLabelStats); setEnableIndexLabelStats(enableIndexLabelStats); setDisableIndexLabelStats(disableIndexLabelStats); setResetIndexLabelStats(resetIndexLabelStats); } void ComTdbExeUtilMaintainObject::displayContents(Space * space, ULng32 flag) { ComTdb::displayContents(space,flag & 0xFFFFFFFE); if(flag & 0x00000008) { char buf[1000]; str_sprintf(buf, "\nFor ComTdbExeUtilMaintainObject :"); space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short)); if (getTableName() != NULL) { str_sprintf(buf,"Tablename = %s ",getTableName()); space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short)); } } if (flag & 0x00000001) { displayExpression(space,flag); displayChildren(space,flag); } } /////////////////////////////////////////////////////////////////////////// // // Methods for class ComTdbExeUtilLoadVolatileTable // /////////////////////////////////////////////////////////////////////////// ComTdbExeUtilLoadVolatileTable::ComTdbExeUtilLoadVolatileTable (char * tableName, ULng32 tableNameLen, char * insertQuery, char * updStatsQuery, Int16 queryCharSet, Int64 threshold, ex_cri_desc * work_cri_desc, const unsigned short work_atp_index, ex_cri_desc * given_cri_desc, ex_cri_desc * returned_cri_desc, queue_index down, queue_index up, Lng32 num_buffers, ULng32 buffer_size) : ComTdbExeUtil(ComTdbExeUtil::LOAD_VOLATILE_TABLE_, NULL, 0, queryCharSet/*for insertQuery & updStatsQuery*/, tableName, tableNameLen, NULL, 0, NULL, 0, NULL, work_cri_desc, work_atp_index, given_cri_desc, returned_cri_desc, down, up, num_buffers, buffer_size), insertQuery_(insertQuery), updStatsQuery_(updStatsQuery), threshold_(threshold), flags_(0) { setNodeType(ComTdb::ex_LOAD_VOLATILE_TABLE); } Long ComTdbExeUtilLoadVolatileTable::pack(void * space) { if (insertQuery_) insertQuery_.pack(space); if (updStatsQuery_) updStatsQuery_.pack(space); return ComTdbExeUtil::pack(space); } Lng32 ComTdbExeUtilLoadVolatileTable::unpack(void * base, void * reallocator) { if(insertQuery_.unpack(base)) return -1; if(updStatsQuery_.unpack(base)) return -1; return ComTdbExeUtil::unpack(base, reallocator); } void ComTdbExeUtilLoadVolatileTable::displayContents(Space * space,ULng32 flag) { ComTdb::displayContents(space,flag & 0xFFFFFFFE); if(flag & 0x00000008) { char buf[1000]; str_sprintf(buf, "\nFor ComTdbExeUtilLoadVolatileTable :"); space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short)); if (getTableName() != NULL) { str_sprintf(buf,"Tablename = %s ",getTableName()); space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short)); } if (insertQuery_) { char query[400]; if (strlen(insertQuery_) > 390) { strncpy(query, insertQuery_, 390); query[390] = 0; strcat(query, "..."); } else strcpy(query, insertQuery_); str_sprintf(buf,"Insert Query = %s ",query); space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short)); } if (updStatsQuery_) { char query[400]; if (strlen(updStatsQuery_) > 390) { strncpy(query, updStatsQuery_, 390); query[390] = 0; strcat(query, "..."); } else strcpy(query, updStatsQuery_); str_sprintf(buf,"UpdStats Query = %s ",query); space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short)); } 
str_sprintf(buf,"Threshold = %Ld ", threshold_); space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short)); } if (flag & 0x00000001) { displayExpression(space,flag); displayChildren(space,flag); } } /////////////////////////////////////////////////////////////////////////// // // Methods for class ComTdbExeUtilCleanupVolatileTables // /////////////////////////////////////////////////////////////////////////// ComTdbExeUtilCleanupVolatileTables::ComTdbExeUtilCleanupVolatileTables (char * catName, ULng32 catNameLen, ex_cri_desc * work_cri_desc, const unsigned short work_atp_index, ex_cri_desc * given_cri_desc, ex_cri_desc * returned_cri_desc, queue_index down, queue_index up, Lng32 num_buffers, ULng32 buffer_size) : ComTdbExeUtil(ComTdbExeUtil::CLEANUP_VOLATILE_SCHEMA_, NULL, 0, (Int16)SQLCHARSETCODE_UNKNOWN, catName, catNameLen, NULL, 0, NULL, 0, NULL, work_cri_desc, work_atp_index, given_cri_desc, returned_cri_desc, down, up, num_buffers, buffer_size), flags_(0) { setNodeType(ComTdb::ex_CLEANUP_VOLATILE_TABLES); } void ComTdbExeUtilCleanupVolatileTables::displayContents(Space * space,ULng32 flag) { ComTdb::displayContents(space,flag & 0xFFFFFFFE); if(flag & 0x00000008) { char buf[1000]; str_sprintf(buf, "\nFor ComTdbExeUtilCleanupVolatileTables :"); space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short)); if (getTableName() != NULL) { str_sprintf(buf,"Tablename = %s ",getTableName()); space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short)); } } if (flag & 0x00000001) { displayExpression(space,flag); displayChildren(space,flag); } } /////////////////////////////////////////////////////////////////////////// // // Methods for class ComTdbExeUtilGetVotalileInfo // /////////////////////////////////////////////////////////////////////////// ComTdbExeUtilGetVolatileInfo::ComTdbExeUtilGetVolatileInfo ( char * param1, char * param2, ex_cri_desc * work_cri_desc, const unsigned short work_atp_index, ex_cri_desc * given_cri_desc, ex_cri_desc * returned_cri_desc, queue_index down, queue_index up, Lng32 num_buffers, ULng32 buffer_size) : ComTdbExeUtil(ComTdbExeUtil::GET_VOLATILE_INFO, NULL, 0, (Int16)SQLCHARSETCODE_UNKNOWN, NULL, 0, NULL, 0, NULL, 0, NULL, work_cri_desc, work_atp_index, given_cri_desc, returned_cri_desc, down, up, num_buffers, buffer_size), flags_(0), param1_(param1), param2_(param2) { setNodeType(ComTdb::ex_GET_VOLATILE_INFO); } Long ComTdbExeUtilGetVolatileInfo::pack(void * space) { if (param1_) param1_.pack(space); if (param2_) param2_.pack(space); return ComTdbExeUtil::pack(space); } Lng32 ComTdbExeUtilGetVolatileInfo::unpack(void * base, void * reallocator) { if(param1_.unpack(base)) return -1; if(param2_.unpack(base)) return -1; return ComTdbExeUtil::unpack(base, reallocator); } void ComTdbExeUtilGetVolatileInfo::displayContents(Space * space,ULng32 flag) { ComTdb::displayContents(space,flag & 0xFFFFFFFE); if(flag & 0x00000008) { char buf[1000]; str_sprintf(buf, "\nFor ComTdbExeUtilGetVotalileInfo :"); space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short)); if (getTableName() != NULL) { str_sprintf(buf,"Tablename = %s ",getTableName()); space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short)); } } if (flag & 0x00000001) { displayExpression(space,flag); displayChildren(space,flag); } } /////////////////////////////////////////////////////////////////////////// // // Methods for class ComTdbExeUtilGetErrorInfo // /////////////////////////////////////////////////////////////////////////// 
ComTdbExeUtilGetErrorInfo::ComTdbExeUtilGetErrorInfo ( Lng32 errNum, ex_cri_desc * work_cri_desc, const unsigned short work_atp_index, ex_cri_desc * given_cri_desc, ex_cri_desc * returned_cri_desc, queue_index down, queue_index up, Lng32 num_buffers, ULng32 buffer_size) : ComTdbExeUtil(ComTdbExeUtil::GET_ERROR_INFO_, NULL, 0, (Int16)SQLCHARSETCODE_UNKNOWN, NULL, 0, NULL, 0, NULL, 0, NULL, work_cri_desc, work_atp_index, given_cri_desc, returned_cri_desc, down, up, num_buffers, buffer_size), flags_(0), errNum_(errNum) { setNodeType(ComTdb::ex_GET_ERROR_INFO); } void ComTdbExeUtilGetErrorInfo::displayContents(Space * space,ULng32 flag) { ComTdb::displayContents(space,flag & 0xFFFFFFFE); if(flag & 0x00000008) { char buf[1000]; str_sprintf(buf, "\nFor ComTdbExeUtilGetErrorInfo :"); space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short)); str_sprintf(buf,"ErrorNum = %d ", errNum_); space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short)); } if (flag & 0x00000001) { displayExpression(space,flag); displayChildren(space,flag); } } //LCOV_EXCL_STOP /////////////////////////////////////////////////////////////////////////// // // Methods for class ComTdbExeUtilCreateTableAs // /////////////////////////////////////////////////////////////////////////// ComTdbExeUtilCreateTableAs::ComTdbExeUtilCreateTableAs (char * tableName, ULng32 tableNameLen, char * ctQuery, char * siQuery, char * viQuery, char * usQuery, Int64 threshold, ex_cri_desc * work_cri_desc, const unsigned short work_atp_index, ex_cri_desc * given_cri_desc, ex_cri_desc * returned_cri_desc, queue_index down, queue_index up, Lng32 num_buffers, ULng32 buffer_size) : ComTdbExeUtil(ComTdbExeUtil::CREATE_TABLE_AS_, NULL, 0, (Int16)SQLCHARSETCODE_UNKNOWN, tableName, tableNameLen, NULL, 0, NULL, 0, NULL, work_cri_desc, work_atp_index, given_cri_desc, returned_cri_desc, down, up, num_buffers, buffer_size), ctQuery_(ctQuery), siQuery_(siQuery), viQuery_(viQuery), usQuery_(usQuery), threshold_(threshold), flags_(0) { setNodeType(ComTdb::ex_CREATE_TABLE_AS); } Long ComTdbExeUtilCreateTableAs::pack(void * space) { if (ctQuery_) ctQuery_.pack(space); if (siQuery_) siQuery_.pack(space); if (viQuery_) viQuery_.pack(space); if (usQuery_) usQuery_.pack(space); return ComTdbExeUtil::pack(space); } Lng32 ComTdbExeUtilCreateTableAs::unpack(void * base, void * reallocator) { if(ctQuery_.unpack(base)) return -1; if(siQuery_.unpack(base)) return -1; if(viQuery_.unpack(base)) return -1; if(usQuery_.unpack(base)) return -1; return ComTdbExeUtil::unpack(base, reallocator); } void ComTdbExeUtilCreateTableAs::displayContents(Space * space,ULng32 flag) { ComTdb::displayContents(space,flag & 0xFFFFFFFE); if(flag & 0x00000008) { char buf[1000]; str_sprintf(buf, "\nFor ComTdbExeUtilCreateTableAs :"); space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short)); if (getTableName() != NULL) { str_sprintf(buf,"Tablename = %s ",getTableName()); space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short)); } if (ctQuery_) { char query[400]; if (strlen(ctQuery_) > 390) { strncpy(query, ctQuery_, 390); query[390] = 0; strcat(query, "..."); } else strcpy(query, ctQuery_); str_sprintf(buf,"Create Query = %s ",query); space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short)); } if (siQuery_) { char query[400]; if (strlen(siQuery_) > 390) { strncpy(query, siQuery_, 390); query[390] = 0; strcat(query, "..."); } else strcpy(query, siQuery_); str_sprintf(buf,"Sidetree Insert Query = %s ",query); 
space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short)); } if (viQuery_) { char query[400]; if (strlen(viQuery_) > 390) { strncpy(query, viQuery_, 390); query[390] = 0; strcat(query, "..."); } else strcpy(query, viQuery_); str_sprintf(buf,"VSBB Insert Query = %s ",query); space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short)); } if (usQuery_) { char query[400]; if (strlen(usQuery_) > 390) { strncpy(query, usQuery_, 390); query[390] = 0; strcat(query, "..."); } else strcpy(query, usQuery_); str_sprintf(buf,"UpdStats Query = %s ",query); space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short)); } } if (flag & 0x00000001) { displayExpression(space,flag); displayChildren(space,flag); } } /////////////////////////////////////////////////////////////////////////// // // Methods for class ComTdbExeUtilFastDelete // /////////////////////////////////////////////////////////////////////////// ComTdbExeUtilFastDelete::ComTdbExeUtilFastDelete( char * tableName, ULng32 tableNameLen, char * primaryPartnLoc, Queue * indexList, char * stmt, ULng32 stmtLen, Lng32 numEsps, Int64 objectUID, Lng32 numLOBs, char * lobNumArray, ex_cri_desc * work_cri_desc, const unsigned short work_atp_index, ex_cri_desc * given_cri_desc, ex_cri_desc * returned_cri_desc, queue_index down, queue_index up, Lng32 num_buffers, ULng32 buffer_size) : ComTdbExeUtil(ComTdbExeUtil::FAST_DELETE_, NULL, 0, (Int16)SQLCHARSETCODE_UNKNOWN, tableName, tableNameLen, NULL, 0, NULL, 0, NULL, work_cri_desc, work_atp_index, given_cri_desc, returned_cri_desc, down, up, num_buffers, buffer_size), flags_(0), indexList_(indexList), purgedataStmt_(stmt), purgedataStmtLen_(stmtLen), primaryPartnLoc_(primaryPartnLoc), numEsps_(numEsps), objectUID_(objectUID), numLOBs_(numLOBs), lobNumArray_(lobNumArray) { setNodeType(ComTdb::ex_FAST_DELETE); } Long ComTdbExeUtilFastDelete::pack(void * space) { indexList_.pack(space); if (purgedataStmt_) purgedataStmt_.pack(space); if (primaryPartnLoc_) primaryPartnLoc_.pack(space); if (lobNumArray_) lobNumArray_.pack(space); return ComTdbExeUtil::pack(space); } Lng32 ComTdbExeUtilFastDelete::unpack(void * base, void * reallocator) { if(indexList_.unpack(base, reallocator)) return -1; if (purgedataStmt_.unpack(base)) return -1; if (primaryPartnLoc_.unpack(base)) return -1; if(lobNumArray_.unpack(base)) return -1; return ComTdbExeUtil::unpack(base, reallocator); } short ComTdbExeUtilFastDelete::getLOBnum(short i) { if ((i > numLOBs_) || (i <= 0)) return -1; short lobNum = *((short*)&getLOBnumArray()[2*(i-1)]); return lobNum; } void ComTdbExeUtilFastDelete::displayContents(Space * space, ULng32 flag) { ComTdb::displayContents(space,flag & 0xFFFFFFFE); if(flag & 0x00000008) { char buf[500]; str_sprintf(buf, "\nFor ComTdbExeUtilFastDelete :"); space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short)); if (getTableName() != NULL) { str_sprintf(buf,"Tablename = %s ",getTableName()); space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short)); } if (purgedataStmt_) { str_sprintf(buf,"purgedataStmt_ = %s ", purgedataStmt()); space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short)); } if (numLOBs_ > 0) { str_sprintf(buf, "numLOBs_ = %d ", numLOBs_); space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short)); } } if (flag & 0x00000001) { displayExpression(space,flag); displayChildren(space,flag); } } /////////////////////////////////////////////////////////////////////////// // // Methods for class ComTdbExeUtilHiveTruncate // 
/////////////////////////////////////////////////////////////////////////// ComTdbExeUtilHiveTruncate::ComTdbExeUtilHiveTruncate( char * tableName, ULng32 tableNameLen, char * tableLocation, char * partnLocation, char * hostName, Lng32 portNum, Int64 modTS, ex_cri_desc * given_cri_desc, ex_cri_desc * returned_cri_desc, queue_index down, queue_index up, Lng32 num_buffers, ULng32 buffer_size) : ComTdbExeUtil(ComTdbExeUtil::HIVE_TRUNCATE_, NULL, 0, (Int16)SQLCHARSETCODE_UNKNOWN, tableName, tableNameLen, NULL, 0, NULL, 0, NULL, NULL, 0, given_cri_desc, returned_cri_desc, down, up, num_buffers, buffer_size), flags_(0), tableLocation_(tableLocation), partnLocation_(partnLocation), hdfsHost_(hostName), hdfsPort_(portNum), modTS_(modTS) { setNodeType(ComTdb::ex_HIVE_TRUNCATE); } Long ComTdbExeUtilHiveTruncate::pack(void * space) { if (tableLocation_) tableLocation_.pack(space); if (hdfsHost_) hdfsHost_.pack(space); if (partnLocation_) partnLocation_.pack(space); return ComTdbExeUtil::pack(space); } Lng32 ComTdbExeUtilHiveTruncate::unpack(void * base, void * reallocator) { if(tableLocation_.unpack(base)) return -1; if(hdfsHost_.unpack(base)) return -1; if (partnLocation_.unpack(base)) return -1; return ComTdbExeUtil::unpack(base, reallocator); } void ComTdbExeUtilHiveTruncate::displayContents(Space * space, ULng32 flag) { ComTdb::displayContents(space,flag & 0xFFFFFFFE); if(flag & 0x00000008) { char buf[500]; str_sprintf(buf, "\nFor ComTdbExeUtilHiveTruncate :"); space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short)); if (getTableName() != NULL) { str_sprintf(buf,"Tablename = %s ",getTableName()); space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short)); } if (getTableLocation() != NULL) { str_sprintf(buf,"tableLocation_ = %s ", getTableLocation()); space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short)); } if (getPartnLocation() != NULL) { str_sprintf(buf,"partnLocation_ = %s ", getPartnLocation()); space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short)); } } if (flag & 0x00000001) { displayExpression(space,flag); displayChildren(space,flag); } } /////////////////////////////////////////////////////////////////////////// // // Methods for class ComTdbExeUtilGetStatistics // /////////////////////////////////////////////////////////////////////////// ComTdbExeUtilGetStatistics::ComTdbExeUtilGetStatistics ( char * stmtName, short statsReqType, short statsMergeType, short activeQueryNum, ex_cri_desc * work_cri_desc, const unsigned short work_atp_index, ex_cri_desc * given_cri_desc, ex_cri_desc * returned_cri_desc, queue_index down, queue_index up, Lng32 num_buffers, ULng32 buffer_size) : ComTdbExeUtil(ComTdbExeUtil::GET_STATISTICS_, NULL, 0, (Int16)SQLCHARSETCODE_UNKNOWN, NULL, 0, NULL, 0, NULL, 0, NULL, work_cri_desc, work_atp_index, given_cri_desc, returned_cri_desc, down, up, num_buffers, buffer_size), flags_(0), stmtName_(stmtName), statsReqType_(statsReqType), statsMergeType_(statsMergeType), activeQueryNum_(activeQueryNum) { setNodeType(ComTdb::ex_GET_STATISTICS); } Long ComTdbExeUtilGetStatistics::pack(void * space) { if (stmtName_) stmtName_.pack(space); return ComTdbExeUtil::pack(space); } Lng32 ComTdbExeUtilGetStatistics::unpack(void * base, void * reallocator) { if(stmtName_.unpack(base)) return -1; return ComTdbExeUtil::unpack(base, reallocator); } void ComTdbExeUtilGetStatistics::displayContents(Space * space,ULng32 flag) { ComTdb::displayContents(space,flag & 0xFFFFFFFE); if(flag & 0x00000008) { char buf[1000]; str_sprintf(buf, 
"\nFor ComTdbExeUtilGetStatistics :"); space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short)); if ( stmtName_ != (NABasicPtr)NULL ) { str_sprintf(buf,"StmtName = %s ", getStmtName()); space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short)); } } if (flag & 0x00000001) { displayExpression(space,flag); displayChildren(space,flag); } } void ComTdbExeUtilGetProcessStatistics::displayContents(Space * space,ULng32 flag) { ComTdb::displayContents(space,flag & 0xFFFFFFFE); if(flag & 0x00000008) { char buf[1000]; str_sprintf(buf, "\nFor ComTdbExeUtilGetProcessStatistics :"); space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short)); if ( stmtName_ != (NABasicPtr)NULL ) { str_sprintf(buf,"Pid = %s ", getPid()); space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short)); } } if (flag & 0x00000001) { displayExpression(space,flag); displayChildren(space,flag); } } //LCOV_EXCL_START /////////////////////////////////////////////////////////////////////////// // // Methods for class ComTdbExeUtilGetUID // /////////////////////////////////////////////////////////////////////////// ComTdbExeUtilGetUID::ComTdbExeUtilGetUID ( Int64 uid, ex_cri_desc * work_cri_desc, const unsigned short work_atp_index, ex_cri_desc * given_cri_desc, ex_cri_desc * returned_cri_desc, queue_index down, queue_index up, Lng32 num_buffers, ULng32 buffer_size) : ComTdbExeUtil(ComTdbExeUtil::GET_UID_, NULL, 0, (Int16)SQLCHARSETCODE_UNKNOWN, NULL, 0, NULL, 0, NULL, 0, NULL, work_cri_desc, work_atp_index, given_cri_desc, returned_cri_desc, down, up, num_buffers, buffer_size), flags_(0), uid_(uid) { setNodeType(ComTdb::ex_GET_UID); } Long ComTdbExeUtilGetUID::pack(void * space) { return ComTdbExeUtil::pack(space); } Lng32 ComTdbExeUtilGetUID::unpack(void * base, void * reallocator) { return ComTdbExeUtil::unpack(base, reallocator); } void ComTdbExeUtilGetUID::displayContents(Space * space,ULng32 flag) { ComTdb::displayContents(space,flag & 0xFFFFFFFE); if(flag & 0x00000008) { char buf[100]; str_sprintf(buf, "\nFor ComTdbExeUtilGetUID :"); space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short)); str_sprintf(buf,"UID = %LD", uid_); space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short)); } if (flag & 0x00000001) { displayExpression(space,flag); displayChildren(space,flag); } } /////////////////////////////////////////////////////////////////////////// // // Methods for class ComTdbExeUtilGetQID // /////////////////////////////////////////////////////////////////////////// ComTdbExeUtilGetQID::ComTdbExeUtilGetQID ( char * stmtName, ex_cri_desc * work_cri_desc, const unsigned short work_atp_index, ex_cri_desc * given_cri_desc, ex_cri_desc * returned_cri_desc, queue_index down, queue_index up, Lng32 num_buffers, ULng32 buffer_size) : ComTdbExeUtil(ComTdbExeUtil::GET_QID_, NULL, 0, (Int16)SQLCHARSETCODE_UNKNOWN, NULL, 0, NULL, 0, NULL, 0, NULL, work_cri_desc, work_atp_index, given_cri_desc, returned_cri_desc, down, up, num_buffers, buffer_size), flags_(0), stmtName_(stmtName) { setNodeType(ComTdb::ex_GET_QID); } Long ComTdbExeUtilGetQID::pack(void * space) { if (stmtName_) stmtName_.pack(space); return ComTdbExeUtil::pack(space); } Lng32 ComTdbExeUtilGetQID::unpack(void * base, void * reallocator) { if (stmtName_.unpack(base)) return -1; return ComTdbExeUtil::unpack(base, reallocator); } void ComTdbExeUtilGetQID::displayContents(Space * space,ULng32 flag) { ComTdb::displayContents(space,flag & 0xFFFFFFFE); if(flag & 0x00000008) { char buf[100]; str_sprintf(buf, 
"\nFor ComTdbExeUtilGetQID :"); space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short)); str_sprintf(buf,"stmtName_ = %s ", getStmtName()); space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short)); } if (flag & 0x00000001) { displayExpression(space,flag); displayChildren(space,flag); } } /////////////////////////////////////////////////////////////////////////// // // Methods for class ComTdbExeUtilPopulateInMemStats // /////////////////////////////////////////////////////////////////////////// ComTdbExeUtilPopulateInMemStats::ComTdbExeUtilPopulateInMemStats ( Int64 uid, char * inMemHistogramsTableName, char * inMemHistintsTableName, char * sourceTableCatName, char * sourceTableSchName, char * sourceTableObjName, char * sourceHistogramsTableName, char * sourceHistintsTableName, ex_cri_desc * work_cri_desc, const unsigned short work_atp_index, ex_cri_desc * given_cri_desc, ex_cri_desc * returned_cri_desc, queue_index down, queue_index up, Lng32 num_buffers, ULng32 buffer_size) : ComTdbExeUtil(ComTdbExeUtil::POP_IN_MEM_STATS_, NULL, 0, (Int16)SQLCHARSETCODE_UNKNOWN, NULL, 0, NULL, 0, NULL, 0, NULL, work_cri_desc, work_atp_index, given_cri_desc, returned_cri_desc, down, up, num_buffers, buffer_size), flags_(0), uid_(uid), inMemHistogramsTableName_(inMemHistogramsTableName), inMemHistintsTableName_(inMemHistintsTableName), sourceTableCatName_(sourceTableCatName), sourceTableSchName_(sourceTableSchName), sourceTableObjName_(sourceTableObjName), sourceHistogramsTableName_(sourceHistogramsTableName), sourceHistintsTableName_(sourceHistintsTableName) { setNodeType(ComTdb::ex_POP_IN_MEM_STATS); } Long ComTdbExeUtilPopulateInMemStats::pack(void * space) { if (inMemHistogramsTableName_) inMemHistogramsTableName_.pack(space); if (inMemHistintsTableName_) inMemHistintsTableName_.pack(space); if (sourceTableCatName_) sourceTableCatName_.pack(space); if (sourceTableSchName_) sourceTableSchName_.pack(space); if (sourceTableObjName_) sourceTableObjName_.pack(space); if (sourceHistogramsTableName_) sourceHistogramsTableName_.pack(space); if (sourceHistintsTableName_) sourceHistintsTableName_.pack(space); return ComTdbExeUtil::pack(space); } Lng32 ComTdbExeUtilPopulateInMemStats::unpack(void * base, void * reallocator) { if (inMemHistogramsTableName_.unpack(base)) return -1; if (inMemHistintsTableName_.unpack(base)) return -1; if (sourceTableCatName_.unpack(base)) return -1; if (sourceTableSchName_.unpack(base)) return -1; if (sourceTableObjName_.unpack(base)) return -1; if (sourceHistogramsTableName_.unpack(base)) return -1; if (sourceHistintsTableName_.unpack(base)) return -1; return ComTdbExeUtil::unpack(base, reallocator); } void ComTdbExeUtilPopulateInMemStats::displayContents(Space * space,ULng32 flag) { ComTdb::displayContents(space,flag & 0xFFFFFFFE); if(flag & 0x00000008) { char buf[1000]; str_sprintf(buf, "\nFor ComTdbExeUtilPopulateInMemStats :"); space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short)); str_sprintf(buf,"UID = %LD", uid_); space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short)); if ((char *)inMemHistogramsTableName_ != (char *)NULL) { str_sprintf(buf,"inMemHistogramsTableName_ = %s ", getInMemHistogramsTableName()); space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short)); } if ((char *)inMemHistintsTableName_ != (char *)NULL) { str_sprintf(buf,"inMemHistintsTableName_ = %s ", getInMemHistintsTableName()); space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short)); } if ((char *)sourceTableCatName_ 
!= (char *)NULL) { str_sprintf(buf,"sourceTableCatName_ = %s ", getSourceTableCatName()); space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short)); } if ((char *)sourceTableSchName_ != (char *)NULL) { str_sprintf(buf,"sourceTableSchName_ = %s ", getSourceTableSchName()); space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short)); } if ((char *)sourceTableObjName_ != (char *)NULL) { str_sprintf(buf,"sourceTableCatName_ = %s ", getSourceTableObjName()); space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short)); } if ((char *)sourceHistogramsTableName_ != (char *)NULL) { str_sprintf(buf,"sourceHistogramsTableName_ = %s ", getSourceHistogramsTableName()); space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short)); } if ((char *)sourceHistintsTableName_ != (char *)NULL) { str_sprintf(buf,"sourceHistintsTableName_ = %s ", getSourceHistintsTableName()); space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short)); } } if (flag & 0x00000001) { displayExpression(space,flag); displayChildren(space,flag); } } /////////////////////////////////////////////////////////////////////////// // // Methods for class ComTdbExeUtilAqrWnrInsert // /////////////////////////////////////////////////////////////////////////// ComTdbExeUtilAqrWnrInsert::ComTdbExeUtilAqrWnrInsert( char * tableName, ULng32 tableNameLen, ex_cri_desc * work_cri_desc, const unsigned short work_atp_index, ex_cri_desc * given_cri_desc, ex_cri_desc * returned_cri_desc, queue_index down, queue_index up, Lng32 num_buffers, ULng32 buffer_size) : ComTdbExeUtil(ComTdbExeUtil::AQR_WNR_INSERT_, (char *) eye_AQR_WNR_INS, 0, (Int16)SQLCHARSETCODE_UNKNOWN, tableName, tableNameLen, NULL, 0, NULL, 0, NULL, work_cri_desc, work_atp_index, given_cri_desc, returned_cri_desc, down, up, num_buffers, buffer_size), aqrWnrInsflags_(0) { setNodeType(ComTdb::ex_ARQ_WNR_INSERT); } void ComTdbExeUtilAqrWnrInsert::displayContents(Space * space, ULng32 flag) { ComTdb::displayContents(space,flag & 0xFFFFFFFE); if(flag & 0x00000008) { char buf[1000]; str_sprintf(buf, "\nFor ComTdbExeUtilAqrWnrInsert:"); space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short)); if (getTableName() != NULL) { str_sprintf(buf,"Tablename = %s ",getTableName()); space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short)); } str_sprintf(buf, "Lock target = %s ", doLockTarget() ? 
"ON" : "OFF"); space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short)); } if (flag & 0x00000001) { displayExpression(space,flag); displayChildren(space,flag); } } ////////////////////////////////////////////////////////////////////////// // // Methods for class ComTdbExeUtilLongRunning // /////////////////////////////////////////////////////////////////////////// ComTdbExeUtilLongRunning::ComTdbExeUtilLongRunning( char * tableName, ULng32 tableNameLen, ex_cri_desc * work_cri_desc, const unsigned short work_atp_index, ex_cri_desc * given_cri_desc, ex_cri_desc * returned_cri_desc, queue_index down, queue_index up, Lng32 num_buffers, ULng32 buffer_size) : ComTdbExeUtil(ComTdbExeUtil::LONG_RUNNING_, NULL, 0, (Int16)SQLCHARSETCODE_UNKNOWN, tableName, tableNameLen, NULL, 0, NULL, 0, NULL, work_cri_desc, work_atp_index, given_cri_desc, returned_cri_desc, down, up, num_buffers, buffer_size), flags_(0), lruStmt_(NULL), lruStmtLen_(0), lruStmtWithCK_(NULL), lruStmtWithCKLen_(0), predicate_(NULL), predicateLen_(0), multiCommitSize_(0) { setNodeType(ComTdb::ex_LONG_RUNNING); } Long ComTdbExeUtilLongRunning::pack(void * space) { if (lruStmt_) lruStmt_.pack(space); if(lruStmtWithCK_) lruStmtWithCK_.pack(space); if (predicate_) predicate_.pack(space); return ComTdbExeUtil::pack(space); } Lng32 ComTdbExeUtilLongRunning::unpack(void * base, void * reallocator) { if(lruStmt_.unpack(base)) return -1; if(lruStmtWithCK_.unpack(base)) return -1; if (predicate_.unpack(base)) return -1; return ComTdbExeUtil::unpack(base, reallocator); } void ComTdbExeUtilLongRunning::setPredicate(Space *space, char *predicate) { if (predicate != NULL) { predicateLen_ = strlen(predicate); predicate_ = space->allocateAlignedSpace ((ULng32)predicateLen_ + 1); strcpy(predicate_, predicate); } } void ComTdbExeUtilLongRunning::displayContents(Space * space, ULng32 flag) { ComTdb::displayContents(space,flag & 0xFFFFFFFE); if(flag & 0x00000008) { char buf[1000]; str_sprintf(buf, "\nFor ComTdbExeUtilLongRunning :"); space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short)); if (getTableName() != NULL) { str_sprintf(buf,"Tablename = %s ",getTableName()); space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short)); } } if (flag & 0x00000001) { displayExpression(space,flag); } } /////////////////////////////////////////////////////////////////////////// // // Methods for class ComTdbExeUtilGetMetadataInfo // /////////////////////////////////////////////////////////////////////////// ComTdbExeUtilGetMetadataInfo::ComTdbExeUtilGetMetadataInfo ( QueryType queryType, char * cat, char * sch, char * obj, char * pattern, char * param1, ex_expr_base * scan_expr, ex_cri_desc * work_cri_desc, const unsigned short work_atp_index, ex_cri_desc * given_cri_desc, ex_cri_desc * returned_cri_desc, queue_index down, queue_index up, Lng32 num_buffers, ULng32 buffer_size, char * server, char * zkPort) : ComTdbExeUtil(ComTdbExeUtil::GET_METADATA_INFO_, NULL, 0, (Int16)SQLCHARSETCODE_UNKNOWN, NULL, 0, NULL, 0, NULL, 0, scan_expr, work_cri_desc, work_atp_index, given_cri_desc, returned_cri_desc, down, up, num_buffers, buffer_size), queryType_(queryType), cat_(cat), sch_(sch), obj_(obj), pattern_(pattern), param1_(param1), flags_(0), server_(server), zkPort_(zkPort) { setNodeType(ComTdb::ex_GET_METADATA_INFO); } Long ComTdbExeUtilGetMetadataInfo::pack(void * space) { if (cat_) cat_.pack(space); if (sch_) sch_.pack(space); if (obj_) obj_.pack(space); if (pattern_) pattern_.pack(space); if (param1_) param1_.pack(space); if 
(server_) server_.pack(space); if (zkPort_) zkPort_.pack(space); return ComTdbExeUtil::pack(space); } Lng32 ComTdbExeUtilGetMetadataInfo::unpack(void * base, void * reallocator) { if (cat_.unpack(base)) return -1; if (sch_.unpack(base)) return -1; if (obj_.unpack(base)) return -1; if (pattern_.unpack(base)) return -1; if (param1_.unpack(base)) return -1; if (server_.unpack(base)) return -1; if (zkPort_.unpack(base)) return -1; return ComTdbExeUtil::unpack(base, reallocator); } void ComTdbExeUtilGetMetadataInfo::displayContents(Space * space,ULng32 flag) { ComTdb::displayContents(space,flag & 0xFFFFFFFE); if(flag & 0x00000008) { char buf[1000]; str_sprintf(buf, "\nFor ComTdbExeUtilGetMetadataInfo :"); space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short)); str_sprintf(buf, "QueryType: %d", queryType_); space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short)); if (getCat() != NULL) { str_sprintf(buf,"Catalog = %s ", getCat()); space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short)); } if (getSch() != NULL) { str_sprintf(buf,"Schema = %s ", getSch()); space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short)); } if (getObj() != NULL) { str_sprintf(buf,"Object = %s ", getObj()); space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short)); } if (getPattern() != NULL) { str_sprintf(buf,"Pattern = %s ", getPattern()); space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short)); } if (getParam1() != NULL) { str_sprintf(buf,"Param1 = %s ", getParam1()); space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short)); } str_sprintf(buf, "Flags = %b",flags_); space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short)); } if (flag & 0x00000001) { displayExpression(space,flag); displayChildren(space,flag); } } /////////////////////////////////////////////////////////////////////////// // // Methods for class ComTdbExeUtilGetHiveMetadataInfo // /////////////////////////////////////////////////////////////////////////// ComTdbExeUtilGetHiveMetadataInfo::ComTdbExeUtilGetHiveMetadataInfo ( QueryType queryType, char * cat, char * sch, char * obj, char * pattern, char * param1, ex_expr_base * scan_expr, ex_cri_desc * work_cri_desc, const unsigned short work_atp_index, ex_cri_desc * given_cri_desc, ex_cri_desc * returned_cri_desc, queue_index down, queue_index up, Lng32 num_buffers, ULng32 buffer_size) : ComTdbExeUtilGetMetadataInfo( queryType, cat, sch, obj, pattern, param1, scan_expr, work_cri_desc, work_atp_index, given_cri_desc, returned_cri_desc, down, up, num_buffers, buffer_size, NULL, NULL), unused1_(NULL), unused2_(NULL), unused3_(NULL), unused4_(NULL) { setType(ComTdbExeUtil::GET_HIVE_METADATA_INFO_); setNodeType(ComTdb::ex_GET_HIVE_METADATA_INFO); } Long ComTdbExeUtilGetHiveMetadataInfo::pack(void * space) { return ComTdbExeUtilGetMetadataInfo::pack(space); } Lng32 ComTdbExeUtilGetHiveMetadataInfo::unpack(void * base, void * reallocator) { return ComTdbExeUtilGetMetadataInfo::unpack(base, reallocator); } void ComTdbExeUtilGetHiveMetadataInfo::displayContents(Space * space,ULng32 flag) { ComTdbExeUtilGetMetadataInfo::displayContents(space,flag & 0xFFFFFFFE); if (flag & 0x00000001) { displayExpression(space,flag); displayChildren(space,flag); } } /////////////////////////////////////////////////////////////////////////// // // Methods for class ComTdbExeUtilShowSet // /////////////////////////////////////////////////////////////////////////// ComTdbExeUtilShowSet::ComTdbExeUtilShowSet ( UInt16 type, char 
* param1, char * param2, ex_cri_desc * work_cri_desc, const unsigned short work_atp_index, ex_cri_desc * given_cri_desc, ex_cri_desc * returned_cri_desc, queue_index down, queue_index up, Lng32 num_buffers, ULng32 buffer_size) : ComTdbExeUtil(ComTdbExeUtil::SHOW_SET_, NULL, 0, (Int16)SQLCHARSETCODE_UNKNOWN, NULL, 0, NULL, 0, NULL, 0, NULL, work_cri_desc, work_atp_index, given_cri_desc, returned_cri_desc, down, up, num_buffers, buffer_size), type_(type), flags_(0), param1_(param1), param2_(param2) { setNodeType(ComTdb::ex_SHOW_SET); } Long ComTdbExeUtilShowSet::pack(void * space) { if (param1_) param1_.pack(space); if (param2_) param2_.pack(space); return ComTdbExeUtil::pack(space); } Lng32 ComTdbExeUtilShowSet::unpack(void * base, void * reallocator) { if(param1_.unpack(base)) return -1; if(param2_.unpack(base)) return -1; return ComTdbExeUtil::unpack(base, reallocator); } void ComTdbExeUtilShowSet::displayContents(Space * space,ULng32 flag) { ComTdb::displayContents(space,flag & 0xFFFFFFFE); if(flag & 0x00000008) { char buf[100]; str_sprintf(buf, "\nFor ComTdbExeUtilShowSet :"); space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short)); } if (flag & 0x00000001) { displayExpression(space,flag); displayChildren(space,flag); } } /////////////////////////////////////////////////////////////////////////// // // Methods for class ComTdbExeUtilAQR // /////////////////////////////////////////////////////////////////////////// ComTdbExeUtilAQR::ComTdbExeUtilAQR ( Lng32 task, ex_cri_desc * given_cri_desc, ex_cri_desc * returned_cri_desc, queue_index down, queue_index up, Lng32 num_buffers, ULng32 buffer_size) : ComTdbExeUtil(ComTdbExeUtil::AQR_, NULL, 0, (Int16)SQLCHARSETCODE_UNKNOWN, NULL, 0, NULL, 0, NULL, 0, NULL, NULL, 0, given_cri_desc, returned_cri_desc, down, up, num_buffers, buffer_size), task_(task), flags_(0) { setNodeType(ComTdb::ex_AQR); } Long ComTdbExeUtilAQR::pack(void * space) { return ComTdbExeUtil::pack(space); } Lng32 ComTdbExeUtilAQR::unpack(void * base, void * reallocator) { return ComTdbExeUtil::unpack(base, reallocator); } void ComTdbExeUtilAQR::displayContents(Space * space,ULng32 flag) { ComTdb::displayContents(space,flag & 0xFFFFFFFE); if(flag & 0x00000008) { char buf[100]; str_sprintf(buf, "\nFor ComTdbExeUtilAQR :"); space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short)); } if (flag & 0x00000001) { displayExpression(space,flag); displayChildren(space,flag); } } /////////////////////////////////////////////////////////////////////////// // // Methods for class ComTdbExeUtilLobExtract // /////////////////////////////////////////////////////////////////////////// ComTdbExeUtilLobExtract::ComTdbExeUtilLobExtract ( char * handle, Lng32 handleLen, ExtractToType toType, Int64 bufAddr, Int64 extractSizeAddr, Int64 intParam1, Int64 intParam2, Lng32 lobStorageType, char * stringParam1, char * stringParam2, char * stringParam3, char * lobHdfsServer, Lng32 lobHdfsPort, ex_expr * input_expr, ULng32 input_rowlen, ex_cri_desc * work_cri_desc, const unsigned short work_atp_index, ex_cri_desc * given_cri_desc, ex_cri_desc * returned_cri_desc, queue_index down, queue_index up, Lng32 num_buffers, ULng32 buffer_size) : ComTdbExeUtil(ComTdbExeUtil::LOB_EXTRACT_, NULL, 0, (Int16)SQLCHARSETCODE_UNKNOWN, NULL, 0, input_expr, input_rowlen, NULL, 0, NULL, work_cri_desc, work_atp_index, given_cri_desc, returned_cri_desc, down, up, num_buffers, buffer_size), handle_(handle), handleLen_(handleLen), toType_((short)toType), bufAddr_(bufAddr), 
extractSizeIOAddr_(extractSizeAddr), lobStorageType_(lobStorageType), stringParam1_(stringParam1), stringParam2_(stringParam2), stringParam3_(stringParam3), lobHdfsServer_(lobHdfsServer), lobHdfsPort_(lobHdfsPort), totalBufSize_(0), flags_(0) { setNodeType(ComTdb::ex_LOB_EXTRACT); if (toType_ == ExtractToType::TO_FILE_) { // extractSize_ is irrelevant since the whole lob will be read into the output file // bufAddr_ is not passed in by user. It is a CQD value LOB_OUTPUT_SIZE extractSizeIOAddr_ = 0; bufAddr_ = 0; } } Long ComTdbExeUtilLobExtract::pack(void * space) { if (handle_) handle_.pack(space); if (stringParam1_) stringParam1_.pack(space); if (stringParam2_) stringParam2_.pack(space); if (stringParam3_) stringParam3_.pack(space); if (lobHdfsServer_) lobHdfsServer_.pack(space); return ComTdbExeUtil::pack(space); } Lng32 ComTdbExeUtilLobExtract::unpack(void * base, void * reallocator) { if (handle_.unpack(base)) return -1; if (stringParam1_.unpack(base)) return -1; if (stringParam2_.unpack(base)) return -1; if (stringParam3_.unpack(base)) return -1; if (lobHdfsServer_.unpack(base)) return -1; return ComTdbExeUtil::unpack(base, reallocator); } void ComTdbExeUtilLobExtract::displayContents(Space * space,ULng32 flag) { ComTdb::displayContents(space,flag & 0xFFFFFFFE); if(flag & 0x00000008) { char buf[100]; str_sprintf(buf, "\nFor ComTdbExeUtilLobExtract :"); space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short)); } if (flag & 0x00000001) { displayExpression(space,flag); displayChildren(space,flag); } } /////////////////////////////////////////////////////////////////////////// // // Methods for class ComTdbExeUtilLobShowddl // /////////////////////////////////////////////////////////////////////////// ComTdbExeUtilLobShowddl::ComTdbExeUtilLobShowddl ( char * tableName, char * schName, short schNameLen, Int64 objectUID, Lng32 numLOBs, char * lobNumArray, char * lobLocArray, char * lobTypeArray, short maxLocLen, short sdOptions, ex_cri_desc * given_cri_desc, ex_cri_desc * returned_cri_desc, queue_index down, queue_index up, Lng32 num_buffers, ULng32 buffer_size) : ComTdbExeUtil(ComTdbExeUtil::LOB_SHOWDDL_, NULL, 0, (Int16)SQLCHARSETCODE_UNKNOWN, tableName, strlen(tableName), NULL, 0, NULL, 0, NULL, NULL, 0, given_cri_desc, returned_cri_desc, down, up, num_buffers, buffer_size), flags_(0), objectUID_(objectUID), numLOBs_(numLOBs), lobNumArray_(lobNumArray), lobLocArray_(lobLocArray), lobTypeArray_(lobTypeArray), maxLocLen_(maxLocLen), sdOptions_(sdOptions), schName_(schName), schNameLen_(schNameLen) { setNodeType(ComTdb::ex_LOB_SHOWDDL); } Long ComTdbExeUtilLobShowddl::pack(void * space) { if (schName_) schName_.pack(space); if (lobNumArray_) lobNumArray_.pack(space); if (lobLocArray_) lobLocArray_.pack(space); if (lobTypeArray_) lobTypeArray_.pack(space); return ComTdbExeUtil::pack(space); } Lng32 ComTdbExeUtilLobShowddl::unpack(void * base, void * reallocator) { if(schName_.unpack(base)) return -1; if(lobNumArray_.unpack(base)) return -1; if(lobLocArray_.unpack(base)) return -1; if(lobTypeArray_.unpack(base)) return -1; return ComTdbExeUtil::unpack(base, reallocator); } void ComTdbExeUtilLobShowddl::displayContents(Space * space,ULng32 flag) { ComTdb::displayContents(space,flag & 0xFFFFFFFE); if(flag & 0x00000008) { char buf[100]; str_sprintf(buf, "\nFor ComTdbExeUtilLobShowddl :"); space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short)); } if (flag & 0x00000001) { displayExpression(space,flag); displayChildren(space,flag); } } short 
ComTdbExeUtilLobShowddl::getLOBnum(short i) { if ((i > numLOBs_) || (i <= 0)) return -1; short lobNum = *((short*)&getLOBnumArray()[2*(i-1)]); return lobNum; } NABoolean ComTdbExeUtilLobShowddl::getIsExternalLobCol(short i) { NABoolean isExternal = (*((Int32*)&getLOBtypeArray()[4*(i-1)]) == Lob_External_HDFS_File); return isExternal; } char * ComTdbExeUtilLobShowddl::getLOBloc(short i) { if ((i > numLOBs_) || (i <= 0)) return NULL; char * lobLoc = &getLOBlocArray()[maxLocLen_*(i-1)]; return lobLoc; } ///////////////////////////////////////////////////////////////////////////////// // class ComTdbExeUtilHiveMDaccess ///////////////////////////////////////////////////////////////////////////////// ComTdbExeUtilHiveMDaccess::ComTdbExeUtilHiveMDaccess() : ComTdbExeUtil() { } ComTdbExeUtilHiveMDaccess::ComTdbExeUtilHiveMDaccess( MDType type, ULng32 tupleLen, ex_cri_desc *criDescParentDown, ex_cri_desc *criDescParentUp, ex_cri_desc *workCriDesc, unsigned short workAtpIndex, queue_index queueSizeDown, queue_index queueSizeUp, Lng32 numBuffers, ULng32 bufferSize, ex_expr *scanPred, char * hivePredStr, char * schemaName) : ComTdbExeUtil(ComTdbExeUtil::HIVE_MD_ACCESS_, 0, 0, 0, // query,querylen,querycharset NULL, 0, // tablename,tablenamelen NULL, tupleLen, NULL, tupleLen, scanPred, workCriDesc, workAtpIndex, criDescParentDown, criDescParentUp, queueSizeDown, queueSizeUp, numBuffers, bufferSize), mdType_(type), hivePredStr_(hivePredStr), schema_(schemaName) { setNodeType(ComTdb::ex_HIVE_MD_ACCESS); } // Return the number of expressions held by the explain TDB (2) // They are enumerated as: 0 - scanPred, 1 - paramsExpr Int32 ComTdbExeUtilHiveMDaccess::numExpressions() const { return(1); } // Return the expression names of the explain TDB based on some // enumeration. 0 - scanPred, 1 - paramsExpr const char * ComTdbExeUtilHiveMDaccess::getExpressionName(Int32 expNum) const { switch(expNum) { case 0: return "Scan Expr"; default: return 0; } } // Return the expressions of the explain TDB based on some // enumeration. 0 - scanPred, 1 - paramsExpr ex_expr * ComTdbExeUtilHiveMDaccess::getExpressionNode(Int32 expNum) { switch(expNum) { case 0: return scanExpr_; default: return 0; } } // Pack the explainTdb: Convert all pointers to offsets relative // to the space object. 
Long ComTdbExeUtilHiveMDaccess::pack(void * space) { if (hivePredStr_) hivePredStr_.pack(space); if (schema_) schema_.pack(space); return ComTdbExeUtil::pack(space); } // Unpack the explainTdb.: Convert all offsets relative to base // to pointers Lng32 ComTdbExeUtilHiveMDaccess::unpack(void * base, void * reallocator) { if (hivePredStr_.unpack(base)) return -1; if (schema_.unpack(base)) return -1; return ComTdbExeUtil::unpack(base, reallocator); } void ComTdbExeUtilHiveMDaccess::displayContents(Space * space,ULng32 flag) { ComTdb::displayContents(space,flag & 0xFFFFFFFE); if(flag & 0x00000008) { char buf[2000]; str_sprintf(buf, "\nFor ComTdbExeUtilHiveMDaccess :"); space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short)); if (hivePredStr()) { str_sprintf(buf,"hivePredStr_ = %s", hivePredStr()); space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short)); } if (getSchema()) { str_sprintf(buf,"schema_ = %s", getSchema()); space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short)); } } if (flag & 0x00000001) { displayExpression(space,flag); displayChildren(space,flag); } } //********************************************* //ComTdbExeUtilHBaseBulkLoad //******************************************** ComTdbExeUtilHBaseBulkLoad::ComTdbExeUtilHBaseBulkLoad(char * tableName, ULng32 tableNameLen, char * ldStmtStr, ex_cri_desc * work_cri_desc, const unsigned short work_atp_index, ex_cri_desc * given_cri_desc, ex_cri_desc * returned_cri_desc, queue_index down, queue_index up, Lng32 num_buffers, ULng32 buffer_size, char * errCountTab, char * loggingLoc ) : ComTdbExeUtil(ComTdbExeUtil::HBASE_LOAD_, NULL, 0, (Int16)SQLCHARSETCODE_UNKNOWN, tableName, tableNameLen, NULL, 0, NULL, 0, NULL, work_cri_desc, work_atp_index, given_cri_desc, returned_cri_desc, down, up, num_buffers, buffer_size), ldQuery_(ldStmtStr), flags_(0), maxErrorRows_(0), errCountTable_(errCountTab), loggingLocation_(loggingLoc) { setNodeType(ComTdb::ex_HBASE_LOAD); } Long ComTdbExeUtilHBaseBulkLoad::pack(void * space) { if (ldQuery_) ldQuery_.pack(space); if(errCountTable_) errCountTable_.pack(space); if(loggingLocation_) loggingLocation_.pack(space); return ComTdbExeUtil::pack(space); } Lng32 ComTdbExeUtilHBaseBulkLoad::unpack(void * base, void * reallocator) { if(ldQuery_.unpack(base)) return -1; if(errCountTable_.unpack(base)) return -1; if(loggingLocation_.unpack(base)) return -1; return ComTdbExeUtil::unpack(base, reallocator); } void ComTdbExeUtilHBaseBulkLoad::displayContents(Space * space,ULng32 flag) { ComTdb::displayContents(space,flag & 0xFFFFFFFE); if (flag & 0x00000008) { char buf[1000]; str_sprintf(buf, "\nFor ComTdbExeUtilHbaseLoad :"); space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short)); if (getTableName() != NULL) { str_sprintf(buf,"Tablename = %s ",getTableName()); space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short)); } if (ldQuery_) { char query[400]; if (strlen(ldQuery_) > 390) { strncpy(query, ldQuery_, 390); query[390] = 0; strcat(query, "..."); } else strcpy(query, ldQuery_); str_sprintf(buf,"ld Query = %s ",query); space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short)); } if (getLogErrorRows()) { if (loggingLocation_) { str_sprintf(buf, "Logging location = %s ", loggingLocation_.getPointer()); space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short)); } } if (maxErrorRows_ > 0) { str_sprintf(buf, "Max Error Rows = %d", maxErrorRows_); space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short)); if 
(errCountTable_) { str_sprintf(buf, "Error Counter Table Name = %s ", errCountTable_.getPointer()); space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short)); } } } if (flag & 0x00000001) { displayExpression(space,flag); displayChildren(space,flag); } } //********************************************* //ComTdbExeUtilHBaseBulkUnLoad //******************************************** ComTdbExeUtilHBaseBulkUnLoad::ComTdbExeUtilHBaseBulkUnLoad(char * tableName, ULng32 tableNameLen, char * uldStmtStr, char * extractLocation, ex_cri_desc * work_cri_desc, const unsigned short work_atp_index, ex_cri_desc * given_cri_desc, ex_cri_desc * returned_cri_desc, queue_index down, queue_index up, Lng32 num_buffers, ULng32 buffer_size ) : ComTdbExeUtil(ComTdbExeUtil::HBASE_UNLOAD_, NULL, 0, (Int16)SQLCHARSETCODE_UNKNOWN, tableName, tableNameLen, NULL, 0, NULL, 0, NULL, work_cri_desc, work_atp_index, given_cri_desc, returned_cri_desc, down, up, num_buffers, buffer_size), uldQuery_(uldStmtStr), flags_(0), compressType_(0), extractLocation_(extractLocation), scanType_(0), snapshotSuffix_(NULL) { setNodeType(ComTdb::ex_HBASE_UNLOAD); } Long ComTdbExeUtilHBaseBulkUnLoad::pack(void * space) { if (uldQuery_) uldQuery_.pack(space); if (mergePath_) mergePath_.pack(space); if (extractLocation_) extractLocation_.pack(space); if (snapshotSuffix_) snapshotSuffix_.pack(space); return ComTdbExeUtil::pack(space); } Lng32 ComTdbExeUtilHBaseBulkUnLoad::unpack(void * base, void * reallocator) { if(uldQuery_.unpack(base)) return -1; if(mergePath_.unpack(base)) return -1; if(extractLocation_.unpack(base)) return -1; if(snapshotSuffix_.unpack(base)) return -1; return ComTdbExeUtil::unpack(base, reallocator); } void ComTdbExeUtilHBaseBulkUnLoad::displayContents(Space * space,ULng32 flag) { ComTdb::displayContents(space,flag & 0xFFFFFFFE); if(flag & 0x00000008) { char buf[1000]; str_sprintf(buf, "\nFor ComTdbExeUtilHbaseUnLoad :"); space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short)); if (getTableName() != NULL) { str_sprintf(buf,"Tablename = %s ",getTableName()); space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short)); } if (uldQuery_) { char query[400]; if (strlen(uldQuery_) > 390) { strncpy(query, uldQuery_, 390); query[390] = 0; strcat(query, "..."); } else strcpy(query, uldQuery_); str_sprintf(buf,"uld Query = %s ",query); space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short)); } /////NEED TO ADD rthe remaning INFO } if (flag & 0x00000001) { displayExpression(space,flag); displayChildren(space,flag); } } ComTdbExeUtilRegionStats::ComTdbExeUtilRegionStats ( char * tableName, ex_expr_base * input_expr, ULng32 input_rowlen, ex_expr_base * scan_expr, ex_cri_desc * work_cri_desc, const unsigned short work_atp_index, ex_cri_desc * given_cri_desc, ex_cri_desc * returned_cri_desc, queue_index down, queue_index up, Lng32 num_buffers, ULng32 buffer_size) : ComTdbExeUtil(ComTdbExeUtil::REGION_STATS_, NULL, 0, (Int16)SQLCHARSETCODE_UNKNOWN, tableName, strlen(tableName), input_expr, input_rowlen, NULL, 0, scan_expr, work_cri_desc, work_atp_index, given_cri_desc, returned_cri_desc, down, up, num_buffers, buffer_size), flags_(0) { setNodeType(ComTdb::ex_REGION_STATS); } ComTdbExeUtilLobInfo::ComTdbExeUtilLobInfo ( char * tableName, Int64 objectUID, Lng32 numLOBs, char *lobColArray, char * lobNumArray, char * lobLocArray, char *lobTypeArray, Int32 hdfsPort, char *hdfsServer, NABoolean tableFormat, ex_cri_desc * work_cri_desc, const unsigned short work_atp_index, ex_cri_desc * 
given_cri_desc, ex_cri_desc * returned_cri_desc, queue_index down, queue_index up, Lng32 num_buffers, ULng32 buffer_size) : ComTdbExeUtil(ComTdbExeUtil::LOB_INFO_, NULL, 0, (Int16)SQLCHARSETCODE_UNKNOWN, tableName, strlen(tableName), NULL, 0, NULL, 0, NULL, work_cri_desc, work_atp_index, given_cri_desc, returned_cri_desc, down, up, num_buffers, buffer_size), flags_(0), objectUID_(objectUID), numLOBs_(numLOBs), lobColArray_(lobColArray), lobNumArray_(lobNumArray), lobLocArray_(lobLocArray), lobTypeArray_(lobTypeArray), hdfsPort_(0), hdfsServer_(hdfsServer), tableFormat_(tableFormat) { setNodeType(ComTdb::ex_LOB_INFO); } Long ComTdbExeUtilLobInfo::pack(void * space) { if (lobColArray_) lobColArray_.pack(space); if (lobNumArray_) lobNumArray_.pack(space); if (lobLocArray_) lobLocArray_.pack(space); if(lobTypeArray_) lobTypeArray_.pack(space); if (hdfsServer_) hdfsServer_.pack(space); return ComTdbExeUtil::pack(space); } Lng32 ComTdbExeUtilLobInfo::unpack(void * base, void * reallocator) { if (lobColArray_.unpack(base)) return -1; if(lobNumArray_.unpack(base)) return -1; if(lobLocArray_.unpack(base)) return -1; if(lobTypeArray_.unpack(base)) return -1; if(hdfsServer_.unpack(base)) return -1; return ComTdbExeUtil::unpack(base, reallocator); }
1
14,451
I'm curious why the table name is bound at compile time. Is it just to save the table create/drop overhead in a prepare-once-execute-many situation?
apache-trafodion
cpp
@@ -58,10 +58,6 @@ public class ProcessJob extends AbstractProcessJob { public ProcessJob(final String jobId, final Props sysProps, final Props jobProps, final Logger log) { super(jobId, sysProps, jobProps, log); - - // this is in line with what other job types (hadoopJava, spark, pig, hive) - // is doing - jobProps.put(CommonJobProperties.JOB_ID, jobId); } @Override
1
/* * Copyright 2012 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.jobExecutor; import java.io.File; import java.util.ArrayList; import java.util.List; import java.util.Map; import java.util.concurrent.TimeUnit; import org.apache.log4j.Logger; import azkaban.flow.CommonJobProperties; import azkaban.jobExecutor.utils.process.AzkabanProcess; import azkaban.jobExecutor.utils.process.AzkabanProcessBuilder; import azkaban.utils.Pair; import azkaban.utils.Props; import azkaban.utils.SystemMemoryInfo; /** * A job that runs a simple unix command */ public class ProcessJob extends AbstractProcessJob { public static final String COMMAND = "command"; private static final long KILL_TIME_MS = 5000; private volatile AzkabanProcess process; private static final String MEMCHECK_ENABLED = "memCheck.enabled"; private static final String MEMCHECK_FREEMEMDECRAMT = "memCheck.freeMemDecrAmt"; public static final String AZKABAN_MEMORY_CHECK = "azkaban.memory.check"; public static final String NATIVE_LIB_FOLDER = "azkaban.native.lib"; public static final String EXECUTE_AS_USER = "execute.as.user"; public static final String USER_TO_PROXY = "user.to.proxy"; public static final String KRB5CCNAME = "KRB5CCNAME"; public ProcessJob(final String jobId, final Props sysProps, final Props jobProps, final Logger log) { super(jobId, sysProps, jobProps, log); // this is in line with what other job types (hadoopJava, spark, pig, hive) // is doing jobProps.put(CommonJobProperties.JOB_ID, jobId); } @Override public void run() throws Exception { try { resolveProps(); } catch (Exception e) { handleError("Bad property definition! 
" + e.getMessage(), e); } if (sysProps.getBoolean(MEMCHECK_ENABLED, true) && jobProps.getBoolean(AZKABAN_MEMORY_CHECK, true)) { long freeMemDecrAmt = sysProps.getLong(MEMCHECK_FREEMEMDECRAMT, 0); Pair<Long, Long> memPair = getProcMemoryRequirement(); boolean isMemGranted = SystemMemoryInfo.canSystemGrantMemory(memPair.getFirst(), memPair.getSecond(), freeMemDecrAmt); if (!isMemGranted) { throw new Exception( String .format( "Cannot request memory (Xms %d kb, Xmx %d kb) from system for job %s", memPair.getFirst(), memPair.getSecond(), getId())); } } List<String> commands = null; try { commands = getCommandList(); } catch (Exception e) { handleError("Job set up failed " + e.getCause(), e); } long startMs = System.currentTimeMillis(); if (commands == null) { handleError("There are no commands to execute", null); } info(commands.size() + " commands to execute."); File[] propFiles = initPropsFiles(); // change krb5ccname env var so that each job execution gets its own cache Map<String, String> envVars = getEnvironmentVariables(); envVars.put(KRB5CCNAME, getKrb5ccname(jobProps)); // determine whether to run as Azkaban or run as effectiveUser, // by default, run as effectiveUser String executeAsUserBinaryPath = null; String effectiveUser = null; boolean isExecuteAsUser = sysProps.getBoolean(EXECUTE_AS_USER, true); // nativeLibFolder specifies the path for execute-as-user file, // which will change user from Azkaban to effectiveUser if (isExecuteAsUser) { String nativeLibFolder = sysProps.getString(NATIVE_LIB_FOLDER); executeAsUserBinaryPath = String.format("%s/%s", nativeLibFolder, "execute-as-user"); effectiveUser = getEffectiveUser(jobProps); if ("root".equals(effectiveUser)) { throw new RuntimeException( "Not permitted to proxy as root through Azkaban"); } } for (String command : commands) { AzkabanProcessBuilder builder = null; if (isExecuteAsUser) { command = String.format("%s %s %s", executeAsUserBinaryPath, effectiveUser, command); info("Command: " + command); builder = new AzkabanProcessBuilder(partitionCommandLine(command)) .setEnv(envVars).setWorkingDir(getCwd()).setLogger(getLog()) .enableExecuteAsUser().setExecuteAsUserBinaryPath(executeAsUserBinaryPath) .setEffectiveUser(effectiveUser); } else { info("Command: " + command); builder = new AzkabanProcessBuilder(partitionCommandLine(command)) .setEnv(envVars).setWorkingDir(getCwd()).setLogger(getLog()); } if (builder.getEnv().size() > 0) { info("Environment variables: " + builder.getEnv()); } info("Working directory: " + builder.getWorkingDir()); // print out the Job properties to the job log. this.logJobProperties(); boolean success = false; this.process = builder.build(); try { this.process.run(); success = true; } catch (Throwable e) { for (File file : propFiles) if (file != null && file.exists()) file.delete(); throw new RuntimeException(e); } finally { this.process = null; info("Process completed " + (success ? "successfully" : "unsuccessfully") + " in " + ((System.currentTimeMillis() - startMs) / 1000) + " seconds."); } } // Get the output properties from this job. generateProperties(propFiles[1]); } /** * <pre> * This method extracts the kerberos ticket cache file name from the jobprops. * This method will ensure that each job execution will have its own kerberos ticket cache file * Given that the code only sets an environmental variable, the number of files created corresponds * to the number of processes that are doing kinit in their flow, which should not be an inordinately * high number. 
* </pre> * * @return file name: the kerberos ticket cache file to use */ private String getKrb5ccname(Props jobProps) { String effectiveUser = getEffectiveUser(jobProps); String projectName = jobProps.getString(CommonJobProperties.PROJECT_NAME).replace(" ", "_"); String flowId = jobProps.getString(CommonJobProperties.FLOW_ID).replace(" ", "_"); String jobId = jobProps.getString(CommonJobProperties.JOB_ID).replace(" ", "_"); // execId should be an int and should not have space in it, ever String execId = jobProps.getString(CommonJobProperties.EXEC_ID); String krb5ccname = String.format("/tmp/krb5cc__%s__%s__%s__%s__%s", projectName, flowId, jobId, execId, effectiveUser); return krb5ccname; } /** * <pre> * Determines what user id should the process job run as, in the following order of precedence: * 1. USER_TO_PROXY * 2. SUBMIT_USER * </pre> * * @param jobProps * @return the user that Azkaban is going to execute as */ private String getEffectiveUser(Props jobProps) { String effectiveUser = null; if (jobProps.containsKey(USER_TO_PROXY)) { effectiveUser = jobProps.getString(USER_TO_PROXY); } else if (jobProps.containsKey(CommonJobProperties.SUBMIT_USER)) { effectiveUser = jobProps.getString(CommonJobProperties.SUBMIT_USER); } else { throw new RuntimeException( "Internal Error: No user.to.proxy or submit.user in the jobProps"); } info("effective user is: " + effectiveUser); return effectiveUser; } /** * This is used to get the min/max memory size requirement by processes. * SystemMemoryInfo can use the info to determine if the memory request can be * fulfilled. For Java process, this should be Xms/Xmx setting. * * @return pair of min/max memory size */ protected Pair<Long, Long> getProcMemoryRequirement() throws Exception { return new Pair<Long, Long>(0L, 0L); } protected void handleError(String errorMsg, Exception e) throws Exception { error(errorMsg); if (e != null) { throw new Exception(errorMsg, e); } else { throw new Exception(errorMsg); } } protected List<String> getCommandList() { List<String> commands = new ArrayList<String>(); commands.add(jobProps.getString(COMMAND)); for (int i = 1; jobProps.containsKey(COMMAND + "." + i); i++) { commands.add(jobProps.getString(COMMAND + "." + i)); } return commands; } @Override public void cancel() throws InterruptedException { if (process == null) throw new IllegalStateException("Not started."); boolean killed = process.softKill(KILL_TIME_MS, TimeUnit.MILLISECONDS); if (!killed) { warn("Kill with signal TERM failed. Killing with KILL signal."); process.hardKill(); } } @Override public double getProgress() { return process != null && process.isComplete() ? 1.0 : 0.0; } public int getProcessId() { return process.getProcessId(); } public String getPath() { return _jobPath == null ? "" : _jobPath; } /** * Splits the command into a unix like command line structure. Quotes and * single quotes are treated as nested strings. 
* * @param command * @return */ public static String[] partitionCommandLine(final String command) { ArrayList<String> commands = new ArrayList<String>(); int index = 0; StringBuffer buffer = new StringBuffer(command.length()); boolean isApos = false; boolean isQuote = false; while (index < command.length()) { char c = command.charAt(index); switch (c) { case ' ': if (!isQuote && !isApos) { String arg = buffer.toString(); buffer = new StringBuffer(command.length() - index); if (arg.length() > 0) { commands.add(arg); } } else { buffer.append(c); } break; case '\'': if (!isQuote) { isApos = !isApos; } else { buffer.append(c); } break; case '"': if (!isApos) { isQuote = !isQuote; } else { buffer.append(c); } break; default: buffer.append(c); } index++; } if (buffer.length() > 0) { String arg = buffer.toString(); commands.add(arg); } return commands.toArray(new String[commands.size()]); } }
1
12,423
Quick question: isn't ProcessJob used by all job types? Then why is JOB_ID not found?
azkaban-azkaban
java
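The review comment in the row above asks why `JOB_ID` would ever be missing if `ProcessJob` backs every job type. The dependency being probed is that `getKrb5ccname()` reads `CommonJobProperties.JOB_ID` from `jobProps`, while the patch removes the constructor line that injected it. Below is a minimal, hypothetical sketch of that dependency, not the real Azkaban API: a plain `Map` stands in for `azkaban.utils.Props`, and the `JOB_ID` key constant and class name are made up for illustration.

```java
import java.util.HashMap;
import java.util.Map;

// Toy model of the dependency the reviewer is probing: the kerberos cache
// name is derived from job properties, so "job.id" has to be present by the
// time getKrb5ccname() runs. (A plain Map stands in for azkaban.utils.Props.)
public class JobIdDependencySketch {

    // Stand-in for CommonJobProperties.JOB_ID.
    static final String JOB_ID = "job.id";

    // Simplified stand-in for ProcessJob.getKrb5ccname(): it needs job.id.
    static String krb5ccname(Map<String, String> jobProps) {
        String jobId = jobProps.get(JOB_ID);
        if (jobId == null) {
            throw new IllegalStateException("job.id missing from job properties");
        }
        return "/tmp/krb5cc__" + jobId.replace(" ", "_");
    }

    public static void main(String[] args) {
        Map<String, String> jobProps = new HashMap<>();

        // Before the patch, the ProcessJob constructor injected the id itself:
        //   jobProps.put(JOB_ID, jobId);
        // With that line removed, the call below only succeeds if some
        // upstream component (e.g. the flow runner) already set job.id.
        jobProps.put(JOB_ID, "myJob");

        System.out.println(krb5ccname(jobProps));
    }
}
```

If the upstream executor already sets `job.id` for every job, the removed line is redundant; the reviewer's question is essentially whether that guarantee holds for all job types.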
@@ -264,6 +264,15 @@ public class AccountSettings { s.put("trashFolderSelection", Settings.versions( new V(54, new EnumSetting<>(SpecialFolderSelection.class, SpecialFolderSelection.AUTOMATIC)) )); + s.put("resize_image_enabled", Settings.versions( + new V(55, new BooleanSetting(Account.DEFAULT_RESIZE_IMAGE_ENABLED)) + )); + s.put("resize_image_circumference", Settings.versions( + new V(55, new StringSetting(Integer.toString(Account.DEFAULT_RESIZE_IMAGE_CIRCUMFERENCE))) + )); + s.put("resize_image_quality", Settings.versions( + new V(55, new StringSetting(Integer.toString(Account.DEFAULT_RESIZE_IMAGE_QUALITY))) + )); // note that there is no setting for openPgpProvider, because this will have to be set up together // with the actual provider after import anyways.
1
package com.fsck.k9.preferences; import java.util.Collections; import java.util.HashMap; import java.util.LinkedHashMap; import java.util.Map; import java.util.Set; import java.util.TreeMap; import android.content.Context; import com.fsck.k9.Account; import com.fsck.k9.Account.DeletePolicy; import com.fsck.k9.Account.Expunge; import com.fsck.k9.Account.FolderMode; import com.fsck.k9.Account.MessageFormat; import com.fsck.k9.Account.QuoteStyle; import com.fsck.k9.Account.Searchable; import com.fsck.k9.Account.ShowPictures; import com.fsck.k9.Account.SortType; import com.fsck.k9.Account.SpecialFolderSelection; import com.fsck.k9.AccountPreferenceSerializer; import com.fsck.k9.DI; import com.fsck.k9.K9; import com.fsck.k9.core.R; import com.fsck.k9.mailstore.StorageManager; import com.fsck.k9.preferences.Settings.BooleanSetting; import com.fsck.k9.preferences.Settings.ColorSetting; import com.fsck.k9.preferences.Settings.EnumSetting; import com.fsck.k9.preferences.Settings.IntegerRangeSetting; import com.fsck.k9.preferences.Settings.InvalidSettingValueException; import com.fsck.k9.preferences.Settings.PseudoEnumSetting; import com.fsck.k9.preferences.Settings.SettingsDescription; import com.fsck.k9.preferences.Settings.SettingsUpgrader; import com.fsck.k9.preferences.Settings.StringSetting; import com.fsck.k9.preferences.Settings.V; public class AccountSettings { static final Map<String, TreeMap<Integer, SettingsDescription>> SETTINGS; private static final Map<Integer, SettingsUpgrader> UPGRADERS; static { Map<String, TreeMap<Integer, SettingsDescription>> s = new LinkedHashMap<>(); /* * When adding new settings here, be sure to increment {@link Settings.VERSION} * and use that for whatever you add here. */ s.put("alwaysBcc", Settings.versions( new V(11, new StringSetting("")) )); s.put("alwaysShowCcBcc", Settings.versions( new V(13, new BooleanSetting(false)) )); s.put("archiveFolderName", Settings.versions( new V(1, new StringSetting(SettingsUpgraderV53.FOLDER_NONE)), new V(53, new StringSetting(null)) )); s.put("autoExpandFolderName", Settings.versions( new V(1, new StringSetting("INBOX")) )); s.put("automaticCheckIntervalMinutes", Settings.versions( new V(1, new IntegerResourceSetting(-1, R.array.check_frequency_values)) )); s.put("chipColor", Settings.versions( new V(1, new ColorSetting(0xFF0000FF)) )); s.put("defaultQuotedTextShown", Settings.versions( new V(1, new BooleanSetting(AccountPreferenceSerializer.DEFAULT_QUOTED_TEXT_SHOWN)) )); s.put("deletePolicy", Settings.versions( new V(1, new DeletePolicySetting(DeletePolicy.NEVER)) )); s.put("displayCount", Settings.versions( new V(1, new IntegerResourceSetting(K9.DEFAULT_VISIBLE_LIMIT, R.array.display_count_values)) )); s.put("draftsFolderName", Settings.versions( new V(1, new StringSetting(SettingsUpgraderV53.FOLDER_NONE)), new V(53, new StringSetting(null)) )); s.put("expungePolicy", Settings.versions( new V(1, new StringResourceSetting(Expunge.EXPUNGE_IMMEDIATELY.name(), R.array.expunge_policy_values)) )); s.put("folderDisplayMode", Settings.versions( new V(1, new EnumSetting<>(FolderMode.class, FolderMode.NOT_SECOND_CLASS)) )); s.put("folderPushMode", Settings.versions( new V(1, new EnumSetting<>(FolderMode.class, FolderMode.FIRST_CLASS)) )); s.put("folderSyncMode", Settings.versions( new V(1, new EnumSetting<>(FolderMode.class, FolderMode.FIRST_CLASS)) )); s.put("folderTargetMode", Settings.versions( new V(1, new EnumSetting<>(FolderMode.class, FolderMode.NOT_SECOND_CLASS)) )); s.put("goToUnreadMessageSearch", Settings.versions( 
new V(1, new BooleanSetting(false)) )); s.put("idleRefreshMinutes", Settings.versions( new V(1, new IntegerResourceSetting(24, R.array.idle_refresh_period_values)) )); s.put("inboxFolderName", Settings.versions( new V(1, new StringSetting("INBOX")) )); s.put("led", Settings.versions( new V(1, new BooleanSetting(true)) )); s.put("ledColor", Settings.versions( new V(1, new ColorSetting(0xFF0000FF)) )); s.put("localStorageProvider", Settings.versions( new V(1, new StorageProviderSetting()) )); s.put("markMessageAsReadOnView", Settings.versions( new V(7, new BooleanSetting(true)) )); s.put("maxPushFolders", Settings.versions( new V(1, new IntegerRangeSetting(0, 100, 10)) )); s.put("maximumAutoDownloadMessageSize", Settings.versions( new V(1, new IntegerResourceSetting(32768, R.array.autodownload_message_size_values)) )); s.put("maximumPolledMessageAge", Settings.versions( new V(1, new IntegerResourceSetting(-1, R.array.message_age_values)) )); s.put("messageFormat", Settings.versions( new V(1, new EnumSetting<>(MessageFormat.class, AccountPreferenceSerializer.DEFAULT_MESSAGE_FORMAT)) )); s.put("messageFormatAuto", Settings.versions( new V(2, new BooleanSetting(AccountPreferenceSerializer.DEFAULT_MESSAGE_FORMAT_AUTO)) )); s.put("messageReadReceipt", Settings.versions( new V(1, new BooleanSetting(AccountPreferenceSerializer.DEFAULT_MESSAGE_READ_RECEIPT)) )); s.put("notifyMailCheck", Settings.versions( new V(1, new BooleanSetting(false)) )); s.put("notifyNewMail", Settings.versions( new V(1, new BooleanSetting(false)) )); s.put("folderNotifyNewMailMode", Settings.versions( new V(34, new EnumSetting<>(FolderMode.class, FolderMode.ALL)) )); s.put("notifySelfNewMail", Settings.versions( new V(1, new BooleanSetting(true)) )); s.put("pushPollOnConnect", Settings.versions( new V(1, new BooleanSetting(true)) )); s.put("quotePrefix", Settings.versions( new V(1, new StringSetting(AccountPreferenceSerializer.DEFAULT_QUOTE_PREFIX)) )); s.put("quoteStyle", Settings.versions( new V(1, new EnumSetting<>(QuoteStyle.class, AccountPreferenceSerializer.DEFAULT_QUOTE_STYLE)) )); s.put("replyAfterQuote", Settings.versions( new V(1, new BooleanSetting(AccountPreferenceSerializer.DEFAULT_REPLY_AFTER_QUOTE)) )); s.put("ring", Settings.versions( new V(1, new BooleanSetting(true)) )); s.put("ringtone", Settings.versions( new V(1, new RingtoneSetting("content://settings/system/notification_sound")) )); s.put("searchableFolders", Settings.versions( new V(1, new EnumSetting<>(Searchable.class, Searchable.ALL)) )); s.put("sentFolderName", Settings.versions( new V(1, new StringSetting(SettingsUpgraderV53.FOLDER_NONE)), new V(53, new StringSetting(null)) )); s.put("sortTypeEnum", Settings.versions( new V(9, new EnumSetting<>(SortType.class, Account.DEFAULT_SORT_TYPE)) )); s.put("sortAscending", Settings.versions( new V(9, new BooleanSetting(Account.DEFAULT_SORT_ASCENDING)) )); s.put("showPicturesEnum", Settings.versions( new V(1, new EnumSetting<>(ShowPictures.class, ShowPictures.NEVER)) )); s.put("signatureBeforeQuotedText", Settings.versions( new V(1, new BooleanSetting(false)) )); s.put("spamFolderName", Settings.versions( new V(1, new StringSetting(SettingsUpgraderV53.FOLDER_NONE)), new V(53, new StringSetting(null)) )); s.put("stripSignature", Settings.versions( new V(2, new BooleanSetting(AccountPreferenceSerializer.DEFAULT_STRIP_SIGNATURE)) )); s.put("subscribedFoldersOnly", Settings.versions( new V(1, new BooleanSetting(false)) )); s.put("syncRemoteDeletions", Settings.versions( new V(1, new BooleanSetting(true)) )); 
s.put("trashFolderName", Settings.versions( new V(1, new StringSetting(SettingsUpgraderV53.FOLDER_NONE)), new V(53, new StringSetting(null)) )); s.put("useCompression.MOBILE", Settings.versions( new V(1, new BooleanSetting(true)) )); s.put("useCompression.OTHER", Settings.versions( new V(1, new BooleanSetting(true)) )); s.put("useCompression.WIFI", Settings.versions( new V(1, new BooleanSetting(true)) )); s.put("vibrate", Settings.versions( new V(1, new BooleanSetting(false)) )); s.put("vibratePattern", Settings.versions( new V(1, new IntegerResourceSetting(0, R.array.vibrate_pattern_values)) )); s.put("vibrateTimes", Settings.versions( new V(1, new IntegerResourceSetting(5, R.array.vibrate_times_label)) )); s.put("allowRemoteSearch", Settings.versions( new V(18, new BooleanSetting(true)) )); s.put("remoteSearchNumResults", Settings.versions( new V(18, new IntegerResourceSetting(AccountPreferenceSerializer.DEFAULT_REMOTE_SEARCH_NUM_RESULTS, R.array.remote_search_num_results_values)) )); s.put("remoteSearchFullText", Settings.versions( new V(18, new BooleanSetting(false)) )); s.put("notifyContactsMailOnly", Settings.versions( new V(42, new BooleanSetting(false)) )); s.put("openPgpHideSignOnly", Settings.versions( new V(50, new BooleanSetting(true)) )); s.put("openPgpEncryptSubject", Settings.versions( new V(51, new BooleanSetting(true)) )); s.put("openPgpEncryptAllDrafts", Settings.versions( new V(55, new BooleanSetting(true)) )); s.put("autocryptMutualMode", Settings.versions( new V(50, new BooleanSetting(false)) )); s.put("uploadSentMessages", Settings.versions( new V(52, new BooleanSetting(true)) )); s.put("archiveFolderSelection", Settings.versions( new V(54, new EnumSetting<>(SpecialFolderSelection.class, SpecialFolderSelection.AUTOMATIC)) )); s.put("draftsFolderSelection", Settings.versions( new V(54, new EnumSetting<>(SpecialFolderSelection.class, SpecialFolderSelection.AUTOMATIC)) )); s.put("sentFolderSelection", Settings.versions( new V(54, new EnumSetting<>(SpecialFolderSelection.class, SpecialFolderSelection.AUTOMATIC)) )); s.put("spamFolderSelection", Settings.versions( new V(54, new EnumSetting<>(SpecialFolderSelection.class, SpecialFolderSelection.AUTOMATIC)) )); s.put("trashFolderSelection", Settings.versions( new V(54, new EnumSetting<>(SpecialFolderSelection.class, SpecialFolderSelection.AUTOMATIC)) )); // note that there is no setting for openPgpProvider, because this will have to be set up together // with the actual provider after import anyways. 
SETTINGS = Collections.unmodifiableMap(s); Map<Integer, SettingsUpgrader> u = new HashMap<>(); u.put(53, new SettingsUpgraderV53()); u.put(54, new SettingsUpgraderV54()); UPGRADERS = Collections.unmodifiableMap(u); } static Map<String, Object> validate(int version, Map<String, String> importedSettings, boolean useDefaultValues) { return Settings.validate(version, SETTINGS, importedSettings, useDefaultValues); } public static Set<String> upgrade(int version, Map<String, Object> validatedSettings) { return Settings.upgrade(version, UPGRADERS, SETTINGS, validatedSettings); } public static Map<String, String> convert(Map<String, Object> settings) { return Settings.convert(settings, SETTINGS); } static Map<String, String> getAccountSettings(Storage storage, String uuid) { Map<String, String> result = new HashMap<>(); String prefix = uuid + "."; for (String key : SETTINGS.keySet()) { String value = storage.getString(prefix + key, null); if (value != null) { result.put(key, value); } } return result; } private static class IntegerResourceSetting extends PseudoEnumSetting<Integer> { private final Context context = DI.get(Context.class); private final Map<Integer, String> mapping; IntegerResourceSetting(int defaultValue, int resId) { super(defaultValue); Map<Integer, String> mapping = new HashMap<>(); String[] values = context.getResources().getStringArray(resId); for (String value : values) { int intValue = Integer.parseInt(value); mapping.put(intValue, value); } this.mapping = Collections.unmodifiableMap(mapping); } @Override protected Map<Integer, String> getMapping() { return mapping; } @Override public Integer fromString(String value) throws InvalidSettingValueException { try { return Integer.parseInt(value); } catch (NumberFormatException e) { throw new InvalidSettingValueException(); } } } private static class StringResourceSetting extends PseudoEnumSetting<String> { private final Context context = DI.get(Context.class); private final Map<String, String> mapping; StringResourceSetting(String defaultValue, int resId) { super(defaultValue); Map<String, String> mapping = new HashMap<>(); String[] values = context.getResources().getStringArray(resId); for (String value : values) { mapping.put(value, value); } this.mapping = Collections.unmodifiableMap(mapping); } @Override protected Map<String, String> getMapping() { return mapping; } @Override public String fromString(String value) throws InvalidSettingValueException { if (!mapping.containsKey(value)) { throw new InvalidSettingValueException(); } return value; } } private static class RingtoneSetting extends SettingsDescription<String> { RingtoneSetting(String defaultValue) { super(defaultValue); } @Override public String fromString(String value) { //TODO: add validation return value; } } private static class StorageProviderSetting extends SettingsDescription<String> { private final Context context = DI.get(Context.class); StorageProviderSetting() { super(null); } @Override public String getDefaultValue() { return StorageManager.getInstance(context).getDefaultProviderId(); } @Override public String fromString(String value) { StorageManager storageManager = StorageManager.getInstance(context); Map<String, String> providers = storageManager.getAvailableProviders(); if (providers.containsKey(value)) { return value; } throw new RuntimeException("Validation failed"); } } private static class DeletePolicySetting extends PseudoEnumSetting<Integer> { private Map<Integer, String> mapping; DeletePolicySetting(DeletePolicy defaultValue) { 
super(defaultValue.setting); Map<Integer, String> mapping = new HashMap<>(); mapping.put(DeletePolicy.NEVER.setting, "NEVER"); mapping.put(DeletePolicy.ON_DELETE.setting, "DELETE"); mapping.put(DeletePolicy.MARK_AS_READ.setting, "MARK_AS_READ"); this.mapping = Collections.unmodifiableMap(mapping); } @Override protected Map<Integer, String> getMapping() { return mapping; } @Override public Integer fromString(String value) throws InvalidSettingValueException { try { Integer deletePolicy = Integer.parseInt(value); if (mapping.containsKey(deletePolicy)) { return deletePolicy; } } catch (NumberFormatException e) { /* do nothing */ } throw new InvalidSettingValueException(); } } /** * Upgrades settings from version 52 to 53 * * Replace folder entries of "-NONE-" with {@code null}. */ private static class SettingsUpgraderV53 implements SettingsUpgrader { private static final String FOLDER_NONE = "-NONE-"; @Override public Set<String> upgrade(Map<String, Object> settings) { upgradeFolderEntry(settings, "archiveFolderName"); upgradeFolderEntry(settings, "autoExpandFolderName"); upgradeFolderEntry(settings, "draftsFolderName"); upgradeFolderEntry(settings, "sentFolderName"); upgradeFolderEntry(settings, "spamFolderName"); upgradeFolderEntry(settings, "trashFolderName"); return null; } private void upgradeFolderEntry(Map<String, Object> settings, String key) { String archiveFolderName = (String) settings.get(key); if (FOLDER_NONE.equals(archiveFolderName)) { settings.put(key, null); } } } /** * Upgrades settings from version 53 to 54 * * Inserts folder selection entries with a value of "MANUAL" */ private static class SettingsUpgraderV54 implements SettingsUpgrader { private static final String FOLDER_SELECTION_MANUAL = "MANUAL"; @Override public Set<String> upgrade(Map<String, Object> settings) { settings.put("archiveFolderSelection", FOLDER_SELECTION_MANUAL); settings.put("draftsFolderSelection", FOLDER_SELECTION_MANUAL); settings.put("sentFolderSelection", FOLDER_SELECTION_MANUAL); settings.put("spamFolderSelection", FOLDER_SELECTION_MANUAL); settings.put("trashFolderSelection", FOLDER_SELECTION_MANUAL); return null; } } }
1
17,351
Looks like you forgot to actually increase the version.
k9mail-k-9
java
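The comment in the row above refers to the maintenance note inside `AccountSettings` itself ("When adding new settings here, be sure to increment {@link Settings.VERSION} and use that for whatever you add here"): the patch registers the new `resize_image_*` entries with `new V(55, ...)` but does not show a corresponding bump of `Settings.VERSION`. The sketch below shows the shape of the missing change; the field name comes from that Javadoc reference, but its exact declaration (visibility, prior value, surrounding class) is an assumption.

```java
// Hypothetical excerpt of com.fsck.k9.preferences.Settings: only the VERSION
// constant is shown and the rest of the class is omitted.
public class Settings {
    // The AccountSettings map comment asks contributors to "increment
    // Settings.VERSION and use that" for new entries. The patch registers the
    // resize_image_* settings with new V(55, ...), so VERSION should be raised
    // to 55 in the same change.
    public static final int VERSION = 55;
}
```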
@@ -43,5 +43,13 @@ namespace OpenTelemetry.Instrumentation.AspNet /// The type of this object depends on the event, which is given by the above parameter.</para> /// </remarks> public Action<Activity, string, object> Enrich { get; set; } + + /// <summary> + /// Gets or sets a value indicating whether to record http server attributes or not. + /// </summary> + /// <remarks> + /// https://github.com/open-telemetry/opentelemetry-specification/blob/a2758014f408f64ff84728918d671ee3fdab2225/specification/trace/semantic_conventions/http.md#http-server-semantic-conventions. + /// </remarks> + public bool RecordHttpServerAttributes { get; set; } } }
1
// <copyright file="AspNetInstrumentationOptions.cs" company="OpenTelemetry Authors"> // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // </copyright> using System; using System.Diagnostics; using System.Web; namespace OpenTelemetry.Instrumentation.AspNet { /// <summary> /// Options for ASP.NET instrumentation. /// </summary> public class AspNetInstrumentationOptions { /// <summary> /// Gets or sets a Filter function to filter instrumentation for requests on a per request basis. /// The Filter gets the HttpContext, and should return a boolean. /// If Filter returns true, the request is collected. /// If Filter returns false or throw exception, the request is filtered out. /// </summary> public Func<HttpContext, bool> Filter { get; set; } /// <summary> /// Gets or sets an action to enrich an Activity. /// </summary> /// <remarks> /// <para><see cref="Activity"/>: the activity being enriched.</para> /// <para>string: the name of the event.</para> /// <para>object: the raw object from which additional information can be extracted to enrich the activity. /// The type of this object depends on the event, which is given by the above parameter.</para> /// </remarks> public Action<Activity, string, object> Enrich { get; set; } } }
1
18,226
I think it might be helpful if we add `Default value: False.` at the end of the summary.
open-telemetry-opentelemetry-dotnet
.cs
@@ -80,7 +80,7 @@ module.exports = function(realmConstructor) { // result in sync rejecting the writes. `_waitForDownload` ensures that the session is kept // alive until our callback has returned, which prevents it from being torn down and recreated // when we close the schemaless Realm and open it with the correct schema. - if (!config.sync.fullSynchronization && config.schema === undefined) { + if (config.sync.fullSynchronized !== undefined && !config.sync.fullSynchronization && config.schema === undefined) { throw new Error('Query-based sync requires a schema.'); } let syncSession;
1
//////////////////////////////////////////////////////////////////////////// // // Copyright 2016 Realm Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // //////////////////////////////////////////////////////////////////////////// 'use strict'; const URL = require('url-parse'); let getOwnPropertyDescriptors = Object.getOwnPropertyDescriptors || function(obj) { return Object.getOwnPropertyNames(obj).reduce(function (descriptors, name) { descriptors[name] = Object.getOwnPropertyDescriptor(obj, name); return descriptors; }, {}); }; function setConstructorOnPrototype(klass) { if (klass.prototype.constructor !== klass) { Object.defineProperty(klass.prototype, 'constructor', { value: klass, configurable: true, writable: true }); } } // Return a configuration usable by `Realm.open` when waiting for a download. // It must have caching disabled, and no schema or schema version specified. function waitForDownloadConfig(config) { if (!config) { return {_cache: false}; } if (typeof config == 'string') { return {path: config, _cache: false}; } if (typeof config == 'object') { return Object.assign({}, config, {schema: undefined, schemaVersion: undefined, _cache: false}); } // Unknown type. Pass the config through. return config; } module.exports = function(realmConstructor) { // Add the specified Array methods to the Collection prototype. Object.defineProperties(realmConstructor.Collection.prototype, require('./collection-methods')); setConstructorOnPrototype(realmConstructor.Collection); setConstructorOnPrototype(realmConstructor.List); setConstructorOnPrototype(realmConstructor.Results); setConstructorOnPrototype(realmConstructor.Object); //Add async open API Object.defineProperties(realmConstructor, getOwnPropertyDescriptors({ open(config) { // If no config is defined, we should just open the default realm if (config === undefined) { config = {}; } // For local Realms we open the Realm and return it in a resolved Promise. if (!("sync" in config)) { let promise = Promise.resolve(new realmConstructor(config)); promise.progress = (callback) => { }; return promise; } // For synced Realms we open the Realm without specifying the schema and then wait until // the Realm has finished its initial sync with the server. We then reopen it with the correct // schema. This avoids writing the schema to a potentially read-only Realm file, which would // result in sync rejecting the writes. `_waitForDownload` ensures that the session is kept // alive until our callback has returned, which prevents it from being torn down and recreated // when we close the schemaless Realm and open it with the correct schema. 
if (!config.sync.fullSynchronization && config.schema === undefined) { throw new Error('Query-based sync requires a schema.'); } let syncSession; let promise = new Promise((resolve, reject) => { let realm = new realmConstructor(waitForDownloadConfig(config)); realm._waitForDownload( (session) => { syncSession = session; }, (error) => { realm.close(); if (error) { setTimeout(() => { reject(error); }, 1); } else { try { let syncedRealm = new realmConstructor(config); setTimeout(() => { resolve(syncedRealm); }, 1); } catch (e) { reject(e); } } }); }); promise.progress = (callback) => { if (syncSession) { syncSession.addProgressNotification('download', 'forCurrentlyOutstandingWork', callback); } return promise; }; return promise; }, openAsync(config, callback, progressCallback) { const message = "Realm.openAsync is now deprecated in favor of Realm.open. This function will be removed in future versions."; (console.warn || console.log).call(console, message); let promise = this.open(config) if (progressCallback) { promise.progress(progressCallback) } promise.then(realm => { callback(null, realm) }).catch(error => { callback(error); }); }, createTemplateObject(objectSchema) { let obj = {}; for (let key in objectSchema.properties) { let type; if (typeof objectSchema.properties[key] === 'string' || objectSchema.properties[key] instanceof String) { // Simple declaration of the type type = objectSchema.properties[key]; } else { // Advanced property setup const property = objectSchema.properties[key]; // if optional is set, it wil take precedence over any `?` set on the type parameter if (property.optional === true) { continue; } // If a default value is explicitly set, always set the property if (property.default !== undefined) { obj[key] = property.default; continue; } type = property.type; } // Set the default value for all required primitive types. 
// Lists are always treated as empty if not specified and references to objects are always optional switch (type) { case 'bool': obj[key] = false; break; case 'int': obj[key] = 0; break; case 'float': obj[key] = 0.0; break; case 'double': obj[key] = 0.0; break; case 'string': obj[key] = ""; break; case 'data': obj[key] = new ArrayBuffer(0); break; case 'date': obj[key] = new Date(0); break; } } return obj; } })); // Add sync methods if (realmConstructor.Sync) { let userMethods = require('./user-methods'); Object.defineProperties(realmConstructor.Sync.User, getOwnPropertyDescriptors(userMethods.static)); Object.defineProperties(realmConstructor.Sync.User.prototype, getOwnPropertyDescriptors(userMethods.instance)); Object.defineProperty(realmConstructor.Sync.User, '_realmConstructor', { value: realmConstructor }); realmConstructor.Sync.AuthError = require('./errors').AuthError; if (realmConstructor.Sync.removeAllListeners) { process.on('exit', realmConstructor.Sync.removeAllListeners); process.on('SIGINT', function () { realmConstructor.Sync.removeAllListeners(); process.exit(2); }); process.on('uncaughtException', function(e) { realmConstructor.Sync.removeAllListeners(); /* eslint-disable no-console */ console.log(e.stack); process.exit(99); }); } setConstructorOnPrototype(realmConstructor.Sync.User); setConstructorOnPrototype(realmConstructor.Sync.Session); // A configuration for a default Realm realmConstructor.automaticSyncConfiguration = function() { let user; if (arguments.length === 0) { let users = this.Sync.User.all; let identities = Object.keys(users); if (identities.length === 1) { user = users[identities[0]]; } else { new Error(`One and only one user should be logged in but found ${users.length} users.`); } } else if (arguments.length === 1) { user = arguments[0]; } else { new Error(`Zero or one argument expected.`); } let url = new URL(user.server); let secure = (url.protocol === 'https:')?'s':''; let port = (url.port === undefined)?'9080':url.port let realmUrl = `realm${secure}://${url.hostname}:${port}/default`; let config = { sync: { user: user, url: realmUrl, } }; return config; } if (realmConstructor.Sync._setFeatureToken) { realmConstructor.Sync.setFeatureToken = function(featureToken) { console.log('Realm.Sync.setFeatureToken() is deprecated and you can remove any calls to it.'); } } // Keep these value in sync with subscription_state.hpp realmConstructor.Sync.SubscriptionState = { Error: -1, // An error occurred while creating or processing the partial sync subscription. Creating: 2, // The subscription is being created. Pending: 0, // The subscription was created, but has not yet been processed by the sync server. Complete: 1, // The subscription has been processed by the sync server and data is being synced to the device. Invalidated: 3, // The subscription has been removed. 
}; realmConstructor.Sync.ConnectionState = { Disconnected: "disconnected", Connecting: "connecting", Connected: "connected", } // Define the permission schemas as constructors so that they can be // passed into directly to functions which want object type names const permissionsSchema = Object.freeze({ Class: function() {}, Permission: function() {}, Realm: function() {}, Role: function() {}, User: function() {}, }); permissionsSchema.Permission.schema = Object.freeze({ name: '__Permission', properties: { role: '__Role', canRead: {type: 'bool', default: false}, canUpdate: {type: 'bool', default: false}, canDelete: {type: 'bool', default: false}, canSetPermissions: {type: 'bool', default: false}, canQuery: {type: 'bool', default: false}, canCreate: {type: 'bool', default: false}, canModifySchema: {type: 'bool', default: false}, } }); permissionsSchema.User.schema = Object.freeze({ name: '__User', primaryKey: 'id', properties: { id: 'string', role: '__Role' } }); permissionsSchema.Role.schema = Object.freeze({ name: '__Role', primaryKey: 'name', properties: { name: 'string', members: '__User[]' } }); permissionsSchema.Class.schema = Object.freeze({ name: '__Class', primaryKey: 'name', properties: { name: 'string', permissions: '__Permission[]' } }); permissionsSchema.Realm.schema = Object.freeze({ name: '__Realm', primaryKey: 'id', properties: { id: 'int', permissions: '__Permission[]' } }); if (!realmConstructor.Permissions) { Object.defineProperty(realmConstructor, 'Permissions', { value: permissionsSchema, configurable: false }); } } // TODO: Remove this now useless object. var types = Object.freeze({ 'BOOL': 'bool', 'INT': 'int', 'FLOAT': 'float', 'DOUBLE': 'double', 'STRING': 'string', 'DATE': 'date', 'DATA': 'data', 'OBJECT': 'object', 'LIST': 'list', }); Object.defineProperty(realmConstructor, 'Types', { get: function() { if (typeof console != 'undefined') { /* global console */ /* eslint-disable no-console */ var stack = new Error().stack.split("\n").slice(2).join("\n"); var msg = '`Realm.Types` is deprecated! Please specify the type name as lowercase string instead!\n'+stack; if (console.warn != undefined) { console.warn(msg); } else { console.log(msg); } /* eslint-enable no-console */ } return types; }, configurable: true }); }
1
17,124
I think the check should be `config.sync.fullSynchronization === false` - otherwise this will get triggered even when full sync is `true`.
realm-realm-js
js
@@ -924,8 +924,11 @@ func (a *WebAPI) GenerateApplicationSealedSecret(ctx context.Context, req *webse return nil, err } - sse := piped.SealedSecretEncryption - if sse == nil { + se := piped.SecretEncryption + if se == nil { + se = piped.SealedSecretEncryption + } + if se == nil { return nil, status.Error(codes.FailedPrecondition, "The piped does not contain the encryption configuration") }
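A minimal, self-contained Go sketch of the fallback pattern this patch introduces: prefer the newer SecretEncryption block and only fall back to the deprecated SealedSecretEncryption block, failing when neither is set. The Piped and encryption types here are simplified stand-ins for illustration, not the real pipe-cd model or webservice definitions.

package main

import (
	"errors"
	"fmt"
)

type encryptionConfig struct {
	Type      string
	PublicKey string
}

type piped struct {
	SecretEncryption       *encryptionConfig // newer configuration field
	SealedSecretEncryption *encryptionConfig // deprecated field kept for backward compatibility
}

// encryptionConfigFor returns the effective encryption configuration,
// preferring the new field and falling back to the deprecated one.
func encryptionConfigFor(p *piped) (*encryptionConfig, error) {
	se := p.SecretEncryption
	if se == nil {
		se = p.SealedSecretEncryption
	}
	if se == nil {
		return nil, errors.New("the piped does not contain the encryption configuration")
	}
	return se, nil
}

func main() {
	// A piped that was registered before the new field existed still works,
	// because the lookup falls back to the deprecated block.
	old := &piped{SealedSecretEncryption: &encryptionConfig{Type: "SEALING_KEY", PublicKey: "..."}}
	cfg, err := encryptionConfigFor(old)
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println("using encryption type:", cfg.Type)
}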
1
// Copyright 2020 The PipeCD Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package grpcapi import ( "context" "encoding/base64" "errors" "fmt" "strings" "time" "github.com/google/uuid" "go.uber.org/zap" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" "github.com/pipe-cd/pipe/pkg/app/api/applicationlivestatestore" "github.com/pipe-cd/pipe/pkg/app/api/commandstore" "github.com/pipe-cd/pipe/pkg/app/api/service/webservice" "github.com/pipe-cd/pipe/pkg/app/api/stagelogstore" "github.com/pipe-cd/pipe/pkg/cache" "github.com/pipe-cd/pipe/pkg/cache/memorycache" "github.com/pipe-cd/pipe/pkg/cache/rediscache" "github.com/pipe-cd/pipe/pkg/config" "github.com/pipe-cd/pipe/pkg/crypto" "github.com/pipe-cd/pipe/pkg/datastore" "github.com/pipe-cd/pipe/pkg/filestore" "github.com/pipe-cd/pipe/pkg/git" "github.com/pipe-cd/pipe/pkg/insight/insightstore" "github.com/pipe-cd/pipe/pkg/model" "github.com/pipe-cd/pipe/pkg/redis" "github.com/pipe-cd/pipe/pkg/rpc/rpcauth" ) type encrypter interface { Encrypt(text string) (string, error) } // WebAPI implements the behaviors for the gRPC definitions of WebAPI. type WebAPI struct { applicationStore datastore.ApplicationStore environmentStore datastore.EnvironmentStore deploymentStore datastore.DeploymentStore pipedStore datastore.PipedStore projectStore datastore.ProjectStore apiKeyStore datastore.APIKeyStore stageLogStore stagelogstore.Store applicationLiveStateStore applicationlivestatestore.Store commandStore commandstore.Store insightStore insightstore.Store encrypter encrypter appProjectCache cache.Cache deploymentProjectCache cache.Cache pipedProjectCache cache.Cache envProjectCache cache.Cache insightCache cache.Cache projectsInConfig map[string]config.ControlPlaneProject logger *zap.Logger } // NewWebAPI creates a new WebAPI instance. 
func NewWebAPI( ctx context.Context, ds datastore.DataStore, fs filestore.Store, sls stagelogstore.Store, alss applicationlivestatestore.Store, cmds commandstore.Store, is insightstore.Store, rd redis.Redis, projs map[string]config.ControlPlaneProject, encrypter encrypter, logger *zap.Logger) *WebAPI { a := &WebAPI{ applicationStore: datastore.NewApplicationStore(ds), environmentStore: datastore.NewEnvironmentStore(ds), deploymentStore: datastore.NewDeploymentStore(ds), pipedStore: datastore.NewPipedStore(ds), projectStore: datastore.NewProjectStore(ds), apiKeyStore: datastore.NewAPIKeyStore(ds), stageLogStore: sls, applicationLiveStateStore: alss, commandStore: cmds, insightStore: is, projectsInConfig: projs, encrypter: encrypter, appProjectCache: memorycache.NewTTLCache(ctx, 24*time.Hour, 3*time.Hour), deploymentProjectCache: memorycache.NewTTLCache(ctx, 24*time.Hour, 3*time.Hour), pipedProjectCache: memorycache.NewTTLCache(ctx, 24*time.Hour, 3*time.Hour), envProjectCache: memorycache.NewTTLCache(ctx, 24*time.Hour, 3*time.Hour), insightCache: rediscache.NewTTLCache(rd, 3*time.Hour), logger: logger.Named("web-api"), } return a } // Register registers all handling of this service into the specified gRPC server. func (a *WebAPI) Register(server *grpc.Server) { webservice.RegisterWebServiceServer(server, a) } func (a *WebAPI) AddEnvironment(ctx context.Context, req *webservice.AddEnvironmentRequest) (*webservice.AddEnvironmentResponse, error) { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return nil, err } env := model.Environment{ Id: uuid.New().String(), Name: req.Name, Desc: req.Desc, ProjectId: claims.Role.ProjectId, } err = a.environmentStore.AddEnvironment(ctx, &env) if errors.Is(err, datastore.ErrAlreadyExists) { return nil, status.Error(codes.AlreadyExists, "The environment already exists") } if err != nil { a.logger.Error("failed to create environment", zap.Error(err)) return nil, status.Error(codes.Internal, "Failed to create environment") } return &webservice.AddEnvironmentResponse{}, nil } func (a *WebAPI) UpdateEnvironmentDesc(ctx context.Context, req *webservice.UpdateEnvironmentDescRequest) (*webservice.UpdateEnvironmentDescResponse, error) { return nil, status.Error(codes.Unimplemented, "") } func (a *WebAPI) ListEnvironments(ctx context.Context, req *webservice.ListEnvironmentsRequest) (*webservice.ListEnvironmentsResponse, error) { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return nil, err } opts := datastore.ListOptions{ Filters: []datastore.ListFilter{ { Field: "ProjectId", Operator: datastore.OperatorEqual, Value: claims.Role.ProjectId, }, }, } envs, err := a.environmentStore.ListEnvironments(ctx, opts) if err != nil { a.logger.Error("failed to get environments", zap.Error(err)) return nil, status.Error(codes.Internal, "Failed to get environments") } return &webservice.ListEnvironmentsResponse{ Environments: envs, }, nil } func (a *WebAPI) EnableEnvironment(ctx context.Context, req *webservice.EnableEnvironmentRequest) (*webservice.EnableEnvironmentResponse, error) { if err := a.updateEnvironmentEnable(ctx, req.EnvironmentId, true); err != nil { return nil, err } return &webservice.EnableEnvironmentResponse{}, nil } func (a *WebAPI) DisableEnvironment(ctx context.Context, req *webservice.DisableEnvironmentRequest) (*webservice.DisableEnvironmentResponse, error) { if err := 
a.updateEnvironmentEnable(ctx, req.EnvironmentId, false); err != nil { return nil, err } return &webservice.DisableEnvironmentResponse{}, nil } // DeleteEnvironment deletes the given environment and all applications that belong to it. // It returns a FailedPrecondition error if any Piped is still using that environment. func (a *WebAPI) DeleteEnvironment(ctx context.Context, req *webservice.DeleteEnvironmentRequest) (*webservice.DeleteEnvironmentResponse, error) { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return nil, err } if err := a.validateEnvBelongsToProject(ctx, req.EnvironmentId, claims.Role.ProjectId); err != nil { return nil, err } // Check if no Piped has permission to the given environment. pipeds, err := a.pipedStore.ListPipeds(ctx, datastore.ListOptions{ Filters: []datastore.ListFilter{ { Field: "ProjectId", Operator: datastore.OperatorEqual, Value: claims.Role.ProjectId, }, { Field: "EnvIds", Operator: datastore.OperatorContains, Value: req.EnvironmentId, }, { Field: "Disabled", Operator: datastore.OperatorEqual, Value: false, }, }, }) if err != nil { a.logger.Error("failed to fetch Pipeds linked to the given environment", zap.String("env-id", req.EnvironmentId), zap.Error(err), ) return nil, status.Error(codes.Internal, "Failed to validate the deletion operation") } if len(pipeds) > 0 { pipedNames := make([]string, 0, len(pipeds)) for _, p := range pipeds { pipedNames = append(pipedNames, p.Name) } return nil, status.Errorf( codes.FailedPrecondition, "Found Pipeds linked the environment to be deleted. Please remove this environment from all Pipeds (%s) on the Piped settings page", strings.Join(pipedNames, ","), ) } // Delete all applications that belongs to the given env. 
apps, _, err := a.applicationStore.ListApplications(ctx, datastore.ListOptions{ Filters: []datastore.ListFilter{ { Field: "ProjectId", Operator: datastore.OperatorEqual, Value: claims.Role.ProjectId, }, { Field: "EnvId", Operator: datastore.OperatorEqual, Value: req.EnvironmentId, }, }, }) if err != nil { a.logger.Error("failed to fetch applications that belongs to the given environment", zap.String("env-id", req.EnvironmentId), zap.Error(err), ) return nil, status.Error(codes.Internal, "Failed to fetch applications that belongs to the given environment") } for _, app := range apps { if app.ProjectId != claims.Role.ProjectId { continue } err := a.applicationStore.DeleteApplication(ctx, app.Id) if err == nil { continue } switch err { case datastore.ErrNotFound: return nil, status.Error(codes.Internal, "The application is not found") case datastore.ErrInvalidArgument: return nil, status.Error(codes.InvalidArgument, "Invalid value to delete") default: a.logger.Error("failed to delete the application", zap.String("application-id", app.Id), zap.Error(err), ) return nil, status.Error(codes.Internal, "Failed to delete the application") } } if err := a.environmentStore.DeleteEnvironment(ctx, req.EnvironmentId); err != nil { switch err { case datastore.ErrNotFound: return nil, status.Error(codes.NotFound, "The environment is not found") case datastore.ErrInvalidArgument: return nil, status.Error(codes.InvalidArgument, "Invalid value to delete") default: a.logger.Error("failed to delete the environment", zap.String("env-id", req.EnvironmentId), zap.Error(err), ) return nil, status.Error(codes.Internal, "Failed to delete the environment") } } return &webservice.DeleteEnvironmentResponse{}, nil } func (a *WebAPI) updateEnvironmentEnable(ctx context.Context, envID string, enable bool) error { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return err } if err := a.validateEnvBelongsToProject(ctx, envID, claims.Role.ProjectId); err != nil { return err } var updater func(context.Context, string) error if enable { updater = a.environmentStore.EnableEnvironment } else { updater = a.environmentStore.DisableEnvironment } if err := updater(ctx, envID); err != nil { switch err { case datastore.ErrNotFound: return status.Error(codes.NotFound, "The environment is not found") case datastore.ErrInvalidArgument: return status.Error(codes.InvalidArgument, "Invalid value for update") default: a.logger.Error("failed to update the environment", zap.String("env-id", envID), zap.Error(err), ) return status.Error(codes.Internal, "Failed to update the environment") } } return nil } // validateEnvBelongsToProject checks if the given piped belongs to the given project. // It gives back error unless the env belongs to the project. 
func (a *WebAPI) validateEnvBelongsToProject(ctx context.Context, envID, projectID string) error { eid, err := a.envProjectCache.Get(envID) if err == nil { if projectID != eid { return status.Error(codes.PermissionDenied, "Requested environment doesn't belong to the project you logged in") } return nil } env, err := getEnvironment(ctx, a.environmentStore, envID, a.logger) if err != nil { return err } a.envProjectCache.Put(envID, env.ProjectId) if projectID != env.ProjectId { return status.Error(codes.PermissionDenied, "Requested environment doesn't belong to the project you logged in") } return nil } func (a *WebAPI) RegisterPiped(ctx context.Context, req *webservice.RegisterPipedRequest) (*webservice.RegisterPipedResponse, error) { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return nil, err } key, keyHash, err := model.GeneratePipedKey() if err != nil { a.logger.Error("failed to generate piped key", zap.Error(err)) return nil, status.Error(codes.Internal, "Failed to generate the piped key") } piped := model.Piped{ Id: uuid.New().String(), Name: req.Name, Desc: req.Desc, ProjectId: claims.Role.ProjectId, EnvIds: req.EnvIds, Status: model.Piped_OFFLINE, } if err := piped.AddKey(keyHash, claims.Subject, time.Now()); err != nil { return nil, status.Error(codes.FailedPrecondition, fmt.Sprintf("Failed to create key: %v", err)) } err = a.pipedStore.AddPiped(ctx, &piped) if errors.Is(err, datastore.ErrAlreadyExists) { return nil, status.Error(codes.AlreadyExists, "The piped already exists") } if err != nil { a.logger.Error("failed to register piped", zap.Error(err)) return nil, status.Error(codes.Internal, "Failed to register piped") } return &webservice.RegisterPipedResponse{ Id: piped.Id, Key: key, }, nil } func (a *WebAPI) UpdatePiped(ctx context.Context, req *webservice.UpdatePipedRequest) (*webservice.UpdatePipedResponse, error) { updater := func(ctx context.Context, pipedID string) error { return a.pipedStore.UpdatePiped(ctx, req.PipedId, func(p *model.Piped) error { p.Name = req.Name p.Desc = req.Desc p.EnvIds = req.EnvIds return nil }) } if err := a.updatePiped(ctx, req.PipedId, updater); err != nil { return nil, err } return &webservice.UpdatePipedResponse{}, nil } func (a *WebAPI) RecreatePipedKey(ctx context.Context, req *webservice.RecreatePipedKeyRequest) (*webservice.RecreatePipedKeyResponse, error) { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return nil, err } key, keyHash, err := model.GeneratePipedKey() if err != nil { a.logger.Error("failed to generate piped key", zap.Error(err)) return nil, status.Error(codes.Internal, "Failed to generate the piped key") } updater := func(ctx context.Context, pipedID string) error { return a.pipedStore.AddKey(ctx, pipedID, keyHash, claims.Subject, time.Now()) } if err := a.updatePiped(ctx, req.Id, updater); err != nil { return nil, err } return &webservice.RecreatePipedKeyResponse{ Key: key, }, nil } func (a *WebAPI) DeleteOldPipedKeys(ctx context.Context, req *webservice.DeleteOldPipedKeysRequest) (*webservice.DeleteOldPipedKeysResponse, error) { if _, err := rpcauth.ExtractClaims(ctx); err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return nil, err } updater := func(ctx context.Context, pipedID string) error { return a.pipedStore.DeleteOldKeys(ctx, pipedID) } if err := a.updatePiped(ctx, req.PipedId, updater); err != nil { return 
nil, err } return &webservice.DeleteOldPipedKeysResponse{}, nil } func (a *WebAPI) EnablePiped(ctx context.Context, req *webservice.EnablePipedRequest) (*webservice.EnablePipedResponse, error) { if err := a.updatePiped(ctx, req.PipedId, a.pipedStore.EnablePiped); err != nil { return nil, err } return &webservice.EnablePipedResponse{}, nil } func (a *WebAPI) DisablePiped(ctx context.Context, req *webservice.DisablePipedRequest) (*webservice.DisablePipedResponse, error) { if err := a.updatePiped(ctx, req.PipedId, a.pipedStore.DisablePiped); err != nil { return nil, err } return &webservice.DisablePipedResponse{}, nil } func (a *WebAPI) updatePiped(ctx context.Context, pipedID string, updater func(context.Context, string) error) error { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return err } if err := a.validatePipedBelongsToProject(ctx, pipedID, claims.Role.ProjectId); err != nil { return err } if err := updater(ctx, pipedID); err != nil { switch err { case datastore.ErrNotFound: return status.Error(codes.InvalidArgument, "The piped is not found") case datastore.ErrInvalidArgument: return status.Error(codes.InvalidArgument, "Invalid value for update") default: a.logger.Error("failed to update the piped", zap.String("piped-id", pipedID), zap.Error(err), ) // TODO: Improve error handling, instead of considering all as Internal error like this // we should check the error type to decide to pass its message to the web client or just a generic message. return status.Error(codes.Internal, "Failed to update the piped") } } return nil } // TODO: Consider using piped-stats to decide piped connection status. func (a *WebAPI) ListPipeds(ctx context.Context, req *webservice.ListPipedsRequest) (*webservice.ListPipedsResponse, error) { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return nil, err } opts := datastore.ListOptions{ Filters: []datastore.ListFilter{ { Field: "ProjectId", Operator: datastore.OperatorEqual, Value: claims.Role.ProjectId, }, }, } if req.Options != nil { if req.Options.Enabled != nil { opts.Filters = append(opts.Filters, datastore.ListFilter{ Field: "Disabled", Operator: datastore.OperatorEqual, Value: !req.Options.Enabled.GetValue(), }) } } pipeds, err := a.pipedStore.ListPipeds(ctx, opts) if err != nil { a.logger.Error("failed to get pipeds", zap.Error(err)) return nil, status.Error(codes.Internal, "Failed to get pipeds") } // Redact all sensitive data inside piped message before sending to the client. for i := range pipeds { pipeds[i].RedactSensitiveData() } return &webservice.ListPipedsResponse{ Pipeds: pipeds, }, nil } func (a *WebAPI) GetPiped(ctx context.Context, req *webservice.GetPipedRequest) (*webservice.GetPipedResponse, error) { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return nil, err } piped, err := getPiped(ctx, a.pipedStore, req.PipedId, a.logger) if err != nil { return nil, err } if err := a.validatePipedBelongsToProject(ctx, req.PipedId, claims.Role.ProjectId); err != nil { return nil, err } // Redact all sensitive data inside piped message before sending to the client. piped.RedactSensitiveData() return &webservice.GetPipedResponse{ Piped: piped, }, nil } // validatePipedBelongsToProject checks if the given piped belongs to the given project. 
// It gives back error unless the piped belongs to the project. func (a *WebAPI) validatePipedBelongsToProject(ctx context.Context, pipedID, projectID string) error { pid, err := a.pipedProjectCache.Get(pipedID) if err == nil { if pid != projectID { return status.Error(codes.PermissionDenied, "Requested piped doesn't belong to the project you logged in") } return nil } piped, err := getPiped(ctx, a.pipedStore, pipedID, a.logger) if err != nil { return err } a.pipedProjectCache.Put(pipedID, piped.ProjectId) if piped.ProjectId != projectID { return status.Error(codes.PermissionDenied, "Requested piped doesn't belong to the project you logged in") } return nil } // TODO: Validate the specified piped to ensure that it belongs to the specified environment. func (a *WebAPI) AddApplication(ctx context.Context, req *webservice.AddApplicationRequest) (*webservice.AddApplicationResponse, error) { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return nil, err } piped, err := getPiped(ctx, a.pipedStore, req.PipedId, a.logger) if err != nil { return nil, err } if piped.ProjectId != claims.Role.ProjectId { return nil, status.Error(codes.InvalidArgument, "Requested piped does not belong to your project") } gitpath, err := makeGitPath( req.GitPath.Repo.Id, req.GitPath.Path, req.GitPath.ConfigFilename, piped, a.logger, ) if err != nil { return nil, err } app := model.Application{ Id: uuid.New().String(), Name: req.Name, EnvId: req.EnvId, PipedId: req.PipedId, ProjectId: claims.Role.ProjectId, GitPath: gitpath, Kind: req.Kind, CloudProvider: req.CloudProvider, Description: req.Description, } err = a.applicationStore.AddApplication(ctx, &app) if errors.Is(err, datastore.ErrAlreadyExists) { return nil, status.Error(codes.AlreadyExists, "The application already exists") } if err != nil { a.logger.Error("failed to create application", zap.Error(err)) return nil, status.Error(codes.Internal, "Failed to create application") } return &webservice.AddApplicationResponse{ ApplicationId: app.Id, }, nil } func (a *WebAPI) UpdateApplication(ctx context.Context, req *webservice.UpdateApplicationRequest) (*webservice.UpdateApplicationResponse, error) { updater := func(app *model.Application) error { app.Name = req.Name app.EnvId = req.EnvId app.PipedId = req.PipedId app.Kind = req.Kind app.CloudProvider = req.CloudProvider return nil } if err := a.updateApplication(ctx, req.ApplicationId, req.PipedId, updater); err != nil { return nil, err } return &webservice.UpdateApplicationResponse{}, nil } func (a *WebAPI) UpdateApplicationDescription(ctx context.Context, req *webservice.UpdateApplicationDescriptionRequest) (*webservice.UpdateApplicationDescriptionResponse, error) { updater := func(app *model.Application) error { app.Description = req.Description return nil } if err := a.updateApplication(ctx, req.ApplicationId, "", updater); err != nil { return nil, err } return &webservice.UpdateApplicationDescriptionResponse{}, nil } func (a *WebAPI) updateApplication(ctx context.Context, id, pipedID string, updater func(app *model.Application) error) error { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return err } // Ensure that the specified piped is assignable for this application. 
if pipedID != "" { piped, err := getPiped(ctx, a.pipedStore, pipedID, a.logger) if err != nil { return err } if piped.ProjectId != claims.Role.ProjectId { return status.Error(codes.InvalidArgument, "Requested piped does not belong to your project") } } err = a.applicationStore.UpdateApplication(ctx, id, updater) if err != nil { a.logger.Error("failed to update application", zap.Error(err)) return status.Error(codes.Internal, "Failed to update application") } return nil } func (a *WebAPI) EnableApplication(ctx context.Context, req *webservice.EnableApplicationRequest) (*webservice.EnableApplicationResponse, error) { if err := a.updateApplicationEnable(ctx, req.ApplicationId, true); err != nil { return nil, err } return &webservice.EnableApplicationResponse{}, nil } func (a *WebAPI) DisableApplication(ctx context.Context, req *webservice.DisableApplicationRequest) (*webservice.DisableApplicationResponse, error) { if err := a.updateApplicationEnable(ctx, req.ApplicationId, false); err != nil { return nil, err } return &webservice.DisableApplicationResponse{}, nil } func (a *WebAPI) DeleteApplication(ctx context.Context, req *webservice.DeleteApplicationRequest) (*webservice.DeleteApplicationResponse, error) { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return nil, err } if err := a.validateAppBelongsToProject(ctx, req.ApplicationId, claims.Role.ProjectId); err != nil { return nil, err } if err := a.applicationStore.DeleteApplication(ctx, req.ApplicationId); err != nil { switch err { case datastore.ErrNotFound: return nil, status.Error(codes.NotFound, "The application is not found") case datastore.ErrInvalidArgument: return nil, status.Error(codes.InvalidArgument, "Invalid value to delete") default: a.logger.Error("failed to delete the application", zap.String("application-id", req.ApplicationId), zap.Error(err), ) return nil, status.Error(codes.Internal, "Failed to delete the application") } } return &webservice.DeleteApplicationResponse{}, nil } func (a *WebAPI) updateApplicationEnable(ctx context.Context, appID string, enable bool) error { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return err } if err := a.validateAppBelongsToProject(ctx, appID, claims.Role.ProjectId); err != nil { return err } var updater func(context.Context, string) error if enable { updater = a.applicationStore.EnableApplication } else { updater = a.applicationStore.DisableApplication } if err := updater(ctx, appID); err != nil { switch err { case datastore.ErrNotFound: return status.Error(codes.NotFound, "The application is not found") case datastore.ErrInvalidArgument: return status.Error(codes.InvalidArgument, "Invalid value for update") default: a.logger.Error("failed to update the application", zap.String("application-id", appID), zap.Error(err), ) return status.Error(codes.Internal, "Failed to update the application") } } return nil } func (a *WebAPI) ListApplications(ctx context.Context, req *webservice.ListApplicationsRequest) (*webservice.ListApplicationsResponse, error) { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return nil, err } orders := []datastore.Order{ { Field: "UpdatedAt", Direction: datastore.Desc, }, { Field: "Id", Direction: datastore.Asc, }, } filters := []datastore.ListFilter{ { Field: "ProjectId", Operator: datastore.OperatorEqual, 
Value: claims.Role.ProjectId, }, } if o := req.Options; o != nil { if o.Enabled != nil { filters = append(filters, datastore.ListFilter{ Field: "Disabled", Operator: datastore.OperatorEqual, Value: !o.Enabled.GetValue(), }) } // Allowing multiple so that it can do In Query later. // Currently only the first value is used. if len(o.Kinds) > 0 { filters = append(filters, datastore.ListFilter{ Field: "Kind", Operator: datastore.OperatorEqual, Value: o.Kinds[0], }) } if len(o.SyncStatuses) > 0 { filters = append(filters, datastore.ListFilter{ Field: "SyncState.Status", Operator: datastore.OperatorEqual, Value: o.SyncStatuses[0], }) } if len(o.EnvIds) > 0 { filters = append(filters, datastore.ListFilter{ Field: "EnvId", Operator: datastore.OperatorEqual, Value: o.EnvIds[0], }) } if o.Name != "" { filters = append(filters, datastore.ListFilter{ Field: "Name", Operator: datastore.OperatorEqual, Value: o.Name, }) } } apps, _, err := a.applicationStore.ListApplications(ctx, datastore.ListOptions{ Filters: filters, Orders: orders, }) if err != nil { a.logger.Error("failed to get applications", zap.Error(err)) return nil, status.Error(codes.Internal, "Failed to get applications") } return &webservice.ListApplicationsResponse{ Applications: apps, }, nil } func (a *WebAPI) SyncApplication(ctx context.Context, req *webservice.SyncApplicationRequest) (*webservice.SyncApplicationResponse, error) { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return nil, err } app, err := getApplication(ctx, a.applicationStore, req.ApplicationId, a.logger) if err != nil { return nil, err } if claims.Role.ProjectId != app.ProjectId { return nil, status.Error(codes.InvalidArgument, "Requested application does not belong to your project") } cmd := model.Command{ Id: uuid.New().String(), PipedId: app.PipedId, ApplicationId: app.Id, ProjectId: app.ProjectId, Type: model.Command_SYNC_APPLICATION, Commander: claims.Subject, SyncApplication: &model.Command_SyncApplication{ ApplicationId: app.Id, SyncStrategy: req.SyncStrategy, }, } if err := addCommand(ctx, a.commandStore, &cmd, a.logger); err != nil { return nil, err } return &webservice.SyncApplicationResponse{ CommandId: cmd.Id, }, nil } func (a *WebAPI) GetApplication(ctx context.Context, req *webservice.GetApplicationRequest) (*webservice.GetApplicationResponse, error) { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return nil, err } app, err := getApplication(ctx, a.applicationStore, req.ApplicationId, a.logger) if err != nil { return nil, err } if app.ProjectId != claims.Role.ProjectId { return nil, status.Error(codes.InvalidArgument, "Requested application does not belong to your project") } return &webservice.GetApplicationResponse{ Application: app, }, nil } func (a *WebAPI) GenerateApplicationSealedSecret(ctx context.Context, req *webservice.GenerateApplicationSealedSecretRequest) (*webservice.GenerateApplicationSealedSecretResponse, error) { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return nil, err } piped, err := getPiped(ctx, a.pipedStore, req.PipedId, a.logger) if err != nil { return nil, err } if err := a.validatePipedBelongsToProject(ctx, req.PipedId, claims.Role.ProjectId); err != nil { return nil, err } sse := piped.SealedSecretEncryption if sse == nil { return nil, status.Error(codes.FailedPrecondition, 
"The piped does not contain the encryption configuration") } data := req.Data if req.Base64Encoding { data = base64.StdEncoding.EncodeToString([]byte(data)) } var enc encrypter switch model.SealedSecretManagementType(sse.Type) { case model.SealedSecretManagementSealingKey: if sse.PublicKey == "" { return nil, status.Error(codes.FailedPrecondition, "The piped does not contain a public key") } enc, err = crypto.NewHybridEncrypter(sse.PublicKey) if err != nil { a.logger.Error("failed to initialize the crypter", zap.Error(err)) return nil, status.Error(codes.FailedPrecondition, "Failed to initialize the encrypter") } default: return nil, status.Error(codes.FailedPrecondition, "The piped does not contain a valid encryption type") } encryptedText, err := enc.Encrypt(data) if err != nil { a.logger.Error("failed to encrypt the secret", zap.Error(err)) return nil, status.Error(codes.FailedPrecondition, "Failed to encrypt the secret") } return &webservice.GenerateApplicationSealedSecretResponse{ Data: encryptedText, }, nil } // validateAppBelongsToProject checks if the given application belongs to the given project. // It gives back error unless the application belongs to the project. func (a *WebAPI) validateAppBelongsToProject(ctx context.Context, appID, projectID string) error { pid, err := a.appProjectCache.Get(appID) if err == nil { if pid != projectID { return status.Error(codes.PermissionDenied, "Requested application doesn't belong to the project you logged in") } return nil } app, err := getApplication(ctx, a.applicationStore, appID, a.logger) if err != nil { return err } a.appProjectCache.Put(appID, app.ProjectId) if app.ProjectId != projectID { return status.Error(codes.PermissionDenied, "Requested application doesn't belong to the project you logged in") } return nil } func (a *WebAPI) ListDeployments(ctx context.Context, req *webservice.ListDeploymentsRequest) (*webservice.ListDeploymentsResponse, error) { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return nil, err } orders := []datastore.Order{ { Field: "UpdatedAt", Direction: datastore.Desc, }, { Field: "Id", Direction: datastore.Asc, }, } filters := []datastore.ListFilter{ { Field: "ProjectId", Operator: datastore.OperatorEqual, Value: claims.Role.ProjectId, }, } if o := req.Options; o != nil { // Allowing multiple so that it can do In Query later. // Currently only the first value is used. 
if len(o.Statuses) > 0 { filters = append(filters, datastore.ListFilter{ Field: "Status", Operator: datastore.OperatorEqual, Value: o.Statuses[0], }) } if len(o.Kinds) > 0 { filters = append(filters, datastore.ListFilter{ Field: "Kind", Operator: datastore.OperatorEqual, Value: o.Kinds[0], }) } if len(o.ApplicationIds) > 0 { filters = append(filters, datastore.ListFilter{ Field: "ApplicationId", Operator: datastore.OperatorEqual, Value: o.ApplicationIds[0], }) } if len(o.EnvIds) > 0 { filters = append(filters, datastore.ListFilter{ Field: "EnvId", Operator: datastore.OperatorEqual, Value: o.EnvIds[0], }) } } deployments, cursor, err := a.deploymentStore.ListDeployments(ctx, datastore.ListOptions{ Filters: filters, Orders: orders, Limit: int(req.PageSize), Cursor: req.Cursor, }) if err != nil { a.logger.Error("failed to get deployments", zap.Error(err)) return nil, status.Error(codes.Internal, "Failed to get deployments") } return &webservice.ListDeploymentsResponse{ Deployments: deployments, Cursor: cursor, }, nil } func (a *WebAPI) GetDeployment(ctx context.Context, req *webservice.GetDeploymentRequest) (*webservice.GetDeploymentResponse, error) { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return nil, err } deployment, err := getDeployment(ctx, a.deploymentStore, req.DeploymentId, a.logger) if err != nil { return nil, err } if claims.Role.ProjectId != deployment.ProjectId { return nil, status.Error(codes.InvalidArgument, "Requested deployment does not belong to your project") } return &webservice.GetDeploymentResponse{ Deployment: deployment, }, nil } // validateDeploymentBelongsToProject checks if the given deployment belongs to the given project. // It gives back error unless the deployment belongs to the project. 
func (a *WebAPI) validateDeploymentBelongsToProject(ctx context.Context, deploymentID, projectID string) error { pid, err := a.deploymentProjectCache.Get(deploymentID) if err == nil { if pid != projectID { return status.Error(codes.PermissionDenied, "Requested deployment doesn't belong to the project you logged in") } return nil } deployment, err := getDeployment(ctx, a.deploymentStore, deploymentID, a.logger) if err != nil { return err } a.deploymentProjectCache.Put(deploymentID, deployment.ProjectId) if deployment.ProjectId != projectID { return status.Error(codes.PermissionDenied, "Requested deployment doesn't belong to the project you logged in") } return nil } func (a *WebAPI) GetStageLog(ctx context.Context, req *webservice.GetStageLogRequest) (*webservice.GetStageLogResponse, error) { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return nil, err } if err := a.validateDeploymentBelongsToProject(ctx, req.DeploymentId, claims.Role.ProjectId); err != nil { return nil, err } blocks, completed, err := a.stageLogStore.FetchLogs(ctx, req.DeploymentId, req.StageId, req.RetriedCount, req.OffsetIndex) if errors.Is(err, stagelogstore.ErrNotFound) { return nil, status.Error(codes.NotFound, "The stage log not found") } if err != nil { a.logger.Error("failed to get stage logs", zap.Error(err)) return nil, status.Error(codes.Internal, "Failed to get stage logs") } return &webservice.GetStageLogResponse{ Blocks: blocks, Completed: completed, }, nil } func (a *WebAPI) CancelDeployment(ctx context.Context, req *webservice.CancelDeploymentRequest) (*webservice.CancelDeploymentResponse, error) { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return nil, err } deployment, err := getDeployment(ctx, a.deploymentStore, req.DeploymentId, a.logger) if err != nil { return nil, err } if claims.Role.ProjectId != deployment.ProjectId { return nil, status.Error(codes.InvalidArgument, "Requested deployment does not belong to your project") } if model.IsCompletedDeployment(deployment.Status) { return nil, status.Errorf(codes.FailedPrecondition, "could not cancel the deployment because it was already completed") } cmd := model.Command{ Id: uuid.New().String(), PipedId: deployment.PipedId, ApplicationId: deployment.ApplicationId, ProjectId: deployment.ProjectId, DeploymentId: req.DeploymentId, Type: model.Command_CANCEL_DEPLOYMENT, Commander: claims.Subject, CancelDeployment: &model.Command_CancelDeployment{ DeploymentId: req.DeploymentId, ForceRollback: req.ForceRollback, ForceNoRollback: req.ForceNoRollback, }, } if err := addCommand(ctx, a.commandStore, &cmd, a.logger); err != nil { return nil, err } return &webservice.CancelDeploymentResponse{ CommandId: cmd.Id, }, nil } func (a *WebAPI) ApproveStage(ctx context.Context, req *webservice.ApproveStageRequest) (*webservice.ApproveStageResponse, error) { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return nil, err } deployment, err := getDeployment(ctx, a.deploymentStore, req.DeploymentId, a.logger) if err != nil { return nil, err } if err := a.validateDeploymentBelongsToProject(ctx, req.DeploymentId, claims.Role.ProjectId); err != nil { return nil, err } stage, ok := deployment.StageStatusMap()[req.StageId] if !ok { return nil, status.Error(codes.FailedPrecondition, "The stage was not found in the deployment") } 
if model.IsCompletedStage(stage) { return nil, status.Errorf(codes.FailedPrecondition, "Could not approve the stage because it was already completed") } commandID := uuid.New().String() cmd := model.Command{ Id: commandID, PipedId: deployment.PipedId, ApplicationId: deployment.ApplicationId, ProjectId: deployment.ProjectId, DeploymentId: req.DeploymentId, StageId: req.StageId, Type: model.Command_APPROVE_STAGE, Commander: claims.Subject, ApproveStage: &model.Command_ApproveStage{ DeploymentId: req.DeploymentId, StageId: req.StageId, }, } if err := addCommand(ctx, a.commandStore, &cmd, a.logger); err != nil { return nil, err } return &webservice.ApproveStageResponse{ CommandId: commandID, }, nil } func (a *WebAPI) GetApplicationLiveState(ctx context.Context, req *webservice.GetApplicationLiveStateRequest) (*webservice.GetApplicationLiveStateResponse, error) { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return nil, err } if err := a.validateAppBelongsToProject(ctx, req.ApplicationId, claims.Role.ProjectId); err != nil { return nil, err } snapshot, err := a.applicationLiveStateStore.GetStateSnapshot(ctx, req.ApplicationId) if err != nil { a.logger.Error("failed to get application live state", zap.Error(err)) return nil, status.Error(codes.Internal, "Failed to get application live state") } return &webservice.GetApplicationLiveStateResponse{ Snapshot: snapshot, }, nil } // GetProject gets the specified porject without sensitive data. func (a *WebAPI) GetProject(ctx context.Context, req *webservice.GetProjectRequest) (*webservice.GetProjectResponse, error) { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return nil, err } project, err := a.getProject(ctx, claims.Role.ProjectId) if err != nil { return nil, err } // Redact all sensitive data inside project message before sending to the client. project.RedactSensitiveData() return &webservice.GetProjectResponse{ Project: project, }, nil } func (a *WebAPI) getProject(ctx context.Context, projectID string) (*model.Project, error) { if p, ok := a.projectsInConfig[projectID]; ok { return &model.Project{ Id: p.Id, Desc: p.Desc, StaticAdmin: &model.ProjectStaticUser{ Username: p.StaticAdmin.Username, PasswordHash: p.StaticAdmin.PasswordHash, }, }, nil } project, err := a.projectStore.GetProject(ctx, projectID) if errors.Is(err, datastore.ErrNotFound) { return nil, status.Error(codes.NotFound, "The project is not found") } if err != nil { a.logger.Error("failed to get project", zap.Error(err)) return nil, status.Error(codes.Internal, "Failed to get project") } return project, nil } // UpdateProjectStaticAdmin updates the static admin user settings. 
func (a *WebAPI) UpdateProjectStaticAdmin(ctx context.Context, req *webservice.UpdateProjectStaticAdminRequest) (*webservice.UpdateProjectStaticAdminResponse, error) { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return nil, err } if _, ok := a.projectsInConfig[claims.Role.ProjectId]; ok { return nil, status.Error(codes.FailedPrecondition, "Failed to update a debug project specified in the control-plane configuration") } if err := a.projectStore.UpdateProjectStaticAdmin(ctx, claims.Role.ProjectId, req.Username, req.Password); err != nil { a.logger.Error("failed to update static admin", zap.Error(err)) return nil, status.Error(codes.Internal, "Failed to update static admin") } return &webservice.UpdateProjectStaticAdminResponse{}, nil } // EnableStaticAdmin enables static admin login. func (a *WebAPI) EnableStaticAdmin(ctx context.Context, req *webservice.EnableStaticAdminRequest) (*webservice.EnableStaticAdminResponse, error) { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return nil, err } if _, ok := a.projectsInConfig[claims.Role.ProjectId]; ok { return nil, status.Error(codes.FailedPrecondition, "Failed to update a debug project specified in the control-plane configuration") } if err := a.projectStore.EnableStaticAdmin(ctx, claims.Role.ProjectId); err != nil { a.logger.Error("failed to enable static admin login", zap.Error(err)) return nil, status.Error(codes.Internal, "Failed to enable static admin login") } return &webservice.EnableStaticAdminResponse{}, nil } // DisableStaticAdmin disables static admin login. func (a *WebAPI) DisableStaticAdmin(ctx context.Context, req *webservice.DisableStaticAdminRequest) (*webservice.DisableStaticAdminResponse, error) { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return nil, err } if _, ok := a.projectsInConfig[claims.Role.ProjectId]; ok { return nil, status.Error(codes.FailedPrecondition, "Failed to update a debug project specified in the control-plane configuration") } if err := a.projectStore.DisableStaticAdmin(ctx, claims.Role.ProjectId); err != nil { a.logger.Error("failed to disenable static admin login", zap.Error(err)) return nil, status.Error(codes.Internal, "Failed to disenable static admin login") } return &webservice.DisableStaticAdminResponse{}, nil } // UpdateProjectSSOConfig updates the sso settings. 
func (a *WebAPI) UpdateProjectSSOConfig(ctx context.Context, req *webservice.UpdateProjectSSOConfigRequest) (*webservice.UpdateProjectSSOConfigResponse, error) { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return nil, err } if _, ok := a.projectsInConfig[claims.Role.ProjectId]; ok { return nil, status.Error(codes.FailedPrecondition, "Failed to update a debug project specified in the control-plane configuration") } if err := req.Sso.Encrypt(a.encrypter); err != nil { a.logger.Error("failed to encrypt sensitive data in sso configurations", zap.Error(err)) return nil, status.Error(codes.Internal, "Failed to encrypt sensitive data in sso configurations") } if err := a.projectStore.UpdateProjectSSOConfig(ctx, claims.Role.ProjectId, req.Sso); err != nil { a.logger.Error("failed to update project single sign on settings", zap.Error(err)) return nil, status.Error(codes.Internal, "Failed to update project single sign on settings") } return &webservice.UpdateProjectSSOConfigResponse{}, nil } // UpdateProjectRBACConfig updates the sso settings. func (a *WebAPI) UpdateProjectRBACConfig(ctx context.Context, req *webservice.UpdateProjectRBACConfigRequest) (*webservice.UpdateProjectRBACConfigResponse, error) { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return nil, err } if _, ok := a.projectsInConfig[claims.Role.ProjectId]; ok { return nil, status.Error(codes.FailedPrecondition, "Failed to update a debug project specified in the control-plane configuration") } if err := a.projectStore.UpdateProjectRBACConfig(ctx, claims.Role.ProjectId, req.Rbac); err != nil { a.logger.Error("failed to update project single sign on settings", zap.Error(err)) return nil, status.Error(codes.Internal, "Failed to update project single sign on settings") } return &webservice.UpdateProjectRBACConfigResponse{}, nil } // GetMe gets information about the current user. 
func (a *WebAPI) GetMe(ctx context.Context, req *webservice.GetMeRequest) (*webservice.GetMeResponse, error) { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return nil, err } return &webservice.GetMeResponse{ Subject: claims.Subject, AvatarUrl: claims.AvatarURL, ProjectId: claims.Role.ProjectId, ProjectRole: claims.Role.ProjectRole, }, nil } func (a *WebAPI) GetCommand(ctx context.Context, req *webservice.GetCommandRequest) (*webservice.GetCommandResponse, error) { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return nil, err } cmd, err := getCommand(ctx, a.commandStore, req.CommandId, a.logger) if err != nil { return nil, err } if claims.Role.ProjectId != cmd.ProjectId { return nil, status.Error(codes.InvalidArgument, "Requested command does not belong to your project") } return &webservice.GetCommandResponse{ Command: cmd, }, nil } func (a *WebAPI) ListDeploymentConfigTemplates(ctx context.Context, req *webservice.ListDeploymentConfigTemplatesRequest) (*webservice.ListDeploymentConfigTemplatesResponse, error) { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return nil, err } app, err := getApplication(ctx, a.applicationStore, req.ApplicationId, a.logger) if err != nil { return nil, err } if err := a.validateAppBelongsToProject(ctx, req.ApplicationId, claims.Role.ProjectId); err != nil { return nil, err } var templates []*webservice.DeploymentConfigTemplate switch app.Kind { case model.ApplicationKind_KUBERNETES: templates = k8sDeploymentConfigTemplates case model.ApplicationKind_TERRAFORM: templates = terraformDeploymentConfigTemplates case model.ApplicationKind_CROSSPLANE: templates = crossplaneDeploymentConfigTemplates case model.ApplicationKind_LAMBDA: templates = lambdaDeploymentConfigTemplates case model.ApplicationKind_CLOUDRUN: templates = cloudrunDeploymentConfigTemplates case model.ApplicationKind_ECS: templates = ecsDeploymentConfigTemplates default: return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("Unknown application kind %v", app.Kind)) } for _, t := range templates { g := app.GetGitPath() filename := g.ConfigFilename if filename == "" { filename = ".pipe.yaml" } t.FileCreationUrl, err = git.MakeFileCreationURL(g.Repo.Remote, g.Path, g.Repo.Branch, filename, t.Content) if err != nil { a.logger.Error("failed to make a link to create a file", zap.Error(err)) return nil, status.Error(codes.Internal, "Failed to make a link to create a file") } } if len(req.Labels) == 0 { return &webservice.ListDeploymentConfigTemplatesResponse{Templates: templates}, nil } filtered := filterDeploymentConfigTemplates(templates, req.Labels) return &webservice.ListDeploymentConfigTemplatesResponse{Templates: filtered}, nil } // Returns the one from the given templates with all the specified labels. 
func filterDeploymentConfigTemplates(templates []*webservice.DeploymentConfigTemplate, labels []webservice.DeploymentConfigTemplateLabel) []*webservice.DeploymentConfigTemplate { filtered := make([]*webservice.DeploymentConfigTemplate, 0, len(templates)) L: for _, template := range templates { for _, l := range labels { if !template.HasLabel(l) { continue L } } filtered = append(filtered, template) } return filtered } func (a *WebAPI) GenerateAPIKey(ctx context.Context, req *webservice.GenerateAPIKeyRequest) (*webservice.GenerateAPIKeyResponse, error) { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return nil, err } id := uuid.New().String() key, hash, err := model.GenerateAPIKey(id) if err != nil { a.logger.Error("failed to generate API key", zap.Error(err)) return nil, status.Error(codes.Internal, "Failed to generate API key") } apiKey := model.APIKey{ Id: id, Name: req.Name, KeyHash: hash, ProjectId: claims.Role.ProjectId, Role: req.Role, Creator: claims.Subject, } err = a.apiKeyStore.AddAPIKey(ctx, &apiKey) if errors.Is(err, datastore.ErrAlreadyExists) { return nil, status.Error(codes.AlreadyExists, "The API key already exists") } if err != nil { a.logger.Error("failed to create API key", zap.Error(err)) return nil, status.Error(codes.Internal, "Failed to create API key") } return &webservice.GenerateAPIKeyResponse{ Key: key, }, nil } func (a *WebAPI) DisableAPIKey(ctx context.Context, req *webservice.DisableAPIKeyRequest) (*webservice.DisableAPIKeyResponse, error) { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return nil, err } if err := a.apiKeyStore.DisableAPIKey(ctx, req.Id, claims.Role.ProjectId); err != nil { switch err { case datastore.ErrNotFound: return nil, status.Error(codes.InvalidArgument, "The API key is not found") case datastore.ErrInvalidArgument: return nil, status.Error(codes.InvalidArgument, "Invalid value for update") default: a.logger.Error("failed to disable the API key", zap.String("apikey-id", req.Id), zap.Error(err), ) return nil, status.Error(codes.Internal, "Failed to disable the API key") } } return &webservice.DisableAPIKeyResponse{}, nil } func (a *WebAPI) ListAPIKeys(ctx context.Context, req *webservice.ListAPIKeysRequest) (*webservice.ListAPIKeysResponse, error) { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return nil, err } opts := datastore.ListOptions{ Filters: []datastore.ListFilter{ { Field: "ProjectId", Operator: datastore.OperatorEqual, Value: claims.Role.ProjectId, }, }, } if req.Options != nil { if req.Options.Enabled != nil { opts.Filters = append(opts.Filters, datastore.ListFilter{ Field: "Disabled", Operator: datastore.OperatorEqual, Value: !req.Options.Enabled.GetValue(), }) } } apiKeys, err := a.apiKeyStore.ListAPIKeys(ctx, opts) if err != nil { a.logger.Error("failed to list API keys", zap.Error(err)) return nil, status.Error(codes.Internal, "Failed to list API keys") } // Redact all sensitive data inside API key before sending to the client. for i := range apiKeys { apiKeys[i].RedactSensitiveData() } return &webservice.ListAPIKeysResponse{ Keys: apiKeys, }, nil } // GetInsightData returns the accumulated insight data. 
func (a *WebAPI) GetInsightData(ctx context.Context, req *webservice.GetInsightDataRequest) (*webservice.GetInsightDataResponse, error) { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return nil, err } count := int(req.DataPointCount) from := time.Unix(req.RangeFrom, 0) chunks, err := insightstore.LoadChunksFromCache(a.insightCache, claims.Role.ProjectId, req.ApplicationId, req.MetricsKind, req.Step, from, count) if err != nil { a.logger.Error("failed to load chunks from cache", zap.Error(err)) chunks, err = a.insightStore.LoadChunks(ctx, claims.Role.ProjectId, req.ApplicationId, req.MetricsKind, req.Step, from, count) if err != nil { a.logger.Error("failed to load chunks from insightstore", zap.Error(err)) return nil, err } if err := insightstore.PutChunksToCache(a.insightCache, chunks); err != nil { a.logger.Error("failed to put chunks to cache", zap.Error(err)) } } idp, err := chunks.ExtractDataPoints(req.Step, from, count) if err != nil { a.logger.Error("failed to extract data points from chunks", zap.Error(err)) } var updateAt int64 for _, c := range chunks { accumulatedTo := c.GetAccumulatedTo() if accumulatedTo > updateAt { updateAt = accumulatedTo } } return &webservice.GetInsightDataResponse{ UpdatedAt: updateAt, DataPoints: idp, Type: model.InsightResultType_MATRIX, Matrix: []*model.InsightSampleStream{ { DataPoints: idp, }, }, }, nil } func (a *WebAPI) GetInsightApplicationCount(ctx context.Context, req *webservice.GetInsightApplicationCountRequest) (*webservice.GetInsightApplicationCountResponse, error) { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return nil, err } // TODO: Cache application counts in the cache service. c, err := a.insightStore.LoadApplicationCounts(ctx, claims.Role.ProjectId) if err != nil { if err == filestore.ErrNotFound { return nil, status.Error(codes.NotFound, "Not found") } a.logger.Error("failed to load application counts", zap.Error(err)) return nil, status.Error(codes.Internal, "failed to load application counts") } counts := make([]*model.InsightApplicationCount, 0, len(c.Counts)) for i := range c.Counts { counts = append(counts, &c.Counts[i]) } return &webservice.GetInsightApplicationCountResponse{ Counts: counts, UpdatedAt: c.UpdatedAt, }, nil }
1
17,197
this blew my mind
pipe-cd-pipe
go
@@ -335,6 +335,10 @@ type MayaAPIServiceOutputLabel string const ( ReplicaStatusAPILbl MayaAPIServiceOutputLabel = "vsm.openebs.io/replica-status" + //ReplicaContainerStatus MayaAPIServiceOutputLabel = "vsm.openebs.io/replica-cont-status" + + ControllerContainerStatus MayaAPIServiceOutputLabel = "vsm.openebs.io/container-status" + ControllerStatusAPILbl MayaAPIServiceOutputLabel = "vsm.openebs.io/controller-status" TargetPortalsAPILbl MayaAPIServiceOutputLabel = "vsm.openebs.io/targetportals"
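A rough illustration (not openebs source) of how typed output labels like the ones added in this patch are typically consumed: as keys into a map of status annotations returned by the maya API service. The label string values are copied from the patch; the annotations map and its contents are hypothetical stand-ins.

package main

import "fmt"

// MayaAPIServiceOutputLabel is a typed label for maya API service output keys.
type MayaAPIServiceOutputLabel string

const (
	ReplicaStatusAPILbl       MayaAPIServiceOutputLabel = "vsm.openebs.io/replica-status"
	ControllerContainerStatus MayaAPIServiceOutputLabel = "vsm.openebs.io/container-status"
	ControllerStatusAPILbl    MayaAPIServiceOutputLabel = "vsm.openebs.io/controller-status"
)

func main() {
	// Hypothetical annotations as they might be reported for a single volume.
	annotations := map[MayaAPIServiceOutputLabel]string{
		ReplicaStatusAPILbl:       "Running,Running",
		ControllerContainerStatus: "Running",
		ControllerStatusAPILbl:    "Running",
	}

	// The typed key prevents mixing these labels up with arbitrary strings.
	if s, ok := annotations[ControllerContainerStatus]; ok {
		fmt.Println("controller container status:", s)
	}
}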
1
package v1 // NomadEnvironmentVariable is a typed label that defines environment variables // that are understood by Nomad type NomadEnvironmentVariable string const ( // NomadAddressEnvKey is the environment variable that determines the // Nomad server address where the Job request can be directed to. NomadAddressEnvKey NomadEnvironmentVariable = "NOMAD_ADDR" // NomadRegionEnvKey is the environment variable that determines the Nomad region // where the Job request can be directed to. NomadRegionEnvKey NomadEnvironmentVariable = "NOMAD_REGION" ) // EnvironmentVariableLabel is a typed label that defines environment variable // labels that are passed as request options during provisioning. type EnvironmentVariableLabel string const ( // EnvVariableContextLbl is the label that can be optionally set as one of the // request option during VSM provisioning operations. Its value is used // to set the context (/ prefix) against the environment variables for that // particular request. EnvVariableContextLbl EnvironmentVariableLabel = "env.mapi.openebs.io/env-var-ctx" ) // EnvironmentVariableDefaults is a typed label that defines the environment variable // defaults type EnvironmentVariableDefaults string const ( // Default value for environment variable context EnvVariableContextDef EnvironmentVariableDefaults = "DEFAULT" ) // EnvironmentVariableKey is a typed label that define the environment variables type EnvironmentVariableKey string const ( // PVPProfileNameEnvVarKey is the environment variable key for persistent // volume provisioner's profile name // // Usage: // <CTX>_PVP_PROFILE_NAME = <some value> PVPProfileNameEnvVarKey EnvironmentVariableKey = "_PVP_PROFILE_NAME" // PVPNameEnvVarKey is the environment variable key for persistent volume // provisioner's name // // Usage: // <CTX>_PVP_NAME = <some value> PVPNameEnvVarKey EnvironmentVariableKey = "_PVP_NAME" // PVPControllerImageEnvVarKey is the environment variable key for persistent // volume provisioner's controller image // // Usage: // <CTX>_CONTROLLER_IMAGE = <some value> PVPControllerImageEnvVarKey EnvironmentVariableKey = "_CONTROLLER_IMAGE" // PVPPersistentPathEnvVarKey is the environment variable key for persistent // volume provisioner's replica persistent path // // Usage: // <CTX>_PERSISTENT_PATH = <some value> PVPPersistentPathEnvVarKey EnvironmentVariableKey = "_PERSISTENT_PATH" // PVPStorageSizeEnvVarKey is the environment variable key for persistent // volume provisioner's replica size // // Usage: // <CTX>_STORAGE_SIZE = <some value> PVPStorageSizeEnvVarKey EnvironmentVariableKey = "_STORAGE_SIZE" // PVPReplicaCountEnvVarKey is the environment variable key for persistent // volume provisioner's replica count // // Usage: // <CTX>_REPLICA_COUNT = <some value> PVPReplicaCountEnvVarKey EnvironmentVariableKey = "_REPLICA_COUNT" // PVPReplicaImageEnvVarKey is the environment variable key for persistent // volume provisioner's replica image // // Usage: // <CTX>_REPLICA_IMAGE = <some value> PVPReplicaImageEnvVarKey EnvironmentVariableKey = "_REPLICA_IMAGE" // PVPControllerCountEnvVarKey is the environment variable key for persistent // volume provisioner's controller count // // Usage: // <CTX>_CONTROLLER_COUNT = <some value> PVPControllerCountEnvVarKey EnvironmentVariableKey = "_CONTROLLER_COUNT" // PVPReplicaTopologyKeyEnvVarKey is the environment variable key for persistent // volume provisioner's replica topology key // // Usage: // <CTX>_REPLICA_TOPOLOGY_KEY = <some value> PVPReplicaTopologyKeyEnvVarKey 
EnvironmentVariableKey = "_REPLICA_TOPOLOGY_KEY" // PVPControllerNodeTaintTolerationEnvVarKey is the environment variable key // for persistent volume provisioner's node taint toleration // // Usage: // <CTX>_CONTROLLER_NODE_TAINT_TOLERATION = <some value> PVPControllerNodeTaintTolerationEnvVarKey EnvironmentVariableKey = "_CONTROLLER_NODE_TAINT_TOLERATION" // PVPReplicaNodeTaintTolerationEnvVarKey is the environment variable key for // persistent volume provisioner's node taint toleration // // Usage: // <CTX>__REPLICA_NODE_TAINT_TOLERATION = <some value> PVPReplicaNodeTaintTolerationEnvVarKey EnvironmentVariableKey = "_REPLICA_NODE_TAINT_TOLERATION" // OrchestratorNameEnvVarKey is the environment variable key for // orchestration provider's name // // Usage: // <CTX>_ORCHESTRATOR_NAME = <some value> OrchestratorNameEnvVarKey EnvironmentVariableKey = "_ORCHESTRATOR_NAME" // OrchestratorRegionEnvVarKey is the environment variable key for orchestration // provider's region // // Usage: // <CTX>_ORCHESTRATOR_REGION = <some value> OrchestratorRegionEnvVarKey EnvironmentVariableKey = "_ORCHESTRATOR_REGION" // OrchestratorDCEnvVarKey is the environment variable key for orchestration // provider's datacenter // // Usage: // <CTX>_ORCHESTRATOR_DC = <some value> OrchestratorDCEnvVarKey EnvironmentVariableKey = "_ORCHESTRATOR_DC" // OrchestratorAddressEnvVarKey is the environment variable key for orchestration // provider's address // // Usage: // <CTX>_<REGION>_<DC>_ORCHESTRATOR_ADDR = 10.20.1.1 OrchestratorAddressEnvVarKey EnvironmentVariableKey = "_ORCHESTRATOR_ADDR" // OrchestratorCNTypeEnvVarKey is the environment variable key for orchestration // provider's network type // // Usage: // <CTX>_ORCHESTRATOR_CN_TYPE = <some value> OrchestratorCNTypeEnvVarKey EnvironmentVariableKey = "_ORCHESTRATOR_CN_TYPE" // OrchestratorCNInterfaceEnvVarKey is the environment variable key for orchestration // provider's network interface // // Usage: // <CTX>_ORCHESTRATOR_CN_INTERFACE = <some value> OrchestratorCNInterfaceEnvVarKey EnvironmentVariableKey = "_ORCHESTRATOR_CN_INTERFACE" // OrchestratorCNAddrEnvVarKey is the environment variable key for orchestration // provider's network address // // Usage: // <CTX>_ORCHESTRATOR_CN_ADDRESS = <some value> OrchestratorCNAddrEnvVarKey EnvironmentVariableKey = "_ORCHESTRATOR_CN_ADDRESS" // OrchestratorNSEnvVarKey is the environment variable key for orchestration // provider's namespace // // Usage: // <CTX>_ORCHESTRATOR_NS = <some value> OrchestratorNSEnvVarKey EnvironmentVariableKey = "_ORCHESTRATOR_NS" // OrchestratorInClusterEnvVarKey is the environment variable key for orchestration // provider's in-cluster flag // // Usage: // <CTX>_ORCHESTRATOR_IN_CLUSTER = <some value> OrchestratorInClusterEnvVarKey EnvironmentVariableKey = "_ORCHESTRATOR_IN_CLUSTER" ) // OrchProviderProfileLabel is a typed label to determine orchestration provider // profile's values. 
type OrchProviderProfileLabel string const ( // Label / Tag for an orchestrator profile name OrchProfileNameLbl OrchProviderProfileLabel = "orchprovider.mapi.openebs.io/profile-name" // Label / Tag for an orchestrator region OrchRegionLbl OrchProviderProfileLabel = "orchprovider.mapi.openebs.io/region" // Label / Tag for an orchestrator datacenter OrchDCLbl OrchProviderProfileLabel = "orchprovider.mapi.openebs.io/dc" // OrchAddrLbl is the Label / Tag for an orchestrator address OrchAddrLbl OrchProviderProfileLabel = "orchprovider.mapi.openebs.io/address" // Label / Tag for an orchestrator namespace OrchNSLbl OrchProviderProfileLabel = "orchprovider.mapi.openebs.io/ns" // OrchInClusterLbl is the label for setting the in cluster flag. This is used // during provisioning operations. It sets if the provisioning is meant to be // within cluster or outside the cluster. OrchInClusterLbl OrchProviderProfileLabel = "orchprovider.mapi.openebs.io/in-cluster" // OrchCNTypeLbl is the Label / Tag for an orchestrator's networking type OrchCNTypeLbl OrchProviderProfileLabel = "orchprovider.mapi.openebs.io/cn-type" // OrchCNNetworkAddrLbl is the Label / Tag for an orchestrator's network address // in CIDR notation OrchCNNetworkAddrLbl OrchProviderProfileLabel = "orchprovider.mapi.openebs.io/cn-addr" // OrchCNSubnetLbl is the Label / Tag for an orchestrator's network subnet OrchCNSubnetLbl OrchProviderProfileLabel = "orchprovider.mapi.openebs.io/cn-subnet" // OrchCNInterfaceLbl is the Label / Tag for an orchestrator's network interface OrchCNInterfaceLbl OrchProviderProfileLabel = "orchprovider.mapi.openebs.io/cn-interface" ) // OrchProviderDefaults is a typed label to provide default values w.r.t // orchestration provider properties. type OrchProviderDefaults string const ( // Default value for orchestrator's network address // NOTE: Should be in valid CIDR notation OrchNetworkAddrDef OrchProviderDefaults = "172.28.128.1/24" // Default value for orchestrator's in-cluster flag OrchInClusterDef OrchProviderDefaults = "true" // Default value for orchestrator namespace OrchNSDef OrchProviderDefaults = "default" // OrchRegionDef is the default value of orchestrator region OrchRegionDef OrchProviderDefaults = "global" // OrchDCDef is the default value of orchestrator datacenter OrchDCDef OrchProviderDefaults = "dc1" // OrchAddressDef is the default value of orchestrator address OrchAddressDef OrchProviderDefaults = "127.0.0.1" // OrchCNTypeDef is the default value of orchestrator network type OrchCNTypeDef OrchProviderDefaults = "host" // OrchCNInterfaceDef is the default value of orchestrator network interface OrchCNInterfaceDef OrchProviderDefaults = "enp0s8" ) // VolumeProvisionerProfileLabel is a typed label to determine volume provisioner // profile values. 
type VolumeProvisionerProfileLabel string const ( // Label / Tag for a persistent volume provisioner profile's name PVPProfileNameLbl VolumeProvisionerProfileLabel = "volumeprovisioner.mapi.openebs.io/profile-name" // Label / Tag for a persistent volume provisioner's replica support PVPReqReplicaLbl VolumeProvisionerProfileLabel = "volumeprovisioner.mapi.openebs.io/req-replica" // Label / Tag for a persistent volume provisioner's networking support PVPReqNetworkingLbl VolumeProvisionerProfileLabel = "volumeprovisioner.mapi.openebs.io/req-networking" // Deprecate // Label / Tag for a persistent volume provisioner's replica count PVPReplicaCountLbl VolumeProvisionerProfileLabel = "volumeprovisioner.mapi.openebs.io/replica-count" // Label / Tag for a persistent volume provisioner's persistent path count PVPPersistentPathCountLbl VolumeProvisionerProfileLabel = PVPReplicaCountLbl // Label / Tag for a persistent volume provisioner's storage size PVPStorageSizeLbl VolumeProvisionerProfileLabel = "volumeprovisioner.mapi.openebs.io/storage-size" // Label / Tag for a persistent volume provisioner's replica IPs PVPReplicaIPsLbl VolumeProvisionerProfileLabel = "volumeprovisioner.mapi.openebs.io/replica-ips" // Label / Tag for a persistent volume provisioner's replica image PVPReplicaImageLbl VolumeProvisionerProfileLabel = "volumeprovisioner.mapi.openebs.io/replica-image" // Label / Tag for a persistent volume provisioner's controller count PVPControllerCountLbl VolumeProvisionerProfileLabel = "volumeprovisioner.mapi.openebs.io/controller-count" // Label / Tag for a persistent volume provisioner's controller image PVPControllerImageLbl VolumeProvisionerProfileLabel = "volumeprovisioner.mapi.openebs.io/controller-image" // Label / Tag for a persistent volume provisioner's controller IPs PVPControllerIPsLbl VolumeProvisionerProfileLabel = "volumeprovisioner.mapi.openebs.io/controller-ips" // Label / Tag for a persistent volume provisioner's persistent path PVPPersistentPathLbl VolumeProvisionerProfileLabel = "volumeprovisioner.mapi.openebs.io/persistent-path" // Label / Tag for a persistent volume provisioner's controller node taint toleration PVPControllerNodeTaintTolerationLbl VolumeProvisionerProfileLabel = "volumeprovisioner.mapi.openebs.io/controller-node-taint-toleration" // Label / Tag for a persistent volume provisioner's replica node taint toleration PVPReplicaNodeTaintTolerationLbl VolumeProvisionerProfileLabel = "volumeprovisioner.mapi.openebs.io/replica-node-taint-toleration" // PVPReplicaTopologyKeyLbl is the label for a persistent volume provisioner's // VSM replica topology key PVPReplicaTopologyKeyLbl VolumeProvisionerProfileLabel = "volumeprovisioner.mapi.openebs.io/replica-topology-key" // PVPNodeAffinityExpressionsLbl is the label to determine the node affinity // of the replica(s). // // NOTE: // 1. These are comma separated key value pairs, where each // key & value is separated by an operator e.g. In, NotIn, Exists, DoesNotExist // // 2. The key & value should have been labeled against a node or group of // nodes belonging to the K8s cluster // // 3. 
The replica count should match the number of of pairs provided // // Usage: // For OpenEBS volume with 2 replicas: // volumeprovisioner.mapi.openebs.io/node-affinity-expressions= // "<replica-identifier>=kubernetes.io/hostname:In:node1, // <another-replica-identifier>=kubernetes.io/hostname:In:node2" // // Usage: // For OpenEBS volume with 3 replicas: // volumeprovisioner.mapi.openebs.io/node-affinity-expressions= // "<replica-identifier>=kubernetes.io/hostname:In:node1, // <another-replica-identifier>=kubernetes.io/hostname:In:node2, // <yet-another-replica-identifier>=kubernetes.io/hostname:In:node3" // // Usage: // For OpenEBS volume with 3 replicas: // volumeprovisioner.mapi.openebs.io/node-affinity-expressions= // "<replica-identifier>=volumeprovisioner.mapi.openebs.io/replica-zone-1-ssd-1:In:zone-1-ssd-1, // <another-replica-identifier>=openebs.io/replica-zone-1-ssd-2:In:zone-1-ssd-2, // <yet-another-replica-identifier>=openebs.io/replica-zone-2-ssd-1:In:zone-2-ssd-1" // // Usage: // For OpenEBS volume with 3 replicas: // volumeprovisioner.mapi.openebs.io/node-affinity-expressions= // "<replica-identifier>=openebs.io/replica-zone-1-grp-1:In:zone-1-grp-1, // <another-replica-identifier>=openebs.io/replica-zone-1-grp-2:In:zone-1-grp-2, // <yet-another-replica-identifier>=openebs.io/replica-zone-2-grp-1:In:zone-2-grp-1" //PVPNodeAffinityExpressionsLbl VolumeProvisionerProfileLabel = "volumeprovisioner.mapi.openebs.io/node-affinity-expressions" // PVPNodeSelectorKeyLbl is the label to build the node affinity // of the replica based on the key & the replica identifier // // NOTE: // PVPNodeAffinityExpressionsLbl is used here as key is a part of the expressions //PVPNodeSelectorKeyLbl VolumeProvisionerProfileLabel = PVPNodeAffinityExpressionsLbl // PVPNodeSelectorOpLbl is the label to build the node affinity // of the replica based on the operator & the replica identifier // // NOTE: // PVPNodeAffinityExpressionsLbl is used here as operator is a part of the expressions //PVPNodeSelectorOpLbl VolumeProvisionerProfileLabel = PVPNodeAffinityExpressionsLbl // PVPNodeSelectorValueLbl is the label to build the node affinity // of the replica based on the operator & the replica identifier // // NOTE: // PVPNodeAffinityExpressionsLbl is used here as value is a part of the expressions //PVPNodeSelectorValueLbl VolumeProvisionerProfileLabel = PVPNodeAffinityExpressionsLbl // PVPSCNameLbl is the key used to specify the name of storage class. This // applies when OpenEBS volume is orchestrated by Maya using Kubernetes. PVPSCNameLbl VolumeProvisionerProfileLabel = "sc/name" // PVPSCNamespaceLbl is the key used to specify the namespace of storage // class. This applies when OpenEBS volume is orchestrated by Maya using // Kubernetes. 
//PVPSCNamespaceLbl VolumeProvisionerProfileLabel = "sc/namespace" ) // Deprecate type MayaAPIServiceOutputLabel string // Deprecate all these constants const ( ReplicaStatusAPILbl MayaAPIServiceOutputLabel = "vsm.openebs.io/replica-status" ControllerStatusAPILbl MayaAPIServiceOutputLabel = "vsm.openebs.io/controller-status" TargetPortalsAPILbl MayaAPIServiceOutputLabel = "vsm.openebs.io/targetportals" ClusterIPsAPILbl MayaAPIServiceOutputLabel = "vsm.openebs.io/cluster-ips" ReplicaIPsAPILbl MayaAPIServiceOutputLabel = "vsm.openebs.io/replica-ips" ControllerIPsAPILbl MayaAPIServiceOutputLabel = "vsm.openebs.io/controller-ips" IQNAPILbl MayaAPIServiceOutputLabel = "vsm.openebs.io/iqn" VolumeSizeAPILbl MayaAPIServiceOutputLabel = "vsm.openebs.io/volume-size" // Deprecate ReplicaCountAPILbl MayaAPIServiceOutputLabel = "vsm.openebs.io/replica-count" ) // VolumeProvsionerDefaults is a typed label to provide default values w.r.t // volume provisioner properties. type VolumeProvisionerDefaults string const ( // Default value for persistent volume provisioner's controller count PVPControllerCountDef VolumeProvisionerDefaults = "1" // Default value for persistent volume provisioner's replica count PVPReplicaCountDef VolumeProvisionerDefaults = "2" // Default value for persistent volume provisioner's persistent path count // This should be equal to persistent volume provisioner's replica count PVPPersistentPathCountDef VolumeProvisionerDefaults = PVPReplicaCountDef // Default value for persistent volume provisioner's controller image PVPControllerImageDef VolumeProvisionerDefaults = "openebs/jiva:latest" // Default value for persistent volume provisioner's support for replica PVPReqReplicaDef VolumeProvisionerDefaults = "true" // Default value for persistent volume provisioner's replica image PVPReplicaImageDef VolumeProvisionerDefaults = "openebs/jiva:latest" // Default value for persistent volume provisioner's networking support PVPReqNetworkingDef VolumeProvisionerDefaults = "false" // PVPPersistentPathDef is the default value for persistent volume provisioner's // replica persistent path PVPPersistentPathDef VolumeProvisionerDefaults = "/var/openebs" // PVPStorageSizeDef is the default value for persistent volume provisioner's // replica size PVPStorageSizeDef VolumeProvisionerDefaults = "1G" // PVPNodeSelectorKeyDef is the default value for volume replica's node selector // key //PVPNodeSelectorKeyDef VolumeProvisionerDefaults = "kubernetes.io/hostname" // PVPNodeSelectorOpDef is the default value for volume replica's node selector // operator //PVPNodeSelectorOpDef VolumeProvisionerDefaults = "In" ) // NameLabel type will be used to identify various maya api service components // via this typed label type NameLabel string const ( // Label / Tag for an orchestrator name OrchestratorNameLbl NameLabel = "orchprovider.mapi.openebs.io/name" // Label / Tag for a persistent volume provisioner name VolumeProvisionerNameLbl NameLabel = "volumeprovisioner.mapi.openebs.io/name" ) // OrchestratorRegistry type will be used to register various maya api service // orchestrators. type OrchProviderRegistry string const ( // K8sOrchestrator states Kubernetes as orchestration provider plugin. // This is used for registering Kubernetes as an orchestration provider in maya // api server. K8sOrchestrator OrchProviderRegistry = "kubernetes" // NomadOrchestrator states Nomad as orchestration provider plugin. // This is used for registering Nomad as an orchestration provider in maya api // server. 
NomadOrchestrator OrchProviderRegistry = "nomad" // DefaultOrchestrator provides the default orchestration provider DefaultOrchestrator = K8sOrchestrator ) // VolumeProvisionerRegistry type will be used to register various maya api // service volume provisioners. type VolumeProvisionerRegistry string const ( // JivaVolumeProvisioner states Jiva as persistent volume provisioner plugin. // This is used for registering Jiva as a volume provisioner in maya api server. JivaVolumeProvisioner VolumeProvisionerRegistry = "jiva" // DefaultVolumeProvisioner provides the default persistent volume provisioner // plugin. DefaultVolumeProvisioner VolumeProvisionerRegistry = JivaVolumeProvisioner ) // OrchProviderProfileRegistry type will be used to register various maya api // service orchestrator profiles type OrchProviderProfileRegistry string const ( // This is the name of PVC as orchestration provider profile // This is used for labelling PVC as a orchestration provider profile PVCOrchestratorProfile OrchProviderProfileRegistry = "pvc" ) // VolumeProvisionerProfileRegistry type will be used to register various maya api service // persistent volume provisioner profiles type VolumeProvisionerProfileRegistry string const ( // This is the name of volume provisioner profile VolumeProvisionerProfile VolumeProvisionerProfileRegistry = "vol" ) type GenericAnnotations string const ( // VolumeProvisionerSelectorKey is used to filter VSMs VolumeProvisionerSelectorKey GenericAnnotations = "openebs/volume-provisioner" // ControllerSelectorKey is used to filter controllers ControllerSelectorKey GenericAnnotations = "openebs/controller" // ControllerSelectorKeyEquals is used to filter controller when // selector logic is used ControllerSelectorKeyEquals GenericAnnotations = ControllerSelectorKey + "=" // ReplicaCountSelectorKey is used to filter replicas //ReplicaCountSelectorKey GenericAnnotations = "openebs/replica-count" // ReplicaSelectorKey is used to filter replicas ReplicaSelectorKey GenericAnnotations = "openebs/replica" // ReplicaSelectorKeyEquals is used to filter replica when // selector logic is used ReplicaSelectorKeyEquals GenericAnnotations = ReplicaSelectorKey + "=" // ServiceSelectorKey is used to filter services ServiceSelectorKey GenericAnnotations = "openebs/controller-service" // ServiceSelectorKeyEquals is used to filter services when selector logic is // used ServiceSelectorKeyEquals GenericAnnotations = ServiceSelectorKey + "=" // SelectorEquals is used to filter SelectorEquals GenericAnnotations = "=" // VSMSelectorKey is used to filter vsm VSMSelectorKey GenericAnnotations = "vsm" // VSMSelectorKeyEquals is used to filter vsm when selector logic is used VSMSelectorKeyEquals GenericAnnotations = VSMSelectorKey + "=" // ControllerSuffix is used as a suffix for controller related names ControllerSuffix GenericAnnotations = "-ctrl" // ReplicaSuffix is used as a suffix for replica related names ReplicaSuffix GenericAnnotations = "-rep" // ServiceSuffix is used as a suffix for service related names ServiceSuffix GenericAnnotations = "-svc" // ContainerSuffix is used as a suffix for container related names ContainerSuffix GenericAnnotations = "-con" ) // TODO // Move these to jiva folder // // JivaAnnotations will be used to provide filtering options like // named-labels, named-suffix, named-prefix, constants, etc. // // NOTE: // These value(s) are generally used / remembered by the consumers of // maya api service type JivaAnnotations string // TODO // Rename these const s.t. 
they start with Jiva as Key Word const ( // JivaVolumeProvisionerSelectorValue is used to filter jiva based objects JivaVolumeProvisionerSelectorValue JivaAnnotations = "jiva" // JivaControllerSelectorValue is used to filter jiva controller objects JivaControllerSelectorValue JivaAnnotations = "jiva-controller" // JivaReplicaSelectorValue is used to filter jiva replica objects JivaReplicaSelectorValue JivaAnnotations = "jiva-replica" // JivaServiceSelectorValue is used to filter jiva service objects JivaServiceSelectorValue JivaAnnotations = "jiva-controller-service" // PortNameISCSI is the name given to iscsi ports PortNameISCSI JivaAnnotations = "iscsi" // PortNameAPI is the name given to api ports PortNameAPI JivaAnnotations = "api" // JivaCtrlIPHolder is used as a placeholder for persistent volume controller's // IP address // // NOTE: // This is replaced at runtime JivaClusterIPHolder JivaAnnotations = "__CLUSTER_IP__" // JivaStorageSizeHolder is used as a placeholder for persistent volume's // storage capacity // // NOTE: // This is replaced at runtime JivaStorageSizeHolder JivaAnnotations = "__STOR_SIZE__" // JivaVolumeNameHolder JivaAnnotations = "__VOLUME_NAME__" ) // JivaDefaults is a typed label to provide DEFAULT values to Jiva based // persistent volume properties type JivaDefaults string const ( // JivaControllerFrontendDef is used to provide default frontend for jiva // persistent volume controller JivaControllerFrontendDef JivaDefaults = "gotgt" // Jiva's iSCSI Qualified IQN value. JivaIqnFormatPrefix JivaDefaults = "iqn.2016-09.com.openebs.jiva" // JivaISCSIPortDef is used to provide default iscsi port value for jiva // based persistent volumes JivaISCSIPortDef JivaDefaults = "3260" // JivaPersistentMountPathDef is the default mount path used by jiva based // persistent volumes JivaPersistentMountPathDef JivaDefaults = "/openebs" // JivaPersistentMountNameDef is the default mount path name used by jiva based // persistent volumes JivaPersistentMountNameDef JivaDefaults = "openebs" // JivaAPIPortDef is used to provide management port for persistent volume // storage JivaAPIPortDef JivaDefaults = "9501" // JivaReplicaPortOneDef is used to provide port for jiva based persistent // volume replica JivaReplicaPortOneDef JivaDefaults = "9502" // JivaReplicaPortTwoDef is used to provide port for jiva based persistent // volume replica JivaReplicaPortTwoDef JivaDefaults = "9503" // JivaReplicaPortThreeDef is used to provide port for jiva based persistent // volume replica JivaReplicaPortThreeDef JivaDefaults = "9504" // JivaBackEndIPPrefixLbl is used to provide the label for VSM replica IP on // Nomad JivaBackEndIPPrefixLbl JivaDefaults = "JIVA_REP_IP_" ) // These will be used to provide array based constants that are // related to jiva volume provisioner var ( // JivaCtrlCmd is the command used to start jiva controller JivaCtrlCmd = []string{"launch"} // JivaCtrlArgs is the set of arguments provided to JivaCtrlCmd //JivaCtrlArgs = []string{"controller", "--frontend", string(JivaControllerFrontendDef), string(JivaVolumeNameDef)} JivaCtrlArgs = []string{"controller", "--frontend", string(JivaControllerFrontendDef), "--clusterIP", string(JivaClusterIPHolder), string(JivaVolumeNameHolder)} // JivaReplicaCmd is the command used to start jiva replica JivaReplicaCmd = []string{"launch"} // JivaReplicaArgs is the set of arguments provided to JivaReplicaCmd JivaReplicaArgs = []string{"replica", "--frontendIP", string(JivaClusterIPHolder), "--size", string(JivaStorageSizeHolder), 
string(JivaPersistentMountPathDef)} ) // TODO // Move these to k8s folder // // K8sAnnotations will be used to provide string based constants that are // related to kubernetes as orchestration provider type K8sAnnotations string const ( // K8sKindPod is used to state the k8s Pod K8sKindPod K8sAnnotations = "Pod" // K8sKindDeployment is used to state the k8s Deployment K8sKindDeployment K8sAnnotations = "Deployment" // K8sKindService is used to state the k8s Service K8sKindService K8sAnnotations = "Service" // K8sServiceVersion is used to state the k8s Service version K8sServiceVersion K8sAnnotations = "v1" // K8sPodVersion is used to state the k8s Pod version K8sPodVersion K8sAnnotations = "v1" // K8sDeploymentVersion is used to state the k8s Deployment version K8sDeploymentVersion K8sAnnotations = "extensions/v1beta1" // K8sHostnameTopologyKey is used to specify the hostname as topology key K8sHostnameTopologyKey K8sAnnotations = "kubernetes.io/hostname" )
1
7,188
this should be controller-container-status
openebs-maya
go
@@ -16,10 +16,12 @@
 
 package azkaban.scheduler;
 
+import azkaban.utils.Utils;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.Date;
 
 import org.joda.time.DateTime;
 import org.joda.time.DateTimeZone;
1
/* * Copyright 2012 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.scheduler; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import org.joda.time.DateTime; import org.joda.time.DateTimeZone; import org.joda.time.Days; import org.joda.time.DurationFieldType; import org.joda.time.Hours; import org.joda.time.Minutes; import org.joda.time.Months; import org.joda.time.ReadablePeriod; import org.joda.time.Seconds; import org.joda.time.Weeks; import azkaban.executor.ExecutionOptions; import azkaban.sla.SlaOption; import azkaban.utils.Pair; public class Schedule { private int scheduleId; private int projectId; private String projectName; private String flowName; private long firstSchedTime; private DateTimeZone timezone; private long lastModifyTime; private ReadablePeriod period; private long nextExecTime; private String submitUser; private String status; private long submitTime; private boolean skipPastOccurrences = true; private ExecutionOptions executionOptions; private List<SlaOption> slaOptions; public Schedule(int scheduleId, int projectId, String projectName, String flowName, String status, long firstSchedTime, DateTimeZone timezone, ReadablePeriod period, long lastModifyTime, long nextExecTime, long submitTime, String submitUser) { this(scheduleId, projectId, projectName, flowName, status, firstSchedTime, timezone, period, lastModifyTime, nextExecTime, submitTime, submitUser, null, null); } public Schedule(int scheduleId, int projectId, String projectName, String flowName, String status, long firstSchedTime, String timezoneId, String period, long lastModifyTime, long nextExecTime, long submitTime, String submitUser, ExecutionOptions executionOptions, List<SlaOption> slaOptions) { this(scheduleId, projectId, projectName, flowName, status, firstSchedTime, DateTimeZone.forID(timezoneId), parsePeriodString(period), lastModifyTime, nextExecTime, submitTime, submitUser, executionOptions, slaOptions); } public Schedule(int scheduleId, int projectId, String projectName, String flowName, String status, long firstSchedTime, DateTimeZone timezone, ReadablePeriod period, long lastModifyTime, long nextExecTime, long submitTime, String submitUser, ExecutionOptions executionOptions, List<SlaOption> slaOptions) { this.scheduleId = scheduleId; this.projectId = projectId; this.projectName = projectName; this.flowName = flowName; this.firstSchedTime = firstSchedTime; this.timezone = timezone; this.lastModifyTime = lastModifyTime; this.period = period; this.nextExecTime = nextExecTime; this.submitUser = submitUser; this.status = status; this.submitTime = submitTime; this.executionOptions = executionOptions; this.slaOptions = slaOptions; } public ExecutionOptions getExecutionOptions() { return executionOptions; } public List<SlaOption> getSlaOptions() { return slaOptions; } public void setFlowOptions(ExecutionOptions executionOptions) { this.executionOptions = executionOptions; } public void setSlaOptions(List<SlaOption> slaOptions) { 
this.slaOptions = slaOptions; } public String getScheduleName() { return projectName + "." + flowName + " (" + projectId + ")"; } public String toString() { return projectName + "." + flowName + " (" + projectId + ")" + " to be run at (starting) " + new DateTime(firstSchedTime).toDateTimeISO() + " with recurring period of " + (period == null ? "non-recurring" : createPeriodString(period)); } public Pair<Integer, String> getScheduleIdentityPair() { return new Pair<Integer, String>(getProjectId(), getFlowName()); } public void setScheduleId(int scheduleId) { this.scheduleId = scheduleId; } public int getScheduleId() { return scheduleId; } public int getProjectId() { return projectId; } public String getProjectName() { return projectName; } public String getFlowName() { return flowName; } public long getFirstSchedTime() { return firstSchedTime; } public DateTimeZone getTimezone() { return timezone; } public long getLastModifyTime() { return lastModifyTime; } public ReadablePeriod getPeriod() { return period; } public long getNextExecTime() { return nextExecTime; } public String getSubmitUser() { return submitUser; } public String getStatus() { return status; } public long getSubmitTime() { return submitTime; } public boolean updateTime() { if (new DateTime(nextExecTime).isAfterNow()) { return true; } if (period != null) { DateTime nextTime = getNextRuntime(nextExecTime, timezone, period); this.nextExecTime = nextTime.getMillis(); return true; } return false; } public void setNextExecTime(long nextExecTime) { this.nextExecTime = nextExecTime; } private DateTime getNextRuntime(long scheduleTime, DateTimeZone timezone, ReadablePeriod period) { DateTime now = new DateTime(); DateTime date = new DateTime(scheduleTime).withZone(timezone); int count = 0; while (!now.isBefore(date)) { if (count > 100000) { throw new IllegalStateException( "100000 increments of period did not get to present time."); } if (period == null) { break; } else { date = date.plus(period); } count += 1; } return date; } public static ReadablePeriod parsePeriodString(String periodStr) { ReadablePeriod period; char periodUnit = periodStr.charAt(periodStr.length() - 1); if (periodUnit == 'n') { return null; } int periodInt = Integer.parseInt(periodStr.substring(0, periodStr.length() - 1)); switch (periodUnit) { case 'M': period = Months.months(periodInt); break; case 'w': period = Weeks.weeks(periodInt); break; case 'd': period = Days.days(periodInt); break; case 'h': period = Hours.hours(periodInt); break; case 'm': period = Minutes.minutes(periodInt); break; case 's': period = Seconds.seconds(periodInt); break; default: throw new IllegalArgumentException("Invalid schedule period unit '" + periodUnit); } return period; } public static String createPeriodString(ReadablePeriod period) { String periodStr = "n"; if (period == null) { return "n"; } if (period.get(DurationFieldType.months()) > 0) { int months = period.get(DurationFieldType.months()); periodStr = months + "M"; } else if (period.get(DurationFieldType.weeks()) > 0) { int weeks = period.get(DurationFieldType.weeks()); periodStr = weeks + "w"; } else if (period.get(DurationFieldType.days()) > 0) { int days = period.get(DurationFieldType.days()); periodStr = days + "d"; } else if (period.get(DurationFieldType.hours()) > 0) { int hours = period.get(DurationFieldType.hours()); periodStr = hours + "h"; } else if (period.get(DurationFieldType.minutes()) > 0) { int minutes = period.get(DurationFieldType.minutes()); periodStr = minutes + "m"; } else if 
(period.get(DurationFieldType.seconds()) > 0) { int seconds = period.get(DurationFieldType.seconds()); periodStr = seconds + "s"; } return periodStr; } public Map<String, Object> optionsToObject() { if (executionOptions != null) { HashMap<String, Object> schedObj = new HashMap<String, Object>(); if (executionOptions != null) { schedObj.put("executionOptions", executionOptions.toObject()); } if (slaOptions != null) { List<Object> slaOptionsObject = new ArrayList<Object>(); for (SlaOption sla : slaOptions) { slaOptionsObject.add(sla.toObject()); } schedObj.put("slaOptions", slaOptionsObject); } return schedObj; } return null; } @SuppressWarnings("unchecked") public void createAndSetScheduleOptions(Object obj) { HashMap<String, Object> schedObj = (HashMap<String, Object>) obj; if (schedObj.containsKey("executionOptions")) { ExecutionOptions execOptions = ExecutionOptions.createFromObject(schedObj.get("executionOptions")); this.executionOptions = execOptions; } else if (schedObj.containsKey("flowOptions")) { ExecutionOptions execOptions = ExecutionOptions.createFromObject(schedObj.get("flowOptions")); this.executionOptions = execOptions; execOptions.setConcurrentOption(ExecutionOptions.CONCURRENT_OPTION_SKIP); } else { this.executionOptions = new ExecutionOptions(); this.executionOptions .setConcurrentOption(ExecutionOptions.CONCURRENT_OPTION_SKIP); } if (schedObj.containsKey("slaOptions")) { List<Object> slaOptionsObject = (List<Object>) schedObj.get("slaOptions"); List<SlaOption> slaOptions = new ArrayList<SlaOption>(); for (Object slaObj : slaOptionsObject) { slaOptions.add(SlaOption.fromObject(slaObj)); } this.slaOptions = slaOptions; } } public boolean isRecurring() { return period == null ? false : true; } public boolean skipPastOccurrences() { return skipPastOccurrences; } }
1
11,364
Sort import. You can use IDE's organize import feature.
azkaban-azkaban
java
@@ -24,6 +24,7 @@ import org.openqa.selenium.WebDriver;
 import org.openqa.selenium.WebElement;
 
 import java.io.IOException;
+import java.io.UncheckedIOException;
 import java.net.URL;
 import java.nio.charset.StandardCharsets;
 import java.util.ArrayList;
1
// Licensed to the Software Freedom Conservancy (SFC) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The SFC licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. package com.thoughtworks.selenium.webdriven; import com.google.common.io.Resources; import org.openqa.selenium.JavascriptExecutor; import org.openqa.selenium.WebDriver; import org.openqa.selenium.WebElement; import java.io.IOException; import java.net.URL; import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.concurrent.ConcurrentHashMap; public class JavascriptLibrary { static final String PREFIX = "/" + JavascriptLibrary.class.getPackage() .getName().replace(".", "/") + "/"; private final ConcurrentHashMap<String, String> scripts = new ConcurrentHashMap<>(); private static final String injectableSelenium = "/com/thoughtworks/selenium/webdriven/injectableSelenium.js"; private static final String htmlUtils = "/com/thoughtworks/selenium/webdriven/htmlutils.js"; /** * Loads the named Selenium script and returns it wrapped in an anonymous function. * * @param name The script to load. * @return The loaded script wrapped in an anonymous function. */ public String getSeleniumScript(String name) { String rawFunction = readScript(PREFIX + name); return String.format("function() { return (%s).apply(null, arguments);}", rawFunction); } public void callEmbeddedSelenium(WebDriver driver, String functionName, WebElement element, Object... values) { List<Object> args = new ArrayList<>(); args.add(element); args.addAll(Arrays.asList(values)); String script = readScript(injectableSelenium) + "return browserbot." + functionName + ".apply(browserbot, arguments);"; ((JavascriptExecutor) driver).executeScript(script, args.toArray()); } public Object callEmbeddedHtmlUtils(WebDriver driver, String functionName, WebElement element, Object... values) { List<Object> args = new ArrayList<>(); args.add(element); args.addAll(Arrays.asList(values)); String script = readScript(htmlUtils) + "return htmlutils." + functionName + ".apply(htmlutils, arguments);"; return ((JavascriptExecutor) driver).executeScript(script, args.toArray()); } public Object executeScript(WebDriver driver, String script, Object... args) { if (driver instanceof JavascriptExecutor) { return ((JavascriptExecutor) driver).executeScript(script, args); } throw new UnsupportedOperationException( "The underlying WebDriver instance does not support executing javascript"); } private String readScript(String script) { return scripts.computeIfAbsent(script, this::readScriptImpl); } String readScriptImpl(String script) { URL url = getClass().getResource(script); if (url == null) { throw new RuntimeException("Cannot locate " + script); } try { return Resources.toString(url, StandardCharsets.UTF_8); } catch (IOException e) { throw new RuntimeException(e); } } }
1
19,388
Can you please revert changes to files in the `thoughtworks` package? This is legacy code and we will eventually phase out RC.
SeleniumHQ-selenium
rb
@@ -16,6 +16,7 @@ import (
 )
 
 func TestMiningPledgeSector(t *testing.T) {
+	t.Skip("Unskip when we have implemented production drand component and local drand network for functional tests")
 	tf.FunctionalTest(t)
 
 	ctx, cancel := context.WithCancel(context.Background())
1
package functional

import (
	"context"
	"os"
	"path/filepath"
	"testing"
	"time"

	"github.com/stretchr/testify/require"

	"github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/node"
	"github.com/filecoin-project/go-filecoin/internal/pkg/clock"
	tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags"
	gengen "github.com/filecoin-project/go-filecoin/tools/gengen/util"
)

func TestMiningPledgeSector(t *testing.T) {
	tf.FunctionalTest(t)

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	wd, _ := os.Getwd()
	genCfgPath := filepath.Join(wd, "..", "fixtures/setup.json")
	presealPath := filepath.Join(wd, "..", "fixtures/genesis-sectors")
	genTime := int64(1000000000)
	blockTime := 1 * time.Second
	fakeClock := clock.NewFake(time.Unix(genTime, 0))

	// Load genesis config fixture.
	genCfg := loadGenesisConfig(t, genCfgPath)
	genCfg.Miners = append(genCfg.Miners, &gengen.CreateStorageMinerConfig{
		Owner:      1,
		SectorSize: 2048,
	})
	seed := node.MakeChainSeed(t, genCfg)
	chainClock := clock.NewChainClockFromClock(uint64(genTime), blockTime, fakeClock)

	bootstrapMiner := makeNode(ctx, t, seed, chainClock)
	_, _, err := initNodeGenesisMiner(t, bootstrapMiner, seed, genCfg.Miners[0].Owner, presealPath, genCfg.Miners[0].SectorSize)
	require.NoError(t, err)

	newMiner := makeNode(ctx, t, seed, chainClock)
	seed.GiveKey(t, newMiner, 1)
	_, _ = seed.GiveMiner(t, newMiner, 1)

	err = bootstrapMiner.Start(ctx)
	require.NoError(t, err)
	err = newMiner.Start(ctx)
	require.NoError(t, err)
	defer bootstrapMiner.Stop(ctx)
	defer newMiner.Stop(ctx)

	node.ConnectNodes(t, newMiner, bootstrapMiner)

	// Have bootstrap miner mine continuously so newMiner's pledgeSector can put multiple messages on chain.
	go simulateBlockMining(ctx, t, fakeClock, blockTime, bootstrapMiner)

	err = newMiner.StorageMining.Start(ctx)
	require.NoError(t, err)

	err = newMiner.PieceManager().PledgeSector(ctx)
	require.NoError(t, err)

	// wait while checking to see if the new miner has added any sectors (indicating sealing was successful)
	for i := 0; i < 100; i++ {
		ts, err := newMiner.PorcelainAPI.ChainHead()
		require.NoError(t, err)

		maddr, err := newMiner.BlockMining.BlockMiningAPI.MinerAddress()
		require.NoError(t, err)

		status, err := newMiner.PorcelainAPI.MinerGetStatus(ctx, maddr, ts.Key())
		require.NoError(t, err)

		if status.SectorCount > 0 {
			return
		}
		time.Sleep(2 * time.Second)
	}
	t.Fatal("Did not add sectors in the allotted time")
}
1
23,520
This PR is off to a rough start. :)
filecoin-project-venus
go
@@ -380,7 +380,7 @@ function normalizeProxyConfiguration(config) {
     }
   } else if ('pac' === config.proxyType) {
     if (config.proxyAutoconfigUrl && !config.pacUrl) {
-      config.pacUrl = config.proxyAutoconfigUrl;
+      config.proxyAutoconfigUrl = config.proxyAutoconfigUrl;
     }
   }
   return config;
1
// Licensed to the Software Freedom Conservancy (SFC) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The SFC licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. /** * @fileoverview Defines the {@linkplain Driver WebDriver} client for Firefox. * Before using this module, you must download the latest * [geckodriver release] and ensure it can be found on your system [PATH]. * * Each FirefoxDriver instance will be created with an anonymous profile, * ensuring browser historys do not share session data (cookies, history, cache, * offline storage, etc.) * * __Customizing the Firefox Profile__ * * The {@linkplain Profile} class may be used to configure the browser profile * used with WebDriver, with functions to install additional * {@linkplain Profile#addExtension extensions}, configure browser * {@linkplain Profile#setPreference preferences}, and more. For example, you * may wish to include Firebug: * * const {Builder} = require('selenium-webdriver'); * const firefox = require('selenium-webdriver/firefox'); * * let profile = new firefox.Profile(); * profile.addExtension('/path/to/firebug.xpi'); * profile.setPreference('extensions.firebug.showChromeErrors', true); * * let options = new firefox.Options().setProfile(profile); * let driver = new Builder() * .forBrowser('firefox') * .setFirefoxOptions(options) * .build(); * * The {@linkplain Profile} class may also be used to configure WebDriver based * on a pre-existing browser profile: * * let profile = new firefox.Profile( * '/usr/local/home/bob/.mozilla/firefox/3fgog75h.testing'); * let options = new firefox.Options().setProfile(profile); * * The FirefoxDriver will _never_ modify a pre-existing profile; instead it will * create a copy for it to modify. By extension, there are certain browser * preferences that are required for WebDriver to function properly and they * will always be overwritten. * * __Using a Custom Firefox Binary__ * * On Windows and MacOS, the FirefoxDriver will search for Firefox in its * default installation location: * * - Windows: C:\Program Files and C:\Program Files (x86). * - MacOS: /Applications/Firefox.app * * For Linux, Firefox will always be located on the PATH: `$(where firefox)`. * * Several methods are provided for starting Firefox with a custom executable. * First, on Windows and MacOS, you may configure WebDriver to check the default * install location for a non-release channel. If the requested channel cannot * be found in its default location, WebDriver will fallback to searching your * PATH. _Note:_ on Linux, Firefox is _always_ located on your path, regardless * of the requested channel. 
* * const {Builder} = require('selenium-webdriver'); * const firefox = require('selenium-webdriver/firefox'); * * let options = new firefox.Options().setBinary(firefox.Channel.NIGHTLY); * let driver = new Builder() * .forBrowser('firefox') * .setFirefoxOptions(options) * .build(); * * On all platforms, you may configrue WebDriver to use a Firefox specific * executable: * * let options = new firefox.Options() * .setBinary('/my/firefox/install/dir/firefox-bin'); * * __Remote Testing__ * * You may customize the Firefox binary and profile when running against a * remote Selenium server. Your custom profile will be packaged as a zip and * transfered to the remote host for use. The profile will be transferred * _once for each new session_. The performance impact should be minimal if * you've only configured a few extra browser preferences. If you have a large * profile with several extensions, you should consider installing it on the * remote host and defining its path via the {@link Options} class. Custom * binaries are never copied to remote machines and must be referenced by * installation path. * * const {Builder} = require('selenium-webdriver'); * const firefox = require('selenium-webdriver/firefox'); * * let options = new firefox.Options() * .setProfile('/profile/path/on/remote/host') * .setBinary('/install/dir/on/remote/host/firefox-bin'); * * let driver = new Builder() * .forBrowser('firefox') * .usingServer('http://127.0.0.1:4444/wd/hub') * .setFirefoxOptions(options) * .build(); * * [geckodriver release]: https://github.com/mozilla/geckodriver/releases/ * [PATH]: http://en.wikipedia.org/wiki/PATH_%28variable%29 */ 'use strict'; const path = require('path'); const url = require('url'); const capabilities = require('../lib/capabilities'); const command = require('../lib/command'); const exec = require('../io/exec'); const http = require('../http'); const httpUtil = require('../http/util'); const io = require('../io'); const net = require('../net'); const portprober = require('../net/portprober'); const remote = require('../remote'); const webdriver = require('../lib/webdriver'); const {Profile} = require('./profile'); /** * Configuration options for the FirefoxDriver. */ class Options { constructor() { /** @private {./profile.Profile} */ this.profile_ = null; /** @private {(Channel|string|null)} */ this.binary_ = null; /** @private {!Array<string>} */ this.args_ = []; /** @private {?../lib/proxy.Config} */ this.proxy_ = null; } /** * Specify additional command line arguments that should be used when starting * the Firefox browser. * * @param {...(string|!Array<string>)} args The arguments to include. * @return {!Options} A self reference. */ addArguments(...args) { this.args_ = this.args_.concat(...args); return this; } /** * Configures the geckodriver to start Firefox in headless mode. * * @return {!Options} A self reference. */ headless() { return this.addArguments('-headless'); } /** * Sets the initial window size when running in * {@linkplain #headless headless} mode. * * @param {{width: number, height: number}} size The desired window size. * @return {!Options} A self reference. * @throws {TypeError} if width or height is unspecified, not a number, or * less than or equal to 0. */ windowSize({width, height}) { function checkArg(arg) { if (typeof arg !== 'number' || arg <= 0) { throw TypeError('Arguments must be {width, height} with numbers > 0'); } } checkArg(width); checkArg(height); return this.addArguments(`--window-size=${width},${height}`); } /** * Sets the profile to use. 
The profile may be specified as a * {@link Profile} object or as the path to an existing Firefox profile to use * as a template. * * @param {(string|!./profile.Profile)} profile The profile to use. * @return {!Options} A self reference. */ setProfile(profile) { if (typeof profile === 'string') { profile = new Profile(profile); } this.profile_ = profile; return this; } /** * Sets the binary to use. The binary may be specified as the path to a * Firefox executable or a desired release {@link Channel}. * * @param {(string|!Channel)} binary The binary to use. * @return {!Options} A self reference. * @throws {TypeError} If `binary` is an invalid type. */ setBinary(binary) { if (binary instanceof Channel || typeof binary === 'string') { this.binary_ = binary; return this; } throw TypeError('binary must be a string path or Channel object'); } /** * Sets the proxy to use. * * @param {../lib/proxy.Config} proxy The proxy configuration to use. * @return {!Options} A self reference. */ setProxy(proxy) { this.proxy_ = proxy; return this; } /** * Converts these options to a {@link capabilities.Capabilities} instance. * * @return {!capabilities.Capabilities} A new capabilities object. */ toCapabilities() { let caps = capabilities.Capabilities.firefox(); let firefoxOptions = {}; caps.set('moz:firefoxOptions', firefoxOptions); if (this.proxy_) { caps.set(capabilities.Capability.PROXY, this.proxy_); } if (this.args_.length) { firefoxOptions['args'] = this.args_.concat(); } if (this.binary_) { if (this.binary_ instanceof Channel) { firefoxOptions['binary'] = this.binary_.locate(); } else if (typeof this.binary_ === 'string') { firefoxOptions['binary'] = this.binary_; } } if (this.profile_) { // If the user specified a template directory or any extensions to // install, we need to encode the profile as a base64 string (which // requires writing it to disk first). Otherwise, if the user just // specified some custom preferences, we can send those directly. let profile = this.profile_; if (profile.getTemplateDir() || profile.getExtensions().length) { firefoxOptions['profile'] = profile.encode(); } else { let prefs = profile.getPreferences(); if (Object.keys(prefs).length) { firefoxOptions['prefs'] = prefs; } } } return caps; } } /** * Enum of available command contexts. * * Command contexts are specific to Marionette, and may be used with the * {@link #context=} method. Contexts allow you to direct all subsequent * commands to either "content" (default) or "chrome". The latter gives * you elevated security permissions. * * @enum {string} */ const Context = { CONTENT: "content", CHROME: "chrome", }; const GECKO_DRIVER_EXE = process.platform === 'win32' ? 'geckodriver.exe' : 'geckodriver'; /** * _Synchronously_ attempts to locate the geckodriver executable on the current * system. * * @return {?string} the located executable, or `null`. */ function locateSynchronously() { return io.findInPath(GECKO_DRIVER_EXE, true); } /** * @return {string} . * @throws {Error} */ function findGeckoDriver() { let exe = locateSynchronously(); if (!exe) { throw Error( 'The ' + GECKO_DRIVER_EXE + ' executable could not be found on the current ' + 'PATH. Please download the latest version from ' + 'https://github.com/mozilla/geckodriver/releases/ ' + 'and ensure it can be found on your PATH.'); } return exe; } /** * @param {string} file Path to the file to find, relative to the program files * root. * @return {!Promise<?string>} A promise for the located executable. 
* The promise will resolve to {@code null} if Firefox was not found. */ function findInProgramFiles(file) { let files = [ process.env['PROGRAMFILES'] || 'C:\\Program Files', process.env['PROGRAMFILES(X86)'] || 'C:\\Program Files (x86)' ].map(prefix => path.join(prefix, file)); return io.exists(files[0]).then(function(exists) { return exists ? files[0] : io.exists(files[1]).then(function(exists) { return exists ? files[1] : null; }); }); } function normalizeProxyConfiguration(config) { if ('manual' === config.proxyType) { if (config.ftpProxy && !config.ftpProxyPort) { let hostAndPort = net.splitHostAndPort(config.ftpProxy); config.ftpProxy = hostAndPort.host; config.ftpProxyPort = hostAndPort.port; } if (config.httpProxy && !config.httpProxyPort) { let hostAndPort = net.splitHostAndPort(config.httpProxy); config.httpProxy = hostAndPort.host; config.httpProxyPort = hostAndPort.port; } if (config.sslProxy && !config.sslProxyPort) { let hostAndPort = net.splitHostAndPort(config.sslProxy); config.sslProxy = hostAndPort.host; config.sslProxyPort = hostAndPort.port; } if (config.socksProxy && !config.socksProxyPort) { let hostAndPort = net.splitHostAndPort(config.socksProxy); config.socksProxy = hostAndPort.host; config.socksProxyPort = hostAndPort.port; } } else if ('pac' === config.proxyType) { if (config.proxyAutoconfigUrl && !config.pacUrl) { config.pacUrl = config.proxyAutoconfigUrl; } } return config; } /** @enum {string} */ const ExtensionCommand = { GET_CONTEXT: 'getContext', SET_CONTEXT: 'setContext', INSTALL_ADDON: 'install addon', UNINSTALL_ADDON: 'uninstall addon', }; /** * Creates a command executor with support for Marionette's custom commands. * @param {!Promise<string>} serverUrl The server's URL. * @return {!command.Executor} The new command executor. */ function createExecutor(serverUrl) { let client = serverUrl.then(url => new http.HttpClient(url)); let executor = new http.Executor(client); configureExecutor(executor); return executor; } /** * Configures the given executor with Firefox-specific commands. * @param {!http.Executor} executor the executor to configure. */ function configureExecutor(executor) { executor.defineCommand( ExtensionCommand.GET_CONTEXT, 'GET', '/session/:sessionId/moz/context'); executor.defineCommand( ExtensionCommand.SET_CONTEXT, 'POST', '/session/:sessionId/moz/context'); executor.defineCommand( ExtensionCommand.INSTALL_ADDON, 'POST', '/session/:sessionId/moz/addon/install'); executor.defineCommand( ExtensionCommand.UNINSTALL_ADDON, 'POST', '/session/:sessionId/moz/addon/uninstall'); } /** * Creates {@link selenium-webdriver/remote.DriverService} instances that manage * a [geckodriver](https://github.com/mozilla/geckodriver) server in a child * process. */ class ServiceBuilder extends remote.DriverService.Builder { /** * @param {string=} opt_exe Path to the server executable to use. If omitted, * the builder will attempt to locate the geckodriver on the system PATH. */ constructor(opt_exe) { super(opt_exe || findGeckoDriver()); this.setLoopback(true); // Required. } /** * Enables verbose logging. * * @param {boolean=} opt_trace Whether to enable trace-level logging. By * default, only debug logging is enabled. * @return {!ServiceBuilder} A self reference. */ enableVerboseLogging(opt_trace) { return this.addArguments(opt_trace ? '-vv' : '-v'); } } /** * A WebDriver client for Firefox. */ class Driver extends webdriver.WebDriver { /** * Creates a new Firefox session. 
* * @param {(Options|capabilities.Capabilities|Object)=} opt_config The * configuration options for this driver, specified as either an * {@link Options} or {@link capabilities.Capabilities}, or as a raw hash * object. * @param {(http.Executor|remote.DriverService)=} opt_executor Either a * pre-configured command executor to use for communicating with an * externally managed remote end (which is assumed to already be running), * or the `DriverService` to use to start the geckodriver in a child * process. * * If an executor is provided, care should e taken not to use reuse it with * other clients as its internal command mappings will be updated to support * Firefox-specific commands. * * _This parameter may only be used with Mozilla's GeckoDriver._ * * @throws {Error} If a custom command executor is provided and the driver is * configured to use the legacy FirefoxDriver from the Selenium project. * @return {!Driver} A new driver instance. */ static createSession(opt_config, opt_executor) { let caps; if (opt_config instanceof Options) { caps = opt_config.toCapabilities(); } else { caps = new capabilities.Capabilities(opt_config); } if (caps.has(capabilities.Capability.PROXY)) { let proxy = normalizeProxyConfiguration(caps.get(capabilities.Capability.PROXY)); caps.set(capabilities.Capability.PROXY, proxy); } let executor; let onQuit; if (opt_executor instanceof http.Executor) { executor = opt_executor; configureExecutor(executor); } else if (opt_executor instanceof remote.DriverService) { executor = createExecutor(opt_executor.start()); onQuit = () => opt_executor.kill(); } else { let service = new ServiceBuilder().build(); executor = createExecutor(service.start()); onQuit = () => service.kill(); } return /** @type {!Driver} */(super.createSession(executor, caps, onQuit)); } /** * This function is a no-op as file detectors are not supported by this * implementation. * @override */ setFileDetector() { } /** * Get the context that is currently in effect. * * @return {!Promise<Context>} Current context. */ getContext() { return this.execute(new command.Command(ExtensionCommand.GET_CONTEXT)); } /** * Changes target context for commands between chrome- and content. * * Changing the current context has a stateful impact on all subsequent * commands. The {@link Context.CONTENT} context has normal web * platform document permissions, as if you would evaluate arbitrary * JavaScript. The {@link Context.CHROME} context gets elevated * permissions that lets you manipulate the browser chrome itself, * with full access to the XUL toolkit. * * Use your powers wisely. * * @param {!Promise<void>} ctx The context to switch to. */ setContext(ctx) { return this.execute( new command.Command(ExtensionCommand.SET_CONTEXT) .setParameter("context", ctx)); } /** * Installs a new addon with the current session. This function will return an * ID that may later be used to {@linkplain #uninstallAddon uninstall} the * addon. * * * @param {string} path Path on the local filesystem to the web extension to * install. * @return {!Promise<string>} A promise that will resolve to an ID for the * newly installed addon. * @see #uninstallAddon */ async installAddon(path) { let buf = await io.read(path); return this.execute( new command.Command(ExtensionCommand.INSTALL_ADDON) .setParameter('addon', buf.toString('base64'))); } /** * Uninstalls an addon from the current browser session's profile. * * @param {(string|!Promise<string>)} id ID of the addon to uninstall. 
* @return {!Promise} A promise that will resolve when the operation has * completed. * @see #installAddon */ async uninstallAddon(id) { id = await Promise.resolve(id); return this.execute( new command.Command(ExtensionCommand.UNINSTALL_ADDON) .setParameter('id', id)); } } /** * Provides methods for locating the executable for a Firefox release channel * on Windows and MacOS. For other systems (i.e. Linux), Firefox will always * be located on the system PATH. * * @final */ class Channel { /** * @param {string} darwin The path to check when running on MacOS. * @param {string} win32 The path to check when running on Windows. */ constructor(darwin, win32) { /** @private @const */ this.darwin_ = darwin; /** @private @const */ this.win32_ = win32; /** @private {Promise<string>} */ this.found_ = null; } /** * Attempts to locate the Firefox executable for this release channel. This * will first check the default installation location for the channel before * checking the user's PATH. The returned promise will be rejected if Firefox * can not be found. * * @return {!Promise<string>} A promise for the location of the located * Firefox executable. */ locate() { if (this.found_) { return this.found_; } let found; switch (process.platform) { case 'darwin': found = io.exists(this.darwin_) .then(exists => exists ? this.darwin_ : io.findInPath('firefox')); break; case 'win32': found = findInProgramFiles(this.win32_) .then(found => found || io.findInPath('firefox.exe')); break; default: found = Promise.resolve(io.findInPath('firefox')); break; } this.found_ = found.then(found => { if (found) { // TODO: verify version info. return found; } throw Error('Could not locate Firefox on the current system'); }); return this.found_; } } /** * Firefox's developer channel. * @const * @see <https://www.mozilla.org/en-US/firefox/channel/desktop/#aurora> */ Channel.AURORA = new Channel( '/Applications/FirefoxDeveloperEdition.app/Contents/MacOS/firefox-bin', 'Firefox Developer Edition\\firefox.exe'); /** * Firefox's beta channel. Note this is provided mainly for convenience as * the beta channel has the same installation location as the main release * channel. * @const * @see <https://www.mozilla.org/en-US/firefox/channel/desktop/#beta> */ Channel.BETA = new Channel( '/Applications/Firefox.app/Contents/MacOS/firefox-bin', 'Mozilla Firefox\\firefox.exe'); /** * Firefox's release channel. * @const * @see <https://www.mozilla.org/en-US/firefox/desktop/> */ Channel.RELEASE = new Channel( '/Applications/Firefox.app/Contents/MacOS/firefox-bin', 'Mozilla Firefox\\firefox.exe'); /** * Firefox's nightly release channel. * @const * @see <https://www.mozilla.org/en-US/firefox/channel/desktop/#nightly> */ Channel.NIGHTLY = new Channel( '/Applications/FirefoxNightly.app/Contents/MacOS/firefox-bin', 'Nightly\\firefox.exe'); // PUBLIC API exports.Channel = Channel; exports.Context = Context; exports.Driver = Driver; exports.Options = Options; exports.Profile = Profile; exports.ServiceBuilder = ServiceBuilder; exports.locateSynchronously = locateSynchronously;
1
14,984
Actually, it looks like the entire `normalizeProxyConfiguration` function isn't needed anymore
SeleniumHQ-selenium
rb
@@ -32,6 +32,8 @@ </div> </div> -<ul id='breadcrumb-nav'> - <li><%= link_to "Back to main portal", carts_path %></li> -</ul> +<%- unless excluded_portal_link %> + <ul id='breadcrumb-nav'> + <li><%= link_to "Back to main portal", carts_path %></li> + </ul> +<%- end %>
1
<div id='communicart_header'> <div class='container'> <div id='header-identity'> <div id="communicart_logo">Communicart</div> <h1>Approval Portal</h1> </div> <ul class='header-nav'> <%- unless signed_in? %> <li> <%= link_to 'Sign in with MyUSA', "/auth/myusa" %> </li> <%- else %> <li> <%= session['user']['email'] %> <%= image_tag 'icon-user.png', alt: ' ' %> </li> <li> <%= link_to 'Logout', "/logout", method: :post, class: 'login-link' %> <%= image_tag 'icon-logout.png', alt: 'Logout' %> </li> <%- end %> </ul> <%- if signed_in? %> <div class="mycart-link"> <ul class="header-nav"> <li><%= link_to("My Requests", "/carts/index") %> <%= image_tag "img-cart.png", width: '18px', height:'16px' %></li> </ul> </div> <%- end %> </div> </div> <ul id='breadcrumb-nav'> <li><%= link_to "Back to main portal", carts_path %></li> </ul>
1
12,277
How about `unless current_path == carts_path`?
18F-C2
rb
@@ -63,7 +63,7 @@ # Copyright (c) 2021 Lorena B <[email protected]> # Copyright (c) 2021 David Liu <[email protected]> # Copyright (c) 2021 Andreas Finkler <[email protected]> -# Copyright (c) 2021 Or Bahari <[email protected]> +# Copyright (c) 2021-2022 Or Bahari <[email protected]> # Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html # For details: https://github.com/PyCQA/pylint/blob/main/LICENSE
1
# Copyright (c) 2006-2016 LOGILAB S.A. (Paris, FRANCE) <[email protected]> # Copyright (c) 2010 Daniel Harding <[email protected]> # Copyright (c) 2012-2014 Google, Inc. # Copyright (c) 2013-2020 Claudiu Popa <[email protected]> # Copyright (c) 2014 Brett Cannon <[email protected]> # Copyright (c) 2014 Arun Persaud <[email protected]> # Copyright (c) 2015 Nick Bastin <[email protected]> # Copyright (c) 2015 Michael Kefeder <[email protected]> # Copyright (c) 2015 Dmitry Pribysh <[email protected]> # Copyright (c) 2015 Stephane Wirtel <[email protected]> # Copyright (c) 2015 Cosmin Poieana <[email protected]> # Copyright (c) 2015 Florian Bruhin <[email protected]> # Copyright (c) 2015 Radu Ciorba <[email protected]> # Copyright (c) 2015 Ionel Cristian Maries <[email protected]> # Copyright (c) 2016, 2019 Ashley Whetter <[email protected]> # Copyright (c) 2016, 2018 Jakub Wilk <[email protected]> # Copyright (c) 2016-2017 Łukasz Rogalski <[email protected]> # Copyright (c) 2016 Glenn Matthews <[email protected]> # Copyright (c) 2016 Elias Dorneles <[email protected]> # Copyright (c) 2016 Yannack <[email protected]> # Copyright (c) 2016 Alex Jurkiewicz <[email protected]> # Copyright (c) 2017, 2019-2021 Pierre Sassoulas <[email protected]> # Copyright (c) 2017, 2019-2021 hippo91 <[email protected]> # Copyright (c) 2017 danields <[email protected]> # Copyright (c) 2017 Jacques Kvam <[email protected]> # Copyright (c) 2017 ttenhoeve-aa <[email protected]> # Copyright (c) 2018-2019, 2021 Nick Drozd <[email protected]> # Copyright (c) 2018-2019, 2021 Ville Skyttä <[email protected]> # Copyright (c) 2018 Sergei Lebedev <[email protected]> # Copyright (c) 2018 Lucas Cimon <[email protected]> # Copyright (c) 2018 ssolanki <[email protected]> # Copyright (c) 2018 Natalie Serebryakova <[email protected]> # Copyright (c) 2018 Sushobhit <[email protected]> # Copyright (c) 2018 SergeyKosarchuk <[email protected]> # Copyright (c) 2018 Steven M. 
Vascellaro <[email protected]> # Copyright (c) 2018 Mike Frysinger <[email protected]> # Copyright (c) 2018 Chris Lamb <[email protected]> # Copyright (c) 2018 glmdgrielson <[email protected]> # Copyright (c) 2019 Daniel Draper <[email protected]> # Copyright (c) 2019 Hugo van Kemenade <[email protected]> # Copyright (c) 2019 Niko Wenselowski <[email protected]> # Copyright (c) 2019 Nikita Sobolev <[email protected]> # Copyright (c) 2019 Oisín Moran <[email protected]> # Copyright (c) 2019 Fantix King <[email protected]> # Copyright (c) 2020 Peter Kolbus <[email protected]> # Copyright (c) 2020 ethan-leba <[email protected]> # Copyright (c) 2020 へーさん <[email protected]> # Copyright (c) 2020 Damien Baty <[email protected]> # Copyright (c) 2020 Ram Rachum <[email protected]> # Copyright (c) 2020 Anthony Sottile <[email protected]> # Copyright (c) 2020 bernie gray <[email protected]> # Copyright (c) 2020 Gabriel R Sezefredo <[email protected]> # Copyright (c) 2020 Benny <[email protected]> # Copyright (c) 2020 Anubhav <[email protected]> # Copyright (c) 2021 Daniël van Noord <[email protected]> # Copyright (c) 2021 Tushar Sadhwani <[email protected]> # Copyright (c) 2021 Tim Martin <[email protected]> # Copyright (c) 2021 Jaehoon Hwang <[email protected]> # Copyright (c) 2021 jaydesl <[email protected]> # Copyright (c) 2021 Marc Mueller <[email protected]> # Copyright (c) 2021 bot <[email protected]> # Copyright (c) 2021 Yilei "Dolee" Yang <[email protected]> # Copyright (c) 2021 Lorena B <[email protected]> # Copyright (c) 2021 David Liu <[email protected]> # Copyright (c) 2021 Andreas Finkler <[email protected]> # Copyright (c) 2021 Or Bahari <[email protected]> # Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html # For details: https://github.com/PyCQA/pylint/blob/main/LICENSE """basic checker for Python code""" import collections import itertools import re import sys from typing import TYPE_CHECKING, Any, Dict, Iterator, Optional, Pattern, cast import astroid from astroid import nodes from pylint import checkers, constants, interfaces from pylint import utils as lint_utils from pylint.checkers import utils from pylint.checkers.utils import ( infer_all, is_overload_stub, is_property_deleter, is_property_setter, ) from pylint.reporters.ureports import nodes as reporter_nodes from pylint.utils import LinterStats from pylint.utils.utils import get_global_option if TYPE_CHECKING: from pylint.lint import PyLinter if sys.version_info >= (3, 8): from typing import Literal else: from typing_extensions import Literal class NamingStyle: """It may seem counterintuitive that single naming style has multiple "accepted" forms of regular expressions, but we need to special-case stuff like dunder names in method names. 
""" ANY: Pattern[str] = re.compile(".*") CLASS_NAME_RGX: Pattern[str] = ANY MOD_NAME_RGX: Pattern[str] = ANY CONST_NAME_RGX: Pattern[str] = ANY COMP_VAR_RGX: Pattern[str] = ANY DEFAULT_NAME_RGX: Pattern[str] = ANY CLASS_ATTRIBUTE_RGX: Pattern[str] = ANY @classmethod def get_regex(cls, name_type): return { "module": cls.MOD_NAME_RGX, "const": cls.CONST_NAME_RGX, "class": cls.CLASS_NAME_RGX, "function": cls.DEFAULT_NAME_RGX, "method": cls.DEFAULT_NAME_RGX, "attr": cls.DEFAULT_NAME_RGX, "argument": cls.DEFAULT_NAME_RGX, "variable": cls.DEFAULT_NAME_RGX, "class_attribute": cls.CLASS_ATTRIBUTE_RGX, "class_const": cls.CONST_NAME_RGX, "inlinevar": cls.COMP_VAR_RGX, }[name_type] class SnakeCaseStyle(NamingStyle): """Regex rules for snake_case naming style.""" CLASS_NAME_RGX = re.compile(r"[^\W\dA-Z][^\WA-Z]+$") MOD_NAME_RGX = re.compile(r"[^\W\dA-Z][^\WA-Z]*$") CONST_NAME_RGX = re.compile(r"([^\W\dA-Z][^\WA-Z]*|__.*__)$") COMP_VAR_RGX = re.compile(r"[^\W\dA-Z][^\WA-Z]*$") DEFAULT_NAME_RGX = re.compile( r"([^\W\dA-Z][^\WA-Z]{2,}|_[^\WA-Z]*|__[^\WA-Z\d_][^\WA-Z]+__)$" ) CLASS_ATTRIBUTE_RGX = re.compile(r"([^\W\dA-Z][^\WA-Z]{2,}|__.*__)$") class CamelCaseStyle(NamingStyle): """Regex rules for camelCase naming style.""" CLASS_NAME_RGX = re.compile(r"[^\W\dA-Z][^\W_]+$") MOD_NAME_RGX = re.compile(r"[^\W\dA-Z][^\W_]*$") CONST_NAME_RGX = re.compile(r"([^\W\dA-Z][^\W_]*|__.*__)$") COMP_VAR_RGX = re.compile(r"[^\W\dA-Z][^\W_]*$") DEFAULT_NAME_RGX = re.compile(r"([^\W\dA-Z][^\W_]{2,}|__[^\W\dA-Z_]\w+__)$") CLASS_ATTRIBUTE_RGX = re.compile(r"([^\W\dA-Z][^\W_]{2,}|__.*__)$") class PascalCaseStyle(NamingStyle): """Regex rules for PascalCase naming style.""" CLASS_NAME_RGX = re.compile(r"[^\W\da-z][^\W_]+$") MOD_NAME_RGX = re.compile(r"[^\W\da-z][^\W_]+$") CONST_NAME_RGX = re.compile(r"([^\W\da-z][^\W_]*|__.*__)$") COMP_VAR_RGX = re.compile(r"[^\W\da-z][^\W_]+$") DEFAULT_NAME_RGX = re.compile(r"([^\W\da-z][^\W_]{2,}|__[^\W\dA-Z_]\w+__)$") CLASS_ATTRIBUTE_RGX = re.compile(r"[^\W\da-z][^\W_]{2,}$") class UpperCaseStyle(NamingStyle): """Regex rules for UPPER_CASE naming style.""" CLASS_NAME_RGX = re.compile(r"[^\W\da-z][^\Wa-z]+$") MOD_NAME_RGX = re.compile(r"[^\W\da-z][^\Wa-z]+$") CONST_NAME_RGX = re.compile(r"([^\W\da-z][^\Wa-z]*|__.*__)$") COMP_VAR_RGX = re.compile(r"[^\W\da-z][^\Wa-z]+$") DEFAULT_NAME_RGX = re.compile(r"([^\W\da-z][^\Wa-z]{2,}|__[^\W\dA-Z_]\w+__)$") CLASS_ATTRIBUTE_RGX = re.compile(r"[^\W\da-z][^\Wa-z]{2,}$") class AnyStyle(NamingStyle): pass NAMING_STYLES = { "snake_case": SnakeCaseStyle, "camelCase": CamelCaseStyle, "PascalCase": PascalCaseStyle, "UPPER_CASE": UpperCaseStyle, "any": AnyStyle, } # do not require a doc string on private/system methods NO_REQUIRED_DOC_RGX = re.compile("^_") REVERSED_PROTOCOL_METHOD = "__reversed__" SEQUENCE_PROTOCOL_METHODS = ("__getitem__", "__len__") REVERSED_METHODS = (SEQUENCE_PROTOCOL_METHODS, (REVERSED_PROTOCOL_METHOD,)) TYPECHECK_COMPARISON_OPERATORS = frozenset(("is", "is not", "==", "!=")) LITERAL_NODE_TYPES = (nodes.Const, nodes.Dict, nodes.List, nodes.Set) UNITTEST_CASE = "unittest.case" TYPE_QNAME = "builtins.type" ABC_METACLASSES = {"_py_abc.ABCMeta", "abc.ABCMeta"} # Python 3.7+, # Name categories that are always consistent with all naming conventions. 
EXEMPT_NAME_CATEGORIES = {"exempt", "ignore"} # A mapping from qname -> symbol, to be used when generating messages # about dangerous default values as arguments DEFAULT_ARGUMENT_SYMBOLS = dict( zip( [".".join(["builtins", x]) for x in ("set", "dict", "list")], ["set()", "{}", "[]"], ), **{ x: f"{x}()" for x in ( "collections.deque", "collections.ChainMap", "collections.Counter", "collections.OrderedDict", "collections.defaultdict", "collections.UserDict", "collections.UserList", ) }, ) COMPARISON_OPERATORS = frozenset(("==", "!=", "<", ">", "<=", ">=")) # List of methods which can be redefined REDEFINABLE_METHODS = frozenset(("__module__",)) TYPING_FORWARD_REF_QNAME = "typing.ForwardRef" def _redefines_import(node): """Detect that the given node (AssignName) is inside an exception handler and redefines an import from the tryexcept body. Returns True if the node redefines an import, False otherwise. """ current = node while current and not isinstance(current.parent, nodes.ExceptHandler): current = current.parent if not current or not utils.error_of_type(current.parent, ImportError): return False try_block = current.parent.parent for import_node in try_block.nodes_of_class((nodes.ImportFrom, nodes.Import)): for name, alias in import_node.names: if alias: if alias == node.name: return True elif name == node.name: return True return False LOOPLIKE_NODES = ( nodes.For, nodes.ListComp, nodes.SetComp, nodes.DictComp, nodes.GeneratorExp, ) def in_loop(node: nodes.NodeNG) -> bool: """Return whether the node is inside a kind of for loop""" return any(isinstance(parent, LOOPLIKE_NODES) for parent in node.node_ancestors()) def in_nested_list(nested_list, obj): """return true if the object is an element of <nested_list> or of a nested list """ for elmt in nested_list: if isinstance(elmt, (list, tuple)): if in_nested_list(elmt, obj): return True elif elmt == obj: return True return False def _get_break_loop_node(break_node): """Returns the loop node that holds the break node in arguments. Args: break_node (astroid.Break): the break node of interest. Returns: astroid.For or astroid.While: the loop node holding the break node. """ loop_nodes = (nodes.For, nodes.While) parent = break_node.parent while not isinstance(parent, loop_nodes) or break_node in getattr( parent, "orelse", [] ): break_node = parent parent = parent.parent if parent is None: break return parent def _loop_exits_early(loop): """Returns true if a loop may end with a break statement. Args: loop (astroid.For, astroid.While): the loop node inspected. Returns: bool: True if the loop may end with a break statement, False otherwise. """ loop_nodes = (nodes.For, nodes.While) definition_nodes = (nodes.FunctionDef, nodes.ClassDef) inner_loop_nodes = [ _node for _node in loop.nodes_of_class(loop_nodes, skip_klass=definition_nodes) if _node != loop ] return any( _node for _node in loop.nodes_of_class(nodes.Break, skip_klass=definition_nodes) if _get_break_loop_node(_node) not in inner_loop_nodes ) def _is_multi_naming_match(match, node_type, confidence): return ( match is not None and match.lastgroup is not None and match.lastgroup not in EXEMPT_NAME_CATEGORIES and (node_type != "method" or confidence != interfaces.INFERENCE_FAILURE) ) BUILTIN_PROPERTY = "builtins.property" def _get_properties(config): """Returns a tuple of property classes and names. Property classes are fully qualified, such as 'abc.abstractproperty' and property names are the actual names, such as 'abstract_property'. 
""" property_classes = {BUILTIN_PROPERTY} property_names = set() # Not returning 'property', it has its own check. if config is not None: property_classes.update(config.property_classes) property_names.update( prop.rsplit(".", 1)[-1] for prop in config.property_classes ) return property_classes, property_names def _determine_function_name_type(node: nodes.FunctionDef, config=None): """Determine the name type whose regex the function's name should match. :param node: A function node. :param config: Configuration from which to pull additional property classes. :type config: :class:`optparse.Values` :returns: One of ('function', 'method', 'attr') :rtype: str """ property_classes, property_names = _get_properties(config) if not node.is_method(): return "function" if is_property_setter(node) or is_property_deleter(node): # If the function is decorated using the prop_method.{setter,getter} # form, treat it like an attribute as well. return "attr" decorators = node.decorators.nodes if node.decorators else [] for decorator in decorators: # If the function is a property (decorated with @property # or @abc.abstractproperty), the name type is 'attr'. if isinstance(decorator, nodes.Name) or ( isinstance(decorator, nodes.Attribute) and decorator.attrname in property_names ): inferred = utils.safe_infer(decorator) if ( inferred and hasattr(inferred, "qname") and inferred.qname() in property_classes ): return "attr" return "method" def _has_abstract_methods(node): """Determine if the given `node` has abstract methods. The methods should be made abstract by decorating them with `abc` decorators. """ return len(utils.unimplemented_abstract_methods(node)) > 0 def report_by_type_stats( sect, stats: LinterStats, old_stats: Optional[LinterStats], ): """make a report of * percentage of different types documented * percentage of different types with a bad name """ # percentage of different types documented and/or with a bad name nice_stats: Dict[str, Dict[str, str]] = {} for node_type in ("module", "class", "method", "function"): node_type = cast(Literal["function", "class", "method", "module"], node_type) total = stats.get_node_count(node_type) nice_stats[node_type] = {} if total != 0: undocumented_node = stats.get_undocumented(node_type) documented = total - undocumented_node percent = (documented * 100.0) / total nice_stats[node_type]["percent_documented"] = f"{percent:.2f}" badname_node = stats.get_bad_names(node_type) percent = (badname_node * 100.0) / total nice_stats[node_type]["percent_badname"] = f"{percent:.2f}" lines = ["type", "number", "old number", "difference", "%documented", "%badname"] for node_type in ("module", "class", "method", "function"): node_type = cast(Literal["function", "class", "method", "module"], node_type) new = stats.get_node_count(node_type) old = old_stats.get_node_count(node_type) if old_stats else None diff_str = lint_utils.diff_string(old, new) if old else None lines += [ node_type, str(new), str(old) if old else "NC", diff_str if diff_str else "NC", nice_stats[node_type].get("percent_documented", "0"), nice_stats[node_type].get("percent_badname", "0"), ] sect.append(reporter_nodes.Table(children=lines, cols=6, rheaders=1)) def redefined_by_decorator(node): """return True if the object is a method redefined via decorator. 
For example: @property def x(self): return self._x @x.setter def x(self, value): self._x = value """ if node.decorators: for decorator in node.decorators.nodes: if ( isinstance(decorator, nodes.Attribute) and getattr(decorator.expr, "name", None) == node.name ): return True return False class _BasicChecker(checkers.BaseChecker): __implements__ = interfaces.IAstroidChecker name = "basic" class BasicErrorChecker(_BasicChecker): msgs = { "E0100": ( "__init__ method is a generator", "init-is-generator", "Used when the special class method __init__ is turned into a " "generator by a yield in its body.", ), "E0101": ( "Explicit return in __init__", "return-in-init", "Used when the special class method __init__ has an explicit " "return value.", ), "E0102": ( "%s already defined line %s", "function-redefined", "Used when a function / class / method is redefined.", ), "E0103": ( "%r not properly in loop", "not-in-loop", "Used when break or continue keywords are used outside a loop.", ), "E0104": ( "Return outside function", "return-outside-function", 'Used when a "return" statement is found outside a function or method.', ), "E0105": ( "Yield outside function", "yield-outside-function", 'Used when a "yield" statement is found outside a function or method.', ), "E0106": ( "Return with argument inside generator", "return-arg-in-generator", 'Used when a "return" statement with an argument is found ' "outside in a generator function or method (e.g. with some " '"yield" statements).', {"maxversion": (3, 3)}, ), "E0107": ( "Use of the non-existent %s operator", "nonexistent-operator", "Used when you attempt to use the C-style pre-increment or " "pre-decrement operator -- and ++, which doesn't exist in Python.", ), "E0108": ( "Duplicate argument name %s in function definition", "duplicate-argument-name", "Duplicate argument names in function definitions are syntax errors.", ), "E0110": ( "Abstract class %r with abstract methods instantiated", "abstract-class-instantiated", "Used when an abstract class with `abc.ABCMeta` as metaclass " "has abstract methods and is instantiated.", ), "W0120": ( "Else clause on loop without a break statement", "useless-else-on-loop", "Loops should only have an else clause if they can exit early " "with a break statement, otherwise the statements under else " "should be on the same scope as the loop itself.", ), "E0112": ( "More than one starred expression in assignment", "too-many-star-expressions", "Emitted when there are more than one starred " "expressions (`*x`) in an assignment. 
This is a SyntaxError.", ), "E0113": ( "Starred assignment target must be in a list or tuple", "invalid-star-assignment-target", "Emitted when a star expression is used as a starred assignment target.", ), "E0114": ( "Can use starred expression only in assignment target", "star-needs-assignment-target", "Emitted when a star expression is not used in an assignment target.", ), "E0115": ( "Name %r is nonlocal and global", "nonlocal-and-global", "Emitted when a name is both nonlocal and global.", ), "E0116": ( "'continue' not supported inside 'finally' clause", "continue-in-finally", "Emitted when the `continue` keyword is found " "inside a finally clause, which is a SyntaxError.", {"maxversion": (3, 8)}, ), "E0117": ( "nonlocal name %s found without binding", "nonlocal-without-binding", "Emitted when a nonlocal variable does not have an attached " "name somewhere in the parent scopes", ), "E0118": ( "Name %r is used prior to global declaration", "used-prior-global-declaration", "Emitted when a name is used prior a global declaration, " "which results in an error since Python 3.6.", {"minversion": (3, 6)}, ), } @utils.check_messages("function-redefined") def visit_classdef(self, node: nodes.ClassDef) -> None: self._check_redefinition("class", node) def _too_many_starred_for_tuple(self, assign_tuple): starred_count = 0 for elem in assign_tuple.itered(): if isinstance(elem, nodes.Tuple): return self._too_many_starred_for_tuple(elem) if isinstance(elem, nodes.Starred): starred_count += 1 return starred_count > 1 @utils.check_messages("too-many-star-expressions", "invalid-star-assignment-target") def visit_assign(self, node: nodes.Assign) -> None: # Check *a, *b = ... assign_target = node.targets[0] # Check *a = b if isinstance(node.targets[0], nodes.Starred): self.add_message("invalid-star-assignment-target", node=node) if not isinstance(assign_target, nodes.Tuple): return if self._too_many_starred_for_tuple(assign_target): self.add_message("too-many-star-expressions", node=node) @utils.check_messages("star-needs-assignment-target") def visit_starred(self, node: nodes.Starred) -> None: """Check that a Starred expression is used in an assignment target.""" if isinstance(node.parent, nodes.Call): # f(*args) is converted to Call(args=[Starred]), so ignore # them for this check. return if isinstance(node.parent, (nodes.List, nodes.Tuple, nodes.Set, nodes.Dict)): # PEP 448 unpacking. 
return stmt = node.statement(future=True) if not isinstance(stmt, nodes.Assign): return if stmt.value is node or stmt.value.parent_of(node): self.add_message("star-needs-assignment-target", node=node) @utils.check_messages( "init-is-generator", "return-in-init", "function-redefined", "return-arg-in-generator", "duplicate-argument-name", "nonlocal-and-global", "used-prior-global-declaration", ) def visit_functiondef(self, node: nodes.FunctionDef) -> None: self._check_nonlocal_and_global(node) self._check_name_used_prior_global(node) if not redefined_by_decorator( node ) and not utils.is_registered_in_singledispatch_function(node): self._check_redefinition(node.is_method() and "method" or "function", node) # checks for max returns, branch, return in __init__ returns = node.nodes_of_class( nodes.Return, skip_klass=(nodes.FunctionDef, nodes.ClassDef) ) if node.is_method() and node.name == "__init__": if node.is_generator(): self.add_message("init-is-generator", node=node) else: values = [r.value for r in returns] # Are we returning anything but None from constructors if any(v for v in values if not utils.is_none(v)): self.add_message("return-in-init", node=node) # Check for duplicate names by clustering args with same name for detailed report arg_clusters = collections.defaultdict(list) arguments: Iterator[Any] = filter(None, [node.args.args, node.args.kwonlyargs]) for arg in itertools.chain.from_iterable(arguments): arg_clusters[arg.name].append(arg) # provide detailed report about each repeated argument for argument_duplicates in arg_clusters.values(): if len(argument_duplicates) != 1: for argument in argument_duplicates: self.add_message( "duplicate-argument-name", line=argument.lineno, node=argument, args=(argument.name,), ) visit_asyncfunctiondef = visit_functiondef def _check_name_used_prior_global(self, node): scope_globals = { name: child for child in node.nodes_of_class(nodes.Global) for name in child.names if child.scope() is node } if not scope_globals: return for node_name in node.nodes_of_class(nodes.Name): if node_name.scope() is not node: continue name = node_name.name corresponding_global = scope_globals.get(name) if not corresponding_global: continue global_lineno = corresponding_global.fromlineno if global_lineno and global_lineno > node_name.fromlineno: self.add_message( "used-prior-global-declaration", node=node_name, args=(name,) ) def _check_nonlocal_and_global(self, node): """Check that a name is both nonlocal and global.""" def same_scope(current): return current.scope() is node from_iter = itertools.chain.from_iterable nonlocals = set( from_iter( child.names for child in node.nodes_of_class(nodes.Nonlocal) if same_scope(child) ) ) if not nonlocals: return global_vars = set( from_iter( child.names for child in node.nodes_of_class(nodes.Global) if same_scope(child) ) ) for name in nonlocals.intersection(global_vars): self.add_message("nonlocal-and-global", args=(name,), node=node) @utils.check_messages("return-outside-function") def visit_return(self, node: nodes.Return) -> None: if not isinstance(node.frame(future=True), nodes.FunctionDef): self.add_message("return-outside-function", node=node) @utils.check_messages("yield-outside-function") def visit_yield(self, node: nodes.Yield) -> None: self._check_yield_outside_func(node) @utils.check_messages("yield-outside-function") def visit_yieldfrom(self, node: nodes.YieldFrom) -> None: self._check_yield_outside_func(node) @utils.check_messages("not-in-loop", "continue-in-finally") def visit_continue(self, node: 
nodes.Continue) -> None: self._check_in_loop(node, "continue") @utils.check_messages("not-in-loop") def visit_break(self, node: nodes.Break) -> None: self._check_in_loop(node, "break") @utils.check_messages("useless-else-on-loop") def visit_for(self, node: nodes.For) -> None: self._check_else_on_loop(node) @utils.check_messages("useless-else-on-loop") def visit_while(self, node: nodes.While) -> None: self._check_else_on_loop(node) @utils.check_messages("nonexistent-operator") def visit_unaryop(self, node: nodes.UnaryOp) -> None: """Check use of the non-existent ++ and -- operators""" if ( (node.op in "+-") and isinstance(node.operand, nodes.UnaryOp) and (node.operand.op == node.op) ): self.add_message("nonexistent-operator", node=node, args=node.op * 2) def _check_nonlocal_without_binding(self, node, name): current_scope = node.scope() while True: if current_scope.parent is None: break if not isinstance(current_scope, (nodes.ClassDef, nodes.FunctionDef)): self.add_message("nonlocal-without-binding", args=(name,), node=node) return if name not in current_scope.locals: current_scope = current_scope.parent.scope() continue # Okay, found it. return if not isinstance(current_scope, nodes.FunctionDef): self.add_message("nonlocal-without-binding", args=(name,), node=node) @utils.check_messages("nonlocal-without-binding") def visit_nonlocal(self, node: nodes.Nonlocal) -> None: for name in node.names: self._check_nonlocal_without_binding(node, name) @utils.check_messages("abstract-class-instantiated") def visit_call(self, node: nodes.Call) -> None: """Check instantiating abstract class with abc.ABCMeta as metaclass. """ for inferred in infer_all(node.func): self._check_inferred_class_is_abstract(inferred, node) def _check_inferred_class_is_abstract(self, inferred, node): if not isinstance(inferred, nodes.ClassDef): return klass = utils.node_frame_class(node) if klass is inferred: # Don't emit the warning if the class is instantiated # in its own body or if the call is not an instance # creation. If the class is instantiated into its own # body, we're expecting that it knows what it is doing. return # __init__ was called abstract_methods = _has_abstract_methods(inferred) if not abstract_methods: return metaclass = inferred.metaclass() if metaclass is None: # Python 3.4 has `abc.ABC`, which won't be detected # by ClassNode.metaclass() for ancestor in inferred.ancestors(): if ancestor.qname() == "abc.ABC": self.add_message( "abstract-class-instantiated", args=(inferred.name,), node=node ) break return if metaclass.qname() in ABC_METACLASSES: self.add_message( "abstract-class-instantiated", args=(inferred.name,), node=node ) def _check_yield_outside_func(self, node): if not isinstance(node.frame(future=True), (nodes.FunctionDef, nodes.Lambda)): self.add_message("yield-outside-function", node=node) def _check_else_on_loop(self, node): """Check that any loop with an else clause has a break statement.""" if node.orelse and not _loop_exits_early(node): self.add_message( "useless-else-on-loop", node=node, # This is not optimal, but the line previous # to the first statement in the else clause # will usually be the one that contains the else:. 
line=node.orelse[0].lineno - 1, ) def _check_in_loop(self, node, node_name): """check that a node is inside a for or while loop""" for parent in node.node_ancestors(): if isinstance(parent, (nodes.For, nodes.While)): if node not in parent.orelse: return if isinstance(parent, (nodes.ClassDef, nodes.FunctionDef)): break if ( isinstance(parent, nodes.TryFinally) and node in parent.finalbody and isinstance(node, nodes.Continue) ): self.add_message("continue-in-finally", node=node) self.add_message("not-in-loop", node=node, args=node_name) def _check_redefinition(self, redeftype, node): """check for redefinition of a function / method / class name""" parent_frame = node.parent.frame(future=True) # Ignore function stubs created for type information redefinitions = [ i for i in parent_frame.locals[node.name] if not (isinstance(i.parent, nodes.AnnAssign) and i.parent.simple) ] defined_self = next( (local for local in redefinitions if not utils.is_overload_stub(local)), node, ) if defined_self is not node and not astroid.are_exclusive(node, defined_self): # Additional checks for methods which are not considered # redefined, since they are already part of the base API. if ( isinstance(parent_frame, nodes.ClassDef) and node.name in REDEFINABLE_METHODS ): return # Skip typing.overload() functions. if utils.is_overload_stub(node): return # Exempt functions redefined on a condition. if isinstance(node.parent, nodes.If): # Exempt "if not <func>" cases if ( isinstance(node.parent.test, nodes.UnaryOp) and node.parent.test.op == "not" and isinstance(node.parent.test.operand, nodes.Name) and node.parent.test.operand.name == node.name ): return # Exempt "if <func> is not None" cases # pylint: disable=too-many-boolean-expressions if ( isinstance(node.parent.test, nodes.Compare) and isinstance(node.parent.test.left, nodes.Name) and node.parent.test.left.name == node.name and node.parent.test.ops[0][0] == "is" and isinstance(node.parent.test.ops[0][1], nodes.Const) and node.parent.test.ops[0][1].value is None ): return # Check if we have forward references for this node. 
try: redefinition_index = redefinitions.index(node) except ValueError: pass else: for redefinition in redefinitions[:redefinition_index]: inferred = utils.safe_infer(redefinition) if ( inferred and isinstance(inferred, astroid.Instance) and inferred.qname() == TYPING_FORWARD_REF_QNAME ): return dummy_variables_rgx = lint_utils.get_global_option( self, "dummy-variables-rgx", default=None ) if dummy_variables_rgx and dummy_variables_rgx.match(node.name): return self.add_message( "function-redefined", node=node, args=(redeftype, defined_self.fromlineno), ) class BasicChecker(_BasicChecker): """checks for : * doc strings * number of arguments, local variables, branches, returns and statements in functions, methods * required module attributes * dangerous default values as arguments * redefinition of function / method / class * uses of the global statement """ __implements__ = interfaces.IAstroidChecker name = "basic" msgs = { "W0101": ( "Unreachable code", "unreachable", 'Used when there is some code behind a "return" or "raise" ' "statement, which will never be accessed.", ), "W0102": ( "Dangerous default value %s as argument", "dangerous-default-value", "Used when a mutable value as list or dictionary is detected in " "a default value for an argument.", ), "W0104": ( "Statement seems to have no effect", "pointless-statement", "Used when a statement doesn't have (or at least seems to) any effect.", ), "W0105": ( "String statement has no effect", "pointless-string-statement", "Used when a string is used as a statement (which of course " "has no effect). This is a particular case of W0104 with its " "own message so you can easily disable it if you're using " "those strings as documentation, instead of comments.", ), "W0106": ( 'Expression "%s" is assigned to nothing', "expression-not-assigned", "Used when an expression that is not a function call is assigned " "to nothing. Probably something else was intended.", ), "W0108": ( "Lambda may not be necessary", "unnecessary-lambda", "Used when the body of a lambda expression is a function call " "on the same argument list as the lambda itself; such lambda " "expressions are in all but a few cases replaceable with the " "function being called in the body of the lambda.", ), "W0109": ( "Duplicate key %r in dictionary", "duplicate-key", "Used when a dictionary expression binds the same key multiple times.", ), "W0122": ( "Use of exec", "exec-used", 'Used when you use the "exec" statement (function for Python ' "3), to discourage its usage. That doesn't " "mean you cannot use it !", ), "W0123": ( "Use of eval", "eval-used", 'Used when you use the "eval" function, to discourage its ' "usage. Consider using `ast.literal_eval` for safely evaluating " "strings containing Python expressions " "from untrusted sources. ", ), "W0150": ( "%s statement in finally block may swallow exception", "lost-exception", "Used when a break or a return statement is found inside the " "finally clause of a try...finally block: the exceptions raised " "in the try clause will be silently swallowed instead of being " "re-raised.", ), "W0199": ( "Assert called on a 2-item-tuple. 
Did you mean 'assert x,y'?", "assert-on-tuple", "A call of assert on a tuple will always evaluate to true if " "the tuple is not empty, and will always evaluate to false if " "it is.", ), "W0124": ( 'Following "as" with another context manager looks like a tuple.', "confusing-with-statement", "Emitted when a `with` statement component returns multiple values " "and uses name binding with `as` only for a part of those values, " "as in with ctx() as a, b. This can be misleading, since it's not " "clear if the context manager returns a tuple or if the node without " "a name binding is another context manager.", ), "W0125": ( "Using a conditional statement with a constant value", "using-constant-test", "Emitted when a conditional statement (If or ternary if) " "uses a constant value for its test. This might not be what " "the user intended to do.", ), "W0126": ( "Using a conditional statement with potentially wrong function or method call due to missing parentheses", "missing-parentheses-for-call-in-test", "Emitted when a conditional statement (If or ternary if) " "seems to wrongly call a function due to missing parentheses", ), "W0127": ( "Assigning the same variable %r to itself", "self-assigning-variable", "Emitted when we detect that a variable is assigned to itself", ), "W0128": ( "Redeclared variable %r in assignment", "redeclared-assigned-name", "Emitted when we detect that a variable was redeclared in the same assignment.", ), "E0111": ( "The first reversed() argument is not a sequence", "bad-reversed-sequence", "Used when the first argument to reversed() builtin " "isn't a sequence (does not implement __reversed__, " "nor __getitem__ and __len__", ), "E0119": ( "format function is not called on str", "misplaced-format-function", "Emitted when format function is not called on str object. " 'e.g doing print("value: {}").format(123) instead of ' 'print("value: {}".format(123)). This might not be what the user ' "intended to do.", ), "W0129": ( "Assert statement has a string literal as its first argument. 
The assert will %s fail.", "assert-on-string-literal", "Used when an assert statement has a string literal as its first argument, which will " "cause the assert to always pass.", ), } reports = (("RP0101", "Statistics by type", report_by_type_stats),) def __init__(self, linter): super().__init__(linter) self._tryfinallys = None def open(self): """initialize visit variables and statistics""" py_version = get_global_option(self, "py-version") self._py38_plus = py_version >= (3, 8) self._tryfinallys = [] self.linter.stats.reset_node_count() @utils.check_messages("using-constant-test", "missing-parentheses-for-call-in-test") def visit_if(self, node: nodes.If) -> None: self._check_using_constant_test(node, node.test) @utils.check_messages("using-constant-test", "missing-parentheses-for-call-in-test") def visit_ifexp(self, node: nodes.IfExp) -> None: self._check_using_constant_test(node, node.test) @utils.check_messages("using-constant-test", "missing-parentheses-for-call-in-test") def visit_comprehension(self, node: nodes.Comprehension) -> None: if node.ifs: for if_test in node.ifs: self._check_using_constant_test(node, if_test) def _check_using_constant_test(self, node, test): const_nodes = ( nodes.Module, nodes.GeneratorExp, nodes.Lambda, nodes.FunctionDef, nodes.ClassDef, astroid.bases.Generator, astroid.UnboundMethod, astroid.BoundMethod, nodes.Module, ) structs = (nodes.Dict, nodes.Tuple, nodes.Set, nodes.List) # These nodes are excepted, since they are not constant # values, requiring a computation to happen. except_nodes = ( nodes.Call, nodes.BinOp, nodes.BoolOp, nodes.UnaryOp, nodes.Subscript, ) inferred = None emit = isinstance(test, (nodes.Const,) + structs + const_nodes) if not isinstance(test, except_nodes): inferred = utils.safe_infer(test) if emit: self.add_message("using-constant-test", node=node) elif isinstance(inferred, const_nodes): # If the constant node is a FunctionDef or Lambda then # it may be an illicit function call due to missing parentheses call_inferred = None try: if isinstance(inferred, nodes.FunctionDef): call_inferred = inferred.infer_call_result() elif isinstance(inferred, nodes.Lambda): call_inferred = inferred.infer_call_result(node) except astroid.InferenceError: call_inferred = None if call_inferred: try: for inf_call in call_inferred: if inf_call != astroid.Uninferable: self.add_message( "missing-parentheses-for-call-in-test", node=node ) break except astroid.InferenceError: pass self.add_message("using-constant-test", node=node) def visit_module(self, _: nodes.Module) -> None: """check module name, docstring and required arguments""" self.linter.stats.node_count["module"] += 1 def visit_classdef(self, _: nodes.ClassDef) -> None: """check module name, docstring and redefinition increment branch counter """ self.linter.stats.node_count["klass"] += 1 @utils.check_messages( "pointless-statement", "pointless-string-statement", "expression-not-assigned" ) def visit_expr(self, node: nodes.Expr) -> None: """Check for various kind of statements without effect""" expr = node.value if isinstance(expr, nodes.Const) and isinstance(expr.value, str): # treat string statement in a separated message # Handle PEP-257 attribute docstrings. # An attribute docstring is defined as being a string right after # an assignment at the module level, class level or __init__ level. 
scope = expr.scope() if isinstance(scope, (nodes.ClassDef, nodes.Module, nodes.FunctionDef)): if isinstance(scope, nodes.FunctionDef) and scope.name != "__init__": pass else: sibling = expr.previous_sibling() if ( sibling is not None and sibling.scope() is scope and isinstance(sibling, (nodes.Assign, nodes.AnnAssign)) ): return self.add_message("pointless-string-statement", node=node) return # Ignore if this is : # * a direct function call # * the unique child of a try/except body # * a yield statement # * an ellipsis (which can be used on Python 3 instead of pass) # warn W0106 if we have any underlying function call (we can't predict # side effects), else pointless-statement if ( isinstance(expr, (nodes.Yield, nodes.Await, nodes.Call)) or (isinstance(node.parent, nodes.TryExcept) and node.parent.body == [node]) or (isinstance(expr, nodes.Const) and expr.value is Ellipsis) ): return if any(expr.nodes_of_class(nodes.Call)): self.add_message( "expression-not-assigned", node=node, args=expr.as_string() ) else: self.add_message("pointless-statement", node=node) @staticmethod def _filter_vararg(node, call_args): # Return the arguments for the given call which are # not passed as vararg. for arg in call_args: if isinstance(arg, nodes.Starred): if ( isinstance(arg.value, nodes.Name) and arg.value.name != node.args.vararg ): yield arg else: yield arg @staticmethod def _has_variadic_argument(args, variadic_name): if not args: return True for arg in args: if isinstance(arg.value, nodes.Name): if arg.value.name != variadic_name: return True else: return True return False @utils.check_messages("unnecessary-lambda") def visit_lambda(self, node: nodes.Lambda) -> None: """Check whether the lambda is suspicious""" # if the body of the lambda is a call expression with the same # argument list as the lambda itself, then the lambda is # possibly unnecessary and at least suspicious. if node.args.defaults: # If the arguments of the lambda include defaults, then a # judgment cannot be made because there is no way to check # that the defaults defined by the lambda are the same as # the defaults defined by the function called in the body # of the lambda. return call = node.body if not isinstance(call, nodes.Call): # The body of the lambda must be a function call expression # for the lambda to be unnecessary. return if isinstance(node.body.func, nodes.Attribute) and isinstance( node.body.func.expr, nodes.Call ): # Chained call, the intermediate call might # return something else (but we don't check that, yet). return call_site = astroid.arguments.CallSite.from_call(call) ordinary_args = list(node.args.args) new_call_args = list(self._filter_vararg(node, call.args)) if node.args.kwarg: if self._has_variadic_argument(call.kwargs, node.args.kwarg): return if node.args.vararg: if self._has_variadic_argument(call.starargs, node.args.vararg): return elif call.starargs: return if call.keywords: # Look for additional keyword arguments that are not part # of the lambda's signature lambda_kwargs = {keyword.name for keyword in node.args.defaults} if len(lambda_kwargs) != len(call_site.keyword_arguments): # Different lengths, so probably not identical return if set(call_site.keyword_arguments).difference(lambda_kwargs): return # The "ordinary" arguments must be in a correspondence such that: # ordinary_args[i].name == call.args[i].name. 
if len(ordinary_args) != len(new_call_args): return for arg, passed_arg in zip(ordinary_args, new_call_args): if not isinstance(passed_arg, nodes.Name): return if arg.name != passed_arg.name: return self.add_message("unnecessary-lambda", line=node.fromlineno, node=node) @utils.check_messages("dangerous-default-value") def visit_functiondef(self, node: nodes.FunctionDef) -> None: """check function name, docstring, arguments, redefinition, variable names, max locals """ if node.is_method(): self.linter.stats.node_count["method"] += 1 else: self.linter.stats.node_count["function"] += 1 self._check_dangerous_default(node) visit_asyncfunctiondef = visit_functiondef def _check_dangerous_default(self, node): """Check for dangerous default values as arguments.""" def is_iterable(internal_node): return isinstance(internal_node, (nodes.List, nodes.Set, nodes.Dict)) defaults = node.args.defaults or [] + node.args.kw_defaults or [] for default in defaults: if not default: continue try: value = next(default.infer()) except astroid.InferenceError: continue if ( isinstance(value, astroid.Instance) and value.qname() in DEFAULT_ARGUMENT_SYMBOLS ): if value is default: msg = DEFAULT_ARGUMENT_SYMBOLS[value.qname()] elif isinstance(value, astroid.Instance) or is_iterable(value): # We are here in the following situation(s): # * a dict/set/list/tuple call which wasn't inferred # to a syntax node ({}, () etc.). This can happen # when the arguments are invalid or unknown to # the inference. # * a variable from somewhere else, which turns out to be a list # or a dict. if is_iterable(default): msg = value.pytype() elif isinstance(default, nodes.Call): msg = f"{value.name}() ({value.qname()})" else: msg = f"{default.as_string()} ({value.qname()})" else: # this argument is a name msg = f"{default.as_string()} ({DEFAULT_ARGUMENT_SYMBOLS[value.qname()]})" self.add_message("dangerous-default-value", node=node, args=(msg,)) @utils.check_messages("unreachable", "lost-exception") def visit_return(self, node: nodes.Return) -> None: """1 - check if the node has a right sibling (if so, that's some unreachable code) 2 - check if the node is inside the 'finally' clause of a 'try...finally' block """ self._check_unreachable(node) # Is it inside final body of a try...finally block ? self._check_not_in_finally(node, "return", (nodes.FunctionDef,)) @utils.check_messages("unreachable") def visit_continue(self, node: nodes.Continue) -> None: """check is the node has a right sibling (if so, that's some unreachable code) """ self._check_unreachable(node) @utils.check_messages("unreachable", "lost-exception") def visit_break(self, node: nodes.Break) -> None: """1 - check if the node has a right sibling (if so, that's some unreachable code) 2 - check if the node is inside the 'finally' clause of a 'try...finally' block """ # 1 - Is it right sibling ? self._check_unreachable(node) # 2 - Is it inside final body of a try...finally block ? 
self._check_not_in_finally(node, "break", (nodes.For, nodes.While)) @utils.check_messages("unreachable") def visit_raise(self, node: nodes.Raise) -> None: """check if the node has a right sibling (if so, that's some unreachable code) """ self._check_unreachable(node) def _check_misplaced_format_function(self, call_node): if not isinstance(call_node.func, nodes.Attribute): return if call_node.func.attrname != "format": return expr = utils.safe_infer(call_node.func.expr) if expr is astroid.Uninferable: return if not expr: # we are doubtful on inferred type of node, so here just check if format # was called on print() call_expr = call_node.func.expr if not isinstance(call_expr, nodes.Call): return if ( isinstance(call_expr.func, nodes.Name) and call_expr.func.name == "print" ): self.add_message("misplaced-format-function", node=call_node) @utils.check_messages( "eval-used", "exec-used", "bad-reversed-sequence", "misplaced-format-function" ) def visit_call(self, node: nodes.Call) -> None: """visit a Call node -> check if this is not a disallowed builtin call and check for * or ** use """ self._check_misplaced_format_function(node) if isinstance(node.func, nodes.Name): name = node.func.name # ignore the name if it's not a builtin (i.e. not defined in the # locals nor globals scope) if not (name in node.frame(future=True) or name in node.root()): if name == "exec": self.add_message("exec-used", node=node) elif name == "reversed": self._check_reversed(node) elif name == "eval": self.add_message("eval-used", node=node) @utils.check_messages("assert-on-tuple", "assert-on-string-literal") def visit_assert(self, node: nodes.Assert) -> None: """check whether assert is used on a tuple or string literal.""" if ( node.fail is None and isinstance(node.test, nodes.Tuple) and len(node.test.elts) == 2 ): self.add_message("assert-on-tuple", node=node) if isinstance(node.test, nodes.Const) and isinstance(node.test.value, str): if node.test.value: when = "never" else: when = "always" self.add_message("assert-on-string-literal", node=node, args=(when,)) @utils.check_messages("duplicate-key") def visit_dict(self, node: nodes.Dict) -> None: """check duplicate key in dictionary""" keys = set() for k, _ in node.items: if isinstance(k, nodes.Const): key = k.value elif isinstance(k, nodes.Attribute): key = k.as_string() else: continue if key in keys: self.add_message("duplicate-key", node=node, args=key) keys.add(key) def visit_tryfinally(self, node: nodes.TryFinally) -> None: """update try...finally flag""" self._tryfinallys.append(node) def leave_tryfinally(self, _: nodes.TryFinally) -> None: """update try...finally flag""" self._tryfinallys.pop() def _check_unreachable(self, node): """check unreachable code""" unreach_stmt = node.next_sibling() if unreach_stmt is not None: if ( isinstance(node, nodes.Return) and isinstance(unreach_stmt, nodes.Expr) and isinstance(unreach_stmt.value, nodes.Yield) ): # Don't add 'unreachable' for empty generators. # Only add warning if 'yield' is followed by another node. unreach_stmt = unreach_stmt.next_sibling() if unreach_stmt is None: return self.add_message("unreachable", node=unreach_stmt) def _check_not_in_finally(self, node, node_name, breaker_classes=()): """check that a node is not inside a 'finally' clause of a 'try...finally' statement. If we find a parent which type is in breaker_classes before a 'try...finally' block we skip the whole check. 
""" # if self._tryfinallys is empty, we're not an in try...finally block if not self._tryfinallys: return # the node could be a grand-grand...-child of the 'try...finally' _parent = node.parent _node = node while _parent and not isinstance(_parent, breaker_classes): if hasattr(_parent, "finalbody") and _node in _parent.finalbody: self.add_message("lost-exception", node=node, args=node_name) return _node = _parent _parent = _node.parent def _check_reversed(self, node): """check that the argument to `reversed` is a sequence""" try: argument = utils.safe_infer(utils.get_argument_from_call(node, position=0)) except utils.NoSuchArgumentError: pass else: if argument is astroid.Uninferable: return if argument is None: # Nothing was inferred. # Try to see if we have iter(). if isinstance(node.args[0], nodes.Call): try: func = next(node.args[0].func.infer()) except astroid.InferenceError: return if getattr( func, "name", None ) == "iter" and utils.is_builtin_object(func): self.add_message("bad-reversed-sequence", node=node) return if isinstance(argument, (nodes.List, nodes.Tuple)): return # dicts are reversible, but only from Python 3.8 onwards. Prior to # that, any class based on dict must explicitly provide a # __reversed__ method if not self._py38_plus and isinstance(argument, astroid.Instance): if any( ancestor.name == "dict" and utils.is_builtin_object(ancestor) for ancestor in itertools.chain( (argument._proxied,), argument._proxied.ancestors() ) ): try: argument.locals[REVERSED_PROTOCOL_METHOD] except KeyError: self.add_message("bad-reversed-sequence", node=node) return if hasattr(argument, "getattr"): # everything else is not a proper sequence for reversed() for methods in REVERSED_METHODS: for meth in methods: try: argument.getattr(meth) except astroid.NotFoundError: break else: break else: self.add_message("bad-reversed-sequence", node=node) else: self.add_message("bad-reversed-sequence", node=node) @utils.check_messages("confusing-with-statement") def visit_with(self, node: nodes.With) -> None: # a "with" statement with multiple managers corresponds # to one AST "With" node with multiple items pairs = node.items if pairs: for prev_pair, pair in zip(pairs, pairs[1:]): if isinstance(prev_pair[1], nodes.AssignName) and ( pair[1] is None and not isinstance(pair[0], nodes.Call) ): # Don't emit a message if the second is a function call # there's no way that can be mistaken for a name assignment. # If the line number doesn't match # we assume it's a nested "with". self.add_message("confusing-with-statement", node=node) def _check_self_assigning_variable(self, node): # Detect assigning to the same variable. scope = node.scope() scope_locals = scope.locals rhs_names = [] targets = node.targets if isinstance(targets[0], nodes.Tuple): if len(targets) != 1: # A complex assignment, so bail out early. return targets = targets[0].elts if len(targets) == 1: # Unpacking a variable into the same name. return if isinstance(node.value, nodes.Name): if len(targets) != 1: return rhs_names = [node.value] elif isinstance(node.value, nodes.Tuple): rhs_count = len(node.value.elts) if len(targets) != rhs_count or rhs_count == 1: return rhs_names = node.value.elts for target, lhs_name in zip(targets, rhs_names): if not isinstance(lhs_name, nodes.Name): continue if not isinstance(target, nodes.AssignName): continue # Check that the scope is different from a class level, which is usually # a pattern to expose module level attributes as class level ones. 
if isinstance(scope, nodes.ClassDef) and target.name in scope_locals: continue if target.name == lhs_name.name: self.add_message( "self-assigning-variable", args=(target.name,), node=target ) def _check_redeclared_assign_name(self, targets): dummy_variables_rgx = lint_utils.get_global_option( self, "dummy-variables-rgx", default=None ) for target in targets: if not isinstance(target, nodes.Tuple): continue found_names = [] for element in target.elts: if isinstance(element, nodes.Tuple): self._check_redeclared_assign_name([element]) elif isinstance(element, nodes.AssignName) and element.name != "_": if dummy_variables_rgx and dummy_variables_rgx.match(element.name): return found_names.append(element.name) names = collections.Counter(found_names) for name, count in names.most_common(): if count > 1: self.add_message( "redeclared-assigned-name", args=(name,), node=target ) @utils.check_messages("self-assigning-variable", "redeclared-assigned-name") def visit_assign(self, node: nodes.Assign) -> None: self._check_self_assigning_variable(node) self._check_redeclared_assign_name(node.targets) @utils.check_messages("redeclared-assigned-name") def visit_for(self, node: nodes.For) -> None: self._check_redeclared_assign_name([node.target]) KNOWN_NAME_TYPES = { "module", "const", "class", "function", "method", "attr", "argument", "variable", "class_attribute", "class_const", "inlinevar", } DEFAULT_NAMING_STYLES = { "module": "snake_case", "const": "UPPER_CASE", "class": "PascalCase", "function": "snake_case", "method": "snake_case", "attr": "snake_case", "argument": "snake_case", "variable": "snake_case", "class_attribute": "any", "class_const": "UPPER_CASE", "inlinevar": "any", } def _create_naming_options(): name_options = [] for name_type in sorted(KNOWN_NAME_TYPES): human_readable_name = constants.HUMAN_READABLE_TYPES[name_type] default_style = DEFAULT_NAMING_STYLES[name_type] name_type = name_type.replace("_", "-") name_options.append( ( f"{name_type}-naming-style", { "default": default_style, "type": "choice", "choices": list(NAMING_STYLES.keys()), "metavar": "<style>", "help": f"Naming style matching correct {human_readable_name} names.", }, ) ) name_options.append( ( f"{name_type}-rgx", { "default": None, "type": "regexp", "metavar": "<regexp>", "help": f"Regular expression matching correct {human_readable_name} names. Overrides {name_type}-naming-style.", }, ) ) return tuple(name_options) class NameChecker(_BasicChecker): msgs = { "C0103": ( '%s name "%s" doesn\'t conform to %s', "invalid-name", "Used when the name doesn't conform to naming rules " "associated to its type (constant, variable, class...).", ), "C0104": ( 'Disallowed name "%s"', "disallowed-name", "Used when the name matches bad-names or bad-names-rgxs- (unauthorized names).", { "old_names": [ ("C0102", "blacklisted-name"), ] }, ), "W0111": ( "Name %s will become a keyword in Python %s", "assign-to-new-keyword", "Used when assignment will become invalid in future " "Python release due to introducing new keyword.", ), } options = ( ( "good-names", { "default": ("i", "j", "k", "ex", "Run", "_"), "type": "csv", "metavar": "<names>", "help": "Good variable names which should always be accepted," " separated by a comma.", }, ), ( "good-names-rgxs", { "default": "", "type": "regexp_csv", "metavar": "<names>", "help": "Good variable names regexes, separated by a comma. 
If names match any regex," " they will always be accepted", }, ), ( "bad-names", { "default": ("foo", "bar", "baz", "toto", "tutu", "tata"), "type": "csv", "metavar": "<names>", "help": "Bad variable names which should always be refused, " "separated by a comma.", }, ), ( "bad-names-rgxs", { "default": "", "type": "regexp_csv", "metavar": "<names>", "help": "Bad variable names regexes, separated by a comma. If names match any regex," " they will always be refused", }, ), ( "name-group", { "default": (), "type": "csv", "metavar": "<name1:name2>", "help": ( "Colon-delimited sets of names that determine each" " other's naming style when the name regexes" " allow several styles." ), }, ), ( "include-naming-hint", { "default": False, "type": "yn", "metavar": "<y or n>", "help": "Include a hint for the correct naming format with invalid-name.", }, ), ( "property-classes", { "default": ("abc.abstractproperty",), "type": "csv", "metavar": "<decorator names>", "help": "List of decorators that produce properties, such as " "abc.abstractproperty. Add to this list to register " "other decorators that produce valid properties. " "These decorators are taken in consideration only for invalid-name.", }, ), ) + _create_naming_options() KEYWORD_ONSET = {(3, 7): {"async", "await"}} def __init__(self, linter): super().__init__(linter) self._name_category = {} self._name_group = {} self._bad_names = {} self._name_regexps = {} self._name_hints = {} self._good_names_rgxs_compiled = [] self._bad_names_rgxs_compiled = [] def open(self): self.linter.stats.reset_bad_names() for group in self.config.name_group: for name_type in group.split(":"): self._name_group[name_type] = f"group_{group}" regexps, hints = self._create_naming_rules() self._name_regexps = regexps self._name_hints = hints self._good_names_rgxs_compiled = [ re.compile(rgxp) for rgxp in self.config.good_names_rgxs ] self._bad_names_rgxs_compiled = [ re.compile(rgxp) for rgxp in self.config.bad_names_rgxs ] def _create_naming_rules(self): regexps = {} hints = {} for name_type in KNOWN_NAME_TYPES: naming_style_option_name = f"{name_type}_naming_style" naming_style_name = getattr(self.config, naming_style_option_name) regexps[name_type] = NAMING_STYLES[naming_style_name].get_regex(name_type) custom_regex_setting_name = f"{name_type}_rgx" custom_regex = getattr(self.config, custom_regex_setting_name, None) if custom_regex is not None: regexps[name_type] = custom_regex if custom_regex is not None: hints[name_type] = f"{custom_regex.pattern!r} pattern" else: hints[name_type] = f"{naming_style_name} naming style" return regexps, hints @utils.check_messages("disallowed-name", "invalid-name") def visit_module(self, node: nodes.Module) -> None: self._check_name("module", node.name.split(".")[-1], node) self._bad_names = {} def leave_module(self, _: nodes.Module) -> None: for all_groups in self._bad_names.values(): if len(all_groups) < 2: continue groups = collections.defaultdict(list) min_warnings = sys.maxsize prevalent_group, _ = max(all_groups.items(), key=lambda item: len(item[1])) for group in all_groups.values(): groups[len(group)].append(group) min_warnings = min(len(group), min_warnings) if len(groups[min_warnings]) > 1: by_line = sorted( groups[min_warnings], key=lambda group: min(warning[0].lineno for warning in group), ) warnings = itertools.chain(*by_line[1:]) else: warnings = groups[min_warnings][0] for args in warnings: self._raise_name_warning(prevalent_group, *args) @utils.check_messages("disallowed-name", "invalid-name", "assign-to-new-keyword") 
def visit_classdef(self, node: nodes.ClassDef) -> None: self._check_assign_to_new_keyword_violation(node.name, node) self._check_name("class", node.name, node) for attr, anodes in node.instance_attrs.items(): if not any(node.instance_attr_ancestors(attr)): self._check_name("attr", attr, anodes[0]) @utils.check_messages("disallowed-name", "invalid-name", "assign-to-new-keyword") def visit_functiondef(self, node: nodes.FunctionDef) -> None: # Do not emit any warnings if the method is just an implementation # of a base class method. self._check_assign_to_new_keyword_violation(node.name, node) confidence = interfaces.HIGH if node.is_method(): if utils.overrides_a_method(node.parent.frame(future=True), node.name): return confidence = ( interfaces.INFERENCE if utils.has_known_bases(node.parent.frame(future=True)) else interfaces.INFERENCE_FAILURE ) self._check_name( _determine_function_name_type(node, config=self.config), node.name, node, confidence, ) # Check argument names args = node.args.args if args is not None: self._recursive_check_names(args) visit_asyncfunctiondef = visit_functiondef @utils.check_messages("disallowed-name", "invalid-name") def visit_global(self, node: nodes.Global) -> None: for name in node.names: self._check_name("const", name, node) @utils.check_messages("disallowed-name", "invalid-name", "assign-to-new-keyword") def visit_assignname(self, node: nodes.AssignName) -> None: """check module level assigned names""" self._check_assign_to_new_keyword_violation(node.name, node) frame = node.frame(future=True) assign_type = node.assign_type() if isinstance(assign_type, nodes.Comprehension): self._check_name("inlinevar", node.name, node) elif isinstance(frame, nodes.Module): if isinstance(assign_type, nodes.Assign): if isinstance(utils.safe_infer(assign_type.value), nodes.ClassDef): self._check_name("class", node.name, node) # Don't emit if the name redefines an import # in an ImportError except handler. elif not _redefines_import(node) and isinstance( utils.safe_infer(assign_type.value), nodes.Const ): self._check_name("const", node.name, node) elif isinstance( assign_type, nodes.AnnAssign ) and utils.is_assign_name_annotated_with(node, "Final"): self._check_name("const", node.name, node) elif isinstance(frame, nodes.FunctionDef): # global introduced variable aren't in the function locals if node.name in frame and node.name not in frame.argnames(): if not _redefines_import(node): self._check_name("variable", node.name, node) elif isinstance(frame, nodes.ClassDef): if not list(frame.local_attr_ancestors(node.name)): for ancestor in frame.ancestors(): if ( ancestor.name == "Enum" and ancestor.root().name == "enum" or utils.is_assign_name_annotated_with(node, "Final") ): self._check_name("class_const", node.name, node) break else: self._check_name("class_attribute", node.name, node) def _recursive_check_names(self, args): """check names in a possibly recursive list <arg>""" for arg in args: if isinstance(arg, nodes.AssignName): self._check_name("argument", arg.name, arg) else: self._recursive_check_names(arg.elts) def _find_name_group(self, node_type): return self._name_group.get(node_type, node_type) def _raise_name_warning( self, prevalent_group: Optional[str], node: nodes.NodeNG, node_type: str, name: str, confidence, warning: str = "invalid-name", ) -> None: type_label = constants.HUMAN_READABLE_TYPES[node_type] hint = self._name_hints[node_type] if prevalent_group: # This happens in the multi naming match case. 
The expected # prevalent group needs to be spelled out to make the message # correct. hint = f"the `{prevalent_group}` group in the {hint}" if self.config.include_naming_hint: hint += f" ({self._name_regexps[node_type].pattern!r} pattern)" args = ( (type_label.capitalize(), name, hint) if warning == "invalid-name" else (type_label.capitalize(), name) ) self.add_message(warning, node=node, args=args, confidence=confidence) self.linter.stats.increase_bad_name(node_type, 1) def _name_allowed_by_regex(self, name: str) -> bool: return name in self.config.good_names or any( pattern.match(name) for pattern in self._good_names_rgxs_compiled ) def _name_disallowed_by_regex(self, name: str) -> bool: return name in self.config.bad_names or any( pattern.match(name) for pattern in self._bad_names_rgxs_compiled ) def _check_name(self, node_type, name, node, confidence=interfaces.HIGH): """check for a name using the type's regexp""" def _should_exempt_from_invalid_name(node): if node_type == "variable": inferred = utils.safe_infer(node) if isinstance(inferred, nodes.ClassDef): return True return False if self._name_allowed_by_regex(name=name): return if self._name_disallowed_by_regex(name=name): self.linter.stats.increase_bad_name(node_type, 1) self.add_message("disallowed-name", node=node, args=name) return regexp = self._name_regexps[node_type] match = regexp.match(name) if _is_multi_naming_match(match, node_type, confidence): name_group = self._find_name_group(node_type) bad_name_group = self._bad_names.setdefault(name_group, {}) warnings = bad_name_group.setdefault(match.lastgroup, []) warnings.append((node, node_type, name, confidence)) if match is None and not _should_exempt_from_invalid_name(node): self._raise_name_warning(None, node, node_type, name, confidence) def _check_assign_to_new_keyword_violation(self, name, node): keyword_first_version = self._name_became_keyword_in_version( name, self.KEYWORD_ONSET ) if keyword_first_version is not None: self.add_message( "assign-to-new-keyword", node=node, args=(name, keyword_first_version), confidence=interfaces.HIGH, ) @staticmethod def _name_became_keyword_in_version(name, rules): for version, keywords in rules.items(): if name in keywords and sys.version_info < version: return ".".join(str(v) for v in version) return None class DocStringChecker(_BasicChecker): msgs = { "C0112": ( "Empty %s docstring", "empty-docstring", "Used when a module, function, class or method has an empty " "docstring (it would be too easy ;).", {"old_names": [("W0132", "old-empty-docstring")]}, ), "C0114": ( "Missing module docstring", "missing-module-docstring", "Used when a module has no docstring." "Empty modules do not require a docstring.", {"old_names": [("C0111", "missing-docstring")]}, ), "C0115": ( "Missing class docstring", "missing-class-docstring", "Used when a class has no docstring." "Even an empty class must have a docstring.", {"old_names": [("C0111", "missing-docstring")]}, ), "C0116": ( "Missing function or method docstring", "missing-function-docstring", "Used when a function or method has no docstring." 
"Some special methods like __init__ do not require a " "docstring.", {"old_names": [("C0111", "missing-docstring")]}, ), } options = ( ( "no-docstring-rgx", { "default": NO_REQUIRED_DOC_RGX, "type": "regexp", "metavar": "<regexp>", "help": "Regular expression which should only match " "function or class names that do not require a " "docstring.", }, ), ( "docstring-min-length", { "default": -1, "type": "int", "metavar": "<int>", "help": ( "Minimum line length for functions/classes that" " require docstrings, shorter ones are exempt." ), }, ), ) def open(self): self.linter.stats.reset_undocumented() @utils.check_messages("missing-docstring", "empty-docstring") def visit_module(self, node: nodes.Module) -> None: self._check_docstring("module", node) @utils.check_messages("missing-docstring", "empty-docstring") def visit_classdef(self, node: nodes.ClassDef) -> None: if self.config.no_docstring_rgx.match(node.name) is None: self._check_docstring("class", node) @utils.check_messages("missing-docstring", "empty-docstring") def visit_functiondef(self, node: nodes.FunctionDef) -> None: if self.config.no_docstring_rgx.match(node.name) is None: ftype = "method" if node.is_method() else "function" if ( is_property_setter(node) or is_property_deleter(node) or is_overload_stub(node) ): return if isinstance(node.parent.frame(future=True), nodes.ClassDef): overridden = False confidence = ( interfaces.INFERENCE if utils.has_known_bases(node.parent.frame(future=True)) else interfaces.INFERENCE_FAILURE ) # check if node is from a method overridden by its ancestor for ancestor in node.parent.frame(future=True).ancestors(): if ancestor.qname() == "builtins.object": continue if node.name in ancestor and isinstance( ancestor[node.name], nodes.FunctionDef ): overridden = True break self._check_docstring( ftype, node, report_missing=not overridden, confidence=confidence # type: ignore[arg-type] ) elif isinstance(node.parent.frame(future=True), nodes.Module): self._check_docstring(ftype, node) # type: ignore[arg-type] else: return visit_asyncfunctiondef = visit_functiondef def _check_docstring( self, node_type: Literal["class", "function", "method", "module"], node, report_missing=True, confidence=interfaces.HIGH, ): """Check if the node has a non-empty docstring""" docstring = node.doc if docstring is None: docstring = _infer_dunder_doc_attribute(node) if docstring is None: if not report_missing: return lines = utils.get_node_last_lineno(node) - node.lineno if node_type == "module" and not lines: # If the module does not have a body, there's no reason # to require a docstring. return max_lines = self.config.docstring_min_length if node_type != "module" and max_lines > -1 and lines < max_lines: return if node_type == "class": self.linter.stats.undocumented["klass"] += 1 else: self.linter.stats.undocumented[node_type] += 1 if ( node.body and isinstance(node.body[0], nodes.Expr) and isinstance(node.body[0].value, nodes.Call) ): # Most likely a string with a format call. Let's see. func = utils.safe_infer(node.body[0].value.func) if isinstance(func, astroid.BoundMethod) and isinstance( func.bound, astroid.Instance ): # Strings. 
if func.bound.name in {"str", "unicode", "bytes"}: return if node_type == "module": message = "missing-module-docstring" elif node_type == "class": message = "missing-class-docstring" else: message = "missing-function-docstring" self.add_message(message, node=node, confidence=confidence) elif not docstring.strip(): if node_type == "class": self.linter.stats.undocumented["klass"] += 1 else: self.linter.stats.undocumented[node_type] += 1 self.add_message( "empty-docstring", node=node, args=(node_type,), confidence=confidence ) class PassChecker(_BasicChecker): """check if the pass statement is really necessary""" msgs = { "W0107": ( "Unnecessary pass statement", "unnecessary-pass", 'Used when a "pass" statement that can be avoided is encountered.', ) } @utils.check_messages("unnecessary-pass") def visit_pass(self, node: nodes.Pass) -> None: if len(node.parent.child_sequence(node)) > 1 or ( isinstance(node.parent, (nodes.ClassDef, nodes.FunctionDef)) and (node.parent.doc is not None) ): self.add_message("unnecessary-pass", node=node) def _is_one_arg_pos_call(call): """Is this a call with exactly 1 argument, where that argument is positional? """ return isinstance(call, nodes.Call) and len(call.args) == 1 and not call.keywords def _infer_dunder_doc_attribute(node): # Try to see if we have a `__doc__` attribute. try: docstring = node["__doc__"] except KeyError: return None docstring = utils.safe_infer(docstring) if not docstring: return None if not isinstance(docstring, nodes.Const): return None return docstring.value class ComparisonChecker(_BasicChecker): """Checks for comparisons - singleton comparison: 'expr == True', 'expr == False' and 'expr == None' - yoda condition: 'const "comp" right' where comp can be '==', '!=', '<', '<=', '>' or '>=', and right can be a variable, an attribute, a method or a function """ msgs = { "C0121": ( "Comparison %s should be %s", "singleton-comparison", "Used when an expression is compared to singleton " "values like True, False or None.", ), "C0123": ( "Use isinstance() rather than type() for a typecheck.", "unidiomatic-typecheck", "The idiomatic way to perform an explicit typecheck in " "Python is to use isinstance(x, Y) rather than " "type(x) == Y, type(x) is Y. 
Though there are unusual " "situations where these give different results.", {"old_names": [("W0154", "old-unidiomatic-typecheck")]}, ), "R0123": ( "Comparison to literal", "literal-comparison", "Used when comparing an object to a literal, which is usually " "what you do not want to do, since you can compare to a different " "literal than what was expected altogether.", ), "R0124": ( "Redundant comparison - %s", "comparison-with-itself", "Used when something is compared against itself.", ), "W0143": ( "Comparing against a callable, did you omit the parenthesis?", "comparison-with-callable", "This message is emitted when pylint detects that a comparison with a " "callable was made, which might suggest that some parenthesis were omitted, " "resulting in potential unwanted behaviour.", ), "W0177": ( "Comparison %s should be %s", "nan-comparison", "Used when an expression is compared to NaN" "values like numpy.NaN and float('nan')", ), } def _check_singleton_comparison( self, left_value, right_value, root_node, checking_for_absence: bool = False ): """Check if == or != is being used to compare a singleton value""" singleton_values = (True, False, None) def _is_singleton_const(node) -> bool: return isinstance(node, nodes.Const) and any( node.value is value for value in singleton_values ) if _is_singleton_const(left_value): singleton, other_value = left_value.value, right_value elif _is_singleton_const(right_value): singleton, other_value = right_value.value, left_value else: return singleton_comparison_example = {False: "'{} is {}'", True: "'{} is not {}'"} # True/False singletons have a special-cased message in case the user is # mistakenly using == or != to check for truthiness if singleton in {True, False}: suggestion_template = ( "{} if checking for the singleton value {}, or {} if testing for {}" ) truthiness_example = {False: "not {}", True: "{}"} truthiness_phrase = {True: "truthiness", False: "falsiness"} # Looks for comparisons like x == True or x != False checking_truthiness = singleton is not checking_for_absence suggestion = suggestion_template.format( singleton_comparison_example[checking_for_absence].format( left_value.as_string(), right_value.as_string() ), singleton, ( "'bool({})'" if not utils.is_test_condition(root_node) and checking_truthiness else "'{}'" ).format( truthiness_example[checking_truthiness].format( other_value.as_string() ) ), truthiness_phrase[checking_truthiness], ) else: suggestion = singleton_comparison_example[checking_for_absence].format( left_value.as_string(), right_value.as_string() ) self.add_message( "singleton-comparison", node=root_node, args=(f"'{root_node.as_string()}'", suggestion), ) def _check_nan_comparison( self, left_value, right_value, root_node, checking_for_absence: bool = False ): def _is_float_nan(node): try: if isinstance(node, nodes.Call) and len(node.args) == 1: if ( node.args[0].value.lower() == "nan" and node.inferred()[0].pytype() == "builtins.float" ): return True return False except AttributeError: return False def _is_numpy_nan(node): if isinstance(node, nodes.Attribute) and node.attrname == "NaN": if isinstance(node.expr, nodes.Name): return node.expr.name in {"numpy", "nmp", "np"} return False def _is_nan(node) -> bool: return _is_float_nan(node) or _is_numpy_nan(node) nan_left = _is_nan(left_value) if not nan_left and not _is_nan(right_value): return absence_text = "" if checking_for_absence: absence_text = "not " if nan_left: suggestion = f"'{absence_text}math.isnan({right_value.as_string()})'" else: suggestion = 
f"'{absence_text}math.isnan({left_value.as_string()})'" self.add_message( "nan-comparison", node=root_node, args=(f"'{root_node.as_string()}'", suggestion), ) def _check_literal_comparison(self, literal, node: nodes.Compare): """Check if we compare to a literal, which is usually what we do not want to do.""" is_other_literal = isinstance(literal, (nodes.List, nodes.Dict, nodes.Set)) is_const = False if isinstance(literal, nodes.Const): if isinstance(literal.value, bool) or literal.value is None: # Not interested in these values. return is_const = isinstance(literal.value, (bytes, str, int, float)) if is_const or is_other_literal: self.add_message("literal-comparison", node=node) def _check_logical_tautology(self, node: nodes.Compare): """Check if identifier is compared against itself. :param node: Compare node :Example: val = 786 if val == val: # [comparison-with-itself] pass """ left_operand = node.left right_operand = node.ops[0][1] operator = node.ops[0][0] if isinstance(left_operand, nodes.Const) and isinstance( right_operand, nodes.Const ): left_operand = left_operand.value right_operand = right_operand.value elif isinstance(left_operand, nodes.Name) and isinstance( right_operand, nodes.Name ): left_operand = left_operand.name right_operand = right_operand.name if left_operand == right_operand: suggestion = f"{left_operand} {operator} {right_operand}" self.add_message("comparison-with-itself", node=node, args=(suggestion,)) def _check_callable_comparison(self, node): operator = node.ops[0][0] if operator not in COMPARISON_OPERATORS: return bare_callables = (nodes.FunctionDef, astroid.BoundMethod) left_operand, right_operand = node.left, node.ops[0][1] # this message should be emitted only when there is comparison of bare callable # with non bare callable. 
number_of_bare_callables = 0 for operand in left_operand, right_operand: inferred = utils.safe_infer(operand) # Ignore callables that raise, as well as typing constants # implemented as functions (that raise via their decorator) if ( isinstance(inferred, bare_callables) and "typing._SpecialForm" not in inferred.decoratornames() and not any(isinstance(x, nodes.Raise) for x in inferred.body) ): number_of_bare_callables += 1 if number_of_bare_callables == 1: self.add_message("comparison-with-callable", node=node) @utils.check_messages( "singleton-comparison", "unidiomatic-typecheck", "literal-comparison", "comparison-with-itself", "comparison-with-callable", ) def visit_compare(self, node: nodes.Compare) -> None: self._check_callable_comparison(node) self._check_logical_tautology(node) self._check_unidiomatic_typecheck(node) # NOTE: this checker only works with binary comparisons like 'x == 42' # but not 'x == y == 42' if len(node.ops) != 1: return left = node.left operator, right = node.ops[0] if operator in {"==", "!="}: self._check_singleton_comparison( left, right, node, checking_for_absence=operator == "!=" ) if operator in {"==", "!=", "is", "is not"}: self._check_nan_comparison( left, right, node, checking_for_absence=operator in {"!=", "is not"} ) if operator in {"is", "is not"}: self._check_literal_comparison(right, node) def _check_unidiomatic_typecheck(self, node): operator, right = node.ops[0] if operator in TYPECHECK_COMPARISON_OPERATORS: left = node.left if _is_one_arg_pos_call(left): self._check_type_x_is_y(node, left, operator, right) def _check_type_x_is_y(self, node, left, operator, right): """Check for expressions like type(x) == Y.""" left_func = utils.safe_infer(left.func) if not ( isinstance(left_func, nodes.ClassDef) and left_func.qname() == TYPE_QNAME ): return if operator in {"is", "is not"} and _is_one_arg_pos_call(right): right_func = utils.safe_infer(right.func) if ( isinstance(right_func, nodes.ClassDef) and right_func.qname() == TYPE_QNAME ): # type(x) == type(a) right_arg = utils.safe_infer(right.args[0]) if not isinstance(right_arg, LITERAL_NODE_TYPES): # not e.g. type(x) == type([]) return self.add_message("unidiomatic-typecheck", node=node) def register(linter: "PyLinter") -> None: linter.register_checker(BasicErrorChecker(linter)) linter.register_checker(BasicChecker(linter)) linter.register_checker(NameChecker(linter)) linter.register_checker(DocStringChecker(linter)) linter.register_checker(PassChecker(linter)) linter.register_checker(ComparisonChecker(linter))
1
19,947
You need to modify the copyrite aliases so it's done automatically.
PyCQA-pylint
py
@@ -174,6 +174,13 @@ function createElement(...args) { let type = vnode.type, props = vnode.props; if (typeof type!='function') { + Object.keys(props).forEach(key => { + if (/^on(Ani|Tra)/.test(key)) { + props[key.toLowerCase()] = props[key]; + delete props[key]; + } + }); + if (props.defaultValue) { if (!props.value && props.value!==0) { props.value = props.defaultValue;
1
import { hydrate, render as preactRender, cloneElement as preactCloneElement, createRef, h, Component, options, toChildArray, createContext, Fragment, _unmount } from 'preact'; import * as hooks from 'preact/hooks'; import { Suspense, lazy } from './suspense'; import { assign, removeNode } from '../../src/util'; const version = '16.8.0'; // trick libraries to think we are react /* istanbul ignore next */ const REACT_ELEMENT_TYPE = (typeof Symbol!=='undefined' && Symbol.for && Symbol.for('react.element')) || 0xeac7; const CAMEL_PROPS = /^(?:accent|alignment|arabic|baseline|cap|clip|color|fill|flood|font|glyph|horiz|marker|overline|paint|stop|strikethrough|stroke|text|underline|unicode|units|v|vector|vert|word|writing|x)[A-Z]/; let oldEventHook = options.event; options.event = e => { /* istanbul ignore next */ if (oldEventHook) e = oldEventHook(e); e.persist = () => {}; return e.nativeEvent = e; }; /** * Legacy version of createElement. * @param {import('./internal').VNode["type"]} type The node name or Component constructor */ function createFactory(type) { return createElement.bind(null, type); } /** * Normalize DOM vnode properties. * @param {import('./internal').VNode} vnode The vnode to normalize props of * @param {object | null | undefined} props props to normalize */ function handleElementVNode(vnode, props) { let shouldSanitize, attrs, i; for (i in props) if ((shouldSanitize = CAMEL_PROPS.test(i))) break; if (shouldSanitize) { attrs = vnode.props = {}; for (i in props) { attrs[CAMEL_PROPS.test(i) ? i.replace(/([A-Z0-9])/, '-$1').toLowerCase() : i] = props[i]; } } } /** * Proxy render() since React returns a Component reference. * @param {import('./internal').VNode} vnode VNode tree to render * @param {import('./internal').PreactElement} parent DOM node to render vnode tree into * @param {() => void} [callback] Optional callback that will be called after rendering * @returns {import('./internal').Component | null} The root component reference or null */ function render(vnode, parent, callback) { // React destroys any existing DOM nodes, see #1727 while (parent.firstChild) { removeNode(parent.firstChild); } preactRender(vnode, parent); if (typeof callback==='function') callback(); return vnode ? vnode._component : null; } class ContextProvider { getChildContext() { return this.props.context; } render(props) { return props.children; } } /** * Portal component * @param {object | null | undefined} props */ function Portal(props) { let _this = this; let container = props.container; let wrap = h(ContextProvider, { context: _this.context }, props.vnode); // When we change container we should clear our old container and // indicate a new mount. if (_this._container && _this._container !== container) { if (_this._temp.parentNode) _this._container.removeChild(_this._temp); _unmount(_this._wrap); _this._hasMounted = false; } // When props.vnode is undefined/false/null we are dealing with some kind of // conditional vnode. This should not trigger a render. if (props.vnode) { if (!_this._hasMounted) { // Create a placeholder that we can use to insert into. _this._temp = document.createTextNode(''); // Hydrate existing nodes to keep the dom intact, when rendering // wrap into the container. hydrate('', container); // Insert before first child (will just append if firstChild is null). container.insertBefore(_this._temp, container.firstChild); // At this point we have mounted and should set our container. 
_this._hasMounted = true; _this._container = container; // Render our wrapping element into temp. preactRender(wrap, container, _this._temp); _this._children = this._temp._children; } else { // When we have mounted and the vnode is present it means the // props have changed or a parent is triggering a rerender. // This implies we only need to call render. But we need to keep // the old tree around, otherwise will treat the vnodes as new and // will wrongly call `componentDidMount` on them container._children = _this._children; preactRender(wrap, container); _this._children = container._children; } } // When we come from a conditional render, on a mounted // portal we should clear the DOM. else if (_this._hasMounted) { if (_this._temp.parentNode) _this._container.removeChild(_this._temp); _unmount(_this._wrap); } // Set the wrapping element for future unmounting. _this._wrap = wrap; _this.componentWillUnmount = () => { if (_this._temp.parentNode) _this._container.removeChild(_this._temp); _unmount(_this._wrap); }; return null; } /** * Create a `Portal` to continue rendering the vnode tree at a different DOM node * @param {import('./internal').VNode} vnode The vnode to render * @param {import('./internal').PreactElement} container The DOM node to continue rendering in to. */ function createPortal(vnode, container) { return h(Portal, { vnode, container }); } const mapFn = (children, fn) => { if (!children) return null; return toChildArray(children).map(fn); }; // This API is completely unnecessary for Preact, so it's basically passthrough. let Children = { map: mapFn, forEach: mapFn, count(children) { return children ? toChildArray(children).length : 0; }, only(children) { children = toChildArray(children); if (children.length!==1) throw new Error('Children.only() expects only one child.'); return children[0]; }, toArray: toChildArray }; /** * Wrap `createElement` to apply various vnode normalizations. * @param {import('./internal').VNode["type"]} type The node name or Component constructor * @param {object | null | undefined} [props] The vnode's properties * @param {Array<import('./internal').ComponentChildren>} [children] The vnode's children * @returns {import('./internal').VNode} */ function createElement(...args) { let vnode = h(...args); let type = vnode.type, props = vnode.props; if (typeof type!='function') { if (props.defaultValue) { if (!props.value && props.value!==0) { props.value = props.defaultValue; } delete props.defaultValue; } if (Array.isArray(props.value) && props.multiple && type==='select') { toChildArray(props.children).forEach((child) => { if (props.value.indexOf(child.props.value)!=-1) { child.props.selected = true; } }); delete props.value; } handleElementVNode(vnode, props); } vnode.preactCompatNormalized = false; return normalizeVNode(vnode); } /** * Normalize a vnode * @param {import('./internal').VNode} vnode */ function normalizeVNode(vnode) { vnode.preactCompatNormalized = true; applyClassName(vnode); return vnode; } /** * Wrap `cloneElement` to abort if the passed element is not a valid element and apply * all vnode normalizations. * @param {import('./internal').VNode} element The vnode to clone * @param {object} props Props to add when cloning * @param {Array<import('./internal').ComponentChildren>} rest Optional component children */ function cloneElement(element) { if (!isValidElement(element)) return element; let vnode = normalizeVNode(preactCloneElement.apply(null, arguments)); return vnode; } /** * Check if the passed element is a valid (p)react node. 
* @param {*} element The element to check * @returns {boolean} */ function isValidElement(element) { return !!element && element.$$typeof===REACT_ELEMENT_TYPE; } /** * Normalize event handlers like react does. Most famously it uses `onChange` for any input element. * @param {import('./internal').VNode} vnode The vnode to normalize events on */ function applyEventNormalization({ type, props }) { if (!props || typeof type!='string') return; let newProps = {}; for (let i in props) { newProps[i.toLowerCase()] = i; } if (newProps.ondoubleclick) { props.ondblclick = props[newProps.ondoubleclick]; delete props[newProps.ondoubleclick]; } if (newProps.onbeforeinput) { props.onbeforeinput = props[newProps.onbeforeinput]; delete props[newProps.onbeforeinput]; } // for *textual inputs* (incl textarea), normalize `onChange` -> `onInput`: if (newProps.onchange && (type==='textarea' || (type.toLowerCase()==='input' && !/^fil|che|rad/i.test(props.type)))) { let normalized = newProps.oninput || 'oninput'; if (!props[normalized]) { props[normalized] = props[newProps.onchange]; delete props[newProps.onchange]; } } } /** * Remove a component tree from the DOM, including state and event handlers. * @param {import('./internal').PreactElement} container * @returns {boolean} */ function unmountComponentAtNode(container) { if (container._children) { preactRender(null, container); return true; } return false; } /** * Alias `class` prop to `className` if available * @param {import('./internal').VNode} vnode */ function applyClassName(vnode) { let a = vnode.props; if (a.class || a.className) { classNameDescriptor.enumerable = 'className' in a; if (a.className) a.class = a.className; Object.defineProperty(a, 'className', classNameDescriptor); } } let classNameDescriptor = { configurable: true, get() { return this.class; } }; /** * Check if two objects have a different shape * @param {object} a * @param {object} b * @returns {boolean} */ function shallowDiffers(a, b) { for (let i in a) if (!(i in b)) return true; for (let i in b) if (a[i]!==b[i]) return true; return false; } /** * Get the matching DOM node for a component * @param {import('./internal').Component} component * @returns {import('./internal').PreactElement | null} */ function findDOMNode(component) { return component && (component.base || component.nodeType === 1 && component) || null; } /** * Component class with a predefined `shouldComponentUpdate` implementation */ class PureComponent extends Component { constructor(props) { super(props); // Some third-party libraries check if this property is present this.isPureReactComponent = true; } shouldComponentUpdate(props, state) { return shallowDiffers(this.props, props) || shallowDiffers(this.state, state); } } // Some libraries like `react-virtualized` explicitly check for this. Component.prototype.isReactComponent = {}; /** * Memoize a component, so that it only updates when the props actually have * changed. This was previously known as `React.pure`. * @param {import('./internal').FunctionalComponent} c functional component * @param {(prev: object, next: object) => boolean} [comparer] Custom equality function * @returns {import('./internal').FunctionalComponent} */ function memo(c, comparer) { function shouldUpdate(nextProps) { let ref = this.props.ref; let updateRef = ref==nextProps.ref; if (!updateRef) { ref.call ? ref(null) : (ref.current = null); } return (!comparer ? 
shallowDiffers(this.props, nextProps) : !comparer(this.props, nextProps)) || !updateRef; } function Memoed(props) { this.shouldComponentUpdate = shouldUpdate; return h(c, assign({}, props)); } Memoed.displayName = 'Memo(' + (c.displayName || c.name) + ')'; Memoed._forwarded = true; return Memoed; } // Patch in `UNSAFE_*` lifecycle hooks function setUnsafeDescriptor(obj, key) { Object.defineProperty(obj.prototype, 'UNSAFE_' + key, { configurable: true, get() { return this[key]; }, set(v) { this[key] = v; } }); } setUnsafeDescriptor(Component, 'componentWillMount'); setUnsafeDescriptor(Component, 'componentWillReceiveProps'); setUnsafeDescriptor(Component, 'componentWillUpdate'); /** * Pass ref down to a child. This is mainly used in libraries with HOCs that * wrap components. Using `forwardRef` there is an easy way to get a reference * of the wrapped component instead of one of the wrapper itself. * @param {import('./internal').ForwardFn} fn * @returns {import('./internal').FunctionalComponent} */ function forwardRef(fn) { function Forwarded(props) { let ref = props.ref; delete props.ref; return fn(props, ref); } Forwarded._forwarded = true; Forwarded.displayName = 'ForwardRef(' + (fn.displayName || fn.name) + ')'; return Forwarded; } let oldVNodeHook = options.vnode; options.vnode = vnode => { vnode.$$typeof = REACT_ELEMENT_TYPE; applyEventNormalization(vnode); let type = vnode.type; if (type && type._forwarded && vnode.ref) { vnode.props.ref = vnode.ref; vnode.ref = null; } /* istanbul ignore next */ if (oldVNodeHook) oldVNodeHook(vnode); }; /** * Deprecated way to control batched rendering inside the reconciler, but we * already schedule in batches inside our rendering code * @template Arg * @param {(arg: Arg) => void} callback function that triggers the updated * @param {Arg} [arg] Optional argument that can be passed to the callback */ // eslint-disable-next-line camelcase const unstable_batchedUpdates = (callback, arg) => callback(arg); export * from 'preact/hooks'; export { version, Children, render, render as hydrate, unmountComponentAtNode, createPortal, createElement, createContext, createFactory, cloneElement, createRef, Fragment, isValidElement, findDOMNode, Component, PureComponent, memo, forwardRef, // eslint-disable-next-line camelcase unstable_batchedUpdates, Suspense, lazy }; // React copies the named exports to the default one. export default assign({ version, Children, render, hydrate: render, unmountComponentAtNode, createPortal, createElement, createContext, createFactory, cloneElement, createRef, Fragment, isValidElement, findDOMNode, Component, PureComponent, memo, forwardRef, unstable_batchedUpdates, Suspense, lazy }, hooks);
1
13,985
Quick question: Is this true for all `onAnimation*` and all `onTransition*` events?
preactjs-preact
js
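A note on the review question in the record above (whether /^on(Ani|Tra)/ really covers every onAnimation* and onTransition* prop): the standalone sketch below simply replays the lowercasing loop from the patch against the usual React-style animation/transition prop names. The prop list and the console check are illustrative assumptions added here, not part of the preact source.

// Hypothetical prop bag: the standard React-style animation/transition handlers,
// plus one unrelated handler as a control.
const props = {
	onAnimationStart: () => {},
	onAnimationIteration: () => {},
	onAnimationEnd: () => {},
	onTransitionEnd: () => {},
	onClick: () => {} // should be left untouched
};

// Same normalization step as in the patch: any prop whose name starts with
// "onAni" or "onTra" is moved to its all-lowercase DOM attribute name.
Object.keys(props).forEach(key => {
	if (/^on(Ani|Tra)/.test(key)) {
		props[key.toLowerCase()] = props[key];
		delete props[key];
	}
});

console.log(Object.keys(props));
// -> [ 'onClick', 'onanimationstart', 'onanimationiteration', 'onanimationend', 'ontransitionend' ]

Because the test only inspects the prefix, it matches every onAnimation* and onTransition* name (and would also catch any other prop that happens to start with "onAni" or "onTra").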
@@ -59,6 +59,8 @@ function Address(data, network, type) { info = Address._transformPublicKey(data); } else if (data.constructor && (data.constructor.name && data.constructor.name === 'Script')) { info = Address._transformScript(data); + } else if (data instanceof Address) { + return data; } else if (typeof(data) === 'string') { info = Address._transformString(data, network, type); } else {
1
'use strict'; var base58check = require('./encoding/base58check'); var networks = require('./networks'); var Hash = require('./crypto/hash'); /** * * Instantiate an address from an address String or Buffer, a public key or script hash Buffer, * or an instance of PublicKey or Script. * * @example * * // validate that an input field is valid * var error = Address.getValidationError(input, 'testnet'); * if (!error) { * var address = Address(input, 'testnet'); * } else { * // invalid network or checksum (typo?) * var message = error.messsage; * } * * // get an address from a public key * var address = Address(publicKey, 'testnet').toString(); * * * @param {String} data - The encoded data in various formats * @param {String} [network] - The network: 'mainnet' or 'testnet' * @param {String} [type] - The type of address: 'script' or 'pubkey' * @returns {Address} A new valid and frozen instance of an Address * @constructor */ function Address(data, network, type) { if (!(this instanceof Address)) { return new Address(data, network, type); } if (!data) { throw new TypeError('First argument is required, please include address data.'); } if (network && (network !== 'mainnet' && network !== 'testnet')) { throw new TypeError('Second argument must be "mainnet" or "testnet".'); } if (type && (type !== 'pubkeyhash' && type !== 'scripthash')) { throw new TypeError('Third argument must be "pubkeyhash" or "scripthash".'); } var info; // transform and validate input data if ((data instanceof Buffer || data instanceof Uint8Array) && data.length === 20) { info = Address._transformHash(data); } else if ((data instanceof Buffer || data instanceof Uint8Array) && data.length === 21) { info = Address._transformBuffer(data, network, type); } else if (data.constructor && (data.constructor.name && data.constructor.name === 'PublicKey')) { info = Address._transformPublicKey(data); } else if (data.constructor && (data.constructor.name && data.constructor.name === 'Script')) { info = Address._transformScript(data); } else if (typeof(data) === 'string') { info = Address._transformString(data, network, type); } else { throw new TypeError('First argument is an unrecognized data format.'); } // set defaults if not set info.network = info.network || network || 'mainnet'; info.type = info.type || type || 'pubkeyhash'; // set the validated values this.hashBuffer = info.hashBuffer; this.network = info.network; this.type = info.type; return this; } /** * * Internal function to transform a hash buffer * * @param {Buffer} hash - An instance of a hash Buffer * @returns {Object} An object with keys: hashBuffer * @private */ Address._transformHash = function(hash){ var info = {}; if (!(hash instanceof Buffer) && !(hash instanceof Uint8Array)) { throw new TypeError('Address supplied is not a buffer.'); } if (hash.length !== 20) { throw new TypeError('Address hashbuffers must be exactly 20 bytes.'); } info.hashBuffer = hash; return info; }; /** * * Internal function to transform a bitcoin address buffer * * @param {Buffer} buffer - An instance of a hex encoded address Buffer * @param {String} [network] - The network: 'mainnet' or 'testnet' * @param {String} [type] - The type: 'pubkeyhash' or 'scripthash' * @returns {Object} An object with keys: hashBuffer, network and type * @private */ Address._transformBuffer = function(buffer, network, type){ var info = {}; if (!(buffer instanceof Buffer) && !(buffer instanceof Uint8Array)) { throw new TypeError('Address supplied is not a buffer.'); } if (buffer.length !== 1 + 20) { throw new 
TypeError('Address buffers must be exactly 21 bytes.'); } var bufNetwork = false; var bufType = false; switch(buffer[0]){ // the version byte case networks.mainnet.pubkeyhash: bufNetwork = 'mainnet'; bufType = 'pubkeyhash'; break; case networks.mainnet.scripthash: bufNetwork = 'mainnet'; bufType = 'scripthash'; break; case networks.testnet.pubkeyhash: bufNetwork = 'testnet'; bufType = 'pubkeyhash'; break; case networks.testnet.scripthash: bufNetwork = 'testnet'; bufType = 'scripthash'; break; } if (!bufNetwork || (network && network !== bufNetwork)) { throw new TypeError('Address has mismatched network type.'); } if (!bufType || ( type && type !== bufType )) { throw new TypeError('Address has mismatched type.'); } info.hashBuffer = buffer.slice(1); info.network = bufNetwork; info.type = bufType; return info; }; /** * * Internal function to transform a PublicKey * * @param {PublicKey} pubkey - An instance of PublicKey * @returns {Object} An object with keys: hashBuffer, type * @private */ Address._transformPublicKey = function(pubkey){ var info = {}; if (!pubkey.constructor || (pubkey.constructor.name && pubkey.constructor.name !== 'PublicKey')) { throw new TypeError('Address must be an instance of PublicKey.'); } info.hashBuffer = Hash.sha256ripemd160(pubkey.toBuffer()); info.type = 'pubkeyhash'; return info; }; /** * * Internal function to transform a Script * * @param {Script} script - An instance of Script * @returns {Object} An object with keys: hashBuffer, type * @private */ Address._transformScript = function(script){ var info = {}; if (!script.constructor || (script.constructor.name && script.constructor.name !== 'Script')) { throw new TypeError('Address must be an instance of Script.'); } info.hashBuffer = Hash.sha256ripemd160(script.toBuffer()); info.type = 'scripthash'; return info; }; /** * * Internal function to transform a bitcoin address string * * @param {String} data - An instance of PublicKey * @param {String} [network] - The network: 'mainnet' or 'testnet' * @param {String} [type] - The type: 'pubkeyhash' or 'scripthash' * @returns {Object} An object with keys: hashBuffer, network and type * @private */ Address._transformString = function(data, network, type){ if( typeof(data) !== 'string' ) { throw new TypeError('Address supplied is not a string.'); } var addressBuffer = base58check.decode(data); var info = Address._transformBuffer(addressBuffer, network, type); return info; }; /** * * Instantiate an address from a PublicKey instance * * @param {String} data - An instance of PublicKey * @param {String} network - The network: 'mainnet' or 'testnet' * @returns {Address} A new valid and frozen instance of an Address */ Address.fromPublicKey = function(data, network){ var info = Address._transformPublicKey(data); return new Address(info.hashBuffer, network, info.type); }; /** * * Instantiate an address from a ripemd160 public key hash * * @param {Buffer} hash - An instance of buffer of the hash * @param {String} network - The network: 'mainnet' or 'testnet' * @returns {Address} A new valid and frozen instance of an Address */ Address.fromPublicKeyHash = function(hash, network) { var info = Address._transformHash(hash); return new Address(info.hashBuffer, network, 'pubkeyhash'); }; /** * * Instantiate an address from a ripemd160 script hash * * @param {Buffer} hash - An instance of buffer of the hash * @param {String} network - The network: 'mainnet' or 'testnet' * @returns {Address} A new valid and frozen instance of an Address */ Address.fromScriptHash = function(hash, 
network) { var info = Address._transformHash(hash); return new Address(info.hashBuffer, network, 'scripthash'); }; /** * * Instantiate an address from a Script * * @param {Script} script - An instance of Script * @param {String} network - The network: 'mainnet' or 'testnet' * @returns {Address} A new valid and frozen instance of an Address */ Address.fromScript = function(script, network) { var info = Address._transformScript(script); return new Address(info.hashBuffer, network, info.type); }; /** * * Instantiate an address from a buffer of the address * * @param {Buffer} buffer - An instance of buffer of the address * @param {String} [network] - The network: 'mainnet' or 'testnet' * @param {String} [type] - The type of address: 'script' or 'pubkey' * @returns {Address} A new valid and frozen instance of an Address */ Address.fromBuffer = function(buffer, network, type) { var info = Address._transformBuffer(buffer, network, type); return new Address(info.hashBuffer, info.network, info.type); }; /** * * Instantiate an address from an address string * * @param {String} str - An string of the bitcoin address * @param {String} [network] - The network: 'mainnet' or 'testnet' * @param {String} [type] - The type of address: 'script' or 'pubkey' * @returns {Address} A new valid and frozen instance of an Address */ Address.fromString = function(str, network, type) { var info = Address._transformString(str, network, type); return new Address(info.hashBuffer, info.network, info.type); }; /** * * Will return a validation error if exists * * @example * * var error = Address.getValidationError('15vkcKf7gB23wLAnZLmbVuMiiVDc1Nm4a2', 'testnet'); * // a network mismatch error * * @param {String} data - The encoded data * @param {String} network - The network: 'mainnet' or 'testnet' * @param {String} type - The type of address: 'script' or 'pubkey' * @returns {null|Error} The corresponding error message */ Address.getValidationError = function(data, network, type) { var error; try { new Address(data, network, type); } catch (e) { error = e; } return error; }; /** * * Will return a boolean if an address is valid * * @example * * var valid = Address.isValid('15vkcKf7gB23wLAnZLmbVuMiiVDc1Nm4a2', 'mainnet'); * // true * * @param {String} data - The encoded data * @param {String} network - The network: 'mainnet' or 'testnet' * @param {String} type - The type of address: 'script' or 'pubkey' * @returns {null|Error} The corresponding error message */ Address.isValid = function(data, network, type) { return !Address.getValidationError(data, network, type); }; /** * * Will return a buffer representation of the address * * @returns {Buffer} Bitcoin address buffer */ Address.prototype.toBuffer = function() { var version = new Buffer([networks[this.network][this.type]]); var buf = Buffer.concat([version, this.hashBuffer]); return buf; }; /** * * Will return a the string representation of the address * * @returns {String} Bitcoin address */ Address.prototype.toString = function() { return base58check.encode(this.toBuffer()); }; /** * * Will return a string formatted for the console * * @returns {String} Bitcoin address */ Address.prototype.inspect = function() { return '<Address: ' + this.toString() + ', type: '+this.type+', network: '+this.network+'>'; }; module.exports = Address;
1
13,169
Should sending an Address into Address be an error, since the call isn't needed and could be fixed easily?
bitpay-bitcore
js
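On the review question in the record above (whether Address(address) should be an error rather than a pass-through): a minimal sketch, assuming a constructor shaped roughly like the one in the file, showing the behaviour the patch chooses and, commented out, the stricter alternative the reviewer raises. Names and values here are illustrative only, not the real bitcore implementation.

// Stripped-down stand-in for the Address constructor, for illustration only.
function Address(data) {
	if (!(this instanceof Address)) {
		return new Address(data);
	}
	if (data instanceof Address) {
		// Stricter alternative raised in the review:
		//   throw new TypeError('First argument is already an Address.');
		// Behaviour added by the patch: hand the existing instance back unchanged.
		return data;
	}
	this.data = data;
	return this;
}

const a = Address('some-encoded-address'); // placeholder input, no validation in this sketch
const b = Address(a);
console.log(a === b); // true with the patch's pass-through behaviour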
@@ -26,6 +26,7 @@ import java.util.List; import java.util.Map; import java.util.TreeMap; +import org.apache.log4j.MDC; import org.apache.lucene.index.MultiTerms; import org.apache.lucene.index.Terms; import org.apache.lucene.index.TermsEnum;
1
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.solr.handler.admin; import java.io.IOException; import java.lang.invoke.MethodHandles; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.List; import java.util.Map; import java.util.TreeMap; import org.apache.lucene.index.MultiTerms; import org.apache.lucene.index.Terms; import org.apache.lucene.index.TermsEnum; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.StringHelper; import org.apache.solr.cloud.CloudDescriptor; import org.apache.solr.cloud.ZkShardTerms; import org.apache.solr.common.SolrException; import org.apache.solr.common.cloud.ClusterState; import org.apache.solr.common.cloud.CompositeIdRouter; import org.apache.solr.common.cloud.DocCollection; import org.apache.solr.common.cloud.DocRouter; import org.apache.solr.common.cloud.Slice; import org.apache.solr.common.params.CommonAdminParams; import org.apache.solr.common.params.CoreAdminParams; import org.apache.solr.common.params.SolrParams; import org.apache.solr.core.SolrCore; import org.apache.solr.request.LocalSolrQueryRequest; import org.apache.solr.request.SolrQueryRequest; import org.apache.solr.search.SolrIndexSearcher; import org.apache.solr.update.SolrIndexSplitter; import org.apache.solr.update.SplitIndexCommand; import org.apache.solr.util.RTimer; import org.apache.solr.util.RefCounted; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import static org.apache.solr.common.cloud.DocCollection.DOC_ROUTER; import static org.apache.solr.common.params.CommonParams.PATH; import static org.apache.solr.common.params.CoreAdminParams.GET_RANGES; class SplitOp implements CoreAdminHandler.CoreAdminOp { private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass()); @Override public void execute(CoreAdminHandler.CallInfo it) throws Exception { SolrParams params = it.req.getParams(); String splitKey = params.get("split.key"); String[] newCoreNames = params.getParams("targetCore"); String cname = params.get(CoreAdminParams.CORE, ""); if ( params.getBool(GET_RANGES, false) ) { handleGetRanges(it, cname); return; } List<DocRouter.Range> ranges = null; String[] pathsArr = params.getParams(PATH); String rangesStr = params.get(CoreAdminParams.RANGES); // ranges=a-b,c-d,e-f if (rangesStr != null) { String[] rangesArr = rangesStr.split(","); if (rangesArr.length == 0) { throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "There must be at least one range specified to split an index"); } else { ranges = new ArrayList<>(rangesArr.length); for (String r : rangesArr) { try { ranges.add(DocRouter.DEFAULT.fromString(r)); } catch (Exception e) { throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Exception parsing hexadecimal hash 
range: " + r, e); } } } } if ((pathsArr == null || pathsArr.length == 0) && (newCoreNames == null || newCoreNames.length == 0)) { throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Either path or targetCore param must be specified"); } log.info("Invoked split action for core: " + cname); String methodStr = params.get(CommonAdminParams.SPLIT_METHOD, SolrIndexSplitter.SplitMethod.REWRITE.toLower()); SolrIndexSplitter.SplitMethod splitMethod = SolrIndexSplitter.SplitMethod.get(methodStr); if (splitMethod == null) { throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Unsupported value of '" + CommonAdminParams.SPLIT_METHOD + "': " + methodStr); } SolrCore parentCore = it.handler.coreContainer.getCore(cname); List<SolrCore> newCores = null; SolrQueryRequest req = null; try { // TODO: allow use of rangesStr in the future List<String> paths = null; int partitions = pathsArr != null ? pathsArr.length : newCoreNames.length; DocRouter router = null; String routeFieldName = null; if (it.handler.coreContainer.isZooKeeperAware()) { ClusterState clusterState = it.handler.coreContainer.getZkController().getClusterState(); String collectionName = parentCore.getCoreDescriptor().getCloudDescriptor().getCollectionName(); DocCollection collection = clusterState.getCollection(collectionName); String sliceName = parentCore.getCoreDescriptor().getCloudDescriptor().getShardId(); Slice slice = collection.getSlice(sliceName); router = collection.getRouter() != null ? collection.getRouter() : DocRouter.DEFAULT; if (ranges == null) { DocRouter.Range currentRange = slice.getRange(); ranges = currentRange != null ? router.partitionRange(partitions, currentRange) : null; } Object routerObj = collection.get(DOC_ROUTER); // for back-compat with Solr 4.4 if (routerObj instanceof Map) { Map routerProps = (Map) routerObj; routeFieldName = (String) routerProps.get("field"); } } if (pathsArr == null) { newCores = new ArrayList<>(partitions); for (String newCoreName : newCoreNames) { SolrCore newcore = it.handler.coreContainer.getCore(newCoreName); if (newcore != null) { newCores.add(newcore); if (it.handler.coreContainer.isZooKeeperAware()) { // this core must be the only replica in its shard otherwise // we cannot guarantee consistency between replicas because when we add data to this replica CloudDescriptor cd = newcore.getCoreDescriptor().getCloudDescriptor(); ClusterState clusterState = it.handler.coreContainer.getZkController().getClusterState(); if (clusterState.getCollection(cd.getCollectionName()).getSlice(cd.getShardId()).getReplicas().size() != 1) { throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Core with core name " + newCoreName + " must be the only replica in shard " + cd.getShardId()); } } } else { throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Core with core name " + newCoreName + " expected but doesn't exist."); } } } else { paths = Arrays.asList(pathsArr); } req = new LocalSolrQueryRequest(parentCore, params); SplitIndexCommand cmd = new SplitIndexCommand(req, it.rsp, paths, newCores, ranges, router, routeFieldName, splitKey, splitMethod); parentCore.getUpdateHandler().split(cmd); if (it.handler.coreContainer.isZooKeeperAware()) { for (SolrCore newcore : newCores) { // the index of the core changed from empty to have some data, its term must be not zero CloudDescriptor cd = newcore.getCoreDescriptor().getCloudDescriptor(); ZkShardTerms zkShardTerms = it.handler.coreContainer.getZkController().getShardTerms(cd.getCollectionName(), cd.getShardId()); 
zkShardTerms.ensureHighestTermsAreNotZero(); } } // After the split has completed, someone (here?) should start the process of replaying the buffered updates. } catch (Exception e) { log.error("ERROR executing split:", e); throw e; } finally { if (req != null) req.close(); if (parentCore != null) parentCore.close(); if (newCores != null) { for (SolrCore newCore : newCores) { newCore.close(); } } } } /** * This is called when splitByPrefix is used. * The overseer called us to get recommended splits taking into * account actual document distribution over the hash space. */ private void handleGetRanges(CoreAdminHandler.CallInfo it, String coreName) throws Exception { SolrCore parentCore = it.handler.coreContainer.getCore(coreName); if (parentCore == null) { throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Unknown core " + coreName); } RefCounted<SolrIndexSearcher> searcherHolder = parentCore.getRealtimeSearcher(); try { if (!it.handler.coreContainer.isZooKeeperAware()) { throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Shard splitByPrefix requires SolrCloud mode."); } else { SolrIndexSearcher searcher = searcherHolder.get(); String routeFieldName = null; String prefixField = "id_prefix"; ClusterState clusterState = it.handler.coreContainer.getZkController().getClusterState(); String collectionName = parentCore.getCoreDescriptor().getCloudDescriptor().getCollectionName(); DocCollection collection = clusterState.getCollection(collectionName); String sliceName = parentCore.getCoreDescriptor().getCloudDescriptor().getShardId(); Slice slice = collection.getSlice(sliceName); DocRouter router = collection.getRouter() != null ? collection.getRouter() : DocRouter.DEFAULT; DocRouter.Range currentRange = slice.getRange(); Object routerObj = collection.get(DOC_ROUTER); // for back-compat with Solr 4.4 if (routerObj instanceof Map) { Map routerProps = (Map) routerObj; routeFieldName = (String) routerProps.get("field"); } if (routeFieldName == null) { routeFieldName = searcher.getSchema().getUniqueKeyField().getName(); } Collection<RangeCount> counts = getHashHistogram(searcher, prefixField, router, collection); if (counts.size() == 0) { // How to determine if we should look at the id field to figure out the prefix buckets? // There may legitimately be no indexed terms in id_prefix if no ids have a prefix yet. // For now, avoid using splitByPrefix unless you are actually using prefixes. 
counts = getHashHistogramFromId(searcher, searcher.getSchema().getUniqueKeyField().getName(), router, collection); } Collection<DocRouter.Range> splits = getSplits(counts, currentRange); String splitString = toSplitString(splits); if (splitString == null) { return; } it.rsp.add(CoreAdminParams.RANGES, splitString); } } finally { if (searcherHolder != null) searcherHolder.decref(); if (parentCore != null) parentCore.close(); } } static class RangeCount implements Comparable<RangeCount> { DocRouter.Range range; int count; public RangeCount(DocRouter.Range range, int count) { this.range = range; this.count = count; } @Override public int hashCode() { return range.hashCode(); } @Override public boolean equals(Object obj) { if (!(obj instanceof RangeCount)) return false; return this.range.equals( ((RangeCount)obj).range ); } @Override public int compareTo(RangeCount o) { return this.range.compareTo(o.range); } @Override public String toString() { return range.toString() + "=" + count; } } static String toSplitString(Collection<DocRouter.Range> splits) throws Exception { if (splits == null) { return null; } StringBuilder sb = new StringBuilder(); for (DocRouter.Range range : splits) { if (sb.length() > 0) { sb.append(","); } sb.append(range); } return sb.toString(); } /* * Returns a list of range counts sorted by the range lower bound */ static Collection<RangeCount> getHashHistogram(SolrIndexSearcher searcher, String prefixField, DocRouter router, DocCollection collection) throws IOException { RTimer timer = new RTimer(); TreeMap<DocRouter.Range,RangeCount> counts = new TreeMap<>(); Terms terms = MultiTerms.getTerms(searcher.getIndexReader(), prefixField); if (terms == null) { return counts.values(); } int numPrefixes = 0; int numTriLevel = 0; int numCollisions = 0; long sumBuckets = 0; TermsEnum termsEnum = terms.iterator(); BytesRef term; while ((term = termsEnum.next()) != null) { numPrefixes++; String termStr = term.utf8ToString(); int firstSep = termStr.indexOf(CompositeIdRouter.SEPARATOR); // truncate to first separator since we don't support multiple levels currently // NOTE: this does not currently work for tri-level composite ids since the number of bits allocated to the first ID is 16 for a 2 part id // and 8 for a 3 part id! if (firstSep != termStr.length()-1 && firstSep > 0) { numTriLevel++; termStr = termStr.substring(0, firstSep+1); } DocRouter.Range range = router.getSearchRangeSingle(termStr, null, collection); int numDocs = termsEnum.docFreq(); sumBuckets += numDocs; RangeCount rangeCount = new RangeCount(range, numDocs); RangeCount prev = counts.put(rangeCount.range, rangeCount); if (prev != null) { // we hit a hash collision or truncated a prefix to first level, so add the buckets together. rangeCount.count += prev.count; numCollisions++; } } log.info("Split histogram: ms={}, numBuckets={} sumBuckets={} numPrefixes={} numTriLevel={} numCollisions={}", timer.getTime(), counts.size(), sumBuckets, numPrefixes, numTriLevel, numCollisions); return counts.values(); } /** * Returns a list of range counts sorted by the range lower bound, using the indexed "id" field (i.e. 
the terms are full IDs, not just prefixes) */ static Collection<RangeCount> getHashHistogramFromId(SolrIndexSearcher searcher, String idField, DocRouter router, DocCollection collection) throws IOException { RTimer timer = new RTimer(); TreeMap<DocRouter.Range, RangeCount> counts = new TreeMap<>(); Terms terms = MultiTerms.getTerms(searcher.getIndexReader(), idField); if (terms == null) { return counts.values(); } int numPrefixes = 0; int numCollisions = 0; long sumBuckets = 0; byte sep = (byte) CompositeIdRouter.SEPARATOR.charAt(0); TermsEnum termsEnum = terms.iterator(); BytesRef currPrefix = new BytesRef(); // prefix of the previous "id" term int bucketCount = 0; // count of the number of docs in the current bucket // We're going to iterate over all terms, so do the minimum amount of work per term. // Terms are sorted, so all terms sharing a prefix will be grouped together. The extra work // is really just limited to stepping over all the terms in the id field. for (;;) { BytesRef term = termsEnum.next(); // compare to current prefix bucket and see if this new term shares the same prefix if (term != null && term.length >= currPrefix.length && currPrefix.length > 0) { if (StringHelper.startsWith(term, currPrefix)) { bucketCount++; // use 1 since we are dealing with unique ids continue; } } // At this point the prefix did not match, so if we had a bucket we were working on, record it. if (currPrefix.length > 0) { numPrefixes++; sumBuckets += bucketCount; String currPrefixStr = currPrefix.utf8ToString(); DocRouter.Range range = router.getSearchRangeSingle(currPrefixStr, null, collection); RangeCount rangeCount = new RangeCount(range, bucketCount); bucketCount = 0; RangeCount prev = counts.put(rangeCount.range, rangeCount); if (prev != null) { // we hit a hash collision, so add the buckets together. rangeCount.count += prev.count; numCollisions++; } } // if the current term is null, we ran out of values if (term == null) break; // find the new prefix (if any) // resize if needed if (currPrefix.length < term.length) { currPrefix.bytes = new byte[term.length+10]; } // Copy the bytes up to and including the separator, and set the length if the separator is found. 
// If there was no separator, then length remains 0 and it's the indicator that we have no prefix bucket currPrefix.length = 0; for (int i=0; i<term.length; i++) { byte b = term.bytes[i + term.offset]; currPrefix.bytes[i] = b; if (b == sep) { currPrefix.length = i + 1; bucketCount++; break; } } } log.info("Split histogram from idField {}: ms={}, numBuckets={} sumBuckets={} numPrefixes={} numCollisions={}", idField, timer.getTime(), counts.size(), sumBuckets, numPrefixes, numCollisions); return counts.values(); } /* * Returns the list of recommended splits, or null if there is not enough information */ static Collection<DocRouter.Range> getSplits(Collection<RangeCount> rawCounts, DocRouter.Range currentRange) throws Exception { int totalCount = 0; RangeCount biggest = null; // keep track of the largest in case we need to split it out into it's own shard RangeCount last = null; // keep track of what the last range is // Remove counts that don't overlap with currentRange (can happen if someone overrode document routing) List<RangeCount> counts = new ArrayList<>(rawCounts.size()); for (RangeCount rangeCount : rawCounts) { if (!rangeCount.range.overlaps(currentRange)) { continue; } totalCount += rangeCount.count; if (biggest == null || rangeCount.count > biggest.count) { biggest = rangeCount; } counts.add(rangeCount); last = rangeCount; } if (counts.size() == 0) { // we don't have any data to go off of, so do the split the normal way return null; } List<DocRouter.Range> targetRanges = new ArrayList<>(); if (counts.size() == 1) { // We have a single range, so we should split it. // Currently, we only split a prefix/bucket when we have just one, but this could be changed/controlled // in the future via a allowedSizeDifference parameter (i.e. if just separating prefix buckets results in // too large of an imbalanced, allow splitting within a prefix) // It may already be a partial range, so figure that out int lower = Math.max(last.range.min, currentRange.min); int upper = Math.min(last.range.max, currentRange.max); int mid = lower + (upper-lower)/2; if (mid == lower || mid == upper) { // shard too small... this should pretty much never happen, but use default split logic if it does. return null; } // Make sure to include the shard's current range in the new ranges so we don't create useless empty shards. DocRouter.Range lowerRange = new DocRouter.Range(currentRange.min, mid); DocRouter.Range upperRange = new DocRouter.Range(mid+1, currentRange.max); targetRanges.add(lowerRange); targetRanges.add(upperRange); return targetRanges; } // We have at least two ranges, so we want to partition the ranges // and avoid splitting any individual range. // The "middle" bucket we are going to find will be included with the lower range and excluded from the upper range. int targetCount = totalCount / 2; RangeCount middle = null; RangeCount prev = null; int currCount = 0; for (RangeCount rangeCount : counts) { currCount += rangeCount.count; if (currCount >= targetCount) { // this should at least be true on the last range middle = rangeCount; break; } prev = rangeCount; } // check if using the range before the middle one would make a better split point int overError = currCount - targetCount; // error if we include middle in first split int underError = targetCount - (currCount - middle.count); // error if we include middle in second split if (underError < overError) { middle = prev; } // The middle should never be the last, since that means that we won't actually do a split. 
// Minimising the error (above) should already ensure this never happens. assert middle != last; // Make sure to include the shard's current range in the new ranges so we don't create useless empty shards. DocRouter.Range lowerRange = new DocRouter.Range(currentRange.min, middle.range.max); DocRouter.Range upperRange = new DocRouter.Range(middle.range.max+1, currentRange.max); targetRanges.add(lowerRange); targetRanges.add(upperRange); return targetRanges; } }
1
33,687
This is the wrong MDC; see MDCLoggingContext, which imports `org.slf4j.MDC`.
apache-lucene-solr
java
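The review comment above points at Solr's MDCLoggingContext, which is built on `org.slf4j.MDC`. As a minimal sketch of that API (not the actual Solr code; the key name and log message below are illustrative assumptions), putting a value into the SLF4J MDC makes it available to every log line on the current thread until it is removed:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.slf4j.MDC;

public class MdcSketch {
    private static final Logger log = LoggerFactory.getLogger(MdcSketch.class);

    static void splitWithContext(String coreName) {
        MDC.put("core", coreName);       // hypothetical key, standing in for what MDCLoggingContext manages
        try {
            log.info("Executing split"); // this line now carries core=<coreName> in MDC-aware log layouts
        } finally {
            MDC.remove("core");          // MDC is a thread-local map, so always clean up
        }
    }
}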
@@ -127,6 +127,9 @@ public interface HistoricDetailQuery extends Query<HistoricDetailQuery, Historic /** Only select historic details that have occurred after the given date (inclusive). */ HistoricDetailQuery occurredAfter(Date date); + /** Only select historic details that were set during the process start. */ + HistoricDetailQuery initial(); + /** * Order by tenant id (needs to be followed by {@link #asc()} or {@link #desc()}). * Note that the ordering of historic details without tenant id is database-specific.
1
/* * Copyright Camunda Services GmbH and/or licensed to Camunda Services GmbH * under one or more contributor license agreements. See the NOTICE file * distributed with this work for additional information regarding copyright * ownership. Camunda licenses this file to you under the Apache License, * Version 2.0; you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.camunda.bpm.engine.history; import org.camunda.bpm.engine.query.Query; import org.camunda.bpm.engine.runtime.CaseExecution; import org.camunda.bpm.engine.runtime.CaseInstance; import org.camunda.bpm.engine.runtime.Execution; import org.camunda.bpm.engine.runtime.ProcessInstance; import org.camunda.bpm.engine.runtime.VariableInstance; import java.util.Date; /** * Programmatic querying for {@link HistoricDetail}s. * * @author Tom Baeyens */ public interface HistoricDetailQuery extends Query<HistoricDetailQuery, HistoricDetail> { /** * Only select the historic detail with the given id. * * @param id the historic detail to select * @return the query builder */ HistoricDetailQuery detailId(String id); /** Only select historic variable updates with the given process instance. * {@link ProcessInstance} ids and {@link HistoricProcessInstance} ids match. */ HistoricDetailQuery processInstanceId(String processInstanceId); /** Only select historic variable updates with the given case instance. * {@link CaseInstance} ids and {@link HistoricCaseInstance} ids match. */ HistoricDetailQuery caseInstanceId(String caseInstanceId); /** Only select historic variable updates with the given execution. * Note that {@link Execution} ids are not stored in the history as first class citizen, * only process instances are.*/ HistoricDetailQuery executionId(String executionId); /** Only select historic variable updates with the given case execution. * Note that {@link CaseExecution} ids are not stored in the history as first class citizen, * only case instances are.*/ HistoricDetailQuery caseExecutionId(String caseExecutionId); /** Only select historic variable updates associated to the given {@link HistoricActivityInstance activity instance}. * @deprecated since 5.2, use {@link #activityInstanceId(String)} instead */ HistoricDetailQuery activityId(String activityId); /** Only select historic variable updates associated to the given {@link HistoricActivityInstance activity instance}. */ HistoricDetailQuery activityInstanceId(String activityInstanceId); /** Only select historic variable updates associated to the given {@link HistoricTaskInstance historic task instance}. */ HistoricDetailQuery taskId(String taskId); /** Only select historic variable updates associated to the given {@link HistoricVariableInstance historic variable instance}. */ HistoricDetailQuery variableInstanceId(String variableInstanceId); /** Only select historic process variables which match one of the given variable types. */ HistoricDetailQuery variableTypeIn(String... variableTypes); /** Only select {@link HistoricFormProperty}s. */ @Deprecated HistoricDetailQuery formProperties(); /** Only select {@link HistoricFormField}s. 
*/ HistoricDetailQuery formFields(); /** Only select {@link HistoricVariableUpdate}s. */ HistoricDetailQuery variableUpdates(); /** * Disable fetching of byte array and file values. By default, the query will fetch such values. * By calling this method you can prevent the values of (potentially large) blob data chunks to be fetched. * The variables themselves are nonetheless included in the query result. * * @return the query builder */ HistoricDetailQuery disableBinaryFetching(); /** * Disable deserialization of variable values that are custom objects. By default, the query * will attempt to deserialize the value of these variables. By calling this method you can * prevent such attempts in environments where their classes are not available. * Independent of this setting, variable serialized values are accessible. */ HistoricDetailQuery disableCustomObjectDeserialization(); /** Exclude all task-related {@link HistoricDetail}s, so only items which have no * task-id set will be selected. When used together with {@link #taskId(String)}, this * call is ignored task details are NOT excluded. */ HistoricDetailQuery excludeTaskDetails(); /** Only select historic details with one of the given tenant ids. */ HistoricDetailQuery tenantIdIn(String... tenantIds); /** Only selects historic details that have no tenant id. */ HistoricDetailQuery withoutTenantId(); /** Only select historic details with the given process instance ids. */ HistoricDetailQuery processInstanceIdIn(String... processInstanceIds); /** * Select historic details related with given userOperationId. */ HistoricDetailQuery userOperationId(String userOperationId); /** Only select historic details that have occurred before the given date (inclusive). */ HistoricDetailQuery occurredBefore(Date date); /** Only select historic details that have occurred after the given date (inclusive). */ HistoricDetailQuery occurredAfter(Date date); /** * Order by tenant id (needs to be followed by {@link #asc()} or {@link #desc()}). * Note that the ordering of historic details without tenant id is database-specific. */ HistoricDetailQuery orderByTenantId(); HistoricDetailQuery orderByProcessInstanceId(); HistoricDetailQuery orderByVariableName(); HistoricDetailQuery orderByFormPropertyId(); HistoricDetailQuery orderByVariableType(); HistoricDetailQuery orderByVariableRevision(); HistoricDetailQuery orderByTime(); /** * <p>Sort the {@link HistoricDetail historic detail events} in the order in which * they occurred and needs to be followed by {@link #asc()} or {@link #desc()}.</p> * * <p>The set of all {@link HistoricVariableUpdate historic variable update events} is * a <strong>partially ordered set</strong>. Due to this fact {@link HistoricVariableUpdate * historic variable update events} for two different {@link VariableInstance variable * instances} are <strong>incomparable</strong>. So that it is not possible to sort * the {@link HistoricDetail historic variable update events} for two {@link VariableInstance * variable instances} in the order they occurred. 
Just for one {@link VariableInstance variable * instance} the set of {@link HistoricVariableUpdate historic variable update events} can be * <strong>totally ordered</strong> by using {@link #variableInstanceId(String)} and {@link * #orderPartiallyByOccurrence()} which will return a result set ordered by its occurrence.</p> * * <p><strong>For example:</strong><br> * An execution variable <code>myVariable</code> will be updated multiple times:</p> * * <code> * runtimeService.setVariable("anExecutionId", "myVariable", 1000);<br> * execution.setVariable("myVariable", 5000);<br> * runtimeService.setVariable("anExecutionId", "myVariable", 2500);<br> * runtimeService.removeVariable("anExecutionId", "myVariable"); * </code> * * <p>As a result there exists four {@link HistoricVariableUpdate historic variable update events}.</p> * * <p>By using {@link #variableInstanceId(String)} and {@link #orderPartiallyByOccurrence()} it * is possible to sort the events in the order in which they occurred. The following query</p> * * <code> * historyService.createHistoricDetailQuery()<br> * &nbsp;&nbsp;.variableInstanceId("myVariableInstId")<br> * &nbsp;&nbsp;.orderPartiallyByOccurrence()<br> * &nbsp;&nbsp;.asc()<br> * &nbsp;&nbsp;.list() * </code> * * <p>will return the following totally ordered result set</p> * * <code> * [<br> * &nbsp;&nbsp;HistoricVariableUpdate[id: "myVariableInstId", variableName: "myVariable", value: 1000],<br> * &nbsp;&nbsp;HistoricVariableUpdate[id: "myVariableInstId", variableName: "myVariable", value: 5000],<br> * &nbsp;&nbsp;HistoricVariableUpdate[id: "myVariableInstId", variableName: "myVariable", value: 2500]<br> * &nbsp;&nbsp;HistoricVariableUpdate[id: "myVariableInstId", variableName: "myVariable", value: null]<br> * ] * </code> * * <p><strong>Note:</strong><br> * Please note that a {@link HistoricFormField historic form field event} can occur only once.</p> * * @since 7.3 */ HistoricDetailQuery orderPartiallyByOccurrence(); }
1
9,932
Maybe we can clarify a bit what that means.
camunda-camunda-bpm-platform
java
@@ -94,7 +94,7 @@ namespace Microsoft.AspNet.Server.Kestrel.Http { tcs2.SetResult(task2.Result); } - }, tcs); + }, tcs, cancellationToken); return tcs.Task; }
1
// Copyright (c) .NET Foundation. All rights reserved. // Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information. using System; using System.IO; using System.Threading; using System.Threading.Tasks; namespace Microsoft.AspNet.Server.Kestrel.Http { public class FrameRequestStream : Stream { private readonly MessageBody _body; public FrameRequestStream(MessageBody body) { _body = body; } public override bool CanRead { get { return true; } } public override bool CanSeek { get { return false; } } public override bool CanWrite { get { return false; } } public override long Length { get { throw new NotImplementedException(); } } public override long Position { get; set; } public override void Flush() { throw new NotImplementedException(); } public override long Seek(long offset, SeekOrigin origin) { throw new NotImplementedException(); } public override void SetLength(long value) { throw new NotImplementedException(); } public override int Read(byte[] buffer, int offset, int count) { return ReadAsync(buffer, offset, count).Result; } #if NET451 public override IAsyncResult BeginRead(byte[] buffer, int offset, int count, AsyncCallback callback, object state) { var task = ReadAsync(buffer, offset, count, CancellationToken.None, state); if (callback != null) { task.ContinueWith(t => callback.Invoke(t)); } return task; } public override int EndRead(IAsyncResult asyncResult) { return ((Task<int>)asyncResult).Result; } #endif public override Task<int> ReadAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken) { return _body.ReadAsync(new ArraySegment<byte>(buffer, offset, count), cancellationToken); } public Task<int> ReadAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken, object state) { var tcs = new TaskCompletionSource<int>(state); var task = _body.ReadAsync(new ArraySegment<byte>(buffer, offset, count), cancellationToken); task.ContinueWith((task2, state2) => { var tcs2 = (TaskCompletionSource<int>)state2; if (task2.IsCanceled) { tcs2.SetCanceled(); } else if (task2.IsFaulted) { tcs2.SetException(task2.Exception); } else { tcs2.SetResult(task2.Result); } }, tcs); return tcs.Task; } public override void Write(byte[] buffer, int offset, int count) { throw new NotImplementedException(); } } }
1
6,604
This entire method can be deleted; it isn't used and it isn't an override of Stream.
aspnet-KestrelHttpServer
.cs
@@ -50,6 +50,8 @@ import org.apache.iceberg.util.Pair; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import static org.apache.iceberg.MetadataTableUtils.createMetadataTableInstance; + /** * Implementation of Iceberg tables that uses the Hadoop FileSystem * to store metadata and manifests.
1
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.iceberg.hadoop; import java.util.Map; import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.iceberg.AllDataFilesTable; import org.apache.iceberg.AllEntriesTable; import org.apache.iceberg.AllManifestsTable; import org.apache.iceberg.BaseTable; import org.apache.iceberg.DataFilesTable; import org.apache.iceberg.HistoryTable; import org.apache.iceberg.ManifestEntriesTable; import org.apache.iceberg.ManifestsTable; import org.apache.iceberg.MetadataTableType; import org.apache.iceberg.PartitionSpec; import org.apache.iceberg.PartitionsTable; import org.apache.iceberg.Schema; import org.apache.iceberg.SnapshotsTable; import org.apache.iceberg.SortOrder; import org.apache.iceberg.StaticTableOperations; import org.apache.iceberg.Table; import org.apache.iceberg.TableMetadata; import org.apache.iceberg.TableOperations; import org.apache.iceberg.Tables; import org.apache.iceberg.exceptions.AlreadyExistsException; import org.apache.iceberg.exceptions.NoSuchTableException; import org.apache.iceberg.relocated.com.google.common.base.Preconditions; import org.apache.iceberg.relocated.com.google.common.collect.ImmutableMap; import org.apache.iceberg.util.Pair; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * Implementation of Iceberg tables that uses the Hadoop FileSystem * to store metadata and manifests. */ public class HadoopTables implements Tables, Configurable { private static final Logger LOG = LoggerFactory.getLogger(HadoopTables.class); private static final String METADATA_JSON = "metadata.json"; private Configuration conf; public HadoopTables() { this(new Configuration()); } public HadoopTables(Configuration conf) { this.conf = conf; } /** * Loads the table location from a FileSystem path location. * * @param location a path URI (e.g. hdfs:///warehouse/my_table/) * @return table implementation */ @Override public Table load(String location) { Table result; Pair<String, MetadataTableType> parsedMetadataType = parseMetadataType(location); if (parsedMetadataType != null) { // Load a metadata table result = loadMetadataTable(parsedMetadataType.first(), parsedMetadataType.second()); } else { // Load a normal table TableOperations ops = newTableOps(location); if (ops.current() != null) { result = new BaseTable(ops, location); } else { throw new NoSuchTableException("Table does not exist at location: " + location); } } LOG.info("Table location loaded: {}", result.location()); return result; } /** * Try to resolve a metadata table, which we encode as URI fragments * e.g. 
hdfs:///warehouse/my_table#snapshots * @param location Path to parse * @return A base table name and MetadataTableType if a type is found, null if not */ private Pair<String, MetadataTableType> parseMetadataType(String location) { int hashIndex = location.lastIndexOf('#'); if (hashIndex != -1 & !location.endsWith("#")) { String baseTable = location.substring(0, hashIndex); String metaTable = location.substring(hashIndex + 1); MetadataTableType type = MetadataTableType.from(metaTable); return (type == null) ? null : Pair.of(baseTable, type); } else { return null; } } private Table loadMetadataTable(String location, MetadataTableType type) { TableOperations ops = newTableOps(location); if (ops.current() == null) { throw new NoSuchTableException("Table does not exist at location: " + location); } Table baseTable = new BaseTable(ops, location); switch (type) { case ENTRIES: return new ManifestEntriesTable(ops, baseTable); case FILES: return new DataFilesTable(ops, baseTable); case HISTORY: return new HistoryTable(ops, baseTable); case SNAPSHOTS: return new SnapshotsTable(ops, baseTable); case MANIFESTS: return new ManifestsTable(ops, baseTable); case PARTITIONS: return new PartitionsTable(ops, baseTable); case ALL_DATA_FILES: return new AllDataFilesTable(ops, baseTable); case ALL_MANIFESTS: return new AllManifestsTable(ops, baseTable); case ALL_ENTRIES: return new AllEntriesTable(ops, baseTable); default: throw new NoSuchTableException(String.format("Unknown metadata table type: %s for %s", type, location)); } } /** * Create a table using the FileSystem implementation resolve from * location. * * @param schema iceberg schema used to create the table * @param spec partitioning spec, if null the table will be unpartitioned * @param properties a string map of table properties, initialized to empty if null * @param location a path URI (e.g. hdfs:///warehouse/my_table) * @return newly created table implementation */ @Override public Table create(Schema schema, PartitionSpec spec, SortOrder order, Map<String, String> properties, String location) { Preconditions.checkNotNull(schema, "A table schema is required"); TableOperations ops = newTableOps(location); if (ops.current() != null) { throw new AlreadyExistsException("Table already exists at location: " + location); } Map<String, String> tableProps = properties == null ? ImmutableMap.of() : properties; PartitionSpec partitionSpec = spec == null ? PartitionSpec.unpartitioned() : spec; SortOrder sortOrder = order == null ? SortOrder.unsorted() : order; TableMetadata metadata = TableMetadata.newTableMetadata(schema, partitionSpec, sortOrder, location, tableProps); ops.commit(null, metadata); return new BaseTable(ops, location); } private TableOperations newTableOps(String location) { if (location.contains(METADATA_JSON)) { return new StaticTableOperations(location, new HadoopFileIO(conf)); } else { return new HadoopTableOperations(new Path(location), conf); } } @Override public void setConf(Configuration conf) { this.conf = conf; } @Override public Configuration getConf() { return conf; } }
1
24,382
We avoid static method imports. Can you call `MetadataTableUtils.createMetadataTableInstance` instead?
apache-iceberg
java
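The comment above asks for a qualified call instead of the static method import added in the patch. A minimal sketch of that convention, using a hypothetical Util class rather than the real `MetadataTableUtils` API (the helper name and signature here are assumptions, not Iceberg's):

package example;

final class Util {
    private Util() {
    }

    static String describe(String tableName) { // hypothetical helper
        return "metadata table for " + tableName;
    }
}

class Caller {
    // Discouraged in this codebase:
    //   import static example.Util.describe;
    //   ... return describe(tableName);
    //
    // Preferred: a plain import (or same-package reference) and a qualified call,
    // so the owning class stays visible at the call site.
    String load(String tableName) {
        return Util.describe(tableName);
    }
}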
@@ -254,6 +254,7 @@ class PACFetcher(QObject): def __eq__(self, other): # pylint: disable=protected-access return self._pac_url == other._pac_url + # pylint: enable=protected-access def __repr__(self): return utils.get_repr(self, url=self._pac_url, constructor=True)
1
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et: # Copyright 2016-2017 Florian Bruhin (The Compiler) <[email protected]> # # This file is part of qutebrowser. # # qutebrowser is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # qutebrowser is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with qutebrowser. If not, see <http://www.gnu.org/licenses/>. """Evaluation of PAC scripts.""" import sys import functools from PyQt5.QtCore import QObject, pyqtSignal, pyqtSlot, QUrl from PyQt5.QtNetwork import (QNetworkProxy, QNetworkRequest, QHostInfo, QNetworkReply, QNetworkAccessManager, QHostAddress) from PyQt5.QtQml import QJSEngine, QJSValue from qutebrowser.utils import log, utils, qtutils class ParseProxyError(Exception): """Error while parsing PAC result string.""" pass class EvalProxyError(Exception): """Error while evaluating PAC script.""" pass def _js_slot(*args): """Wrap a methods as a JavaScript function. Register a PACContext method as a JavaScript function, and catch exceptions returning them as JavaScript Error objects. Args: args: Types of method arguments. Return: Wrapped method. """ def _decorator(method): @functools.wraps(method) def new_method(self, *args, **kwargs): try: return method(self, *args, **kwargs) except: e = str(sys.exc_info()[0]) log.network.exception("PAC evaluation error") # pylint: disable=protected-access return self._error_con.callAsConstructor([e]) # pylint: enable=protected-access return pyqtSlot(*args, result=QJSValue)(new_method) return _decorator class _PACContext(QObject): """Implementation of PAC API functions that require native calls. See https://developer.mozilla.org/en-US/docs/Mozilla/Projects/Necko/Proxy_Auto-Configuration_(PAC)_file """ JS_DEFINITIONS = """ function dnsResolve(host) { return PAC.dnsResolve(host); } function myIpAddress() { return PAC.myIpAddress(); } """ def __init__(self, engine): """Create a new PAC API implementation instance. Args: engine: QJSEngine which is used for running PAC. """ super().__init__(parent=engine) self._engine = engine self._error_con = engine.globalObject().property("Error") @_js_slot(str) def dnsResolve(self, host): """Resolve a DNS hostname. Resolves the given DNS hostname into an IP address, and returns it in the dot-separated format as a string. Args: host: hostname to resolve. """ ips = QHostInfo.fromName(host) if ips.error() != QHostInfo.NoError or not ips.addresses(): err_f = "Failed to resolve host during PAC evaluation: {}" log.network.info(err_f.format(host)) return QJSValue(QJSValue.NullValue) else: return ips.addresses()[0].toString() @_js_slot() def myIpAddress(self): """Get host IP address. Return the server IP address of the current machine, as a string in the dot-separated integer format. 
""" return QHostAddress(QHostAddress.LocalHost).toString() class PACResolver: """Evaluate PAC script files and resolve proxies.""" @staticmethod def _parse_proxy_host(host_str): host, _colon, port_str = host_str.partition(':') try: port = int(port_str) except ValueError: raise ParseProxyError("Invalid port number") return (host, port) @staticmethod def _parse_proxy_entry(proxy_str): """Parse one proxy string entry, as described in PAC specification.""" config = [c.strip() for c in proxy_str.split(' ') if c] if not config: raise ParseProxyError("Empty proxy entry") elif config[0] == "DIRECT": if len(config) != 1: raise ParseProxyError("Invalid number of parameters for " + "DIRECT") return QNetworkProxy(QNetworkProxy.NoProxy) elif config[0] == "PROXY": if len(config) != 2: raise ParseProxyError("Invalid number of parameters for PROXY") host, port = PACResolver._parse_proxy_host(config[1]) return QNetworkProxy(QNetworkProxy.HttpProxy, host, port) elif config[0] in ["SOCKS", "SOCKS5"]: if len(config) != 2: raise ParseProxyError("Invalid number of parameters for SOCKS") host, port = PACResolver._parse_proxy_host(config[1]) return QNetworkProxy(QNetworkProxy.Socks5Proxy, host, port) else: err = "Unknown proxy type: {}" raise ParseProxyError(err.format(config[0])) @staticmethod def _parse_proxy_string(proxy_str): proxies = proxy_str.split(';') return [PACResolver._parse_proxy_entry(x) for x in proxies] def _evaluate(self, js_code, js_file): ret = self._engine.evaluate(js_code, js_file) if ret.isError(): err = "JavaScript error while evaluating PAC file: {}" raise EvalProxyError(err.format(ret.toString())) def __init__(self, pac_str): """Create a PAC resolver. Args: pac_str: JavaScript code containing PAC resolver. """ self._engine = QJSEngine() self._ctx = _PACContext(self._engine) self._engine.globalObject().setProperty( "PAC", self._engine.newQObject(self._ctx)) self._evaluate(_PACContext.JS_DEFINITIONS, "pac_js_definitions") self._evaluate(utils.read_file("javascript/pac_utils.js"), "pac_utils") proxy_config = self._engine.newObject() proxy_config.setProperty("bindings", self._engine.newObject()) self._engine.globalObject().setProperty("ProxyConfig", proxy_config) self._evaluate(pac_str, "pac") global_js_object = self._engine.globalObject() self._resolver = global_js_object.property("FindProxyForURL") if not self._resolver.isCallable(): err = "Cannot resolve FindProxyForURL function, got '{}' instead" raise EvalProxyError(err.format(self._resolver.toString())) def resolve(self, query, from_file=False): """Resolve a proxy via PAC. Args: query: QNetworkProxyQuery. from_file: Whether the proxy info is coming from a file. Return: A list of QNetworkProxy objects in order of preference. """ if from_file: string_flags = QUrl.PrettyDecoded else: string_flags = QUrl.RemoveUserInfo if query.url().scheme() == 'https': string_flags |= QUrl.RemovePath | QUrl.RemoveQuery result = self._resolver.call([query.url().toString(string_flags), query.peerHostName()]) result_str = result.toString() if not result.isString(): err = "Got strange value from FindProxyForURL: '{}'" raise EvalProxyError(err.format(result_str)) return self._parse_proxy_string(result_str) class PACFetcher(QObject): """Asynchronous fetcher of PAC files.""" finished = pyqtSignal() def __init__(self, url, parent=None): """Resolve a PAC proxy from URL. Args: url: QUrl of a PAC proxy. 
""" super().__init__(parent) pac_prefix = "pac+" assert url.scheme().startswith(pac_prefix) url.setScheme(url.scheme()[len(pac_prefix):]) self._pac_url = url self._manager = QNetworkAccessManager() self._manager.setProxy(QNetworkProxy(QNetworkProxy.NoProxy)) self._pac = None self._error_message = None self._reply = None def __eq__(self, other): # pylint: disable=protected-access return self._pac_url == other._pac_url def __repr__(self): return utils.get_repr(self, url=self._pac_url, constructor=True) def fetch(self): """Fetch the proxy from the remote URL.""" self._reply = self._manager.get(QNetworkRequest(self._pac_url)) self._reply.finished.connect(self._finish) @pyqtSlot() def _finish(self): if self._reply.error() != QNetworkReply.NoError: error = "Can't fetch PAC file from URL, error code {}: {}" self._error_message = error.format( self._reply.error(), self._reply.errorString()) log.network.error(self._error_message) else: try: pacscript = bytes(self._reply.readAll()).decode("utf-8") except UnicodeError as e: error = "Invalid encoding of a PAC file: {}" self._error_message = error.format(e) log.network.exception(self._error_message) try: self._pac = PACResolver(pacscript) log.network.debug("Successfully evaluated PAC file.") except EvalProxyError as e: error = "Error in PAC evaluation: {}" self._error_message = error.format(e) log.network.exception(self._error_message) self._manager = None self._reply = None self.finished.emit() def _wait(self): """Wait until a reply from the remote server is received.""" if self._manager is not None: loop = qtutils.EventLoop() self.finished.connect(loop.quit) loop.exec_() def fetch_error(self): """Check if PAC script is successfully fetched. Return None iff PAC script is downloaded and evaluated successfully, error string otherwise. """ self._wait() return self._error_message def resolve(self, query): """Resolve a query via PAC. Args: QNetworkProxyQuery. Return a list of QNetworkProxy objects in order of preference. """ self._wait() from_file = self._pac_url.scheme() == 'file' try: return self._pac.resolve(query, from_file=from_file) except (EvalProxyError, ParseProxyError) as e: log.network.exception("Error in PAC resolution: {}.".format(e)) # .invalid is guaranteed to be inaccessible in RFC 6761. # Port 9 is for DISCARD protocol -- DISCARD servers act like # /dev/null. # Later NetworkManager.createRequest will detect this and display # an error message. error_host = "pac-resolve-error.qutebrowser.invalid" return [QNetworkProxy(QNetworkProxy.HttpProxy, error_host, 9)]
1
19,410
No need for this: pylint already limits the disable to this function, and the disable is needed for the entire function anyway.
qutebrowser-qutebrowser
py
@@ -37,7 +37,13 @@ class Purchase < ActiveRecord::Base to: :purchaseable, prefix: :purchaseable, allow_nil: true - delegate :fulfilled_with_github?, :subscription?, :terms, to: :purchaseable + delegate( + :fulfilled_with_github?, + :includes_mentor?, + :subscription?, + :terms, + to: :purchaseable + ) def self.within_range(start_time, end_time) paid.where("created_at >= ? and created_at <= ?", start_time, end_time)
1
require 'digest/md5' class Purchase < ActiveRecord::Base PAYMENT_METHODS = %w(stripe paypal subscription free) belongs_to :user belongs_to :purchaseable, polymorphic: true belongs_to :coupon serialize :github_usernames attr_accessor :stripe_token, :paypal_url, :password validates :email, presence: true, format: { with: /\A([^@\s]+)@((?:[-a-z0-9]+\.)+[a-z]{2,})\Z/i } validates :lookup, presence: true validates :name, presence: true validates :password, presence: true, if: :password_required? validates :payment_method, inclusion: { in: PAYMENT_METHODS }, presence: true validates :purchaseable_id, presence: true validates :purchaseable_type, presence: true validates :quantity, presence: true validates :user_id, presence: true, if: :subscription? validates :variant, presence: true before_validation :create_user, if: :password_required? before_validation :generate_lookup, on: :create before_validation :set_free_payment_method, on: :create before_create :place_payment after_save :save_info_to_user, if: :user after_save :fulfill, if: :being_paid? after_save :send_receipt, if: :being_paid? after_save :update_user_payment_info, if: :being_paid? delegate :name, :sku, to: :purchaseable, prefix: :purchaseable, allow_nil: true delegate :fulfilled_with_github?, :subscription?, :terms, to: :purchaseable def self.within_range(start_time, end_time) paid.where("created_at >= ? and created_at <= ?", start_time, end_time) end def self.total_sales_within_range(start_time, end_time) within_range(start_time, end_time).to_a.sum(&:price) end def self.for_purchaseable(purchaseable) where(purchaseable_id: purchaseable.id, purchaseable_type: purchaseable.class.name) end def self.paid where(paid: true) end def self.with_stripe_customer_id where("stripe_customer_id is not null") end def self.by_email(email) where(email: email) end def price(coupon=CouponFactory.for_purchase(self)) paid_price || PurchasePriceCalculator.new(self, coupon).calculate end def first_name name.split(" ").first end def last_name name.split(" ").last end def to_param lookup end def stripe? payment_method == 'stripe' end def paypal? payment_method == 'paypal' end def free? price.zero? end def purchasing_as_subscriber? payment_method == 'subscription' end def complete_payment(params) payment.complete(params) save! end def starts_on purchaseable.starts_on(created_at.to_date) end def ends_on purchaseable.ends_on(created_at.to_date) end def active? (starts_on..ends_on).cover?(Time.zone.today) end def status if self.ends_on.today? || self.ends_on.future? 'in-progress' elsif self.ends_on.past? 'complete' end end def set_as_paid self.paid = true self.paid_price = price coupon.try(:applied) end def set_as_unpaid self.paid = false end def payment @payment ||= Payments::Factory.new(payment_method).new(self) end def github_usernames Array(super).compact.map(&:strip).reject(&:blank?) end def success_url(controller) if paypal? paypal_url else purchaseable.after_purchase_url(controller, self) end end private def password_required? subscription? && user.blank? end def create_user if name.present? && email.present? && password.present? self.user = User.create(name: name, email: email, password: password) add_errors_from_user unless user.valid? end end def add_errors_from_user errors[:email] = user.errors[:email] errors[:name] = user.errors[:name] errors[:password] = user.errors[:password] errors end def set_free_payment_method if free? && !subscription? && !purchasing_as_subscriber? 
self.payment_method = 'free' end end def stripe_customer @stripe_customer ||= Stripe::Customer.retrieve(stripe_customer_id) end def being_paid? paid? && paid_was == false end def place_payment payment.place end def update_user_payment_info if user payment.update_user(user) end end def fulfill purchaseable.fulfill(self, user) end def generate_lookup key = "#{email}#{purchaseable_name}#{Time.zone.now}\n" self.lookup = Digest::MD5.hexdigest(key).downcase end def save_info_to_user PurchaseInfoCopier.new(self, user).copy_info_to_user end def send_receipt unless purchasing_as_subscriber? SendPurchaseReceiptEmailJob.enqueue(id) end end end
1
9,785
I'm actually surprised this is valid Ruby. I thought you'd need either `delegate(...)` or `delegate \ ...`.
thoughtbot-upcase
rb
@@ -99,7 +99,7 @@ public class TestHelpers { if (checkArrowValidityVector) { ColumnVector columnVector = batch.column(i); ValueVector arrowVector = ((IcebergArrowColumnVector) columnVector).vectorAccessor().getVector(); - Assert.assertEquals("Nullability doesn't match", expectedValue == null, arrowVector.isNull(rowId)); + Assert.assertFalse("Nullability doesn't match", expectedValue == null ^ arrowVector.isNull(rowId)); } } }
1
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.iceberg.spark.data; import java.math.BigDecimal; import java.nio.ByteBuffer; import java.sql.Timestamp; import java.time.Instant; import java.time.LocalDate; import java.time.OffsetDateTime; import java.time.ZoneOffset; import java.time.temporal.ChronoUnit; import java.util.Collection; import java.util.Date; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.UUID; import org.apache.arrow.vector.ValueVector; import org.apache.avro.generic.GenericData; import org.apache.avro.generic.GenericData.Record; import org.apache.iceberg.Schema; import org.apache.iceberg.relocated.com.google.common.collect.Lists; import org.apache.iceberg.spark.data.vectorized.IcebergArrowColumnVector; import org.apache.iceberg.types.Type; import org.apache.iceberg.types.Types; import org.apache.orc.storage.serde2.io.DateWritable; import org.apache.spark.sql.Row; import org.apache.spark.sql.catalyst.InternalRow; import org.apache.spark.sql.catalyst.expressions.GenericRow; import org.apache.spark.sql.catalyst.expressions.SpecializedGetters; import org.apache.spark.sql.catalyst.util.ArrayData; import org.apache.spark.sql.catalyst.util.DateTimeUtils; import org.apache.spark.sql.catalyst.util.MapData; import org.apache.spark.sql.types.ArrayType; import org.apache.spark.sql.types.BinaryType; import org.apache.spark.sql.types.DataType; import org.apache.spark.sql.types.Decimal; import org.apache.spark.sql.types.MapType; import org.apache.spark.sql.types.StructField; import org.apache.spark.sql.types.StructType; import org.apache.spark.sql.vectorized.ColumnVector; import org.apache.spark.sql.vectorized.ColumnarBatch; import org.apache.spark.unsafe.types.UTF8String; import org.junit.Assert; import scala.collection.Seq; import static org.apache.iceberg.spark.SparkSchemaUtil.convert; import static scala.collection.JavaConverters.mapAsJavaMapConverter; import static scala.collection.JavaConverters.seqAsJavaListConverter; @SuppressWarnings("checkstyle:OverloadMethodsDeclarationOrder") public class TestHelpers { private TestHelpers() { } public static void assertEqualsSafe(Types.StructType struct, Record rec, Row row) { List<Types.NestedField> fields = struct.fields(); for (int i = 0; i < fields.size(); i += 1) { Type fieldType = fields.get(i).type(); Object expectedValue = rec.get(i); Object actualValue = row.get(i); assertEqualsSafe(fieldType, expectedValue, actualValue); } } public static void assertEqualsBatch(Types.StructType struct, Iterator<Record> expected, ColumnarBatch batch, boolean checkArrowValidityVector) { for (int rowId = 0; rowId < batch.numRows(); rowId++) { List<Types.NestedField> fields = struct.fields(); InternalRow row = batch.getRow(rowId); Record rec = expected.next(); for (int i = 
0; i < fields.size(); i += 1) { Type fieldType = fields.get(i).type(); Object expectedValue = rec.get(i); Object actualValue = row.isNullAt(i) ? null : row.get(i, convert(fieldType)); assertEqualsUnsafe(fieldType, expectedValue, actualValue); if (checkArrowValidityVector) { ColumnVector columnVector = batch.column(i); ValueVector arrowVector = ((IcebergArrowColumnVector) columnVector).vectorAccessor().getVector(); Assert.assertEquals("Nullability doesn't match", expectedValue == null, arrowVector.isNull(rowId)); } } } } private static void assertEqualsSafe(Types.ListType list, Collection<?> expected, List actual) { Type elementType = list.elementType(); List<?> expectedElements = Lists.newArrayList(expected); for (int i = 0; i < expectedElements.size(); i += 1) { Object expectedValue = expectedElements.get(i); Object actualValue = actual.get(i); assertEqualsSafe(elementType, expectedValue, actualValue); } } private static void assertEqualsSafe(Types.MapType map, Map<?, ?> expected, Map<?, ?> actual) { Type keyType = map.keyType(); Type valueType = map.valueType(); for (Object expectedKey : expected.keySet()) { Object matchingKey = null; for (Object actualKey : actual.keySet()) { try { assertEqualsSafe(keyType, expectedKey, actualKey); matchingKey = actualKey; } catch (AssertionError e) { // failed } } Assert.assertNotNull("Should have a matching key", matchingKey); assertEqualsSafe(valueType, expected.get(expectedKey), actual.get(matchingKey)); } } private static final OffsetDateTime EPOCH = Instant.ofEpochMilli(0L).atOffset(ZoneOffset.UTC); private static final LocalDate EPOCH_DAY = EPOCH.toLocalDate(); @SuppressWarnings("unchecked") private static void assertEqualsSafe(Type type, Object expected, Object actual) { if (expected == null && actual == null) { return; } switch (type.typeId()) { case BOOLEAN: case INTEGER: case LONG: case FLOAT: case DOUBLE: Assert.assertEquals("Primitive value should be equal to expected", expected, actual); break; case DATE: Assert.assertTrue("Should be an int", expected instanceof Integer); Assert.assertTrue("Should be a Date", actual instanceof Date); int daysFromEpoch = (Integer) expected; LocalDate date = ChronoUnit.DAYS.addTo(EPOCH_DAY, daysFromEpoch); Assert.assertEquals("ISO-8601 date should be equal", date.toString(), actual.toString()); break; case TIMESTAMP: Assert.assertTrue("Should be a long", expected instanceof Long); Assert.assertTrue("Should be a Timestamp", actual instanceof Timestamp); Timestamp ts = (Timestamp) actual; // milliseconds from nanos has already been added by getTime long tsMicros = (ts.getTime() * 1000) + ((ts.getNanos() / 1000) % 1000); Assert.assertEquals("Timestamp micros should be equal", expected, tsMicros); break; case STRING: Assert.assertTrue("Should be a String", actual instanceof String); Assert.assertEquals("Strings should be equal", String.valueOf(expected), actual); break; case UUID: Assert.assertTrue("Should expect a UUID", expected instanceof UUID); Assert.assertTrue("Should be a String", actual instanceof String); Assert.assertEquals("UUID string representation should match", expected.toString(), actual); break; case FIXED: Assert.assertTrue("Should expect a Fixed", expected instanceof GenericData.Fixed); Assert.assertTrue("Should be a byte[]", actual instanceof byte[]); Assert.assertArrayEquals("Bytes should match", ((GenericData.Fixed) expected).bytes(), (byte[]) actual); break; case BINARY: Assert.assertTrue("Should expect a ByteBuffer", expected instanceof ByteBuffer); Assert.assertTrue("Should be a byte[]", 
actual instanceof byte[]); Assert.assertArrayEquals("Bytes should match", ((ByteBuffer) expected).array(), (byte[]) actual); break; case DECIMAL: Assert.assertTrue("Should expect a BigDecimal", expected instanceof BigDecimal); Assert.assertTrue("Should be a BigDecimal", actual instanceof BigDecimal); Assert.assertEquals("BigDecimals should be equal", expected, actual); break; case STRUCT: Assert.assertTrue("Should expect a Record", expected instanceof Record); Assert.assertTrue("Should be a Row", actual instanceof Row); assertEqualsSafe(type.asNestedType().asStructType(), (Record) expected, (Row) actual); break; case LIST: Assert.assertTrue("Should expect a Collection", expected instanceof Collection); Assert.assertTrue("Should be a Seq", actual instanceof Seq); List<?> asList = seqAsJavaListConverter((Seq<?>) actual).asJava(); assertEqualsSafe(type.asNestedType().asListType(), (Collection) expected, asList); break; case MAP: Assert.assertTrue("Should expect a Collection", expected instanceof Map); Assert.assertTrue("Should be a Map", actual instanceof scala.collection.Map); Map<String, ?> asMap = mapAsJavaMapConverter( (scala.collection.Map<String, ?>) actual).asJava(); assertEqualsSafe(type.asNestedType().asMapType(), (Map<String, ?>) expected, asMap); break; case TIME: default: throw new IllegalArgumentException("Not a supported type: " + type); } } public static void assertEqualsUnsafe(Types.StructType struct, Record rec, InternalRow row) { List<Types.NestedField> fields = struct.fields(); for (int i = 0; i < fields.size(); i += 1) { Type fieldType = fields.get(i).type(); Object expectedValue = rec.get(i); Object actualValue = row.isNullAt(i) ? null : row.get(i, convert(fieldType)); assertEqualsUnsafe(fieldType, expectedValue, actualValue); } } private static void assertEqualsUnsafe(Types.ListType list, Collection<?> expected, ArrayData actual) { Type elementType = list.elementType(); List<?> expectedElements = Lists.newArrayList(expected); for (int i = 0; i < expectedElements.size(); i += 1) { Object expectedValue = expectedElements.get(i); Object actualValue = actual.get(i, convert(elementType)); assertEqualsUnsafe(elementType, expectedValue, actualValue); } } private static void assertEqualsUnsafe(Types.MapType map, Map<?, ?> expected, MapData actual) { Type keyType = map.keyType(); Type valueType = map.valueType(); List<Map.Entry<?, ?>> expectedElements = Lists.newArrayList(expected.entrySet()); ArrayData actualKeys = actual.keyArray(); ArrayData actualValues = actual.valueArray(); for (int i = 0; i < expectedElements.size(); i += 1) { Map.Entry<?, ?> expectedPair = expectedElements.get(i); Object actualKey = actualKeys.get(i, convert(keyType)); Object actualValue = actualValues.get(i, convert(keyType)); assertEqualsUnsafe(keyType, expectedPair.getKey(), actualKey); assertEqualsUnsafe(valueType, expectedPair.getValue(), actualValue); } } private static void assertEqualsUnsafe(Type type, Object expected, Object actual) { if (expected == null && actual == null) { return; } switch (type.typeId()) { case BOOLEAN: case INTEGER: case LONG: case FLOAT: case DOUBLE: case DATE: case TIMESTAMP: Assert.assertEquals("Primitive value should be equal to expected", expected, actual); break; case STRING: Assert.assertTrue("Should be a UTF8String", actual instanceof UTF8String); Assert.assertEquals("Strings should be equal", expected, actual.toString()); break; case UUID: Assert.assertTrue("Should expect a UUID", expected instanceof UUID); Assert.assertTrue("Should be a UTF8String", actual 
instanceof UTF8String); Assert.assertEquals("UUID string representation should match", expected.toString(), actual.toString()); break; case FIXED: Assert.assertTrue("Should expect a Fixed", expected instanceof GenericData.Fixed); Assert.assertTrue("Should be a byte[]", actual instanceof byte[]); Assert.assertArrayEquals("Bytes should match", ((GenericData.Fixed) expected).bytes(), (byte[]) actual); break; case BINARY: Assert.assertTrue("Should expect a ByteBuffer", expected instanceof ByteBuffer); Assert.assertTrue("Should be a byte[]", actual instanceof byte[]); Assert.assertArrayEquals("Bytes should match", ((ByteBuffer) expected).array(), (byte[]) actual); break; case DECIMAL: Assert.assertTrue("Should expect a BigDecimal", expected instanceof BigDecimal); Assert.assertTrue("Should be a Decimal", actual instanceof Decimal); Assert.assertEquals("BigDecimals should be equal", expected, ((Decimal) actual).toJavaBigDecimal()); break; case STRUCT: Assert.assertTrue("Should expect a Record", expected instanceof Record); Assert.assertTrue("Should be an InternalRow", actual instanceof InternalRow); assertEqualsUnsafe(type.asNestedType().asStructType(), (Record) expected, (InternalRow) actual); break; case LIST: Assert.assertTrue("Should expect a Collection", expected instanceof Collection); Assert.assertTrue("Should be an ArrayData", actual instanceof ArrayData); assertEqualsUnsafe(type.asNestedType().asListType(), (Collection) expected, (ArrayData) actual); break; case MAP: Assert.assertTrue("Should expect a Map", expected instanceof Map); Assert.assertTrue("Should be an ArrayBasedMapData", actual instanceof MapData); assertEqualsUnsafe(type.asNestedType().asMapType(), (Map) expected, (MapData) actual); break; case TIME: default: throw new IllegalArgumentException("Not a supported type: " + type); } } /** * Check that the given InternalRow is equivalent to the Row. * @param prefix context for error messages * @param type the type of the row * @param expected the expected value of the row * @param actual the actual value of the row */ public static void assertEquals(String prefix, Types.StructType type, InternalRow expected, Row actual) { if (expected == null || actual == null) { Assert.assertEquals(prefix, expected, actual); } else { List<Types.NestedField> fields = type.fields(); for (int c = 0; c < fields.size(); ++c) { String fieldName = fields.get(c).name(); Type childType = fields.get(c).type(); switch (childType.typeId()) { case BOOLEAN: case INTEGER: case LONG: case FLOAT: case DOUBLE: case STRING: case DECIMAL: case DATE: case TIMESTAMP: Assert.assertEquals(prefix + "." + fieldName + " - " + childType, getValue(expected, c, childType), getPrimitiveValue(actual, c, childType)); break; case UUID: case FIXED: case BINARY: assertEqualBytes(prefix + "." + fieldName, (byte[]) getValue(expected, c, childType), (byte[]) actual.get(c)); break; case STRUCT: { Types.StructType st = (Types.StructType) childType; assertEquals(prefix + "." + fieldName, st, expected.getStruct(c, st.fields().size()), actual.getStruct(c)); break; } case LIST: assertEqualsLists(prefix + "." + fieldName, childType.asListType(), expected.getArray(c), toList((Seq<?>) actual.get(c))); break; case MAP: assertEqualsMaps(prefix + "." 
+ fieldName, childType.asMapType(), expected.getMap(c), toJavaMap((scala.collection.Map<?, ?>) actual.getMap(c))); break; default: throw new IllegalArgumentException("Unhandled type " + childType); } } } } private static void assertEqualsLists(String prefix, Types.ListType type, ArrayData expected, List actual) { if (expected == null || actual == null) { Assert.assertEquals(prefix, expected, actual); } else { Assert.assertEquals(prefix + " length", expected.numElements(), actual.size()); Type childType = type.elementType(); for (int e = 0; e < expected.numElements(); ++e) { switch (childType.typeId()) { case BOOLEAN: case INTEGER: case LONG: case FLOAT: case DOUBLE: case STRING: case DECIMAL: case DATE: case TIMESTAMP: Assert.assertEquals(prefix + ".elem " + e + " - " + childType, getValue(expected, e, childType), actual.get(e)); break; case UUID: case FIXED: case BINARY: assertEqualBytes(prefix + ".elem " + e, (byte[]) getValue(expected, e, childType), (byte[]) actual.get(e)); break; case STRUCT: { Types.StructType st = (Types.StructType) childType; assertEquals(prefix + ".elem " + e, st, expected.getStruct(e, st.fields().size()), (Row) actual.get(e)); break; } case LIST: assertEqualsLists(prefix + ".elem " + e, childType.asListType(), expected.getArray(e), toList((Seq<?>) actual.get(e))); break; case MAP: assertEqualsMaps(prefix + ".elem " + e, childType.asMapType(), expected.getMap(e), toJavaMap((scala.collection.Map<?, ?>) actual.get(e))); break; default: throw new IllegalArgumentException("Unhandled type " + childType); } } } } private static void assertEqualsMaps(String prefix, Types.MapType type, MapData expected, Map<?, ?> actual) { if (expected == null || actual == null) { Assert.assertEquals(prefix, expected, actual); } else { Type keyType = type.keyType(); Type valueType = type.valueType(); ArrayData expectedKeyArray = expected.keyArray(); ArrayData expectedValueArray = expected.valueArray(); Assert.assertEquals(prefix + " length", expected.numElements(), actual.size()); for (int e = 0; e < expected.numElements(); ++e) { Object expectedKey = getValue(expectedKeyArray, e, keyType); Object actualValue = actual.get(expectedKey); if (actualValue == null) { Assert.assertEquals(prefix + ".key=" + expectedKey + " has null", true, expected.valueArray().isNullAt(e)); } else { switch (valueType.typeId()) { case BOOLEAN: case INTEGER: case LONG: case FLOAT: case DOUBLE: case STRING: case DECIMAL: case DATE: case TIMESTAMP: Assert.assertEquals(prefix + ".key=" + expectedKey + " - " + valueType, getValue(expectedValueArray, e, valueType), actual.get(expectedKey)); break; case UUID: case FIXED: case BINARY: assertEqualBytes(prefix + ".key=" + expectedKey, (byte[]) getValue(expectedValueArray, e, valueType), (byte[]) actual.get(expectedKey)); break; case STRUCT: { Types.StructType st = (Types.StructType) valueType; assertEquals(prefix + ".key=" + expectedKey, st, expectedValueArray.getStruct(e, st.fields().size()), (Row) actual.get(expectedKey)); break; } case LIST: assertEqualsLists(prefix + ".key=" + expectedKey, valueType.asListType(), expectedValueArray.getArray(e), toList((Seq<?>) actual.get(expectedKey))); break; case MAP: assertEqualsMaps(prefix + ".key=" + expectedKey, valueType.asMapType(), expectedValueArray.getMap(e), toJavaMap((scala.collection.Map<?, ?>) actual.get(expectedKey))); break; default: throw new IllegalArgumentException("Unhandled type " + valueType); } } } } } private static Object getValue(SpecializedGetters container, int ord, Type type) { if 
(container.isNullAt(ord)) { return null; } switch (type.typeId()) { case BOOLEAN: return container.getBoolean(ord); case INTEGER: return container.getInt(ord); case LONG: return container.getLong(ord); case FLOAT: return container.getFloat(ord); case DOUBLE: return container.getDouble(ord); case STRING: return container.getUTF8String(ord).toString(); case BINARY: case FIXED: case UUID: return container.getBinary(ord); case DATE: return new DateWritable(container.getInt(ord)).get(); case TIMESTAMP: return DateTimeUtils.toJavaTimestamp(container.getLong(ord)); case DECIMAL: { Types.DecimalType dt = (Types.DecimalType) type; return container.getDecimal(ord, dt.precision(), dt.scale()).toJavaBigDecimal(); } case STRUCT: Types.StructType struct = type.asStructType(); InternalRow internalRow = container.getStruct(ord, struct.fields().size()); Object[] data = new Object[struct.fields().size()]; for (int i = 0; i < data.length; i += 1) { if (internalRow.isNullAt(i)) { data[i] = null; } else { data[i] = getValue(internalRow, i, struct.fields().get(i).type()); } } return new GenericRow(data); default: throw new IllegalArgumentException("Unhandled type " + type); } } private static Object getPrimitiveValue(Row row, int ord, Type type) { if (row.isNullAt(ord)) { return null; } switch (type.typeId()) { case BOOLEAN: return row.getBoolean(ord); case INTEGER: return row.getInt(ord); case LONG: return row.getLong(ord); case FLOAT: return row.getFloat(ord); case DOUBLE: return row.getDouble(ord); case STRING: return row.getString(ord); case BINARY: case FIXED: case UUID: return row.get(ord); case DATE: return row.getDate(ord); case TIMESTAMP: return row.getTimestamp(ord); case DECIMAL: return row.getDecimal(ord); default: throw new IllegalArgumentException("Unhandled type " + type); } } private static <K, V> Map<K, V> toJavaMap(scala.collection.Map<K, V> map) { return map == null ? null : mapAsJavaMapConverter(map).asJava(); } private static List toList(Seq<?> val) { return val == null ? 
null : seqAsJavaListConverter(val).asJava(); } private static void assertEqualBytes(String context, byte[] expected, byte[] actual) { if (expected == null || actual == null) { Assert.assertEquals(context, expected, actual); } else { Assert.assertArrayEquals(context, expected, actual); } } static void assertEquals(Schema schema, Object expected, Object actual) { assertEquals("schema", convert(schema), expected, actual); } private static void assertEquals(String context, DataType type, Object expected, Object actual) { if (expected == null && actual == null) { return; } if (type instanceof StructType) { Assert.assertTrue("Expected should be an InternalRow: " + context, expected instanceof InternalRow); Assert.assertTrue("Actual should be an InternalRow: " + context, actual instanceof InternalRow); assertEquals(context, (StructType) type, (InternalRow) expected, (InternalRow) actual); } else if (type instanceof ArrayType) { Assert.assertTrue("Expected should be an ArrayData: " + context, expected instanceof ArrayData); Assert.assertTrue("Actual should be an ArrayData: " + context, actual instanceof ArrayData); assertEquals(context, (ArrayType) type, (ArrayData) expected, (ArrayData) actual); } else if (type instanceof MapType) { Assert.assertTrue("Expected should be a MapData: " + context, expected instanceof MapData); Assert.assertTrue("Actual should be a MapData: " + context, actual instanceof MapData); assertEquals(context, (MapType) type, (MapData) expected, (MapData) actual); } else if (type instanceof BinaryType) { assertEqualBytes(context, (byte[]) expected, (byte[]) actual); } else { Assert.assertEquals("Value should match expected: " + context, expected, actual); } } private static void assertEquals(String context, StructType struct, InternalRow expected, InternalRow actual) { Assert.assertEquals("Should have correct number of fields", struct.size(), actual.numFields()); for (int i = 0; i < actual.numFields(); i += 1) { StructField field = struct.fields()[i]; DataType type = field.dataType(); assertEquals(context + "." + field.name(), type, expected.isNullAt(i) ? null : expected.get(i, type), actual.isNullAt(i) ? null : actual.get(i, type)); } } private static void assertEquals(String context, ArrayType array, ArrayData expected, ArrayData actual) { Assert.assertEquals("Should have the same number of elements", expected.numElements(), actual.numElements()); DataType type = array.elementType(); for (int i = 0; i < actual.numElements(); i += 1) { assertEquals(context + ".element", type, expected.isNullAt(i) ? null : expected.get(i, type), actual.isNullAt(i) ? null : actual.get(i, type)); } } private static void assertEquals(String context, MapType map, MapData expected, MapData actual) { Assert.assertEquals("Should have the same number of elements", expected.numElements(), actual.numElements()); DataType keyType = map.keyType(); ArrayData expectedKeys = expected.keyArray(); ArrayData expectedValues = expected.valueArray(); DataType valueType = map.valueType(); ArrayData actualKeys = actual.keyArray(); ArrayData actualValues = actual.valueArray(); for (int i = 0; i < actual.numElements(); i += 1) { assertEquals(context + ".key", keyType, expectedKeys.isNullAt(i) ? null : expectedKeys.get(i, keyType), actualKeys.isNullAt(i) ? null : actualKeys.get(i, keyType)); assertEquals(context + ".value", valueType, expectedValues.isNullAt(i) ? null : expectedValues.get(i, valueType), actualValues.isNullAt(i) ? null : actualValues.get(i, valueType)); } } }
1
24,883
Why change this?
apache-iceberg
java
@@ -537,6 +537,12 @@ module.exports = class XHRUpload extends Plugin { const files = fileIDs.map((fileID) => this.uppy.getFile(fileID)) if (this.opts.bundle) { + // if bundle: true, we don’t support remote uploads + const isSomeFileRemote = files.some(file => file.isRemote) + if (isSomeFileRemote) { + throw new Error('Can’t bundle remote files when bundle: true is set') + } + return this.uploadBundle(files) }
1
const { Plugin } = require('@uppy/core') const cuid = require('cuid') const Translator = require('@uppy/utils/lib/Translator') const { Provider, RequestClient, Socket } = require('@uppy/companion-client') const emitSocketProgress = require('@uppy/utils/lib/emitSocketProgress') const getSocketHost = require('@uppy/utils/lib/getSocketHost') const settle = require('@uppy/utils/lib/settle') const limitPromises = require('@uppy/utils/lib/limitPromises') function buildResponseError (xhr, error) { // No error message if (!error) error = new Error('Upload error') // Got an error message string if (typeof error === 'string') error = new Error(error) // Got something else if (!(error instanceof Error)) { error = Object.assign(new Error('Upload error'), { data: error }) } error.request = xhr return error } /** * Set `data.type` in the blob to `file.meta.type`, * because we might have detected a more accurate file type in Uppy * https://stackoverflow.com/a/50875615 * * @param {Object} file File object with `data`, `size` and `meta` properties * @returns {Object} blob updated with the new `type` set from `file.meta.type` */ function setTypeInBlob (file) { const dataWithUpdatedType = file.data.slice(0, file.data.size, file.meta.type) return dataWithUpdatedType } module.exports = class XHRUpload extends Plugin { static VERSION = require('../package.json').version constructor (uppy, opts) { super(uppy, opts) this.type = 'uploader' this.id = this.opts.id || 'XHRUpload' this.title = 'XHRUpload' this.defaultLocale = { strings: { timedOut: 'Upload stalled for %{seconds} seconds, aborting.' } } // Default options const defaultOptions = { formData: true, fieldName: 'files[]', method: 'post', metaFields: null, responseUrlFieldName: 'url', bundle: false, headers: {}, timeout: 30 * 1000, limit: 0, withCredentials: false, responseType: '', /** * @typedef respObj * @property {string} responseText * @property {number} status * @property {string} statusText * @property {Object.<string, string>} headers * * @param {string} responseText the response body string * @param {XMLHttpRequest | respObj} response the response object (XHR or similar) */ getResponseData (responseText, response) { let parsedResponse = {} try { parsedResponse = JSON.parse(responseText) } catch (err) { console.log(err) } return parsedResponse }, /** * * @param {string} responseText the response body string * @param {XMLHttpRequest | respObj} response the response object (XHR or similar) */ getResponseError (responseText, response) { return new Error('Upload error') }, /** * @param {number} status the response status code * @param {string} responseText the response body string * @param {XMLHttpRequest | respObj} response the response object (XHR or similar) */ validateStatus (status, responseText, response) { return status >= 200 && status < 300 } } // Merge default options with the ones set by user this.opts = Object.assign({}, defaultOptions, opts) // i18n this.translator = new Translator([ this.defaultLocale, this.uppy.locale, this.opts.locale ]) this.i18n = this.translator.translate.bind(this.translator) this.i18nArray = this.translator.translateArray.bind(this.translator) this.handleUpload = this.handleUpload.bind(this) // Simultaneous upload limiting is shared across all uploads with this plugin. 
if (typeof this.opts.limit === 'number' && this.opts.limit !== 0) { this.limitUploads = limitPromises(this.opts.limit) } else { this.limitUploads = (fn) => fn } if (this.opts.bundle && !this.opts.formData) { throw new Error('`opts.formData` must be true when `opts.bundle` is enabled.') } } getOptions (file) { const overrides = this.uppy.getState().xhrUpload const opts = { ...this.opts, ...(overrides || {}), ...(file.xhrUpload || {}), headers: {} } Object.assign(opts.headers, this.opts.headers) if (overrides) { Object.assign(opts.headers, overrides.headers) } if (file.xhrUpload) { Object.assign(opts.headers, file.xhrUpload.headers) } return opts } // Helper to abort upload requests if there has not been any progress for `timeout` ms. // Create an instance using `timer = createProgressTimeout(10000, onTimeout)` // Call `timer.progress()` to signal that there has been progress of any kind. // Call `timer.done()` when the upload has completed. createProgressTimeout (timeout, timeoutHandler) { const uppy = this.uppy const self = this let isDone = false function onTimedOut () { uppy.log(`[XHRUpload] timed out`) const error = new Error(self.i18n('timedOut', { seconds: Math.ceil(timeout / 1000) })) timeoutHandler(error) } let aliveTimer = null function progress () { // Some browsers fire another progress event when the upload is // cancelled, so we have to ignore progress after the timer was // told to stop. if (isDone) return if (timeout > 0) { if (aliveTimer) clearTimeout(aliveTimer) aliveTimer = setTimeout(onTimedOut, timeout) } } function done () { uppy.log(`[XHRUpload] timer done`) if (aliveTimer) { clearTimeout(aliveTimer) aliveTimer = null } isDone = true } return { progress, done } } addMetadata (formData, meta, opts) { const metaFields = Array.isArray(opts.metaFields) ? opts.metaFields // Send along all fields by default. : Object.keys(meta) metaFields.forEach((item) => { formData.append(item, meta[item]) }) } createFormDataUpload (file, opts) { const formPost = new FormData() this.addMetadata(formPost, file.meta, opts) const dataWithUpdatedType = setTypeInBlob(file) if (file.name) { formPost.append(opts.fieldName, dataWithUpdatedType, file.meta.name) } else { formPost.append(opts.fieldName, dataWithUpdatedType) } return formPost } createBundledUpload (files, opts) { const formPost = new FormData() const { meta } = this.uppy.getState() this.addMetadata(formPost, meta, opts) files.forEach((file) => { const opts = this.getOptions(file) const dataWithUpdatedType = setTypeInBlob(file) if (file.name) { formPost.append(opts.fieldName, dataWithUpdatedType, file.name) } else { formPost.append(opts.fieldName, dataWithUpdatedType) } }) return formPost } createBareUpload (file, opts) { return file.data } upload (file, current, total) { const opts = this.getOptions(file) this.uppy.log(`uploading ${current} of ${total}`) return new Promise((resolve, reject) => { const data = opts.formData ? 
this.createFormDataUpload(file, opts) : this.createBareUpload(file, opts) const timer = this.createProgressTimeout(opts.timeout, (error) => { xhr.abort() this.uppy.emit('upload-error', file, error) reject(error) }) const xhr = new XMLHttpRequest() const id = cuid() xhr.upload.addEventListener('loadstart', (ev) => { this.uppy.log(`[XHRUpload] ${id} started`) }) xhr.upload.addEventListener('progress', (ev) => { this.uppy.log(`[XHRUpload] ${id} progress: ${ev.loaded} / ${ev.total}`) // Begin checking for timeouts when progress starts, instead of loading, // to avoid timing out requests on browser concurrency queue timer.progress() if (ev.lengthComputable) { this.uppy.emit('upload-progress', file, { uploader: this, bytesUploaded: ev.loaded, bytesTotal: ev.total }) } }) xhr.addEventListener('load', (ev) => { this.uppy.log(`[XHRUpload] ${id} finished`) timer.done() if (opts.validateStatus(ev.target.status, xhr.responseText, xhr)) { const body = opts.getResponseData(xhr.responseText, xhr) const uploadURL = body[opts.responseUrlFieldName] const uploadResp = { status: ev.target.status, body, uploadURL } this.uppy.emit('upload-success', file, uploadResp) if (uploadURL) { this.uppy.log(`Download ${file.name} from ${uploadURL}`) } return resolve(file) } else { const body = opts.getResponseData(xhr.responseText, xhr) const error = buildResponseError(xhr, opts.getResponseError(xhr.responseText, xhr)) const response = { status: ev.target.status, body } this.uppy.emit('upload-error', file, error, response) return reject(error) } }) xhr.addEventListener('error', (ev) => { this.uppy.log(`[XHRUpload] ${id} errored`) timer.done() const error = buildResponseError(xhr, opts.getResponseError(xhr.responseText, xhr)) this.uppy.emit('upload-error', file, error) return reject(error) }) xhr.open(opts.method.toUpperCase(), opts.endpoint, true) // IE10 does not allow setting `withCredentials` and `responseType` // before `open()` is called. xhr.withCredentials = opts.withCredentials if (opts.responseType !== '') { xhr.responseType = opts.responseType } Object.keys(opts.headers).forEach((header) => { xhr.setRequestHeader(header, opts.headers[header]) }) xhr.send(data) this.uppy.on('file-removed', (removedFile) => { if (removedFile.id === file.id) { timer.done() xhr.abort() reject(new Error('File removed')) } }) this.uppy.on('cancel-all', () => { timer.done() xhr.abort() reject(new Error('Upload cancelled')) }) }) } uploadRemote (file, current, total) { const opts = this.getOptions(file) return new Promise((resolve, reject) => { const fields = {} const metaFields = Array.isArray(opts.metaFields) ? opts.metaFields // Send along all fields by default. : Object.keys(file.meta) metaFields.forEach((name) => { fields[name] = file.meta[name] }) const Client = file.remote.providerOptions.provider ? 
Provider : RequestClient const client = new Client(this.uppy, file.remote.providerOptions) client.post(file.remote.url, { ...file.remote.body, endpoint: opts.endpoint, size: file.data.size, fieldname: opts.fieldName, metadata: fields, headers: opts.headers }).then((res) => { const token = res.token const host = getSocketHost(file.remote.companionUrl) const socket = new Socket({ target: `${host}/api/${token}` }) socket.on('progress', (progressData) => emitSocketProgress(this, progressData, file)) socket.on('success', (data) => { const body = opts.getResponseData(data.response.responseText, data.response) const uploadURL = body[opts.responseUrlFieldName] const uploadResp = { status: data.response.status, body, uploadURL } this.uppy.emit('upload-success', file, uploadResp) socket.close() return resolve() }) socket.on('error', (errData) => { const resp = errData.response const error = resp ? opts.getResponseError(resp.responseText, resp) : Object.assign(new Error(errData.error.message), { cause: errData.error }) this.uppy.emit('upload-error', file, error) reject(error) }) }) }) } uploadBundle (files) { return new Promise((resolve, reject) => { const endpoint = this.opts.endpoint const method = this.opts.method const optsFromState = this.uppy.getState().xhrUpload const formData = this.createBundledUpload(files, { ...this.opts, ...(optsFromState || {}) }) const xhr = new XMLHttpRequest() const timer = this.createProgressTimeout(this.opts.timeout, (error) => { xhr.abort() emitError(error) reject(error) }) const emitError = (error) => { files.forEach((file) => { this.uppy.emit('upload-error', file, error) }) } xhr.upload.addEventListener('loadstart', (ev) => { this.uppy.log('[XHRUpload] started uploading bundle') timer.progress() }) xhr.upload.addEventListener('progress', (ev) => { timer.progress() if (!ev.lengthComputable) return files.forEach((file) => { this.uppy.emit('upload-progress', file, { uploader: this, bytesUploaded: ev.loaded / ev.total * file.size, bytesTotal: file.size }) }) }) xhr.addEventListener('load', (ev) => { timer.done() if (this.opts.validateStatus(ev.target.status, xhr.responseText, xhr)) { const body = this.opts.getResponseData(xhr.responseText, xhr) const uploadResp = { status: ev.target.status, body } files.forEach((file) => { this.uppy.emit('upload-success', file, uploadResp) }) return resolve() } const error = this.opts.getResponseError(xhr.responseText, xhr) || new Error('Upload error') error.request = xhr emitError(error) return reject(error) }) xhr.addEventListener('error', (ev) => { timer.done() const error = this.opts.getResponseError(xhr.responseText, xhr) || new Error('Upload error') emitError(error) return reject(error) }) this.uppy.on('cancel-all', () => { timer.done() xhr.abort() }) xhr.open(method.toUpperCase(), endpoint, true) // IE10 does not allow setting `withCredentials` and `responseType` // before `open()` is called. 
xhr.withCredentials = this.opts.withCredentials if (this.opts.responseType !== '') { xhr.responseType = this.opts.responseType } Object.keys(this.opts.headers).forEach((header) => { xhr.setRequestHeader(header, this.opts.headers[header]) }) xhr.send(formData) files.forEach((file) => { this.uppy.emit('upload-started', file) }) }) } uploadFiles (files) { const actions = files.map((file, i) => { const current = parseInt(i, 10) + 1 const total = files.length if (file.error) { return () => Promise.reject(new Error(file.error)) } else if (file.isRemote) { // We emit upload-started here, so that it's also emitted for files // that have to wait due to the `limit` option. this.uppy.emit('upload-started', file) return this.uploadRemote.bind(this, file, current, total) } else { this.uppy.emit('upload-started', file) return this.upload.bind(this, file, current, total) } }) const promises = actions.map((action) => { const limitedAction = this.limitUploads(action) return limitedAction() }) return settle(promises) } handleUpload (fileIDs) { if (fileIDs.length === 0) { this.uppy.log('[XHRUpload] No files to upload!') return Promise.resolve() } this.uppy.log('[XHRUpload] Uploading...') const files = fileIDs.map((fileID) => this.uppy.getFile(fileID)) if (this.opts.bundle) { return this.uploadBundle(files) } return this.uploadFiles(files).then(() => null) } install () { if (this.opts.bundle) { const { capabilities } = this.uppy.getState() this.uppy.setState({ capabilities: { ...capabilities, individualCancellation: false } }) } this.uppy.addUploader(this.handleUpload) } uninstall () { if (this.opts.bundle) { const { capabilities } = this.uppy.getState() this.uppy.setState({ capabilities: { ...capabilities, individualCancellation: true } }) } this.uppy.removeUploader(this.handleUpload) } }
1
12,368
Maybe change it to `'Can't UPLOAD remote files when bundle: true is set'`?
transloadit-uppy
js
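A minimal sketch of how the `handleUpload` guard from the patch could read with the reviewer's suggested wording; everything except the message text comes from the plugin source above, and the new message is only the reviewer's proposal, not a confirmed change:

```js
handleUpload (fileIDs) {
  if (fileIDs.length === 0) {
    this.uppy.log('[XHRUpload] No files to upload!')
    return Promise.resolve()
  }

  this.uppy.log('[XHRUpload] Uploading...')
  const files = fileIDs.map((fileID) => this.uppy.getFile(fileID))

  if (this.opts.bundle) {
    // Remote (Companion-backed) files can't be sent as a single multipart bundle,
    // so fail fast. The wording below follows the review suggestion.
    if (files.some((file) => file.isRemote)) {
      throw new Error('Can’t upload remote files when bundle: true is set')
    }
    return this.uploadBundle(files)
  }

  return this.uploadFiles(files).then(() => null)
}
```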
@@ -217,7 +217,14 @@ namespace OpenTelemetry if (this.circularBuffer.Count > 0) { - this.exporter.Export(new Batch<T>(this.circularBuffer, this.maxExportBatchSize)); + try + { + this.exporter.Export(new Batch<T>(this.circularBuffer, this.maxExportBatchSize)); + } + catch (Exception ex) + { + OpenTelemetrySdkEventSource.Log.SpanProcessorException(nameof(this.OnExport), ex); + } this.dataExportedNotification.Set(); this.dataExportedNotification.Reset();
1
// <copyright file="BatchExportProcessor.cs" company="OpenTelemetry Authors"> // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // </copyright> using System; using System.Diagnostics; using System.Threading; using OpenTelemetry.Internal; namespace OpenTelemetry { /// <summary> /// Implements processor that batches telemetry objects before calling exporter. /// </summary> /// <typeparam name="T">The type of telemetry object to be exported.</typeparam> public class BatchExportProcessor<T> : BaseExportProcessor<T> where T : class { internal const int DefaultMaxQueueSize = 2048; internal const int DefaultScheduledDelayMilliseconds = 5000; internal const int DefaultExporterTimeoutMilliseconds = 30000; internal const int DefaultMaxExportBatchSize = 512; private readonly CircularBuffer<T> circularBuffer; private readonly int scheduledDelayMilliseconds; private readonly int exporterTimeoutMilliseconds; private readonly int maxExportBatchSize; private readonly Thread exporterThread; private readonly AutoResetEvent exportTrigger = new AutoResetEvent(false); private readonly ManualResetEvent dataExportedNotification = new ManualResetEvent(false); private readonly ManualResetEvent shutdownTrigger = new ManualResetEvent(false); private long shutdownDrainTarget = long.MaxValue; private long droppedCount; /// <summary> /// Initializes a new instance of the <see cref="BatchExportProcessor{T}"/> class. /// </summary> /// <param name="exporter">Exporter instance.</param> /// <param name="maxQueueSize">The maximum queue size. After the size is reached data are dropped. The default value is 2048.</param> /// <param name="scheduledDelayMilliseconds">The delay interval in milliseconds between two consecutive exports. The default value is 5000.</param> /// <param name="exporterTimeoutMilliseconds">How long the export can run before it is cancelled. The default value is 30000.</param> /// <param name="maxExportBatchSize">The maximum batch size of every export. It must be smaller or equal to maxQueueSize. 
The default value is 512.</param> public BatchExportProcessor( BaseExporter<T> exporter, int maxQueueSize = DefaultMaxQueueSize, int scheduledDelayMilliseconds = DefaultScheduledDelayMilliseconds, int exporterTimeoutMilliseconds = DefaultExporterTimeoutMilliseconds, int maxExportBatchSize = DefaultMaxExportBatchSize) : base(exporter) { if (maxQueueSize <= 0) { throw new ArgumentOutOfRangeException(nameof(maxQueueSize), maxQueueSize, "maxQueueSize should be greater than zero."); } if (maxExportBatchSize <= 0 || maxExportBatchSize > maxQueueSize) { throw new ArgumentOutOfRangeException(nameof(maxExportBatchSize), maxExportBatchSize, "maxExportBatchSize should be greater than zero and less than maxQueueSize."); } if (scheduledDelayMilliseconds <= 0) { throw new ArgumentOutOfRangeException(nameof(scheduledDelayMilliseconds), scheduledDelayMilliseconds, "scheduledDelayMilliseconds should be greater than zero."); } if (exporterTimeoutMilliseconds < 0) { throw new ArgumentOutOfRangeException(nameof(exporterTimeoutMilliseconds), exporterTimeoutMilliseconds, "exporterTimeoutMilliseconds should be non-negative."); } this.circularBuffer = new CircularBuffer<T>(maxQueueSize); this.scheduledDelayMilliseconds = scheduledDelayMilliseconds; this.exporterTimeoutMilliseconds = exporterTimeoutMilliseconds; this.maxExportBatchSize = maxExportBatchSize; this.exporterThread = new Thread(new ThreadStart(this.ExporterProc)) { IsBackground = true, Name = $"OpenTelemetry-{nameof(BatchExportProcessor<T>)}-{exporter.GetType().Name}", }; this.exporterThread.Start(); } /// <summary> /// Gets the number of telemetry objects dropped by the processor. /// </summary> internal long DroppedCount => this.droppedCount; /// <summary> /// Gets the number of telemetry objects received by the processor. /// </summary> internal long ReceivedCount => this.circularBuffer.AddedCount + this.DroppedCount; /// <summary> /// Gets the number of telemetry objects processed by the underlying exporter. /// </summary> internal long ProcessedCount => this.circularBuffer.RemovedCount; /// <inheritdoc/> public override void OnExport(T data) { if (this.circularBuffer.TryAdd(data, maxSpinCount: 50000)) { if (this.circularBuffer.Count >= this.maxExportBatchSize) { this.exportTrigger.Set(); } return; // enqueue succeeded } // either the queue is full or exceeded the spin limit, drop the item on the floor Interlocked.Increment(ref this.droppedCount); } /// <inheritdoc/> protected override bool OnForceFlush(int timeoutMilliseconds) { var tail = this.circularBuffer.RemovedCount; var head = this.circularBuffer.AddedCount; if (head == tail) { return true; // nothing to flush } this.exportTrigger.Set(); if (timeoutMilliseconds == 0) { return false; } var triggers = new WaitHandle[] { this.dataExportedNotification, this.shutdownTrigger }; var sw = Stopwatch.StartNew(); // There is a chance that the export thread finished processing all the data from the queue, // and signaled before we enter wait here, use polling to prevent being blocked indefinitely. 
const int pollingMilliseconds = 1000; while (true) { if (timeoutMilliseconds == Timeout.Infinite) { WaitHandle.WaitAny(triggers, pollingMilliseconds); } else { var timeout = timeoutMilliseconds - sw.ElapsedMilliseconds; if (timeout <= 0) { return this.circularBuffer.RemovedCount >= head; } WaitHandle.WaitAny(triggers, Math.Min((int)timeout, pollingMilliseconds)); } if (this.circularBuffer.RemovedCount >= head) { return true; } if (this.shutdownDrainTarget != long.MaxValue) { return false; } } } /// <inheritdoc/> protected override bool OnShutdown(int timeoutMilliseconds) { this.shutdownDrainTarget = this.circularBuffer.AddedCount; this.shutdownTrigger.Set(); if (timeoutMilliseconds == Timeout.Infinite) { this.exporterThread.Join(); return this.exporter.Shutdown(); } if (timeoutMilliseconds == 0) { return this.exporter.Shutdown(0); } var sw = Stopwatch.StartNew(); this.exporterThread.Join(timeoutMilliseconds); var timeout = timeoutMilliseconds - sw.ElapsedMilliseconds; return this.exporter.Shutdown((int)Math.Max(timeout, 0)); } private void ExporterProc() { var triggers = new WaitHandle[] { this.exportTrigger, this.shutdownTrigger }; while (true) { // only wait when the queue doesn't have enough items, otherwise keep busy and send data continuously if (this.circularBuffer.Count < this.maxExportBatchSize) { WaitHandle.WaitAny(triggers, this.scheduledDelayMilliseconds); } if (this.circularBuffer.Count > 0) { this.exporter.Export(new Batch<T>(this.circularBuffer, this.maxExportBatchSize)); this.dataExportedNotification.Set(); this.dataExportedNotification.Reset(); } if (this.circularBuffer.RemovedCount >= this.shutdownDrainTarget) { break; } } } } }
1
18,524
Do we need to drop the remaining items from the batch? Otherwise we might end up in a dead loop. Adding @CodeBlanch for awareness.
open-telemetry-opentelemetry-dotnet
.cs
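One way to act on this concern, sketched under the assumption that a `Batch<T>` can be enumerated to pull any leftover items out of the circular buffer (whether the real type exposes that is not confirmed by the file above): catch the exporter failure, then drain whatever the batch still holds so `RemovedCount` keeps advancing and the shutdown drain target can still be reached.

```csharp
if (this.circularBuffer.Count > 0)
{
    var batch = new Batch<T>(this.circularBuffer, this.maxExportBatchSize);
    try
    {
        this.exporter.Export(batch);
    }
    catch (Exception ex)
    {
        OpenTelemetrySdkEventSource.Log.SpanProcessorException(nameof(this.OnExport), ex);
    }
    finally
    {
        // Assumed drain step: walking the batch consumes any items the exporter
        // never touched, so they are dropped instead of sitting in the buffer
        // forever and blocking the RemovedCount >= shutdownDrainTarget check.
        foreach (var unused in batch)
        {
        }
    }

    this.dataExportedNotification.Set();
    this.dataExportedNotification.Reset();
}
```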
@@ -292,6 +292,19 @@ func setupMiners(st state.Tree, sm vm.StorageMap, keys []*types.KeyInfo, miners return nil, err } } + if m.NumCommittedSectors > 0 { + // Now submit a dummy PoSt right away to trigger power updates. + // Don't worry, bootstrap miner actors don't need to verify + // that the PoSt is well formed. + poStProof := make([]byte, types.OnePoStProofPartition.ProofLen()) + if _, err := pnrg.Read(poStProof[:]); err != nil { + return nil, err + } + _, err = applyMessageDirect(ctx, st, sm, addr, maddr, types.NewAttoFILFromFIL(0), "submitPoSt", []types.PoStProof{poStProof}, types.EmptyIntSet()) + if err != nil { + return nil, err + } + } } return minfos, nil
1
package gengen import ( "context" "fmt" "io" mrand "math/rand" "strconv" "github.com/filecoin-project/go-filecoin/actor" "github.com/filecoin-project/go-filecoin/actor/builtin" "github.com/filecoin-project/go-filecoin/actor/builtin/account" "github.com/filecoin-project/go-filecoin/address" "github.com/filecoin-project/go-filecoin/consensus" "github.com/filecoin-project/go-filecoin/crypto" "github.com/filecoin-project/go-filecoin/state" "github.com/filecoin-project/go-filecoin/types" "github.com/filecoin-project/go-filecoin/vm" bserv "github.com/ipfs/go-blockservice" "github.com/ipfs/go-car" "github.com/ipfs/go-cid" ds "github.com/ipfs/go-datastore" "github.com/ipfs/go-hamt-ipld" "github.com/ipfs/go-ipfs-blockstore" "github.com/ipfs/go-ipfs-exchange-offline" dag "github.com/ipfs/go-merkledag" "github.com/libp2p/go-libp2p-peer" mh "github.com/multiformats/go-multihash" "github.com/pkg/errors" ) // CreateStorageMinerConfig holds configuration options used to create a storage // miner in the genesis block. Note: Instances of this struct can be created // from the contents of fixtures/setup.json, which means that a JSON // encoder/decoder must exist for any of the struct's fields' types. type CreateStorageMinerConfig struct { // Owner is the name of the key that owns this miner // It must be a name of a key from the configs 'Keys' list Owner int // PeerID is the peer ID to set as the miners owner PeerID string // NumCommittedSectors is the number of sectors that this miner has // committed to the network. NumCommittedSectors uint64 // SectorSize is the size of the sectors that this miner commits, in bytes. SectorSize uint64 } // GenesisCfg is the top level configuration struct used to create a genesis // block. type GenesisCfg struct { // Keys is an array of names of keys. A random key will be generated // for each name here. Keys int // PreAlloc is a mapping from key names to string values of whole filecoin // that will be preallocated to each account PreAlloc []string // Miners is a list of miners that should be set up at the start of the network Miners []*CreateStorageMinerConfig // ProofsMode affects sealing, sector packing, PoSt, etc. in the proofs library ProofsMode types.ProofsMode } // RenderedGenInfo contains information about a genesis block creation type RenderedGenInfo struct { // Keys is the set of keys generated Keys []*types.KeyInfo // Miners is the list of addresses of miners created Miners []RenderedMinerInfo // GenesisCid is the cid of the created genesis block GenesisCid cid.Cid } // RenderedMinerInfo contains info about a created miner type RenderedMinerInfo struct { // Owner is the key name of the owner of this miner Owner int // Address is the address generated on-chain for the miner Address address.Address // Power is the amount of storage power this miner was created with Power *types.BytesAmount } // GenGen takes the genesis configuration and creates a genesis block that // matches the description. It writes all chunks to the dagservice, and returns // the final genesis block. // // WARNING: Do not use maps in this code, they will make this code non deterministic. 
func GenGen(ctx context.Context, cfg *GenesisCfg, cst *hamt.CborIpldStore, bs blockstore.Blockstore, seed int64) (*RenderedGenInfo, error) { pnrg := mrand.New(mrand.NewSource(seed)) keys, err := genKeys(cfg.Keys, pnrg) if err != nil { return nil, err } st := state.NewEmptyStateTreeWithActors(cst, builtin.Actors) storageMap := vm.NewStorageMap(bs) if err := consensus.SetupDefaultActors(ctx, st, storageMap, cfg.ProofsMode); err != nil { return nil, err } if err := setupPrealloc(st, keys, cfg.PreAlloc); err != nil { return nil, err } miners, err := setupMiners(st, storageMap, keys, cfg.Miners, pnrg) if err != nil { return nil, err } if err := cst.Blocks.AddBlock(types.StorageMarketActorCodeObj); err != nil { return nil, err } if err := cst.Blocks.AddBlock(types.MinerActorCodeObj); err != nil { return nil, err } if err := cst.Blocks.AddBlock(types.BootstrapMinerActorCodeObj); err != nil { return nil, err } if err := cst.Blocks.AddBlock(types.AccountActorCodeObj); err != nil { return nil, err } if err := cst.Blocks.AddBlock(types.PaymentBrokerActorCodeObj); err != nil { return nil, err } stateRoot, err := st.Flush(ctx) if err != nil { return nil, err } err = storageMap.Flush() if err != nil { return nil, err } geneblk := &types.Block{ StateRoot: stateRoot, } c, err := cst.Put(ctx, geneblk) if err != nil { return nil, err } return &RenderedGenInfo{ Keys: keys, GenesisCid: c, Miners: miners, }, nil } func genKeys(cfgkeys int, pnrg io.Reader) ([]*types.KeyInfo, error) { keys := make([]*types.KeyInfo, cfgkeys) for i := 0; i < cfgkeys; i++ { sk, err := crypto.GenerateKeyFromSeed(pnrg) // TODO: GenerateKey should return a KeyInfo if err != nil { return nil, err } ki := &types.KeyInfo{ PrivateKey: sk, Curve: types.SECP256K1, } keys[i] = ki } return keys, nil } func setupPrealloc(st state.Tree, keys []*types.KeyInfo, prealloc []string) error { if len(keys) < len(prealloc) { return fmt.Errorf("keys do not match prealloc") } for i, v := range prealloc { ki := keys[i] addr, err := ki.Address() if err != nil { return err } valint, err := strconv.ParseUint(v, 10, 64) if err != nil { return err } act, err := account.NewActor(types.NewAttoFILFromFIL(valint)) if err != nil { return err } if err := st.SetActor(context.Background(), addr, act); err != nil { return err } } netact, err := account.NewActor(types.NewAttoFILFromFIL(10000000000)) if err != nil { return err } return st.SetActor(context.Background(), address.NetworkAddress, netact) } func setupMiners(st state.Tree, sm vm.StorageMap, keys []*types.KeyInfo, miners []*CreateStorageMinerConfig, pnrg io.Reader) ([]RenderedMinerInfo, error) { var minfos []RenderedMinerInfo ctx := context.Background() for _, m := range miners { addr, err := keys[m.Owner].Address() if err != nil { return nil, err } var pid peer.ID if m.PeerID != "" { p, err := peer.IDB58Decode(m.PeerID) if err != nil { return nil, err } pid = p } else { // this is just deterministically deriving from the owner h, err := mh.Sum(addr.Bytes(), mh.SHA2_256, -1) if err != nil { return nil, err } pid = peer.ID(h) } // give collateral to account actor _, err = applyMessageDirect(ctx, st, sm, address.NetworkAddress, addr, types.NewAttoFILFromFIL(100000), "") if err != nil { return nil, err } ret, err := applyMessageDirect(ctx, st, sm, addr, address.StorageMarketAddress, types.NewAttoFILFromFIL(100000), "createStorageMiner", types.NewBytesAmount(m.SectorSize), pid) if err != nil { return nil, err } // get miner address maddr, err := address.NewFromBytes(ret[0]) if err != nil { return nil, err } minfos = 
append(minfos, RenderedMinerInfo{ Address: maddr, Owner: m.Owner, Power: types.NewBytesAmount(m.SectorSize * m.NumCommittedSectors), }) // commit sector to add power for i := uint64(0); i < m.NumCommittedSectors; i++ { // the following statement fakes out the behavior of the SectorBuilder.sectorIDNonce, // which is initialized to 0 and incremented (for the first sector) to 1 sectorID := i + 1 commD := make([]byte, 32) commR := make([]byte, 32) commRStar := make([]byte, 32) sealProof := make([]byte, types.TwoPoRepProofPartitions.ProofLen()) if _, err := pnrg.Read(commD[:]); err != nil { return nil, err } if _, err := pnrg.Read(commR[:]); err != nil { return nil, err } if _, err := pnrg.Read(commRStar[:]); err != nil { return nil, err } if _, err := pnrg.Read(sealProof[:]); err != nil { return nil, err } _, err := applyMessageDirect(ctx, st, sm, addr, maddr, types.NewAttoFILFromFIL(0), "commitSector", sectorID, commD, commR, commRStar, sealProof) if err != nil { return nil, err } } } return minfos, nil } // GenGenesisCar generates a car for the given genesis configuration func GenGenesisCar(cfg *GenesisCfg, out io.Writer, seed int64) (*RenderedGenInfo, error) { // TODO: these six lines are ugly. We can do better... mds := ds.NewMapDatastore() bstore := blockstore.NewBlockstore(mds) offl := offline.Exchange(bstore) blkserv := bserv.New(bstore, offl) cst := &hamt.CborIpldStore{Blocks: blkserv} dserv := dag.NewDAGService(blkserv) ctx := context.Background() info, err := GenGen(ctx, cfg, cst, bstore, seed) if err != nil { return nil, err } return info, car.WriteCar(ctx, dserv, []cid.Cid{info.GenesisCid}, out) } // applyMessageDirect applies a given message directly to the given state tree and storage map and returns the result of the message. // This is a shortcut to allow gengen to use built-in actor functionality to alter the genesis block's state. // Outside genesis, direct execution of actor code is a really bad idea. func applyMessageDirect(ctx context.Context, st state.Tree, vms vm.StorageMap, from, to address.Address, value types.AttoFIL, method string, params ...interface{}) ([][]byte, error) { pdata := actor.MustConvertParams(params...) msg := types.NewMessage(from, to, 0, value, method, pdata) // this should never fail due to lack of gas since gas doesn't have meaning here gasLimit := types.BlockGasLimit smsg, err := types.NewSignedMessage(*msg, &signer{}, types.NewGasPrice(0), gasLimit) if err != nil { return nil, err } // create new processor that doesn't reward and doesn't validate applier := consensus.NewConfiguredProcessor(&messageValidator{}, &blockRewarder{}) res, err := applier.ApplyMessagesAndPayRewards(ctx, st, vms, []*types.SignedMessage{smsg}, address.Undef, types.NewBlockHeight(0), nil) if err != nil { return nil, err } if len(res.Results) == 0 { return nil, errors.New("GenGen message did not produce a result") } if res.Results[0].ExecutionError != nil { return nil, res.Results[0].ExecutionError } return res.Results[0].Receipt.Return, nil } // GenGenMessageValidator is a validator that doesn't validate to simplify message creation in tests. 
type messageValidator struct{} var _ consensus.SignedMessageValidator = (*messageValidator)(nil) // Validate always returns nil func (ggmv *messageValidator) Validate(ctx context.Context, msg *types.SignedMessage, fromActor *actor.Actor) error { return nil } // blockRewarder is a rewarder that doesn't actually add any rewards to simplify state tracking in tests type blockRewarder struct{} var _ consensus.BlockRewarder = (*blockRewarder)(nil) // BlockReward is a noop func (gbr *blockRewarder) BlockReward(ctx context.Context, st state.Tree, minerAddr address.Address) error { return nil } // GasReward is a noop func (gbr *blockRewarder) GasReward(ctx context.Context, st state.Tree, minerAddr address.Address, msg *types.SignedMessage, cost types.AttoFIL) error { return nil } // signer doesn't actually sign because it's not actually validated type signer struct{} var _ types.Signer = (*signer)(nil) func (ggs *signer) SignBytes(data []byte, addr address.Address) (types.Signature, error) { return nil, nil } // ApplyProofsModeDefaults mutates the given genesis configuration, setting the // appropriate proofs mode and corresponding storage miner sector size. If // force is true, proofs mode and sector size-values will be overridden with the // appropriate defaults for the selected proofs mode. func ApplyProofsModeDefaults(cfg *GenesisCfg, useLiveProofsMode bool, force bool) { mode := types.TestProofsMode sectorSize := types.OneKiBSectorSize if useLiveProofsMode { mode = types.LiveProofsMode sectorSize = types.TwoHundredFiftySixMiBSectorSize } if cfg.ProofsMode == types.UnsetProofsMode || force { cfg.ProofsMode = mode } for _, m := range cfg.Miners { if m.SectorSize == 0 || force { m.SectorSize = sectorSize.Uint64() } } }
1
19,929
Because power is now added during `submitPoSt`, this is needed to set power in the genesis block. Again, let me know if this bootstrapping solution is flawed.
filecoin-project-venus
go
@@ -40,6 +40,10 @@ return [ 'session_gc' => ['int'], 'timezone_identifiers_list' => ['list<string>|false', 'timezoneGroup='=>'int', 'countryCode='=>'?string'], 'unpack' => ['array', 'format'=>'string', 'string'=>'string', 'offset='=>'int'], + 'ReflectionFunction::getReturnType' => ['?ReflectionNamedType'], + 'ReflectionFunctionAbstract::getReturnType' => ['?ReflectionNamedType'], + 'ReflectionMethod::getReturnType' => ['?ReflectionNamedType'], + 'ReflectionParameter::getType' => ['?ReflectionNamedType'] ], 'old' => [ 'DateTimeZone::listIdentifiers' => ['list<string>|false', 'timezoneGroup='=>'int', 'countryCode='=>'string'],
1
<?php // phpcs:ignoreFile /** * This contains the information needed to convert the function signatures for php 7.1 to php 7.0 (and vice versa) * * This has two sections. * The 'new' section contains function/method names from FunctionSignatureMap (And alternates, if applicable) that do not exist in php7.0 or have different signatures in php 7.1. * If they were just updated, the function/method will be present in the 'added' signatures. * The 'old' signatures contains the signatures that are different in php 7.0. * Functions are expected to be removed only in major releases of php. (e.g. php 7.0 removed various functions that were deprecated in 5.6) * * @see FunctionSignatureMap.php * * @phan-file-suppress PhanPluginMixedKeyNoKey (read by Phan when analyzing this file) */ return [ 'new' => [ 'Closure::fromCallable' => ['Closure', 'callable'=>'callable'], 'DateTimeZone::listIdentifiers' => ['list<string>|false', 'timezoneGroup='=>'int', 'countryCode='=>'string|null'], 'SQLite3::createFunction' => ['bool', 'name'=>'string', 'callback'=>'callable', 'argCount='=>'int', 'flags='=>'int'], 'curl_multi_errno' => ['int', 'multi_handle'=>'resource'], 'curl_share_errno' => ['int', 'share_handle'=>'resource'], 'curl_share_strerror' => ['string', 'error_code'=>'int'], 'get_headers' => ['array|false', 'url'=>'string', 'associative='=>'int', 'context='=>'resource'], 'getenv\'1' => ['array<string,string>'], 'getopt' => ['array<string,string>|array<string,false>|array<string,array<int,string|false>>', 'short_options'=>'string', 'long_options='=>'array', '&w_rest_index='=>'int'], 'hash_hkdf' => ['string', 'algo'=>'string', 'key'=>'string', 'length='=>'int', 'info='=>'string', 'salt='=>'string'], 'is_iterable' => ['bool', 'value'=>'mixed'], 'openssl_get_curve_names' => ['array<int,string>'], 'pcntl_async_signals' => ['bool', 'enable='=>'bool'], 'pcntl_signal_get_handler' => ['int|string', 'signal'=>'int'], 'pg_fetch_all' => ['array', 'result'=>'resource', 'mode='=>'int'], 'pg_last_error' => ['string', 'connection='=>'resource', 'operation='=>'int'], 'pg_select' => ['mixed', 'connection'=>'resource', 'table_name'=>'string', 'conditions'=>'array', 'flags='=>'int', 'mode='=>'int'], 'sapi_windows_cp_conv' => ['string', 'in_codepage'=>'int|string', 'out_codepage'=>'int|string', 'subject'=>'string'], 'sapi_windows_cp_get' => ['int'], 'sapi_windows_cp_is_utf8' => ['bool'], 'sapi_windows_cp_set' => ['bool', 'codepage'=>'int'], 'session_create_id' => ['string', 'prefix='=>'string'], 'session_gc' => ['int'], 'timezone_identifiers_list' => ['list<string>|false', 'timezoneGroup='=>'int', 'countryCode='=>'?string'], 'unpack' => ['array', 'format'=>'string', 'string'=>'string', 'offset='=>'int'], ], 'old' => [ 'DateTimeZone::listIdentifiers' => ['list<string>|false', 'timezoneGroup='=>'int', 'countryCode='=>'string'], 'SQLite3::createFunction' => ['bool', 'name'=>'string', 'callback'=>'callable', 'argCount='=>'int'], 'get_headers' => ['array|false', 'url'=>'string', 'associative='=>'int'], 'getopt' => ['array<string,string>|array<string,false>|array<string,array<int,string|false>>', 'short_options'=>'string', 'long_options='=>'array'], 'pg_fetch_all' => ['array', 'result'=>'resource'], 'pg_last_error' => ['string', 'connection='=>'resource'], 'pg_select' => ['mixed', 'connection'=>'resource', 'table_name'=>'string', 'conditions'=>'array', 'flags='=>'int'], 'timezone_identifiers_list' => ['list<string>|false', 'timezoneGroup='=>'int', 'countryCode='=>'string'], 'unpack' => ['array', 'format'=>'string', 'string'=>'string'], 
], ];
1
10,711
To be consistent, these should go at the top of the `'new'` section, between `DateTimeZone::listIdentifiers` and `SQLite3::createFunction`.
vimeo-psalm
php
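For reference, an excerpt of the `'new'` section with the ordering the review asks for; the four Reflection entries are the ones from the patch, the surrounding entries are unchanged, and the rest of the list is elided:

```php
'new' => [
    'Closure::fromCallable' => ['Closure', 'callable'=>'callable'],
    'DateTimeZone::listIdentifiers' => ['list<string>|false', 'timezoneGroup='=>'int', 'countryCode='=>'string|null'],
    'ReflectionFunction::getReturnType' => ['?ReflectionNamedType'],
    'ReflectionFunctionAbstract::getReturnType' => ['?ReflectionNamedType'],
    'ReflectionMethod::getReturnType' => ['?ReflectionNamedType'],
    'ReflectionParameter::getType' => ['?ReflectionNamedType'],
    'SQLite3::createFunction' => ['bool', 'name'=>'string', 'callback'=>'callable', 'argCount='=>'int', 'flags='=>'int'],
    // ... remaining function entries continue unchanged ...
],
```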
@@ -165,4 +165,13 @@ public abstract class Directory implements Closeable { * @throws AlreadyClosedException if this Directory is closed */ protected void ensureOpen() throws AlreadyClosedException {} + + /** + * Implementations can override this if they are capable of reporting modification time + * of a file in seconds since the epoch. + */ + public long fileModified(String name) throws IOException { + throw new UnsupportedOperationException(); + } + }
1
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.lucene.store; import java.io.Closeable; import java.io.FileNotFoundException; import java.io.IOException; import java.nio.file.NoSuchFileException; import java.util.Collection; // for javadocs import org.apache.lucene.util.IOUtils; /** A Directory is a flat list of files. Files may be written once, when they * are created. Once a file is created it may only be opened for read, or * deleted. Random access is permitted both when reading and writing. * * <p> Java's i/o APIs not used directly, but rather all i/o is * through this API. This permits things such as: <ul> * <li> implementation of RAM-based indices; * <li> implementation indices stored in a database, via JDBC; * <li> implementation of an index as a single file; * </ul> * * Directory locking is implemented by an instance of {@link * LockFactory}. * */ public abstract class Directory implements Closeable { /** * Returns an array of strings, one for each entry in the directory, in sorted (UTF16, java's String.compare) order. * * @throws IOException in case of IO error */ public abstract String[] listAll() throws IOException; /** Removes an existing file in the directory. */ public abstract void deleteFile(String name) throws IOException; /** * Returns the length of a file in the directory. This method follows the * following contract: * <ul> * <li>Throws {@link FileNotFoundException} or {@link NoSuchFileException} * if the file does not exist. * <li>Returns a value &ge;0 if the file exists, which specifies its length. * </ul> * * @param name the name of the file for which to return the length. * @throws IOException if there was an IO error while retrieving the file's * length. */ public abstract long fileLength(String name) throws IOException; /** Creates a new, empty file in the directory with the given name. Returns a stream writing this file. */ public abstract IndexOutput createOutput(String name, IOContext context) throws IOException; /** Creates a new, empty file for writing in the directory, with a * temporary file name including prefix and suffix, ending with the * reserved extension <code>.tmp</code>. Use * {@link IndexOutput#getName} to see what name was used. */ public abstract IndexOutput createTempOutput(String prefix, String suffix, IOContext context) throws IOException; /** * Ensure that any writes to these files are moved to * stable storage. Lucene uses this to properly commit * changes to the index, to prevent a machine/OS crash * from corrupting the index. * <br> * NOTE: Clients may call this method for same files over * and over again, so some impls might optimize for that. * For other impls the operation can be a noop, for various * reasons. 
*/ public abstract void sync(Collection<String> names) throws IOException; /** * Renames {@code source} to {@code dest} as an atomic operation, * where {@code dest} does not yet exist in the directory. * <p> * Notes: This method is used by IndexWriter to publish commits. * It is ok if this operation is not truly atomic, for example * both {@code source} and {@code dest} can be visible temporarily. * It is just important that the contents of {@code dest} appear * atomically, or an exception is thrown. */ public abstract void renameFile(String source, String dest) throws IOException; /** Returns a stream reading an existing file. * <p>Throws {@link FileNotFoundException} or {@link NoSuchFileException} * if the file does not exist. */ public abstract IndexInput openInput(String name, IOContext context) throws IOException; /** Returns a stream reading an existing file, computing checksum as it reads */ public ChecksumIndexInput openChecksumInput(String name, IOContext context) throws IOException { return new BufferedChecksumIndexInput(openInput(name, context)); } /** * Returns an obtained {@link Lock}. * @param name the name of the lock file * @throws LockObtainFailedException (optional specific exception) if the lock could * not be obtained because it is currently held elsewhere. * @throws IOException if any i/o error occurs attempting to gain the lock */ public abstract Lock obtainLock(String name) throws IOException; /** Closes the store. */ @Override public abstract void close() throws IOException; @Override public String toString() { return getClass().getSimpleName() + '@' + Integer.toHexString(hashCode()); } /** * Copies the file <i>src</i> in <i>from</i> to this directory under the new * file name <i>dest</i>. * <p> * If you want to copy the entire source directory to the destination one, you * can do so like this: * * <pre class="prettyprint"> * Directory to; // the directory to copy to * for (String file : dir.listAll()) { * to.copyFrom(dir, file, newFile, IOContext.DEFAULT); // newFile can be either file, or a new name * } * </pre> * <p> * <b>NOTE:</b> this method does not check whether <i>dest</i> exist and will * overwrite it if it does. */ public void copyFrom(Directory from, String src, String dest, IOContext context) throws IOException { boolean success = false; try (IndexInput is = from.openInput(src, context); IndexOutput os = createOutput(dest, context)) { os.copyBytes(is, is.length()); success = true; } finally { if (!success) { IOUtils.deleteFilesIgnoringExceptions(this, dest); } } } /** * @throws AlreadyClosedException if this Directory is closed */ protected void ensureOpen() throws AlreadyClosedException {} }
1
25,985
I think we should avoid changing any Lucene classes for the moment; fileModified() can probably stay where it is?
apache-lucene-solr
java
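If the goal is to keep `Directory` untouched, one caller-side alternative is a small helper that only handles filesystem-backed directories. This is just a sketch, not what the change under review does: the helper class name is made up here, it assumes the caller can tolerate an `UnsupportedOperationException` for other `Directory` implementations, and it relies on `FSDirectory.getDirectory()` returning a `Path` plus the standard `java.nio.file` API.

```java
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.concurrent.TimeUnit;

import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

final class DirectoryTimes {

  private DirectoryTimes() {}

  /** Modification time of {@code name} in seconds since the epoch, without adding API to Directory. */
  static long fileModified(Directory dir, String name) throws IOException {
    if (dir instanceof FSDirectory) {
      Path path = ((FSDirectory) dir).getDirectory().resolve(name);
      return Files.getLastModifiedTime(path).to(TimeUnit.SECONDS);
    }
    throw new UnsupportedOperationException(
        dir.getClass().getSimpleName() + " cannot report file modification times");
  }
}
```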
@@ -1174,7 +1174,7 @@ public class OverviewFragment extends Fragment implements View.OnClickListener, extendedBolusView.setText(extendedBolusText); } if (extendedBolusText.equals("")) - extendedBolusView.setVisibility(View.INVISIBLE); + extendedBolusView.setVisibility(View.GONE); else extendedBolusView.setVisibility(View.VISIBLE); }
1
package info.nightscout.androidaps.plugins.Overview; import android.annotation.SuppressLint; import android.app.Activity; import android.app.NotificationManager; import android.content.ActivityNotFoundException; import android.content.Context; import android.content.Intent; import android.content.pm.PackageManager; import android.graphics.Color; import android.graphics.Paint; import android.os.Bundle; import android.os.Handler; import android.support.v4.app.Fragment; import android.support.v4.app.FragmentActivity; import android.support.v4.app.FragmentManager; import android.support.v4.content.res.ResourcesCompat; import android.support.v7.app.AlertDialog; import android.support.v7.widget.LinearLayoutManager; import android.support.v7.widget.PopupMenu; import android.support.v7.widget.RecyclerView; import android.text.SpannableString; import android.text.style.ForegroundColorSpan; import android.util.DisplayMetrics; import android.util.TypedValue; import android.view.ContextMenu; import android.view.HapticFeedbackConstants; import android.view.LayoutInflater; import android.view.Menu; import android.view.MenuItem; import android.view.View; import android.view.ViewGroup; import android.widget.CheckBox; import android.widget.CompoundButton; import android.widget.ImageButton; import android.widget.LinearLayout; import android.widget.TextView; import com.crashlytics.android.answers.CustomEvent; import com.jjoe64.graphview.GraphView; import com.squareup.otto.Subscribe; import org.json.JSONException; import org.json.JSONObject; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.text.DecimalFormat; import java.util.Calendar; import java.util.Date; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.ScheduledFuture; import java.util.concurrent.TimeUnit; import info.nightscout.androidaps.Config; import info.nightscout.androidaps.Constants; import info.nightscout.androidaps.MainApp; import info.nightscout.androidaps.R; import info.nightscout.androidaps.data.DetailedBolusInfo; import info.nightscout.androidaps.data.GlucoseStatus; import info.nightscout.androidaps.data.IobTotal; import info.nightscout.androidaps.data.Profile; import info.nightscout.androidaps.data.QuickWizardEntry; import info.nightscout.androidaps.db.BgReading; import info.nightscout.androidaps.db.CareportalEvent; import info.nightscout.androidaps.db.DatabaseHelper; import info.nightscout.androidaps.db.ExtendedBolus; import info.nightscout.androidaps.db.Source; import info.nightscout.androidaps.db.TempTarget; import info.nightscout.androidaps.db.TemporaryBasal; import info.nightscout.androidaps.events.EventCareportalEventChange; import info.nightscout.androidaps.events.EventExtendedBolusChange; import info.nightscout.androidaps.events.EventInitializationChanged; import info.nightscout.androidaps.events.EventPreferenceChange; import info.nightscout.androidaps.events.EventProfileSwitchChange; import info.nightscout.androidaps.events.EventPumpStatusChanged; import info.nightscout.androidaps.events.EventRefreshOverview; import info.nightscout.androidaps.events.EventTempBasalChange; import info.nightscout.androidaps.events.EventTempTargetChange; import info.nightscout.androidaps.events.EventTreatmentChange; import info.nightscout.androidaps.interfaces.Constraint; import info.nightscout.androidaps.interfaces.PluginType; import info.nightscout.androidaps.interfaces.PumpDescription; import info.nightscout.androidaps.interfaces.PumpInterface; import 
info.nightscout.androidaps.plugins.Careportal.CareportalFragment; import info.nightscout.androidaps.plugins.Careportal.Dialogs.NewNSTreatmentDialog; import info.nightscout.androidaps.plugins.Careportal.OptionsToShow; import info.nightscout.androidaps.plugins.ConfigBuilder.ConfigBuilderPlugin; import info.nightscout.androidaps.plugins.ConstraintsObjectives.ObjectivesPlugin; import info.nightscout.androidaps.plugins.IobCobCalculator.AutosensData; import info.nightscout.androidaps.plugins.IobCobCalculator.IobCobCalculatorPlugin; import info.nightscout.androidaps.plugins.IobCobCalculator.events.EventAutosensCalculationFinished; import info.nightscout.androidaps.plugins.IobCobCalculator.events.EventIobCalculationProgress; import info.nightscout.androidaps.plugins.Loop.LoopPlugin; import info.nightscout.androidaps.plugins.Loop.events.EventNewOpenLoopNotification; import info.nightscout.androidaps.plugins.NSClientInternal.data.NSDeviceStatus; import info.nightscout.androidaps.plugins.Overview.Dialogs.CalibrationDialog; import info.nightscout.androidaps.plugins.Overview.Dialogs.ErrorHelperActivity; import info.nightscout.androidaps.plugins.Overview.Dialogs.NewCarbsDialog; import info.nightscout.androidaps.plugins.Overview.Dialogs.NewInsulinDialog; import info.nightscout.androidaps.plugins.Overview.Dialogs.NewTreatmentDialog; import info.nightscout.androidaps.plugins.Overview.Dialogs.WizardDialog; import info.nightscout.androidaps.plugins.Overview.activities.QuickWizardListActivity; import info.nightscout.androidaps.plugins.Overview.events.EventSetWakeLock; import info.nightscout.androidaps.plugins.Overview.graphData.GraphData; import info.nightscout.androidaps.plugins.Overview.notifications.NotificationRecyclerViewAdapter; import info.nightscout.androidaps.plugins.Overview.notifications.NotificationStore; import info.nightscout.androidaps.plugins.Source.SourceDexcomG5Plugin; import info.nightscout.androidaps.plugins.Source.SourceXdripPlugin; import info.nightscout.androidaps.plugins.Treatments.TreatmentsPlugin; import info.nightscout.androidaps.plugins.Treatments.fragments.ProfileViewerDialog; import info.nightscout.androidaps.queue.Callback; import info.nightscout.utils.BolusWizard; import info.nightscout.utils.DateUtil; import info.nightscout.utils.DecimalFormatter; import info.nightscout.utils.FabricPrivacy; import info.nightscout.utils.NSUpload; import info.nightscout.utils.OKDialog; import info.nightscout.utils.Profiler; import info.nightscout.utils.SP; import info.nightscout.utils.SingleClickButton; import info.nightscout.utils.ToastUtils; public class OverviewFragment extends Fragment implements View.OnClickListener, View.OnLongClickListener { private static Logger log = LoggerFactory.getLogger(OverviewFragment.class); TextView timeView; TextView bgView; TextView arrowView; TextView timeAgoView; TextView deltaView; TextView avgdeltaView; TextView baseBasalView; TextView extendedBolusView; TextView activeProfileView; TextView iobView; TextView cobView; TextView apsModeView; TextView tempTargetView; TextView pumpStatusView; TextView pumpDeviceStatusView; TextView openapsDeviceStatusView; TextView uploaderDeviceStatusView; TextView iobCalculationProgressView; LinearLayout loopStatusLayout; LinearLayout pumpStatusLayout; GraphView bgGraph; GraphView iobGraph; ImageButton chartButton; TextView iage; TextView cage; TextView sage; TextView pbage; RecyclerView notificationsView; LinearLayoutManager llm; LinearLayout acceptTempLayout; SingleClickButton acceptTempButton; SingleClickButton 
treatmentButton; SingleClickButton wizardButton; SingleClickButton calibrationButton; SingleClickButton insulinButton; SingleClickButton carbsButton; SingleClickButton cgmButton; SingleClickButton quickWizardButton; CheckBox lockScreen; boolean smallWidth; boolean smallHeight; public static boolean shorttextmode = false; private boolean accepted; private int rangeToDisplay = 6; // for graph Handler sLoopHandler = new Handler(); Runnable sRefreshLoop = null; final Object updateSync = new Object(); public enum CHARTTYPE {PRE, BAS, IOB, COB, DEV, SEN, DEVSLOPE} private static final ScheduledExecutorService worker = Executors.newSingleThreadScheduledExecutor(); private static ScheduledFuture<?> scheduledUpdate = null; public OverviewFragment() { super(); } @Override public View onCreateView(LayoutInflater inflater, ViewGroup container, Bundle savedInstanceState) { try { //check screen width final DisplayMetrics dm = new DisplayMetrics(); getActivity().getWindowManager().getDefaultDisplay().getMetrics(dm); int screen_width = dm.widthPixels; int screen_height = dm.heightPixels; smallWidth = screen_width <= Constants.SMALL_WIDTH; smallHeight = screen_height <= Constants.SMALL_HEIGHT; boolean landscape = screen_height < screen_width; View view; if (MainApp.sResources.getBoolean(R.bool.isTablet) && (Config.NSCLIENT || Config.G5UPLOADER)) { view = inflater.inflate(R.layout.overview_fragment_nsclient_tablet, container, false); } else if (Config.NSCLIENT || Config.G5UPLOADER) { view = inflater.inflate(R.layout.overview_fragment_nsclient, container, false); shorttextmode = true; } else if (smallHeight || landscape) { view = inflater.inflate(R.layout.overview_fragment_smallheight, container, false); } else { view = inflater.inflate(R.layout.overview_fragment, container, false); } timeView = (TextView) view.findViewById(R.id.overview_time); bgView = (TextView) view.findViewById(R.id.overview_bg); arrowView = (TextView) view.findViewById(R.id.overview_arrow); if (smallWidth) { arrowView.setTextSize(TypedValue.COMPLEX_UNIT_DIP, 35); } timeAgoView = (TextView) view.findViewById(R.id.overview_timeago); deltaView = (TextView) view.findViewById(R.id.overview_delta); avgdeltaView = (TextView) view.findViewById(R.id.overview_avgdelta); baseBasalView = (TextView) view.findViewById(R.id.overview_basebasal); extendedBolusView = (TextView) view.findViewById(R.id.overview_extendedbolus); activeProfileView = (TextView) view.findViewById(R.id.overview_activeprofile); pumpStatusView = (TextView) view.findViewById(R.id.overview_pumpstatus); pumpDeviceStatusView = (TextView) view.findViewById(R.id.overview_pump); openapsDeviceStatusView = (TextView) view.findViewById(R.id.overview_openaps); uploaderDeviceStatusView = (TextView) view.findViewById(R.id.overview_uploader); iobCalculationProgressView = (TextView) view.findViewById(R.id.overview_iobcalculationprogess); loopStatusLayout = (LinearLayout) view.findViewById(R.id.overview_looplayout); pumpStatusLayout = (LinearLayout) view.findViewById(R.id.overview_pumpstatuslayout); pumpStatusView.setBackgroundColor(MainApp.sResources.getColor(R.color.colorInitializingBorder)); iobView = (TextView) view.findViewById(R.id.overview_iob); cobView = (TextView) view.findViewById(R.id.overview_cob); apsModeView = (TextView) view.findViewById(R.id.overview_apsmode); tempTargetView = (TextView) view.findViewById(R.id.overview_temptarget); iage = (TextView) view.findViewById(R.id.careportal_insulinage); cage = (TextView) view.findViewById(R.id.careportal_canulaage); sage = (TextView) 
view.findViewById(R.id.careportal_sensorage); pbage = (TextView) view.findViewById(R.id.careportal_pbage); bgGraph = (GraphView) view.findViewById(R.id.overview_bggraph); iobGraph = (GraphView) view.findViewById(R.id.overview_iobgraph); treatmentButton = (SingleClickButton) view.findViewById(R.id.overview_treatmentbutton); treatmentButton.setOnClickListener(this); wizardButton = (SingleClickButton) view.findViewById(R.id.overview_wizardbutton); wizardButton.setOnClickListener(this); insulinButton = (SingleClickButton) view.findViewById(R.id.overview_insulinbutton); if (insulinButton != null) insulinButton.setOnClickListener(this); carbsButton = (SingleClickButton) view.findViewById(R.id.overview_carbsbutton); if (carbsButton != null) carbsButton.setOnClickListener(this); acceptTempButton = (SingleClickButton) view.findViewById(R.id.overview_accepttempbutton); if (acceptTempButton != null) acceptTempButton.setOnClickListener(this); quickWizardButton = (SingleClickButton) view.findViewById(R.id.overview_quickwizardbutton); quickWizardButton.setOnClickListener(this); quickWizardButton.setOnLongClickListener(this); calibrationButton = (SingleClickButton) view.findViewById(R.id.overview_calibrationbutton); if (calibrationButton != null) calibrationButton.setOnClickListener(this); cgmButton = (SingleClickButton) view.findViewById(R.id.overview_cgmbutton); if (cgmButton != null) cgmButton.setOnClickListener(this); acceptTempLayout = (LinearLayout) view.findViewById(R.id.overview_accepttemplayout); notificationsView = (RecyclerView) view.findViewById(R.id.overview_notifications); notificationsView.setHasFixedSize(true); llm = new LinearLayoutManager(view.getContext()); notificationsView.setLayoutManager(llm); int axisWidth = 50; if (dm.densityDpi <= 120) axisWidth = 3; else if (dm.densityDpi <= 160) axisWidth = 10; else if (dm.densityDpi <= 320) axisWidth = 35; else if (dm.densityDpi <= 420) axisWidth = 50; else if (dm.densityDpi <= 560) axisWidth = 70; else axisWidth = 80; bgGraph.getGridLabelRenderer().setGridColor(MainApp.sResources.getColor(R.color.graphgrid)); bgGraph.getGridLabelRenderer().reloadStyles(); iobGraph.getGridLabelRenderer().setGridColor(MainApp.sResources.getColor(R.color.graphgrid)); iobGraph.getGridLabelRenderer().reloadStyles(); iobGraph.getGridLabelRenderer().setHorizontalLabelsVisible(false); bgGraph.getGridLabelRenderer().setLabelVerticalWidth(axisWidth); iobGraph.getGridLabelRenderer().setLabelVerticalWidth(axisWidth); iobGraph.getGridLabelRenderer().setNumVerticalLabels(5); rangeToDisplay = SP.getInt(R.string.key_rangetodisplay, 6); bgGraph.setOnLongClickListener(new View.OnLongClickListener() { @Override public boolean onLongClick(View v) { rangeToDisplay += 6; rangeToDisplay = rangeToDisplay > 24 ? 
6 : rangeToDisplay; SP.putInt(R.string.key_rangetodisplay, rangeToDisplay); updateGUI("rangeChange"); return false; } }); setupChartMenu(view); lockScreen = (CheckBox) view.findViewById(R.id.overview_lockscreen); if (lockScreen != null) { lockScreen.setChecked(SP.getBoolean("lockscreen", false)); lockScreen.setOnCheckedChangeListener(new CompoundButton.OnCheckedChangeListener() { @Override public void onCheckedChanged(CompoundButton buttonView, boolean isChecked) { SP.putBoolean("lockscreen", isChecked); MainApp.bus().post(new EventSetWakeLock(isChecked)); } }); } return view; } catch (Exception e) { FabricPrivacy.logException(e); log.debug("Runtime Exception", e); } return null; } private void setupChartMenu(View view) { chartButton = (ImageButton) view.findViewById(R.id.overview_chartMenuButton); chartButton.setOnClickListener(new View.OnClickListener() { @Override public void onClick(View v) { final LoopPlugin.LastRun finalLastRun = LoopPlugin.lastRun; final boolean predictionsAvailable = finalLastRun != null && finalLastRun.request.hasPredictions; MenuItem item; CharSequence title; SpannableString s; PopupMenu popup = new PopupMenu(v.getContext(), v); if (predictionsAvailable) { item = popup.getMenu().add(Menu.NONE, CHARTTYPE.PRE.ordinal(), Menu.NONE, "Predictions"); title = item.getTitle(); s = new SpannableString(title); s.setSpan(new ForegroundColorSpan(ResourcesCompat.getColor(getResources(), R.color.prediction, null)), 0, s.length(), 0); item.setTitle(s); item.setCheckable(true); item.setChecked(SP.getBoolean("showprediction", true)); } item = popup.getMenu().add(Menu.NONE, CHARTTYPE.BAS.ordinal(), Menu.NONE, MainApp.gs(R.string.overview_show_basals)); title = item.getTitle(); s = new SpannableString(title); s.setSpan(new ForegroundColorSpan(ResourcesCompat.getColor(getResources(), R.color.basal, null)), 0, s.length(), 0); item.setTitle(s); item.setCheckable(true); item.setChecked(SP.getBoolean("showbasals", true)); item = popup.getMenu().add(Menu.NONE, CHARTTYPE.IOB.ordinal(), Menu.NONE, MainApp.gs(R.string.overview_show_iob)); title = item.getTitle(); s = new SpannableString(title); s.setSpan(new ForegroundColorSpan(ResourcesCompat.getColor(getResources(), R.color.iob, null)), 0, s.length(), 0); item.setTitle(s); item.setCheckable(true); item.setChecked(SP.getBoolean("showiob", true)); item = popup.getMenu().add(Menu.NONE, CHARTTYPE.COB.ordinal(), Menu.NONE, MainApp.gs(R.string.overview_show_cob)); title = item.getTitle(); s = new SpannableString(title); s.setSpan(new ForegroundColorSpan(ResourcesCompat.getColor(getResources(), R.color.cob, null)), 0, s.length(), 0); item.setTitle(s); item.setCheckable(true); item.setChecked(SP.getBoolean("showcob", true)); item = popup.getMenu().add(Menu.NONE, CHARTTYPE.DEV.ordinal(), Menu.NONE, MainApp.gs(R.string.overview_show_deviations)); title = item.getTitle(); s = new SpannableString(title); s.setSpan(new ForegroundColorSpan(ResourcesCompat.getColor(getResources(), R.color.deviations, null)), 0, s.length(), 0); item.setTitle(s); item.setCheckable(true); item.setChecked(SP.getBoolean("showdeviations", false)); item = popup.getMenu().add(Menu.NONE, CHARTTYPE.SEN.ordinal(), Menu.NONE, MainApp.gs(R.string.overview_show_sensitivity)); title = item.getTitle(); s = new SpannableString(title); s.setSpan(new ForegroundColorSpan(ResourcesCompat.getColor(getResources(), R.color.ratio, null)), 0, s.length(), 0); item.setTitle(s); item.setCheckable(true); item.setChecked(SP.getBoolean("showratios", false)); if (MainApp.devBranch) { item = 
popup.getMenu().add(Menu.NONE, CHARTTYPE.DEVSLOPE.ordinal(), Menu.NONE, "Deviation slope"); title = item.getTitle(); s = new SpannableString(title); s.setSpan(new ForegroundColorSpan(ResourcesCompat.getColor(getResources(), R.color.devslopepos, null)), 0, s.length(), 0); item.setTitle(s); item.setCheckable(true); item.setChecked(SP.getBoolean("showdevslope", false)); } popup.setOnMenuItemClickListener(new PopupMenu.OnMenuItemClickListener() { @Override public boolean onMenuItemClick(MenuItem item) { if (item.getItemId() == CHARTTYPE.PRE.ordinal()) { SP.putBoolean("showprediction", !item.isChecked()); } else if (item.getItemId() == CHARTTYPE.BAS.ordinal()) { SP.putBoolean("showbasals", !item.isChecked()); } else if (item.getItemId() == CHARTTYPE.IOB.ordinal()) { SP.putBoolean("showiob", !item.isChecked()); } else if (item.getItemId() == CHARTTYPE.COB.ordinal()) { SP.putBoolean("showcob", !item.isChecked()); } else if (item.getItemId() == CHARTTYPE.DEV.ordinal()) { SP.putBoolean("showdeviations", !item.isChecked()); } else if (item.getItemId() == CHARTTYPE.SEN.ordinal()) { SP.putBoolean("showratios", !item.isChecked()); } else if (item.getItemId() == CHARTTYPE.DEVSLOPE.ordinal()) { SP.putBoolean("showdevslope", !item.isChecked()); } scheduleUpdateGUI("onGraphCheckboxesCheckedChanged"); return true; } }); chartButton.setImageResource(R.drawable.ic_arrow_drop_up_white_24dp); popup.setOnDismissListener(new PopupMenu.OnDismissListener() { @Override public void onDismiss(PopupMenu menu) { chartButton.setImageResource(R.drawable.ic_arrow_drop_down_white_24dp); } }); popup.show(); } }); } @Override public void onCreateContextMenu(ContextMenu menu, View v, ContextMenu.ContextMenuInfo menuInfo) { super.onCreateContextMenu(menu, v, menuInfo); if (v == apsModeView) { final LoopPlugin loopPlugin = LoopPlugin.getPlugin(); final PumpDescription pumpDescription = ConfigBuilderPlugin.getActivePump().getPumpDescription(); if (loopPlugin == null || !MainApp.getConfigBuilder().isProfileValid("ContexMenuCreation")) return; menu.setHeaderTitle(MainApp.gs(R.string.loop)); if (loopPlugin.isEnabled(PluginType.LOOP)) { menu.add(MainApp.gs(R.string.disableloop)); if (!loopPlugin.isSuspended()) { menu.add(MainApp.gs(R.string.suspendloopfor1h)); menu.add(MainApp.gs(R.string.suspendloopfor2h)); menu.add(MainApp.gs(R.string.suspendloopfor3h)); menu.add(MainApp.gs(R.string.suspendloopfor10h)); if (pumpDescription.tempDurationStep15mAllowed) menu.add(MainApp.gs(R.string.disconnectpumpfor15m)); if (pumpDescription.tempDurationStep30mAllowed) menu.add(MainApp.gs(R.string.disconnectpumpfor30m)); menu.add(MainApp.gs(R.string.disconnectpumpfor1h)); menu.add(MainApp.gs(R.string.disconnectpumpfor2h)); menu.add(MainApp.gs(R.string.disconnectpumpfor3h)); } else { menu.add(MainApp.gs(R.string.resume)); } } if (!loopPlugin.isEnabled(PluginType.LOOP)) menu.add(MainApp.gs(R.string.enableloop)); } else if (v == activeProfileView) { menu.setHeaderTitle(MainApp.gs(R.string.profile)); menu.add(MainApp.gs(R.string.danar_viewprofile)); if (MainApp.getConfigBuilder().getActiveProfileInterface().getProfile() != null) { menu.add(MainApp.gs(R.string.careportal_profileswitch)); } } } @Override public boolean onContextItemSelected(MenuItem item) { final Profile profile = MainApp.getConfigBuilder().getProfile(); if (profile == null) return true; final LoopPlugin loopPlugin = LoopPlugin.getPlugin(); if (item.getTitle().equals(MainApp.gs(R.string.disableloop))) { loopPlugin.setPluginEnabled(PluginType.LOOP, false); 
loopPlugin.setFragmentVisible(PluginType.LOOP, false); MainApp.getConfigBuilder().storeSettings("DisablingLoop"); updateGUI("suspendmenu"); ConfigBuilderPlugin.getCommandQueue().cancelTempBasal(true, new Callback() { @Override public void run() { if (!result.success) { ToastUtils.showToastInUiThread(MainApp.instance().getApplicationContext(), MainApp.gs(R.string.tempbasaldeliveryerror)); } } }); NSUpload.uploadOpenAPSOffline(24 * 60); // upload 24h, we don't know real duration return true; } else if (item.getTitle().equals(MainApp.gs(R.string.enableloop))) { loopPlugin.setPluginEnabled(PluginType.LOOP, true); loopPlugin.setFragmentVisible(PluginType.LOOP, true); MainApp.getConfigBuilder().storeSettings("EnablingLoop"); updateGUI("suspendmenu"); NSUpload.uploadOpenAPSOffline(0); return true; } else if (item.getTitle().equals(MainApp.gs(R.string.resume))) { loopPlugin.suspendTo(0L); updateGUI("suspendmenu"); ConfigBuilderPlugin.getCommandQueue().cancelTempBasal(true, new Callback() { @Override public void run() { if (!result.success) { ToastUtils.showToastInUiThread(MainApp.instance().getApplicationContext(), MainApp.gs(R.string.tempbasaldeliveryerror)); } } }); NSUpload.uploadOpenAPSOffline(0); return true; } else if (item.getTitle().equals(MainApp.gs(R.string.suspendloopfor1h))) { MainApp.getConfigBuilder().suspendLoop(60); updateGUI("suspendmenu"); return true; } else if (item.getTitle().equals(MainApp.gs(R.string.suspendloopfor2h))) { MainApp.getConfigBuilder().suspendLoop(120); updateGUI("suspendmenu"); return true; } else if (item.getTitle().equals(MainApp.gs(R.string.suspendloopfor3h))) { MainApp.getConfigBuilder().suspendLoop(180); updateGUI("suspendmenu"); return true; } else if (item.getTitle().equals(MainApp.gs(R.string.suspendloopfor10h))) { MainApp.getConfigBuilder().suspendLoop(600); updateGUI("suspendmenu"); return true; } else if (item.getTitle().equals(MainApp.gs(R.string.disconnectpumpfor15m))) { MainApp.getConfigBuilder().disconnectPump(15, profile); updateGUI("suspendmenu"); return true; } else if (item.getTitle().equals(MainApp.gs(R.string.disconnectpumpfor30m))) { MainApp.getConfigBuilder().disconnectPump(30, profile); updateGUI("suspendmenu"); return true; } else if (item.getTitle().equals(MainApp.gs(R.string.disconnectpumpfor1h))) { MainApp.getConfigBuilder().disconnectPump(60, profile); updateGUI("suspendmenu"); return true; } else if (item.getTitle().equals(MainApp.gs(R.string.disconnectpumpfor2h))) { MainApp.getConfigBuilder().disconnectPump(120, profile); updateGUI("suspendmenu"); return true; } else if (item.getTitle().equals(MainApp.gs(R.string.disconnectpumpfor3h))) { MainApp.getConfigBuilder().disconnectPump(180, profile); updateGUI("suspendmenu"); return true; } else if (item.getTitle().equals(MainApp.gs(R.string.careportal_profileswitch))) { NewNSTreatmentDialog newDialog = new NewNSTreatmentDialog(); final OptionsToShow profileswitch = CareportalFragment.PROFILESWITCHDIRECT; profileswitch.executeProfileSwitch = true; newDialog.setOptions(profileswitch, R.string.careportal_profileswitch); newDialog.show(getFragmentManager(), "NewNSTreatmentDialog"); } else if (item.getTitle().equals(MainApp.gs(R.string.danar_viewprofile))) { ProfileViewerDialog pvd = ProfileViewerDialog.newInstance(System.currentTimeMillis()); FragmentManager manager = getFragmentManager(); pvd.show(manager, "ProfileViewDialog"); } return super.onContextItemSelected(item); } @Override public void onClick(View v) { boolean xdrip = MainApp.getSpecificPlugin(SourceXdripPlugin.class) != null && 
MainApp.getSpecificPlugin(SourceXdripPlugin.class).isEnabled(PluginType.BGSOURCE); boolean g5 = MainApp.getSpecificPlugin(SourceDexcomG5Plugin.class) != null && MainApp.getSpecificPlugin(SourceDexcomG5Plugin.class).isEnabled(PluginType.BGSOURCE); String units = MainApp.getConfigBuilder().getProfileUnits(); FragmentManager manager = getFragmentManager(); switch (v.getId()) { case R.id.overview_accepttempbutton: onClickAcceptTemp(); break; case R.id.overview_quickwizardbutton: onClickQuickwizard(); break; case R.id.overview_wizardbutton: WizardDialog wizardDialog = new WizardDialog(); wizardDialog.show(manager, "WizardDialog"); break; case R.id.overview_calibrationbutton: if (xdrip) { CalibrationDialog calibrationDialog = new CalibrationDialog(); calibrationDialog.show(manager, "CalibrationDialog"); } else if (g5) { try { Intent i = new Intent("com.dexcom.cgm.activities.MeterEntryActivity"); startActivity(i); } catch (ActivityNotFoundException e) { ToastUtils.showToastInUiThread(getActivity(), MainApp.gs(R.string.g5appnotdetected)); } } break; case R.id.overview_cgmbutton: if (xdrip) openCgmApp("com.eveningoutpost.dexdrip"); else if (g5 && units.equals(Constants.MGDL)) openCgmApp("com.dexcom.cgm.region5.mgdl"); else if (g5 && units.equals(Constants.MMOL)) openCgmApp("com.dexcom.cgm.region5.mmol"); break; case R.id.overview_treatmentbutton: NewTreatmentDialog treatmentDialogFragment = new NewTreatmentDialog(); treatmentDialogFragment.show(manager, "TreatmentDialog"); break; case R.id.overview_insulinbutton: new NewInsulinDialog().show(manager, "InsulinDialog"); break; case R.id.overview_carbsbutton: new NewCarbsDialog().show(manager, "CarbsDialog"); break; case R.id.overview_pumpstatus: if (ConfigBuilderPlugin.getActivePump().isSuspended() || !ConfigBuilderPlugin.getActivePump().isInitialized()) ConfigBuilderPlugin.getCommandQueue().readStatus("RefreshClicked", null); break; } } public boolean openCgmApp(String packageName) { PackageManager packageManager = getContext().getPackageManager(); try { Intent intent = packageManager.getLaunchIntentForPackage(packageName); if (intent == null) { throw new ActivityNotFoundException(); } intent.addCategory(Intent.CATEGORY_LAUNCHER); getContext().startActivity(intent); return true; } catch (ActivityNotFoundException e) { new AlertDialog.Builder(getContext()) .setMessage(R.string.error_starting_cgm) .setPositiveButton("OK", null) .show(); return false; } } @Override public boolean onLongClick(View v) { switch (v.getId()) { case R.id.overview_quickwizardbutton: Intent i = new Intent(v.getContext(), QuickWizardListActivity.class); startActivity(i); return true; } return false; } private void onClickAcceptTemp() { Profile profile = MainApp.getConfigBuilder().getProfile(); if (LoopPlugin.getPlugin().isEnabled(PluginType.LOOP) && profile != null) { LoopPlugin.getPlugin().invoke("Accept temp button", false); final LoopPlugin.LastRun finalLastRun = LoopPlugin.lastRun; if (finalLastRun != null && finalLastRun.lastAPSRun != null && finalLastRun.constraintsProcessed.isChangeRequested()) { AlertDialog.Builder builder = new AlertDialog.Builder(getContext()); builder.setTitle(getContext().getString(R.string.confirmation)); builder.setMessage(getContext().getString(R.string.setbasalquestion) + "\n" + finalLastRun.constraintsProcessed); builder.setPositiveButton(getContext().getString(R.string.ok), (dialog, id) -> { hideTempRecommendation(); clearNotification(); MainApp.getConfigBuilder().applyTBRRequest(finalLastRun.constraintsProcessed, profile, new Callback() { 
@Override public void run() { if (result.enacted) { finalLastRun.tbrSetByPump = result; finalLastRun.lastEnact = new Date(); finalLastRun.lastOpenModeAccept = new Date(); NSUpload.uploadDeviceStatus(); ObjectivesPlugin objectivesPlugin = MainApp.getSpecificPlugin(ObjectivesPlugin.class); if (objectivesPlugin != null) { ObjectivesPlugin.manualEnacts++; ObjectivesPlugin.saveProgress(); } } scheduleUpdateGUI("onClickAcceptTemp"); } }); FabricPrivacy.getInstance().logCustom(new CustomEvent("AcceptTemp")); }); builder.setNegativeButton(getContext().getString(R.string.cancel), null); builder.show(); } } } void onClickQuickwizard() { final BgReading actualBg = DatabaseHelper.actualBg(); final Profile profile = MainApp.getConfigBuilder().getProfile(); final TempTarget tempTarget = TreatmentsPlugin.getPlugin().getTempTargetFromHistory(); final QuickWizardEntry quickWizardEntry = OverviewPlugin.getPlugin().quickWizard.getActive(); if (quickWizardEntry != null && actualBg != null && profile != null) { quickWizardButton.setVisibility(View.VISIBLE); final BolusWizard wizard = quickWizardEntry.doCalc(profile, tempTarget, actualBg, true); final JSONObject boluscalcJSON = new JSONObject(); try { boluscalcJSON.put("eventTime", DateUtil.toISOString(new Date())); boluscalcJSON.put("targetBGLow", wizard.targetBGLow); boluscalcJSON.put("targetBGHigh", wizard.targetBGHigh); boluscalcJSON.put("isf", wizard.sens); boluscalcJSON.put("ic", wizard.ic); boluscalcJSON.put("iob", -(wizard.insulingFromBolusIOB + wizard.insulingFromBasalsIOB)); boluscalcJSON.put("bolusiobused", true); boluscalcJSON.put("basaliobused", true); boluscalcJSON.put("bg", actualBg.valueToUnits(profile.getUnits())); boluscalcJSON.put("insulinbg", wizard.insulinFromBG); boluscalcJSON.put("insulinbgused", true); boluscalcJSON.put("bgdiff", wizard.bgDiff); boluscalcJSON.put("insulincarbs", wizard.insulinFromCarbs); boluscalcJSON.put("carbs", quickWizardEntry.carbs()); boluscalcJSON.put("othercorrection", 0d); boluscalcJSON.put("insulintrend", wizard.insulinFromTrend); boluscalcJSON.put("insulin", wizard.calculatedTotalInsulin); } catch (JSONException e) { log.error("Unhandled exception", e); } if (wizard.calculatedTotalInsulin > 0d && quickWizardEntry.carbs() > 0d) { DecimalFormat formatNumber2decimalplaces = new DecimalFormat("0.00"); String confirmMessage = getString(R.string.entertreatmentquestion); Double insulinAfterConstraints = MainApp.getConstraintChecker().applyBolusConstraints(new Constraint<>(wizard.calculatedTotalInsulin)).value(); Integer carbsAfterConstraints = MainApp.getConstraintChecker().applyCarbsConstraints(new Constraint<>(quickWizardEntry.carbs())).value(); confirmMessage += "\n" + getString(R.string.bolus) + ": " + formatNumber2decimalplaces.format(insulinAfterConstraints) + "U"; confirmMessage += "\n" + getString(R.string.carbs) + ": " + carbsAfterConstraints + "g"; if (!insulinAfterConstraints.equals(wizard.calculatedTotalInsulin) || !carbsAfterConstraints.equals(quickWizardEntry.carbs())) { AlertDialog.Builder builder = new AlertDialog.Builder(getContext()); builder.setTitle(MainApp.gs(R.string.treatmentdeliveryerror)); builder.setMessage(getString(R.string.constraints_violation) + "\n" + getString(R.string.changeyourinput)); builder.setPositiveButton(MainApp.gs(R.string.ok), null); builder.show(); return; } final Double finalInsulinAfterConstraints = insulinAfterConstraints; final Integer finalCarbsAfterConstraints = carbsAfterConstraints; final Context context = getContext(); final AlertDialog.Builder builder = new 
AlertDialog.Builder(context); accepted = false; builder.setTitle(MainApp.gs(R.string.confirmation)); builder.setMessage(confirmMessage); builder.setPositiveButton(getString(R.string.ok), (dialog, id) -> { synchronized (builder) { if (accepted) { log.debug("guarding: already accepted"); return; } accepted = true; if (finalInsulinAfterConstraints > 0 || finalCarbsAfterConstraints > 0) { if (wizard.superBolus) { final LoopPlugin loopPlugin = LoopPlugin.getPlugin(); if (loopPlugin.isEnabled(PluginType.LOOP)) { loopPlugin.superBolusTo(System.currentTimeMillis() + 2 * 60L * 60 * 1000); MainApp.bus().post(new EventRefreshOverview("WizardDialog")); } ConfigBuilderPlugin.getCommandQueue().tempBasalPercent(0, 120, true, profile, new Callback() { @Override public void run() { if (!result.success) { Intent i = new Intent(MainApp.instance(), ErrorHelperActivity.class); i.putExtra("soundid", R.raw.boluserror); i.putExtra("status", result.comment); i.putExtra("title", MainApp.gs(R.string.tempbasaldeliveryerror)); i.addFlags(Intent.FLAG_ACTIVITY_NEW_TASK); MainApp.instance().startActivity(i); } } }); } DetailedBolusInfo detailedBolusInfo = new DetailedBolusInfo(); detailedBolusInfo.eventType = CareportalEvent.BOLUSWIZARD; detailedBolusInfo.insulin = finalInsulinAfterConstraints; detailedBolusInfo.carbs = finalCarbsAfterConstraints; detailedBolusInfo.context = context; detailedBolusInfo.boluscalc = boluscalcJSON; detailedBolusInfo.source = Source.USER; ConfigBuilderPlugin.getCommandQueue().bolus(detailedBolusInfo, new Callback() { @Override public void run() { if (!result.success) { Intent i = new Intent(MainApp.instance(), ErrorHelperActivity.class); i.putExtra("soundid", R.raw.boluserror); i.putExtra("status", result.comment); i.putExtra("title", MainApp.gs(R.string.treatmentdeliveryerror)); i.addFlags(Intent.FLAG_ACTIVITY_NEW_TASK); MainApp.instance().startActivity(i); } } }); FabricPrivacy.getInstance().logCustom(new CustomEvent("QuickWizard")); } } }); builder.setNegativeButton(getString(R.string.cancel), null); builder.show(); } } } @Override public void onPause() { super.onPause(); MainApp.bus().unregister(this); sLoopHandler.removeCallbacksAndMessages(null); unregisterForContextMenu(apsModeView); unregisterForContextMenu(activeProfileView); } @Override public void onResume() { super.onResume(); MainApp.bus().register(this); sRefreshLoop = () -> { scheduleUpdateGUI("refreshLoop"); sLoopHandler.postDelayed(sRefreshLoop, 60 * 1000L); }; sLoopHandler.postDelayed(sRefreshLoop, 60 * 1000L); registerForContextMenu(apsModeView); registerForContextMenu(activeProfileView); updateGUI("onResume"); } @Subscribe public void onStatusEvent(final EventInitializationChanged ev) { scheduleUpdateGUI("EventInitializationChanged"); } @Subscribe public void onStatusEvent(final EventPreferenceChange ev) { scheduleUpdateGUI("EventPreferenceChange"); } @Subscribe public void onStatusEvent(final EventRefreshOverview ev) { scheduleUpdateGUI(ev.from); } @Subscribe public void onStatusEvent(final EventAutosensCalculationFinished ev) { scheduleUpdateGUI("EventAutosensCalculationFinished"); } @Subscribe public void onStatusEvent(final EventTreatmentChange ev) { scheduleUpdateGUI("EventTreatmentChange"); } @Subscribe public void onStatusEvent(final EventCareportalEventChange ev) { scheduleUpdateGUI("EventCareportalEventChange"); } @Subscribe public void onStatusEvent(final EventTempBasalChange ev) { scheduleUpdateGUI("EventTempBasalChange"); } @Subscribe public void onStatusEvent(final EventExtendedBolusChange ev) { 
scheduleUpdateGUI("EventExtendedBolusChange"); } @Subscribe public void onStatusEvent(final EventNewOpenLoopNotification ev) { scheduleUpdateGUI("EventNewOpenLoopNotification"); } @Subscribe public void onStatusEvent(final EventTempTargetChange ev) { scheduleUpdateGUI("EventTempTargetChange"); } @Subscribe public void onStatusEvent(final EventProfileSwitchChange ev) { scheduleUpdateGUI("EventProfileSwitchChange"); } @Subscribe public void onStatusEvent(final EventPumpStatusChanged s) { Activity activity = getActivity(); if (activity != null) activity.runOnUiThread(() -> updatePumpStatus(s.textStatus())); } @Subscribe public void onStatusEvent(final EventIobCalculationProgress e) { Activity activity = getActivity(); if (activity != null) activity.runOnUiThread(() -> { if (iobCalculationProgressView != null) iobCalculationProgressView.setText(e.progress); }); } private void hideTempRecommendation() { Activity activity = getActivity(); if (activity != null) activity.runOnUiThread(() -> { if (acceptTempLayout != null) acceptTempLayout.setVisibility(View.GONE); }); } private void clearNotification() { NotificationManager notificationManager = (NotificationManager) MainApp.instance().getSystemService(Context.NOTIFICATION_SERVICE); notificationManager.cancel(Constants.notificationID); } private void updatePumpStatus(String status) { if (!status.equals("")) { pumpStatusView.setText(status); pumpStatusLayout.setVisibility(View.VISIBLE); loopStatusLayout.setVisibility(View.GONE); } else { pumpStatusLayout.setVisibility(View.GONE); loopStatusLayout.setVisibility(View.VISIBLE); } } public void scheduleUpdateGUI(final String from) { class UpdateRunnable implements Runnable { public void run() { Activity activity = getActivity(); if (activity != null) activity.runOnUiThread(() -> { updateGUI(from); scheduledUpdate = null; }); } } // prepare task for execution in 400 msec // cancel waiting task to prevent multiple updates if (scheduledUpdate != null) scheduledUpdate.cancel(false); Runnable task = new UpdateRunnable(); final int msec = 500; scheduledUpdate = worker.schedule(task, msec, TimeUnit.MILLISECONDS); } @SuppressLint("SetTextI18n") public void updateGUI(final String from) { log.debug("updateGUI entered from: " + from); final Date updateGUIStart = new Date(); if (getActivity() == null) return; if (timeView != null) { //must not exists timeView.setText(DateUtil.timeString(new Date())); } if (!MainApp.getConfigBuilder().isProfileValid("Overview")) { pumpStatusView.setText(R.string.noprofileset); pumpStatusLayout.setVisibility(View.VISIBLE); loopStatusLayout.setVisibility(View.GONE); return; } pumpStatusLayout.setVisibility(View.GONE); loopStatusLayout.setVisibility(View.VISIBLE); updateNotifications(); CareportalFragment.updateAge(getActivity(), sage, iage, cage, pbage); BgReading actualBG = DatabaseHelper.actualBg(); BgReading lastBG = DatabaseHelper.lastBg(); final PumpInterface pump = ConfigBuilderPlugin.getActivePump(); final Profile profile = MainApp.getConfigBuilder().getProfile(); final String units = profile.getUnits(); final double lowLine = OverviewPlugin.getPlugin().determineLowLine(units); final double highLine = OverviewPlugin.getPlugin().determineHighLine(units); //Start with updating the BG as it is unaffected by loop. 
// **** BG value **** if (lastBG != null) { int color = MainApp.sResources.getColor(R.color.inrange); if (lastBG.valueToUnits(units) < lowLine) color = MainApp.sResources.getColor(R.color.low); else if (lastBG.valueToUnits(units) > highLine) color = MainApp.sResources.getColor(R.color.high); bgView.setText(lastBG.valueToUnitsToString(units)); arrowView.setText(lastBG.directionToSymbol()); bgView.setTextColor(color); arrowView.setTextColor(color); GlucoseStatus glucoseStatus = GlucoseStatus.getGlucoseStatusData(); if (glucoseStatus != null) { deltaView.setText("Δ " + Profile.toUnitsString(glucoseStatus.delta, glucoseStatus.delta * Constants.MGDL_TO_MMOLL, units) + " " + units); if (avgdeltaView != null) avgdeltaView.setText("øΔ15m: " + Profile.toUnitsString(glucoseStatus.short_avgdelta, glucoseStatus.short_avgdelta * Constants.MGDL_TO_MMOLL, units) + " øΔ40m: " + Profile.toUnitsString(glucoseStatus.long_avgdelta, glucoseStatus.long_avgdelta * Constants.MGDL_TO_MMOLL, units)); } else { deltaView.setText("Δ " + MainApp.gs(R.string.notavailable)); if (avgdeltaView != null) avgdeltaView.setText(""); } } Constraint<Boolean> closedLoopEnabled = MainApp.getConstraintChecker().isClosedLoopAllowed(); // open loop mode final LoopPlugin.LastRun finalLastRun = LoopPlugin.lastRun; if (Config.APS && pump.getPumpDescription().isTempBasalCapable) { apsModeView.setVisibility(View.VISIBLE); apsModeView.setBackgroundColor(MainApp.sResources.getColor(R.color.loopenabled)); apsModeView.setTextColor(Color.BLACK); final LoopPlugin loopPlugin = LoopPlugin.getPlugin(); if (loopPlugin.isEnabled(PluginType.LOOP) && loopPlugin.isSuperBolus()) { apsModeView.setBackgroundColor(MainApp.sResources.getColor(R.color.looppumpsuspended)); apsModeView.setText(String.format(MainApp.gs(R.string.loopsuperbolusfor), loopPlugin.minutesToEndOfSuspend())); apsModeView.setTextColor(Color.WHITE); } else if (loopPlugin.isEnabled(PluginType.LOOP) && loopPlugin.isDisconnected()) { apsModeView.setBackgroundColor(MainApp.sResources.getColor(R.color.looppumpsuspended)); apsModeView.setText(String.format(MainApp.gs(R.string.loopdisconnectedfor), loopPlugin.minutesToEndOfSuspend())); apsModeView.setTextColor(Color.WHITE); } else if (loopPlugin.isEnabled(PluginType.LOOP) && loopPlugin.isSuspended()) { apsModeView.setBackgroundColor(MainApp.sResources.getColor(R.color.looppumpsuspended)); apsModeView.setText(String.format(MainApp.gs(R.string.loopsuspendedfor), loopPlugin.minutesToEndOfSuspend())); apsModeView.setTextColor(Color.WHITE); } else if (pump.isSuspended()) { apsModeView.setBackgroundColor(MainApp.sResources.getColor(R.color.looppumpsuspended)); apsModeView.setText(MainApp.gs(R.string.pumpsuspended)); apsModeView.setTextColor(Color.WHITE); } else if (loopPlugin.isEnabled(PluginType.LOOP)) { if (closedLoopEnabled.value()) { apsModeView.setText(MainApp.gs(R.string.closedloop)); } else { apsModeView.setText(MainApp.gs(R.string.openloop)); } } else { apsModeView.setBackgroundColor(MainApp.sResources.getColor(R.color.loopdisabled)); apsModeView.setText(MainApp.gs(R.string.disabledloop)); apsModeView.setTextColor(Color.WHITE); } } else { apsModeView.setVisibility(View.GONE); } // temp target TempTarget tempTarget = TreatmentsPlugin.getPlugin().getTempTargetFromHistory(); if (tempTarget != null) { tempTargetView.setTextColor(Color.BLACK); tempTargetView.setBackgroundColor(MainApp.sResources.getColor(R.color.tempTargetBackground)); tempTargetView.setVisibility(View.VISIBLE); tempTargetView.setText(Profile.toTargetRangeString(tempTarget.low, 
tempTarget.high, Constants.MGDL, units) + " " + DateUtil.untilString(tempTarget.end())); } else { tempTargetView.setTextColor(Color.WHITE); tempTargetView.setBackgroundColor(MainApp.sResources.getColor(R.color.tempTargetDisabledBackground)); tempTargetView.setText(Profile.toTargetRangeString(profile.getTargetLow(), profile.getTargetHigh(), units, units)); tempTargetView.setVisibility(View.VISIBLE); } // **** Temp button **** if (acceptTempLayout != null) { boolean showAcceptButton = !closedLoopEnabled.value(); // Open mode needed showAcceptButton = showAcceptButton && finalLastRun != null && finalLastRun.lastAPSRun != null; // aps result must exist showAcceptButton = showAcceptButton && (finalLastRun.lastOpenModeAccept == null || finalLastRun.lastOpenModeAccept.getTime() < finalLastRun.lastAPSRun.getTime()); // never accepted or before last result showAcceptButton = showAcceptButton && finalLastRun.constraintsProcessed.isChangeRequested(); // change is requested if (showAcceptButton && pump.isInitialized() && !pump.isSuspended() && LoopPlugin.getPlugin().isEnabled(PluginType.LOOP)) { acceptTempLayout.setVisibility(View.VISIBLE); acceptTempButton.setText(getContext().getString(R.string.setbasalquestion) + "\n" + finalLastRun.constraintsProcessed); } else { acceptTempLayout.setVisibility(View.GONE); } } // **** Calibration & CGM buttons **** boolean xDripIsBgSource = MainApp.getSpecificPlugin(SourceXdripPlugin.class) != null && MainApp.getSpecificPlugin(SourceXdripPlugin.class).isEnabled(PluginType.BGSOURCE); boolean g5IsBgSource = MainApp.getSpecificPlugin(SourceDexcomG5Plugin.class) != null && MainApp.getSpecificPlugin(SourceDexcomG5Plugin.class).isEnabled(PluginType.BGSOURCE); boolean bgAvailable = DatabaseHelper.actualBg() != null; if (calibrationButton != null) { if ((xDripIsBgSource || g5IsBgSource) && bgAvailable && SP.getBoolean(R.string.key_show_calibration_button, true)) { calibrationButton.setVisibility(View.VISIBLE); } else { calibrationButton.setVisibility(View.GONE); } } if (cgmButton != null) { if (xDripIsBgSource && SP.getBoolean(R.string.key_show_cgm_button, false)) { cgmButton.setVisibility(View.VISIBLE); } else if (g5IsBgSource && SP.getBoolean(R.string.key_show_cgm_button, false)) { cgmButton.setVisibility(View.VISIBLE); } else { cgmButton.setVisibility(View.GONE); } } final TemporaryBasal activeTemp = TreatmentsPlugin.getPlugin().getTempBasalFromHistory(System.currentTimeMillis()); String basalText = ""; if (shorttextmode) { if (activeTemp != null) { basalText = "T: " + activeTemp.toStringVeryShort(); } else { basalText = DecimalFormatter.to2Decimal(profile.getBasal()) + "U/h"; } baseBasalView.setOnClickListener(new View.OnClickListener() { @Override public void onClick(View v) { String fullText = MainApp.gs(R.string.pump_basebasalrate_label) + ": " + DecimalFormatter.to2Decimal(profile.getBasal()) + "U/h\n"; if (activeTemp != null) { fullText += MainApp.gs(R.string.pump_tempbasal_label) + ": " + activeTemp.toStringFull(); } OKDialog.show(getActivity(), MainApp.gs(R.string.basal), fullText, null); } }); } else { if (activeTemp != null) { basalText = activeTemp.toStringFull() + " "; } if (Config.NSCLIENT || Config.G5UPLOADER) basalText += "(" + DecimalFormatter.to2Decimal(profile.getBasal()) + " U/h)"; else if (pump.getPumpDescription().isTempBasalCapable) { basalText += "(" + DecimalFormatter.to2Decimal(pump.getBaseBasalRate()) + "U/h)"; } } if (activeTemp != null) { baseBasalView.setTextColor(MainApp.sResources.getColor(R.color.basal)); } else { 
baseBasalView.setTextColor(Color.WHITE); } baseBasalView.setText(basalText); final ExtendedBolus extendedBolus = TreatmentsPlugin.getPlugin().getExtendedBolusFromHistory(System.currentTimeMillis()); String extendedBolusText = ""; if (extendedBolusView != null) { // must not exists in all layouts if (shorttextmode) { if (extendedBolus != null && !pump.isFakingTempsByExtendedBoluses()) { extendedBolusText = DecimalFormatter.to2Decimal(extendedBolus.absoluteRate()) + "U/h"; } extendedBolusView.setText(extendedBolusText); extendedBolusView.setOnClickListener(new View.OnClickListener() { @Override public void onClick(View v) { OKDialog.show(getActivity(), MainApp.gs(R.string.extendedbolus), extendedBolus.toString(), null); } }); } else { if (extendedBolus != null && !pump.isFakingTempsByExtendedBoluses()) { extendedBolusText = extendedBolus.toString(); } extendedBolusView.setText(extendedBolusText); } if (extendedBolusText.equals("")) extendedBolusView.setVisibility(View.INVISIBLE); else extendedBolusView.setVisibility(View.VISIBLE); } activeProfileView.setText(MainApp.getConfigBuilder().getProfileName()); activeProfileView.setBackgroundColor(Color.GRAY); tempTargetView.setOnLongClickListener(view -> { view.performHapticFeedback(HapticFeedbackConstants.LONG_PRESS); NewNSTreatmentDialog newTTDialog = new NewNSTreatmentDialog(); final OptionsToShow temptarget = CareportalFragment.TEMPTARGET; temptarget.executeTempTarget = true; newTTDialog.setOptions(temptarget, R.string.careportal_temporarytarget); newTTDialog.show(getFragmentManager(), "NewNSTreatmentDialog"); return true; }); tempTargetView.setLongClickable(true); // QuickWizard button QuickWizardEntry quickWizardEntry = OverviewPlugin.getPlugin().quickWizard.getActive(); if (quickWizardEntry != null && lastBG != null && pump.isInitialized() && !pump.isSuspended()) { quickWizardButton.setVisibility(View.VISIBLE); String text = quickWizardEntry.buttonText() + "\n" + DecimalFormatter.to0Decimal(quickWizardEntry.carbs()) + "g"; BolusWizard wizard = quickWizardEntry.doCalc(profile, tempTarget, lastBG, false); text += " " + DecimalFormatter.toPumpSupportedBolus(wizard.calculatedTotalInsulin) + "U"; quickWizardButton.setText(text); if (wizard.calculatedTotalInsulin <= 0) quickWizardButton.setVisibility(View.GONE); } else quickWizardButton.setVisibility(View.GONE); // **** Various treatment buttons **** if (carbsButton != null) { if (SP.getBoolean(R.string.key_show_carbs_button, true) && (!ConfigBuilderPlugin.getActivePump().getPumpDescription().storesCarbInfo || (pump.isInitialized() && !pump.isSuspended()))) { carbsButton.setVisibility(View.VISIBLE); } else { carbsButton.setVisibility(View.GONE); } } if (pump.isInitialized() && !pump.isSuspended()) { if (treatmentButton != null) { if (SP.getBoolean(R.string.key_show_treatment_button, false)) { treatmentButton.setVisibility(View.VISIBLE); } else { treatmentButton.setVisibility(View.GONE); } } if (wizardButton != null) { if (SP.getBoolean(R.string.key_show_wizard_button, true)) { wizardButton.setVisibility(View.VISIBLE); } else { wizardButton.setVisibility(View.GONE); } } if (insulinButton != null) { if (SP.getBoolean(R.string.key_show_insulin_button, true)) { insulinButton.setVisibility(View.VISIBLE); } else { insulinButton.setVisibility(View.GONE); } } } // **** BG value **** if (lastBG == null) { //left this here as it seems you want to exit at this point if it is null... 
return; } Integer flag = bgView.getPaintFlags(); if (actualBG == null) { flag |= Paint.STRIKE_THRU_TEXT_FLAG; } else flag &= ~Paint.STRIKE_THRU_TEXT_FLAG; bgView.setPaintFlags(flag); timeAgoView.setText(DateUtil.minAgo(lastBG.date)); // iob TreatmentsPlugin.getPlugin().updateTotalIOBTreatments(); TreatmentsPlugin.getPlugin().updateTotalIOBTempBasals(); final IobTotal bolusIob = TreatmentsPlugin.getPlugin().getLastCalculationTreatments().round(); final IobTotal basalIob = TreatmentsPlugin.getPlugin().getLastCalculationTempBasals().round(); if (shorttextmode) { String iobtext = DecimalFormatter.to2Decimal(bolusIob.iob + basalIob.basaliob) + "U"; iobView.setText(iobtext); iobView.setOnClickListener(v -> { String iobtext1 = DecimalFormatter.to2Decimal(bolusIob.iob + basalIob.basaliob) + "U\n" + getString(R.string.bolus) + ": " + DecimalFormatter.to2Decimal(bolusIob.iob) + "U\n" + getString(R.string.basal) + ": " + DecimalFormatter.to2Decimal(basalIob.basaliob) + "U\n"; OKDialog.show(getActivity(), MainApp.gs(R.string.iob), iobtext1, null); }); } else if (MainApp.sResources.getBoolean(R.bool.isTablet)) { String iobtext = DecimalFormatter.to2Decimal(bolusIob.iob + basalIob.basaliob) + "U (" + getString(R.string.bolus) + ": " + DecimalFormatter.to2Decimal(bolusIob.iob) + "U " + getString(R.string.basal) + ": " + DecimalFormatter.to2Decimal(basalIob.basaliob) + "U)"; iobView.setText(iobtext); } else { String iobtext = DecimalFormatter.to2Decimal(bolusIob.iob + basalIob.basaliob) + "U (" + DecimalFormatter.to2Decimal(bolusIob.iob) + "/" + DecimalFormatter.to2Decimal(basalIob.basaliob) + ")"; iobView.setText(iobtext); } // cob if (cobView != null) { // view must not exists String cobText = ""; AutosensData autosensData = IobCobCalculatorPlugin.getPlugin().getLastAutosensData("Overview COB"); if (autosensData != null) cobText = (int) autosensData.cob + " g"; cobView.setText(cobText); } final boolean predictionsAvailable = finalLastRun != null && finalLastRun.request.hasPredictions; // pump status from ns if (pumpDeviceStatusView != null) { pumpDeviceStatusView.setText(NSDeviceStatus.getInstance().getPumpStatus()); pumpDeviceStatusView.setOnClickListener(v -> OKDialog.show(getActivity(), MainApp.gs(R.string.pump), NSDeviceStatus.getInstance().getExtendedPumpStatus(), null)); } // OpenAPS status from ns if (openapsDeviceStatusView != null) { openapsDeviceStatusView.setText(NSDeviceStatus.getInstance().getOpenApsStatus()); openapsDeviceStatusView.setOnClickListener(v -> OKDialog.show(getActivity(), MainApp.gs(R.string.openaps), NSDeviceStatus.getInstance().getExtendedOpenApsStatus(), null)); } // Uploader status from ns if (uploaderDeviceStatusView != null) { uploaderDeviceStatusView.setText(NSDeviceStatus.getInstance().getUploaderStatus()); uploaderDeviceStatusView.setOnClickListener(v -> OKDialog.show(getActivity(), MainApp.gs(R.string.uploader), NSDeviceStatus.getInstance().getExtendedUploaderStatus(), null)); } // ****** GRAPH ******* new Thread(() -> { // allign to hours Calendar calendar = Calendar.getInstance(); calendar.setTimeInMillis(System.currentTimeMillis()); calendar.set(Calendar.MILLISECOND, 0); calendar.set(Calendar.SECOND, 0); calendar.set(Calendar.MINUTE, 0); calendar.add(Calendar.HOUR, 1); int hoursToFetch; final long toTime; final long fromTime; final long endTime; if (predictionsAvailable && SP.getBoolean("showprediction", false)) { int predHours = (int) (Math.ceil(finalLastRun.constraintsProcessed.getLatestPredictionsTime() - System.currentTimeMillis()) / (60 * 60 * 1000)); 
predHours = Math.min(2, predHours); predHours = Math.max(0, predHours); hoursToFetch = rangeToDisplay - predHours; toTime = calendar.getTimeInMillis() + 100000; // little bit more to avoid wrong rounding - Graphview specific fromTime = toTime - hoursToFetch * 60 * 60 * 1000L; endTime = toTime + predHours * 60 * 60 * 1000L; } else { hoursToFetch = rangeToDisplay; toTime = calendar.getTimeInMillis() + 100000; // little bit more to avoid wrong rounding - Graphview specific fromTime = toTime - hoursToFetch * 60 * 60 * 1000L; endTime = toTime; } final long now = System.currentTimeMillis(); // ------------------ 1st graph Profiler.log(log, from + " - 1st graph - START", updateGUIStart); final GraphData graphData = new GraphData(bgGraph, IobCobCalculatorPlugin.getPlugin()); // **** In range Area **** graphData.addInRangeArea(fromTime, endTime, lowLine, highLine); // **** BG **** if (predictionsAvailable && SP.getBoolean("showprediction", false)) graphData.addBgReadings(fromTime, toTime, lowLine, highLine, finalLastRun.constraintsProcessed); else graphData.addBgReadings(fromTime, toTime, lowLine, highLine, null); // set manual x bounds to have nice steps graphData.formatAxis(fromTime, endTime); // Treatments graphData.addTreatments(fromTime, endTime); // add basal data if (pump.getPumpDescription().isTempBasalCapable && SP.getBoolean("showbasals", true)) { graphData.addBasals(fromTime, now, lowLine / graphData.maxY / 1.2d); } // add target line graphData.addTargetLine(fromTime, toTime, profile); // **** NOW line **** graphData.addNowLine(now); // ------------------ 2nd graph Profiler.log(log, from + " - 2nd graph - START", updateGUIStart); final GraphData secondGraphData = new GraphData(iobGraph, IobCobCalculatorPlugin.getPlugin()); boolean useIobForScale = false; boolean useCobForScale = false; boolean useDevForScale = false; boolean useRatioForScale = false; boolean useDSForScale = false; if (SP.getBoolean("showiob", true)) { useIobForScale = true; } else if (SP.getBoolean("showcob", true)) { useCobForScale = true; } else if (SP.getBoolean("showdeviations", false)) { useDevForScale = true; } else if (SP.getBoolean("showratios", false)) { useRatioForScale = true; } else if (SP.getBoolean("showdevslope", false)) { useDSForScale = true; } if (SP.getBoolean("showiob", true)) secondGraphData.addIob(fromTime, now, useIobForScale, 1d); if (SP.getBoolean("showcob", true)) secondGraphData.addCob(fromTime, now, useCobForScale, useCobForScale ? 
1d : 0.5d); if (SP.getBoolean("showdeviations", false)) secondGraphData.addDeviations(fromTime, now, useDevForScale, 1d); if (SP.getBoolean("showratios", false)) secondGraphData.addRatio(fromTime, now, useRatioForScale, 1d); if (SP.getBoolean("showdevslope", false)) secondGraphData.addDeviationSlope(fromTime, now, useDSForScale, 1d); // **** NOW line **** // set manual x bounds to have nice steps secondGraphData.formatAxis(fromTime, endTime); secondGraphData.addNowLine(now); // do GUI update FragmentActivity activity = getActivity(); if (activity != null) { activity.runOnUiThread(() -> { if (SP.getBoolean("showiob", true) || SP.getBoolean("showcob", true) || SP.getBoolean("showdeviations", false) || SP.getBoolean("showratios", false) || SP.getBoolean("showdevslope", false)) { iobGraph.setVisibility(View.VISIBLE); } else { iobGraph.setVisibility(View.GONE); } // finally enforce drawing of graphs graphData.performUpdate(); secondGraphData.performUpdate(); Profiler.log(log, from + " - onDataChanged", updateGUIStart); }); } }).start(); Profiler.log(log, from, updateGUIStart); } //Notifications void updateNotifications() { NotificationStore nstore = OverviewPlugin.getPlugin().notificationStore; nstore.removeExpired(); nstore.unSnooze(); if (nstore.store.size() > 0) { NotificationRecyclerViewAdapter adapter = new NotificationRecyclerViewAdapter(nstore.store); notificationsView.setAdapter(adapter); notificationsView.setVisibility(View.VISIBLE); } else { notificationsView.setVisibility(View.GONE); } } }
1
30,313
GONE breaks formatting of ":". Or is it fixed below?
MilosKozak-AndroidAPS
java
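The review comment above ("GONE breaks formatting of ':'") is about Android view visibility: View.GONE removes a view from measurement and layout entirely, so a neighbouring label such as a ":" separator re-flows, while View.INVISIBLE hides the view but keeps its measured space, which is what the extendedBolusView branch in the fragment above relies on. A minimal sketch of that trade-off, using a hypothetical label/value pair rather than the real overview layout:

import android.view.View;
import android.widget.TextView;

final class VisibilityExample {
    // Hide a value field that sits next to a "Label:" TextView. Whether the
    // label's formatting survives depends on which visibility state is used.
    static void hideValue(TextView valueView, boolean keepSpace) {
        if (keepSpace) {
            // INVISIBLE keeps the view's measured size, so the "Label:" text
            // next to it stays where it was.
            valueView.setVisibility(View.INVISIBLE);
        } else {
            // GONE removes the view from layout; siblings re-flow, which is
            // the formatting break the reviewer points out.
            valueView.setVisibility(View.GONE);
        }
    }
}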
@@ -20,6 +20,9 @@ public class ASTResource extends ASTFormalParameter { public Object jjtAccept(JavaParserVisitor visitor, Object data) { return visitor.visit(this, data); } + + // TODO Should we deprecate all methods from ASTFormalParameter? + } /* * JavaCC - OriginalChecksum=92734fc70bba91fd9422150dbf87d5c4 (do not edit this
1
/** * BSD-style license; for more info see http://pmd.sourceforge.net/license.html */ /* Generated By:JJTree: Do not edit this line. ASTResource.java Version 4.1 */ /* JavaCCOptions:MULTI=true,NODE_USES_PARSER=true,VISITOR=true,TRACK_TOKENS=false,NODE_PREFIX=AST,NODE_EXTENDS=,NODE_FACTORY= */ package net.sourceforge.pmd.lang.java.ast; public class ASTResource extends ASTFormalParameter { public ASTResource(int id) { super(id); } public ASTResource(JavaParser p, int id) { super(p, id); } /** Accept the visitor. **/ @Override public Object jjtAccept(JavaParserVisitor visitor, Object data) { return visitor.visit(this, data); } } /* * JavaCC - OriginalChecksum=92734fc70bba91fd9422150dbf87d5c4 (do not edit this * line) */
1
14,899
What do you mean by this? Won't the deprecation be inherited?
pmd-pmd
java
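The question above concerns whether deprecating methods on ASTFormalParameter would carry over to ASTResource. In Java, @Deprecated on a method is not inherited by an overriding method (method annotations are never inherited; @Inherited applies only to type-level annotations), though a call to a deprecated method that the subclass does not override still resolves to the superclass declaration and therefore still warns. A minimal sketch with hypothetical class names, not the actual PMD AST types:

class Base {
    /** @deprecated use {@link #replacement()} instead. */
    @Deprecated
    void legacy() { }

    void replacement() { }
}

class Derived extends Base {
    // Not automatically deprecated: @Deprecated is not inherited by
    // overrides, so it has to be repeated here for callers of
    // Derived.legacy() to get a deprecation warning.
    @Override
    void legacy() { }
}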
@@ -90,6 +90,8 @@ std::string FlatCompiler::GetUsageString(const char* program_name) const { " --cpp-ptr-type T Set object API pointer type (default std::unique_ptr)\n" " --cpp-str-type T Set object API string type (default std::string)\n" " T::c_str() and T::length() must be supported\n" + " --object-prefix Customise class prefix for C++ object-based API.\n" + " --object-suffix Customise class suffix for C++ object-based API. Default value is \"T\"\n" " --no-js-exports Removes Node.js style export lines in JS.\n" " --goog-js-export Uses goog.exports* for closure compiler exporting in JS.\n" " --go-namespace Generate the overrided namespace in Golang.\n"
1
/* * Copyright 2014 Google Inc. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "flatbuffers/flatc.h" #include <list> #define FLATC_VERSION "1.7.0 (" __DATE__ ")" namespace flatbuffers { void FlatCompiler::ParseFile( flatbuffers::Parser &parser, const std::string &filename, const std::string &contents, std::vector<const char *> &include_directories) const { auto local_include_directory = flatbuffers::StripFileName(filename); include_directories.push_back(local_include_directory.c_str()); include_directories.push_back(nullptr); if (!parser.Parse(contents.c_str(), &include_directories[0], filename.c_str())) Error(parser.error_, false, false); include_directories.pop_back(); include_directories.pop_back(); } void FlatCompiler::Warn(const std::string &warn, bool show_exe_name) const { params_.warn_fn(this, warn, show_exe_name); } void FlatCompiler::Error(const std::string &err, bool usage, bool show_exe_name) const { params_.error_fn(this, err, usage, show_exe_name); } std::string FlatCompiler::GetUsageString(const char* program_name) const { std::stringstream ss; ss << "Usage: " << program_name << " [OPTION]... FILE... [-- FILE...]\n"; for (size_t i = 0; i < params_.num_generators; ++i) { const Generator& g = params_.generators[i]; std::stringstream full_name; full_name << std::setw(12) << std::left << g.generator_opt_long; const char *name = g.generator_opt_short ? g.generator_opt_short : " "; const char *help = g.generator_help; ss << " " << full_name.str() << " " << name << " " << help << ".\n"; } ss << " -o PATH Prefix PATH to all generated files.\n" " -I PATH Search for includes in the specified path.\n" " -M Print make rules for generated files.\n" " --version Print the version number of flatc and exit.\n" " --strict-json Strict JSON: field names must be / will be quoted,\n" " no trailing commas in tables/vectors.\n" " --allow-non-utf8 Pass non-UTF-8 input through parser and emit nonstandard\n" " \\x escapes in JSON. (Default is to raise parse error on\n" " non-UTF-8 input.)\n" " --defaults-json Output fields whose value is the default when\n" " writing JSON\n" " --unknown-json Allow fields in JSON that are not defined in the\n" " schema. 
These fields will be discared when generating\n" " binaries.\n" " --no-prefix Don\'t prefix enum values with the enum type in C++.\n" " --scoped-enums Use C++11 style scoped and strongly typed enums.\n" " also implies --no-prefix.\n" " --gen-includes (deprecated), this is the default behavior.\n" " If the original behavior is required (no include\n" " statements) use --no-includes.\n" " --no-includes Don\'t generate include statements for included\n" " schemas the generated file depends on (C++).\n" " --gen-mutable Generate accessors that can mutate buffers in-place.\n" " --gen-onefile Generate single output file for C#.\n" " --gen-name-strings Generate type name functions for C++.\n" " --escape-proto-ids Disable appending '_' in namespaces names.\n" " --gen-object-api Generate an additional object-based API.\n" " --cpp-ptr-type T Set object API pointer type (default std::unique_ptr)\n" " --cpp-str-type T Set object API string type (default std::string)\n" " T::c_str() and T::length() must be supported\n" " --no-js-exports Removes Node.js style export lines in JS.\n" " --goog-js-export Uses goog.exports* for closure compiler exporting in JS.\n" " --go-namespace Generate the overrided namespace in Golang.\n" " --raw-binary Allow binaries without file_indentifier to be read.\n" " This may crash flatc given a mismatched schema.\n" " --proto Input is a .proto, translate to .fbs.\n" " --grpc Generate GRPC interfaces for the specified languages\n" " --schema Serialize schemas instead of JSON (use with -b)\n" " --bfbs-comments Add doc comments to the binary schema files.\n" " --conform FILE Specify a schema the following schemas should be\n" " an evolution of. Gives errors if not.\n" " --conform-includes Include path for the schema given with --conform\n" " PATH \n" " --include-prefix Prefix this path to any generated include statements.\n" " PATH\n" " --keep-prefix Keep original prefix of schema include statement.\n" " --no-fb-import Don't include flatbuffers import statement for TypeScript.\n" " --no-ts-reexport Don't re-export imported dependencies for TypeScript.\n" "FILEs may be schemas (must end in .fbs), or JSON files (conforming to preceding\n" "schema). 
FILEs after the -- must be binary flatbuffer format files.\n" "Output files are named using the base file name of the input,\n" "and written to the current directory or the path given by -o.\n" "example: " << program_name << " -c -b schema1.fbs schema2.fbs data.json\n"; return ss.str(); } int FlatCompiler::Compile(int argc, const char** argv) { if (params_.generators == nullptr || params_.num_generators == 0) { return 0; } flatbuffers::IDLOptions opts; std::string output_path; bool any_generator = false; bool print_make_rules = false; bool raw_binary = false; bool schema_binary = false; bool grpc_enabled = false; std::vector<std::string> filenames; std::list<std::string> include_directories_storage; std::vector<const char *> include_directories; std::vector<const char *> conform_include_directories; std::vector<bool> generator_enabled(params_.num_generators, false); size_t binary_files_from = std::numeric_limits<size_t>::max(); std::string conform_to_schema; for (int argi = 0; argi < argc; argi++) { std::string arg = argv[argi]; if (arg[0] == '-') { if (filenames.size() && arg[1] != '-') Error("invalid option location: " + arg, true); if (arg == "-o") { if (++argi >= argc) Error("missing path following: " + arg, true); output_path = flatbuffers::ConCatPathFileName( flatbuffers::PosixPath(argv[argi]), ""); } else if(arg == "-I") { if (++argi >= argc) Error("missing path following" + arg, true); include_directories_storage.push_back( flatbuffers::PosixPath(argv[argi])); include_directories.push_back( include_directories_storage.back().c_str()); } else if(arg == "--conform") { if (++argi >= argc) Error("missing path following" + arg, true); conform_to_schema = flatbuffers::PosixPath(argv[argi]); } else if (arg == "--conform-includes") { if (++argi >= argc) Error("missing path following" + arg, true); include_directories_storage.push_back( flatbuffers::PosixPath(argv[argi])); conform_include_directories.push_back( include_directories_storage.back().c_str()); } else if (arg == "--include-prefix") { if (++argi >= argc) Error("missing path following" + arg, true); opts.include_prefix = flatbuffers::ConCatPathFileName( flatbuffers::PosixPath(argv[argi]), ""); } else if(arg == "--keep-prefix") { opts.keep_include_path = true; } else if(arg == "--strict-json") { opts.strict_json = true; } else if(arg == "--allow-non-utf8") { opts.allow_non_utf8 = true; } else if(arg == "--no-js-exports") { opts.skip_js_exports = true; } else if(arg == "--goog-js-export") { opts.use_goog_js_export_format = true; } else if(arg == "--go-namespace") { if (++argi >= argc) Error("missing golang namespace" + arg, true); opts.go_namespace = argv[argi]; } else if(arg == "--defaults-json") { opts.output_default_scalars_in_json = true; } else if (arg == "--unknown-json") { opts.skip_unexpected_fields_in_json = true; } else if(arg == "--no-prefix") { opts.prefixed_enums = false; } else if(arg == "--scoped-enums") { opts.prefixed_enums = false; opts.scoped_enums = true; } else if (arg == "--no-union-value-namespacing") { opts.union_value_namespacing = false; } else if(arg == "--gen-mutable") { opts.mutable_buffer = true; } else if(arg == "--gen-name-strings") { opts.generate_name_strings = true; } else if(arg == "--gen-object-api") { opts.generate_object_based_api = true; } else if (arg == "--cpp-ptr-type") { if (++argi >= argc) Error("missing type following" + arg, true); opts.cpp_object_api_pointer_type = argv[argi]; } else if (arg == "--cpp-str-type") { if (++argi >= argc) Error("missing type following" + arg, true); 
opts.cpp_object_api_string_type = argv[argi]; } else if(arg == "--gen-all") { opts.generate_all = true; opts.include_dependence_headers = false; } else if(arg == "--gen-includes") { // Deprecated, remove this option some time in the future. printf("warning: --gen-includes is deprecated (it is now default)\n"); } else if(arg == "--no-includes") { opts.include_dependence_headers = false; } else if (arg == "--gen-onefile") { opts.one_file = true; } else if (arg == "--raw-binary") { raw_binary = true; } else if(arg == "--") { // Separator between text and binary inputs. binary_files_from = filenames.size(); } else if(arg == "--proto") { opts.proto_mode = true; } else if(arg == "--escape-proto-ids") { opts.escape_proto_identifiers = true; } else if(arg == "--schema") { schema_binary = true; } else if(arg == "-M") { print_make_rules = true; } else if(arg == "--version") { printf("flatc version %s\n", FLATC_VERSION); exit(0); } else if(arg == "--grpc") { grpc_enabled = true; } else if(arg == "--bfbs-comments") { opts.binary_schema_comments = true; } else if(arg == "--no-fb-import") { opts.skip_flatbuffers_import = true; } else if(arg == "--no-ts-reexport") { opts.reexport_ts_modules = false; } else { for (size_t i = 0; i < params_.num_generators; ++i) { if (arg == params_.generators[i].generator_opt_long || (params_.generators[i].generator_opt_short && arg == params_.generators[i].generator_opt_short)) { generator_enabled[i] = true; any_generator = true; opts.lang_to_generate |= params_.generators[i].lang; goto found; } } Error("unknown commandline argument: " + arg, true); found:; } } else { filenames.push_back(flatbuffers::PosixPath(argv[argi])); } } if (!filenames.size()) Error("missing input files", false, true); if (opts.proto_mode) { if (any_generator) Error("cannot generate code directly from .proto files", true); } else if (!any_generator && conform_to_schema.empty()) { Error("no options: specify at least one generator.", true); } flatbuffers::Parser conform_parser; if (!conform_to_schema.empty()) { std::string contents; if (!flatbuffers::LoadFile(conform_to_schema.c_str(), true, &contents)) Error("unable to load schema: " + conform_to_schema); ParseFile(conform_parser, conform_to_schema, contents, conform_include_directories); } std::unique_ptr<flatbuffers::Parser> parser(new flatbuffers::Parser(opts)); for (auto file_it = filenames.begin(); file_it != filenames.end(); ++file_it) { auto &filename = *file_it; std::string contents; if (!flatbuffers::LoadFile(filename.c_str(), true, &contents)) Error("unable to load file: " + filename); bool is_binary = static_cast<size_t>(file_it - filenames.begin()) >= binary_files_from; auto is_schema = flatbuffers::GetExtension(filename) == "fbs"; if (is_binary) { parser->builder_.Clear(); parser->builder_.PushFlatBuffer( reinterpret_cast<const uint8_t *>(contents.c_str()), contents.length()); if (!raw_binary) { // Generally reading binaries that do not correspond to the schema // will crash, and sadly there's no way around that when the binary // does not contain a file identifier. // We'd expect that typically any binary used as a file would have // such an identifier, so by default we require them to match. 
if (!parser->file_identifier_.length()) { Error("current schema has no file_identifier: cannot test if \"" + filename + "\" matches the schema, use --raw-binary to read this file" " anyway."); } else if (!flatbuffers::BufferHasIdentifier(contents.c_str(), parser->file_identifier_.c_str())) { Error("binary \"" + filename + "\" does not have expected file_identifier \"" + parser->file_identifier_ + "\", use --raw-binary to read this file anyway."); } } } else { // Check if file contains 0 bytes. if (contents.length() != strlen(contents.c_str())) { Error("input file appears to be binary: " + filename, true); } if (is_schema) { // If we're processing multiple schemas, make sure to start each // one from scratch. If it depends on previous schemas it must do // so explicitly using an include. parser.reset(new flatbuffers::Parser(opts)); } ParseFile(*parser.get(), filename, contents, include_directories); if (!is_schema && !parser->builder_.GetSize()) { // If a file doesn't end in .fbs, it must be json/binary. Ensure we // didn't just parse a schema with a different extension. Error("input file is neither json nor a .fbs (schema) file: " + filename, true); } if (is_schema && !conform_to_schema.empty()) { auto err = parser->ConformTo(conform_parser); if (!err.empty()) Error("schemas don\'t conform: " + err); } if (schema_binary) { parser->Serialize(); parser->file_extension_ = reflection::SchemaExtension(); } } std::string filebase = flatbuffers::StripPath( flatbuffers::StripExtension(filename)); for (size_t i = 0; i < params_.num_generators; ++i) { parser->opts.lang = params_.generators[i].lang; if (generator_enabled[i]) { if (!print_make_rules) { flatbuffers::EnsureDirExists(output_path); if ((!params_.generators[i].schema_only || is_schema) && !params_.generators[i].generate(*parser.get(), output_path, filebase)) { Error(std::string("Unable to generate ") + params_.generators[i].lang_name + " for " + filebase); } } else { std::string make_rule = params_.generators[i].make_rule( *parser.get(), output_path, filename); if (!make_rule.empty()) printf("%s\n", flatbuffers::WordWrap( make_rule, 80, " ", " \\").c_str()); } if (grpc_enabled) { if (params_.generators[i].generateGRPC != nullptr) { if (!params_.generators[i].generateGRPC(*parser.get(), output_path, filebase)) { Error(std::string("Unable to generate GRPC interface for") + params_.generators[i].lang_name); } } else { Warn(std::string("GRPC interface generator not implemented for ") + params_.generators[i].lang_name); } } } } if (opts.proto_mode) GenerateFBS(*parser.get(), output_path, filebase); // We do not want to generate code for the definitions in this file // in any files coming up next. parser->MarkGenerated(); } return 0; } } // namespace flatbuffers
1
12,406
line between " and " should be <80 chars.
google-flatbuffers
java
@@ -7,7 +7,6 @@ module Travis class Script class Ruby < Script DEFAULTS = { - rvm: 'default', gemfile: 'Gemfile' }
1
require 'travis/build/script/shared/bundler' require 'travis/build/script/shared/jdk' require 'travis/build/script/shared/rvm' module Travis module Build class Script class Ruby < Script DEFAULTS = { rvm: 'default', gemfile: 'Gemfile' } include Bundler, RVM, Jdk def announce super sh.cmd 'gem --version' end def script sh.if "-f #{config[:gemfile]}" do sh.cmd 'bundle exec rake' end sh.else do sh.cmd 'rake' end end private def uses_java? ruby_version.include?('jruby') end end end end end
1
13,366
What will happen if neither `rvm` nor `ruby` is present in the .travis.yml?
travis-ci-travis-build
rb
@@ -71,8 +71,7 @@ class HCI_ACL_Hdr(Packet): def post_build(self, p, pay): p += pay if self.len is None: - l = len(p)-4 - p = p[:2]+chr(l&0xff)+chr((l>>8)&0xff)+p[4:] + p = p[:2] + struct.pack("<H", len(pay)) + p[4:] return p
1
## This file is part of Scapy ## See http://www.secdev.org/projects/scapy for more informations ## Copyright (C) Philippe Biondi <[email protected]> ## Copyright (C) Mike Ryan <[email protected]> ## This program is published under a GPLv2 license """ Bluetooth layers, sockets and send/receive functions. """ import socket,struct,array from ctypes import * from select import select from scapy.config import conf from scapy.packet import * from scapy.fields import * from scapy.supersocket import SuperSocket from scapy.sendrecv import sndrcv from scapy.data import MTU ########## # Fields # ########## class XLEShortField(LEShortField): def i2repr(self, pkt, x): return lhex(self.i2h(pkt, x)) class XLELongField(LEShortField): def __init__(self, name, default): Field.__init__(self, name, default, "<Q") def i2repr(self, pkt, x): return lhex(self.i2h(pkt, x)) class LEMACField(Field): def __init__(self, name, default): Field.__init__(self, name, default, "6s") def i2m(self, pkt, x): if x is None: return b"\0\0\0\0\0\0" return mac2str(x)[::-1] def m2i(self, pkt, x): return str2mac(x[::-1]) def any2i(self, pkt, x): if isinstance(x, str) and len(x) is 6: x = self.m2i(pkt, x) return x def i2repr(self, pkt, x): x = self.i2h(pkt, x) if self in conf.resolve: x = conf.manufdb._resolve_MAC(x) return x def randval(self): return RandMAC() class HCI_Hdr(Packet): name = "HCI header" fields_desc = [ ByteEnumField("type",2,{1:"command",2:"ACLdata",3:"SCOdata",4:"event",5:"vendor"}),] def mysummary(self): return self.sprintf("HCI %type%") class HCI_ACL_Hdr(Packet): name = "HCI ACL header" fields_desc = [ ByteField("handle",0), # Actually, handle is 12 bits and flags is 4. ByteField("flags",0), # I wait to write a LEBitField LEShortField("len",None), ] def post_build(self, p, pay): p += pay if self.len is None: l = len(p)-4 p = p[:2]+chr(l&0xff)+chr((l>>8)&0xff)+p[4:] return p class L2CAP_Hdr(Packet): name = "L2CAP header" fields_desc = [ LEShortField("len",None), LEShortEnumField("cid",0,{1:"control", 4:"attribute"}),] def post_build(self, p, pay): p += pay if self.len is None: l = len(pay) p = chr(l&0xff)+chr((l>>8)&0xff)+p[2:] return p class L2CAP_CmdHdr(Packet): name = "L2CAP command header" fields_desc = [ ByteEnumField("code",8,{1:"rej",2:"conn_req",3:"conn_resp", 4:"conf_req",5:"conf_resp",6:"disconn_req", 7:"disconn_resp",8:"echo_req",9:"echo_resp", 10:"info_req",11:"info_resp", 18:"conn_param_update_req", 19:"conn_param_update_resp"}), ByteField("id",0), LEShortField("len",None) ] def post_build(self, p, pay): p += pay if self.len is None: l = len(p)-4 p = p[:2]+chr(l&0xff)+chr((l>>8)&0xff)+p[4:] return p def answers(self, other): if other.id == self.id: if self.code == 1: return 1 if other.code in [2,4,6,8,10,18] and self.code == other.code+1: if other.code == 8: return 1 return self.payload.answers(other.payload) return 0 class L2CAP_ConnReq(Packet): name = "L2CAP Conn Req" fields_desc = [ LEShortEnumField("psm",0,{1:"SDP",3:"RFCOMM",5:"telephony control"}), LEShortField("scid",0), ] class L2CAP_ConnResp(Packet): name = "L2CAP Conn Resp" fields_desc = [ LEShortField("dcid",0), LEShortField("scid",0), LEShortEnumField("result",0,["success", "pend", "cr_bad_psm", "cr_sec_block", "cr_no_mem", "reserved","cr_inval_scid", "cr_scid_in_use"]), LEShortEnumField("status",0,["no_info", "authen_pend", "author_pend", "reserved"]), ] def answers(self, other): return self.scid == other.scid class L2CAP_CmdRej(Packet): name = "L2CAP Command Rej" fields_desc = [ LEShortField("reason",0), ] class L2CAP_ConfReq(Packet): name 
= "L2CAP Conf Req" fields_desc = [ LEShortField("dcid",0), LEShortField("flags",0), ] class L2CAP_ConfResp(Packet): name = "L2CAP Conf Resp" fields_desc = [ LEShortField("scid",0), LEShortField("flags",0), LEShortEnumField("result",0,["success","unaccept","reject","unknown"]), ] def answers(self, other): return self.scid == other.scid class L2CAP_DisconnReq(Packet): name = "L2CAP Disconn Req" fields_desc = [ LEShortField("dcid",0), LEShortField("scid",0), ] class L2CAP_DisconnResp(Packet): name = "L2CAP Disconn Resp" fields_desc = [ LEShortField("dcid",0), LEShortField("scid",0), ] def answers(self, other): return self.scid == other.scid class L2CAP_InfoReq(Packet): name = "L2CAP Info Req" fields_desc = [ LEShortEnumField("type",0,{1:"CL_MTU",2:"FEAT_MASK"}), StrField("data","") ] class L2CAP_InfoResp(Packet): name = "L2CAP Info Resp" fields_desc = [ LEShortField("type",0), LEShortEnumField("result",0,["success","not_supp"]), StrField("data",""), ] def answers(self, other): return self.type == other.type class L2CAP_Connection_Parameter_Update_Request(Packet): name = "L2CAP Connection Parameter Update Request" fields_desc = [ LEShortField("min_interval", 0), LEShortField("max_interval", 0), LEShortField("slave_latency", 0), LEShortField("timeout_mult", 0), ] class L2CAP_Connection_Parameter_Update_Response(Packet): name = "L2CAP Connection Parameter Update Response" fields_desc = [ LEShortField("move_result", 0), ] class ATT_Hdr(Packet): name = "ATT header" fields_desc = [ XByteField("opcode", None), ] class ATT_Error_Response(Packet): name = "Error Response" fields_desc = [ XByteField("request", 0), LEShortField("handle", 0), XByteField("ecode", 0), ] class ATT_Exchange_MTU_Request(Packet): name = "Exchange MTU Request" fields_desc = [ LEShortField("mtu", 0), ] class ATT_Exchange_MTU_Response(Packet): name = "Exchange MTU Response" fields_desc = [ LEShortField("mtu", 0), ] class ATT_Find_Information_Request(Packet): name = "Find Information Request" fields_desc = [ XLEShortField("start", 0x0000), XLEShortField("end", 0xffff), ] class ATT_Find_Information_Response(Packet): name = "Find Information Reponse" fields_desc = [ XByteField("format", 1), StrField("data", "") ] class ATT_Find_By_Type_Value_Request(Packet): name = "Find By Type Value Request" fields_desc = [ XLEShortField("start", 0x0001), XLEShortField("end", 0xffff), XLEShortField("uuid", None), StrField("data", ""), ] class ATT_Find_By_Type_Value_Response(Packet): name = "Find By Type Value Response" fields_desc = [ StrField("handles", ""), ] class ATT_Read_By_Type_Request_128bit(Packet): name = "Read By Type Request" fields_desc = [ XLEShortField("start", 0x0001), XLEShortField("end", 0xffff), XLELongField("uuid1", None), XLELongField("uuid2", None)] class ATT_Read_By_Type_Request(Packet): name = "Read By Type Request" fields_desc = [ XLEShortField("start", 0x0001), XLEShortField("end", 0xffff), XLEShortField("uuid", None)] class ATT_Read_By_Type_Response(Packet): name = "Read By Type Response" # fields_desc = [ FieldLenField("len", None, length_of="data", fmt="B"), # StrLenField("data", "", length_from=lambda pkt:pkt.len), ] fields_desc = [ StrField("data", "") ] class ATT_Read_Request(Packet): name = "Read Request" fields_desc = [ XLEShortField("gatt_handle", 0), ] class ATT_Read_Response(Packet): name = "Read Response" fields_desc = [ StrField("value", ""), ] class ATT_Read_By_Group_Type_Request(Packet): name = "Read By Group Type Request" fields_desc = [ XLEShortField("start", 0), XLEShortField("end", 0xffff), 
XLEShortField("uuid", 0), ] class ATT_Read_By_Group_Type_Response(Packet): name = "Read By Group Type Response" fields_desc = [ XByteField("length", 0), StrField("data", ""), ] class ATT_Write_Request(Packet): name = "Write Request" fields_desc = [ XLEShortField("gatt_handle", 0), StrField("data", ""), ] class ATT_Write_Command(Packet): name = "Write Request" fields_desc = [ XLEShortField("gatt_handle", 0), StrField("data", ""), ] class ATT_Write_Response(Packet): name = "Write Response" fields_desc = [ ] class ATT_Handle_Value_Notification(Packet): name = "Handle Value Notification" fields_desc = [ XLEShortField("handle", 0), StrField("value", ""), ] class SM_Hdr(Packet): name = "SM header" fields_desc = [ ByteField("sm_command", None) ] class SM_Pairing_Request(Packet): name = "Pairing Request" fields_desc = [ ByteEnumField("iocap", 3, {0:"DisplayOnly", 1:"DisplayYesNo", 2:"KeyboardOnly", 3:"NoInputNoOutput", 4:"KeyboardDisplay"}), ByteEnumField("oob", 0, {0:"Not Present", 1:"Present (from remote device)"}), BitField("authentication", 0, 8), ByteField("max_key_size", 16), ByteField("initiator_key_distribution", 0), ByteField("responder_key_distribution", 0), ] class SM_Pairing_Response(Packet): name = "Pairing Response" fields_desc = [ ByteEnumField("iocap", 3, {0:"DisplayOnly", 1:"DisplayYesNo", 2:"KeyboardOnly", 3:"NoInputNoOutput", 4:"KeyboardDisplay"}), ByteEnumField("oob", 0, {0:"Not Present", 1:"Present (from remote device)"}), BitField("authentication", 0, 8), ByteField("max_key_size", 16), ByteField("initiator_key_distribution", 0), ByteField("responder_key_distribution", 0), ] class SM_Confirm(Packet): name = "Pairing Confirm" fields_desc = [ StrFixedLenField("confirm", b'\x00' * 16, 16) ] class SM_Random(Packet): name = "Pairing Random" fields_desc = [ StrFixedLenField("random", b'\x00' * 16, 16) ] class SM_Failed(Packet): name = "Pairing Failed" fields_desc = [ XByteField("reason", 0) ] class SM_Encryption_Information(Packet): name = "Encryption Information" fields_desc = [ StrFixedLenField("ltk", b"\x00" * 16, 16), ] class SM_Master_Identification(Packet): name = "Master Identification" fields_desc = [ XLEShortField("ediv", 0), StrFixedLenField("rand", b'\x00' * 8, 8), ] class SM_Identity_Information(Packet): name = "Identity Information" fields_desc = [ StrFixedLenField("irk", b'\x00' * 16, 16), ] class SM_Identity_Address_Information(Packet): name = "Identity Address Information" fields_desc = [ ByteEnumField("atype", 0, {0:"public"}), LEMACField("address", None), ] class SM_Signing_Information(Packet): name = "Signing Information" fields_desc = [ StrFixedLenField("csrk", b'\x00' * 16, 16), ] class EIR_Hdr(Packet): name = "EIR Header" fields_desc = [ FieldLenField("len", 0, fmt="B"), ByteEnumField("type", 0, { 0x01: "flags", 0x02: "incomplete_list_16_bit_svc_uuids", 0x03: "complete_list_16_bit_svc_uuids", 0x04: "incomplete_list_32_bit_svc_uuids", 0x05: "complete_list_32_bit_svc_uuids", 0x06: "incomplete_list_128_bit_svc_uuids", 0x07: "complete_list_128_bit_svc_uuids", 0x08: "shortened_local_name", 0x09: "complete_local_name", 0x0a: "tx_power_level", 0x0d: "class_of_device", 0x0e: "simple_pairing_hash", 0x0f: "simple_pairing_rand", 0x10: "sec_mgr_tk", 0x11: "sec_mgr_oob_flags", 0x12: "slave_conn_intvl_range", 0x17: "pub_target_addr", 0x18: "rand_target_addr", 0x19: "appearance", 0x1a: "adv_intvl", 0x1b: "le_addr", 0x1c: "le_role", 0x14: "list_16_bit_svc_sollication_uuids", 0x1f: "list_32_bit_svc_sollication_uuids", 0x15: "list_128_bit_svc_sollication_uuids", 0x16: 
"svc_data_16_bit_uuid", 0x20: "svc_data_32_bit_uuid", 0x21: "svc_data_128_bit_uuid", 0x22: "sec_conn_confirm", 0x22: "sec_conn_rand", 0x24: "uri", 0xff: "mfg_specific_data", }), ] def mysummary(self): return self.sprintf("EIR %type%") class EIR_Element(Packet): name = "EIR Element" def extract_padding(self, s): # Needed to end each EIR_Element packet and make PacketListField work. return '', s @staticmethod def length_from(pkt): # 'type' byte is included in the length, so substract 1: return pkt.underlayer.len - 1 class EIR_Raw(EIR_Element): name = "EIR Raw" fields_desc = [ StrLenField("data", "", length_from=EIR_Element.length_from) ] class EIR_Flags(EIR_Element): name = "Flags" fields_desc = [ FlagsField("flags", 0x2, 8, ["limited_disc_mode", "general_disc_mode", "br_edr_not_supported", "simul_le_br_edr_ctrl", "simul_le_br_edr_host"] + 3*["reserved"]) ] class EIR_CompleteList16BitServiceUUIDs(EIR_Element): name = "Complete list of 16-bit service UUIDs" fields_desc = [ FieldListField("svc_uuids", None, XLEShortField("uuid", 0), length_from=EIR_Element.length_from) ] class EIR_IncompleteList16BitServiceUUIDs(EIR_CompleteList16BitServiceUUIDs): name = "Incomplete list of 16-bit service UUIDs" class EIR_CompleteLocalName(EIR_Element): name = "Complete Local Name" fields_desc = [ StrLenField("local_name", "", length_from=EIR_Element.length_from) ] class EIR_ShortenedLocalName(EIR_CompleteLocalName): name = "Shortened Local Name" class EIR_TX_Power_Level(EIR_Element): name = "TX Power Level" fields_desc = [SignedByteField("level", 0)] class EIR_Manufacturer_Specific_Data(EIR_Element): name = "EIR Manufacturer Specific Data" fields_desc = [ XLEShortField("company_id", 0), StrLenField("data", "", length_from=lambda pkt: EIR_Element.length_from(pkt) - 2) ] class HCI_Command_Hdr(Packet): name = "HCI Command header" fields_desc = [ XLEShortField("opcode", 0), ByteField("len", None), ] def post_build(self, p, pay): p += pay if self.len is None: l = len(p)-3 p = p[:2]+chr(l&0xff)+p[3:] return p class HCI_Cmd_Reset(Packet): name = "Reset" class HCI_Cmd_Set_Event_Filter(Packet): name = "Set Event Filter" fields_desc = [ ByteEnumField("type", 0, {0:"clear"}), ] class HCI_Cmd_Connect_Accept_Timeout(Packet): name = "Connection Attempt Timeout" fields_desc = [ LEShortField("timeout", 32000) ] # 32000 slots is 20000 msec class HCI_Cmd_LE_Host_Supported(Packet): name = "LE Host Supported" fields_desc = [ ByteField("supported", 1), ByteField("simultaneous", 1), ] class HCI_Cmd_Set_Event_Mask(Packet): name = "Set Event Mask" fields_desc = [ StrFixedLenField("mask", b"\xff\xff\xfb\xff\x07\xf8\xbf\x3d", 8) ] class HCI_Cmd_Read_BD_Addr(Packet): name = "Read BD Addr" class HCI_Cmd_LE_Set_Scan_Parameters(Packet): name = "LE Set Scan Parameters" fields_desc = [ ByteEnumField("type", 1, {1:"active"}), XLEShortField("interval", 16), XLEShortField("window", 16), ByteEnumField("atype", 0, {0:"public"}), ByteEnumField("policy", 0, {0:"all"}), ] class HCI_Cmd_LE_Set_Scan_Enable(Packet): name = "LE Set Scan Enable" fields_desc = [ ByteField("enable", 1), ByteField("filter_dups", 1), ] class HCI_Cmd_Disconnect(Packet): name = "Disconnect" fields_desc = [ XLEShortField("handle", 0), ByteField("reason", 0x13), ] class HCI_Cmd_LE_Create_Connection(Packet): name = "LE Create Connection" fields_desc = [ LEShortField("interval", 96), LEShortField("window", 48), ByteEnumField("filter", 0, {0:"address"}), ByteEnumField("patype", 0, {0:"public", 1:"random"}), LEMACField("paddr", None), ByteEnumField("atype", 0, {0:"public", 
1:"random"}), LEShortField("min_interval", 40), LEShortField("max_interval", 56), LEShortField("latency", 0), LEShortField("timeout", 42), LEShortField("min_ce", 0), LEShortField("max_ce", 0), ] class HCI_Cmd_LE_Create_Connection_Cancel(Packet): name = "LE Create Connection Cancel" class HCI_Cmd_LE_Connection_Update(Packet): name = "LE Connection Update" fields_desc = [ XLEShortField("handle", 0), XLEShortField("min_interval", 0), XLEShortField("max_interval", 0), XLEShortField("latency", 0), XLEShortField("timeout", 0), LEShortField("min_ce", 0), LEShortField("max_ce", 0xffff), ] class HCI_Cmd_LE_Read_Buffer_Size(Packet): name = "LE Read Buffer Size" class HCI_Cmd_LE_Set_Random_Address(Packet): name = "LE Set Random Address" fields_desc = [ LEMACField("address", None) ] class HCI_Cmd_LE_Set_Advertising_Parameters(Packet): name = "LE Set Advertising Parameters" fields_desc = [ LEShortField("interval_min", 0x0800), LEShortField("interval_max", 0x0800), ByteEnumField("adv_type", 0, {0:"ADV_IND", 1:"ADV_DIRECT_IND", 2:"ADV_SCAN_IND", 3:"ADV_NONCONN_IND", 4:"ADV_DIRECT_IND_LOW"}), ByteEnumField("oatype", 0, {0:"public", 1:"random"}), ByteEnumField("datype", 0, {0:"public", 1:"random"}), LEMACField("daddr", None), ByteField("channel_map", 7), ByteEnumField("filter_policy", 0, {0:"all:all", 1:"connect:all scan:whitelist", 2:"connect:whitelist scan:all", 3:"all:whitelist"}), ] class HCI_Cmd_LE_Set_Advertising_Data(Packet): name = "LE Set Advertising Data" fields_desc = [ FieldLenField("len", None, length_of="data", fmt="B"), StrLenField("data", "", length_from=lambda pkt:pkt.len), ] class HCI_Cmd_LE_Set_Advertise_Enable(Packet): name = "LE Set Advertise Enable" fields_desc = [ ByteField("enable", 0) ] class HCI_Cmd_LE_Start_Encryption_Request(Packet): name = "LE Start Encryption" fields_desc = [ LEShortField("handle", 0), StrFixedLenField("rand", None, 8), XLEShortField("ediv", 0), StrFixedLenField("ltk", b'\x00' * 16, 16), ] class HCI_Cmd_LE_Long_Term_Key_Request_Negative_Reply(Packet): name = "LE Long Term Key Request Negative Reply" fields_desc = [ LEShortField("handle", 0), ] class HCI_Cmd_LE_Long_Term_Key_Request_Reply(Packet): name = "LE Long Term Key Request Reply" fields_desc = [ LEShortField("handle", 0), StrFixedLenField("ltk", b'\x00' * 16, 16), ] class HCI_Event_Hdr(Packet): name = "HCI Event header" fields_desc = [ XByteField("code", 0), ByteField("length", 0), ] class HCI_Event_Disconnection_Complete(Packet): name = "Disconnection Complete" fields_desc = [ ByteEnumField("status", 0, {0:"success"}), LEShortField("handle", 0), XByteField("reason", 0), ] class HCI_Event_Encryption_Change(Packet): name = "Encryption Change" fields_desc = [ ByteEnumField("status", 0, {0:"change has occurred"}), LEShortField("handle", 0), ByteEnumField("enabled", 0, {0:"OFF", 1:"ON (LE)", 2:"ON (BR/EDR)"}), ] class HCI_Event_Command_Complete(Packet): name = "Command Complete" fields_desc = [ ByteField("number", 0), XLEShortField("opcode", 0), ByteEnumField("status", 0, {0:"success"}), ] class HCI_Cmd_Complete_Read_BD_Addr(Packet): name = "Read BD Addr" fields_desc = [ LEMACField("addr", None), ] class HCI_Event_Command_Status(Packet): name = "Command Status" fields_desc = [ ByteEnumField("status", 0, {0:"pending"}), ByteField("number", 0), XLEShortField("opcode", None), ] class HCI_Event_Number_Of_Completed_Packets(Packet): name = "Number Of Completed Packets" fields_desc = [ ByteField("number", 0) ] class HCI_Event_LE_Meta(Packet): name = "LE Meta" fields_desc = [ ByteEnumField("event", 0, 
{2:"advertising_report"}) ] class HCI_LE_Meta_Connection_Complete(Packet): name = "Connection Complete" fields_desc = [ ByteEnumField("status", 0, {0:"success"}), LEShortField("handle", 0), ByteEnumField("role", 0, {0:"master"}), ByteEnumField("patype", 0, {0:"public", 1:"random"}), LEMACField("paddr", None), LEShortField("interval", 54), LEShortField("latency", 0), LEShortField("supervision", 42), XByteField("clock_latency", 5), ] class HCI_LE_Meta_Connection_Update_Complete(Packet): name = "Connection Update Complete" fields_desc = [ ByteEnumField("status", 0, {0:"success"}), LEShortField("handle", 0), LEShortField("interval", 54), LEShortField("latency", 0), LEShortField("timeout", 42), ] class HCI_LE_Meta_Advertising_Report(Packet): name = "Advertising Report" fields_desc = [ ByteField("number", 0), ByteEnumField("type", 0, {0:"conn_und", 4:"scan_rsp"}), ByteEnumField("atype", 0, {0:"public", 1:"random"}), LEMACField("addr", None), FieldLenField("len", None, length_of="data", fmt="B"), PacketListField("data", [], EIR_Hdr, length_from=lambda pkt:pkt.len), SignedByteField("rssi", 0)] class HCI_LE_Meta_Long_Term_Key_Request(Packet): name = "Long Term Key Request" fields_desc = [ LEShortField("handle", 0), StrFixedLenField("rand", None, 8), XLEShortField("ediv", 0), ] bind_layers( HCI_Hdr, HCI_Command_Hdr, type=1) bind_layers( HCI_Hdr, HCI_ACL_Hdr, type=2) bind_layers( HCI_Hdr, HCI_Event_Hdr, type=4) bind_layers( HCI_Hdr, conf.raw_layer, ) bind_layers( HCI_Command_Hdr, HCI_Cmd_Reset, opcode=0x0c03) bind_layers( HCI_Command_Hdr, HCI_Cmd_Set_Event_Mask, opcode=0x0c01) bind_layers( HCI_Command_Hdr, HCI_Cmd_Set_Event_Filter, opcode=0x0c05) bind_layers( HCI_Command_Hdr, HCI_Cmd_Connect_Accept_Timeout, opcode=0x0c16) bind_layers( HCI_Command_Hdr, HCI_Cmd_LE_Host_Supported, opcode=0x0c6d) bind_layers( HCI_Command_Hdr, HCI_Cmd_Read_BD_Addr, opcode=0x1009) bind_layers( HCI_Command_Hdr, HCI_Cmd_LE_Read_Buffer_Size, opcode=0x2002) bind_layers( HCI_Command_Hdr, HCI_Cmd_LE_Set_Random_Address, opcode=0x2005) bind_layers( HCI_Command_Hdr, HCI_Cmd_LE_Set_Advertising_Parameters, opcode=0x2006) bind_layers( HCI_Command_Hdr, HCI_Cmd_LE_Set_Advertising_Data, opcode=0x2008) bind_layers( HCI_Command_Hdr, HCI_Cmd_LE_Set_Advertise_Enable, opcode=0x200a) bind_layers( HCI_Command_Hdr, HCI_Cmd_LE_Set_Scan_Parameters, opcode=0x200b) bind_layers( HCI_Command_Hdr, HCI_Cmd_LE_Set_Scan_Enable, opcode=0x200c) bind_layers( HCI_Command_Hdr, HCI_Cmd_Disconnect, opcode=0x406) bind_layers( HCI_Command_Hdr, HCI_Cmd_LE_Create_Connection, opcode=0x200d) bind_layers( HCI_Command_Hdr, HCI_Cmd_LE_Create_Connection_Cancel, opcode=0x200e) bind_layers( HCI_Command_Hdr, HCI_Cmd_LE_Connection_Update, opcode=0x2013) bind_layers( HCI_Command_Hdr, HCI_Cmd_LE_Start_Encryption_Request, opcode=0x2019) bind_layers( HCI_Command_Hdr, HCI_Cmd_LE_Long_Term_Key_Request_Reply, opcode=0x201a) bind_layers( HCI_Command_Hdr, HCI_Cmd_LE_Long_Term_Key_Request_Negative_Reply, opcode=0x201b) bind_layers( HCI_Event_Hdr, HCI_Event_Disconnection_Complete, code=0x5) bind_layers( HCI_Event_Hdr, HCI_Event_Encryption_Change, code=0x8) bind_layers( HCI_Event_Hdr, HCI_Event_Command_Complete, code=0xe) bind_layers( HCI_Event_Hdr, HCI_Event_Command_Status, code=0xf) bind_layers( HCI_Event_Hdr, HCI_Event_Number_Of_Completed_Packets, code=0x13) bind_layers( HCI_Event_Hdr, HCI_Event_LE_Meta, code=0x3e) bind_layers( HCI_Event_Command_Complete, HCI_Cmd_Complete_Read_BD_Addr, opcode=0x1009) bind_layers( HCI_Event_LE_Meta, HCI_LE_Meta_Connection_Complete, event=1) 
bind_layers( HCI_Event_LE_Meta, HCI_LE_Meta_Advertising_Report, event=2) bind_layers( HCI_Event_LE_Meta, HCI_LE_Meta_Connection_Update_Complete, event=3) bind_layers( HCI_Event_LE_Meta, HCI_LE_Meta_Long_Term_Key_Request, event=5) bind_layers(EIR_Hdr, EIR_Flags, type=0x01) bind_layers(EIR_Hdr, EIR_IncompleteList16BitServiceUUIDs, type=0x02) bind_layers(EIR_Hdr, EIR_CompleteList16BitServiceUUIDs, type=0x03) bind_layers(EIR_Hdr, EIR_ShortenedLocalName, type=0x08) bind_layers(EIR_Hdr, EIR_CompleteLocalName, type=0x09) bind_layers(EIR_Hdr, EIR_TX_Power_Level, type=0x0a) bind_layers(EIR_Hdr, EIR_Manufacturer_Specific_Data, type=0xff) bind_layers(EIR_Hdr, EIR_Raw) bind_layers( HCI_ACL_Hdr, L2CAP_Hdr, ) bind_layers( L2CAP_Hdr, L2CAP_CmdHdr, cid=1) bind_layers( L2CAP_Hdr, L2CAP_CmdHdr, cid=5) #LE L2CAP Signaling Channel bind_layers( L2CAP_CmdHdr, L2CAP_CmdRej, code=1) bind_layers( L2CAP_CmdHdr, L2CAP_ConnReq, code=2) bind_layers( L2CAP_CmdHdr, L2CAP_ConnResp, code=3) bind_layers( L2CAP_CmdHdr, L2CAP_ConfReq, code=4) bind_layers( L2CAP_CmdHdr, L2CAP_ConfResp, code=5) bind_layers( L2CAP_CmdHdr, L2CAP_DisconnReq, code=6) bind_layers( L2CAP_CmdHdr, L2CAP_DisconnResp, code=7) bind_layers( L2CAP_CmdHdr, L2CAP_InfoReq, code=10) bind_layers( L2CAP_CmdHdr, L2CAP_InfoResp, code=11) bind_layers( L2CAP_CmdHdr, L2CAP_Connection_Parameter_Update_Request, code=18) bind_layers( L2CAP_CmdHdr, L2CAP_Connection_Parameter_Update_Response, code=19) bind_layers( L2CAP_Hdr, ATT_Hdr, cid=4) bind_layers( ATT_Hdr, ATT_Error_Response, opcode=0x1) bind_layers( ATT_Hdr, ATT_Exchange_MTU_Request, opcode=0x2) bind_layers( ATT_Hdr, ATT_Exchange_MTU_Response, opcode=0x3) bind_layers( ATT_Hdr, ATT_Find_Information_Request, opcode=0x4) bind_layers( ATT_Hdr, ATT_Find_Information_Response, opcode=0x5) bind_layers( ATT_Hdr, ATT_Find_By_Type_Value_Request, opcode=0x6) bind_layers( ATT_Hdr, ATT_Find_By_Type_Value_Response, opcode=0x7) bind_layers( ATT_Hdr, ATT_Read_By_Type_Request, opcode=0x8) bind_layers( ATT_Hdr, ATT_Read_By_Type_Request_128bit, opcode=0x8) bind_layers( ATT_Hdr, ATT_Read_By_Type_Response, opcode=0x9) bind_layers( ATT_Hdr, ATT_Read_Request, opcode=0xa) bind_layers( ATT_Hdr, ATT_Read_Response, opcode=0xb) bind_layers( ATT_Hdr, ATT_Read_By_Group_Type_Request, opcode=0x10) bind_layers( ATT_Hdr, ATT_Read_By_Group_Type_Response, opcode=0x11) bind_layers( ATT_Hdr, ATT_Write_Request, opcode=0x12) bind_layers( ATT_Hdr, ATT_Write_Response, opcode=0x13) bind_layers( ATT_Hdr, ATT_Write_Command, opcode=0x52) bind_layers( ATT_Hdr, ATT_Handle_Value_Notification, opcode=0x1b) bind_layers( L2CAP_Hdr, SM_Hdr, cid=6) bind_layers( SM_Hdr, SM_Pairing_Request, sm_command=1) bind_layers( SM_Hdr, SM_Pairing_Response, sm_command=2) bind_layers( SM_Hdr, SM_Confirm, sm_command=3) bind_layers( SM_Hdr, SM_Random, sm_command=4) bind_layers( SM_Hdr, SM_Failed, sm_command=5) bind_layers( SM_Hdr, SM_Encryption_Information, sm_command=6) bind_layers( SM_Hdr, SM_Master_Identification, sm_command=7) bind_layers( SM_Hdr, SM_Identity_Information, sm_command=8) bind_layers( SM_Hdr, SM_Identity_Address_Information, sm_command=9) bind_layers( SM_Hdr, SM_Signing_Information, sm_command=0x0a) class BluetoothL2CAPSocket(SuperSocket): desc = "read/write packets on a connected L2CAP socket" def __init__(self, peer): s = socket.socket(socket.AF_BLUETOOTH, socket.SOCK_RAW, socket.BTPROTO_L2CAP) s.connect((peer,0)) self.ins = self.outs = s def recv(self, x=MTU): return L2CAP_CmdHdr(self.ins.recv(x)) class BluetoothHCISocket(SuperSocket): desc = "read/write on a 
BlueTooth HCI socket" def __init__(self, iface=0x10000, type=None): s = socket.socket(socket.AF_BLUETOOTH, socket.SOCK_RAW, socket.BTPROTO_HCI) s.setsockopt(socket.SOL_HCI, socket.HCI_DATA_DIR,1) s.setsockopt(socket.SOL_HCI, socket.HCI_TIME_STAMP,1) s.setsockopt(socket.SOL_HCI, socket.HCI_FILTER, struct.pack("IIIh2x", 0xffffffff,0xffffffff,0xffffffff,0)) #type mask, event mask, event mask, opcode s.bind((iface,)) self.ins = self.outs = s # s.connect((peer,0)) def recv(self, x): return HCI_Hdr(self.ins.recv(x)) class sockaddr_hci(Structure): _fields_ = [ ("sin_family", c_ushort), ("hci_dev", c_ushort), ("hci_channel", c_ushort), ] class BluetoothSocketError(BaseException): pass class BluetoothCommandError(BaseException): pass class BluetoothUserSocket(SuperSocket): desc = "read/write H4 over a Bluetooth user channel" def __init__(self, adapter=0): # s = socket.socket(socket.AF_BLUETOOTH, socket.SOCK_RAW, socket.BTPROTO_HCI) # s.bind((0,1)) # yeah, if only # thanks to Python's weak ass socket and bind implementations, we have # to call down into libc with ctypes sockaddr_hcip = POINTER(sockaddr_hci) cdll.LoadLibrary("libc.so.6") libc = CDLL("libc.so.6") socket_c = libc.socket socket_c.argtypes = (c_int, c_int, c_int); socket_c.restype = c_int bind = libc.bind bind.argtypes = (c_int, POINTER(sockaddr_hci), c_int) bind.restype = c_int ######## ## actual code s = socket_c(31, 3, 1) # (AF_BLUETOOTH, SOCK_RAW, HCI_CHANNEL_USER) if s < 0: raise BluetoothSocketError("Unable to open PF_BLUETOOTH socket") sa = sockaddr_hci() sa.sin_family = 31 # AF_BLUETOOTH sa.hci_dev = adapter # adapter index sa.hci_channel = 1 # HCI_USER_CHANNEL r = bind(s, sockaddr_hcip(sa), sizeof(sa)) if r != 0: raise BluetoothSocketError("Unable to bind") self.ins = self.outs = socket.fromfd(s, 31, 3, 1) def send_command(self, cmd): opcode = cmd.opcode self.send(cmd) while True: r = self.recv() if r.type == 0x04 and r.code == 0xe and r.opcode == opcode: if r.status != 0: raise BluetoothCommandError("Command %x failed with %x" % (opcode, r.status)) return r def recv(self, x=512): return HCI_Hdr(self.ins.recv(x)) def readable(self, timeout=0): (ins, outs, foo) = select([self.ins], [], [], timeout) return len(ins) > 0 def flush(self): while self.readable(): self.recv() ## Bluetooth @conf.commands.register def srbt(peer, pkts, inter=0.1, *args, **kargs): """send and receive using a bluetooth socket""" s = conf.BTsocket(peer=peer) a,b = sndrcv(s,pkts,inter=inter,*args,**kargs) s.close() return a,b @conf.commands.register def srbt1(peer, pkts, *args, **kargs): """send and receive 1 packet using a bluetooth socket""" a,b = srbt(peer, pkts, *args, **kargs) if len(a) > 0: return a[0][1] conf.BTsocket = BluetoothL2CAPSocket
1
11,095
@p-l- Hey, it seems that this was len-4?!
secdev-scapy
py
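The review question above asks whether the patched `struct.pack("<H", len(pay))` still matches the old `len(p)-4`. Going by the `HCI_ACL_Hdr` definition quoted in this record (a 1-byte handle, a 1-byte flags field and a 2-byte little-endian length), `p` inside `post_build` is the 4-byte header plus the payload, so the two formulations encode the same value. A minimal standalone sketch, not Scapy code itself, assuming that 4-byte header layout:

```python
import struct

# Placeholder bytes standing in for what HCI_ACL_Hdr.post_build sees:
# a 4-byte header (handle, flags, 16-bit len still unset) plus the payload.
header = b"\x00\x00\x00\x00"
payload = b"\x01\x02\x03\x04\x05"
p = header + payload

old_style = struct.pack("<H", len(p) - 4)    # length the original chr()-based code wrote
new_style = struct.pack("<H", len(payload))  # length written by the patched line
assert old_style == new_style
print(old_style)  # b'\x05\x00' -> little-endian 5, the payload length
```

A side note, hedged as general Python knowledge rather than anything stated in the record: the `struct.pack` form also produces `bytes`, whereas the original `chr()` concatenation only works on Python 2 strings.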
@@ -23,12 +23,14 @@ import AdBlockerWarning from './ad-blocker-warning'; import { render } from '../../../../../tests/js/test-utils'; import { STORE_NAME } from '../datastore/constants'; -const setupAdBlockerRegistry = async ( registry ) => { - await registry.dispatch( STORE_NAME ).receiveIsAdBlockerActive( true ); +const setupAdBlockerRegistry = ( registry ) => { + registry.dispatch( STORE_NAME ).receiveSettings( {} ); + registry.dispatch( STORE_NAME ).receiveIsAdBlockerActive( true ); }; -const setupNoAdBlockerRegistry = async ( registry ) => { - await registry.dispatch( STORE_NAME ).receiveIsAdBlockerActive( false ); +const setupNoAdBlockerRegistry = ( registry ) => { + registry.dispatch( STORE_NAME ).receiveSettings( {} ); + registry.dispatch( STORE_NAME ).receiveIsAdBlockerActive( false ); }; describe( 'AdBlockerWarning', () => {
1
/** * AdSense AdBlocker Warning component tests. * * Site Kit by Google, Copyright 2020 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * Internal dependencies */ import AdBlockerWarning from './ad-blocker-warning'; import { render } from '../../../../../tests/js/test-utils'; import { STORE_NAME } from '../datastore/constants'; const setupAdBlockerRegistry = async ( registry ) => { await registry.dispatch( STORE_NAME ).receiveIsAdBlockerActive( true ); }; const setupNoAdBlockerRegistry = async ( registry ) => { await registry.dispatch( STORE_NAME ).receiveIsAdBlockerActive( false ); }; describe( 'AdBlockerWarning', () => { it( 'should render the warning when an AdBlocker is active', async () => { const { container } = render( <AdBlockerWarning />, { setupRegistry: setupAdBlockerRegistry } ); expect( container.querySelector( '.googlesitekit-settings-module-warning' ) ).not.toEqual( null ); } ); it( 'should render nothing when no AdBlocker is active', async () => { const { container } = render( <AdBlockerWarning />, { setupRegistry: setupNoAdBlockerRegistry } ); expect( container.firstChild ).toEqual( null ); } ); } );
1
29,213
This is something I added that resolved a few large errors in the output, although I don't think it is related to the refactor here.
google-site-kit-wp
js
@@ -200,7 +200,6 @@ public class ExternalPlayerFragment extends Fragment { .into(imgvCover); if (controller != null && controller.isPlayingVideoLocally()) { - butPlay.setVisibility(View.GONE); ((MainActivity) getActivity()).getBottomSheet().setLocked(true); ((MainActivity) getActivity()).getBottomSheet().setState(BottomSheetBehavior.STATE_COLLAPSED); } else {
1
package de.danoeh.antennapod.fragment; import android.content.Intent; import android.os.Bundle; import android.util.Log; import android.view.LayoutInflater; import android.view.View; import android.view.ViewGroup; import android.widget.ImageButton; import android.widget.ImageView; import android.widget.ProgressBar; import android.widget.TextView; import androidx.fragment.app.Fragment; import com.bumptech.glide.Glide; import com.bumptech.glide.request.RequestOptions; import com.google.android.material.bottomsheet.BottomSheetBehavior; import de.danoeh.antennapod.R; import de.danoeh.antennapod.activity.MainActivity; import de.danoeh.antennapod.core.event.PlaybackPositionEvent; import de.danoeh.antennapod.core.feed.MediaType; import de.danoeh.antennapod.core.feed.util.ImageResourceUtils; import de.danoeh.antennapod.core.glide.ApGlideSettings; import de.danoeh.antennapod.core.service.playback.PlaybackService; import de.danoeh.antennapod.core.util.playback.Playable; import de.danoeh.antennapod.core.util.playback.PlaybackController; import io.reactivex.Maybe; import io.reactivex.android.schedulers.AndroidSchedulers; import io.reactivex.disposables.Disposable; import io.reactivex.schedulers.Schedulers; import org.greenrobot.eventbus.EventBus; import org.greenrobot.eventbus.Subscribe; import org.greenrobot.eventbus.ThreadMode; /** * Fragment which is supposed to be displayed outside of the MediaplayerActivity. */ public class ExternalPlayerFragment extends Fragment { public static final String TAG = "ExternalPlayerFragment"; private ImageView imgvCover; private TextView txtvTitle; private ImageButton butPlay; private TextView feedName; private ProgressBar progressBar; private PlaybackController controller; private Disposable disposable; public ExternalPlayerFragment() { super(); } @Override public View onCreateView(LayoutInflater inflater, ViewGroup container, Bundle savedInstanceState) { View root = inflater.inflate(R.layout.external_player_fragment, container, false); imgvCover = root.findViewById(R.id.imgvCover); txtvTitle = root.findViewById(R.id.txtvTitle); butPlay = root.findViewById(R.id.butPlay); feedName = root.findViewById(R.id.txtvAuthor); progressBar = root.findViewById(R.id.episodeProgress); root.findViewById(R.id.fragmentLayout).setOnClickListener(v -> { Log.d(TAG, "layoutInfo was clicked"); if (controller != null && controller.getMedia() != null) { if (controller.getMedia().getMediaType() == MediaType.AUDIO) { ((MainActivity) getActivity()).getBottomSheet().setState(BottomSheetBehavior.STATE_EXPANDED); } else { Intent intent = PlaybackService.getPlayerActivityIntent(getActivity(), controller.getMedia()); startActivity(intent); } } }); return root; } @Override public void onActivityCreated(Bundle savedInstanceState) { super.onActivityCreated(savedInstanceState); butPlay.setOnClickListener(v -> { if (controller != null) { controller.playPause(); } }); loadMediaInfo(); } private PlaybackController setupPlaybackController() { return new PlaybackController(getActivity()) { @Override public void onPositionObserverUpdate() { ExternalPlayerFragment.this.onPositionObserverUpdate(); } @Override public ImageButton getPlayButton() { return butPlay; } @Override public boolean loadMediaInfo() { return ExternalPlayerFragment.this.loadMediaInfo(); } @Override public void setupGUI() { ExternalPlayerFragment.this.loadMediaInfo(); } @Override public void onShutdownNotification() { ((MainActivity) getActivity()).setPlayerVisible(false); } @Override public void onPlaybackEnd() { ((MainActivity) 
getActivity()).setPlayerVisible(false); } }; } @Override public void onStart() { super.onStart(); controller = setupPlaybackController(); controller.init(); loadMediaInfo(); EventBus.getDefault().register(this); } @Override public void onStop() { super.onStop(); if (controller != null) { controller.release(); controller = null; } EventBus.getDefault().unregister(this); } @Subscribe(threadMode = ThreadMode.MAIN) public void onEventMainThread(PlaybackPositionEvent event) { onPositionObserverUpdate(); } @Override public void onDestroy() { super.onDestroy(); Log.d(TAG, "Fragment is about to be destroyed"); if (disposable != null) { disposable.dispose(); } } @Override public void onPause() { super.onPause(); if (controller != null) { controller.pause(); } } private boolean loadMediaInfo() { Log.d(TAG, "Loading media info"); if (controller == null) { Log.w(TAG, "loadMediaInfo was called while PlaybackController was null!"); return false; } if (disposable != null) { disposable.dispose(); } disposable = Maybe.fromCallable(() -> controller.getMedia()) .subscribeOn(Schedulers.io()) .observeOn(AndroidSchedulers.mainThread()) .subscribe(this::updateUi, error -> Log.e(TAG, Log.getStackTraceString(error)), () -> ((MainActivity) getActivity()).setPlayerVisible(false)); return true; } private void updateUi(Playable media) { if (media == null) { return; } ((MainActivity) getActivity()).setPlayerVisible(true); txtvTitle.setText(media.getEpisodeTitle()); feedName.setText(media.getFeedTitle()); onPositionObserverUpdate(); Glide.with(getActivity()) .load(ImageResourceUtils.getImageLocation(media)) .apply(new RequestOptions() .placeholder(R.color.light_gray) .error(R.color.light_gray) .diskCacheStrategy(ApGlideSettings.AP_DISK_CACHE_STRATEGY) .fitCenter() .dontAnimate()) .into(imgvCover); if (controller != null && controller.isPlayingVideoLocally()) { butPlay.setVisibility(View.GONE); ((MainActivity) getActivity()).getBottomSheet().setLocked(true); ((MainActivity) getActivity()).getBottomSheet().setState(BottomSheetBehavior.STATE_COLLAPSED); } else { butPlay.setVisibility(View.VISIBLE); ((MainActivity) getActivity()).getBottomSheet().setLocked(false); } } private void onPositionObserverUpdate() { if (controller == null) { return; } else if (controller.getPosition() == PlaybackService.INVALID_TIME || controller.getDuration() == PlaybackService.INVALID_TIME) { return; } progressBar.setProgress((int) ((double) controller.getPosition() / controller.getDuration() * 100)); } }
1
17,104
I have removed this line because whenever a video is played in AntennaPod, this line was making the play button invisible.
AntennaPod-AntennaPod
java
@@ -0,0 +1,10 @@ +package org.apache.servicecomb.foundation.vertx; + +import org.junit.Test; + +public class MyTest { + @Test + public void myTest() { + System.out.println(System.getProperty("java.io.tmpdir")); + } +}
1
1
12,292
remove temporary code
apache-servicecomb-java-chassis
java
@@ -154,4 +154,11 @@ public interface BlockHeader { * @return The Keccak 256-bit hash of this header. */ Hash getBlockHash(); + + /** + * The BASEFEE of this header. + * + * @return TheBASEFEE of this header. + */ + Long getBaseFee(); }
1
/* * Copyright ConsenSys AG. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. * * SPDX-License-Identifier: Apache-2.0 */ package org.hyperledger.besu.plugin.data; import org.apache.tuweni.bytes.Bytes; /** * The minimum set of data for a BlockHeader, as defined in the <a href= * "https://ethereum.github.io/yellowpaper/paper.pdf">Ethereum Yellow Paper</a>. */ public interface BlockHeader { /** * The Keccak 256-bit hash of the parent block’s header, in its entirety. * * @return The Keccak 256-bit hash of the parent block’s header, in its entirety. */ Hash getParentHash(); /** * The Keccak 256-bit hash of the ommers list portion of this block. * * @return The Keccak 256-bit hash of the ommers list portion of this block. */ Hash getOmmersHash(); /** * The 160-bit address to which all fees collected from the successful mining of this block be * transferred. * * <p>The name in the yellow paper is beneficiary. * * @return The 160-bit address to which all fees collected from the successful mining of this * block be transferred. */ Address getCoinbase(); /** * The Keccak 256-bit hash of the root node of the state trie, after all transactions are executed * and finalisations applied. * * @return The Keccak 256-bit hash of the root node of the state trie, after all transactions are * executed and finalisations applied. */ Hash getStateRoot(); /** * The Keccak 256-bit hash of theroot node of the trie structure populated with each transaction * in the transactions list portion of the block. * * @return The Keccak 256-bit hash of theroot node of the trie structure populated with each * transaction in the transactions list portion of the block. */ Hash getTransactionsRoot(); /** * The Keccak 256-bit hash of the root node of the trie structure populated with the receipts of * each transaction in the transactions list portion of the block. * * @return The Keccak 256-bit hash of the root node of the trie structure populated with the * receipts of each transaction in the transactions list portion of the block. */ Hash getReceiptsRoot(); /** * The Bloom filter composed from indexable information (logger address and log topics) contained * in each log entry from the receipt of each transaction in the transactions list. * * @return The Bloom filter composed from indexable information (logger address and log topics) * contained in each log entry from the receipt of each transaction in the transactions list. */ Bytes getLogsBloom(); /** * A scalar value corresponding to the difficulty level of this block. This can be calculated from * the previous block’s difficulty level and the timestamp. * * @return A UInt256 value corresponding to the difficulty level of this block. This can be * calculated from the previous block’s difficulty level and the timestamp. */ Quantity getDifficulty(); /** * A scalar value equal to the number of ancestor blocks. The genesis block has a number of zero. * * @return A scalar value equal to the number of ancestor blocks. The genesis block has a number * of zero. 
*/ long getNumber(); /** * A scalar value equal to the current limit of gas expenditure per block. * * @return A scalar value equal to the current limit of gas expenditure per block. */ long getGasLimit(); /** * A scalar value equal to the total gas used in transactions in this block. * * @return A scalar value equal to the total gas used in transactions in this block. */ long getGasUsed(); /** * A scalar value equal to the reasonable output of Unix’s time() at this block’s inception. * * @return A scalar value equal to the reasonable output of Unix’s time() at this block’s * inception. */ long getTimestamp(); /** * An arbitrary byte array containing data relevant to this block. This must be 32 bytes or fewer. * * @return An arbitrary byte array containing data relevant to this block. This must be 32 bytes * or fewer. */ Bytes getExtraData(); /** * A 256-bit hash which, combined with the nonce, proves that a sufficient amount of computation * has been carried out on this block. * * @return A 256-bit hash which, combined with the nonce, proves that a sufficient amount of * computation has been carried out on this block. */ Hash getMixHash(); /** * A 64-bit value which, combined with the mixhash, proves that a sufficient amount of computation * has been carried out on this block. * * @return A 64-bit value which, combined with the mixhash, proves that a sufficient amount of * computation has been carried out on this block. */ long getNonce(); /** * The Keccak 256-bit hash of this header. * * @return The Keccak 256-bit hash of this header. */ Hash getBlockHash(); }
1
22,002
* Should be tagged `@Unstable`
* Should be a default method, returning `null`
* Also, perhaps `Optional<Long>` instead of just `Long`? Always empty when the BASEFEE isn't relevant? If so the default is `Optional.empty()`
hyperledger-besu
java
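To make the suggestions in the review comment above concrete, here is a minimal sketch, not the real `org.hyperledger.besu.plugin.data.BlockHeader`: the accessor becomes a default method so existing implementations keep compiling, it returns an `Optional<Long>` that is empty when no BASEFEE is relevant, and it carries an `@Unstable` marker. The `Unstable` annotation is declared locally here as a placeholder for whatever marker annotation the project actually uses.

```java
import java.util.Optional;

// Hypothetical sketch of the reviewed interface change, not Besu's actual plugin API.
public interface BlockHeaderSketch {

  /** Placeholder for the project's real @Unstable marker annotation. */
  @interface Unstable {}

  /**
   * The BASEFEE of this header.
   *
   * @return the BASEFEE, or an empty Optional when it is not relevant to this block.
   */
  @Unstable
  default Optional<Long> getBaseFee() {
    return Optional.empty();
  }
}
```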
@@ -30,6 +30,7 @@ import { STORE_NAME as CORE_SITE } from '../datastore/site/constants'; import { STORE_NAME as CORE_USER } from '../datastore/user/constants'; const MODULE_SLUG = 'test-slug'; +const TEST_STORE_NAME = 'test/' + MODULE_SLUG; describe( 'createInfoStore store', () => { let registry;
1
/** * Info datastore functions tests. * * Site Kit by Google, Copyright 2020 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * WordPress dependencies */ import { addQueryArgs } from '@wordpress/url'; /** * Internal dependencies */ import { createTestRegistry, unsubscribeFromAll } from 'tests/js/utils'; import { createInfoStore } from './create-info-store'; import { STORE_NAME as CORE_SITE } from '../datastore/site/constants'; import { STORE_NAME as CORE_USER } from '../datastore/user/constants'; const MODULE_SLUG = 'test-slug'; describe( 'createInfoStore store', () => { let registry; beforeEach( () => { registry = createTestRegistry(); } ); afterEach( () => { unsubscribeFromAll( registry ); } ); describe( 'storeName', () => { it( 'returns the correct default store name', () => { const { STORE_NAME } = createInfoStore( MODULE_SLUG, {} ); expect( STORE_NAME ).toEqual( `modules/${ MODULE_SLUG }` ); } ); it( 'returns the passed store name', () => { const { STORE_NAME } = createInfoStore( MODULE_SLUG, { storeName: 'test/createstore', } ); expect( STORE_NAME ).toEqual( 'test/createstore' ); } ); } ); describe( 'selectors', () => { describe( 'getAdminScreenURL', () => { // Uses google dashboard when no `adminPage` is provided. it( 'returns the adminScreenURL page if no `adminPage` is provided', () => { registry.dispatch( CORE_SITE ).receiveSiteInfo( { adminURL: 'http://example.com/wp-admin/' } ); const { STORE_NAME, ...store } = createInfoStore( MODULE_SLUG ); registry.registerStore( STORE_NAME, store ); const adminSreenURL = registry.select( STORE_NAME ).getAdminScreenURL(); const { origin, pathname } = new URL( adminSreenURL ); expect( origin + pathname ).toEqual( 'http://example.com/wp-admin/admin.php' ); expect( adminSreenURL ).toMatchQueryParameters( { page: 'googlesitekit-dashboard' } ); } ); // It uses `adminPage` when provided. it( 'returns adminPage url when `adminPage` is provided', () => { registry.dispatch( CORE_SITE ).receiveSiteInfo( { adminURL: 'http://example.com/wp-admin/' } ); const { STORE_NAME, ...store } = createInfoStore( MODULE_SLUG, { adminPage: 'test-admin-page' } ); registry.registerStore( STORE_NAME, store ); const adminSreenURL = registry.select( STORE_NAME ).getAdminScreenURL(); const { origin, pathname } = new URL( adminSreenURL ); expect( origin + pathname ).toEqual( 'http://example.com/wp-admin/admin.php' ); expect( adminSreenURL ).toMatchQueryParameters( { page: 'test-admin-page' } ); } ); // It adds extra query parameters if provided. 
it( 'adds extra query parameters to the adminScreenURL when provided', () => { registry.dispatch( CORE_SITE ).receiveSiteInfo( { adminURL: 'http://example.com/wp-admin/' } ); const { STORE_NAME, ...store } = createInfoStore( MODULE_SLUG ); registry.registerStore( STORE_NAME, store ); const adminSreenURL = registry.select( STORE_NAME ).getAdminScreenURL( { foo: 'bar' } ); const { origin, pathname } = new URL( adminSreenURL ); expect( origin + pathname ).toEqual( 'http://example.com/wp-admin/admin.php' ); expect( adminSreenURL ).toMatchQueryParameters( { page: 'googlesitekit-dashboard', foo: 'bar' } ); } ); } ); describe( 'getAdminReauthURL', () => { // It generates an adminReauthURL with no slug passed. it( 'works with no slug passed', () => { registry.dispatch( CORE_SITE ).receiveSiteInfo( { adminURL: 'http://example.com/wp-admin/' } ); registry.dispatch( CORE_USER ).receiveGetAuthentication( { needsReauthentication: false } ); const { STORE_NAME, ...store } = createInfoStore(); registry.registerStore( STORE_NAME, store ); const adminReauthURL = registry.select( STORE_NAME ).getAdminReauthURL(); const { origin, pathname } = new URL( adminReauthURL ); expect( origin + pathname ).toEqual( 'http://example.com/wp-admin/admin.php' ); expect( adminReauthURL ).toMatchQueryParameters( { page: 'googlesitekit-dashboard', reAuth: 'true' } ); } ); // It generates an adminReauthURL with reAuth set to false it( 'it generates an adminReauthURL with reAuth set to false', () => { registry.dispatch( CORE_SITE ).receiveSiteInfo( { adminURL: 'http://example.com/wp-admin/' } ); registry.dispatch( CORE_USER ).receiveGetAuthentication( { needsReauthentication: false } ); const { STORE_NAME, ...store } = createInfoStore(); registry.registerStore( STORE_NAME, store ); const adminReauthURL = registry.select( STORE_NAME ).getAdminReauthURL( false ); const { origin, pathname } = new URL( adminReauthURL ); expect( origin + pathname ).toEqual( 'http://example.com/wp-admin/admin.php' ); expect( adminReauthURL ).toMatchQueryParameters( { page: 'googlesitekit-dashboard', reAuth: 'false' } ); } ); // It adds notification_success parameter when needsReautentication is false and requireSetup is false. it( 'adds notification query parameter to the adminReauthURL when needsReautentication is false and requireSetup is false', () => { registry.dispatch( CORE_SITE ).receiveSiteInfo( { adminURL: 'http://example.com/wp-admin/' } ); registry.dispatch( CORE_USER ).receiveGetAuthentication( { needsReauthentication: false } ); const { STORE_NAME, ...store } = createInfoStore( MODULE_SLUG, { requiresSetup: false } ); registry.registerStore( STORE_NAME, store ); const adminReauthURL = registry.select( STORE_NAME ).getAdminReauthURL(); const { origin, pathname } = new URL( adminReauthURL ); expect( origin + pathname ).toEqual( 'http://example.com/wp-admin/admin.php' ); expect( adminReauthURL ).toMatchQueryParameters( { page: 'googlesitekit-dashboard', slug: MODULE_SLUG, notification: 'authentication_success', } ); } ); // Uses connect URL when needsReautentication is true. 
it( 'adds connectURL to the adminReauthURL when needsReautentication is true', () => { const connectURLBase = 'http://connect.com/wp-admin/admin.php'; const connectURLQueryParams = { page: 'googlesitekit-splash', googlesitekit_connect: '1', nonce: '12345', }; const connectURL = addQueryArgs( connectURLBase, connectURLQueryParams ); registry.dispatch( CORE_SITE ).receiveSiteInfo( { adminURL: 'http://example.com/wp-admin/' } ); registry.dispatch( CORE_USER ).receiveGetAuthentication( { needsReauthentication: true } ); registry.dispatch( CORE_USER ).receiveConnectURL( connectURL ); const { STORE_NAME, ...store } = createInfoStore( MODULE_SLUG ); registry.registerStore( STORE_NAME, store ); const adminReauthURL = registry.select( STORE_NAME ).getAdminReauthURL(); const { origin, pathname } = new URL( adminReauthURL ); expect( origin + pathname ).toEqual( connectURLBase ); expect( adminReauthURL ).toMatchQueryParameters( { ...connectURLQueryParams, redirect: `http://example.com/wp-admin/admin.php?page=googlesitekit-dashboard&slug=${ MODULE_SLUG }&reAuth=true`, status: 'true', } ); } ); } ); } ); } );
1
34,704
Minor detail, but let's use template literal (interpolated string) syntax here instead.
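As a hedged sketch of this suggestion (the concrete line under review is not shown in this excerpt, so the URL and the MODULE_SLUG value below are placeholders), a template literal replaces string concatenation like so:

// Hypothetical illustration only: prefer an interpolated template string over
// string concatenation when building a URL from a variable.
const MODULE_SLUG = 'test-module'; // assumed placeholder value

// String concatenation:
const concatenated = 'http://example.com/wp-admin/admin.php?page=googlesitekit-dashboard&slug=' + MODULE_SLUG + '&reAuth=true';

// Interpolated template string (what the comment asks for):
const interpolated = `http://example.com/wp-admin/admin.php?page=googlesitekit-dashboard&slug=${ MODULE_SLUG }&reAuth=true`;

console.log( concatenated === interpolated ); // true: both produce the same URL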
google-site-kit-wp
js
@@ -53,6 +53,11 @@ type TimeChaosSpec struct { // TimeOffset defines the delta time of injected program TimeOffset TimeOffset `json:"timeOffset"` + // ClockIds defines all affected clock id + // All available options are ["CLOCK_REALTIME","CLOCK_MONOTONIC","CLOCK_PROCESS_CPUTIME_ID","CLOCK_THREAD_CPUTIME_ID","CLOCK_MONOTONIC_RAW","CLOCK_REALTIME_COARSE","CLOCK_MONOTONIC_COARSE","CLOCK_BOOTTIME","CLOCK_REALTIME_ALARM","CLOCK_BOOTTIME_ALARM"] + // Default value is ["CLOCK_REALTIME"] + ClockIds []string `json:"clockIds,omitempty"` + // Duration represents the duration of the chaos action Duration *string `json:"duration"`
1
// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package v1alpha1 import ( "time" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // +kubebuilder:object:root=true // TimeChaos is the Schema for the timechaos API type TimeChaos struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` // Spec defines the behavior of a time chaos experiment Spec TimeChaosSpec `json:"spec"` // +optional // Most recently observed status of the time chaos experiment Status TimeChaosStatus `json:"status"` } // TimeChaosSpec defines the desired state of TimeChaos type TimeChaosSpec struct { // Mode defines the mode to run chaos action. // Supported mode: one / all / fixed / fixed-percent / random-max-percent Mode PodMode `json:"mode"` // Value is required when the mode is set to `FixedPodMode` / `FixedPercentPodMod` / `RandomMaxPercentPodMod`. // If `FixedPodMode`, provide an integer of pods to do chaos action. // If `FixedPercentPodMod`, provide a number from 0-100 to specify the max % of pods the server can do chaos action. // If `RandomMaxPercentPodMod`, provide a number from 0-100 to specify the % of pods to do chaos action // +optional Value string `json:"value"` // Selector is used to select pods that are used to inject chaos action. Selector SelectorSpec `json:"selector"` // TimeOffset defines the delta time of injected program TimeOffset TimeOffset `json:"timeOffset"` // Duration represents the duration of the chaos action Duration *string `json:"duration"` // Scheduler defines some schedule rules to control the running time of the chaos experiment about time. Scheduler *SchedulerSpec `json:"scheduler"` // Next time when this action will be applied again // +optional NextStart *metav1.Time `json:"nextStart,omitempty"` // Next time when this action will be recovered // +optional NextRecover *metav1.Time `json:"nextRecover,omitempty"` } // GetSelector is a getter for Selector (for implementing SelectSpec) func (in *TimeChaosSpec) GetSelector() SelectorSpec { return in.Selector } // GetMode is a getter for Mode (for implementing SelectSpec) func (in *TimeChaosSpec) GetMode() PodMode { return in.Mode } // GetValue is a getter for Value (for implementing SelectSpec) func (in *TimeChaosSpec) GetValue() string { return in.Value } // TimeOffset defines the delta time of injected program // As `clock_gettime` return a struct contains two field: `tv_sec` and `tv_nsec`. // `Sec` is the offset of seconds, corresponding to `tv_sec` field. // `NSec` is the offset of nanoseconds, corresponding to `tv_nsec` field. 
type TimeOffset struct { Sec int64 `json:"sec"` NSec int64 `json:"nsec"` } // TimeChaosStatus defines the observed state of TimeChaos type TimeChaosStatus struct { ChaosStatus `json:",inline"` } // GetDuration gets the duration of TimeChaos func (in *TimeChaos) GetDuration() (*time.Duration, error) { if in.Spec.Duration == nil { return nil, nil } duration, err := time.ParseDuration(*in.Spec.Duration) if err != nil { return nil, err } return &duration, nil } // GetNextStart gets NextStart field of TimeChaos func (in *TimeChaos) GetNextStart() time.Time { if in.Spec.NextStart == nil { return time.Time{} } return in.Spec.NextStart.Time } // SetNextStart sets NextStart field of TimeChaos func (in *TimeChaos) SetNextStart(t time.Time) { if t.IsZero() { in.Spec.NextStart = nil return } if in.Spec.NextStart == nil { in.Spec.NextStart = &metav1.Time{} } in.Spec.NextStart.Time = t } // GetNextRecover get NextRecover field of TimeChaos func (in *TimeChaos) GetNextRecover() time.Time { if in.Spec.NextRecover == nil { return time.Time{} } return in.Spec.NextRecover.Time } // SetNextRecover sets NextRecover field of TimeChaos func (in *TimeChaos) SetNextRecover(t time.Time) { if t.IsZero() { in.Spec.NextRecover = nil return } if in.Spec.NextRecover == nil { in.Spec.NextRecover = &metav1.Time{} } in.Spec.NextRecover.Time = t } // GetScheduler returns the scheduler of TimeChaos func (in *TimeChaos) GetScheduler() *SchedulerSpec { return in.Spec.Scheduler } // GetStatus returns the status of TimeChaos func (in *TimeChaos) GetStatus() *ChaosStatus { return &in.Status.ChaosStatus } // IsDeleted returns whether this resource has been deleted func (in *TimeChaos) IsDeleted() bool { return !in.DeletionTimestamp.IsZero() } // +kubebuilder:object:root=true // TimeChaosList contains a list of TimeChaos type TimeChaosList struct { metav1.TypeMeta `json:",inline"` metav1.ListMeta `json:"metadata,omitempty"` Items []TimeChaos `json:"items"` } func init() { SchemeBuilder.Register(&TimeChaos{}, &TimeChaosList{}) }
1
13,643
This line is too long; please split it into multiple lines.
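As an illustrative sketch of this comment, using the field name and clock id options taken from the diff above (only the line wrapping is new), the long ClockIds doc comment could be split like this:

package v1alpha1

// Sketch only: the ClockIds doc comment from the diff above, wrapped across
// several shorter lines instead of one very long line.
type timeChaosSpecSketch struct {
	// ClockIds defines all affected clock ids.
	// All available options are:
	//   ["CLOCK_REALTIME", "CLOCK_MONOTONIC", "CLOCK_PROCESS_CPUTIME_ID",
	//    "CLOCK_THREAD_CPUTIME_ID", "CLOCK_MONOTONIC_RAW", "CLOCK_REALTIME_COARSE",
	//    "CLOCK_MONOTONIC_COARSE", "CLOCK_BOOTTIME", "CLOCK_REALTIME_ALARM",
	//    "CLOCK_BOOTTIME_ALARM"]
	// Default value is ["CLOCK_REALTIME"].
	ClockIds []string `json:"clockIds,omitempty"`
}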
chaos-mesh-chaos-mesh
go
@@ -2865,7 +2865,7 @@ void AdjustValidatorOptions(const DeviceExtensions &device_extensions, const Dev if (device_extensions.vk_khr_uniform_buffer_standard_layout && enabled_features.core12.uniformBufferStandardLayout == VK_TRUE) { options.SetUniformBufferStandardLayout(true); } - if (device_extensions.vk_ext_scalar_block_layout && enabled_features.core12.scalarBlockLayout == VK_TRUE) { + if (device_extensions.vk_ext_scalar_block_layout || enabled_features.core12.scalarBlockLayout == VK_TRUE) { options.SetScalarBlockLayout(true); } if (device_extensions.vk_khr_workgroup_memory_explicit_layout &&
1
/* Copyright (c) 2015-2021 The Khronos Group Inc. * Copyright (c) 2015-2021 Valve Corporation * Copyright (c) 2015-2021 LunarG, Inc. * Copyright (C) 2015-2021 Google Inc. * Modifications Copyright (C) 2020 Advanced Micro Devices, Inc. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * Author: Chris Forbes <[email protected]> * Author: Dave Houlton <[email protected]> * Author: Tobias Hector <[email protected]> */ #include "shader_validation.h" #include <cassert> #include <cinttypes> #include <cmath> #include <sstream> #include <string> #include <vector> #include <spirv/unified1/spirv.hpp> #include "vk_enum_string_helper.h" #include "vk_layer_data.h" #include "vk_layer_utils.h" #include "chassis.h" #include "core_validation.h" #include "xxhash.h" static shader_stage_attributes shader_stage_attribs[] = { {"vertex shader", false, false, VK_SHADER_STAGE_VERTEX_BIT}, {"tessellation control shader", true, true, VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT}, {"tessellation evaluation shader", true, false, VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT}, {"geometry shader", true, false, VK_SHADER_STAGE_GEOMETRY_BIT}, {"fragment shader", false, false, VK_SHADER_STAGE_FRAGMENT_BIT}, }; static bool IsNarrowNumericType(spirv_inst_iter type) { if (type.opcode() != spv::OpTypeInt && type.opcode() != spv::OpTypeFloat) return false; return type.word(2) < 64; } static bool TypesMatch(SHADER_MODULE_STATE const *a, SHADER_MODULE_STATE const *b, unsigned a_type, unsigned b_type, bool a_arrayed, bool b_arrayed, bool relaxed) { // Walk two type trees together, and complain about differences auto a_insn = a->get_def(a_type); auto b_insn = b->get_def(b_type); assert(a_insn != a->end()); assert(b_insn != b->end()); // Ignore runtime-sized arrays-- they cannot appear in these interfaces. if (a_arrayed && a_insn.opcode() == spv::OpTypeArray) { return TypesMatch(a, b, a_insn.word(2), b_type, false, b_arrayed, relaxed); } if (b_arrayed && b_insn.opcode() == spv::OpTypeArray) { // We probably just found the extra level of arrayness in b_type: compare the type inside it to a_type return TypesMatch(a, b, a_type, b_insn.word(2), a_arrayed, false, relaxed); } if (a_insn.opcode() == spv::OpTypeVector && relaxed && IsNarrowNumericType(b_insn)) { return TypesMatch(a, b, a_insn.word(2), b_type, a_arrayed, b_arrayed, false); } if (a_insn.opcode() != b_insn.opcode()) { return false; } if (a_insn.opcode() == spv::OpTypePointer) { // Match on pointee type. storage class is expected to differ return TypesMatch(a, b, a_insn.word(3), b_insn.word(3), a_arrayed, b_arrayed, relaxed); } if (a_arrayed || b_arrayed) { // If we havent resolved array-of-verts by here, we're not going to. return false; } switch (a_insn.opcode()) { case spv::OpTypeBool: return true; case spv::OpTypeInt: // Match on width, signedness return a_insn.word(2) == b_insn.word(2) && a_insn.word(3) == b_insn.word(3); case spv::OpTypeFloat: // Match on width return a_insn.word(2) == b_insn.word(2); case spv::OpTypeVector: // Match on element type, count. 
if (!TypesMatch(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false)) return false; if (relaxed && IsNarrowNumericType(a->get_def(a_insn.word(2)))) { return a_insn.word(3) >= b_insn.word(3); } else { return a_insn.word(3) == b_insn.word(3); } case spv::OpTypeMatrix: // Match on element type, count. return TypesMatch(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false) && a_insn.word(3) == b_insn.word(3); case spv::OpTypeArray: // Match on element type, count. these all have the same layout. we don't get here if b_arrayed. This differs from // vector & matrix types in that the array size is the id of a constant instruction, * not a literal within OpTypeArray return TypesMatch(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false) && a->GetConstantValueById(a_insn.word(3)) == b->GetConstantValueById(b_insn.word(3)); case spv::OpTypeStruct: // Match on all element types { if (a_insn.len() != b_insn.len()) { return false; // Structs cannot match if member counts differ } for (unsigned i = 2; i < a_insn.len(); i++) { if (!TypesMatch(a, b, a_insn.word(i), b_insn.word(i), a_arrayed, b_arrayed, false)) { return false; } } return true; } default: // Remaining types are CLisms, or may not appear in the interfaces we are interested in. Just claim no match. return false; } } static unsigned GetLocationsConsumedByFormat(VkFormat format) { switch (format) { case VK_FORMAT_R64G64B64A64_SFLOAT: case VK_FORMAT_R64G64B64A64_SINT: case VK_FORMAT_R64G64B64A64_UINT: case VK_FORMAT_R64G64B64_SFLOAT: case VK_FORMAT_R64G64B64_SINT: case VK_FORMAT_R64G64B64_UINT: return 2; default: return 1; } } static unsigned GetFormatType(VkFormat fmt) { if (FormatIsSInt(fmt)) return FORMAT_TYPE_SINT; if (FormatIsUInt(fmt)) return FORMAT_TYPE_UINT; if (FormatIsDepthAndStencil(fmt)) return FORMAT_TYPE_FLOAT | FORMAT_TYPE_UINT; if (fmt == VK_FORMAT_UNDEFINED) return 0; // everything else -- UNORM/SNORM/FLOAT/USCALED/SSCALED is all float in the shader. return FORMAT_TYPE_FLOAT; } static uint32_t GetShaderStageId(VkShaderStageFlagBits stage) { uint32_t bit_pos = uint32_t(u_ffs(stage)); return bit_pos - 1; } bool CoreChecks::ValidateViConsistency(VkPipelineVertexInputStateCreateInfo const *vi) const { // Walk the binding descriptions, which describe the step rate and stride of each vertex buffer. Each binding should // be specified only once. layer_data::unordered_map<uint32_t, VkVertexInputBindingDescription const *> bindings; bool skip = false; for (unsigned i = 0; i < vi->vertexBindingDescriptionCount; i++) { auto desc = &vi->pVertexBindingDescriptions[i]; auto &binding = bindings[desc->binding]; if (binding) { // TODO: "VUID-VkGraphicsPipelineCreateInfo-pStages-00742" perhaps? 
skip |= LogError(device, kVUID_Core_Shader_InconsistentVi, "Duplicate vertex input binding descriptions for binding %d", desc->binding); } else { binding = desc; } } return skip; } bool CoreChecks::ValidateViAgainstVsInputs(VkPipelineVertexInputStateCreateInfo const *vi, SHADER_MODULE_STATE const *vs, spirv_inst_iter entrypoint) const { bool skip = false; const auto inputs = vs->CollectInterfaceByLocation(entrypoint, spv::StorageClassInput, false); // Build index by location std::map<uint32_t, const VkVertexInputAttributeDescription *> attribs; if (vi) { for (uint32_t i = 0; i < vi->vertexAttributeDescriptionCount; ++i) { const auto num_locations = GetLocationsConsumedByFormat(vi->pVertexAttributeDescriptions[i].format); for (uint32_t j = 0; j < num_locations; ++j) { attribs[vi->pVertexAttributeDescriptions[i].location + j] = &vi->pVertexAttributeDescriptions[i]; } } } struct AttribInputPair { const VkVertexInputAttributeDescription *attrib = nullptr; const interface_var *input = nullptr; }; std::map<uint32_t, AttribInputPair> location_map; for (const auto &attrib_it : attribs) location_map[attrib_it.first].attrib = attrib_it.second; for (const auto &input_it : inputs) location_map[input_it.first.first].input = &input_it.second; for (const auto &location_it : location_map) { const auto location = location_it.first; const auto attrib = location_it.second.attrib; const auto input = location_it.second.input; if (attrib && !input) { skip |= LogPerformanceWarning(vs->vk_shader_module(), kVUID_Core_Shader_OutputNotConsumed, "Vertex attribute at location %" PRIu32 " not consumed by vertex shader", location); } else if (!attrib && input) { skip |= LogError(vs->vk_shader_module(), kVUID_Core_Shader_InputNotProduced, "Vertex shader consumes input at location %" PRIu32 " but not provided", location); } else if (attrib && input) { const auto attrib_type = GetFormatType(attrib->format); const auto input_type = vs->GetFundamentalType(input->type_id); // Type checking if (!(attrib_type & input_type)) { skip |= LogError(vs->vk_shader_module(), kVUID_Core_Shader_InterfaceTypeMismatch, "Attribute type of `%s` at location %" PRIu32 " does not match vertex shader input type of `%s`", string_VkFormat(attrib->format), location, vs->DescribeType(input->type_id).c_str()); } } else { // !attrib && !input assert(false); // at least one exists in the map } } return skip; } bool CoreChecks::ValidateFsOutputsAgainstRenderPass(SHADER_MODULE_STATE const *fs, spirv_inst_iter entrypoint, PIPELINE_STATE const *pipeline, uint32_t subpass_index) const { bool skip = false; const auto rpci = pipeline->rp_state->createInfo.ptr(); struct Attachment { const VkAttachmentReference2 *reference = nullptr; const VkAttachmentDescription2 *attachment = nullptr; const interface_var *output = nullptr; }; std::map<uint32_t, Attachment> location_map; const auto subpass = rpci->pSubpasses[subpass_index]; for (uint32_t i = 0; i < subpass.colorAttachmentCount; ++i) { auto const &reference = subpass.pColorAttachments[i]; location_map[i].reference = &reference; if (reference.attachment != VK_ATTACHMENT_UNUSED && rpci->pAttachments[reference.attachment].format != VK_FORMAT_UNDEFINED) { location_map[i].attachment = &rpci->pAttachments[reference.attachment]; } } // TODO: dual source blend index (spv::DecIndex, zero if not provided) const auto outputs = fs->CollectInterfaceByLocation(entrypoint, spv::StorageClassOutput, false); for (const auto &output_it : outputs) { auto const location = output_it.first.first; location_map[location].output = 
&output_it.second; } const bool alpha_to_coverage_enabled = pipeline->graphicsPipelineCI.pMultisampleState != NULL && pipeline->graphicsPipelineCI.pMultisampleState->alphaToCoverageEnable == VK_TRUE; for (const auto &location_it : location_map) { const auto reference = location_it.second.reference; if (reference != nullptr && reference->attachment == VK_ATTACHMENT_UNUSED) { continue; } const auto location = location_it.first; const auto attachment = location_it.second.attachment; const auto output = location_it.second.output; if (attachment && !output) { if (pipeline->attachments[location].colorWriteMask != 0) { skip |= LogWarning(fs->vk_shader_module(), kVUID_Core_Shader_InputNotProduced, "Attachment %" PRIu32 " not written by fragment shader; undefined values will be written to attachment", location); } } else if (!attachment && output) { if (!(alpha_to_coverage_enabled && location == 0)) { skip |= LogWarning(fs->vk_shader_module(), kVUID_Core_Shader_OutputNotConsumed, "fragment shader writes to output location %" PRIu32 " with no matching attachment", location); } } else if (attachment && output) { const auto attachment_type = GetFormatType(attachment->format); const auto output_type = fs->GetFundamentalType(output->type_id); // Type checking if (!(output_type & attachment_type)) { skip |= LogWarning(fs->vk_shader_module(), kVUID_Core_Shader_InterfaceTypeMismatch, "Attachment %" PRIu32 " of type `%s` does not match fragment shader output type of `%s`; resulting values are undefined", location, string_VkFormat(attachment->format), fs->DescribeType(output->type_id).c_str()); } } else { // !attachment && !output assert(false); // at least one exists in the map } } const auto output_zero = location_map.count(0) ? location_map[0].output : nullptr; bool location_zero_has_alpha = output_zero && fs->get_def(output_zero->type_id) != fs->end() && fs->GetComponentsConsumedByType(output_zero->type_id, false) == 4; if (alpha_to_coverage_enabled && !location_zero_has_alpha) { skip |= LogError(fs->vk_shader_module(), kVUID_Core_Shader_NoAlphaAtLocation0WithAlphaToCoverage, "fragment shader doesn't declare alpha output at location 0 even though alpha to coverage is enabled."); } return skip; } PushConstantByteState CoreChecks::ValidatePushConstantSetUpdate(const std::vector<uint8_t> &push_constant_data_update, const shader_struct_member &push_constant_used_in_shader, uint32_t &out_issue_index) const { const auto *used_bytes = push_constant_used_in_shader.GetUsedbytes(); const auto used_bytes_size = used_bytes->size(); if (used_bytes_size == 0) return PC_Byte_Updated; const auto push_constant_data_update_size = push_constant_data_update.size(); const auto *data = push_constant_data_update.data(); if ((*data == PC_Byte_Updated) && std::memcmp(data, data + 1, push_constant_data_update_size - 1) == 0) { if (used_bytes_size <= push_constant_data_update_size) { return PC_Byte_Updated; } const auto used_bytes_size1 = used_bytes_size - push_constant_data_update_size; const auto *used_bytes_data1 = used_bytes->data() + push_constant_data_update_size; if ((*used_bytes_data1 == 0) && std::memcmp(used_bytes_data1, used_bytes_data1 + 1, used_bytes_size1 - 1) == 0) { return PC_Byte_Updated; } } uint32_t i = 0; for (const auto used : *used_bytes) { if (used) { if (i >= push_constant_data_update.size() || push_constant_data_update[i] == PC_Byte_Not_Set) { out_issue_index = i; return PC_Byte_Not_Set; } else if (push_constant_data_update[i] == PC_Byte_Not_Updated) { out_issue_index = i; return PC_Byte_Not_Updated; } } 
++i; } return PC_Byte_Updated; } bool CoreChecks::ValidatePushConstantUsage(const PIPELINE_STATE &pipeline, SHADER_MODULE_STATE const *src, VkPipelineShaderStageCreateInfo const *pStage, const std::string &vuid) const { bool skip = false; // Temp workaround to prevent false positive errors // https://github.com/KhronosGroup/Vulkan-ValidationLayers/issues/2450 if (src->multiple_entry_points) { return skip; } // Validate directly off the offsets. this isn't quite correct for arrays and matrices, but is a good first step. const auto *entrypoint = src->FindEntrypointStruct(pStage->pName, pStage->stage); if (!entrypoint || !entrypoint->push_constant_used_in_shader.IsUsed()) { return skip; } std::vector<VkPushConstantRange> const *push_constant_ranges = pipeline.pipeline_layout->push_constant_ranges.get(); bool found_stage = false; for (auto const &range : *push_constant_ranges) { if (range.stageFlags & pStage->stage) { found_stage = true; std::string location_desc; std::vector<uint8_t> push_constant_bytes_set; if (range.offset > 0) { push_constant_bytes_set.resize(range.offset, PC_Byte_Not_Set); } push_constant_bytes_set.resize(range.offset + range.size, PC_Byte_Updated); uint32_t issue_index = 0; const auto ret = ValidatePushConstantSetUpdate(push_constant_bytes_set, entrypoint->push_constant_used_in_shader, issue_index); if (ret == PC_Byte_Not_Set) { const auto loc_descr = entrypoint->push_constant_used_in_shader.GetLocationDesc(issue_index); LogObjectList objlist(src->vk_shader_module()); objlist.add(pipeline.pipeline_layout->layout()); skip |= LogError(objlist, vuid, "Push constant buffer:%s in %s is out of range in %s.", loc_descr.c_str(), string_VkShaderStageFlags(pStage->stage).c_str(), report_data->FormatHandle(pipeline.pipeline_layout->layout()).c_str()); break; } } } if (!found_stage) { LogObjectList objlist(src->vk_shader_module()); objlist.add(pipeline.pipeline_layout->layout()); skip |= LogError(objlist, vuid, "Push constant is used in %s of %s. 
But %s doesn't set %s.", string_VkShaderStageFlags(pStage->stage).c_str(), report_data->FormatHandle(src->vk_shader_module()).c_str(), report_data->FormatHandle(pipeline.pipeline_layout->layout()).c_str(), string_VkShaderStageFlags(pStage->stage).c_str()); } return skip; } bool CoreChecks::ValidateBuiltinLimits(SHADER_MODULE_STATE const *src, spirv_inst_iter entrypoint) const { bool skip = false; // Currently all builtin tested are only found in fragment shaders if (entrypoint.word(1) != spv::ExecutionModelFragment) { return skip; } // Find all builtin from just the interface variables for (uint32_t id : FindEntrypointInterfaces(entrypoint)) { auto insn = src->get_def(id); assert(insn.opcode() == spv::OpVariable); const decoration_set decorations = src->get_decorations(insn.word(2)); // Currently don't need to search in structs if (((decorations.flags & decoration_set::builtin_bit) != 0) && (decorations.builtin == spv::BuiltInSampleMask)) { auto type_pointer = src->get_def(insn.word(1)); assert(type_pointer.opcode() == spv::OpTypePointer); auto type = src->get_def(type_pointer.word(3)); if (type.opcode() == spv::OpTypeArray) { uint32_t length = static_cast<uint32_t>(src->GetConstantValueById(type.word(3))); // Handles both the input and output sampleMask if (length > phys_dev_props.limits.maxSampleMaskWords) { skip |= LogError(device, "VUID-VkPipelineShaderStageCreateInfo-maxSampleMaskWords-00711", "vkCreateGraphicsPipelines(): The BuiltIns SampleMask array sizes is %u which exceeds " "maxSampleMaskWords of %u in %s.", length, phys_dev_props.limits.maxSampleMaskWords, report_data->FormatHandle(src->vk_shader_module()).c_str()); } break; } } } return skip; } // Validate that data for each specialization entry is fully contained within the buffer. bool CoreChecks::ValidateSpecializations(VkPipelineShaderStageCreateInfo const *info) const { bool skip = false; VkSpecializationInfo const *spec = info->pSpecializationInfo; if (spec) { for (auto i = 0u; i < spec->mapEntryCount; i++) { if (spec->pMapEntries[i].offset >= spec->dataSize) { skip |= LogError(device, "VUID-VkSpecializationInfo-offset-00773", "Specialization entry %u (for constant id %u) references memory outside provided specialization " "data (bytes %u.." PRINTF_SIZE_T_SPECIFIER "; " PRINTF_SIZE_T_SPECIFIER " bytes provided).", i, spec->pMapEntries[i].constantID, spec->pMapEntries[i].offset, spec->pMapEntries[i].offset + spec->dataSize - 1, spec->dataSize); continue; } if (spec->pMapEntries[i].offset + spec->pMapEntries[i].size > spec->dataSize) { skip |= LogError(device, "VUID-VkSpecializationInfo-pMapEntries-00774", "Specialization entry %u (for constant id %u) references memory outside provided specialization " "data (bytes %u.." PRINTF_SIZE_T_SPECIFIER "; " PRINTF_SIZE_T_SPECIFIER " bytes provided).", i, spec->pMapEntries[i].constantID, spec->pMapEntries[i].offset, spec->pMapEntries[i].offset + spec->pMapEntries[i].size - 1, spec->dataSize); } for (uint32_t j = i + 1; j < spec->mapEntryCount; ++j) { if (spec->pMapEntries[i].constantID == spec->pMapEntries[j].constantID) { skip |= LogError(device, "VUID-VkSpecializationInfo-constantID-04911", "Specialization entry %" PRIu32 " and %" PRIu32 " have the same constantID (%" PRIu32 ").", i, j, spec->pMapEntries[i].constantID); } } } } return skip; } // TODO (jbolz): Can this return a const reference? 
static std::set<uint32_t> TypeToDescriptorTypeSet(SHADER_MODULE_STATE const *module, uint32_t type_id, unsigned &descriptor_count, bool is_khr) { auto type = module->get_def(type_id); bool is_storage_buffer = false; descriptor_count = 1; std::set<uint32_t> ret; // Strip off any array or ptrs. Where we remove array levels, adjust the descriptor count for each dimension. while (type.opcode() == spv::OpTypeArray || type.opcode() == spv::OpTypePointer || type.opcode() == spv::OpTypeRuntimeArray) { if (type.opcode() == spv::OpTypeRuntimeArray) { descriptor_count = 0; type = module->get_def(type.word(2)); } else if (type.opcode() == spv::OpTypeArray) { descriptor_count *= module->GetConstantValueById(type.word(3)); type = module->get_def(type.word(2)); } else { if (type.word(2) == spv::StorageClassStorageBuffer) { is_storage_buffer = true; } type = module->get_def(type.word(3)); } } switch (type.opcode()) { case spv::OpTypeStruct: { for (auto insn : module->decoration_inst) { if (insn.word(1) == type.word(1)) { if (insn.word(2) == spv::DecorationBlock) { if (is_storage_buffer) { ret.insert(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER); ret.insert(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC); return ret; } else { ret.insert(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER); ret.insert(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC); ret.insert(VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT); return ret; } } else if (insn.word(2) == spv::DecorationBufferBlock) { ret.insert(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER); ret.insert(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC); return ret; } } } // Invalid return ret; } case spv::OpTypeSampler: ret.insert(VK_DESCRIPTOR_TYPE_SAMPLER); ret.insert(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER); return ret; case spv::OpTypeSampledImage: { // Slight relaxation for some GLSL historical madness: samplerBuffer doesn't really have a sampler, and a texel // buffer descriptor doesn't really provide one. Allow this slight mismatch. auto image_type = module->get_def(type.word(2)); auto dim = image_type.word(3); auto sampled = image_type.word(7); if (dim == spv::DimBuffer && sampled == 1) { ret.insert(VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER); return ret; } } ret.insert(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER); return ret; case spv::OpTypeImage: { // Many descriptor types backing image types-- depends on dimension and whether the image will be used with a sampler. // SPIRV for Vulkan requires that sampled be 1 or 2 -- leaving the decision to runtime is unacceptable. auto dim = type.word(3); auto sampled = type.word(7); if (dim == spv::DimSubpassData) { ret.insert(VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT); return ret; } else if (dim == spv::DimBuffer) { if (sampled == 1) { ret.insert(VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER); return ret; } else { ret.insert(VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER); return ret; } } else if (sampled == 1) { ret.insert(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE); ret.insert(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER); return ret; } else { ret.insert(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE); return ret; } } case spv::OpTypeAccelerationStructureNV: is_khr ? ret.insert(VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR) : ret.insert(VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_NV); return ret; // We shouldn't really see any other junk types -- but if we do, they're a mismatch. 
default: return ret; // Matches nothing } } static std::string string_descriptorTypes(const std::set<uint32_t> &descriptor_types) { std::stringstream ss; for (auto it = descriptor_types.begin(); it != descriptor_types.end(); ++it) { if (ss.tellp()) ss << ", "; ss << string_VkDescriptorType(VkDescriptorType(*it)); } return ss.str(); } bool CoreChecks::RequirePropertyFlag(VkBool32 check, char const *flag, char const *structure, const char *vuid) const { if (!check) { if (LogError(device, vuid, "Shader requires flag %s set in %s but it is not set on the device", flag, structure)) { return true; } } return false; } bool CoreChecks::RequireFeature(VkBool32 feature, char const *feature_name, const char *vuid) const { if (!feature) { if (LogError(device, vuid, "Shader requires %s but is not enabled on the device", feature_name)) { return true; } } return false; } bool CoreChecks::ValidateShaderStageWritableOrAtomicDescriptor(VkShaderStageFlagBits stage, bool has_writable_descriptor, bool has_atomic_descriptor) const { bool skip = false; if (has_writable_descriptor || has_atomic_descriptor) { switch (stage) { case VK_SHADER_STAGE_COMPUTE_BIT: case VK_SHADER_STAGE_RAYGEN_BIT_NV: case VK_SHADER_STAGE_ANY_HIT_BIT_NV: case VK_SHADER_STAGE_CLOSEST_HIT_BIT_NV: case VK_SHADER_STAGE_MISS_BIT_NV: case VK_SHADER_STAGE_INTERSECTION_BIT_NV: case VK_SHADER_STAGE_CALLABLE_BIT_NV: case VK_SHADER_STAGE_TASK_BIT_NV: case VK_SHADER_STAGE_MESH_BIT_NV: /* No feature requirements for writes and atomics from compute * raytracing, or mesh stages */ break; case VK_SHADER_STAGE_FRAGMENT_BIT: skip |= RequireFeature(enabled_features.core.fragmentStoresAndAtomics, "fragmentStoresAndAtomics", kVUID_Core_Shader_FeatureNotEnabled); break; default: skip |= RequireFeature(enabled_features.core.vertexPipelineStoresAndAtomics, "vertexPipelineStoresAndAtomics", kVUID_Core_Shader_FeatureNotEnabled); break; } } return skip; } bool CoreChecks::ValidateShaderStageGroupNonUniform(SHADER_MODULE_STATE const *module, VkShaderStageFlagBits stage, spirv_inst_iter &insn) const { bool skip = false; // Check anything using a group operation (which currently is only OpGroupNonUnifrom* operations) if (GroupOperation(insn.opcode()) == true) { // Check the quad operations. 
if ((insn.opcode() == spv::OpGroupNonUniformQuadBroadcast) || (insn.opcode() == spv::OpGroupNonUniformQuadSwap)) { if ((stage != VK_SHADER_STAGE_FRAGMENT_BIT) && (stage != VK_SHADER_STAGE_COMPUTE_BIT)) { skip |= RequireFeature(phys_dev_props_core11.subgroupQuadOperationsInAllStages, "VkPhysicalDeviceSubgroupProperties::quadOperationsInAllStages", kVUID_Core_Shader_FeatureNotEnabled); } } uint32_t scope_type = spv::ScopeMax; if (insn.opcode() == spv::OpGroupNonUniformPartitionNV) { // OpGroupNonUniformPartitionNV always assumed subgroup as missing operand scope_type = spv::ScopeSubgroup; } else { // "All <id> used for Scope <id> must be of an OpConstant" auto scope_id = module->get_def(insn.word(3)); scope_type = scope_id.word(3); } if (scope_type == spv::ScopeSubgroup) { // "Group operations with subgroup scope" must have stage support const VkSubgroupFeatureFlags supported_stages = phys_dev_props_core11.subgroupSupportedStages; skip |= RequirePropertyFlag(supported_stages & stage, string_VkShaderStageFlagBits(stage), "VkPhysicalDeviceSubgroupProperties::supportedStages", kVUID_Core_Shader_ExceedDeviceLimit); } if (!enabled_features.core12.shaderSubgroupExtendedTypes) { auto type = module->get_def(insn.word(1)); if (type.opcode() == spv::OpTypeVector) { // Get the element type type = module->get_def(type.word(2)); } if (type.opcode() != spv::OpTypeBool) { // Both OpTypeInt and OpTypeFloat the width is in the 2nd word. const uint32_t width = type.word(2); if ((type.opcode() == spv::OpTypeFloat && width == 16) || (type.opcode() == spv::OpTypeInt && (width == 8 || width == 16 || width == 64))) { skip |= RequireFeature(enabled_features.core12.shaderSubgroupExtendedTypes, "VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures::shaderSubgroupExtendedTypes", kVUID_Core_Shader_FeatureNotEnabled); } } } } return skip; } bool CoreChecks::ValidateWorkgroupSize(SHADER_MODULE_STATE const *src, VkPipelineShaderStageCreateInfo const *pStage, const std::unordered_map<uint32_t, std::vector<uint32_t>>& id_value_map) const { bool skip = false; std::array<uint32_t, 3> work_group_size = src->GetWorkgroupSize(pStage, id_value_map); for (uint32_t i = 0; i < 3; ++i) { if (work_group_size[i] > phys_dev_props.limits.maxComputeWorkGroupSize[i]) { const char member = 'x' + static_cast<int8_t>(i); skip |= LogError(device, kVUID_Core_Shader_MaxComputeWorkGroupSize, "Specialization constant is being used to specialize WorkGroupSize.%c, but value (%" PRIu32 ") is greater than VkPhysicalDeviceLimits::maxComputeWorkGroupSize[%" PRIu32 "] = %" PRIu32 ".", member, work_group_size[i], i, phys_dev_props.limits.maxComputeWorkGroupSize[i]); } } return skip; } bool CoreChecks::ValidateShaderStageInputOutputLimits(SHADER_MODULE_STATE const *src, VkPipelineShaderStageCreateInfo const *pStage, const PIPELINE_STATE *pipeline, spirv_inst_iter entrypoint) const { if (pStage->stage == VK_SHADER_STAGE_COMPUTE_BIT || pStage->stage == VK_SHADER_STAGE_ALL_GRAPHICS || pStage->stage == VK_SHADER_STAGE_ALL) { return false; } bool skip = false; auto const &limits = phys_dev_props.limits; std::set<uint32_t> patch_i_ds; struct Variable { uint32_t baseTypePtrID; uint32_t ID; uint32_t storageClass; }; std::vector<Variable> variables; uint32_t num_vertices = 0; bool is_iso_lines = false; bool is_point_mode = false; auto entrypoint_variables = FindEntrypointInterfaces(entrypoint); for (auto insn : *src) { switch (insn.opcode()) { // Find all Patch decorations case spv::OpDecorate: switch (insn.word(2)) { case spv::DecorationPatch: { 
patch_i_ds.insert(insn.word(1)); break; } default: break; } break; // Find all input and output variables case spv::OpVariable: { Variable var = {}; var.storageClass = insn.word(3); if ((var.storageClass == spv::StorageClassInput || var.storageClass == spv::StorageClassOutput) && // Only include variables in the entrypoint's interface find(entrypoint_variables.begin(), entrypoint_variables.end(), insn.word(2)) != entrypoint_variables.end()) { var.baseTypePtrID = insn.word(1); var.ID = insn.word(2); variables.push_back(var); } break; } case spv::OpExecutionMode: if (insn.word(1) == entrypoint.word(2)) { switch (insn.word(2)) { default: break; case spv::ExecutionModeOutputVertices: num_vertices = insn.word(3); break; case spv::ExecutionModeIsolines: is_iso_lines = true; break; case spv::ExecutionModePointMode: is_point_mode = true; break; } } break; default: break; } } bool strip_output_array_level = (pStage->stage == VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT || pStage->stage == VK_SHADER_STAGE_MESH_BIT_NV); bool strip_input_array_level = (pStage->stage == VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT || pStage->stage == VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT || pStage->stage == VK_SHADER_STAGE_GEOMETRY_BIT); uint32_t num_comp_in = 0, num_comp_out = 0; int max_comp_in = 0, max_comp_out = 0; auto inputs = src->CollectInterfaceByLocation(entrypoint, spv::StorageClassInput, strip_input_array_level); auto outputs = src->CollectInterfaceByLocation(entrypoint, spv::StorageClassOutput, strip_output_array_level); // Find max component location used for input variables. for (auto &var : inputs) { int location = var.first.first; int component = var.first.second; interface_var &iv = var.second; // Only need to look at the first location, since we use the type's whole size if (iv.offset != 0) { continue; } if (iv.is_patch) { continue; } int num_components = src->GetComponentsConsumedByType(iv.type_id, strip_input_array_level); max_comp_in = std::max(max_comp_in, location * 4 + component + num_components); } // Find max component location used for output variables. for (auto &var : outputs) { int location = var.first.first; int component = var.first.second; interface_var &iv = var.second; // Only need to look at the first location, since we use the type's whole size if (iv.offset != 0) { continue; } if (iv.is_patch) { continue; } int num_components = src->GetComponentsConsumedByType(iv.type_id, strip_output_array_level); max_comp_out = std::max(max_comp_out, location * 4 + component + num_components); } // XXX TODO: Would be nice to rewrite this to use CollectInterfaceByLocation (or something similar), // but that doesn't include builtins. // When rewritten, using the CreatePipelineExceedVertexMaxComponentsWithBuiltins test it would be nice to also let the user know // how many components were from builtins as it might not be obvious for (auto &var : variables) { // Check if the variable is a patch. Patches can also be members of blocks, // but if they are then the top-level arrayness has already been stripped // by the time GetComponentsConsumedByType gets to it. 
bool is_patch = patch_i_ds.find(var.ID) != patch_i_ds.end(); if (var.storageClass == spv::StorageClassInput) { num_comp_in += src->GetComponentsConsumedByType(var.baseTypePtrID, strip_input_array_level && !is_patch); } else { // var.storageClass == spv::StorageClassOutput num_comp_out += src->GetComponentsConsumedByType(var.baseTypePtrID, strip_output_array_level && !is_patch); } } switch (pStage->stage) { case VK_SHADER_STAGE_VERTEX_BIT: if (num_comp_out > limits.maxVertexOutputComponents) { skip |= LogError(pipeline->pipeline(), kVUID_Core_Shader_ExceedDeviceLimit, "Invalid Pipeline CreateInfo State: Vertex shader exceeds " "VkPhysicalDeviceLimits::maxVertexOutputComponents of %u " "components by %u components", limits.maxVertexOutputComponents, num_comp_out - limits.maxVertexOutputComponents); } if (max_comp_out > static_cast<int>(limits.maxVertexOutputComponents)) { skip |= LogError(pipeline->pipeline(), kVUID_Core_Shader_ExceedDeviceLimit, "Invalid Pipeline CreateInfo State: Vertex shader output variable uses location that " "exceeds component limit VkPhysicalDeviceLimits::maxVertexOutputComponents (%u)", limits.maxVertexOutputComponents); } break; case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT: if (num_comp_in > limits.maxTessellationControlPerVertexInputComponents) { skip |= LogError(pipeline->pipeline(), kVUID_Core_Shader_ExceedDeviceLimit, "Invalid Pipeline CreateInfo State: Tessellation control shader exceeds " "VkPhysicalDeviceLimits::maxTessellationControlPerVertexInputComponents of %u " "components by %u components", limits.maxTessellationControlPerVertexInputComponents, num_comp_in - limits.maxTessellationControlPerVertexInputComponents); } if (max_comp_in > static_cast<int>(limits.maxTessellationControlPerVertexInputComponents)) { skip |= LogError(pipeline->pipeline(), kVUID_Core_Shader_ExceedDeviceLimit, "Invalid Pipeline CreateInfo State: Tessellation control shader input variable uses location that " "exceeds component limit VkPhysicalDeviceLimits::maxTessellationControlPerVertexInputComponents (%u)", limits.maxTessellationControlPerVertexInputComponents); } if (num_comp_out > limits.maxTessellationControlPerVertexOutputComponents) { skip |= LogError(pipeline->pipeline(), kVUID_Core_Shader_ExceedDeviceLimit, "Invalid Pipeline CreateInfo State: Tessellation control shader exceeds " "VkPhysicalDeviceLimits::maxTessellationControlPerVertexOutputComponents of %u " "components by %u components", limits.maxTessellationControlPerVertexOutputComponents, num_comp_out - limits.maxTessellationControlPerVertexOutputComponents); } if (max_comp_out > static_cast<int>(limits.maxTessellationControlPerVertexOutputComponents)) { skip |= LogError(pipeline->pipeline(), kVUID_Core_Shader_ExceedDeviceLimit, "Invalid Pipeline CreateInfo State: Tessellation control shader output variable uses location that " "exceeds component limit VkPhysicalDeviceLimits::maxTessellationControlPerVertexOutputComponents (%u)", limits.maxTessellationControlPerVertexOutputComponents); } break; case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT: if (num_comp_in > limits.maxTessellationEvaluationInputComponents) { skip |= LogError(pipeline->pipeline(), kVUID_Core_Shader_ExceedDeviceLimit, "Invalid Pipeline CreateInfo State: Tessellation evaluation shader exceeds " "VkPhysicalDeviceLimits::maxTessellationEvaluationInputComponents of %u " "components by %u components", limits.maxTessellationEvaluationInputComponents, num_comp_in - limits.maxTessellationEvaluationInputComponents); } if (max_comp_in > 
static_cast<int>(limits.maxTessellationEvaluationInputComponents)) { skip |= LogError(pipeline->pipeline(), kVUID_Core_Shader_ExceedDeviceLimit, "Invalid Pipeline CreateInfo State: Tessellation evaluation shader input variable uses location that " "exceeds component limit VkPhysicalDeviceLimits::maxTessellationEvaluationInputComponents (%u)", limits.maxTessellationEvaluationInputComponents); } if (num_comp_out > limits.maxTessellationEvaluationOutputComponents) { skip |= LogError(pipeline->pipeline(), kVUID_Core_Shader_ExceedDeviceLimit, "Invalid Pipeline CreateInfo State: Tessellation evaluation shader exceeds " "VkPhysicalDeviceLimits::maxTessellationEvaluationOutputComponents of %u " "components by %u components", limits.maxTessellationEvaluationOutputComponents, num_comp_out - limits.maxTessellationEvaluationOutputComponents); } if (max_comp_out > static_cast<int>(limits.maxTessellationEvaluationOutputComponents)) { skip |= LogError(pipeline->pipeline(), kVUID_Core_Shader_ExceedDeviceLimit, "Invalid Pipeline CreateInfo State: Tessellation evaluation shader output variable uses location that " "exceeds component limit VkPhysicalDeviceLimits::maxTessellationEvaluationOutputComponents (%u)", limits.maxTessellationEvaluationOutputComponents); } // Portability validation if (IsExtEnabled(device_extensions.vk_khr_portability_subset)) { if (is_iso_lines && (VK_FALSE == enabled_features.portability_subset_features.tessellationIsolines)) { skip |= LogError(pipeline->pipeline(), kVUID_Portability_Tessellation_Isolines, "Invalid Pipeline CreateInfo state (portability error): Tessellation evaluation shader" " is using abstract patch type IsoLines, but this is not supported on this platform"); } if (is_point_mode && (VK_FALSE == enabled_features.portability_subset_features.tessellationPointMode)) { skip |= LogError(pipeline->pipeline(), kVUID_Portability_Tessellation_PointMode, "Invalid Pipeline CreateInfo state (portability error): Tessellation evaluation shader" " is using abstract patch type PointMode, but this is not supported on this platform"); } } break; case VK_SHADER_STAGE_GEOMETRY_BIT: if (num_comp_in > limits.maxGeometryInputComponents) { skip |= LogError(pipeline->pipeline(), kVUID_Core_Shader_ExceedDeviceLimit, "Invalid Pipeline CreateInfo State: Geometry shader exceeds " "VkPhysicalDeviceLimits::maxGeometryInputComponents of %u " "components by %u components", limits.maxGeometryInputComponents, num_comp_in - limits.maxGeometryInputComponents); } if (max_comp_in > static_cast<int>(limits.maxGeometryInputComponents)) { skip |= LogError(pipeline->pipeline(), kVUID_Core_Shader_ExceedDeviceLimit, "Invalid Pipeline CreateInfo State: Geometry shader input variable uses location that " "exceeds component limit VkPhysicalDeviceLimits::maxGeometryInputComponents (%u)", limits.maxGeometryInputComponents); } if (num_comp_out > limits.maxGeometryOutputComponents) { skip |= LogError(pipeline->pipeline(), kVUID_Core_Shader_ExceedDeviceLimit, "Invalid Pipeline CreateInfo State: Geometry shader exceeds " "VkPhysicalDeviceLimits::maxGeometryOutputComponents of %u " "components by %u components", limits.maxGeometryOutputComponents, num_comp_out - limits.maxGeometryOutputComponents); } if (max_comp_out > static_cast<int>(limits.maxGeometryOutputComponents)) { skip |= LogError(pipeline->pipeline(), kVUID_Core_Shader_ExceedDeviceLimit, "Invalid Pipeline CreateInfo State: Geometry shader output variable uses location that " "exceeds component limit VkPhysicalDeviceLimits::maxGeometryOutputComponents (%u)", 
limits.maxGeometryOutputComponents); } if (num_comp_out * num_vertices > limits.maxGeometryTotalOutputComponents) { skip |= LogError(pipeline->pipeline(), kVUID_Core_Shader_ExceedDeviceLimit, "Invalid Pipeline CreateInfo State: Geometry shader exceeds " "VkPhysicalDeviceLimits::maxGeometryTotalOutputComponents of %u " "components by %u components", limits.maxGeometryTotalOutputComponents, num_comp_out * num_vertices - limits.maxGeometryTotalOutputComponents); } break; case VK_SHADER_STAGE_FRAGMENT_BIT: if (num_comp_in > limits.maxFragmentInputComponents) { skip |= LogError(pipeline->pipeline(), kVUID_Core_Shader_ExceedDeviceLimit, "Invalid Pipeline CreateInfo State: Fragment shader exceeds " "VkPhysicalDeviceLimits::maxFragmentInputComponents of %u " "components by %u components", limits.maxFragmentInputComponents, num_comp_in - limits.maxFragmentInputComponents); } if (max_comp_in > static_cast<int>(limits.maxFragmentInputComponents)) { skip |= LogError(pipeline->pipeline(), kVUID_Core_Shader_ExceedDeviceLimit, "Invalid Pipeline CreateInfo State: Fragment shader input variable uses location that " "exceeds component limit VkPhysicalDeviceLimits::maxFragmentInputComponents (%u)", limits.maxFragmentInputComponents); } break; case VK_SHADER_STAGE_RAYGEN_BIT_NV: case VK_SHADER_STAGE_ANY_HIT_BIT_NV: case VK_SHADER_STAGE_CLOSEST_HIT_BIT_NV: case VK_SHADER_STAGE_MISS_BIT_NV: case VK_SHADER_STAGE_INTERSECTION_BIT_NV: case VK_SHADER_STAGE_CALLABLE_BIT_NV: case VK_SHADER_STAGE_TASK_BIT_NV: case VK_SHADER_STAGE_MESH_BIT_NV: break; default: assert(false); // This should never happen } return skip; } bool CoreChecks::ValidateShaderStorageImageFormats(SHADER_MODULE_STATE const *src) const { bool skip = false; // Got through all ImageRead/Write instructions for (auto insn : *src) { switch (insn.opcode()) { case spv::OpImageSparseRead: case spv::OpImageRead: { spirv_inst_iter type_def = src->GetImageFormatInst(insn.word(3)); if (type_def != src->end()) { const auto dim = type_def.word(3); // If the Image Dim operand is not SubpassData, the Image Format must not be Unknown, unless the // StorageImageReadWithoutFormat Capability was declared. 
if (dim != spv::DimSubpassData && type_def.word(8) == spv::ImageFormatUnknown) { skip |= RequireFeature(enabled_features.core.shaderStorageImageReadWithoutFormat, "shaderStorageImageReadWithoutFormat", kVUID_Features_shaderStorageImageReadWithoutFormat); } } break; } case spv::OpImageWrite: { spirv_inst_iter type_def = src->GetImageFormatInst(insn.word(1)); if (type_def != src->end()) { if (type_def.word(8) == spv::ImageFormatUnknown) { skip |= RequireFeature(enabled_features.core.shaderStorageImageWriteWithoutFormat, "shaderStorageImageWriteWithoutFormat", kVUID_Features_shaderStorageImageWriteWithoutFormat); } } break; } } } // Go through all variables for images and check decorations for (auto insn : *src) { if (insn.opcode() != spv::OpVariable) continue; uint32_t var = insn.word(2); spirv_inst_iter type_def = src->GetImageFormatInst(insn.word(1)); if (type_def == src->end()) continue; // Only check if the Image Dim operand is not SubpassData const auto dim = type_def.word(3); if (dim == spv::DimSubpassData) continue; // Only check storage images if (type_def.word(7) != 2) continue; if (type_def.word(8) != spv::ImageFormatUnknown) continue; decoration_set img_decorations = src->get_decorations(var); if (!enabled_features.core.shaderStorageImageReadWithoutFormat && !(img_decorations.flags & decoration_set::nonreadable_bit)) { skip |= LogError(device, kVUID_Features_shaderStorageImageReadWithoutFormat_NonReadable, "shaderStorageImageReadWithoutFormat not supported but variable %" PRIu32 " " " without format not marked a NonReadable", var); } if (!enabled_features.core.shaderStorageImageWriteWithoutFormat && !(img_decorations.flags & decoration_set::nonwritable_bit)) { skip |= LogError(device, kVUID_Features_shaderStorageImageWriteWithoutFormat_NonWritable, "shaderStorageImageWriteWithoutFormat not supported but variable %" PRIu32 " " "without format not marked a NonWritable", var); } } return skip; } bool CoreChecks::ValidateShaderStageMaxResources(VkShaderStageFlagBits stage, const PIPELINE_STATE *pipeline) const { bool skip = false; uint32_t total_resources = 0; // Only currently testing for graphics and compute pipelines // TODO: Add check and support for Ray Tracing pipeline VUID 03428 if ((stage & (VK_SHADER_STAGE_ALL_GRAPHICS | VK_SHADER_STAGE_COMPUTE_BIT)) == 0) { return false; } if (stage == VK_SHADER_STAGE_FRAGMENT_BIT) { // "For the fragment shader stage the framebuffer color attachments also count against this limit" total_resources += pipeline->rp_state->createInfo.pSubpasses[pipeline->graphicsPipelineCI.subpass].colorAttachmentCount; } // TODO: This reuses a lot of GetDescriptorCountMaxPerStage but currently would need to make it agnostic in a way to handle // input from CreatePipeline and CreatePipelineLayout level for (auto set_layout : pipeline->pipeline_layout->set_layouts) { if ((set_layout->GetCreateFlags() & VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT) != 0) { continue; } for (uint32_t binding_idx = 0; binding_idx < set_layout->GetBindingCount(); binding_idx++) { const VkDescriptorSetLayoutBinding *binding = set_layout->GetDescriptorSetLayoutBindingPtrFromIndex(binding_idx); // Bindings with a descriptorCount of 0 are "reserved" and should be skipped if (((stage & binding->stageFlags) != 0) && (binding->descriptorCount > 0)) { // Check only descriptor types listed in maxPerStageResources description in spec switch (binding->descriptorType) { case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER: case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE: case 
VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER: case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER: case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER: case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC: case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT: total_resources += binding->descriptorCount; break; default: break; } } } } if (total_resources > phys_dev_props.limits.maxPerStageResources) { const char *vuid = (stage == VK_SHADER_STAGE_COMPUTE_BIT) ? "VUID-VkComputePipelineCreateInfo-layout-01687" : "VUID-VkGraphicsPipelineCreateInfo-layout-01688"; skip |= LogError(pipeline->pipeline(), vuid, "Invalid Pipeline CreateInfo State: Shader Stage %s exceeds component limit " "VkPhysicalDeviceLimits::maxPerStageResources (%u)", string_VkShaderStageFlagBits(stage), phys_dev_props.limits.maxPerStageResources); } return skip; } // copy the specialization constant value into buf, if it is present void GetSpecConstantValue(VkPipelineShaderStageCreateInfo const *pStage, uint32_t spec_id, void *buf) { VkSpecializationInfo const *spec = pStage->pSpecializationInfo; if (spec && spec_id < spec->mapEntryCount) { memcpy(buf, (uint8_t *)spec->pData + spec->pMapEntries[spec_id].offset, spec->pMapEntries[spec_id].size); } } // Fill in value with the constant or specialization constant value, if available. // Returns true if the value has been accurately filled out. static bool GetIntConstantValue(spirv_inst_iter insn, SHADER_MODULE_STATE const *src, VkPipelineShaderStageCreateInfo const *pStage, const layer_data::unordered_map<uint32_t, uint32_t> &id_to_spec_id, uint32_t *value) { auto type_id = src->get_def(insn.word(1)); if (type_id.opcode() != spv::OpTypeInt || type_id.word(2) != 32) { return false; } switch (insn.opcode()) { case spv::OpSpecConstant: *value = insn.word(3); GetSpecConstantValue(pStage, id_to_spec_id.at(insn.word(2)), value); return true; case spv::OpConstant: *value = insn.word(3); return true; default: return false; } } // Map SPIR-V type to VK_COMPONENT_TYPE enum VkComponentTypeNV GetComponentType(spirv_inst_iter insn, SHADER_MODULE_STATE const *src) { switch (insn.opcode()) { case spv::OpTypeInt: switch (insn.word(2)) { case 8: return insn.word(3) != 0 ? VK_COMPONENT_TYPE_SINT8_NV : VK_COMPONENT_TYPE_UINT8_NV; case 16: return insn.word(3) != 0 ? VK_COMPONENT_TYPE_SINT16_NV : VK_COMPONENT_TYPE_UINT16_NV; case 32: return insn.word(3) != 0 ? VK_COMPONENT_TYPE_SINT32_NV : VK_COMPONENT_TYPE_UINT32_NV; case 64: return insn.word(3) != 0 ? VK_COMPONENT_TYPE_SINT64_NV : VK_COMPONENT_TYPE_UINT64_NV; default: return VK_COMPONENT_TYPE_MAX_ENUM_NV; } case spv::OpTypeFloat: switch (insn.word(2)) { case 16: return VK_COMPONENT_TYPE_FLOAT16_NV; case 32: return VK_COMPONENT_TYPE_FLOAT32_NV; case 64: return VK_COMPONENT_TYPE_FLOAT64_NV; default: return VK_COMPONENT_TYPE_MAX_ENUM_NV; } default: return VK_COMPONENT_TYPE_MAX_ENUM_NV; } } // Validate SPV_NV_cooperative_matrix behavior that can't be statically validated // in SPIRV-Tools (e.g. due to specialization constant usage). bool CoreChecks::ValidateCooperativeMatrix(SHADER_MODULE_STATE const *src, VkPipelineShaderStageCreateInfo const *pStage, const PIPELINE_STATE *pipeline) const { bool skip = false; // Map SPIR-V result ID to specialization constant id (SpecId decoration value) layer_data::unordered_map<uint32_t, uint32_t> id_to_spec_id; // Map SPIR-V result ID to the ID of its type. 
layer_data::unordered_map<uint32_t, uint32_t> id_to_type_id; struct CoopMatType { uint32_t scope, rows, cols; VkComponentTypeNV component_type; bool all_constant; CoopMatType() : scope(0), rows(0), cols(0), component_type(VK_COMPONENT_TYPE_MAX_ENUM_NV), all_constant(false) {} void Init(uint32_t id, SHADER_MODULE_STATE const *src, VkPipelineShaderStageCreateInfo const *pStage, const layer_data::unordered_map<uint32_t, uint32_t> &id_to_spec_id) { spirv_inst_iter insn = src->get_def(id); uint32_t component_type_id = insn.word(2); uint32_t scope_id = insn.word(3); uint32_t rows_id = insn.word(4); uint32_t cols_id = insn.word(5); auto component_type_iter = src->get_def(component_type_id); auto scope_iter = src->get_def(scope_id); auto rows_iter = src->get_def(rows_id); auto cols_iter = src->get_def(cols_id); all_constant = true; if (!GetIntConstantValue(scope_iter, src, pStage, id_to_spec_id, &scope)) { all_constant = false; } if (!GetIntConstantValue(rows_iter, src, pStage, id_to_spec_id, &rows)) { all_constant = false; } if (!GetIntConstantValue(cols_iter, src, pStage, id_to_spec_id, &cols)) { all_constant = false; } component_type = GetComponentType(component_type_iter, src); } }; bool seen_coopmat_capability = false; for (auto insn : *src) { // Whitelist instructions whose result can be a cooperative matrix type, and // keep track of their types. It would be nice if SPIRV-Headers generated code // to identify which instructions have a result type and result id. Lacking that, // this whitelist is based on the set of instructions that // SPV_NV_cooperative_matrix says can be used with cooperative matrix types. switch (insn.opcode()) { case spv::OpLoad: case spv::OpCooperativeMatrixLoadNV: case spv::OpCooperativeMatrixMulAddNV: case spv::OpSNegate: case spv::OpFNegate: case spv::OpIAdd: case spv::OpFAdd: case spv::OpISub: case spv::OpFSub: case spv::OpFDiv: case spv::OpSDiv: case spv::OpUDiv: case spv::OpMatrixTimesScalar: case spv::OpConstantComposite: case spv::OpCompositeConstruct: case spv::OpConvertFToU: case spv::OpConvertFToS: case spv::OpConvertSToF: case spv::OpConvertUToF: case spv::OpUConvert: case spv::OpSConvert: case spv::OpFConvert: id_to_type_id[insn.word(2)] = insn.word(1); break; default: break; } switch (insn.opcode()) { case spv::OpDecorate: if (insn.word(2) == spv::DecorationSpecId) { id_to_spec_id[insn.word(1)] = insn.word(3); } break; case spv::OpCapability: if (insn.word(1) == spv::CapabilityCooperativeMatrixNV) { seen_coopmat_capability = true; if (!(pStage->stage & phys_dev_ext_props.cooperative_matrix_props.cooperativeMatrixSupportedStages)) { skip |= LogError( pipeline->pipeline(), kVUID_Core_Shader_CooperativeMatrixSupportedStages, "OpTypeCooperativeMatrixNV used in shader stage not in cooperativeMatrixSupportedStages (= %u)", phys_dev_ext_props.cooperative_matrix_props.cooperativeMatrixSupportedStages); } } break; case spv::OpMemoryModel: // If the capability isn't enabled, don't bother with the rest of this function. // OpMemoryModel is the first required instruction after all OpCapability instructions. if (!seen_coopmat_capability) { return skip; } break; case spv::OpTypeCooperativeMatrixNV: { CoopMatType m; m.Init(insn.word(1), src, pStage, id_to_spec_id); if (m.all_constant) { // Validate that the type parameters are all supported for one of the // operands of a cooperative matrix property. 
bool valid = false; for (unsigned i = 0; i < cooperative_matrix_properties.size(); ++i) { if (cooperative_matrix_properties[i].AType == m.component_type && cooperative_matrix_properties[i].MSize == m.rows && cooperative_matrix_properties[i].KSize == m.cols && cooperative_matrix_properties[i].scope == m.scope) { valid = true; break; } if (cooperative_matrix_properties[i].BType == m.component_type && cooperative_matrix_properties[i].KSize == m.rows && cooperative_matrix_properties[i].NSize == m.cols && cooperative_matrix_properties[i].scope == m.scope) { valid = true; break; } if (cooperative_matrix_properties[i].CType == m.component_type && cooperative_matrix_properties[i].MSize == m.rows && cooperative_matrix_properties[i].NSize == m.cols && cooperative_matrix_properties[i].scope == m.scope) { valid = true; break; } if (cooperative_matrix_properties[i].DType == m.component_type && cooperative_matrix_properties[i].MSize == m.rows && cooperative_matrix_properties[i].NSize == m.cols && cooperative_matrix_properties[i].scope == m.scope) { valid = true; break; } } if (!valid) { skip |= LogError(pipeline->pipeline(), kVUID_Core_Shader_CooperativeMatrixType, "OpTypeCooperativeMatrixNV (result id = %u) operands don't match a supported matrix type", insn.word(1)); } } break; } case spv::OpCooperativeMatrixMulAddNV: { CoopMatType a, b, c, d; if (id_to_type_id.find(insn.word(2)) == id_to_type_id.end() || id_to_type_id.find(insn.word(3)) == id_to_type_id.end() || id_to_type_id.find(insn.word(4)) == id_to_type_id.end() || id_to_type_id.find(insn.word(5)) == id_to_type_id.end()) { // Couldn't find type of matrix assert(false); break; } d.Init(id_to_type_id[insn.word(2)], src, pStage, id_to_spec_id); a.Init(id_to_type_id[insn.word(3)], src, pStage, id_to_spec_id); b.Init(id_to_type_id[insn.word(4)], src, pStage, id_to_spec_id); c.Init(id_to_type_id[insn.word(5)], src, pStage, id_to_spec_id); if (a.all_constant && b.all_constant && c.all_constant && d.all_constant) { // Validate that the type parameters are all supported for the same // cooperative matrix property. 
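// OpCooperativeMatrixMulAddNV computes D = A * B + C, so the operand shapes must agree as A: MxK, B: KxN,
// C: MxN, D: MxN, with matching scope, and all four operands must come from the same
// VkCooperativeMatrixPropertiesNV entry; the single combined comparison below enforces exactly that.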
bool valid = false; for (unsigned i = 0; i < cooperative_matrix_properties.size(); ++i) { if (cooperative_matrix_properties[i].AType == a.component_type && cooperative_matrix_properties[i].MSize == a.rows && cooperative_matrix_properties[i].KSize == a.cols && cooperative_matrix_properties[i].scope == a.scope && cooperative_matrix_properties[i].BType == b.component_type && cooperative_matrix_properties[i].KSize == b.rows && cooperative_matrix_properties[i].NSize == b.cols && cooperative_matrix_properties[i].scope == b.scope && cooperative_matrix_properties[i].CType == c.component_type && cooperative_matrix_properties[i].MSize == c.rows && cooperative_matrix_properties[i].NSize == c.cols && cooperative_matrix_properties[i].scope == c.scope && cooperative_matrix_properties[i].DType == d.component_type && cooperative_matrix_properties[i].MSize == d.rows && cooperative_matrix_properties[i].NSize == d.cols && cooperative_matrix_properties[i].scope == d.scope) { valid = true; break; } } if (!valid) { skip |= LogError(pipeline->pipeline(), kVUID_Core_Shader_CooperativeMatrixMulAdd, "OpCooperativeMatrixMulAddNV (result id = %u) operands don't match a supported matrix " "VkCooperativeMatrixPropertiesNV", insn.word(2)); } } break; } default: break; } } return skip; } bool CoreChecks::ValidateShaderResolveQCOM(SHADER_MODULE_STATE const *src, VkPipelineShaderStageCreateInfo const *pStage, const PIPELINE_STATE *pipeline) const { bool skip = false; // If the pipeline's subpass description contains flag VK_SUBPASS_DESCRIPTION_FRAGMENT_REGION_BIT_QCOM, // then the fragment shader must not enable the SPIRV SampleRateShading capability. if (pStage->stage == VK_SHADER_STAGE_FRAGMENT_BIT) { for (auto insn : *src) { switch (insn.opcode()) { case spv::OpCapability: if (insn.word(1) == spv::CapabilitySampleRateShading) { auto subpass_flags = (pipeline->rp_state == nullptr) ? 
0 : pipeline->rp_state->createInfo.pSubpasses[pipeline->graphicsPipelineCI.subpass].flags; if ((subpass_flags & VK_SUBPASS_DESCRIPTION_FRAGMENT_REGION_BIT_QCOM) != 0) { skip |= LogError(pipeline->pipeline(), kVUID_Core_Shader_ResolveQCOM_InvalidCapability, "Invalid Pipeline CreateInfo State: fragment shader enables SampleRateShading capability " "and the subpass flags includes VK_SUBPASS_DESCRIPTION_FRAGMENT_REGION_BIT_QCOM."); } } break; default: break; } } } return skip; } bool CoreChecks::ValidateAtomicsTypes(SHADER_MODULE_STATE const *src) const { bool skip = false; // "If sparseImageInt64Atomics is enabled, shaderImageInt64Atomics must be enabled" const bool valid_image_64_int = enabled_features.shader_image_atomic_int64_features.shaderImageInt64Atomics == VK_TRUE; const VkPhysicalDeviceShaderAtomicFloatFeaturesEXT &float_features = enabled_features.shader_atomic_float_features; const VkPhysicalDeviceShaderAtomicFloat2FeaturesEXT &float2_features = enabled_features.shader_atomic_float2_features; const bool valid_storage_buffer_float = ( (float_features.shaderBufferFloat32Atomics == VK_TRUE) || (float_features.shaderBufferFloat32AtomicAdd == VK_TRUE) || (float_features.shaderBufferFloat64Atomics == VK_TRUE) || (float_features.shaderBufferFloat64AtomicAdd == VK_TRUE) || (float2_features.shaderBufferFloat16Atomics == VK_TRUE) || (float2_features.shaderBufferFloat16AtomicAdd == VK_TRUE) || (float2_features.shaderBufferFloat16AtomicMinMax == VK_TRUE) || (float2_features.shaderBufferFloat32AtomicMinMax == VK_TRUE) || (float2_features.shaderBufferFloat64AtomicMinMax == VK_TRUE)); const bool valid_workgroup_float = ( (float_features.shaderSharedFloat32Atomics == VK_TRUE) || (float_features.shaderSharedFloat32AtomicAdd == VK_TRUE) || (float_features.shaderSharedFloat64Atomics == VK_TRUE) || (float_features.shaderSharedFloat64AtomicAdd == VK_TRUE) || (float2_features.shaderSharedFloat16Atomics == VK_TRUE) || (float2_features.shaderSharedFloat16AtomicAdd == VK_TRUE) || (float2_features.shaderSharedFloat16AtomicMinMax == VK_TRUE) || (float2_features.shaderSharedFloat32AtomicMinMax == VK_TRUE) || (float2_features.shaderSharedFloat64AtomicMinMax == VK_TRUE)); const bool valid_image_float = ( (float_features.shaderImageFloat32Atomics == VK_TRUE) || (float_features.shaderImageFloat32AtomicAdd == VK_TRUE) || (float2_features.shaderImageFloat32AtomicMinMax == VK_TRUE)); const bool valid_16_float = ( (float2_features.shaderBufferFloat16Atomics == VK_TRUE) || (float2_features.shaderBufferFloat16AtomicAdd == VK_TRUE) || (float2_features.shaderBufferFloat16AtomicMinMax == VK_TRUE) || (float2_features.shaderSharedFloat16Atomics == VK_TRUE) || (float2_features.shaderSharedFloat16AtomicAdd == VK_TRUE) || (float2_features.shaderSharedFloat16AtomicMinMax == VK_TRUE)); const bool valid_32_float = ( (float_features.shaderBufferFloat32Atomics == VK_TRUE) || (float_features.shaderBufferFloat32AtomicAdd == VK_TRUE) || (float_features.shaderSharedFloat32Atomics == VK_TRUE) || (float_features.shaderSharedFloat32AtomicAdd == VK_TRUE) || (float_features.shaderImageFloat32Atomics == VK_TRUE) || (float_features.shaderImageFloat32AtomicAdd == VK_TRUE) || (float2_features.shaderBufferFloat32AtomicMinMax == VK_TRUE) || (float2_features.shaderSharedFloat32AtomicMinMax == VK_TRUE) || (float2_features.shaderImageFloat32AtomicMinMax == VK_TRUE)); const bool valid_64_float = ( (float_features.shaderBufferFloat64Atomics == VK_TRUE) || (float_features.shaderBufferFloat64AtomicAdd == VK_TRUE) || 
(float_features.shaderSharedFloat64Atomics == VK_TRUE) || (float_features.shaderSharedFloat64AtomicAdd == VK_TRUE) || (float2_features.shaderBufferFloat64AtomicMinMax == VK_TRUE) || (float2_features.shaderSharedFloat64AtomicMinMax == VK_TRUE)); // clang-format on for (auto &atomic_inst : src->atomic_inst) { const atomic_instruction &atomic = atomic_inst.second; const uint32_t opcode = src->at(atomic_inst.first).opcode(); if ((atomic.bit_width == 64) && (atomic.type == spv::OpTypeInt)) { // Validate 64-bit atomics if (((atomic.storage_class == spv::StorageClassStorageBuffer) || (atomic.storage_class == spv::StorageClassUniform)) && (enabled_features.core12.shaderBufferInt64Atomics == VK_FALSE)) { skip |= LogError( device, kVUID_Core_Shader_AtomicFeature, "%s: Can't use 64-bit int atomics operations with %s storage class without shaderBufferInt64Atomics enabled.", report_data->FormatHandle(src->vk_shader_module()).c_str(), StorageClassName(atomic.storage_class)); } else if ((atomic.storage_class == spv::StorageClassWorkgroup) && (enabled_features.core12.shaderSharedInt64Atomics == VK_FALSE)) { skip |= LogError(device, kVUID_Core_Shader_AtomicFeature, "%s: Can't use 64-bit int atomics operations with Workgroup storage class without " "shaderSharedInt64Atomics enabled.", report_data->FormatHandle(src->vk_shader_module()).c_str()); } else if ((atomic.storage_class == spv::StorageClassImage) && (valid_image_64_int == false)) { skip |= LogError(device, kVUID_Core_Shader_AtomicFeature, "%s: Can't use 64-bit int atomics operations with Image storage class without " "shaderImageInt64Atomics enabled.", report_data->FormatHandle(src->vk_shader_module()).c_str()); } } else if (atomic.type == spv::OpTypeFloat) { // Validate Floats if (atomic.storage_class == spv::StorageClassStorageBuffer) { if (valid_storage_buffer_float == false) { skip |= LogError(device, kVUID_Core_Shader_AtomicFeature, "%s: Can't use float atomics operations with StorageBuffer storage class without " "shaderBufferFloat32Atomics or shaderBufferFloat32AtomicAdd or shaderBufferFloat64Atomics or " "shaderBufferFloat64AtomicAdd or shaderBufferFloat16Atomics or shaderBufferFloat16AtomicAdd " "or shaderBufferFloat16AtomicMinMax or shaderBufferFloat32AtomicMinMax or " "shaderBufferFloat64AtomicMinMax enabled.", report_data->FormatHandle(src->vk_shader_module()).c_str()); } else if (opcode == spv::OpAtomicFAddEXT) { if ((atomic.bit_width == 16) && (float2_features.shaderBufferFloat16AtomicAdd == VK_FALSE)) { skip |= LogError(device, kVUID_Core_Shader_AtomicFeature, "%s: Can't use 16-bit float atomics for add operations (OpAtomicFAddEXT) with " "StorageBuffer storage class without shaderBufferFloat16AtomicAdd enabled.", report_data->FormatHandle(src->vk_shader_module()).c_str()); } else if ((atomic.bit_width == 32) && (float_features.shaderBufferFloat32AtomicAdd == VK_FALSE)) { skip |= LogError(device, kVUID_Core_Shader_AtomicFeature, "%s: Can't use 32-bit float atomics for add operations (OpAtomicFAddEXT) with " "StorageBuffer storage class without shaderBufferFloat32AtomicAdd enabled.", report_data->FormatHandle(src->vk_shader_module()).c_str()); } else if ((atomic.bit_width == 64) && (float_features.shaderBufferFloat64AtomicAdd == VK_FALSE)) { skip |= LogError(device, kVUID_Core_Shader_AtomicFeature, "%s: Can't use 64-bit float atomics for add operations (OpAtomicFAddEXT) with " "StorageBuffer storage class without shaderBufferFloat64AtomicAdd enabled.", report_data->FormatHandle(src->vk_shader_module()).c_str()); } } else if (opcode == 
spv::OpAtomicFMinEXT || opcode == spv::OpAtomicFMaxEXT) {
    if ((atomic.bit_width == 16) && (float2_features.shaderBufferFloat16AtomicMinMax == VK_FALSE)) {
        skip |= LogError(device, kVUID_Core_Shader_AtomicFeature,
                         "%s: Can't use 16-bit float atomics for min/max operations (OpAtomicFMinEXT or OpAtomicFMaxEXT) with "
                         "StorageBuffer storage class without shaderBufferFloat16AtomicMinMax enabled.",
                         report_data->FormatHandle(src->vk_shader_module()).c_str());
    } else if ((atomic.bit_width == 32) && (float2_features.shaderBufferFloat32AtomicMinMax == VK_FALSE)) {
        skip |= LogError(device, kVUID_Core_Shader_AtomicFeature,
                         "%s: Can't use 32-bit float atomics for min/max operations (OpAtomicFMinEXT or OpAtomicFMaxEXT) with "
                         "StorageBuffer storage class without shaderBufferFloat32AtomicMinMax enabled.",
                         report_data->FormatHandle(src->vk_shader_module()).c_str());
    } else if ((atomic.bit_width == 64) && (float2_features.shaderBufferFloat64AtomicMinMax == VK_FALSE)) {
        skip |= LogError(device, kVUID_Core_Shader_AtomicFeature,
                         "%s: Can't use 64-bit float atomics for min/max operations (OpAtomicFMinEXT or OpAtomicFMaxEXT) with "
                         "StorageBuffer storage class without shaderBufferFloat64AtomicMinMax enabled.",
                         report_data->FormatHandle(src->vk_shader_module()).c_str());
    }
} else {
    // Assume is valid load/store/exchange (rest of supported atomic operations) or else spirv-val will catch
    if ((atomic.bit_width == 16) && (float2_features.shaderBufferFloat16Atomics == VK_FALSE)) {
        skip |= LogError(device, kVUID_Core_Shader_AtomicFeature,
                         "%s: Can't use 16-bit float atomics for load/store/exchange operations (OpAtomicLoad, OpAtomicStore, "
                         "OpAtomicExchange) with StorageBuffer storage class without shaderBufferFloat16Atomics enabled.",
                         report_data->FormatHandle(src->vk_shader_module()).c_str());
    } else if ((atomic.bit_width == 32) && (float_features.shaderBufferFloat32Atomics == VK_FALSE)) {
        skip |= LogError(device, kVUID_Core_Shader_AtomicFeature,
                         "%s: Can't use 32-bit float atomics for load/store/exchange operations (OpAtomicLoad, OpAtomicStore, "
                         "OpAtomicExchange) with StorageBuffer storage class without shaderBufferFloat32Atomics enabled.",
                         report_data->FormatHandle(src->vk_shader_module()).c_str());
    } else if ((atomic.bit_width == 64) && (float_features.shaderBufferFloat64Atomics == VK_FALSE)) {
        skip |= LogError(device, kVUID_Core_Shader_AtomicFeature,
                         "%s: Can't use 64-bit float atomics for load/store/exchange operations (OpAtomicLoad, OpAtomicStore, "
                         "OpAtomicExchange) with StorageBuffer storage class without shaderBufferFloat64Atomics enabled.",
                         report_data->FormatHandle(src->vk_shader_module()).c_str());
    }
}
} else if (atomic.storage_class == spv::StorageClassWorkgroup) {
    if (valid_workgroup_float == false) {
        skip |= LogError(device, kVUID_Core_Shader_AtomicFeature,
                         "%s: Can't use float atomics operations with Workgroup storage class without shaderSharedFloat32Atomics or "
                         "shaderSharedFloat32AtomicAdd or shaderSharedFloat64Atomics or shaderSharedFloat64AtomicAdd or "
                         "shaderSharedFloat16Atomics or shaderSharedFloat16AtomicAdd or shaderSharedFloat16AtomicMinMax or "
                         "shaderSharedFloat32AtomicMinMax or shaderSharedFloat64AtomicMinMax enabled.",
                         report_data->FormatHandle(src->vk_shader_module()).c_str());
    } else if (opcode == spv::OpAtomicFAddEXT) {
        if ((atomic.bit_width == 16) && (float2_features.shaderSharedFloat16AtomicAdd == VK_FALSE)) {
            skip |= LogError(device, kVUID_Core_Shader_AtomicFeature,
                             "%s: Can't use 16-bit float atomics for add operations (OpAtomicFAddEXT) with Workgroup "
                             "storage class without shaderSharedFloat16AtomicAdd enabled.",
                             report_data->FormatHandle(src->vk_shader_module()).c_str());
        } else if ((atomic.bit_width == 32) && (float_features.shaderSharedFloat32AtomicAdd == VK_FALSE)) {
            skip |= LogError(device, kVUID_Core_Shader_AtomicFeature,
                             "%s: Can't use 32-bit float atomics for add operations (OpAtomicFAddEXT) with Workgroup "
                             "storage class without shaderSharedFloat32AtomicAdd enabled.",
                             report_data->FormatHandle(src->vk_shader_module()).c_str());
        } else if ((atomic.bit_width == 64) && (float_features.shaderSharedFloat64AtomicAdd == VK_FALSE)) {
            skip |= LogError(device, kVUID_Core_Shader_AtomicFeature,
                             "%s: Can't use 64-bit float atomics for add operations (OpAtomicFAddEXT) with Workgroup "
                             "storage class without shaderSharedFloat64AtomicAdd enabled.",
                             report_data->FormatHandle(src->vk_shader_module()).c_str());
        }
    } else if (opcode == spv::OpAtomicFMinEXT || opcode == spv::OpAtomicFMaxEXT) {
        if ((atomic.bit_width == 16) && (float2_features.shaderSharedFloat16AtomicMinMax == VK_FALSE)) {
            skip |= LogError(device, kVUID_Core_Shader_AtomicFeature,
                             "%s: Can't use 16-bit float atomics for min/max operations (OpAtomicFMinEXT or OpAtomicFMaxEXT) with "
                             "Workgroup storage class without shaderSharedFloat16AtomicMinMax enabled.",
                             report_data->FormatHandle(src->vk_shader_module()).c_str());
        } else if ((atomic.bit_width == 32) && (float2_features.shaderSharedFloat32AtomicMinMax == VK_FALSE)) {
            skip |= LogError(device, kVUID_Core_Shader_AtomicFeature,
                             "%s: Can't use 32-bit float atomics for min/max operations (OpAtomicFMinEXT or OpAtomicFMaxEXT) with "
                             "Workgroup storage class without shaderSharedFloat32AtomicMinMax enabled.",
                             report_data->FormatHandle(src->vk_shader_module()).c_str());
        } else if ((atomic.bit_width == 64) && (float2_features.shaderSharedFloat64AtomicMinMax == VK_FALSE)) {
            skip |= LogError(device, kVUID_Core_Shader_AtomicFeature,
                             "%s: Can't use 64-bit float atomics for min/max operations (OpAtomicFMinEXT or OpAtomicFMaxEXT) with "
                             "Workgroup storage class without shaderSharedFloat64AtomicMinMax enabled.",
                             report_data->FormatHandle(src->vk_shader_module()).c_str());
        }
    } else {
        // Assume is valid load/store/exchange (rest of supported atomic operations) or else spirv-val will catch
        if ((atomic.bit_width == 16) && (float2_features.shaderSharedFloat16Atomics == VK_FALSE)) {
            skip |= LogError(device, kVUID_Core_Shader_AtomicFeature,
                             "%s: Can't use 16-bit float atomics for load/store/exchange operations (OpAtomicLoad, OpAtomicStore, "
                             "OpAtomicExchange) with Workgroup storage class without shaderSharedFloat16Atomics enabled.",
                             report_data->FormatHandle(src->vk_shader_module()).c_str());
        } else if ((atomic.bit_width == 32) && (float_features.shaderSharedFloat32Atomics == VK_FALSE)) {
            skip |= LogError(device, kVUID_Core_Shader_AtomicFeature,
                             "%s: Can't use 32-bit float atomics for load/store/exchange operations (OpAtomicLoad, OpAtomicStore, "
                             "OpAtomicExchange) with Workgroup storage class without shaderSharedFloat32Atomics enabled.",
                             report_data->FormatHandle(src->vk_shader_module()).c_str());
        } else if ((atomic.bit_width == 64) && (float_features.shaderSharedFloat64Atomics == VK_FALSE)) {
            skip |= LogError(device, kVUID_Core_Shader_AtomicFeature,
                             "%s: Can't use 64-bit float atomics for load/store/exchange operations (OpAtomicLoad, OpAtomicStore, "
                             "OpAtomicExchange) with Workgroup storage class without shaderSharedFloat64Atomics enabled.",
                             report_data->FormatHandle(src->vk_shader_module()).c_str());
        }
    }
} else if ((atomic.storage_class == spv::StorageClassImage)
&& (valid_image_float == false)) { skip |= LogError(device, kVUID_Core_Shader_AtomicFeature, "%s: Can't use float atomics operations with Image storage class without shaderImageFloat32Atomics or " "shaderImageFloat32AtomicAdd or shaderImageFloat32AtomicMinMax enabled.", report_data->FormatHandle(src->vk_shader_module()).c_str()); } else if ((atomic.bit_width == 16) && (valid_16_float == false)) { skip |= LogError(device, kVUID_Core_Shader_AtomicFeature, "%s: Can't use 16-bit float atomics operations without shaderBufferFloat16Atomics, " "shaderBufferFloat16AtomicAdd, shaderBufferFloat16AtomicMinMax, shaderSharedFloat16Atomics, " "shaderSharedFloat16AtomicAdd or shaderSharedFloat16AtomicMinMax enabled.", report_data->FormatHandle(src->vk_shader_module()).c_str()); } else if ((atomic.bit_width == 32) && (valid_32_float == false)) { skip |= LogError(device, kVUID_Core_Shader_AtomicFeature, "%s: Can't use 32-bit float atomics operations without shaderBufferFloat32AtomicMinMax, " "shaderSharedFloat32AtomicMinMax, shaderImageFloat32AtomicMinMax, sparseImageFloat32AtomicMinMax, " "shaderBufferFloat32Atomics, shaderBufferFloat32AtomicAdd, shaderSharedFloat32Atomics, " "shaderSharedFloat32AtomicAdd, shaderImageFloat32Atomics, shaderImageFloat32AtomicAdd, " "sparseImageFloat32Atomics or sparseImageFloat32AtomicAdd enabled.", report_data->FormatHandle(src->vk_shader_module()).c_str()); } else if ((atomic.bit_width == 64) && (valid_64_float == false)) { skip |= LogError(device, kVUID_Core_Shader_AtomicFeature, "%s: Can't use 64-bit float atomics operations without shaderBufferFloat64AtomicMinMax, " "shaderSharedFloat64AtomicMinMax, shaderBufferFloat64Atomics, shaderBufferFloat64AtomicAdd, " "shaderSharedFloat64Atomics or shaderSharedFloat64AtomicAdd enabled.", report_data->FormatHandle(src->vk_shader_module()).c_str()); } } } return skip; } bool CoreChecks::ValidateExecutionModes(SHADER_MODULE_STATE const *src, spirv_inst_iter entrypoint) const { auto entrypoint_id = entrypoint.word(2); // The first denorm execution mode encountered, along with its bit width. // Used to check if SeparateDenormSettings is respected. std::pair<spv::ExecutionMode, uint32_t> first_denorm_execution_mode = std::make_pair(spv::ExecutionModeMax, 0); // The first rounding mode encountered, along with its bit width. // Used to check if SeparateRoundingModeSettings is respected. 
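// The *Independence properties bound how much these modes may vary across bit widths:
// VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_32_BIT_ONLY allows only the 32-bit setting to differ from the shared
// 16/64-bit setting, _ALL lets every width differ, and _NONE requires one setting for all widths. The checks
// below mirror that by comparing each newly seen mode/width pair against the first one recorded.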
std::pair<spv::ExecutionMode, uint32_t> first_rounding_mode = std::make_pair(spv::ExecutionModeMax, 0); bool skip = false; uint32_t vertices_out = 0; uint32_t invocations = 0; auto it = src->execution_mode_inst.find(entrypoint_id); if (it != src->execution_mode_inst.end()) { for (auto insn : it->second) { auto mode = insn.word(2); switch (mode) { case spv::ExecutionModeSignedZeroInfNanPreserve: { auto bit_width = insn.word(3); if ((bit_width == 16 && !phys_dev_props_core12.shaderSignedZeroInfNanPreserveFloat16) || (bit_width == 32 && !phys_dev_props_core12.shaderSignedZeroInfNanPreserveFloat32) || (bit_width == 64 && !phys_dev_props_core12.shaderSignedZeroInfNanPreserveFloat64)) { skip |= LogError( device, kVUID_Core_Shader_FeatureNotEnabled, "Shader requires SignedZeroInfNanPreserve for bit width %d but it is not enabled on the device", bit_width); } break; } case spv::ExecutionModeDenormPreserve: { auto bit_width = insn.word(3); if ((bit_width == 16 && !phys_dev_props_core12.shaderDenormPreserveFloat16) || (bit_width == 32 && !phys_dev_props_core12.shaderDenormPreserveFloat32) || (bit_width == 64 && !phys_dev_props_core12.shaderDenormPreserveFloat64)) { skip |= LogError(device, kVUID_Core_Shader_FeatureNotEnabled, "Shader requires DenormPreserve for bit width %d but it is not enabled on the device", bit_width); } if (first_denorm_execution_mode.first == spv::ExecutionModeMax) { // Register the first denorm execution mode found first_denorm_execution_mode = std::make_pair(static_cast<spv::ExecutionMode>(mode), bit_width); } else if (first_denorm_execution_mode.first != mode && first_denorm_execution_mode.second != bit_width) { switch (phys_dev_props_core12.denormBehaviorIndependence) { case VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_32_BIT_ONLY: if (first_rounding_mode.second != 32 && bit_width != 32) { skip |= LogError(device, kVUID_Core_Shader_FeatureNotEnabled, "Shader uses different denorm execution modes for 16 and 64-bit but " "denormBehaviorIndependence is " "VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_32_BIT_ONLY on the device"); } break; case VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_ALL: break; case VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_NONE: skip |= LogError(device, kVUID_Core_Shader_FeatureNotEnabled, "Shader uses different denorm execution modes for different bit widths but " "denormBehaviorIndependence is " "VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_NONE on the device"); break; default: break; } } break; } case spv::ExecutionModeDenormFlushToZero: { auto bit_width = insn.word(3); if ((bit_width == 16 && !phys_dev_props_core12.shaderDenormFlushToZeroFloat16) || (bit_width == 32 && !phys_dev_props_core12.shaderDenormFlushToZeroFloat32) || (bit_width == 64 && !phys_dev_props_core12.shaderDenormFlushToZeroFloat64)) { skip |= LogError(device, kVUID_Core_Shader_FeatureNotEnabled, "Shader requires DenormFlushToZero for bit width %d but it is not enabled on the device", bit_width); } if (first_denorm_execution_mode.first == spv::ExecutionModeMax) { // Register the first denorm execution mode found first_denorm_execution_mode = std::make_pair(static_cast<spv::ExecutionMode>(mode), bit_width); } else if (first_denorm_execution_mode.first != mode && first_denorm_execution_mode.second != bit_width) { switch (phys_dev_props_core12.denormBehaviorIndependence) { case VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_32_BIT_ONLY: if (first_rounding_mode.second != 32 && bit_width != 32) { skip |= LogError(device, kVUID_Core_Shader_FeatureNotEnabled, "Shader uses different denorm execution modes for 16 and 64-bit but " 
"denormBehaviorIndependence is " "VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_32_BIT_ONLY on the device"); } break; case VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_ALL: break; case VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_NONE: skip |= LogError(device, kVUID_Core_Shader_FeatureNotEnabled, "Shader uses different denorm execution modes for different bit widths but " "denormBehaviorIndependence is " "VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_NONE on the device"); break; default: break; } } break; } case spv::ExecutionModeRoundingModeRTE: { auto bit_width = insn.word(3); if ((bit_width == 16 && !phys_dev_props_core12.shaderRoundingModeRTEFloat16) || (bit_width == 32 && !phys_dev_props_core12.shaderRoundingModeRTEFloat32) || (bit_width == 64 && !phys_dev_props_core12.shaderRoundingModeRTEFloat64)) { skip |= LogError(device, kVUID_Core_Shader_FeatureNotEnabled, "Shader requires RoundingModeRTE for bit width %d but it is not enabled on the device", bit_width); } if (first_rounding_mode.first == spv::ExecutionModeMax) { // Register the first rounding mode found first_rounding_mode = std::make_pair(static_cast<spv::ExecutionMode>(mode), bit_width); } else if (first_rounding_mode.first != mode && first_rounding_mode.second != bit_width) { switch (phys_dev_props_core12.roundingModeIndependence) { case VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_32_BIT_ONLY: if (first_rounding_mode.second != 32 && bit_width != 32) { skip |= LogError(device, kVUID_Core_Shader_FeatureNotEnabled, "Shader uses different rounding modes for 16 and 64-bit but " "roundingModeIndependence is " "VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_32_BIT_ONLY on the device"); } break; case VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_ALL: break; case VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_NONE: skip |= LogError(device, kVUID_Core_Shader_FeatureNotEnabled, "Shader uses different rounding modes for different bit widths but " "roundingModeIndependence is " "VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_NONE on the device"); break; default: break; } } break; } case spv::ExecutionModeRoundingModeRTZ: { auto bit_width = insn.word(3); if ((bit_width == 16 && !phys_dev_props_core12.shaderRoundingModeRTZFloat16) || (bit_width == 32 && !phys_dev_props_core12.shaderRoundingModeRTZFloat32) || (bit_width == 64 && !phys_dev_props_core12.shaderRoundingModeRTZFloat64)) { skip |= LogError(device, kVUID_Core_Shader_FeatureNotEnabled, "Shader requires RoundingModeRTZ for bit width %d but it is not enabled on the device", bit_width); } if (first_rounding_mode.first == spv::ExecutionModeMax) { // Register the first rounding mode found first_rounding_mode = std::make_pair(static_cast<spv::ExecutionMode>(mode), bit_width); } else if (first_rounding_mode.first != mode && first_rounding_mode.second != bit_width) { switch (phys_dev_props_core12.roundingModeIndependence) { case VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_32_BIT_ONLY: if (first_rounding_mode.second != 32 && bit_width != 32) { skip |= LogError(device, kVUID_Core_Shader_FeatureNotEnabled, "Shader uses different rounding modes for 16 and 64-bit but " "roundingModeIndependence is " "VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_32_BIT_ONLY on the device"); } break; case VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_ALL: break; case VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_NONE: skip |= LogError(device, kVUID_Core_Shader_FeatureNotEnabled, "Shader uses different rounding modes for different bit widths but " "roundingModeIndependence is " "VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_NONE on the device"); break; default: break; } } break; } case spv::ExecutionModeOutputVertices: 
{ vertices_out = insn.word(3); break; } case spv::ExecutionModeInvocations: { invocations = insn.word(3); break; } } } } if (entrypoint.word(1) == spv::ExecutionModelGeometry) { if (vertices_out == 0 || vertices_out > phys_dev_props.limits.maxGeometryOutputVertices) { skip |= LogError(device, "VUID-VkPipelineShaderStageCreateInfo-stage-00714", "Geometry shader entry point must have an OpExecutionMode instruction that " "specifies a maximum output vertex count that is greater than 0 and less " "than or equal to maxGeometryOutputVertices. " "OutputVertices=%d, maxGeometryOutputVertices=%d", vertices_out, phys_dev_props.limits.maxGeometryOutputVertices); } if (invocations == 0 || invocations > phys_dev_props.limits.maxGeometryShaderInvocations) { skip |= LogError(device, "VUID-VkPipelineShaderStageCreateInfo-stage-00715", "Geometry shader entry point must have an OpExecutionMode instruction that " "specifies an invocation count that is greater than 0 and less " "than or equal to maxGeometryShaderInvocations. " "Invocations=%d, maxGeometryShaderInvocations=%d", invocations, phys_dev_props.limits.maxGeometryShaderInvocations); } } return skip; } // For given pipelineLayout verify that the set_layout_node at slot.first // has the requested binding at slot.second and return ptr to that binding static VkDescriptorSetLayoutBinding const *GetDescriptorBinding(PIPELINE_LAYOUT_STATE const *pipelineLayout, descriptor_slot_t slot) { if (!pipelineLayout) return nullptr; if (slot.first >= pipelineLayout->set_layouts.size()) return nullptr; return pipelineLayout->set_layouts[slot.first]->GetDescriptorSetLayoutBindingPtrFromBinding(slot.second); } // If PointList topology is specified in the pipeline, verify that a shader geometry stage writes PointSize // o If there is only a vertex shader : gl_PointSize must be written when using points // o If there is a geometry or tessellation shader: // - If shaderTessellationAndGeometryPointSize feature is enabled: // * gl_PointSize must be written in the final geometry stage // - If shaderTessellationAndGeometryPointSize feature is disabled: // * gl_PointSize must NOT be written and a default of 1.0 is assumed bool CoreChecks::ValidatePointListShaderState(const PIPELINE_STATE *pipeline, SHADER_MODULE_STATE const *src, spirv_inst_iter entrypoint, VkShaderStageFlagBits stage) const { if (pipeline->topology_at_rasterizer != VK_PRIMITIVE_TOPOLOGY_POINT_LIST) { return false; } bool pointsize_written = false; bool skip = false; // Search for PointSize built-in decorations for (auto set : src->builtin_decoration_list) { auto insn = src->at(set.offset); if (set.builtin == spv::BuiltInPointSize) { pointsize_written = src->IsBuiltInWritten(insn, entrypoint); if (pointsize_written) { break; } } } if ((stage == VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT || stage == VK_SHADER_STAGE_GEOMETRY_BIT) && !enabled_features.core.shaderTessellationAndGeometryPointSize) { if (pointsize_written) { skip |= LogError(pipeline->pipeline(), kVUID_Core_Shader_PointSizeBuiltInOverSpecified, "Pipeline topology is set to POINT_LIST and geometry or tessellation shaders write PointSize which " "is prohibited when the shaderTessellationAndGeometryPointSize feature is not enabled."); } } else if (!pointsize_written) { skip |= LogError(pipeline->pipeline(), kVUID_Core_Shader_MissingPointSizeBuiltIn, "Pipeline topology is set to POINT_LIST, but PointSize is not written to in the shader corresponding to %s.", string_VkShaderStageFlagBits(stage)); } return skip; } bool 
CoreChecks::ValidatePrimitiveRateShaderState(const PIPELINE_STATE *pipeline, SHADER_MODULE_STATE const *src,
                                             spirv_inst_iter entrypoint, VkShaderStageFlagBits stage) const {
    bool primitiverate_written = false;
    bool viewportindex_written = false;
    bool viewportmask_written = false;
    bool skip = false;
    // Check if the primitive shading rate is written
    for (auto set : src->builtin_decoration_list) {
        auto insn = src->at(set.offset);
        if (set.builtin == spv::BuiltInPrimitiveShadingRateKHR) {
            primitiverate_written = src->IsBuiltInWritten(insn, entrypoint);
        } else if (set.builtin == spv::BuiltInViewportIndex) {
            viewportindex_written = src->IsBuiltInWritten(insn, entrypoint);
        } else if (set.builtin == spv::BuiltInViewportMaskNV) {
            viewportmask_written = src->IsBuiltInWritten(insn, entrypoint);
        }
        if (primitiverate_written && viewportindex_written && viewportmask_written) {
            break;
        }
    }
    if (!phys_dev_ext_props.fragment_shading_rate_props.primitiveFragmentShadingRateWithMultipleViewports &&
        pipeline->graphicsPipelineCI.pViewportState) {
        if (!IsDynamic(pipeline, VK_DYNAMIC_STATE_VIEWPORT_WITH_COUNT_EXT) &&
            pipeline->graphicsPipelineCI.pViewportState->viewportCount > 1 && primitiverate_written) {
            skip |= LogError(pipeline->pipeline(),
                             "VUID-VkGraphicsPipelineCreateInfo-primitiveFragmentShadingRateWithMultipleViewports-04503",
                             "vkCreateGraphicsPipelines: %s shader statically writes to PrimitiveShadingRateKHR built-in, but "
                             "multiple viewports "
                             "are used and the primitiveFragmentShadingRateWithMultipleViewports limit is not supported.",
                             string_VkShaderStageFlagBits(stage));
        }
        if (primitiverate_written && viewportindex_written) {
            skip |= LogError(pipeline->pipeline(),
                             "VUID-VkGraphicsPipelineCreateInfo-primitiveFragmentShadingRateWithMultipleViewports-04504",
                             "vkCreateGraphicsPipelines: %s shader statically writes to both PrimitiveShadingRateKHR and "
                             "ViewportIndex built-ins, "
                             "but the primitiveFragmentShadingRateWithMultipleViewports limit is not supported.",
                             string_VkShaderStageFlagBits(stage));
        }
        if (primitiverate_written && viewportmask_written) {
            skip |= LogError(pipeline->pipeline(),
                             "VUID-VkGraphicsPipelineCreateInfo-primitiveFragmentShadingRateWithMultipleViewports-04505",
                             "vkCreateGraphicsPipelines: %s shader statically writes to both PrimitiveShadingRateKHR and "
                             "ViewportMaskNV built-ins, "
                             "but the primitiveFragmentShadingRateWithMultipleViewports limit is not supported.",
                             string_VkShaderStageFlagBits(stage));
        }
    }
    return skip;
}

// Validate runtime usage of various opcodes that depends on what Vulkan properties or features are exposed
bool CoreChecks::ValidatePropertiesAndFeatures(SHADER_MODULE_STATE const *module, spirv_inst_iter &insn) const {
    bool skip = false;
    switch (insn.opcode()) {
        case spv::OpReadClockKHR: {
            auto scope_id = module->get_def(insn.word(3));
            auto scope_type = scope_id.word(3);
            // if scope isn't Subgroup or Device, spirv-val will catch
            if ((scope_type == spv::ScopeSubgroup) && (enabled_features.shader_clock_features.shaderSubgroupClock == VK_FALSE)) {
                skip |= LogError(device, "UNASSIGNED-spirv-shaderClock-shaderSubgroupClock",
                                 "%s: OpReadClockKHR is used with a Subgroup scope but shaderSubgroupClock was not enabled.",
                                 report_data->FormatHandle(module->vk_shader_module()).c_str());
            } else if ((scope_type == spv::ScopeDevice) && (enabled_features.shader_clock_features.shaderDeviceClock == VK_FALSE)) {
                skip |= LogError(device, "UNASSIGNED-spirv-shaderClock-shaderDeviceClock",
                                 "%s: OpReadClockKHR is used with a Device scope but shaderDeviceClock was not enabled.",
report_data->FormatHandle(module->vk_shader_module()).c_str()); } break; } } return skip; } bool CoreChecks::ValidatePipelineShaderStage(VkPipelineShaderStageCreateInfo const *pStage, const PIPELINE_STATE *pipeline, const PipelineStageState &stage_state, const SHADER_MODULE_STATE *module, const spirv_inst_iter &entrypoint, bool check_point_size) const { bool skip = false; // Check the module if (!module->has_valid_spirv) { skip |= LogError(device, "VUID-VkPipelineShaderStageCreateInfo-module-parameter", "%s does not contain valid spirv for stage %s.", report_data->FormatHandle(module->vk_shader_module()).c_str(), string_VkShaderStageFlagBits(pStage->stage)); } // If specialization-constant values are given and specialization-constant instructions are present in the shader, the // specializations should be applied and validated. if (pStage->pSpecializationInfo != nullptr && pStage->pSpecializationInfo->mapEntryCount > 0 && pStage->pSpecializationInfo->pMapEntries != nullptr && module->has_specialization_constants) { // Gather the specialization-constant values. auto const &specialization_info = pStage->pSpecializationInfo; auto const &specialization_data = reinterpret_cast<uint8_t const *>(specialization_info->pData); std::unordered_map<uint32_t, std::vector<uint32_t>> id_value_map; // note: this must be std:: to work with spvtools id_value_map.reserve(specialization_info->mapEntryCount); for (auto i = 0u; i < specialization_info->mapEntryCount; ++i) { auto const &map_entry = specialization_info->pMapEntries[i]; auto itr = module->spec_const_map.find(map_entry.constantID); // "If a constantID value is not a specialization constant ID used in the shader, that map entry does not affect the // behavior of the pipeline." if (itr != module->spec_const_map.cend()) { // Make sure map_entry.size matches the spec constant's size uint32_t spec_const_size = decoration_set::kInvalidValue; const auto def_ins = module->get_def(itr->second); const auto type_ins = module->get_def(def_ins.word(1)); // Specialization constants can only be of type bool, scalar integer, or scalar floating point switch (type_ins.opcode()) { case spv::OpTypeBool: // "If the specialization constant is of type boolean, size must be the byte size of VkBool32" spec_const_size = sizeof(VkBool32); break; case spv::OpTypeInt: case spv::OpTypeFloat: spec_const_size = type_ins.word(2) / 8; break; default: // spirv-val should catch if SpecId is not used on a OpSpecConstantTrue/OpSpecConstantFalse/OpSpecConstant // and OpSpecConstant is validated to be a OpTypeInt or OpTypeFloat break; } if (map_entry.size != spec_const_size) { skip |= LogError(device, "VUID-VkSpecializationMapEntry-constantID-00776", "Specialization constant (ID = %" PRIu32 ", entry = %" PRIu32 ") has invalid size %zu in shader module %s. 
Expected size is %" PRIu32 " from shader definition.", map_entry.constantID, i, map_entry.size, report_data->FormatHandle(module->vk_shader_module()).c_str(), spec_const_size); } } if ((map_entry.offset + map_entry.size) <= specialization_info->dataSize) { // Allocate enough room for ceil(map_entry.size / 4) to store entries std::vector<uint32_t> entry_data((map_entry.size + 4 - 1) / 4, 0); uint8_t *out_p = reinterpret_cast<uint8_t *>(entry_data.data()); const uint8_t *const start_in_p = specialization_data + map_entry.offset; const uint8_t *const end_in_p = start_in_p + map_entry.size; std::copy(start_in_p, end_in_p, out_p); id_value_map.emplace(map_entry.constantID, std::move(entry_data)); } } // Apply the specialization-constant values and revalidate the shader module. spv_target_env spirv_environment = PickSpirvEnv(api_version, (device_extensions.vk_khr_spirv_1_4 != kNotEnabled)); spvtools::Optimizer optimizer(spirv_environment); spvtools::MessageConsumer consumer = [&skip, &module, &pStage, this](spv_message_level_t level, const char *source, const spv_position_t &position, const char *message) { skip |= LogError( device, "VUID-VkPipelineShaderStageCreateInfo-module-parameter", "%s does not contain valid spirv for stage %s. %s", report_data->FormatHandle(module->vk_shader_module()).c_str(), string_VkShaderStageFlagBits(pStage->stage), message); }; optimizer.SetMessageConsumer(consumer); optimizer.RegisterPass(spvtools::CreateSetSpecConstantDefaultValuePass(id_value_map)); optimizer.RegisterPass(spvtools::CreateFreezeSpecConstantValuePass()); std::vector<uint32_t> specialized_spirv; auto const optimized = optimizer.Run(module->words.data(), module->words.size(), &specialized_spirv); assert(optimized == true); if (optimized) { spv_context ctx = spvContextCreate(spirv_environment); spv_const_binary_t binary{specialized_spirv.data(), specialized_spirv.size()}; spv_diagnostic diag = nullptr; spvtools::ValidatorOptions options; AdjustValidatorOptions(device_extensions, enabled_features, options); auto const spv_valid = spvValidateWithOptions(ctx, options, &binary, &diag); if (spv_valid != SPV_SUCCESS) { skip |= LogError(device, "VUID-VkPipelineShaderStageCreateInfo-module-04145", "After specialization was applied, %s does not contain valid spirv for stage %s.", report_data->FormatHandle(module->vk_shader_module()).c_str(), string_VkShaderStageFlagBits(pStage->stage)); } spvDiagnosticDestroy(diag); spvContextDestroy(ctx); } skip |= ValidateWorkgroupSize(module, pStage, id_value_map); } // Check the entrypoint if (entrypoint == module->end()) { skip |= LogError(device, "VUID-VkPipelineShaderStageCreateInfo-pName-00707", "No entrypoint found named `%s` for stage %s.", pStage->pName, string_VkShaderStageFlagBits(pStage->stage)); } if (skip) return true; // no point continuing beyond here, any analysis is just going to be garbage. // Mark accessible ids auto &accessible_ids = stage_state.accessible_ids; // Validate descriptor set layout against what the entrypoint actually uses bool has_writable_descriptor = stage_state.has_writable_descriptor; auto &descriptor_uses = stage_state.descriptor_uses; // The following tries to limit the number of passes through the shader module. 
The validation passes in here are "stateless" // and mainly only checking the instruction in detail for a single operation uint32_t total_shared_size = 0; for (auto insn : *module) { skip |= ValidateShaderCapabilitiesAndExtensions(module, insn); skip |= ValidatePropertiesAndFeatures(module, insn); skip |= ValidateShaderStageGroupNonUniform(module, pStage->stage, insn); total_shared_size += module->CalcComputeSharedMemory(pStage->stage, insn); } if (total_shared_size > phys_dev_props.limits.maxComputeSharedMemorySize) { skip |= LogError(device, kVUID_Core_Shader_MaxComputeSharedMemorySize, "Shader uses %" PRIu32 " bytes of shared memory, more than allowed by physicalDeviceLimits::maxComputeSharedMemorySize (%" PRIu32 ")", total_shared_size, phys_dev_props.limits.maxComputeSharedMemorySize); } skip |= ValidateShaderStageWritableOrAtomicDescriptor(pStage->stage, has_writable_descriptor, stage_state.has_atomic_descriptor); skip |= ValidateShaderStageInputOutputLimits(module, pStage, pipeline, entrypoint); skip |= ValidateShaderStorageImageFormats(module); skip |= ValidateShaderStageMaxResources(pStage->stage, pipeline); skip |= ValidateAtomicsTypes(module); skip |= ValidateExecutionModes(module, entrypoint); skip |= ValidateSpecializations(pStage); if (check_point_size && !pipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable) { skip |= ValidatePointListShaderState(pipeline, module, entrypoint, pStage->stage); } skip |= ValidateBuiltinLimits(module, entrypoint); if (enabled_features.cooperative_matrix_features.cooperativeMatrix) { skip |= ValidateCooperativeMatrix(module, pStage, pipeline); } if (enabled_features.fragment_shading_rate_features.primitiveFragmentShadingRate) { skip |= ValidatePrimitiveRateShaderState(pipeline, module, entrypoint, pStage->stage); } if (device_extensions.vk_qcom_render_pass_shader_resolve != kNotEnabled) { skip |= ValidateShaderResolveQCOM(module, pStage, pipeline); } // "layout must be consistent with the layout of the * shader" // 'consistent' -> #descriptorsets-pipelinelayout-consistency std::string vuid_layout_mismatch; if (pipeline->graphicsPipelineCI.sType == VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO) { vuid_layout_mismatch = "VUID-VkGraphicsPipelineCreateInfo-layout-00756"; } else if (pipeline->computePipelineCI.sType == VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO) { vuid_layout_mismatch = "VUID-VkComputePipelineCreateInfo-layout-00703"; } else if (pipeline->raytracingPipelineCI.sType == VK_STRUCTURE_TYPE_RAY_TRACING_PIPELINE_CREATE_INFO_KHR) { vuid_layout_mismatch = "VUID-VkRayTracingPipelineCreateInfoKHR-layout-03427"; } else if (pipeline->raytracingPipelineCI.sType == VK_STRUCTURE_TYPE_RAY_TRACING_PIPELINE_CREATE_INFO_NV) { vuid_layout_mismatch = "VUID-VkRayTracingPipelineCreateInfoNV-layout-03427"; } // Validate Push Constants use skip |= ValidatePushConstantUsage(*pipeline, module, pStage, vuid_layout_mismatch); // Validate descriptor use for (auto use : descriptor_uses) { // Verify given pipelineLayout has requested setLayout with requested binding const auto &binding = GetDescriptorBinding(pipeline->pipeline_layout.get(), use.first); unsigned required_descriptor_count; bool is_khr = binding && binding->descriptorType == VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR; std::set<uint32_t> descriptor_types = TypeToDescriptorTypeSet(module, use.second.type_id, required_descriptor_count, is_khr); if (!binding) { skip |= LogError(device, vuid_layout_mismatch, "Shader uses descriptor slot %u.%u (expected `%s`) but not declared in 
pipeline layout", use.first.first, use.first.second, string_descriptorTypes(descriptor_types).c_str()); } else if (~binding->stageFlags & pStage->stage) { skip |= LogError(device, vuid_layout_mismatch, "Shader uses descriptor slot %u.%u but descriptor not accessible from stage %s", use.first.first, use.first.second, string_VkShaderStageFlagBits(pStage->stage)); } else if ((binding->descriptorType != VK_DESCRIPTOR_TYPE_MUTABLE_VALVE) && (descriptor_types.find(binding->descriptorType) == descriptor_types.end())) { skip |= LogError(device, vuid_layout_mismatch, "Type mismatch on descriptor slot %u.%u (expected `%s`) but descriptor of type %s", use.first.first, use.first.second, string_descriptorTypes(descriptor_types).c_str(), string_VkDescriptorType(binding->descriptorType)); } else if (binding->descriptorCount < required_descriptor_count) { skip |= LogError(device, vuid_layout_mismatch, "Shader expects at least %u descriptors for binding %u.%u but only %u provided", required_descriptor_count, use.first.first, use.first.second, binding->descriptorCount); } } // Validate use of input attachments against subpass structure if (pStage->stage == VK_SHADER_STAGE_FRAGMENT_BIT) { auto input_attachment_uses = module->CollectInterfaceByInputAttachmentIndex(accessible_ids); auto rpci = pipeline->rp_state->createInfo.ptr(); auto subpass = pipeline->graphicsPipelineCI.subpass; for (auto use : input_attachment_uses) { auto input_attachments = rpci->pSubpasses[subpass].pInputAttachments; auto index = (input_attachments && use.first < rpci->pSubpasses[subpass].inputAttachmentCount) ? input_attachments[use.first].attachment : VK_ATTACHMENT_UNUSED; if (index == VK_ATTACHMENT_UNUSED) { skip |= LogError(device, kVUID_Core_Shader_MissingInputAttachment, "Shader consumes input attachment index %d but not provided in subpass", use.first); } else if (!(GetFormatType(rpci->pAttachments[index].format) & module->GetFundamentalType(use.second.type_id))) { skip |= LogError(device, kVUID_Core_Shader_InputAttachmentTypeMismatch, "Subpass input attachment %u format of %s does not match type used in shader `%s`", use.first, string_VkFormat(rpci->pAttachments[index].format), module->DescribeType(use.second.type_id).c_str()); } } } if (pStage->stage == VK_SHADER_STAGE_COMPUTE_BIT) { skip |= ValidateComputeWorkGroupSizes(module, entrypoint); } return skip; } bool CoreChecks::ValidateInterfaceBetweenStages(SHADER_MODULE_STATE const *producer, spirv_inst_iter producer_entrypoint, shader_stage_attributes const *producer_stage, SHADER_MODULE_STATE const *consumer, spirv_inst_iter consumer_entrypoint, shader_stage_attributes const *consumer_stage) const { bool skip = false; auto outputs = producer->CollectInterfaceByLocation(producer_entrypoint, spv::StorageClassOutput, producer_stage->arrayed_output); auto inputs = consumer->CollectInterfaceByLocation(consumer_entrypoint, spv::StorageClassInput, consumer_stage->arrayed_input); auto a_it = outputs.begin(); auto b_it = inputs.begin(); // Maps sorted by key (location); walk them together to find mismatches while ((outputs.size() > 0 && a_it != outputs.end()) || (inputs.size() && b_it != inputs.end())) { bool a_at_end = outputs.size() == 0 || a_it == outputs.end(); bool b_at_end = inputs.size() == 0 || b_it == inputs.end(); auto a_first = a_at_end ? std::make_pair(0u, 0u) : a_it->first; auto b_first = b_at_end ? 
std::make_pair(0u, 0u) : b_it->first; if (b_at_end || ((!a_at_end) && (a_first < b_first))) { skip |= LogPerformanceWarning(producer->vk_shader_module(), kVUID_Core_Shader_OutputNotConsumed, "%s writes to output location %u.%u which is not consumed by %s", producer_stage->name, a_first.first, a_first.second, consumer_stage->name); a_it++; } else if (a_at_end || a_first > b_first) { skip |= LogError(consumer->vk_shader_module(), kVUID_Core_Shader_InputNotProduced, "%s consumes input location %u.%u which is not written by %s", consumer_stage->name, b_first.first, b_first.second, producer_stage->name); b_it++; } else { // subtleties of arrayed interfaces: // - if is_patch, then the member is not arrayed, even though the interface may be. // - if is_block_member, then the extra array level of an arrayed interface is not // expressed in the member type -- it's expressed in the block type. if (!TypesMatch(producer, consumer, a_it->second.type_id, b_it->second.type_id, producer_stage->arrayed_output && !a_it->second.is_patch && !a_it->second.is_block_member, consumer_stage->arrayed_input && !b_it->second.is_patch && !b_it->second.is_block_member, true)) { skip |= LogError(producer->vk_shader_module(), kVUID_Core_Shader_InterfaceTypeMismatch, "Type mismatch on location %u.%u: '%s' vs '%s'", a_first.first, a_first.second, producer->DescribeType(a_it->second.type_id).c_str(), consumer->DescribeType(b_it->second.type_id).c_str()); } if (a_it->second.is_patch != b_it->second.is_patch) { skip |= LogError(producer->vk_shader_module(), kVUID_Core_Shader_InterfaceTypeMismatch, "Decoration mismatch on location %u.%u: is per-%s in %s stage but per-%s in %s stage", a_first.first, a_first.second, a_it->second.is_patch ? "patch" : "vertex", producer_stage->name, b_it->second.is_patch ? 
"patch" : "vertex", consumer_stage->name); } if (a_it->second.is_relaxed_precision != b_it->second.is_relaxed_precision) { skip |= LogError(producer->vk_shader_module(), kVUID_Core_Shader_InterfaceTypeMismatch, "Decoration mismatch on location %u.%u: %s and %s stages differ in precision", a_first.first, a_first.second, producer_stage->name, consumer_stage->name); } a_it++; b_it++; } } if (consumer_stage->stage != VK_SHADER_STAGE_FRAGMENT_BIT) { auto builtins_producer = producer->CollectBuiltinBlockMembers(producer_entrypoint, spv::StorageClassOutput); auto builtins_consumer = consumer->CollectBuiltinBlockMembers(consumer_entrypoint, spv::StorageClassInput); if (!builtins_producer.empty() && !builtins_consumer.empty()) { if (builtins_producer.size() != builtins_consumer.size()) { skip |= LogError(producer->vk_shader_module(), kVUID_Core_Shader_InterfaceTypeMismatch, "Number of elements inside builtin block differ between stages (%s %d vs %s %d).", producer_stage->name, static_cast<int>(builtins_producer.size()), consumer_stage->name, static_cast<int>(builtins_consumer.size())); } else { auto it_producer = builtins_producer.begin(); auto it_consumer = builtins_consumer.begin(); while (it_producer != builtins_producer.end() && it_consumer != builtins_consumer.end()) { if (*it_producer != *it_consumer) { skip |= LogError(producer->vk_shader_module(), kVUID_Core_Shader_InterfaceTypeMismatch, "Builtin variable inside block doesn't match between %s and %s.", producer_stage->name, consumer_stage->name); break; } it_producer++; it_consumer++; } } } } return skip; } static inline uint32_t DetermineFinalGeomStage(const PIPELINE_STATE *pipeline, const VkGraphicsPipelineCreateInfo *pCreateInfo) { uint32_t stage_mask = 0; if (pipeline->topology_at_rasterizer == VK_PRIMITIVE_TOPOLOGY_POINT_LIST) { for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) { stage_mask |= pCreateInfo->pStages[i].stage; } // Determine which shader in which PointSize should be written (the final geometry stage) if (stage_mask & VK_SHADER_STAGE_MESH_BIT_NV) { stage_mask = VK_SHADER_STAGE_MESH_BIT_NV; } else if (stage_mask & VK_SHADER_STAGE_GEOMETRY_BIT) { stage_mask = VK_SHADER_STAGE_GEOMETRY_BIT; } else if (stage_mask & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) { stage_mask = VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT; } else if (stage_mask & VK_SHADER_STAGE_VERTEX_BIT) { stage_mask = VK_SHADER_STAGE_VERTEX_BIT; } } return stage_mask; } // Validate that the shaders used by the given pipeline and store the active_slots // that are actually used by the pipeline into pPipeline->active_slots bool CoreChecks::ValidateGraphicsPipelineShaderState(const PIPELINE_STATE *pipeline) const { auto create_info = pipeline->graphicsPipelineCI.ptr(); int vertex_stage = GetShaderStageId(VK_SHADER_STAGE_VERTEX_BIT); int fragment_stage = GetShaderStageId(VK_SHADER_STAGE_FRAGMENT_BIT); const SHADER_MODULE_STATE *shaders[32]; memset(shaders, 0, sizeof(shaders)); spirv_inst_iter entrypoints[32]; bool skip = false; uint32_t pointlist_stage_mask = DetermineFinalGeomStage(pipeline, create_info); for (uint32_t i = 0; i < create_info->stageCount; i++) { auto stage = &create_info->pStages[i]; auto stage_id = GetShaderStageId(stage->stage); shaders[stage_id] = GetShaderModuleState(stage->module); entrypoints[stage_id] = shaders[stage_id]->FindEntrypoint(stage->pName, stage->stage); skip |= ValidatePipelineShaderStage(stage, pipeline, pipeline->stage_state[i], shaders[stage_id], entrypoints[stage_id], (pointlist_stage_mask == stage->stage)); } // if the 
shader stages are no good individually, cross-stage validation is pointless. if (skip) return true; auto vi = create_info->pVertexInputState; if (vi) { skip |= ValidateViConsistency(vi); } if (shaders[vertex_stage] && shaders[vertex_stage]->has_valid_spirv && !IsDynamic(pipeline, VK_DYNAMIC_STATE_VERTEX_INPUT_EXT)) { skip |= ValidateViAgainstVsInputs(vi, shaders[vertex_stage], entrypoints[vertex_stage]); } int producer = GetShaderStageId(VK_SHADER_STAGE_VERTEX_BIT); int consumer = GetShaderStageId(VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT); while (!shaders[producer] && producer != fragment_stage) { producer++; consumer++; } for (; producer != fragment_stage && consumer <= fragment_stage; consumer++) { assert(shaders[producer]); if (shaders[consumer]) { if (shaders[consumer]->has_valid_spirv && shaders[producer]->has_valid_spirv) { skip |= ValidateInterfaceBetweenStages(shaders[producer], entrypoints[producer], &shader_stage_attribs[producer], shaders[consumer], entrypoints[consumer], &shader_stage_attribs[consumer]); } producer = consumer; } } if (shaders[fragment_stage] && shaders[fragment_stage]->has_valid_spirv) { skip |= ValidateFsOutputsAgainstRenderPass(shaders[fragment_stage], entrypoints[fragment_stage], pipeline, create_info->subpass); } return skip; } void CoreChecks::RecordGraphicsPipelineShaderDynamicState(PIPELINE_STATE *pipeline_state) { auto create_info = pipeline_state->graphicsPipelineCI.ptr(); if (phys_dev_ext_props.fragment_shading_rate_props.primitiveFragmentShadingRateWithMultipleViewports || !IsDynamic(pipeline_state, VK_DYNAMIC_STATE_VIEWPORT_WITH_COUNT_EXT)) { return; } std::array<const SHADER_MODULE_STATE *, 32> shaders; std::fill(shaders.begin(), shaders.end(), nullptr); spirv_inst_iter entrypoints[32]; for (uint32_t i = 0; i < create_info->stageCount; i++) { auto stage = &create_info->pStages[i]; auto stage_id = GetShaderStageId(stage->stage); shaders[stage_id] = GetShaderModuleState(stage->module); entrypoints[stage_id] = shaders[stage_id]->FindEntrypoint(stage->pName, stage->stage); if (stage->stage == VK_SHADER_STAGE_VERTEX_BIT || stage->stage == VK_SHADER_STAGE_GEOMETRY_BIT || stage->stage == VK_SHADER_STAGE_MESH_BIT_NV) { bool primitiverate_written = false; for (auto set : shaders[stage_id]->builtin_decoration_list) { auto insn = shaders[stage_id]->at(set.offset); if (set.builtin == spv::BuiltInPrimitiveShadingRateKHR) { primitiverate_written = shaders[stage_id]->IsBuiltInWritten(insn, entrypoints[stage_id]); } if (primitiverate_written) { break; } } if (primitiverate_written) { pipeline_state->wrote_primitive_shading_rate.insert(stage->stage); } } } } bool CoreChecks::ValidateGraphicsPipelineShaderDynamicState(const PIPELINE_STATE *pipeline, const CMD_BUFFER_STATE *pCB, const char *caller, const DrawDispatchVuid &vuid) const { auto create_info = pipeline->graphicsPipelineCI.ptr(); bool skip = false; for (uint32_t i = 0; i < create_info->stageCount; i++) { auto stage = &create_info->pStages[i]; if (stage->stage == VK_SHADER_STAGE_VERTEX_BIT || stage->stage == VK_SHADER_STAGE_GEOMETRY_BIT || stage->stage == VK_SHADER_STAGE_MESH_BIT_NV) { if (!phys_dev_ext_props.fragment_shading_rate_props.primitiveFragmentShadingRateWithMultipleViewports && IsDynamic(pipeline, VK_DYNAMIC_STATE_VIEWPORT_WITH_COUNT_EXT) && pCB->viewportWithCountCount != 1) { if (pipeline->wrote_primitive_shading_rate.find(stage->stage) != pipeline->wrote_primitive_shading_rate.end()) { skip |= LogError(pipeline->pipeline(), vuid.viewport_count_primitive_shading_rate, "%s: %s shader of currently 
bound pipeline statically writes to PrimitiveShadingRateKHR built-in, "
                                 "but multiple viewports are set by the last call to vkCmdSetViewportWithCountEXT, "
                                 "and the primitiveFragmentShadingRateWithMultipleViewports limit is not supported.",
                                 caller, string_VkShaderStageFlagBits(stage->stage));
                }
            }
        }
    }
    return skip;
}

bool CoreChecks::ValidateComputePipelineShaderState(PIPELINE_STATE *pipeline) const {
    const auto &stage = *pipeline->computePipelineCI.stage.ptr();
    const SHADER_MODULE_STATE *module = GetShaderModuleState(stage.module);
    const spirv_inst_iter entrypoint = module->FindEntrypoint(stage.pName, stage.stage);
    return ValidatePipelineShaderStage(&stage, pipeline, pipeline->stage_state[0], module, entrypoint, false);
}

uint32_t CoreChecks::CalcShaderStageCount(const PIPELINE_STATE *pipeline, VkShaderStageFlagBits stageBit) const {
    uint32_t total = 0;
    const auto *stages = pipeline->raytracingPipelineCI.ptr()->pStages;
    for (uint32_t stage_index = 0; stage_index < pipeline->raytracingPipelineCI.stageCount; stage_index++) {
        if (stages[stage_index].stage == stageBit) {
            total++;
        }
    }
    if (pipeline->raytracingPipelineCI.pLibraryInfo) {
        for (uint32_t i = 0; i < pipeline->raytracingPipelineCI.pLibraryInfo->libraryCount; ++i) {
            const PIPELINE_STATE *library_pipeline = GetPipelineState(pipeline->raytracingPipelineCI.pLibraryInfo->pLibraries[i]);
            total += CalcShaderStageCount(library_pipeline, stageBit);
        }
    }
    return total;
}

bool CoreChecks::ValidateRayTracingPipeline(PIPELINE_STATE *pipeline, VkPipelineCreateFlags flags, bool isKHR) const {
    bool skip = false;
    if (isKHR) {
        if (pipeline->raytracingPipelineCI.maxPipelineRayRecursionDepth > phys_dev_ext_props.ray_tracing_propsKHR.maxRayRecursionDepth) {
            skip |= LogError(device, "VUID-VkRayTracingPipelineCreateInfoKHR-maxPipelineRayRecursionDepth-03589",
                             "vkCreateRayTracingPipelinesKHR: maxPipelineRayRecursionDepth (%d) must be less than or equal to "
                             "VkPhysicalDeviceRayTracingPipelinePropertiesKHR::maxRayRecursionDepth %d",
                             pipeline->raytracingPipelineCI.maxPipelineRayRecursionDepth,
                             phys_dev_ext_props.ray_tracing_propsKHR.maxRayRecursionDepth);
        }
        if (pipeline->raytracingPipelineCI.pLibraryInfo) {
            for (uint32_t i = 0; i < pipeline->raytracingPipelineCI.pLibraryInfo->libraryCount; ++i) {
                const PIPELINE_STATE *library_pipelinestate = GetPipelineState(pipeline->raytracingPipelineCI.pLibraryInfo->pLibraries[i]);
                if (library_pipelinestate->raytracingPipelineCI.maxPipelineRayRecursionDepth !=
                    pipeline->raytracingPipelineCI.maxPipelineRayRecursionDepth) {
                    skip |= LogError(device, "VUID-VkRayTracingPipelineCreateInfoKHR-pLibraries-03591",
                                     "vkCreateRayTracingPipelinesKHR: Each element (%d) of the pLibraries member of libraries must have been "
                                     "created with the value of maxPipelineRayRecursionDepth (%d) equal to that in this pipeline (%d).",
                                     i, library_pipelinestate->raytracingPipelineCI.maxPipelineRayRecursionDepth,
                                     pipeline->raytracingPipelineCI.maxPipelineRayRecursionDepth);
                }
                if (library_pipelinestate->raytracingPipelineCI.pLibraryInfo &&
                    (library_pipelinestate->raytracingPipelineCI.pLibraryInterface->maxPipelineRayHitAttributeSize !=
                         pipeline->raytracingPipelineCI.pLibraryInterface->maxPipelineRayHitAttributeSize ||
                     library_pipelinestate->raytracingPipelineCI.pLibraryInterface->maxPipelineRayPayloadSize !=
                         pipeline->raytracingPipelineCI.pLibraryInterface->maxPipelineRayPayloadSize)) {
                    skip |= LogError(device, "VUID-VkRayTracingPipelineCreateInfoKHR-pLibraryInfo-03593",
                                     "vkCreateRayTracingPipelinesKHR: If pLibraryInfo is not NULL, each element of its
pLibraries " "member must have been created with values of the maxPipelineRayPayloadSize and " "maxPipelineRayHitAttributeSize members of pLibraryInterface equal to those in this pipeline"); } if ((flags & VK_PIPELINE_CREATE_RAY_TRACING_SHADER_GROUP_HANDLE_CAPTURE_REPLAY_BIT_KHR) && !(library_pipelinestate->raytracingPipelineCI.flags & VK_PIPELINE_CREATE_RAY_TRACING_SHADER_GROUP_HANDLE_CAPTURE_REPLAY_BIT_KHR)) { skip |= LogError(device, "VUID-VkRayTracingPipelineCreateInfoKHR-flags-03594", "vkCreateRayTracingPipelinesKHR: If flags includes " "VK_PIPELINE_CREATE_RAY_TRACING_SHADER_GROUP_HANDLE_CAPTURE_REPLAY_BIT_KHR, each element of " "the pLibraries member of libraries must have been created with the " "VK_PIPELINE_CREATE_RAY_TRACING_SHADER_GROUP_HANDLE_CAPTURE_REPLAY_BIT_KHR bit set"); } } } } else { if (pipeline->raytracingPipelineCI.maxRecursionDepth > phys_dev_ext_props.ray_tracing_propsNV.maxRecursionDepth) { skip |= LogError(device, "VUID-VkRayTracingPipelineCreateInfoNV-maxRecursionDepth-03457", "vkCreateRayTracingPipelinesNV: maxRecursionDepth (%d) must be less than or equal to " "VkPhysicalDeviceRayTracingPropertiesNV::maxRecursionDepth (%d)", pipeline->raytracingPipelineCI.maxRecursionDepth, phys_dev_ext_props.ray_tracing_propsNV.maxRecursionDepth); } } const auto *stages = pipeline->raytracingPipelineCI.ptr()->pStages; const auto *groups = pipeline->raytracingPipelineCI.ptr()->pGroups; for (uint32_t stage_index = 0; stage_index < pipeline->raytracingPipelineCI.stageCount; stage_index++) { const auto &stage = stages[stage_index]; const SHADER_MODULE_STATE *module = GetShaderModuleState(stage.module); const spirv_inst_iter entrypoint = module->FindEntrypoint(stage.pName, stage.stage); skip |= ValidatePipelineShaderStage(&stage, pipeline, pipeline->stage_state[stage_index], module, entrypoint, false); } if ((pipeline->raytracingPipelineCI.flags & VK_PIPELINE_CREATE_LIBRARY_BIT_KHR) == 0) { const uint32_t raygen_stages_count = CalcShaderStageCount(pipeline, VK_SHADER_STAGE_RAYGEN_BIT_KHR); if (raygen_stages_count == 0) { skip |= LogError( device, isKHR ? "VUID-VkRayTracingPipelineCreateInfoKHR-stage-03425" : "VUID-VkRayTracingPipelineCreateInfoNV-stage-06232", " : The stage member of at least one element of pStages must be VK_SHADER_STAGE_RAYGEN_BIT_KHR."); } } for (uint32_t group_index = 0; group_index < pipeline->raytracingPipelineCI.groupCount; group_index++) { const auto &group = groups[group_index]; if (group.type == VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_NV) { if (group.generalShader >= pipeline->raytracingPipelineCI.stageCount || (stages[group.generalShader].stage != VK_SHADER_STAGE_RAYGEN_BIT_NV && stages[group.generalShader].stage != VK_SHADER_STAGE_MISS_BIT_NV && stages[group.generalShader].stage != VK_SHADER_STAGE_CALLABLE_BIT_NV)) { skip |= LogError(device, isKHR ? "VUID-VkRayTracingShaderGroupCreateInfoKHR-type-03474" : "VUID-VkRayTracingShaderGroupCreateInfoNV-type-02413", ": pGroups[%d]", group_index); } if (group.anyHitShader != VK_SHADER_UNUSED_NV || group.closestHitShader != VK_SHADER_UNUSED_NV || group.intersectionShader != VK_SHADER_UNUSED_NV) { skip |= LogError(device, isKHR ? 
"VUID-VkRayTracingShaderGroupCreateInfoKHR-type-03475" : "VUID-VkRayTracingShaderGroupCreateInfoNV-type-02414", ": pGroups[%d]", group_index); } } else if (group.type == VK_RAY_TRACING_SHADER_GROUP_TYPE_PROCEDURAL_HIT_GROUP_NV) { if (group.intersectionShader >= pipeline->raytracingPipelineCI.stageCount || stages[group.intersectionShader].stage != VK_SHADER_STAGE_INTERSECTION_BIT_NV) { skip |= LogError(device, isKHR ? "VUID-VkRayTracingShaderGroupCreateInfoKHR-type-03476" : "VUID-VkRayTracingShaderGroupCreateInfoNV-type-02415", ": pGroups[%d]", group_index); } } else if (group.type == VK_RAY_TRACING_SHADER_GROUP_TYPE_TRIANGLES_HIT_GROUP_NV) { if (group.intersectionShader != VK_SHADER_UNUSED_NV) { skip |= LogError(device, isKHR ? "VUID-VkRayTracingShaderGroupCreateInfoKHR-type-03477" : "VUID-VkRayTracingShaderGroupCreateInfoNV-type-02416", ": pGroups[%d]", group_index); } } if (group.type == VK_RAY_TRACING_SHADER_GROUP_TYPE_PROCEDURAL_HIT_GROUP_NV || group.type == VK_RAY_TRACING_SHADER_GROUP_TYPE_TRIANGLES_HIT_GROUP_NV) { if (group.anyHitShader != VK_SHADER_UNUSED_NV && (group.anyHitShader >= pipeline->raytracingPipelineCI.stageCount || stages[group.anyHitShader].stage != VK_SHADER_STAGE_ANY_HIT_BIT_NV)) { skip |= LogError(device, isKHR ? "VUID-VkRayTracingShaderGroupCreateInfoKHR-anyHitShader-03479" : "VUID-VkRayTracingShaderGroupCreateInfoNV-anyHitShader-02418", ": pGroups[%d]", group_index); } if (group.closestHitShader != VK_SHADER_UNUSED_NV && (group.closestHitShader >= pipeline->raytracingPipelineCI.stageCount || stages[group.closestHitShader].stage != VK_SHADER_STAGE_CLOSEST_HIT_BIT_NV)) { skip |= LogError(device, isKHR ? "VUID-VkRayTracingShaderGroupCreateInfoKHR-closestHitShader-03478" : "VUID-VkRayTracingShaderGroupCreateInfoNV-closestHitShader-02417", ": pGroups[%d]", group_index); } } } return skip; } uint32_t ValidationCache::MakeShaderHash(VkShaderModuleCreateInfo const *smci) { return XXH32(smci->pCode, smci->codeSize, 0); } static ValidationCache *GetValidationCacheInfo(VkShaderModuleCreateInfo const *pCreateInfo) { const auto validation_cache_ci = LvlFindInChain<VkShaderModuleValidationCacheCreateInfoEXT>(pCreateInfo->pNext); if (validation_cache_ci) { return CastFromHandle<ValidationCache *>(validation_cache_ci->validationCache); } return nullptr; } bool CoreChecks::PreCallValidateCreateShaderModule(VkDevice device, const VkShaderModuleCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkShaderModule *pShaderModule) const { bool skip = false; spv_result_t spv_valid = SPV_SUCCESS; if (disabled[shader_validation]) { return false; } auto have_glsl_shader = device_extensions.vk_nv_glsl_shader; if (!have_glsl_shader && (pCreateInfo->codeSize % 4)) { skip |= LogError(device, "VUID-VkShaderModuleCreateInfo-pCode-01376", "SPIR-V module not valid: Codesize must be a multiple of 4 but is " PRINTF_SIZE_T_SPECIFIER ".", pCreateInfo->codeSize); } else { auto cache = GetValidationCacheInfo(pCreateInfo); uint32_t hash = 0; // If app isn't using a shader validation cache, use the default one from CoreChecks if (!cache) cache = CastFromHandle<ValidationCache *>(core_validation_cache); if (cache) { hash = ValidationCache::MakeShaderHash(pCreateInfo); if (cache->Contains(hash)) return false; } // Use SPIRV-Tools validator to try and catch any issues with the module itself. If specialization constants are present, // the default values will be used during validation. 
spv_target_env spirv_environment = PickSpirvEnv(api_version, (device_extensions.vk_khr_spirv_1_4 != kNotEnabled)); spv_context ctx = spvContextCreate(spirv_environment); spv_const_binary_t binary{pCreateInfo->pCode, pCreateInfo->codeSize / sizeof(uint32_t)}; spv_diagnostic diag = nullptr; spvtools::ValidatorOptions options; AdjustValidatorOptions(device_extensions, enabled_features, options); spv_valid = spvValidateWithOptions(ctx, options, &binary, &diag); if (spv_valid != SPV_SUCCESS) { if (!have_glsl_shader || (pCreateInfo->pCode[0] == spv::MagicNumber)) { if (spv_valid == SPV_WARNING) { skip |= LogWarning(device, kVUID_Core_Shader_InconsistentSpirv, "SPIR-V module not valid: %s", diag && diag->error ? diag->error : "(no error text)"); } else { skip |= LogError(device, kVUID_Core_Shader_InconsistentSpirv, "SPIR-V module not valid: %s", diag && diag->error ? diag->error : "(no error text)"); } } } else { if (cache) { cache->Insert(hash); } } spvDiagnosticDestroy(diag); spvContextDestroy(ctx); } return skip; } bool CoreChecks::ValidateComputeWorkGroupSizes(const SHADER_MODULE_STATE *shader, const spirv_inst_iter &entrypoint) const { bool skip = false; uint32_t local_size_x = 0; uint32_t local_size_y = 0; uint32_t local_size_z = 0; if (shader->FindLocalSize(entrypoint, local_size_x, local_size_y, local_size_z)) { if (local_size_x > phys_dev_props.limits.maxComputeWorkGroupSize[0]) { skip |= LogError(shader->vk_shader_module(), "UNASSIGNED-features-limits-maxComputeWorkGroupSize", "%s local_size_x (%" PRIu32 ") exceeds device limit maxComputeWorkGroupSize[0] (%" PRIu32 ").", report_data->FormatHandle(shader->vk_shader_module()).c_str(), local_size_x, phys_dev_props.limits.maxComputeWorkGroupSize[0]); } if (local_size_y > phys_dev_props.limits.maxComputeWorkGroupSize[1]) { skip |= LogError(shader->vk_shader_module(), "UNASSIGNED-features-limits-maxComputeWorkGroupSize", "%s local_size_y (%" PRIu32 ") exceeds device limit maxComputeWorkGroupSize[1] (%" PRIu32 ").", report_data->FormatHandle(shader->vk_shader_module()).c_str(), local_size_x, phys_dev_props.limits.maxComputeWorkGroupSize[1]); } if (local_size_z > phys_dev_props.limits.maxComputeWorkGroupSize[2]) { skip |= LogError(shader->vk_shader_module(), "UNASSIGNED-features-limits-maxComputeWorkGroupSize", "%s local_size_z (%" PRIu32 ") exceeds device limit maxComputeWorkGroupSize[2] (%" PRIu32 ").", report_data->FormatHandle(shader->vk_shader_module()).c_str(), local_size_x, phys_dev_props.limits.maxComputeWorkGroupSize[2]); } uint32_t limit = phys_dev_props.limits.maxComputeWorkGroupInvocations; uint64_t invocations = local_size_x * local_size_y; // Prevent overflow. 
bool fail = false; if (invocations > UINT32_MAX || invocations > limit) { fail = true; } if (!fail) { invocations *= local_size_z; if (invocations > UINT32_MAX || invocations > limit) { fail = true; } } if (fail) { skip |= LogError(shader->vk_shader_module(), "UNASSIGNED-features-limits-maxComputeWorkGroupInvocations", "%s local_size (%" PRIu32 ", %" PRIu32 ", %" PRIu32 ") exceeds device limit maxComputeWorkGroupInvocations (%" PRIu32 ").", report_data->FormatHandle(shader->vk_shader_module()).c_str(), local_size_x, local_size_y, local_size_z, limit); } } return skip; } spv_target_env PickSpirvEnv(uint32_t api_version, bool spirv_1_4) { if (api_version >= VK_API_VERSION_1_2) { return SPV_ENV_VULKAN_1_2; } else if (api_version >= VK_API_VERSION_1_1) { if (spirv_1_4) { return SPV_ENV_VULKAN_1_1_SPIRV_1_4; } else { return SPV_ENV_VULKAN_1_1; } } return SPV_ENV_VULKAN_1_0; } void AdjustValidatorOptions(const DeviceExtensions &device_extensions, const DeviceFeatures &enabled_features, spvtools::ValidatorOptions &options) { if (device_extensions.vk_khr_relaxed_block_layout) { options.SetRelaxBlockLayout(true); } if (device_extensions.vk_khr_uniform_buffer_standard_layout && enabled_features.core12.uniformBufferStandardLayout == VK_TRUE) { options.SetUniformBufferStandardLayout(true); } if (device_extensions.vk_ext_scalar_block_layout && enabled_features.core12.scalarBlockLayout == VK_TRUE) { options.SetScalarBlockLayout(true); } if (device_extensions.vk_khr_workgroup_memory_explicit_layout && enabled_features.workgroup_memory_explicit_layout_features.workgroupMemoryExplicitLayoutScalarBlockLayout) { options.SetWorkgroupScalarBlockLayout(true); } }
1
19,973
Looks like this might be one of those "promoted features" where you can enable it by _either_ enabling the extension _or_ enabling the feature bit. If that is the case, I think this needs to be: `(device_extensions.vk_ext_scalar_block_layout == kEnabledByCreateinfo) || (enabled_features.core12.scalarBlockLayout == VK_TRUE)` (a sketch of this check follows below, after this record).
KhronosGroup-Vulkan-ValidationLayers
cpp
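A minimal, self-contained C++ sketch of the check the Vulkan-ValidationLayers review comment above proposes for AdjustValidatorOptions. The enum and struct definitions here are stand-ins invented for illustration; only kNotEnabled, kEnabledByCreateinfo and the field names come from this record, and the snippet shows the reviewer's suggestion, not the change that was actually merged.

#include <iostream>

// Stand-in types (hypothetical, for illustration only) mirroring the fields
// used by AdjustValidatorOptions in the real validation layers.
enum ExtEnabled { kNotEnabled, kEnabledByCreateinfo };
struct DeviceExtensions { ExtEnabled vk_ext_scalar_block_layout; };
struct Core12Features { bool scalarBlockLayout; };
struct EnabledFeatures { Core12Features core12; };

// Promoted-feature logic: relax the SPIR-V validator's scalar block layout
// rules when the extension *or* the core 1.2 feature bit is enabled,
// instead of requiring both as the original '&&' condition does.
bool ShouldRelaxScalarBlockLayout(const DeviceExtensions &ext, const EnabledFeatures &feat) {
    return (ext.vk_ext_scalar_block_layout == kEnabledByCreateinfo) ||
           (feat.core12.scalarBlockLayout == true);
}

int main() {
    DeviceExtensions extensions{kNotEnabled};   // extension not enabled at device creation
    EnabledFeatures features{{true}};           // but the core 1.2 feature bit is set
    // The original '&&' check would skip this case; the proposed '||' check does not.
    std::cout << ShouldRelaxScalarBlockLayout(extensions, features) << "\n"; // prints 1
    return 0;
}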
@@ -52,6 +52,10 @@ template struct ONEDAL_EXPORT integer_overflow_ops<std::uint16_t>; template struct ONEDAL_EXPORT integer_overflow_ops<std::uint32_t>; template struct ONEDAL_EXPORT integer_overflow_ops<std::uint64_t>; +#if defined(__APPLE__) +template struct ONEDAL_EXPORT integer_overflow_ops<std::size_t>; +#endif + } // namespace v1 namespace v2 {
1
/******************************************************************************* * Copyright 2020-2021 Intel Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. *******************************************************************************/ #include "oneapi/dal/detail/common.hpp" namespace oneapi::dal::detail { namespace v1 { template <typename Data> void integer_overflow_ops<Data>::check_sum_overflow(const Data& first, const Data& second) { v2::integer_overflow_ops<Data>{}.check_sum_overflow(first, second); } template <typename Data> void integer_overflow_ops<Data>::check_mul_overflow(const Data& first, const Data& second) { v2::integer_overflow_ops<Data>{}.check_mul_overflow(first, second); } template <typename Data> bool integer_overflow_ops<Data>::is_safe_sum(const Data& first, const Data& second, Data& sum_result) { return v2::integer_overflow_ops<Data>{}.is_safe_sum(first, second, sum_result); } template <typename Data> bool integer_overflow_ops<Data>::is_safe_mul(const Data& first, const Data& second, Data& mul_result) { return v2::integer_overflow_ops<Data>{}.is_safe_mul(first, second, mul_result); } template struct ONEDAL_EXPORT integer_overflow_ops<std::int8_t>; template struct ONEDAL_EXPORT integer_overflow_ops<std::int16_t>; template struct ONEDAL_EXPORT integer_overflow_ops<std::int32_t>; template struct ONEDAL_EXPORT integer_overflow_ops<std::int64_t>; template struct ONEDAL_EXPORT integer_overflow_ops<std::uint8_t>; template struct ONEDAL_EXPORT integer_overflow_ops<std::uint16_t>; template struct ONEDAL_EXPORT integer_overflow_ops<std::uint32_t>; template struct ONEDAL_EXPORT integer_overflow_ops<std::uint64_t>; } // namespace v1 namespace v2 { template <typename Data> bool integer_overflow_ops<Data>::is_safe_sum(const Data& first, const Data& second, Data& sum_result) { sum_result = first + second; volatile Data tmp = sum_result; tmp -= first; return tmp == second; } template <typename Data> bool integer_overflow_ops<Data>::is_safe_mul(const Data& first, const Data& second, Data& mul_result) { mul_result = first * second; if (first != 0 && second != 0) { volatile Data tmp = mul_result; tmp /= first; return tmp == second; } return true; } template <typename Data> Data integer_overflow_ops<Data>::check_sum_overflow(const Data& first, const Data& second) { Data op_result; if (!is_safe_sum(first, second, op_result)) { throw range_error(dal::detail::error_messages::overflow_found_in_sum_of_two_values()); } return op_result; } template <typename Data> Data integer_overflow_ops<Data>::check_mul_overflow(const Data& first, const Data& second) { Data op_result; if (!is_safe_mul(first, second, op_result)) { throw range_error( dal::detail::error_messages::overflow_found_in_multiplication_of_two_values()); } return op_result; } template struct ONEDAL_EXPORT integer_overflow_ops<std::int8_t>; template struct ONEDAL_EXPORT integer_overflow_ops<std::int16_t>; template struct ONEDAL_EXPORT integer_overflow_ops<std::int32_t>; template struct ONEDAL_EXPORT integer_overflow_ops<std::int64_t>; 
template struct ONEDAL_EXPORT integer_overflow_ops<std::uint8_t>; template struct ONEDAL_EXPORT integer_overflow_ops<std::uint16_t>; template struct ONEDAL_EXPORT integer_overflow_ops<std::uint32_t>; template struct ONEDAL_EXPORT integer_overflow_ops<std::uint64_t>; } // namespace v2 } // namespace oneapi::dal::detail
1
29,935
Why should we define it for v1? That namespace is preserved only for backward compatibility; all further modifications must be done in the latest vX (see the placement sketch below, after this record).
oneapi-src-oneDAL
cpp
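To make the oneDAL review comment above concrete, here is a small self-contained C++ sketch (with a stand-in template instead of the real oneapi/dal headers, and the ONEDAL_EXPORT macro dropped) of where a new explicit instantiation would go under that policy: only the newest versioned namespace (v2 in this record) receives additions, while v1 stays frozen. The __APPLE__ guard reflects that on Apple's toolchain std::size_t (unsigned long) is a distinct type from std::uint64_t (unsigned long long), whereas on most other 64-bit toolchains they coincide and a second instantiation would typically be a duplicate.

#include <cstddef>
#include <cstdint>

namespace oneapi::dal::detail {
namespace v1 {
// Frozen for backward compatibility: no new instantiations are added here.
} // namespace v1

namespace v2 {
// Stand-in for the real integer_overflow_ops template, used only so this
// sketch compiles on its own.
template <typename Data>
struct integer_overflow_ops_sketch {
    Data check_sum_overflow(const Data& a, const Data& b) { return a + b; }
};

// Fixed-width instantiation mirroring the ones already present in the file.
template struct integer_overflow_ops_sketch<std::uint64_t>;

#if defined(__APPLE__)
// std::size_t is a distinct type from std::uint64_t on Apple's toolchain,
// so it gets its own instantiation -- added in v2 only, per the comment above.
template struct integer_overflow_ops_sketch<std::size_t>;
#endif
} // namespace v2
} // namespace oneapi::dal::detail

int main() { return 0; }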
@@ -229,12 +229,8 @@ class DaskInterface(PandasInterface): return columns.data.compute() @classmethod - def length(cls, dataset): - """ - Length of dask dataframe is unknown, always return 1 - for performance, use shape to compute dataframe shape. - """ - return 1 + def nonzero(cls, dataset): + return True
1
from __future__ import absolute_import try: import itertools.izip as zip except ImportError: pass import numpy as np import pandas as pd import dask.dataframe as dd from dask.dataframe import DataFrame from dask.dataframe.core import Scalar from .. import util from ..element import Element from ..ndmapping import NdMapping, item_check from .interface import Interface from .pandas import PandasInterface class DaskInterface(PandasInterface): """ The DaskInterface allows a Dataset objects to wrap a dask DataFrame object. Using dask allows loading data lazily and performing out-of-core operations on the data, making it possible to work on datasets larger than memory. The DaskInterface covers almost the complete API exposed by the PandasInterface with two notable exceptions: 1) Sorting is not supported and any attempt at sorting will be ignored with an warning. 2) Dask does not easily support adding a new column to an existing dataframe unless it is a scalar, add_dimension will therefore error when supplied a non-scalar value. 4) Not all functions can be easily applied to a dask dataframe so some functions applied with aggregate and reduce will not work. """ types = (DataFrame,) datatype = 'dask' default_partitions = 100 @classmethod def init(cls, eltype, data, kdims, vdims): data, kdims, vdims = PandasInterface.init(eltype, data, kdims, vdims) if not isinstance(data, DataFrame): data = dd.from_pandas(data, npartitions=cls.default_partitions, sort=False) return data, kdims, vdims @classmethod def shape(cls, dataset): return (len(dataset.data), len(dataset.data.columns)) @classmethod def range(cls, columns, dimension): column = columns.data[columns.get_dimension(dimension).name] if column.dtype.kind == 'O': column = np.sort(column[column.notnull()].compute()) return column[0], column[-1] else: return dd.compute(column.min(), column.max()) @classmethod def sort(cls, columns, by=[]): columns.warning('Dask dataframes do not support sorting') return columns.data @classmethod def values(cls, columns, dim, expanded=True, flat=True): data = columns.data[dim] if not expanded: data = data.unique() return data.compute().values @classmethod def select_mask(cls, dataset, selection): """ Given a Dataset object and a dictionary with dimension keys and selection keys (i.e tuple ranges, slices, sets, lists or literals) return a boolean mask over the rows in the Dataset object that have been selected. 
""" select_mask = None for dim, k in selection.items(): if isinstance(k, tuple): k = slice(*k) masks = [] series = dataset.data[dim] if isinstance(k, slice): if k.start is not None: masks.append(k.start <= series) if k.stop is not None: masks.append(series < k.stop) elif isinstance(k, (set, list)): iter_slc = None for ik in k: mask = series == ik if iter_slc is None: iter_slc = mask else: iter_slc |= mask masks.append(iter_slc) elif callable(k): masks.append(k(series)) else: masks.append(series == k) for mask in masks: if select_mask: select_mask &= mask else: select_mask = mask return select_mask @classmethod def select(cls, columns, selection_mask=None, **selection): df = columns.data if selection_mask is not None: return df[selection_mask] selection_mask = cls.select_mask(columns, selection) indexed = cls.indexed(columns, selection) df = df if selection_mask is None else df[selection_mask] if indexed and len(df) == 1: return df[columns.vdims[0].name].compute().iloc[0] return df @classmethod def groupby(cls, columns, dimensions, container_type, group_type, **kwargs): index_dims = [columns.get_dimension(d) for d in dimensions] element_dims = [kdim for kdim in columns.kdims if kdim not in index_dims] group_kwargs = {} if group_type != 'raw' and issubclass(group_type, Element): group_kwargs = dict(util.get_param_values(columns), kdims=element_dims) group_kwargs.update(kwargs) data = [] groupby = columns.data.groupby(dimensions) ind_array = columns.data[dimensions].compute().values indices = (tuple(ind) for ind in ind_array) for coord in util.unique_iterator(indices): if any(isinstance(c, float) and np.isnan(c) for c in coord): continue if len(coord) == 1: coord = coord[0] group = group_type(groupby.get_group(coord), **group_kwargs) data.append((coord, group)) if issubclass(container_type, NdMapping): with item_check(False): return container_type(data, kdims=index_dims) else: return container_type(data) @classmethod def aggregate(cls, columns, dimensions, function, **kwargs): data = columns.data cols = [d.name for d in columns.kdims if d in dimensions] vdims = columns.dimensions('value', True) dtypes = data.dtypes numeric = [c for c, dtype in zip(dtypes.index, dtypes.values) if dtype.kind in 'iufc' and c in vdims] reindexed = data[cols+numeric] inbuilts = {'amin': 'min', 'amax': 'max', 'mean': 'mean', 'std': 'std', 'sum': 'sum', 'var': 'var'} if len(dimensions): groups = reindexed.groupby(cols, sort=False) if (function.__name__ in inbuilts): agg = getattr(groups, inbuilts[function.__name__])() else: agg = groups.apply(function) return agg.reset_index() else: if (function.__name__ in inbuilts): agg = getattr(reindexed, inbuilts[function.__name__])() else: raise NotImplementedError return pd.DataFrame(agg.compute()).T @classmethod def unpack_scalar(cls, columns, data): """ Given a columns object and data in the appropriate format for the interface, return a simple scalar. 
""" if len(data.columns) > 1 or len(data) != 1: return data if isinstance(data, dd.DataFrame): data = data.compute() return data.iat[0,0] @classmethod def sample(cls, columns, samples=[]): data = columns.data dims = columns.dimensions('key', label=True) mask = None for sample in samples: if np.isscalar(sample): sample = [sample] for i, (c, v) in enumerate(zip(dims, sample)): dim_mask = data[c]==v if mask is None: mask = dim_mask else: mask |= dim_mask return data[mask] @classmethod def add_dimension(cls, columns, dimension, dim_pos, values, vdim): data = columns.data if dimension.name not in data.columns: if not np.isscalar(values): err = ('Dask dataframe does not support assigning ' 'non-scalar value.') raise NotImplementedError(err) data = data.assign(**{dimension.name: values}) return data @classmethod def concat(cls, columns_objs): cast_objs = cls.cast(columns_objs) return dd.concat([col.data for col in cast_objs]) @classmethod def dframe(cls, columns, dimensions): return columns.data.compute() @classmethod def length(cls, dataset): """ Length of dask dataframe is unknown, always return 1 for performance, use shape to compute dataframe shape. """ return 1 Interface.register(DaskInterface)
1
15,886
This is a definite improvement! Hardcoding nonzero is vastly better than hardcoding length. Even so, is there no way to determine the actual value of nonzero in a way that doesn't load the entire dataset? One possibility is sketched below, after this record.
holoviz-holoviews
py
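A possible answer to the holoviz-holoviews question above, sketched in Python: rather than hardcoding nonzero, ask dask for at most one row. This assumes a dask version whose DataFrame.head accepts the npartitions argument; it is an illustration of the idea, not code from the PR.

import dask.dataframe as dd
import pandas as pd


def dask_nonzero(ddf):
    """Return True if the dask dataframe has at least one row.

    head(1, npartitions=-1) walks partitions until it finds a row, so at most
    a single row is collected instead of materialising the whole frame.
    """
    return len(ddf.head(1, npartitions=-1, compute=True)) > 0


if __name__ == '__main__':
    ddf = dd.from_pandas(pd.DataFrame({'x': [1, 2, 3]}), npartitions=2)
    print(dask_nonzero(ddf))              # True
    print(dask_nonzero(ddf[ddf.x > 10]))  # False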
@@ -94,6 +94,13 @@ namespace OpenTelemetry.Instrumentation.Http.Implementation return hostTagValue; } + /// <summary> + /// Gets the OpenTelemetry standard uri tag value for a span based on its request <see cref="Uri"/>. + /// </summary> + /// <param name="uri"><see cref="Uri"/>.</param> + /// <returns>Span uri value.</returns> + public static string GetUriTagValueFromRequestUri(Uri uri) => string.Concat(uri.Scheme, Uri.SchemeDelimiter, uri.Authority, uri.PathAndQuery, uri.Fragment); + private static string ConvertMethodToOperationName(string method) => $"HTTP {method}"; private static string ConvertHttpMethodToOperationName(HttpMethod method) => $"HTTP {method}";
1
// <copyright file="HttpTagHelper.cs" company="OpenTelemetry Authors"> // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // </copyright> using System; using System.Collections.Concurrent; using System.Net.Http; namespace OpenTelemetry.Instrumentation.Http.Implementation { /// <summary> /// A collection of helper methods to be used when building Http activities. /// </summary> internal static class HttpTagHelper { private static readonly ConcurrentDictionary<string, string> MethodOperationNameCache = new ConcurrentDictionary<string, string>(); private static readonly ConcurrentDictionary<HttpMethod, string> HttpMethodOperationNameCache = new ConcurrentDictionary<HttpMethod, string>(); private static readonly ConcurrentDictionary<HttpMethod, string> HttpMethodNameCache = new ConcurrentDictionary<HttpMethod, string>(); private static readonly ConcurrentDictionary<string, ConcurrentDictionary<int, string>> HostAndPortToStringCache = new ConcurrentDictionary<string, ConcurrentDictionary<int, string>>(); private static readonly ConcurrentDictionary<Version, string> ProtocolVersionToStringCache = new ConcurrentDictionary<Version, string>(); private static readonly Func<string, string> ConvertMethodToOperationNameRef = ConvertMethodToOperationName; private static readonly Func<HttpMethod, string> ConvertHttpMethodToOperationNameRef = ConvertHttpMethodToOperationName; private static readonly Func<HttpMethod, string> ConvertHttpMethodToNameRef = ConvertHttpMethodToName; private static readonly Func<Version, string> ConvertProtocolVersionToStringRef = ConvertProtocolVersionToString; /// <summary> /// Gets the OpenTelemetry standard name for an activity based on its Http method. /// </summary> /// <param name="method">Http method.</param> /// <returns>Activity name.</returns> public static string GetOperationNameForHttpMethod(string method) => MethodOperationNameCache.GetOrAdd(method, ConvertMethodToOperationNameRef); /// <summary> /// Gets the OpenTelemetry standard operation name for a span based on its <see cref="HttpMethod"/>. /// </summary> /// <param name="method"><see cref="HttpMethod"/>.</param> /// <returns>Span operation name.</returns> public static string GetOperationNameForHttpMethod(HttpMethod method) => HttpMethodOperationNameCache.GetOrAdd(method, ConvertHttpMethodToOperationNameRef); /// <summary> /// Gets the OpenTelemetry standard method name for a span based on its <see cref="HttpMethod"/>. /// </summary> /// <param name="method"><see cref="HttpMethod"/>.</param> /// <returns>Span method name.</returns> public static string GetNameForHttpMethod(HttpMethod method) => HttpMethodNameCache.GetOrAdd(method, ConvertHttpMethodToNameRef); /// <summary> /// Gets the OpenTelemetry standard version tag value for a span based on its protocol <see cref="Version"/>. 
/// </summary> /// <param name="protocolVersion"><see cref="Version"/>.</param> /// <returns>Span flavor value.</returns> public static string GetFlavorTagValueFromProtocolVersion(Version protocolVersion) => ProtocolVersionToStringCache.GetOrAdd(protocolVersion, ConvertProtocolVersionToStringRef); /// <summary> /// Gets the OpenTelemetry standard host tag value for a span based on its request <see cref="Uri"/>. /// </summary> /// <param name="requestUri"><see cref="Uri"/>.</param> /// <returns>Span host value.</returns> public static string GetHostTagValueFromRequestUri(Uri requestUri) { string host = requestUri.Host; if (requestUri.IsDefaultPort) { return host; } int port = requestUri.Port; if (!HostAndPortToStringCache.TryGetValue(host, out ConcurrentDictionary<int, string> portCache)) { portCache = new ConcurrentDictionary<int, string>(); HostAndPortToStringCache.TryAdd(host, portCache); } if (!portCache.TryGetValue(port, out string hostTagValue)) { hostTagValue = $"{requestUri.Host}:{requestUri.Port}"; portCache.TryAdd(port, hostTagValue); } return hostTagValue; } private static string ConvertMethodToOperationName(string method) => $"HTTP {method}"; private static string ConvertHttpMethodToOperationName(HttpMethod method) => $"HTTP {method}"; private static string ConvertHttpMethodToName(HttpMethod method) => method.ToString(); private static string ConvertProtocolVersionToString(Version protocolVersion) => protocolVersion.ToString(); } }
1
19,868
Do we have a way to avoid the string concats if there is no username/password in the Uri? Something like: if (uri has UserInfo) { do what is done in this PR } else { existing behavior }. A sketch of that shape follows below, after this record.
open-telemetry-opentelemetry-dotnet
.cs
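A C# sketch of the shape the open-telemetry-dotnet reviewer asks about above: only pay for the string concatenation when the Uri actually carries user info. The fast path uses uri.ToString() purely as a placeholder for whatever the instrumentation used before this PR, since that call site is not shown in this record.

using System;

internal static class UriTagSketch
{
    // Reviewer's idea: skip the concat when there is no "user:password@" part in the Uri.
    public static string GetUriTagValue(Uri uri) =>
        string.IsNullOrEmpty(uri.UserInfo)
            ? uri.ToString() // placeholder for the existing, pre-PR behavior
            : string.Concat(uri.Scheme, Uri.SchemeDelimiter, uri.Authority, uri.PathAndQuery, uri.Fragment);

    private static void Main()
    {
        Console.WriteLine(GetUriTagValue(new Uri("https://example.com/path?q=1")));
        Console.WriteLine(GetUriTagValue(new Uri("https://user:secret@example.com/path?q=1"))); // credentials removed
    }
}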
@@ -1862,6 +1862,7 @@ static void cb_stackdriver_flush(const void *data, size_t bytes, else { /* The request was issued successfully, validate the 'error' field */ flb_plg_debug(ctx->ins, "HTTP Status=%i", c->resp.status); + flb_plg_debug(ctx->ins, "HTTP data=%s", c->resp.data); if (c->resp.status == 200) { ret_code = FLB_OK; }
1
/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */ /* Fluent Bit * ========== * Copyright (C) 2019-2020 The Fluent Bit Authors * Copyright (C) 2015-2018 Treasure Data Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <fluent-bit/flb_output_plugin.h> #include <fluent-bit/flb_http_client.h> #include <fluent-bit/flb_pack.h> #include <fluent-bit/flb_utils.h> #include <fluent-bit/flb_time.h> #include <fluent-bit/flb_oauth2.h> #include <fluent-bit/flb_regex.h> #include <msgpack.h> #include "gce_metadata.h" #include "stackdriver.h" #include "stackdriver_conf.h" #include "stackdriver_operation.h" #include "stackdriver_source_location.h" #include "stackdriver_http_request.h" #include "stackdriver_timestamp.h" #include "stackdriver_helper.h" #include <mbedtls/base64.h> #include <mbedtls/sha256.h> /* * Base64 Encoding in JWT must: * * - remove any trailing padding '=' character * - replace '+' with '-' * - replace '/' with '_' * * ref: https://www.rfc-editor.org/rfc/rfc7515.txt Appendix C */ int jwt_base64_url_encode(unsigned char *out_buf, size_t out_size, unsigned char *in_buf, size_t in_size, size_t *olen) { int i; size_t len; /* do normal base64 encoding */ mbedtls_base64_encode(out_buf, out_size - 1, &len, in_buf, in_size); /* Replace '+' and '/' characters */ for (i = 0; i < len && out_buf[i] != '='; i++) { if (out_buf[i] == '+') { out_buf[i] = '-'; } else if (out_buf[i] == '/') { out_buf[i] = '_'; } } /* Now 'i' becomes the new length */ *olen = i; return 0; } static int jwt_encode(char *payload, char *secret, char **out_signature, size_t *out_size, struct flb_stackdriver *ctx) { int ret; int len; int buf_size; size_t olen; char *buf; char *sigd; char *headers = "{\"alg\": \"RS256\", \"typ\": \"JWT\"}"; unsigned char sha256_buf[32] = {0}; mbedtls_sha256_context sha256_ctx; mbedtls_rsa_context *rsa; flb_sds_t out; mbedtls_pk_context pk_ctx; unsigned char sig[256] = {0}; buf_size = (strlen(payload) + strlen(secret)) * 2; buf = flb_malloc(buf_size); if (!buf) { flb_errno(); return -1; } /* Encode header */ len = strlen(headers); mbedtls_base64_encode((unsigned char *) buf, buf_size - 1, &olen, (unsigned char *) headers, len); /* Create buffer to store JWT */ out = flb_sds_create_size(2048); if (!out) { flb_errno(); flb_free(buf); return -1; } /* Append header */ flb_sds_cat(out, buf, olen); flb_sds_cat(out, ".", 1); /* Encode Payload */ len = strlen(payload); jwt_base64_url_encode((unsigned char *) buf, buf_size, (unsigned char *) payload, len, &olen); /* Append Payload */ flb_sds_cat(out, buf, olen); /* do sha256() of base64(header).base64(payload) */ mbedtls_sha256_init(&sha256_ctx); mbedtls_sha256_starts(&sha256_ctx, 0); mbedtls_sha256_update(&sha256_ctx, (const unsigned char *) out, flb_sds_len(out)); mbedtls_sha256_finish(&sha256_ctx, sha256_buf); /* In mbedTLS cert length must include the null byte */ len = strlen(secret) + 1; /* Load Private Key */ mbedtls_pk_init(&pk_ctx); ret = mbedtls_pk_parse_key(&pk_ctx, (unsigned char *) secret, len, NULL, 0); if (ret != 0) 
{ flb_plg_error(ctx->ins, "error loading private key"); flb_free(buf); flb_sds_destroy(out); return -1; } /* Create RSA context */ rsa = mbedtls_pk_rsa(pk_ctx); if (!rsa) { flb_plg_error(ctx->ins, "error creating RSA context"); flb_free(buf); flb_sds_destroy(out); mbedtls_pk_free(&pk_ctx); return -1; } ret = mbedtls_rsa_pkcs1_sign(rsa, NULL, NULL, MBEDTLS_RSA_PRIVATE, MBEDTLS_MD_SHA256, 0, (unsigned char *) sha256_buf, sig); if (ret != 0) { flb_plg_error(ctx->ins, "error signing SHA256"); flb_free(buf); flb_sds_destroy(out); mbedtls_pk_free(&pk_ctx); return -1; } sigd = flb_malloc(2048); if (!sigd) { flb_errno(); flb_free(buf); flb_sds_destroy(out); mbedtls_pk_free(&pk_ctx); return -1; } jwt_base64_url_encode((unsigned char *) sigd, 2048, sig, 256, &olen); flb_sds_cat(out, ".", 1); flb_sds_cat(out, sigd, olen); *out_signature = out; *out_size = flb_sds_len(out); flb_free(buf); flb_free(sigd); mbedtls_pk_free(&pk_ctx); return 0; } /* Create a new oauth2 context and get a oauth2 token */ static int get_oauth2_token(struct flb_stackdriver *ctx) { int ret; char *token; char *sig_data; size_t sig_size; time_t issued; time_t expires; char payload[1024]; /* Create oauth2 context */ ctx->o = flb_oauth2_create(ctx->config, FLB_STD_AUTH_URL, 3000); if (!ctx->o) { flb_plg_error(ctx->ins, "cannot create oauth2 context"); return -1; } /* In case of using metadata server, fetch token from there */ if (ctx->metadata_server_auth) { return gce_metadata_read_token(ctx); } /* JWT encode for oauth2 */ issued = time(NULL); expires = issued + FLB_STD_TOKEN_REFRESH; snprintf(payload, sizeof(payload) - 1, "{\"iss\": \"%s\", \"scope\": \"%s\", " "\"aud\": \"%s\", \"exp\": %lu, \"iat\": %lu}", ctx->client_email, FLB_STD_SCOPE, FLB_STD_AUTH_URL, expires, issued); /* Compose JWT signature */ ret = jwt_encode(payload, ctx->private_key, &sig_data, &sig_size, ctx); if (ret != 0) { flb_plg_error(ctx->ins, "JWT signature generation failed"); return -1; } flb_plg_debug(ctx->ins, "JWT signature:\n%s", sig_data); ret = flb_oauth2_payload_append(ctx->o, "grant_type", -1, "urn:ietf:params:oauth:" "grant-type:jwt-bearer", -1); if (ret == -1) { flb_plg_error(ctx->ins, "error appending oauth2 params"); flb_sds_destroy(sig_data); return -1; } ret = flb_oauth2_payload_append(ctx->o, "assertion", -1, sig_data, sig_size); if (ret == -1) { flb_plg_error(ctx->ins, "error appending oauth2 params"); flb_sds_destroy(sig_data); return -1; } flb_sds_destroy(sig_data); /* Retrieve access token */ token = flb_oauth2_token_get(ctx->o); if (!token) { flb_plg_error(ctx->ins, "error retrieving oauth2 access token"); return -1; } return 0; } static char *get_google_token(struct flb_stackdriver *ctx) { int ret = 0; if (!ctx->o) { ret = get_oauth2_token(ctx); } else if (flb_oauth2_token_expired(ctx->o) == FLB_TRUE) { flb_oauth2_destroy(ctx->o); ret = get_oauth2_token(ctx); } if (ret != 0) { return NULL; } return ctx->o->access_token; } static bool validate_msgpack_unpacked_data(msgpack_object root) { return root.type == MSGPACK_OBJECT_ARRAY && root.via.array.size == 2 && root.via.array.ptr[1].type == MSGPACK_OBJECT_MAP; } void replace_prefix_dot(flb_sds_t s, int tag_prefix_len) { int i; int str_len; char c; if (!s) { return; } str_len = flb_sds_len(s); if (tag_prefix_len > str_len) { flb_error("[output] tag_prefix shouldn't be longer than local_resource_id"); return; } for (i = 0; i < tag_prefix_len; i++) { c = s[i]; if (c == '.') { s[i] = '_'; } } } static flb_sds_t get_str_value_from_msgpack_map(msgpack_object_map map, const char *key, int 
key_size) { int i; msgpack_object k; msgpack_object v; flb_sds_t ptr = NULL; for (i = 0; i < map.size; i++) { k = map.ptr[i].key; v = map.ptr[i].val; if (k.type != MSGPACK_OBJECT_STR) { continue; } if (k.via.str.size == key_size && strncmp(key, (char *) k.via.str.ptr, k.via.str.size) == 0) { /* make sure to free it after use */ ptr = flb_sds_create_len(v.via.str.ptr, v.via.str.size); break; } } return ptr; } /* parse_monitored_resource is to extract the monitoired resource labels * from "logging.googleapis.com/monitored_resource" in log data * and append to 'resource'/'labels' in log entry. * Monitored resource type is already read from resource field in stackdriver * output plugin configuration parameters. * * The structure of monitored_resource is: * { * "logging.googleapis.com/monitored_resource": { * "labels": { * "resource_label": <label_value>, * } * } * } * See https://cloud.google.com/logging/docs/api/v2/resource-list#resource-types * for required labels for each monitored resource. */ static int parse_monitored_resource(struct flb_stackdriver *ctx, const void *data, size_t bytes, msgpack_packer *mp_pck) { int ret = -1; size_t off = 0; msgpack_object *obj; msgpack_unpacked result; msgpack_unpacked_init(&result); while (msgpack_unpack_next(&result, data, bytes, &off) == MSGPACK_UNPACK_SUCCESS) { if (result.data.type != MSGPACK_OBJECT_ARRAY) { continue; } if (result.data.via.array.size != 2) { continue; } obj = &result.data.via.array.ptr[1]; if (obj->type != MSGPACK_OBJECT_MAP) { continue; } msgpack_object_kv *kv = obj->via.map.ptr; msgpack_object_kv *const kvend = obj->via.map.ptr + obj->via.map.size; for (; kv < kvend; ++kv) { if (kv->val.type == MSGPACK_OBJECT_MAP && kv->key.type == MSGPACK_OBJECT_STR && strncmp (MONITORED_RESOURCE_KEY, kv->key.via.str.ptr, kv->key.via.str.size) == 0) { msgpack_object subobj = kv->val; msgpack_object_kv *p = subobj.via.map.ptr; msgpack_object_kv *pend = subobj.via.map.ptr + subobj.via.map.size; for (; p < pend; ++p) { if (p->key.type != MSGPACK_OBJECT_STR || p->val.type != MSGPACK_OBJECT_MAP) { continue; } if (strncmp("labels", p->key.via.str.ptr, p->key.via.str.size) == 0) { msgpack_object labels = p->val; msgpack_object_kv *q = labels.via.map.ptr; msgpack_object_kv *qend = labels.via.map.ptr + labels.via.map.size; int fields = 0; for (; q < qend; ++q) { if (q->key.type != MSGPACK_OBJECT_STR || q->val.type != MSGPACK_OBJECT_STR) { flb_plg_error(ctx->ins, "Key and value should be string in the %s/labels", MONITORED_RESOURCE_KEY); } ++fields; } if (fields > 0) { msgpack_pack_map(mp_pck, fields); q = labels.via.map.ptr; for (; q < qend; ++q) { if (q->key.type != MSGPACK_OBJECT_STR || q->val.type != MSGPACK_OBJECT_STR) { continue; } flb_plg_debug(ctx->ins, "[%s] found in the payload", MONITORED_RESOURCE_KEY); msgpack_pack_str(mp_pck, q->key.via.str.size); msgpack_pack_str_body(mp_pck, q->key.via.str.ptr, q->key.via.str.size); msgpack_pack_str(mp_pck, q->val.via.str.size); msgpack_pack_str_body(mp_pck, q->val.via.str.ptr, q->val.via.str.size); } msgpack_unpacked_destroy(&result); ret = 0; return ret; } } } } } } msgpack_unpacked_destroy(&result); flb_plg_debug(ctx->ins, "[%s] not found in the payload", MONITORED_RESOURCE_KEY); return ret; } /* * Given a local_resource_id, split the content using the proper separator generating * a linked list to store the spliited string */ static struct mk_list *parse_local_resource_id_to_list(char *local_resource_id, char *type) { int ret = -1; int max_split = -1; int len_k8s_container; int len_k8s_node; int 
len_k8s_pod; struct mk_list *list; len_k8s_container = sizeof(K8S_CONTAINER) - 1; len_k8s_node = sizeof(K8S_NODE) - 1; len_k8s_pod = sizeof(K8S_POD) - 1; /* Allocate list head */ list = flb_malloc(sizeof(struct mk_list)); if (!list) { flb_errno(); return NULL; } mk_list_init(list); /* Determinate the max split value based on type */ if (strncmp(type, K8S_CONTAINER, len_k8s_container) == 0) { /* including the prefix of tag */ max_split = 4; } else if (strncmp(type, K8S_NODE, len_k8s_node) == 0) { max_split = 2; } else if (strncmp(type, K8S_POD, len_k8s_pod) == 0) { max_split = 3; } /* The local_resource_id is splitted by '.' */ ret = flb_slist_split_string(list, local_resource_id, '.', max_split); if (ret == -1 || mk_list_size(list) != max_split) { flb_error("error parsing local_resource_id [%s] for type %s", local_resource_id, type); flb_slist_destroy(list); flb_free(list); return NULL; } return list; } /* * extract_local_resource_id(): * - extract the value from "logging.googleapis.com/local_resource_id" field * - if local_resource_id is missing from the payLoad, use the tag of the log */ static int extract_local_resource_id(const void *data, size_t bytes, struct flb_stackdriver *ctx, const char *tag) { msgpack_object root; msgpack_object_map map; msgpack_unpacked result; flb_sds_t local_resource_id; size_t off = 0; msgpack_unpacked_init(&result); if (msgpack_unpack_next(&result, data, bytes, &off) == MSGPACK_UNPACK_SUCCESS) { root = result.data; if (!validate_msgpack_unpacked_data(root)) { msgpack_unpacked_destroy(&result); flb_plg_error(ctx->ins, "unexpected record format"); return -1; } map = root.via.array.ptr[1].via.map; local_resource_id = get_str_value_from_msgpack_map(map, LOCAL_RESOURCE_ID_KEY, LEN_LOCAL_RESOURCE_ID_KEY); if (local_resource_id == NULL) { /* if local_resource_id is not found, use the tag of the log */ flb_plg_debug(ctx->ins, "local_resource_id not found, " "tag [%s] is assigned for local_resource_id", tag); local_resource_id = flb_sds_create(tag); } /* we need to create up the local_resource_id from previous log */ if (ctx->local_resource_id) { flb_sds_destroy(ctx->local_resource_id); } ctx->local_resource_id = flb_sds_create(local_resource_id); } else { msgpack_unpacked_destroy(&result); flb_plg_error(ctx->ins, "failed to unpack data"); return -1; } flb_sds_destroy(local_resource_id); msgpack_unpacked_destroy(&result); return 0; } /* * process_local_resource_id(): * - use the extracted local_resource_id to assign the label keys for different * resource types that are specified in the configuration of stackdriver_out plugin */ static int process_local_resource_id(struct flb_stackdriver *ctx, char *type) { int ret = -1; int first = FLB_TRUE; int counter = 0; int len_k8s_container; int len_k8s_node; int len_k8s_pod; int prefix_len; struct local_resource_id_list *ptr; struct mk_list *list = NULL; struct mk_list *head; flb_sds_t new_local_resource_id; if (!ctx->local_resource_id) { flb_plg_error(ctx->ins, "local_resource_is is not assigned"); return -1; } len_k8s_container = sizeof(K8S_CONTAINER) - 1; len_k8s_node = sizeof(K8S_NODE) - 1; len_k8s_pod = sizeof(K8S_POD) - 1; prefix_len = flb_sds_len(ctx->tag_prefix); if (flb_sds_casecmp(ctx->tag_prefix, ctx->local_resource_id, prefix_len) != 0) { flb_plg_error(ctx->ins, "tag_prefix [%s] doesn't match the prefix of" " local_resource_id [%s]", ctx->tag_prefix, ctx->local_resource_id); return -1; } new_local_resource_id = flb_sds_create_len(ctx->local_resource_id, flb_sds_len(ctx->local_resource_id)); 
replace_prefix_dot(new_local_resource_id, prefix_len - 1); if (strncmp(type, K8S_CONTAINER, len_k8s_container) == 0) { list = parse_local_resource_id_to_list(new_local_resource_id, K8S_CONTAINER); if (!list) { goto error; } /* iterate through the list */ mk_list_foreach(head, list) { ptr = mk_list_entry(head, struct local_resource_id_list, _head); if (first) { first = FLB_FALSE; continue; } /* Follow the order of fields in local_resource_id */ if (counter == 0) { if (ctx->namespace_name) { flb_sds_destroy(ctx->namespace_name); } ctx->namespace_name = flb_sds_create(ptr->val); } else if (counter == 1) { if (ctx->pod_name) { flb_sds_destroy(ctx->pod_name); } ctx->pod_name = flb_sds_create(ptr->val); } else if (counter == 2) { if (ctx->container_name) { flb_sds_destroy(ctx->container_name); } ctx->container_name = flb_sds_create(ptr->val); } counter++; } if (!ctx->namespace_name || !ctx->pod_name || !ctx->container_name) { goto error; } } else if (strncmp(type, K8S_NODE, len_k8s_node) == 0) { list = parse_local_resource_id_to_list(new_local_resource_id, K8S_NODE); if (!list) { goto error; } mk_list_foreach(head, list) { ptr = mk_list_entry(head, struct local_resource_id_list, _head); if (first) { first = FLB_FALSE; continue; } if (ptr != NULL) { if (ctx->node_name) { flb_sds_destroy(ctx->node_name); } ctx->node_name = flb_sds_create(ptr->val); } } if (!ctx->node_name) { goto error; } } else if (strncmp(type, K8S_POD, len_k8s_pod) == 0) { list = parse_local_resource_id_to_list(new_local_resource_id, K8S_POD); if (!list) { goto error; } mk_list_foreach(head, list) { ptr = mk_list_entry(head, struct local_resource_id_list, _head); if (first) { first = FLB_FALSE; continue; } /* Follow the order of fields in local_resource_id */ if (counter == 0) { if (ctx->namespace_name) { flb_sds_destroy(ctx->namespace_name); } ctx->namespace_name = flb_sds_create(ptr->val); } else if (counter == 1) { if (ctx->pod_name) { flb_sds_destroy(ctx->pod_name); } ctx->pod_name = flb_sds_create(ptr->val); } counter++; } if (!ctx->namespace_name || !ctx->pod_name) { goto error; } } ret = 0; if (list) { flb_slist_destroy(list); flb_free(list); } flb_sds_destroy(new_local_resource_id); return ret; error: if (list) { flb_slist_destroy(list); flb_free(list); } if (strncmp(type, K8S_CONTAINER, len_k8s_container) == 0) { if (ctx->namespace_name) { flb_sds_destroy(ctx->namespace_name); } if (ctx->pod_name) { flb_sds_destroy(ctx->pod_name); } if (ctx->container_name) { flb_sds_destroy(ctx->container_name); } } else if (strncmp(type, K8S_NODE, len_k8s_node) == 0) { if (ctx->node_name) { flb_sds_destroy(ctx->node_name); } } else if (strncmp(type, K8S_POD, len_k8s_pod) == 0) { if (ctx->namespace_name) { flb_sds_destroy(ctx->namespace_name); } if (ctx->pod_name) { flb_sds_destroy(ctx->pod_name); } } flb_sds_destroy(new_local_resource_id); return -1; } /* * parse_labels * - Iterate throught the original payload (obj) and find out the entry that matches * the labels_key * - Used to convert all labels under labels_key to root-level `labels` field */ static msgpack_object *parse_labels(struct flb_stackdriver *ctx, msgpack_object *obj) { int i; int len; msgpack_object_kv *kv = NULL; if (!obj || obj->type != MSGPACK_OBJECT_MAP) { return NULL; } len = flb_sds_len(ctx->labels_key); for (i = 0; i < obj->via.map.size; i++) { kv = &obj->via.map.ptr[i]; if (flb_sds_casecmp(ctx->labels_key, kv->key.via.str.ptr, len) == 0) { /* only the first matching entry will be returned */ return &kv->val; } } //flb_plg_debug(ctx->ins, "labels_key [%s] not 
found in the payload", // ctx->labels_key); return NULL; } static void cb_results(const char *name, const char *value, size_t vlen, void *data) { struct flb_stackdriver *ctx = data; if (vlen == 0) { return; } if (strcmp(name, "pod_name") == 0) { if (ctx->pod_name != NULL) { flb_sds_destroy(ctx->pod_name); } ctx->pod_name = flb_sds_create_len(value, vlen); } else if (strcmp(name, "namespace_name") == 0) { if (ctx->namespace_name != NULL) { flb_sds_destroy(ctx->namespace_name); } ctx->namespace_name = flb_sds_create_len(value, vlen); } else if (strcmp(name, "container_name") == 0) { if (ctx->container_name != NULL) { flb_sds_destroy(ctx->container_name); } ctx->container_name = flb_sds_create_len(value, vlen); } return; } int is_tag_match_regex(struct flb_stackdriver *ctx, const char *tag, int tag_len) { int ret; int tag_prefix_len; int len_to_be_matched; const char *tag_str_to_be_matcheds; struct flb_regex *regex; tag_prefix_len = flb_sds_len(ctx->tag_prefix); tag_str_to_be_matcheds = tag + tag_prefix_len; len_to_be_matched = tag_len - tag_prefix_len; regex = flb_regex_create(DEFAULT_TAG_REGEX); ret = flb_regex_match(regex, (unsigned char *) tag_str_to_be_matcheds, len_to_be_matched); flb_regex_destroy(regex); /* 1 -> match; 0 -> doesn't match; < 0 -> error */ return ret; } /* extract_resource_labels_from_regex(3) will only be called if the * tag matches the regex rule */ int extract_resource_labels_from_regex(struct flb_stackdriver *ctx, const char *tag, int tag_len) { int ret = 1; int tag_prefix_len; int len_to_be_matched; const char *tag_str_to_be_matcheds; struct flb_regex *regex; struct flb_regex_search result; tag_prefix_len = flb_sds_len(ctx->tag_prefix); tag_str_to_be_matcheds = tag + tag_prefix_len; len_to_be_matched = tag_len - tag_prefix_len; regex = flb_regex_create(DEFAULT_TAG_REGEX); ret = flb_regex_do(regex, tag_str_to_be_matcheds, len_to_be_matched, &result); if (ret <= 0) { flb_plg_warn(ctx->ins, "invalid pattern for given tag %s", tag); return -1; } flb_regex_parse(regex, &result, cb_results, ctx); flb_regex_destroy(regex); return ret; } static int cb_stackdriver_init(struct flb_output_instance *ins, struct flb_config *config, void *data) { int ret; int io_flags = FLB_IO_TLS; char *token; struct flb_stackdriver *ctx; /* Create config context */ ctx = flb_stackdriver_conf_create(ins, config); if (!ctx) { flb_plg_error(ins, "configuration failed"); return -1; } /* Set context */ flb_output_set_context(ins, ctx); /* Network mode IPv6 */ if (ins->host.ipv6 == FLB_TRUE) { io_flags |= FLB_IO_IPV6; } /* Create Upstream context for Stackdriver Logging (no oauth2 service) */ ctx->u = flb_upstream_create_url(config, FLB_STD_WRITE_URL, io_flags, &ins->tls); ctx->metadata_u = flb_upstream_create_url(config, "http://metadata.google.internal", FLB_IO_TCP, NULL); if (!ctx->u) { flb_plg_error(ctx->ins, "upstream creation failed"); return -1; } if (!ctx->metadata_u) { flb_plg_error(ctx->ins, "metadata upstream creation failed"); return -1; } /* Metadata Upstream Sync flags */ ctx->metadata_u->flags &= ~FLB_IO_ASYNC; if (ins->test_mode == FLB_FALSE) { /* Retrieve oauth2 token */ token = get_google_token(ctx); if (!token) { flb_plg_warn(ctx->ins, "token retrieval failed"); } } if (ctx->metadata_server_auth) { ret = gce_metadata_read_project_id(ctx); if (ret == -1) { return -1; } ret = gce_metadata_read_zone(ctx); if (ret == -1) { return -1; } ret = gce_metadata_read_instance_id(ctx); if (ret == -1) { return -1; } } /* Validate project_id */ if (!ctx->project_id) { flb_plg_error(ctx->ins, 
"property 'project_id' is not set"); return -1; } return 0; } static int validate_severity_level(severity_t * s, const char * str, const unsigned int str_size) { int i = 0; const static struct { severity_t s; const unsigned int str_size; const char * str; } enum_mapping[] = { {FLB_STD_EMERGENCY, 9, "EMERGENCY"}, {FLB_STD_EMERGENCY, 5, "EMERG" }, {FLB_STD_ALERT , 1, "A" }, {FLB_STD_ALERT , 5, "ALERT" }, {FLB_STD_CRITICAL , 1, "C" }, {FLB_STD_CRITICAL , 1, "F" }, {FLB_STD_CRITICAL , 4, "CRIT" }, {FLB_STD_CRITICAL , 5, "FATAL" }, {FLB_STD_CRITICAL , 8, "CRITICAL" }, {FLB_STD_ERROR , 1, "E" }, {FLB_STD_ERROR , 3, "ERR" }, {FLB_STD_ERROR , 5, "ERROR" }, {FLB_STD_ERROR , 6, "SEVERE" }, {FLB_STD_WARNING , 1, "W" }, {FLB_STD_WARNING , 4, "WARN" }, {FLB_STD_WARNING , 7, "WARNING" }, {FLB_STD_NOTICE , 1, "N" }, {FLB_STD_NOTICE , 6, "NOTICE" }, {FLB_STD_INFO , 1, "I" }, {FLB_STD_INFO , 4, "INFO" }, {FLB_STD_DEBUG , 1, "D" }, {FLB_STD_DEBUG , 5, "DEBUG" }, {FLB_STD_DEBUG , 5, "TRACE" }, {FLB_STD_DEBUG , 9, "TRACE_INT"}, {FLB_STD_DEBUG , 4, "FINE" }, {FLB_STD_DEBUG , 5, "FINER" }, {FLB_STD_DEBUG , 6, "FINEST" }, {FLB_STD_DEBUG , 6, "CONFIG" }, {FLB_STD_DEFAULT , 7, "DEFAULT" } }; for (i = 0; i < sizeof (enum_mapping) / sizeof (enum_mapping[0]); ++i) { if (enum_mapping[i].str_size != str_size) { continue; } if (strncasecmp(str, enum_mapping[i].str, str_size) == 0) { *s = enum_mapping[i].s; return 0; } } return -1; } static int get_msgpack_obj(msgpack_object * subobj, const msgpack_object * o, const flb_sds_t key, const int key_size, msgpack_object_type type) { int i = 0; msgpack_object_kv * p = NULL; if (o == NULL || subobj == NULL) { return -1; } for (i = 0; i < o->via.map.size; i++) { p = &o->via.map.ptr[i]; if (p->val.type != type) { continue; } if (flb_sds_cmp(key, p->key.via.str.ptr, p->key.via.str.size) == 0) { *subobj = p->val; return 0; } } return -1; } static int get_severity_level(severity_t * s, const msgpack_object * o, const flb_sds_t key) { msgpack_object tmp; if (get_msgpack_obj(&tmp, o, key, flb_sds_len(key), MSGPACK_OBJECT_STR) == 0 && validate_severity_level(s, tmp.via.str.ptr, tmp.via.str.size) == 0) { return 0; } *s = 0; return -1; } static int get_stream(msgpack_object_map map) { int i; int len_stdout; int val_size; msgpack_object k; msgpack_object v; /* len(stdout) == len(stderr) */ len_stdout = sizeof(STDOUT) - 1; for (i = 0; i < map.size; i++) { k = map.ptr[i].key; v = map.ptr[i].val; if (k.type == MSGPACK_OBJECT_STR && strncmp(k.via.str.ptr, "stream", k.via.str.size) == 0) { val_size = v.via.str.size; if (val_size == len_stdout) { if (strncmp(v.via.str.ptr, STDOUT, val_size) == 0) { return STREAM_STDOUT; } else if (strncmp(v.via.str.ptr, STDERR, val_size) == 0) { return STREAM_STDERR; } } } } return STREAM_UNKNOWN; } static insert_id_status validate_insert_id(msgpack_object * insert_id_value, const msgpack_object * obj) { int i = 0; msgpack_object_kv * p = NULL; insert_id_status ret = INSERTID_NOT_PRESENT; if (obj == NULL) { return ret; } for (i = 0; i < obj->via.map.size; i++) { p = &obj->via.map.ptr[i]; if (p->key.type != MSGPACK_OBJECT_STR) { continue; } if (validate_key(p->key, DEFAULT_INSERT_ID_KEY, INSERT_ID_SIZE)) { if (p->val.type == MSGPACK_OBJECT_STR && p->val.via.str.size > 0) { *insert_id_value = p->val; ret = INSERTID_VALID; } else { ret = INSERTID_INVALID; } break; } } return ret; } static int pack_json_payload(int insert_id_extracted, int operation_extracted, int operation_extra_size, int source_location_extracted, int source_location_extra_size, int 
http_request_extracted, int http_request_extra_size, timestamp_status tms_status, msgpack_packer *mp_pck, msgpack_object *obj, struct flb_stackdriver *ctx) { /* Specified fields include local_resource_id, operation, sourceLocation ... */ int i, j; int to_remove = 0; int ret; int map_size; int new_map_size; int len; int len_to_be_removed; int key_not_found; flb_sds_t removed; flb_sds_t monitored_resource_key; flb_sds_t local_resource_id_key; flb_sds_t stream; msgpack_object_kv *kv = obj->via.map.ptr; msgpack_object_kv *const kvend = obj->via.map.ptr + obj->via.map.size; monitored_resource_key = flb_sds_create(MONITORED_RESOURCE_KEY); local_resource_id_key = flb_sds_create(LOCAL_RESOURCE_ID_KEY); stream = flb_sds_create("stream"); /* * array of elements that need to be removed from payload, * special field 'operation' will be processed individually */ flb_sds_t to_be_removed[] = { monitored_resource_key, local_resource_id_key, ctx->labels_key, stream /* more special fields are required to be added */ }; if (insert_id_extracted == FLB_TRUE) { to_remove += 1; } if (operation_extracted == FLB_TRUE && operation_extra_size == 0) { to_remove += 1; } if (source_location_extracted == FLB_TRUE && source_location_extra_size == 0) { to_remove += 1; } if (http_request_extracted == FLB_TRUE && http_request_extra_size == 0) { to_remove += 1; } if (tms_status == FORMAT_TIMESTAMP_OBJECT) { to_remove += 1; } if (tms_status == FORMAT_TIMESTAMP_DUO_FIELDS) { to_remove += 2; } map_size = obj->via.map.size; len_to_be_removed = sizeof(to_be_removed) / sizeof(to_be_removed[0]); for (i = 0; i < map_size; i++) { kv = &obj->via.map.ptr[i]; len = kv->key.via.str.size; for (j = 0; j < len_to_be_removed; j++) { removed = to_be_removed[j]; /* * check length of key to avoid partial matching * e.g. 
labels key = labels && kv->key = labelss */ if (flb_sds_cmp(removed, kv->key.via.str.ptr, len) == 0) { to_remove += 1; break; } } } new_map_size = map_size - to_remove; ret = msgpack_pack_map(mp_pck, new_map_size); if (ret < 0) { goto error; } /* points back to the beginning of map */ kv = obj->via.map.ptr; for(; kv != kvend; ++kv ) { key_not_found = 1; /* processing logging.googleapis.com/insertId */ if (insert_id_extracted == FLB_TRUE && validate_key(kv->key, DEFAULT_INSERT_ID_KEY, INSERT_ID_SIZE)) { continue; } /* processing logging.googleapis.com/operation */ if (validate_key(kv->key, OPERATION_FIELD_IN_JSON, OPERATION_KEY_SIZE) && kv->val.type == MSGPACK_OBJECT_MAP) { if (operation_extra_size > 0) { msgpack_pack_object(mp_pck, kv->key); pack_extra_operation_subfields(mp_pck, &kv->val, operation_extra_size); } continue; } if (validate_key(kv->key, SOURCELOCATION_FIELD_IN_JSON, SOURCE_LOCATION_SIZE) && kv->val.type == MSGPACK_OBJECT_MAP) { if (source_location_extra_size > 0) { msgpack_pack_object(mp_pck, kv->key); pack_extra_source_location_subfields(mp_pck, &kv->val, source_location_extra_size); } continue; } if (validate_key(kv->key, HTTPREQUEST_FIELD_IN_JSON, HTTP_REQUEST_KEY_SIZE) && kv->val.type == MSGPACK_OBJECT_MAP) { if(http_request_extra_size > 0) { msgpack_pack_object(mp_pck, kv->key); pack_extra_http_request_subfields(mp_pck, &kv->val, http_request_extra_size); } continue; } if (validate_key(kv->key, "timestamp", 9) && tms_status == FORMAT_TIMESTAMP_OBJECT) { continue; } if (validate_key(kv->key, "timestampSeconds", 16) && tms_status == FORMAT_TIMESTAMP_DUO_FIELDS) { continue; } if (validate_key(kv->key, "timestampNanos", 14) && tms_status == FORMAT_TIMESTAMP_DUO_FIELDS) { continue; } len = kv->key.via.str.size; for (j = 0; j < len_to_be_removed; j++) { removed = to_be_removed[j]; if (flb_sds_cmp(removed, kv->key.via.str.ptr, len) == 0) { key_not_found = 0; break; } } if (key_not_found) { ret = msgpack_pack_object(mp_pck, kv->key); if (ret < 0) { goto error; } ret = msgpack_pack_object(mp_pck, kv->val); if (ret < 0) { goto error; } } } flb_sds_destroy(monitored_resource_key); flb_sds_destroy(local_resource_id_key); flb_sds_destroy(stream); return 0; error: flb_sds_destroy(monitored_resource_key); flb_sds_destroy(local_resource_id_key); flb_sds_destroy(stream); return ret; } static int stackdriver_format(struct flb_config *config, struct flb_input_instance *ins, void *plugin_context, void *flush_ctx, const char *tag, int tag_len, const void *data, size_t bytes, void **out_data, size_t *out_size) { int len; int ret; int array_size = 0; /* The default value is 3: timestamp, jsonPayload, logName. 
*/ int entry_size = 3; int stream; size_t s; size_t off = 0; char path[PATH_MAX]; char time_formatted[255]; const char *newtag; msgpack_object *obj; msgpack_object *labels_ptr; msgpack_unpacked result; msgpack_sbuffer mp_sbuf; msgpack_packer mp_pck; flb_sds_t out_buf; struct flb_stackdriver *ctx = plugin_context; /* Parameters for severity */ int severity_extracted = FLB_FALSE; severity_t severity; /* Parameters for insertId */ msgpack_object insert_id_obj; insert_id_status in_status; int insert_id_extracted; /* Parameters in Operation */ flb_sds_t operation_id; flb_sds_t operation_producer; int operation_first = FLB_FALSE; int operation_last = FLB_FALSE; int operation_extracted = FLB_FALSE; int operation_extra_size = 0; /* Parameters for sourceLocation */ flb_sds_t source_location_file; int64_t source_location_line = 0; flb_sds_t source_location_function; int source_location_extracted = FLB_FALSE; int source_location_extra_size = 0; /* Parameters for httpRequest */ struct http_request_field http_request; int http_request_extracted = FLB_FALSE; int http_request_extra_size = 0; /* Parameters for Timestamp */ struct tm tm; struct flb_time tms; timestamp_status tms_status; /* Count number of records */ array_size = flb_mp_count(data, bytes); /* * Search each entry and validate insertId. * Reject the entry if insertId is invalid. * If all the entries are rejected, stop formatting. * */ off = 0; msgpack_unpacked_init(&result); while (msgpack_unpack_next(&result, data, bytes, &off) == MSGPACK_UNPACK_SUCCESS) { flb_time_pop_from_msgpack(&tms, &result, &obj); /* Extract insertId */ in_status = validate_insert_id(&insert_id_obj, obj); if (in_status == INSERTID_INVALID) { flb_plg_error(ctx->ins, "Incorrect insertId received. InsertId should be non-empty string."); array_size -= 1; } } msgpack_unpacked_destroy(&result); if (array_size == 0) { *out_size = 0; return -1; } /* Create temporal msgpack buffer */ msgpack_sbuffer_init(&mp_sbuf); msgpack_packer_init(&mp_pck, &mp_sbuf, msgpack_sbuffer_write); /* * Pack root map (resource & entries): * * {"resource": {"type": "...", "labels": {...}, * "entries": [] */ msgpack_pack_map(&mp_pck, 2); msgpack_pack_str(&mp_pck, 8); msgpack_pack_str_body(&mp_pck, "resource", 8); /* type & labels */ msgpack_pack_map(&mp_pck, 2); /* type */ msgpack_pack_str(&mp_pck, 4); msgpack_pack_str_body(&mp_pck, "type", 4); msgpack_pack_str(&mp_pck, flb_sds_len(ctx->resource)); msgpack_pack_str_body(&mp_pck, ctx->resource, flb_sds_len(ctx->resource)); msgpack_pack_str(&mp_pck, 6); msgpack_pack_str_body(&mp_pck, "labels", 6); if (ctx->k8s_resource_type) { ret = extract_local_resource_id(data, bytes, ctx, tag); if (ret != 0) { flb_plg_error(ctx->ins, "fail to construct local_resource_id"); msgpack_sbuffer_destroy(&mp_sbuf); return -1; } } ret = parse_monitored_resource(ctx, data, bytes, &mp_pck); if (ret != 0) { if (strcmp(ctx->resource, "global") == 0) { /* global resource has field project_id */ msgpack_pack_map(&mp_pck, 1); msgpack_pack_str(&mp_pck, 10); msgpack_pack_str_body(&mp_pck, "project_id", 10); msgpack_pack_str(&mp_pck, flb_sds_len(ctx->project_id)); msgpack_pack_str_body(&mp_pck, ctx->project_id, flb_sds_len(ctx->project_id)); } else if (strcmp(ctx->resource, "gce_instance") == 0) { /* gce_instance resource has fields project_id, zone, instance_id */ msgpack_pack_map(&mp_pck, 3); msgpack_pack_str(&mp_pck, 10); msgpack_pack_str_body(&mp_pck, "project_id", 10); msgpack_pack_str(&mp_pck, flb_sds_len(ctx->project_id)); msgpack_pack_str_body(&mp_pck, ctx->project_id, 
flb_sds_len(ctx->project_id)); msgpack_pack_str(&mp_pck, 4); msgpack_pack_str_body(&mp_pck, "zone", 4); msgpack_pack_str(&mp_pck, flb_sds_len(ctx->zone)); msgpack_pack_str_body(&mp_pck, ctx->zone, flb_sds_len(ctx->zone)); msgpack_pack_str(&mp_pck, 11); msgpack_pack_str_body(&mp_pck, "instance_id", 11); msgpack_pack_str(&mp_pck, flb_sds_len(ctx->instance_id)); msgpack_pack_str_body(&mp_pck, ctx->instance_id, flb_sds_len(ctx->instance_id)); } else if (strcmp(ctx->resource, K8S_CONTAINER) == 0) { /* k8s_container resource has fields project_id, location, cluster_name, * namespace_name, pod_name, container_name * * The local_resource_id for k8s_container is in format: * k8s_container.<namespace_name>.<pod_name>.<container_name> */ if (is_tag_match_regex(ctx, tag, tag_len) > 0) { ret = extract_resource_labels_from_regex(ctx, tag, tag_len); } else { ret = process_local_resource_id(ctx, K8S_CONTAINER); } if (ret == -1) { flb_plg_error(ctx->ins, "fail to extract resource labels " "for k8s_container resource type"); msgpack_sbuffer_destroy(&mp_sbuf); return -1; } msgpack_pack_map(&mp_pck, 6); msgpack_pack_str(&mp_pck, 10); msgpack_pack_str_body(&mp_pck, "project_id", 10); msgpack_pack_str(&mp_pck, flb_sds_len(ctx->project_id)); msgpack_pack_str_body(&mp_pck, ctx->project_id, flb_sds_len(ctx->project_id)); msgpack_pack_str(&mp_pck, 8); msgpack_pack_str_body(&mp_pck, "location", 8); msgpack_pack_str(&mp_pck, flb_sds_len(ctx->cluster_location)); msgpack_pack_str_body(&mp_pck, ctx->cluster_location, flb_sds_len(ctx->cluster_location)); msgpack_pack_str(&mp_pck, 12); msgpack_pack_str_body(&mp_pck, "cluster_name", 12); msgpack_pack_str(&mp_pck, flb_sds_len(ctx->cluster_name)); msgpack_pack_str_body(&mp_pck, ctx->cluster_name, flb_sds_len(ctx->cluster_name)); msgpack_pack_str(&mp_pck, 14); msgpack_pack_str_body(&mp_pck, "namespace_name", 14); msgpack_pack_str(&mp_pck, flb_sds_len(ctx->namespace_name)); msgpack_pack_str_body(&mp_pck, ctx->namespace_name, flb_sds_len(ctx->namespace_name)); msgpack_pack_str(&mp_pck, 8); msgpack_pack_str_body(&mp_pck, "pod_name", 8); msgpack_pack_str(&mp_pck, flb_sds_len(ctx->pod_name)); msgpack_pack_str_body(&mp_pck, ctx->pod_name, flb_sds_len(ctx->pod_name)); msgpack_pack_str(&mp_pck, 14); msgpack_pack_str_body(&mp_pck, "container_name", 14); msgpack_pack_str(&mp_pck, flb_sds_len(ctx->container_name)); msgpack_pack_str_body(&mp_pck, ctx->container_name, flb_sds_len(ctx->container_name)); } else if (strcmp(ctx->resource, K8S_NODE) == 0) { /* k8s_node resource has fields project_id, location, cluster_name, node_name * * The local_resource_id for k8s_node is in format: * k8s_node.<node_name> */ ret = process_local_resource_id(ctx, K8S_NODE); if (ret != 0) { flb_plg_error(ctx->ins, "fail to process local_resource_id from " "log entry for k8s_node"); msgpack_sbuffer_destroy(&mp_sbuf); return -1; } msgpack_pack_map(&mp_pck, 4); msgpack_pack_str(&mp_pck, 10); msgpack_pack_str_body(&mp_pck, "project_id", 10); msgpack_pack_str(&mp_pck, flb_sds_len(ctx->project_id)); msgpack_pack_str_body(&mp_pck, ctx->project_id, flb_sds_len(ctx->project_id)); msgpack_pack_str(&mp_pck, 8); msgpack_pack_str_body(&mp_pck, "location", 8); msgpack_pack_str(&mp_pck, flb_sds_len(ctx->cluster_location)); msgpack_pack_str_body(&mp_pck, ctx->cluster_location, flb_sds_len(ctx->cluster_location)); msgpack_pack_str(&mp_pck, 12); msgpack_pack_str_body(&mp_pck, "cluster_name", 12); msgpack_pack_str(&mp_pck, flb_sds_len(ctx->cluster_name)); msgpack_pack_str_body(&mp_pck, ctx->cluster_name, 
flb_sds_len(ctx->cluster_name)); msgpack_pack_str(&mp_pck, 9); msgpack_pack_str_body(&mp_pck, "node_name", 9); msgpack_pack_str(&mp_pck, flb_sds_len(ctx->node_name)); msgpack_pack_str_body(&mp_pck, ctx->node_name, flb_sds_len(ctx->node_name)); } else if (strcmp(ctx->resource, K8S_POD) == 0) { /* k8s_pod resource has fields project_id, location, cluster_name, * namespace_name, pod_name. * * The local_resource_id for k8s_pod is in format: * k8s_pod.<namespace_name>.<pod_name> */ ret = process_local_resource_id(ctx, K8S_POD); if (ret != 0) { flb_plg_error(ctx->ins, "fail to process local_resource_id from " "log entry for k8s_pod"); msgpack_sbuffer_destroy(&mp_sbuf); return -1; } msgpack_pack_map(&mp_pck, 5); msgpack_pack_str(&mp_pck, 10); msgpack_pack_str_body(&mp_pck, "project_id", 10); msgpack_pack_str(&mp_pck, flb_sds_len(ctx->project_id)); msgpack_pack_str_body(&mp_pck, ctx->project_id, flb_sds_len(ctx->project_id)); msgpack_pack_str(&mp_pck, 8); msgpack_pack_str_body(&mp_pck, "location", 8); msgpack_pack_str(&mp_pck, flb_sds_len(ctx->cluster_location)); msgpack_pack_str_body(&mp_pck, ctx->cluster_location, flb_sds_len(ctx->cluster_location)); msgpack_pack_str(&mp_pck, 12); msgpack_pack_str_body(&mp_pck, "cluster_name", 12); msgpack_pack_str(&mp_pck, flb_sds_len(ctx->cluster_name)); msgpack_pack_str_body(&mp_pck, ctx->cluster_name, flb_sds_len(ctx->cluster_name)); msgpack_pack_str(&mp_pck, 14); msgpack_pack_str_body(&mp_pck, "namespace_name", 14); msgpack_pack_str(&mp_pck, flb_sds_len(ctx->namespace_name)); msgpack_pack_str_body(&mp_pck, ctx->namespace_name, flb_sds_len(ctx->namespace_name)); msgpack_pack_str(&mp_pck, 8); msgpack_pack_str_body(&mp_pck, "pod_name", 8); msgpack_pack_str(&mp_pck, flb_sds_len(ctx->pod_name)); msgpack_pack_str_body(&mp_pck, ctx->pod_name, flb_sds_len(ctx->pod_name)); } else { flb_plg_error(ctx->ins, "unsupported resource type '%s'", ctx->resource); msgpack_sbuffer_destroy(&mp_sbuf); return -1; } } msgpack_pack_str(&mp_pck, 7); msgpack_pack_str_body(&mp_pck, "entries", 7); /* Append entries */ msgpack_pack_array(&mp_pck, array_size); off = 0; msgpack_unpacked_init(&result); while (msgpack_unpack_next(&result, data, bytes, &off) == MSGPACK_UNPACK_SUCCESS) { /* Get timestamp */ flb_time_pop_from_msgpack(&tms, &result, &obj); tms_status = extract_timestamp(obj, &tms); /* * Pack entry * * { * "severity": "...", * "labels": "...", * "logName": "...", * "jsonPayload": {...}, * "timestamp": "..." 
* } */ entry_size = 3; /* Extract severity */ severity_extracted = FLB_FALSE; if (ctx->severity_key && get_severity_level(&severity, obj, ctx->severity_key) == 0) { severity_extracted = FLB_TRUE; entry_size += 1; } /* Extract insertId */ in_status = validate_insert_id(&insert_id_obj, obj); if (in_status == INSERTID_VALID) { insert_id_extracted = FLB_TRUE; entry_size += 1; } else if (in_status == INSERTID_NOT_PRESENT) { insert_id_extracted = FLB_FALSE; } else { continue; } /* Extract operation */ operation_id = flb_sds_create(""); operation_producer = flb_sds_create(""); operation_first = FLB_FALSE; operation_last = FLB_FALSE; operation_extra_size = 0; operation_extracted = extract_operation(&operation_id, &operation_producer, &operation_first, &operation_last, obj, &operation_extra_size); if (operation_extracted == FLB_TRUE) { entry_size += 1; } /* Extract sourceLocation */ source_location_file = flb_sds_create(""); source_location_line = 0; source_location_function = flb_sds_create(""); source_location_extra_size = 0; source_location_extracted = extract_source_location(&source_location_file, &source_location_line, &source_location_function, obj, &source_location_extra_size); if (source_location_extracted == FLB_TRUE) { entry_size += 1; } /* Extract httpRequest */ init_http_request(&http_request); http_request_extra_size = 0; http_request_extracted = extract_http_request(&http_request, obj, &http_request_extra_size); if (http_request_extracted == FLB_TRUE) { entry_size += 1; } /* Extract labels */ labels_ptr = parse_labels(ctx, obj); if (labels_ptr != NULL) { if (labels_ptr->type != MSGPACK_OBJECT_MAP) { flb_plg_error(ctx->ins, "the type of labels should be map"); flb_sds_destroy(operation_id); flb_sds_destroy(operation_producer); msgpack_unpacked_destroy(&result); msgpack_sbuffer_destroy(&mp_sbuf); return -1; } entry_size += 1; } msgpack_pack_map(&mp_pck, entry_size); /* Add severity into the log entry */ if (severity_extracted == FLB_TRUE) { msgpack_pack_str(&mp_pck, 8); msgpack_pack_str_body(&mp_pck, "severity", 8); msgpack_pack_int(&mp_pck, severity); } /* Add insertId field into the log entry */ if (insert_id_extracted == FLB_TRUE) { msgpack_pack_str(&mp_pck, 8); msgpack_pack_str_body(&mp_pck, "insertId", 8); msgpack_pack_object(&mp_pck, insert_id_obj); } /* Add operation field into the log entry */ if (operation_extracted == FLB_TRUE) { add_operation_field(&operation_id, &operation_producer, &operation_first, &operation_last, &mp_pck); } /* Add sourceLocation field into the log entry */ if (source_location_extracted == FLB_TRUE) { add_source_location_field(&source_location_file, source_location_line, &source_location_function, &mp_pck); } /* Add httpRequest field into the log entry */ if (http_request_extracted == FLB_TRUE) { add_http_request_field(&http_request, &mp_pck); } /* labels */ if (labels_ptr != NULL) { msgpack_pack_str(&mp_pck, 6); msgpack_pack_str_body(&mp_pck, "labels", 6); msgpack_pack_object(&mp_pck, *labels_ptr); } /* Clean up id and producer if operation extracted */ flb_sds_destroy(operation_id); flb_sds_destroy(operation_producer); flb_sds_destroy(source_location_file); flb_sds_destroy(source_location_function); destroy_http_request(&http_request); /* jsonPayload */ msgpack_pack_str(&mp_pck, 11); msgpack_pack_str_body(&mp_pck, "jsonPayload", 11); pack_json_payload(insert_id_extracted, operation_extracted, operation_extra_size, source_location_extracted, source_location_extra_size, http_request_extracted, http_request_extra_size, tms_status, &mp_pck, obj, ctx); /* 
avoid modifying the original tag */ newtag = tag; if (ctx->k8s_resource_type) { stream = get_stream(result.data.via.array.ptr[1].via.map); if (stream == STREAM_STDOUT) { newtag = "stdout"; } else if (stream == STREAM_STDERR) { newtag = "stderr"; } } /* logName */ len = snprintf(path, sizeof(path) - 1, "projects/%s/logs/%s", ctx->project_id, newtag); msgpack_pack_str(&mp_pck, 7); msgpack_pack_str_body(&mp_pck, "logName", 7); msgpack_pack_str(&mp_pck, len); msgpack_pack_str_body(&mp_pck, path, len); /* timestamp */ msgpack_pack_str(&mp_pck, 9); msgpack_pack_str_body(&mp_pck, "timestamp", 9); /* Format the time */ /* * If format is timestamp_object or timestamp_duo_fields, * tms has been updated. * * If timestamp is not presen, * use the default tms(current time). */ gmtime_r(&tms.tm.tv_sec, &tm); s = strftime(time_formatted, sizeof(time_formatted) - 1, FLB_STD_TIME_FMT, &tm); len = snprintf(time_formatted + s, sizeof(time_formatted) - 1 - s, ".%09" PRIu64 "Z", (uint64_t) tms.tm.tv_nsec); s += len; msgpack_pack_str(&mp_pck, s); msgpack_pack_str_body(&mp_pck, time_formatted, s); } /* Convert from msgpack to JSON */ out_buf = flb_msgpack_raw_to_json_sds(mp_sbuf.data, mp_sbuf.size); msgpack_sbuffer_destroy(&mp_sbuf); if (!out_buf) { flb_plg_error(ctx->ins, "error formatting JSON payload"); msgpack_unpacked_destroy(&result); return -1; } *out_data = out_buf; *out_size = flb_sds_len(out_buf); return 0; } static void set_authorization_header(struct flb_http_client *c, char *token) { int len; char header[512]; len = snprintf(header, sizeof(header) - 1, "Bearer %s", token); flb_http_add_header(c, "Authorization", 13, header, len); } static void cb_stackdriver_flush(const void *data, size_t bytes, const char *tag, int tag_len, struct flb_input_instance *i_ins, void *out_context, struct flb_config *config) { (void) i_ins; (void) config; int ret; int ret_code = FLB_RETRY; size_t b_sent; char *token; flb_sds_t payload_buf; size_t payload_size; void *out_buf; size_t out_size; struct flb_stackdriver *ctx = out_context; struct flb_upstream_conn *u_conn; struct flb_http_client *c; /* Get upstream connection */ u_conn = flb_upstream_conn_get(ctx->u); if (!u_conn) { FLB_OUTPUT_RETURN(FLB_RETRY); } /* Reformat msgpack to stackdriver JSON payload */ ret = stackdriver_format(config, i_ins, ctx, NULL, tag, tag_len, data, bytes, &out_buf, &out_size); if (ret != 0) { flb_upstream_conn_release(u_conn); FLB_OUTPUT_RETURN(FLB_RETRY); } payload_buf = (flb_sds_t) out_buf; payload_size = out_size; /* Get or renew Token */ token = get_google_token(ctx); if (!token) { flb_plg_error(ctx->ins, "cannot retrieve oauth2 token"); flb_upstream_conn_release(u_conn); flb_sds_destroy(payload_buf); FLB_OUTPUT_RETURN(FLB_RETRY); } /* Compose HTTP Client request */ c = flb_http_client(u_conn, FLB_HTTP_POST, FLB_STD_WRITE_URI, payload_buf, payload_size, NULL, 0, NULL, 0); flb_http_buffer_size(c, 4192); flb_http_add_header(c, "User-Agent", 10, "Fluent-Bit", 10); flb_http_add_header(c, "Content-Type", 12, "application/json", 16); /* Compose and append Authorization header */ set_authorization_header(c, token); /* Send HTTP request */ ret = flb_http_do(c, &b_sent); /* validate response */ if (ret != 0) { flb_plg_warn(ctx->ins, "http_do=%i", ret); ret_code = FLB_RETRY; } else { /* The request was issued successfully, validate the 'error' field */ flb_plg_debug(ctx->ins, "HTTP Status=%i", c->resp.status); if (c->resp.status == 200) { ret_code = FLB_OK; } else { if (c->resp.payload_size > 0) { /* we got an error */ flb_plg_warn(ctx->ins, 
"error\n%s", c->resp.payload); } else { flb_plg_debug(ctx->ins, "response\n%s", c->resp.payload); } ret_code = FLB_RETRY; } } /* Cleanup */ flb_sds_destroy(payload_buf); flb_http_client_destroy(c); flb_upstream_conn_release(u_conn); /* Done */ FLB_OUTPUT_RETURN(ret_code); } static int cb_stackdriver_exit(void *data, struct flb_config *config) { struct flb_stackdriver *ctx = data; if (!ctx) { return -1; } flb_stackdriver_conf_destroy(ctx); return 0; } struct flb_output_plugin out_stackdriver_plugin = { .name = "stackdriver", .description = "Send events to Google Stackdriver Logging", .cb_init = cb_stackdriver_init, .cb_flush = cb_stackdriver_flush, .cb_exit = cb_stackdriver_exit, /* Test */ .test_formatter.callback = stackdriver_format, /* Plugin flags */ .flags = FLB_OUTPUT_NET | FLB_IO_TLS, };
1
13,036
do not print the payload since it might be corrupted; you can get the data with the _debug.http.response_payload configuration property
fluent-fluent-bit
c
@@ -33,6 +33,7 @@ var _ = Describe("addons flow", func() { It("app init creates an copilot directory", func() { Expect("./copilot").Should(BeADirectory()) + Expect("./copilot/.workspace").Should(BeAnExistingFile()) }) It("app ls includes new app", func() {
1
// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 package addons_test import ( "encoding/json" "fmt" "io" "io/ioutil" "net/http" "os" "github.com/aws/amazon-ecs-cli-v2/e2e/internal/client" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" ) var _ = Describe("addons flow", func() { Context("when creating a new app", func() { var ( initErr error ) BeforeAll(func() { _, initErr = cli.AppInit(&client.AppInitRequest{ AppName: appName, }) }) It("app init succeeds", func() { Expect(initErr).NotTo(HaveOccurred()) }) It("app init creates an copilot directory", func() { Expect("./copilot").Should(BeADirectory()) }) It("app ls includes new app", func() { apps, err := cli.AppList() Expect(err).NotTo(HaveOccurred()) Expect(apps).To(ContainSubstring(appName)) }) It("app show includes app name", func() { appShowOutput, err := cli.AppShow(appName) Expect(err).NotTo(HaveOccurred()) Expect(appShowOutput.Name).To(Equal(appName)) Expect(appShowOutput.URI).To(BeEmpty()) }) }) Context("when creating a new environment", func() { var ( testEnvInitErr error ) BeforeAll(func() { _, testEnvInitErr = cli.EnvInit(&client.EnvInitRequest{ AppName: appName, EnvName: "test", Profile: "default", Prod: false, }) }) It("env init should succeed", func() { Expect(testEnvInitErr).NotTo(HaveOccurred()) }) }) Context("when adding a svc", func() { var ( svcInitErr error ) BeforeAll(func() { _, svcInitErr = cli.SvcInit(&client.SvcInitRequest{ Name: svcName, SvcType: "Load Balanced Web Service", Dockerfile: "./hello/Dockerfile", SvcPort: "80", }) }) It("svc init should succeed", func() { Expect(svcInitErr).NotTo(HaveOccurred()) }) It("svc init should create svc manifests", func() { Expect("./copilot/hello/manifest.yml").Should(BeAnExistingFile()) }) It("svc ls should list the service", func() { svcList, svcListError := cli.SvcList(appName) Expect(svcListError).NotTo(HaveOccurred()) Expect(len(svcList.Services)).To(Equal(1)) svcsByName := map[string]client.SvcDescription{} for _, svc := range svcList.Services { svcsByName[svc.Name] = svc } for _, svc := range []string{svcName} { Expect(svcsByName[svc].AppName).To(Equal(appName)) Expect(svcsByName[svc].Name).To(Equal(svc)) } }) }) Context("copy addons file to copilot dir", func() { It("should copy all addons/ files to the app's workspace", func() { err := os.MkdirAll("./copilot/hello/addons", 0777) Expect(err).NotTo(HaveOccurred(), "create addons dir") fds, err := ioutil.ReadDir("./hello/addons") Expect(err).NotTo(HaveOccurred(), "read addons dir") for _, fd := range fds { destFile, err := os.Create(fmt.Sprintf("./copilot/hello/addons/%s", fd.Name())) Expect(err).NotTo(HaveOccurred(), "create destination file") defer destFile.Close() srcFile, err := os.Open(fmt.Sprintf("./hello/addons/%s", fd.Name())) Expect(err).NotTo(HaveOccurred(), "open source file") defer srcFile.Close() _, err = io.Copy(destFile, srcFile) Expect(err).NotTo(HaveOccurred(), "copy file") } }) }) Context("when deploying svc", func() { var ( appDeployErr error ) BeforeAll(func() { _, appDeployErr = cli.SvcDeploy(&client.SvcDeployInput{ Name: svcName, EnvName: "test", ImageTag: "gallopinggurdey", }) }) It("svc deploy should succeed", func() { Expect(appDeployErr).NotTo(HaveOccurred()) }) It("should be able to make a POST request", func() { svc, svcShowErr := cli.SvcShow(&client.SvcShowRequest{ AppName: appName, Name: svcName, }) Expect(svcShowErr).NotTo(HaveOccurred()) Expect(len(svc.Routes)).To(Equal(1)) // Make a POST request to the API to store the user 
name in DynamoDB. route := svc.Routes[0] Expect(route.Environment).To(Equal("test")) Eventually(func() (int, error) { resp, fetchErr := http.Post(fmt.Sprintf("%s/%s/%s", route.URL, svcName, appName), "application/json", nil) return resp.StatusCode, fetchErr }, "30s", "1s").Should(Equal(201)) }) It("should be able to retrieve the results", func() { svc, svcShowErr := cli.SvcShow(&client.SvcShowRequest{ AppName: appName, Name: svcName, }) Expect(svcShowErr).NotTo(HaveOccurred()) Expect(len(svc.Routes)).To(Equal(1)) // Make a GET request to the API to retrieve the list of user names from DynamoDB. route := svc.Routes[0] Expect(route.Environment).To(Equal("test")) var resp *http.Response var fetchErr error Eventually(func() (int, error) { resp, fetchErr = http.Get(fmt.Sprintf("%s/hello", route.URL)) return resp.StatusCode, fetchErr }, "10s", "1s").Should(Equal(200)) bodyBytes, err := ioutil.ReadAll(resp.Body) Expect(err).NotTo(HaveOccurred()) type Result struct { Names []string } result := Result{} err = json.Unmarshal(bodyBytes, &result) Expect(err).NotTo(HaveOccurred()) Expect(result.Names[0]).To(Equal(appName)) }) It("svc logs should display logs", func() { var svcLogs []client.SvcLogsOutput var svcLogsErr error Eventually(func() ([]client.SvcLogsOutput, error) { svcLogs, svcLogsErr = cli.SvcLogs(&client.SvcLogsRequest{ AppName: appName, Name: svcName, EnvName: "test", Since: "1h", }) return svcLogs, svcLogsErr }, "60s", "10s").ShouldNot(BeEmpty()) for _, logLine := range svcLogs { Expect(logLine.Message).NotTo(Equal("")) Expect(logLine.LogStreamName).NotTo(Equal("")) Expect(logLine.Timestamp).NotTo(Equal(0)) Expect(logLine.IngestionTime).NotTo(Equal(0)) } }) }) })
1
13,771
maybe "app init creates an copilot directory and a workspace file"? since you validate for the file as well.
aws-copilot-cli
go
@@ -7,10 +7,9 @@ class Topic < ActiveRecord::Base options.has_many :products, source_type: 'Product' options.has_many :topics, source_type: 'Topic' options.has_many :videos, source_type: 'Video' + options.has_many :trails, source_type: 'Trail' end - has_many :trails - validates :name, presence: true validates :slug, presence: true, uniqueness: true
1
class Topic < ActiveRecord::Base extend FriendlyId has_many :classifications, dependent: :destroy with_options(through: :classifications, source: :classifiable) do |options| options.has_many :products, source_type: 'Product' options.has_many :topics, source_type: 'Topic' options.has_many :videos, source_type: 'Video' end has_many :trails validates :name, presence: true validates :slug, presence: true, uniqueness: true friendly_id :name, use: [:slugged, :finders] def self.explorable where(explorable: true) end def to_s name end def published_trails trails.published end def to_param slug end def weekly_iteration_videos videos.where(watchable: Show.the_weekly_iteration) end end
1
17,455
Prefer double-quoted strings unless you need single quotes to avoid extra backslashes for escaping.
thoughtbot-upcase
rb
@@ -1,6 +1,6 @@ <?php -namespace Sabre\Event; +namespace Amp; /** * @template TReturn
1
<?php namespace Sabre\Event; /** * @template TReturn * @param callable():\Generator<mixed, mixed, mixed, TReturn> $gen * @return Promise<TReturn> */ function coroutine(callable $gen) : Promise {} /** * @template TReturn */ class Promise { /** * @return TReturn */ function wait() {} }
1
7,173
Nit: Could rename this file from SabreEvent.php now that it's not for Sabre
vimeo-psalm
php
@@ -39,4 +39,9 @@ func Start() { func Stop() { tch.Stop() yarpc.Stop() + http.Stop() + apachethrift.Stop() } + +// TODO(abg): We should probably use defers to ensure things that started up +// successfully are stopped before we exit.
1
// Copyright (c) 2016 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. package server import ( "github.com/yarpc/yarpc-go/crossdock/server/apachethrift" "github.com/yarpc/yarpc-go/crossdock/server/http" "github.com/yarpc/yarpc-go/crossdock/server/tch" "github.com/yarpc/yarpc-go/crossdock/server/yarpc" ) // Start starts all required Crossdock test servers func Start() { tch.Start() yarpc.Start() http.Start() apachethrift.Start() } // Stop stops all required Crossdock test servers func Stop() { tch.Stop() yarpc.Stop() }
1
10,601
I believe this `Stop` function _is_ called via `defer`.
yarpc-yarpc-go
go
@@ -28,13 +28,11 @@ import { __ } from '@wordpress/i18n'; import API from 'googlesitekit-api'; import Data from 'googlesitekit-data'; import { STORE_NAME } from './constants'; -import { parseAccountID } from '../util'; import { createFetchStore } from '../../../googlesitekit/data/create-fetch-store'; const fetchGetURLChannelsStore = createFetchStore( { baseName: 'getURLChannels', - controlCallback: ( { clientID } ) => { - const accountID = parseAccountID( clientID ); + controlCallback: ( { accountID, clientID } ) => { if ( undefined === accountID ) { // Mirror the API response that would happen for an invalid client ID. return new Promise( () => {
1
/** * modules/adsense data store: URL channels. * * Site Kit by Google, Copyright 2020 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * External dependencies */ import invariant from 'invariant'; import { __ } from '@wordpress/i18n'; /** * Internal dependencies */ import API from 'googlesitekit-api'; import Data from 'googlesitekit-data'; import { STORE_NAME } from './constants'; import { parseAccountID } from '../util'; import { createFetchStore } from '../../../googlesitekit/data/create-fetch-store'; const fetchGetURLChannelsStore = createFetchStore( { baseName: 'getURLChannels', controlCallback: ( { clientID } ) => { const accountID = parseAccountID( clientID ); if ( undefined === accountID ) { // Mirror the API response that would happen for an invalid client ID. return new Promise( () => { throw { code: 'invalid_param', message: __( 'The clientID parameter is not a valid AdSense client ID.', 'google-site-kit' ), data: { status: 400 }, }; } ); } return API.get( 'modules', 'adsense', 'urlchannels', { accountID, clientID }, { useCache: false, } ); }, reducerCallback: ( state, urlchannels, { clientID } ) => { return { ...state, urlchannels: { ...state.urlchannels, [ clientID ]: [ ...urlchannels ], }, }; }, argsToParams: ( clientID ) => { invariant( clientID, 'clientID is required.' ); return { clientID }; }, } ); // Actions const RESET_URLCHANNELS = 'RESET_URLCHANNELS'; const BASE_INITIAL_STATE = { urlchannels: {}, }; const baseActions = { *resetURLChannels() { const { dispatch } = yield Data.commonActions.getRegistry(); yield { payload: {}, type: RESET_URLCHANNELS, }; return dispatch( STORE_NAME ) .invalidateResolutionForStoreSelector( 'getURLChannels' ); }, }; const baseReducer = ( state, { type } ) => { switch ( type ) { case RESET_URLCHANNELS: { const { siteStatus, siteSetupComplete, } = state.savedSettings || {}; return { ...state, urlchannels: INITIAL_STATE.urlchannels, settings: { ...( state.settings || {} ), siteStatus, siteSetupComplete, }, }; } default: { return { ...state }; } } }; const baseResolvers = { *getURLChannels( clientID ) { if ( undefined === clientID ) { return; } const registry = yield Data.commonActions.getRegistry(); const existingURLChannels = registry.select( STORE_NAME ).getURLChannels( clientID ); if ( existingURLChannels ) { return; } yield fetchGetURLChannelsStore.actions.fetchGetURLChannels( clientID ); }, }; const baseSelectors = { /** * Gets all Google AdSense URL channels for this account and client. * * @since 1.9.0 * * @param {Object} state Data store's state. * @param {string} clientID The AdSense Client ID to fetch URL channels for. * @return {(Array.<Object>|undefined)} An array of AdSense URL channels; `undefined` if not loaded. 
*/ getURLChannels( state, clientID ) { if ( undefined === clientID ) { return undefined; } const { urlchannels } = state; return urlchannels[ clientID ]; }, }; const store = Data.combineStores( fetchGetURLChannelsStore, { INITIAL_STATE: BASE_INITIAL_STATE, actions: baseActions, reducer: baseReducer, resolvers: baseResolvers, selectors: baseSelectors, } ); export const INITIAL_STATE = store.INITIAL_STATE; export const actions = store.actions; export const controls = store.controls; export const reducer = store.reducer; export const resolvers = store.resolvers; export const selectors = store.selectors; export default store;
1
30,632
This entire clause should now be removed. It was only relevant when `accountID` couldn't be parsed from `clientID`, and that parsing is no longer needed.
google-site-kit-wp
js
@@ -77,10 +77,12 @@ extlinks = { } intersphinx_mapping = { - 'ansible': ('https://docs.ansible.com/ansible/devel/', None), + 'ansible': ('https://docs.ansible.com/ansible/latest/', None), 'pip': ('https://pip.pypa.io/en/latest/', None), 'python': ('https://docs.python.org/3', None), 'python2': ('https://docs.python.org/2', None), + 'testinfra': ('https://testinfra.readthedocs.io/en/latest/', None), + 'yamllint': ('http://yamllint.readthedocs.io/en/latest/', None), } # The version info for the project you're documenting, acts as replacement for
1
# -*- coding: utf-8 -*- # # Molecule documentation build configuration file, created by # sphinx-quickstart on Sat Oct 17 16:07:47 2015. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import datetime import os import shlex import sys import molecule import alabaster # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.insert(0, os.path.abspath('../..')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.coverage', 'sphinx.ext.doctest', 'sphinx.ext.extlinks', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'alabaster', ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # source_suffix = ['.rst', '.md'] source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'Molecule' copyright = u' %s, Red Hat Inc.' % datetime.date.today().year author = u'AUTHORS.rst' github_url = 'https://github.com' github_repo_org = 'ansible' github_repo_name = 'molecule' github_repo_slug = f'{github_repo_org}/{github_repo_name}' github_repo_url = f'{github_url}/{github_repo_slug}' extlinks = { 'issue': (f'{github_repo_url}/issues/%s', '#'), 'pr': (f'{github_repo_url}/pull/%s', 'PR #'), 'commit': (f'{github_repo_url}/commit/%s', ''), 'gh': (f'{github_url}/%s', 'GitHub: '), } intersphinx_mapping = { 'ansible': ('https://docs.ansible.com/ansible/devel/', None), 'pip': ('https://pip.pypa.io/en/latest/', None), 'python': ('https://docs.python.org/3', None), 'python2': ('https://docs.python.org/2', None), } # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = molecule.__version__ # The full version, including alpha/beta/rc tags. release = version # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all # documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. 
#add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. #keep_warnings = False # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = True # List of warnings to suppress. suppress_warnings = ['image.nonlocal_uri'] # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'alabaster' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = {} html_theme_options = { 'logo': 'logo.png', 'github_user': github_repo_org, 'github_repo': github_repo_name, 'github_button': True, 'github_banner': True, 'github_type': 'star', 'github_count': True, 'badge_branch': 'master', 'travis_button': True, 'codecov_button': True, 'analytics_id': 'UA-128382387-1', 'show_powered_by': False, 'extra_nav_links': { 'View on GitHub': github_repo_url, }, } # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] html_theme_path = [alabaster.get_path()] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. #html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} html_sidebars = { '**': [ 'about.html', 'navigation.html', 'searchbox.html', ], } # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. 
html_show_sourcelink = False # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. html_show_copyright = False # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Language to be used for generating the HTML full-text search index. # Sphinx supports the following languages: # 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' # 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' #html_search_language = 'en' # A dictionary with options for the search language support, empty by default. # Now only 'ja' uses this config value #html_search_options = {'type': 'default'} # The name of a javascript file (relative to the configuration directory) that # implements a search results scorer. If empty, the default will be used. #html_search_scorer = 'scorer.js' # Output file base name for HTML help builder. htmlhelp_basename = 'Moleculedoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', # Latex figure (float) alignment #'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ (master_doc, 'Molecule.tex', u'Molecule Documentation', u'AUTHORS.rst', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ (master_doc, 'molecule', u'Molecule Documentation', [author], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ (master_doc, 'Molecule', u'Molecule Documentation', author, 'AUTHORS.rst', 'Automated Testing for Ansible roles', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. #texinfo_no_detailmenu = False
1
8,526
FTR: Contrary to common expectation, `latest` in the Ansible docs corresponds to the stable version, not to the latest state of the main Git branch. Is this your intention? Just checking...
ansible-community-molecule
py
@@ -5207,6 +5207,18 @@ const NAType *Translate::synthesizeType() err4106arg = SQLCHARSETSTRING_UTF8; break; + case GBK_TO_UTF8: + if (translateSource->getCharSet() == CharInfo::GBK || translateSource->getCharSet() == CharInfo::UnknownCharSet ) + charsetTarget = CharInfo::UTF8; + else + { + if( CmpCommon::getDefaultString(HIVE_FILE_CHARSET) == CmpCommon::getDefaultString(HIVE_DEFAULT_CHARSET) ) + err4106arg = SQLCHARSETCODE_GB2312; + else + charsetTarget = CharInfo::UTF8; + } + break; + case ISO88591_TO_UTF8: if (translateSource->getCharSet() == CharInfo::ISO88591) {
1
/********************************************************************** // @@@ START COPYRIGHT @@@ // // Licensed to the Apache Software Foundation (ASF) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The ASF licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. // // @@@ END COPYRIGHT @@@ **********************************************************************/ /* -*-C++-*- ****************************************************************************** * * File: SynthType.C * Description: Methods for synthesizing a type * Created: 3/15/95 * Language: C++ * * * * ****************************************************************************** */ #define SQLPARSERGLOBALS_NADEFAULTS #include "Sqlcomp.h" #include "AllItemExpr.h" #include "BindWA.h" #include "CmpStatement.h" #include "CmpErrors.h" #include "ComSqlId.h" #include "OptimizerSimulator.h" // For TRIGGERS_STATUS_VECTOR_SIZE and SIZEOF_UNIQUE_EXECUTE_ID #include "Triggers.h" #include "TriggerEnable.h" #ifndef NDEBUG static Int32 NCHAR_DEBUG = -1; // note that, for perf, we call getenv only once #endif #include "SqlParserGlobalsCmn.h" //#define getDefaultCharSet CharInfo::getCharSetEnum(ActiveSchemaDB()->getDefaults().getValue(DEFAULT_CHARSET)) #define getDefaultCharSet SqlParser_DEFAULT_CHARSET // ----------------------------------------------------------------------- // utility functions -- cosmetics of error message arguments // ----------------------------------------------------------------------- // These just shorten error messages by removing irrelevant junk static void shortenTypeSQLname(NAString &typStr) { size_t i = typStr.index('('); if (i && i != NA_NPOS) typStr.remove(i); // remove from lparen onward i = typStr.index(' '); if (i && i != NA_NPOS) typStr.remove(i); // remove from space onward } static void shortenTypeSQLname(const NAType &op, NABuiltInTypeEnum typEnum, NAString &typStr) { if (op.getTypeQualifier() == typEnum) { size_t i = typStr.index(')'); if (i != NA_NPOS) i = typStr.index(" ", i); // find space AFTER the parens else i = typStr.index(' '); // find first space (no parens) if (i && i != NA_NPOS) typStr.remove(i); // remove from space onward } } // This one's NOT static -- GenRfork calls it! 
void emitDyadicTypeSQLnameMsg(Lng32 sqlCode, const NAType &op1, const NAType &op2, const char *str1 = NULL, const char *str2 = NULL, ComDiagsArea * diagsArea = NULL, const Lng32 int1 = -999999) { NAString tsn1(op1.closestEquivalentExternalType( HEAP )->getTypeSQLname(TRUE /*terse*/)); NAString tsn2(op2.closestEquivalentExternalType( HEAP )->getTypeSQLname(TRUE /*terse*/)); NABoolean charShorten = TRUE, numShorten = TRUE; if (op1.getTypeQualifier() == NA_CHARACTER_TYPE && op2.getTypeQualifier() == NA_CHARACTER_TYPE) { const CharType &ct1 = (CharType&)op1; const CharType &ct2 = (CharType&)op2; NABoolean csDiff = (ct1.getCharSet() != ct2.getCharSet() || ct1.getCharSet() == CharInfo::UnknownCharSet); NABoolean coDiff = (ct1.getCollation() != ct2.getCollation() || ct1.getCollation() == CharInfo::UNKNOWN_COLLATION); if (csDiff) charShorten = FALSE; // leave the CHARACTER SET info as is in tsnX else if (coDiff) { // add COLLATE info to the type-texts tsn1 += NAString(" ") + ct1.getCollateClause(ct1.getCollation()); tsn2 += NAString(" ") + ct2.getCollateClause(ct2.getCollation()); charShorten = FALSE; } // else, do do the charShorten thing numShorten = FALSE; // both args are character: shorten is a no-op } else if (op1.getTypeQualifier() == NA_NUMERIC_TYPE && op2.getTypeQualifier() == NA_NUMERIC_TYPE) { if (!str1 || strcmp(str1, "||")) // If a CONCAT, we do shorten; else: numShorten = FALSE; // do not shorten, we need distinguishing info charShorten = FALSE; // both args are numeric: shorten is a no-op } if (charShorten) { // "CHAR(8) CHARACTER SET UNICODE" => "CHAR(8)" shortenTypeSQLname(op1, NA_CHARACTER_TYPE, tsn1); shortenTypeSQLname(op2, NA_CHARACTER_TYPE, tsn2); } if (numShorten) { // "NUMERIC(8, 2) SIGNED" => "NUMERIC(8, 2)" shortenTypeSQLname(op1, NA_NUMERIC_TYPE, tsn1); shortenTypeSQLname(op2, NA_NUMERIC_TYPE, tsn2); } ComDiagsArea * da = (diagsArea ? diagsArea : CmpCommon::diags()); *da << DgSqlCode(sqlCode); switch (sqlCode) { case -4034: CMPASSERT(str1); *da << DgString0(tsn1) << DgString1(str1) << DgString2(tsn2) << DgString3(str2 ? str2 : ""); break; case -4039: CMPASSERT(str1); *da << DgColumnName(str1) << DgString0(tsn1) << DgString1(tsn2); break; case arkcmpErrorISPWrongDataType: // -19016 CMPASSERT(str1); CMPASSERT(int1 != -999999); *da << DgString0(str1) << DgString1(tsn1) << DgInt0(int1) << DgString2(tsn2); break; default: *da << DgString0(tsn1) << DgString1(tsn2); } } // emitDyadicTypeSQLnameMsg // ----------------------------------------------------------------------- // helper functions for *Type::isComparable() methods // ----------------------------------------------------------------------- static inline NABoolean involvesEQorNE(const OperatorType &opType) { return opType.match(ITM_WILDCARD_EQ_NE) || OperatorTypeEnum(opType) == ITM_LIKE || OperatorTypeEnum(opType) == ITM_LIKE_DOUBLEBYTE || OperatorTypeEnum(opType) == ITM_POSITION || OperatorTypeEnum(opType) == ITM_REPLACE || OperatorTypeEnum(opType) == ITM_REPLACE_UNICODE || OperatorTypeEnum(opType) == ITM_TRIM || // == ' ' (space char) OperatorTypeEnum(opType) == ITM_VALUEIDUNION; // when DISTINCT flag } static ItemExpr * propagateCoAndCoToItem(ItemExpr *ie, CharInfo::Collation co, CharInfo::Coercibility ce) { // The special check is here for isAUserSuppliedInput() // because we cannot directly mutate the type of a // ConstValue, HostVar, DynamicParam, or AnsiUSERFunction // because their bindNode's call ItemExpr::bindUserInput() // which makes multiple refs to an input all map to // (all share) the SAME valueId. 
// // Here we only want to mutate THIS ref's type, // or, equivalently, CAST a leaf type to the new type -- // where leaves are basic items the Binder does lookup on -- // ColRefs (Base/IndexColumns), ConstValues, HostVars, DynamicParams. // In these lookups we do NOT want to change the leaf's original ValueId // NOR its original type -- hence, we CAST. // It is safe not to do an additional bindNode (we have no bindWA anyway!) // because we KNOW the new type is okay! CharType *ct = (CharType *)&ie->getValueId().getType(); CMPASSERT(ct->getTypeQualifier() == NA_CHARACTER_TYPE); if (ct->getCollation() != co) { #ifndef NDEBUG if (NCHAR_DEBUG > 0) cerr << "CMP--:\t" << ct->getCollation() << ',' << ct->getCoercibility() << ' ' << co << ',' << ce << '\t' << ie->getText() << '\t' << ie->getValueId(); CMPASSERT(co != CharInfo::UNKNOWN_COLLATION); // sanity check CMPASSERT(ce != CharInfo::NO_COLLATING_SEQUENCE); // sanity check #endif ct = (CharType *)ct->newCopy(HEAP); ct->setCoAndCo(co, ce); // add a cast to the right type // (since the child may be shared by other ItemExprs we can't // simply change it, but we might someday be able to do an // optimization similar to what ICAT does) ie = new HEAP Cast(ie, ct); ie->synthTypeAndValueId(); #ifndef NDEBUG if (NCHAR_DEBUG > 0) cerr << '\t' << ie->getValueId() << endl; #endif } // collation of operand overridden return ie; } static void propagateCoAndCoToChildren(ItemExpr *parentOp, CharInfo::Collation co, CharInfo::Coercibility ce) { // Propagate the new co & ce to the immediate operands -- // yes, a shallow propagation is what we want. #ifndef NDEBUG // Just double-check that Cast::synthType() is NOT calling here... // If it did, we would rewrite its child and the Cast would get optimized // away as a no-op. CMPASSERT(!parentOp->getOperator().match(ITM_ANY_CAST)); #endif for (Int32 i = 0; i < parentOp->getArity(); i++) { ItemExpr *ie = parentOp->child(i); if (ie && ie->getValueId().getType().getTypeQualifier() == NA_CHARACTER_TYPE) parentOp->child(i) = propagateCoAndCoToItem(ie, co, ce); } } // LCOV_EXCL_START - cnu static Int32 getNumCHARACTERArgs(ItemExpr *parentOp) { Int32 n = 0; for (Int32 i = 0; i < parentOp->getArity(); i++) { ItemExpr *ie = parentOp->child(i); if (ie && ie->getValueId().getType().getTypeQualifier() == NA_CHARACTER_TYPE) n++; } return n; } // LCOV_EXCL_STOP // ----------------------------------------------------------------------- // The virtual NAType::isComparable() methods -- implemented here rather than // ../common/*Type.cpp because they're called only from here, and here // we've embedded stuff like ../optimizer/ItemExpr methods and // the ../arkcmp/CmpCommon global-diags-area. // ----------------------------------------------------------------------- NABoolean NAType::isComparable(const NAType &other, ItemExpr *parentOp, Int32 emitErr) const { #ifndef NDEBUG CMPASSERT(parentOp); //## reserved for future errmsg 4034 w/ unparse, #endif // for CoAndCo propagation and for errmsgs! 
if (isCompatible(other)) return TRUE; NAString defVal; NABoolean charsetInference = (CmpCommon::getDefault(INFER_CHARSET, defVal) == DF_ON); if(charsetInference && getTypeQualifier() == NA_CHARACTER_TYPE && other.getTypeQualifier() == NA_CHARACTER_TYPE){ // do not reject matches of UNKNOWN_CHARSET with UNKNOWN_CHARSET CharType *ct = (CharType *)this; if (ct->isCompatibleAllowUnknownCharset(other)) return TRUE; } if (emitErr == EmitErrIfAnyChar) if (getTypeQualifier() != NA_CHARACTER_TYPE && other.getTypeQualifier() != NA_CHARACTER_TYPE) emitErr = FALSE; if (emitErr) { // 4041 Type $1 cannot be compared with type $2. //10-070228-2913 -Begin //When MODE_SPECIAL_1 'ON', UNICODE CHARSET, and UPPER //function is involved data type is converted to VARCHAR(dataStorageSize). //dataSotragesize = getMaxCharLen() * bytesPerChar. //see in Upper::synthesizeType() method. //When generating error condition we convert it back to original size. if ( getTypeQualifier() == NA_CHARACTER_TYPE) { CharType &ct1 = (CharType&)*this; if((ct1.isCaseinsensitive()) && (ct1.getCharSet() == CharInfo::UNICODE) && (parentOp->child(0)->castToItemExpr()->getOperatorType() == ITM_UPPER)) { ct1.setDataStorageSize(ct1.getDataStorageSize()/3); } //10-070228-2913 -End } emitDyadicTypeSQLnameMsg(-4041, *this, other); } return FALSE; } NABoolean CharType::isComparable(const NAType &otherNA, ItemExpr *parentOp, Int32 emitErr) const { if (NOT NAType::isComparable(otherNA, parentOp, emitErr)) return FALSE; const CharType &other = (const CharType &)otherNA; CharInfo::Collation co; CharInfo::Coercibility ce; computeCoAndCo(other, co, ce); NABoolean cmpOK = (co != CharInfo::UNKNOWN_COLLATION); if (emitErr) emitErr = +1; // for fall-thru msg suppression if (cmpOK) { // a "mini-cache" to avoid proc call, for perf static THREAD_P CharInfo::Collation cachedCO = CharInfo::UNKNOWN_COLLATION; static THREAD_P Int32 cachedFlags = CollationInfo::ALL_NEGATIVE_SYNTAX_FLAGS; if (cachedCO != co) { // use the mini-cache cachedCO = co; cachedFlags = CharInfo::getCollationFlags(co); } if (involvesEQorNE(parentOp->getOperator())) { if (cachedFlags & CollationInfo::EQ_NE_CMP_ILLEGAL) cmpOK = FALSE; } else if (cachedFlags & CollationInfo::ORDERED_CMP_ILLEGAL) cmpOK = FALSE; if (!cmpOK && emitErr > 0) { // 4044 Collation $0~String0 does not support the $1~String1 operator. *CmpCommon::diags() << DgSqlCode(-4044) << DgString0(CharInfo::getCollationName(co)) << DgString1(parentOp->getTextUpper()); emitErr = -1; // We fall thru but do not also emit error 4041. } } // additional collation flag checks if (cmpOK) propagateCoAndCoToChildren(parentOp, co, ce); // type-synth/propagate! else { if (emitErr > 0) // 4041 Type $1 cannot be compared with type $2. emitDyadicTypeSQLnameMsg(-4041, *this, other); // if (emitErr) // +1 OR -1 // //## also emit errmsg 4034 w/ unparse? } #ifndef NDEBUG if (NCHAR_DEBUG < 0) NCHAR_DEBUG = getenv("NCHAR_DEBUG") ? +1 : 0; if (NCHAR_DEBUG > 0) { // LCOV_EXCL_START - dpm NAString p(CmpCommon::statementHeap()); parentOp->unparse(p); NAString s(getTypeSQLname(TRUE /*terse*/)); s += NAString(" ") + getCollateClause(getCollation()); cerr << "CMP" << (cmpOK ? 
"==" : "<>") << ":\t" << (Int32)parentOp->getOperatorType() << '\t' << p << endl << s << '\t' << getCoercibilityText(getCoercibility()) << endl; s = other.getTypeSQLname(TRUE /*terse*/); s += NAString(" ") + other.getCollateClause(other.getCollation()); cerr << s << '\t' << other.getCoercibilityText(other.getCoercibility()) << endl; cerr << CharInfo::getCollationName(co) << '\t' << CharType::getCoercibilityText(ce) << endl; if (!cmpOK) cerr << endl; // LCOV_EXCL_STOP } #endif return cmpOK; } // ----------------------------------------------------------------------- // additional, miscellaneous helper functions // ----------------------------------------------------------------------- // Called by BiRelat and QuantifiedComp comparison predicates. static NABoolean synthItemExprLists(ItemExprList &exprList1, ItemExprList &exprList2, NABoolean allowIncompatibleComparison, NABoolean &allowsUnknown, ItemExpr *parentOp) { if (exprList1.entries() != exprList2.entries()) { // 4042 The operands of a comparison predicate must be of equal degree. *CmpCommon::diags() << DgSqlCode(-4042); return FALSE; } NABoolean ODBC = (CmpCommon::getDefault(ODBC_PROCESS) == DF_ON); NABoolean JDBC = (CmpCommon::getDefault(JDBC_PROCESS) == DF_ON); allowsUnknown = FALSE; for (CollIndex i = 0; i < exprList1.entries(); i++) { // // Type cast any params. // ValueId vid1 = exprList1[i]->getValueId(); ValueId vid2 = exprList2[i]->getValueId(); NABoolean dummy; if (vid1.getType().getTypeQualifier() != NA_UNKNOWN_TYPE && vid2.getType().getTypeQualifier() == NA_UNKNOWN_TYPE && vid2.getItemExpr()->getOperatorType() == ITM_CONSTANT && (vid2.getItemExpr()->castToConstValue(dummy))->isNull()) { vid2.coerceType(vid1.getType()); } else if (vid2.getType().getTypeQualifier() != NA_UNKNOWN_TYPE && vid1.getType().getTypeQualifier() == NA_UNKNOWN_TYPE && vid1.getItemExpr()->getOperatorType() == ITM_CONSTANT && (vid1.getItemExpr()->castToConstValue(dummy))->isNull()) { vid1.coerceType(vid2.getType()); } // if this query is being processed for ODBC, then type cast param // to have the same type as the other side of birelat op. Otherwise, // give param the default type if the other side is an exact numeric. if ((NOT ODBC) && (NOT JDBC)) { // give param the default type if the other side is an exact numeric. if (vid1.getType().getTypeQualifier() == NA_UNKNOWN_TYPE && vid2.getType().getTypeQualifier() == NA_NUMERIC_TYPE) { // if op1 is a param with unknown type and // op2 is an exact numeric, type cast op1 to the default numeric type const NumericType& op2 = (NumericType&)vid2.getType(); if (op2.isExact()) vid1.coerceType(NA_NUMERIC_TYPE); } else if (vid2.getType().getTypeQualifier() == NA_UNKNOWN_TYPE && vid1.getType().getTypeQualifier() == NA_NUMERIC_TYPE) { const NumericType& op1 = (NumericType&)vid1.getType(); if (op1.isExact()) vid2.coerceType(NA_NUMERIC_TYPE); }; vid1.coerceType(vid2.getType(), NA_NUMERIC_TYPE); } else { // coerce to default character type(VARCHAR(32)) for ODBC. vid1.coerceType(vid2.getType(), NA_CHARACTER_TYPE); } vid2.coerceType(vid1.getType()); // // Check that the operands are comparable. 
// const NAType *operand1 = &vid1.getType(); const NAType *operand2 = &vid2.getType(); NABoolean DoCompatibilityTest = TRUE; NAString defVal; if ( operand1->getTypeQualifier() == NA_CHARACTER_TYPE && operand2->getTypeQualifier() == NA_CHARACTER_TYPE ) { if ( CmpCommon::getDefault(ALLOW_IMPLICIT_CHAR_CASTING) == DF_ON ) { DoCompatibilityTest = FALSE; // // NOTE: The Generator has code to throw in a Translate node if an // incompatible character set comparison is attempted. // } const CharType *charOp1 = (CharType*)&(vid1.getType()); const CharType *charOp2 = (CharType*)&(vid2.getType()); NABoolean charsetInference = (CmpCommon::getDefault(INFER_CHARSET, defVal) == DF_ON); if ( charsetInference ) { const CharType* desiredType = CharType::findPushDownCharType(getDefaultCharSet, charOp1, charOp2, 0); if ( desiredType ) { // just push down the charset field. All other fields are // meaningless. vid1.coerceType((NAType&)*desiredType, NA_CHARACTER_TYPE); vid2.coerceType((NAType&)*desiredType, NA_CHARACTER_TYPE); } } // get the newly pushed-down/relaxed types operand1 = &vid1.getType(); operand2 = &vid2.getType(); if ( DoCompatibilityTest && NOT operand1->isCompatible(*operand2) ) { // 4041 comparison between these two types is not allowed emitDyadicTypeSQLnameMsg(-4041, *operand1, *operand2); // LCOV_EXCL_LINE - cnu return FALSE; } } allowsUnknown = allowsUnknown OR operand1->supportsSQLnullLogical() OR operand2->supportsSQLnullLogical(); if (allowIncompatibleComparison) { // incompatible conversion is only allowed between: // 1. char and numeric types. // 2. char literal and date types // Or for special_1 mode: // 3. DATE and numeric. Date is an interval from year 1900. // 4. interval and numeric. // Or for MODE_SPECIAL_3: // between date and timestamp. // Check if this is char and numeric comparison if (((operand1->getTypeQualifier() == NA_CHARACTER_TYPE) && (operand2->getTypeQualifier() == NA_NUMERIC_TYPE) && ((((CharType*)operand1)->getCharSet() == CharInfo::ISO88591) || (((CharType*)operand1)->getCharSet() == CharInfo::UTF8))) || ((operand1->getTypeQualifier() == NA_NUMERIC_TYPE) && (operand2->getTypeQualifier() == NA_CHARACTER_TYPE) && ((((CharType*)operand2)->getCharSet() == CharInfo::ISO88591) || (((CharType*)operand2)->getCharSet() == CharInfo::UTF8)))) { return TRUE; } // Check if this is char and date comparison if (((operand1->getTypeQualifier() == NA_CHARACTER_TYPE) && (operand2->getTypeQualifier() == NA_DATETIME_TYPE) && ((((CharType*)operand1)->getCharSet() == CharInfo::ISO88591) || (((CharType*)operand1)->getCharSet() == CharInfo::UTF8))) || ((operand1->getTypeQualifier() == NA_DATETIME_TYPE) && (operand2->getTypeQualifier() == NA_CHARACTER_TYPE) && ((((CharType*)operand2)->getCharSet() == CharInfo::ISO88591) || (((CharType*)operand2)->getCharSet() == CharInfo::UTF8)))) { return TRUE; } if (CmpCommon::getDefault(MODE_SPECIAL_1) == DF_ON) { // Check if this is numeric literal and date comparison if (((operand1->getTypeQualifier() == NA_NUMERIC_TYPE) && (operand2->getTypeQualifier() == NA_DATETIME_TYPE)) || ((operand1->getTypeQualifier() == NA_DATETIME_TYPE) && (operand2->getTypeQualifier() == NA_NUMERIC_TYPE))) { NumericType *numOper; DatetimeType *dtOper; if (operand1->getTypeQualifier() == NA_NUMERIC_TYPE) { numOper = &(NumericType&)vid1.getType(); dtOper = &(DatetimeType&)vid2.getType(); } else { numOper = &(NumericType&)vid2.getType(); dtOper = &(DatetimeType&)vid1.getType(); } // make sure it is DATE to exact NUMERIC with scale // of 0 comparison. 
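            // Illustrative example (hypothetical query): under MODE_SPECIAL_1 a predicate
            // such as "date_col = 12345" reaches this point; it is accepted only when the
            // numeric side is exact with scale 0 and the datetime side is a DATE, as
            // checked below.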
if ((numOper->isExact()) && (numOper->getScale() == 0) && (dtOper->getPrecision() == SQLDTCODE_DATE)) return TRUE; } // Check if this is numeric literal and interval comparison if (((operand1->getTypeQualifier() == NA_NUMERIC_TYPE) && (operand2->getTypeQualifier() == NA_INTERVAL_TYPE) && (vid1.getItemExpr()->getOperatorType() == ITM_CONSTANT)) || ((operand1->getTypeQualifier() == NA_INTERVAL_TYPE) && (operand2->getTypeQualifier() == NA_NUMERIC_TYPE) && (vid2.getItemExpr()->getOperatorType() == ITM_CONSTANT))) { IntervalType* interval; const NumericType* numeric; if (operand1->getTypeQualifier() == NA_NUMERIC_TYPE) { numeric = &(NumericType&)vid1.getType(); interval = &(IntervalType&)vid2.getType(); } else { numeric = &(NumericType&)vid2.getType(); interval = &(IntervalType&)vid1.getType(); } // make sure it is exact NUMERIC with scale // of 0 comparison. if ((numeric->isExact()) && (numeric->getScale() == 0) && (interval->getFractionPrecision() == 0)) return TRUE; } } if (CmpCommon::getDefault(MODE_SPECIAL_3) == DF_ON) { if (((vid1.getType().getPrecision() == SQLDTCODE_TIMESTAMP) && (vid2.getType().getPrecision() == SQLDTCODE_DATE)) || ((vid2.getType().getPrecision() == SQLDTCODE_TIMESTAMP) && (vid1.getType().getPrecision() == SQLDTCODE_DATE))) return TRUE; } } //## errmsg 4034 w/ unparse? if ( DoCompatibilityTest && NOT operand1->isComparable(*operand2, parentOp) ) return FALSE; } return TRUE; // success } // synthItemExprLists static const NAType *synthAvgSum(const NAType& operand, NABoolean inScalarGroup) { NABoolean aggNeedsToBeNullable; aggNeedsToBeNullable = operand.supportsSQLnullPhysical() || inScalarGroup; switch (operand.getTypeQualifier()) { case NA_NUMERIC_TYPE: { NumericType * const type = (NumericType * const ) &operand; if(type->isExact()) { ///////////////////////////////////////////////////////////////////// // Rules to compute datatype, precision and scale of SUM/AVG for // EXACT numerics. // // Precision and Datatype computation: // // If precision is less than 19, make it LargeInt (Int64). // // If precision is > 19, make result precision = operand precision // + 10 and result to be BigNum datatype. // // Scale computation: // Result scale is always equal to operand's scale. // // Result is also signed if operand is signed. // ///////////////////////////////////////////////////////////////////// Lng32 precision = (type->getMagnitude() + 9) / 10 + type->getScale(); Lng32 scale = type->getScale(); NABoolean isARealBigNum = FALSE; if (type->isBigNum()) { // make the max precision to be the precision of operand + 10. // Just a nice, round number. // // It could also be always made to // be MAX_NUMERIC_PRECISION_ALLOWED. But that would mean that // the result is always NUMERIC(128) which may be too much // for all aggregates. We can think about it. 
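          // Illustrative example (assuming getMagnitude()/10 gives the count of integer
          // digits): for SUM over a BigNum NUMERIC(30,2) operand the precision computed
          // above is 28 + 2 = 30, and the MINOF below widens it to 40 unless the
          // MAX_NUMERIC_PRECISION_ALLOWED default caps it lower.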
precision = MINOF(precision + 10, (Lng32)CmpCommon::getDefaultNumeric(MAX_NUMERIC_PRECISION_ALLOWED)); isARealBigNum = ((SQLBigNum*)type)->isARealBigNum(); } else { NABoolean limitPrecision = (CmpCommon::getDefault(LIMIT_MAX_NUMERIC_PRECISION) == DF_ON); if (precision <= MAX_NUMERIC_PRECISION) precision = MAX_NUMERIC_PRECISION; if (limitPrecision) { if (precision > MAX_NUMERIC_PRECISION) { precision = MAX_NUMERIC_PRECISION; } } else { if ( precision >= MAX_NUMERIC_PRECISION + 1 ) precision = MINOF(precision + 10, (Lng32)CmpCommon::getDefaultNumeric(MAX_NUMERIC_PRECISION_ALLOWED)); } } if ((NOT type->isBigNum()) && (precision <= MAX_NUMERIC_PRECISION)) { if (scale > 0) return new HEAP SQLNumeric(8, // length = 8 bytes precision, scale, (precision > 9 ? TRUE : type->isSigned()), aggNeedsToBeNullable); else return new HEAP SQLLargeInt(TRUE, aggNeedsToBeNullable); } else { return new HEAP SQLBigNum(precision, scale, isARealBigNum, type->isSigned(), aggNeedsToBeNullable, NULL); } } else { return new HEAP SQLDoublePrecision(aggNeedsToBeNullable); } } break; case NA_INTERVAL_TYPE: { IntervalType * const type = (IntervalType * const ) &operand; if (type->isSupportedType()) { return new HEAP SQLInterval(aggNeedsToBeNullable, type->getStartField(), type->computeLeadingPrecision(type->getStartField(), MAX_NUMERIC_PRECISION, type->getEndField(), type->getFractionPrecision()), type->getEndField(), type->getFractionPrecision()); } // else fall through to error } break; } // 4038 The operand of an AVG or SUM function must be numeric or interval. *CmpCommon::diags() << DgSqlCode(-4038); return NULL; } // // getFirstKnownCharSet() - get CharSet of first vid that has one. // CharInfo::CharSet getFirstKnownCharSet( ValueId vid1, ValueId vid2, ValueId vid3) { CharInfo::CharSet first_cs = CharInfo::ISO88591; // Default to ISO88591 if ( CmpCommon::getDefault(ALLOW_IMPLICIT_CHAR_CASTING) == DF_ON ) { const NAType *otyp1 = &(vid1.getType()); const NAType *otyp2 = &(vid2.getType()); const NAType *otyp3 = &(vid3.getType()); if (otyp1->getTypeQualifier() == NA_CHARACTER_TYPE) first_cs = ((CharType *)otyp1)->getCharSet(); else if (otyp2->getTypeQualifier() == NA_CHARACTER_TYPE) first_cs = ((CharType *)otyp2)->getCharSet(); else if (otyp3->getTypeQualifier() == NA_CHARACTER_TYPE) first_cs = ((CharType *)otyp3)->getCharSet(); } return first_cs; } // ----------------------------------------------------------------------- // member functions for class ItemExpr // ----------------------------------------------------------------------- const NAType *ItemExpr::synthTypeWithCollateClause(BindWA *bindWA, const NAType *type) { // First, call the VIRTUAL method, if we were not passed a type to use if (!type) // e.g. ColReference::bindNode passes a type type = synthesizeType(); if (type && collateClause()) { CharInfo::Collation co = collateClause()->collation_; CharInfo::Coercibility ce = collateClause()->coercibility_; collateClause() = NULL; CMPASSERT(ce == CharInfo::EXPLICIT); // else, Parser screwed up? const ColumnDescList *cols = NULL; if (isASubquery()) cols = ((Subquery *)this)->getRETDesc()->getColumnList(); if (type->getTypeQualifier() != NA_CHARACTER_TYPE) { // 4034 The operation (operand COLLATE coll-name) is not allowed. 
NAString optext(CmpCommon::statementHeap()); unparse(optext, DEFAULT_PHASE, USER_FORMAT_DELUXE); if (isASubquery()) { // Cosmetics: convert 'SCAN C.S.T' to '(SELECT a,b FROM C.S.T)' NAString x(optext, CmpCommon::statementHeap()); x.remove(5); //offset of "SCAN " x.toUpper(); if (x == "SCAN " || x == "SCAN(") { x = NAString("(SELECT ") + cols->getColumnDescListAsString() + " FROM "; optext.remove(0,5); optext.prepend(x); optext += ")"; } } *CmpCommon::diags() << DgSqlCode(-4034) << DgString0(optext) << DgString1("COLLATE") << DgString2(CharInfo::getCollationName(co)); // 4073 COLLATE may not appear after a $string0 type expression. NAString typnam(type->getTypeSQLname(TRUE/*terse*/)); shortenTypeSQLname(typnam); if (typnam.length() == 9) { //len("SQLRecord") // Cosmetics: convert 'SQLRecord' to 'NON-SCALAR' NAString x(typnam, CmpCommon::statementHeap()); x.toUpper(); if (x == "SQLRECORD") typnam="NON-SCALAR"; } *CmpCommon::diags() << DgSqlCode(-4073) << DgString0(typnam); type = NULL; } else if (co == CharInfo::UNKNOWN_COLLATION && bindWA->getCurrentScope()->context()->inOrderBy()) { // 3169 $0~string0 is not a known collation. *CmpCommon::diags() << DgSqlCode(-3169) << DgString0(CharInfo::getCollationName(co)); type = NULL; } else { // Consider // ('a' COLLATE AAA || 'b') COLLATE BBB // 'a' is AAA/EXPLICIT, so the concat within the parens is too -- // but ANSI 6.13 SR 4(a)(i) says that outside the parens we override // the inner EXPLICIT, so the expression outside the parens // is BBB/EXPLICIT. No computeCoAndCo() needed, simply setCoAndCo(). // type = type->newCopy(HEAP); CharType *ct = (CharType *)type; ct->setCoAndCo(co, ce); if (!ct->isCharSetAndCollationComboOK()) { // 3179 Collation $0 is not defined on character set $1. *CmpCommon::diags() << DgSqlCode(-3179) << DgString0(CharInfo::getCollationName(ct->getCollation())) << DgString1(CharInfo::getCharSetName(ct->getCharSet())); type = NULL; } if (isASubquery()) { CMPASSERT(cols->entries() == 1); ColumnDesc *col = cols->at(0); ItemExpr *ie = propagateCoAndCoToItem( col->getValueId().getItemExpr(), co, ce); col->setValueId(ie->getValueId()); } } } if (!type) bindWA->setErrStatus(); return type; } // This is the virtual method for ItemExpr's that do not define their own const NAType *ItemExpr::synthesizeType() { if (getArity() > 0) return &child(0)->castToItemExpr()->getValueId().getType(); return new HEAP SQLUnknown(); } // Propagate type information down the ItemExpr tree. // Called by coerceType(). The default implementation // does nothing. Currently is only redefined by ValueIdUnion // to propagate the desired type to the sources of the ValueIdUnion. 
// #pragma nowarn(1506) // warning elimination #pragma warning (disable : 4018) //warning elimination const NAType * ItemExpr::pushDownType(NAType& desiredType, enum NABuiltInTypeEnum defaultQualifier) { for(CollIndex i = 0; i < getArity(); i++) { child(i) -> getValueId().coerceType(desiredType, defaultQualifier); } return (NAType *)synthesizeType(); //return &desiredType; } const NAType * Cast::pushDownType(NAType& desiredType, enum NABuiltInTypeEnum defaultQualifier) { for(CollIndex i = 0; i < getArity(); i++) { child(i) -> getValueId().coerceType(desiredType, defaultQualifier); } if (getType()->getTypeQualifier() == NA_UNKNOWN_TYPE && desiredType.getTypeQualifier() != NA_UNKNOWN_TYPE) { type_ = desiredType.newCopy(HEAP); } return (NAType *)synthesizeType(); } // LCOV_EXCL_START - cnu void ItemExpr::coerceChildType(NAType& desiredType, enum NABuiltInTypeEnum defaultQualifier) { for(CollIndex i = 0; i < getArity(); i++) { child(i) -> getValueId().coerceType(desiredType, defaultQualifier); } } // LCOV_EXCL_STOP #pragma warning (default : 4018) //warning elimination #pragma warn(1506) // warning elimination // ----------------------------------------------------------------------- // member functions for class BuiltinFunction. // This methods is for those functions which are not defined as a // derived class or do not have a derived synthesizeType method. // This method should not be called from any derived class's // synthesizeType method. // ----------------------------------------------------------------------- const NAType *BuiltinFunction::synthesizeType() { NAType * retType = NULL; switch (getOperatorType()) { case ITM_CONVERTTOBITS: { ValueId vid1 = child(0)->getValueId(); // untyped param operands are typed as Int32 Unsigned. SQLInt si(FALSE); vid1.coerceType(si, NA_NUMERIC_TYPE); const NAType &typ1 = vid1.getType(); // one byte of display size for each bit. // 8 bits per byte. Int32 maxLength = typ1.getNominalSize() * 8; if ( typ1.getTypeQualifier() == NA_CHARACTER_TYPE && typ1.isVaryingLen() == TRUE ) retType = new HEAP SQLVarChar(maxLength, typ1.supportsSQLnull()); else retType = new HEAP SQLChar(maxLength, typ1.supportsSQLnull()); } break; case ITM_NULLIFZERO: { // type cast any params ValueId vid = child(0)->getValueId(); vid.coerceType(NA_NUMERIC_TYPE); const NAType &typ1 = child(0)->getValueId().getType(); if (typ1.getTypeQualifier() != NA_NUMERIC_TYPE) { // 4045 nullifzero function is only defined for numeric types. *CmpCommon::diags() << DgSqlCode(-4045) << DgString0(getTextUpper()); return NULL; } // returned type is the same as child's type but always nullable. retType = typ1.newCopy(HEAP); if (NOT typ1.supportsSQLnull()) { retType->setNullable(TRUE); } } break; case ITM_NVL: { // type cast any params ValueId vid1 = child(0)->getValueId(); vid1.coerceType(NA_NUMERIC_TYPE); const NAType &typ1 = vid1.getType(); ValueId vid2 = child(1)->getValueId(); vid2.coerceType(typ1); const NAType &typ2 = vid2.getType(); // // Synthesize the result. // UInt32 flags = ((CmpCommon::getDefault(LIMIT_MAX_NUMERIC_PRECISION) == DF_ON) ? 
NAType::LIMIT_MAX_NUMERIC_PRECISION : 0); retType = (NAType*)typ1.synthesizeType(SYNTH_RULE_UNION, typ1, typ2, HEAP, &flags); if (retType == NULL) { // 4049 CASE can't have result types that are mixed emitDyadicTypeSQLnameMsg(-4049, typ1, typ2); return NULL; } if (NOT typ1.supportsSQLnull()) { retType = typ1.newCopy(HEAP); } } break; case ITM_QUERYID_EXTRACT: { // type cast any params ValueId vid1 = child(0)->getValueId(); SQLChar c1(ComSqlId::MAX_QUERY_ID_LEN); vid1.coerceType(c1, NA_CHARACTER_TYPE); ValueId vid2 = child(1)->getValueId(); SQLChar c2(40, FALSE); vid2.coerceType(c2, NA_CHARACTER_TYPE); const CharType &typ1 = (CharType&)child(0)->getValueId().getType(); if (typ1.getTypeQualifier() != NA_CHARACTER_TYPE) { // LCOV_EXCL_START - cnu // 4043 The operand of a $0~String0 function must be character. *CmpCommon::diags() << DgSqlCode(-4043) << DgString0(getTextUpper()); return NULL; // LCOV_EXCL_STOP } const CharType &typ2 = (CharType&)child(1)->getValueId().getType(); if (typ2.getTypeQualifier() != NA_CHARACTER_TYPE) { // LCOV_EXCL_START - cnu // 4043 The operand of a $0~String0 function must be character. *CmpCommon::diags() << DgSqlCode(-4043) << DgString0(getTextUpper()); return NULL; // LCOV_EXCL_STOP } retType = new HEAP SQLVarChar(ComSqlId::MAX_QUERY_ID_LEN, (typ1.supportsSQLnull() || typ2.supportsSQLnull()), FALSE, // not upshifted FALSE, // not case-insensitive CharInfo::ISO88591); } break; case ITM_TOKENSTR: { // tokenstr('token', 'string'); // First param is a quoted_string and is typed as char during // parsing phase. ValueId vid1 = child(0)->getValueId(); ValueId vid2 = child(1)->getValueId(); // untyped param operands are typed as CHAR vid2.coerceType(NA_CHARACTER_TYPE); const NAType &typ1 = vid1.getType(); const NAType &typ2 = vid2.getType(); if ((typ1.getTypeQualifier() != NA_CHARACTER_TYPE) || (typ2.getTypeQualifier() != NA_CHARACTER_TYPE)) { // 4043 The operand of a $0~String0 function must be character. *CmpCommon::diags() << DgSqlCode(-4043) << DgString0(getTextUpper()); return NULL; } retType = new HEAP SQLVarChar(typ2.getNominalSize(), typ2.supportsSQLnull()); } break; case ITM_UNIQUE_ID: { retType = new HEAP SQLChar(16, FALSE); } break; default: { retType = (NAType *)ItemExpr::synthesizeType(); } } // switch return retType; } // ----------------------------------------------------------------------- // member functions for class UDFunction. // ----------------------------------------------------------------------- const NAType *UDFunction::synthesizeType() { const NAType * retType = NULL; ValueId outVarId; // We assosiate the type of the UDFunction ItemExpr to that of the // first output of the Function. If the function has more than one output // that gets hadled when we flatten the MVF out and use the ValueIdProxies // to represent those outputs. See bindRowValues(). if (udfDesc_ != NULL) { outVarId = udfDesc_->getOutputColumnList()[0]; const NAType &funcType = outVarId.getType(); retType = &funcType; } return retType; } // ----------------------------------------------------------------------- // member functions for class Abs // ----------------------------------------------------------------------- const NAType *Abs::synthesizeType() { // The expression is ABS(<value>) // The result is the absolute value of the operand. 
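  // Illustrative example (assuming getMagnitude() encodes the integer digit count
  // scaled by 10): ABS over a NUMERIC(7,2) column stays NUMERIC(7,2), stored in
  // 4 bytes below, while ABS over an approximate numeric such as REAL returns
  // DOUBLE PRECISION.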
  // type cast any params
  ValueId vid = child(0)->getValueId();
  SQLDoublePrecision dp(TRUE);
  vid.coerceType(dp, NA_NUMERIC_TYPE);

  const NAType &typ1 = child(0)->getValueId().getType();
  if (typ1.getTypeQualifier() != NA_NUMERIC_TYPE)
    {
      // 4045 Absolute function is only defined for numeric types.
      *CmpCommon::diags() << DgSqlCode(-4045) << DgString0(getTextUpper());
      return NULL;
    }

  // now it's safe to cast the types to numeric type
  const NumericType &ntyp1 = (NumericType &) typ1;

  NAType *result = NULL;
  if (ntyp1.isExact())
    {
      Lng32 precision = (ntyp1.getMagnitude() + 9) / 10 + ntyp1.getScale();
      if (precision <= MAX_NUMERIC_PRECISION)
        {
          Int32 length;
          if (precision < 5)
            length = 2;
          else if (precision < 10)
            length = 4;
          else
            length = 8;

          result = new HEAP SQLNumeric(length, precision, ntyp1.getScale(),
                                       ntyp1.isSigned());
        }
      else if (NOT ntyp1.isBigNum() && (ntyp1.getScale() == 0)) // this must be LargeInt
        result = new HEAP SQLLargeInt(ntyp1.isSigned());
      else
        result = new HEAP SQLDoublePrecision();
    }
  else
    {
      result = new HEAP SQLDoublePrecision();
    }

  if (ntyp1.supportsSQLnullLogical())
    result->setNullable(TRUE);

  return result;
}

// -----------------------------------------------------------------------
// member functions for class CodeVal
// -----------------------------------------------------------------------

const NAType *CodeVal::synthesizeType()
{
  // The expression is ASCII(<value>)/CODE_VALUE(<value>)
  // The result is the ASCII or UNICODE value of the first character in <value>.
  //
  // Type cast any params.
  //
  ValueId vid1 = child(0)->getValueId();
  vid1.coerceType(NA_CHARACTER_TYPE);

  const CharType &typ1 = (CharType&)child(0)->getValueId().getType();

  if (typ1.getTypeQualifier() != NA_CHARACTER_TYPE)
    {
      // 4043 The operand of a $0~String0 function must be character.
      *CmpCommon::diags() << DgSqlCode(-4043) << DgString0(getTextUpper());
      return NULL;
    }

  switch (getOperatorType())
    {
    // LCOV_EXCL_START - rfi
    case ITM_NCHAR_MP_CODE_VALUE:
      if ( CharInfo::is_NCHAR_MP(typ1.getCharSet()) != TRUE )
        {
          // 4000: internal binder error. This should not happen because we set the
          // operator type according to the charset of the operand in the
          // NO_OPERATOR_TYPE case first. If we get here, the type code and the
          // operand have been changed such that they do not match.
          *CmpCommon::diags() << DgSqlCode(-4000);
          return NULL;
        }
      break;

    case ITM_UNICODE_CODE_VALUE:
      if ( typ1.getCharSet() != CharInfo::UNICODE )
        {
          // 4000: internal binder error. This should not happen because we set the
          // operator type according to the charset of the operand in the
          // NO_OPERATOR_TYPE case first. If we get here, the type code and the
          // operand have been changed such that they do not match.
          *CmpCommon::diags() << DgSqlCode(-4000);
          return NULL;
        }
      break;
    // LCOV_EXCL_STOP

    case ITM_ASCII:
      {
        CharInfo::CharSet cs = typ1.getCharSet();
        if ( CharInfo::maxBytesPerChar( cs ) != 1 )
          {
            if ( cs == CharInfo::UNICODE )
              {
                // 4106 The character set for the operand of string function
                // ascii/code_value must be $1~String1.
                *CmpCommon::diags() << DgSqlCode(-4106) << DgString0(getTextUpper())
                                    << DgString1(SQLCHARSETSTRING_ISO88591);
                return NULL;
              }
          }
      }
      break;

    case ITM_CODE_VALUE:
      // Before R2.4, code_value and ASCII functions returned the same result.
      // In R2.4, code_value will return the code value of the first
      // character. ASCII will return an error if the first character in the
      // buffer is not an ASCII character, for example, an SJIS or UTF8
      // character. Add one case for code_value.
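      // Illustrative example: CODE_VALUE('abc') returns 97, the code of 'a'. Under the
      // R2.4 behavior described above, ASCII instead raises an error when the first
      // character is not an ASCII character (e.g. an SJIS or UTF8 multi-byte character),
      // while CODE_VALUE still returns that character's code value.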
break; case NO_OPERATOR_TYPE: { switch ( typ1.getCharSet() ) { // LCOV_EXCL_START - mp case CharInfo::KANJI_MP: case CharInfo::KSC5601_MP: setOperatorType(ITM_NCHAR_MP_CODE_VALUE); break; // LCOV_EXCL_STOP case CharInfo::UNICODE: setOperatorType(ITM_UNICODE_CODE_VALUE); break; case CharInfo::ISO88591: default: setOperatorType(ITM_CODE_VALUE); break; } break; // fall through } default: // Character set $0~string0 is not supported for function $1~string1 *CmpCommon::diags() << DgSqlCode(-3403) << DgString0(getTextUpper()) << DgString1(CharInfo::getCharSetName(typ1.getCharSet())); return NULL; } NAType *result = new (HEAP) SQLInt(FALSE, typ1.supportsSQLnullLogical()); return result; } // ----------------------------------------------------------------------- // member functions for class Aggregate // ----------------------------------------------------------------------- const NAType *Aggregate::synthesizeType() { const NAType *result; switch (getEffectiveOperatorType()) { case ITM_COUNT: case ITM_COUNT_NONULL: result = new HEAP SQLLargeInt(TRUE /* 'long long' on NSK can't be unsigned */, FALSE /*not null*/); break; case ITM_AVG: case ITM_SUM: { ValueId vid = child(0)->getValueId(); vid.coerceType(NA_NUMERIC_TYPE); const NAType& operand = child(0)->castToItemExpr()->getValueId().getType(); // If Top of a split aggregate, use the data type of the child // aggregate. if(topPartOfAggr()) { // If this is in a scalar groupby, it can potentially return NULL. // Make sure that the type is nullable. if (inScalarGroupBy()) result = operand.synthesizeNullableType(HEAP); else result = operand.newCopy(HEAP); } else { result = synthAvgSum(operand, inScalarGroupBy()); } break; } case ITM_MAX: case ITM_MIN: { ValueId vid = child(0)->getValueId(); vid.coerceType(NA_CHARACTER_TYPE); const NAType& operand = child(0)->castToItemExpr()->getValueId().getType(); if ( operand.getTypeQualifier() == NA_CHARACTER_TYPE ) { if (CmpCommon::wantCharSetInference()) { const CharType *charOp = (CharType*)&(vid.getType()); const CharType* desiredType = CharType::findPushDownCharType(getDefaultCharSet, charOp, 0); if ( desiredType ) { // just push down the charset field. All other fields are // meaningless. 
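              // Presumed effect (illustrative): when the MIN/MAX operand's charset is
              // still unknown (e.g. an untyped parameter), only the charset carried by
              // *desiredType matters in the coercion below; its remaining fields are
              // placeholders and are effectively ignored.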
vid.coerceType((NAType&)*desiredType, NA_CHARACTER_TYPE); } } } if (!operand.isSupportedType()) { // LCOV_EXCL_START - mp emitDyadicTypeSQLnameMsg(-4041, operand, operand); return NULL; // LCOV_EXCL_STOP } if (inScalarGroupBy()) result = operand.synthesizeNullableType(HEAP); else result = operand.newCopy(HEAP); break; } case ITM_ONE_ROW: case ITM_ONEROW: { const NAType& operand = child(0)->castToItemExpr()->getValueId().getType(); result = operand.synthesizeNullableType(HEAP); break; } case ITM_ONE_TRUE: case ITM_ANY_TRUE_MAX: case ITM_ANY_TRUE: { const SQLBoolean& operand = (const SQLBoolean &) child(0)->castToItemExpr()->getValueId().getType(); // The argument of a ONE/ANY TRUE must be of type SQLBoolean CMPASSERT(operand.getTypeQualifier() == NA_BOOLEAN_TYPE); result = new HEAP SQLBoolean(operand.canBeSQLUnknown()); break; } default: result = ItemExpr::synthesizeType(); break; } return result; } // ----------------------------------------------------------------------- // member functions for class AggMinMax // ----------------------------------------------------------------------- const NAType *AggrMinMax::synthesizeType() { const NAType *result; const NAType& operand = child(0)->castToItemExpr()->getValueId().getType(); // result = operand.synthesizeNullableType(HEAP); result = operand.newCopy(HEAP); return result; } // ----------------------------------------------------------------------- // member functions for class PivotGroup // ----------------------------------------------------------------------- const NAType *PivotGroup::synthesizeType() { return new HEAP SQLVarChar(maxLen_, TRUE); } // ----------------------------------------------------------------------- // member functions for class AnsiUSERFunction // ----------------------------------------------------------------------- static const Lng32 MAX_NT_DOMAIN_NAME_LEN = 30; static const Lng32 MAX_NT_USERNAME_LEN = 20; //the ldap username needs to fit into this field, so make them equal static const Lng32 OPT_MAX_USERNAME_LEN = ComSqlId::MAX_LDAP_USER_NAME_LEN+1; const NAType *AnsiUSERFunction::synthesizeType() { return new HEAP SQLVarChar(OPT_MAX_USERNAME_LEN, FALSE); } const NAType *MonadicUSERFunction::synthesizeType() { // // Type cast any params. // ValueId vid = child(0)->getValueId(); SQLInt si(TRUE); vid.coerceType(si, NA_NUMERIC_TYPE); // // Check that the operands are compatible. // const NAType& typ1 = vid.getType(); if (typ1.getTypeQualifier() != NA_NUMERIC_TYPE) { // 4043 The operand of a USER function must be character. *CmpCommon::diags() << DgSqlCode(-4045) << DgString0(getTextUpper()); return NULL; } // now it's safe to cast the types to numeric type const NumericType &ntyp1 = (NumericType &) typ1; if (NOT ntyp1.isExact()) { // 4046 USER function is only defined for exact numeric types. *CmpCommon::diags() << DgSqlCode(-4046) << DgString0(getTextUpper()); return NULL; } if (ntyp1.getScale() != 0) { // 4047 Arguments of USER function must have a scale of 0. *CmpCommon::diags() << DgSqlCode(-4047) << DgString0(getTextUpper()); return NULL; } // // Return the result. // return new HEAP SQLVarChar(OPT_MAX_USERNAME_LEN, typ1.supportsSQLnullLogical()); } const NAType *MonadicUSERIDFunction::synthesizeType() { // // Type cast any params. // ValueId vid = child(0)->getValueId(); vid.coerceType(NA_CHARACTER_TYPE); // // Check that the operands are compatible. // const NAType& operand = vid.getType(); if (operand.getTypeQualifier() != NA_CHARACTER_TYPE) { // 4043 The operand of a USERID function must be character. 
*CmpCommon::diags() << DgSqlCode(-4043) << DgString0(getTextUpper()); return NULL; } // // Return the result. // return new HEAP SQLVarChar(OPT_MAX_USERNAME_LEN, operand.supportsSQLnullLogical()); } // ----------------------------------------------------------------------- // member functions for class Assign // ----------------------------------------------------------------------- const NAType *Assign::doSynthesizeType(ValueId & targetId, ValueId & sourceId) { NABoolean ODBC = (CmpCommon::getDefault(ODBC_PROCESS) == DF_ON); NABoolean JDBC = (CmpCommon::getDefault(JDBC_PROCESS) == DF_ON); NABoolean forceSourceParamToBeNullable = (CmpCommon::getDefault(COMP_BOOL_173) == DF_ON); // // Type cast any params. // targetId = child(0)->castToItemExpr()->getValueId(); sourceId = child(1)->castToItemExpr()->getValueId(); const NAType& targetType = targetId.getType(); NABoolean sourceIsUntypedParam = (sourceId.getType().getTypeQualifier() == NA_UNKNOWN_TYPE); // Charset inference. const NAType& sourceType = sourceId.getType(); targetId.coerceType(sourceType); sourceId.coerceType(targetType); // if this param is the source of an insert/update stmt coming in // from odbc/jdbc interface and is not nullable, then make it nullable // if the user has asked for it. if ((NOT sourceId.getType().supportsSQLnull()) && (ODBC || JDBC) && (forceSourceParamToBeNullable) && (sourceIsUntypedParam)) { NAType &sourceType = (NAType&)(sourceId.getType()); sourceType.setNullable(TRUE); // Propagate (pushDowntype()) this type to the children of this valueid // in case one of the children could not be typed. // const NAType* synthesizedNewType = sourceId.getItemExpr()->pushDownType(sourceType); sourceId.changeType(synthesizedNewType); } // // Check that the operands are compatible. // if (NOT targetId.getType().isCompatible(sourceId.getType())) { if (((CmpCommon::getDefault(MODE_SPECIAL_1) == DF_ON) || (CmpCommon::getDefault(ALLOW_INCOMPATIBLE_ASSIGNMENT) == DF_ON)) && (sourceId.getType().getTypeQualifier() != NA_RECORD_TYPE ) && ((child(1)->getOperatorType() != ITM_CONSTANT) || (NOT ((ConstValue *) child(1).getPtr() )->isNull()))) { // target type is not the same as source type. // Assignment allowed in special_1 mode. // bindNode will add an explicit CAST node. // All supported incompatible conversions will be handled by CAST. return &targetType; } return NULL; } // // Return the result. // return &targetType; } const NAType *Assign::synthesizeType(const char * str1, const Lng32 int1) { ValueId targetId, sourceId; const NAType * result = doSynthesizeType(targetId, sourceId); if (result == NULL) { emitDyadicTypeSQLnameMsg(arkcmpErrorISPWrongDataType, targetId.getType(), sourceId.getType(), str1, NULL, // No str2 value NULL, // Default diags area int1); return NULL; } // // Return the result. // return result; } const NAType *Assign::synthesizeType() { ValueId targetId, sourceId; const NAType * result = doSynthesizeType(targetId, sourceId); if (result == NULL) { emitDyadicTypeSQLnameMsg(-4039, targetId.getType(), sourceId.getType(), ToAnsiIdentifier(targetId.getNAColumn()->getColName())); return NULL; } // // Return the result. 
// return result; } // ----------------------------------------------------------------------- // member functions for class BaseColumn // ----------------------------------------------------------------------- const NAType *BaseColumn::synthesizeType() { return &getType(); } // ----------------------------------------------------------------------- // member functions for class IndexColumn // ----------------------------------------------------------------------- const NAType * IndexColumn::synthesizeType() { return &indexColDefinition_.getType(); } // ----------------------------------------------------------------------- // member functions for class Between // ----------------------------------------------------------------------- const NAType *Between::synthesizeType() { ItemExprList exprList1(child(0).getPtr(), HEAP); ItemExprList exprList2(child(1).getPtr(), HEAP); ItemExprList exprList3(child(2).getPtr(), HEAP); if (exprList1.entries() != exprList2.entries() OR exprList1.entries() != exprList3.entries()) { // 4040 The operands of a between predicate must be of equal degree. *CmpCommon::diags() << DgSqlCode(-4040); return NULL; } NABoolean allowsUnknown = FALSE; NABoolean allowIncompatibleComparison = (((CmpCommon::getDefault(ALLOW_INCOMPATIBLE_COMPARISON) == DF_ON) || (CmpCommon::getDefault(MODE_SPECIAL_1) == DF_ON) || (CmpCommon::getDefault(MODE_SPECIAL_3) == DF_ON)) && (!CmpCommon::statement()->isDDL()) && (child(0)->castToItemExpr()->getOperatorType() != ITM_ONE_ROW) && (child(1)->castToItemExpr()->getOperatorType() != ITM_ONE_ROW) && (child(2)->castToItemExpr()->getOperatorType() != ITM_ONE_ROW) && (child(0)->castToItemExpr()->getOperatorType() != ITM_ONEROW) && (child(1)->castToItemExpr()->getOperatorType() != ITM_ONEROW) && (child(2)->castToItemExpr()->getOperatorType() != ITM_ONEROW)); for (CollIndex i = 0; i < exprList1.entries(); i++) { // // Type cast any params. // ValueId vid1 = exprList1[i]->getValueId(); ValueId vid2 = exprList2[i]->getValueId(); ValueId vid3 = exprList3[i]->getValueId(); vid1.coerceType(vid2.getType()); vid1.coerceType(vid3.getType(), NA_NUMERIC_TYPE); vid2.coerceType(vid1.getType()); vid3.coerceType(vid1.getType()); // // Check that the operands are comparable. 
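    // Illustrative example (hypothetical predicate): in "col BETWEEN ? AND 10" the
    // untyped parameter has already been coerced to col's type by the coerceType
    // calls above, so the comparability checks below see three fully typed operands.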
// const NAType& op1 = vid1.getType(); const NAType& op2 = vid2.getType(); const NAType& op3 = vid3.getType(); NABoolean compareOp2 = TRUE; NABoolean compareOp3 = TRUE; if (allowIncompatibleComparison) { if(((op1.getTypeQualifier() == NA_DATETIME_TYPE) && (op2.getTypeQualifier() == NA_CHARACTER_TYPE) && (vid2.getItemExpr()->getOperatorType() == ITM_CONSTANT)) || ((op2.getTypeQualifier() == NA_DATETIME_TYPE) && (op1.getTypeQualifier() == NA_CHARACTER_TYPE) && (vid1.getItemExpr()->getOperatorType() == ITM_CONSTANT))) compareOp2 = FALSE; if(((op1.getTypeQualifier() == NA_DATETIME_TYPE) && (op3.getTypeQualifier() == NA_CHARACTER_TYPE) && (vid3.getItemExpr()->getOperatorType() == ITM_CONSTANT)) || ((op3.getTypeQualifier() == NA_DATETIME_TYPE) && (op1.getTypeQualifier() == NA_CHARACTER_TYPE) && (vid1.getItemExpr()->getOperatorType() == ITM_CONSTANT))) compareOp3 = FALSE; if (CmpCommon::getDefault(MODE_SPECIAL_3) == DF_ON) { if (((op1.getPrecision() == SQLDTCODE_TIMESTAMP) && (op2.getPrecision() == SQLDTCODE_DATE)) || ((op2.getPrecision() == SQLDTCODE_TIMESTAMP) && (op1.getPrecision() == SQLDTCODE_DATE))) compareOp2 = FALSE; if (((op1.getPrecision() == SQLDTCODE_TIMESTAMP) && (op3.getPrecision() == SQLDTCODE_DATE)) || ((op3.getPrecision() == SQLDTCODE_TIMESTAMP) && (op1.getPrecision() == SQLDTCODE_DATE))) compareOp3 = FALSE; } } if (op1.getTypeQualifier() == NA_CHARACTER_TYPE && op2.getTypeQualifier() == NA_CHARACTER_TYPE && op3.getTypeQualifier() == NA_CHARACTER_TYPE) { if ( CmpCommon::getDefault(ALLOW_IMPLICIT_CHAR_CASTING) == DF_ON ) { compareOp2 = FALSE; compareOp3 = FALSE; } } if ((compareOp2) && (NOT op1.isComparable(op2, this))) //## errmsg 4034 w/ unparse? return FALSE; if ((compareOp3) && (NOT op1.isComparable(op3, this))) //## errmsg 4034 w/ unparse? return FALSE; // If any of the operands is nullable the result could be unknown allowsUnknown = allowsUnknown OR op1.supportsSQLnullLogical() OR op2.supportsSQLnullLogical() OR op3.supportsSQLnullLogical(); } // // Return the result. // return new HEAP SQLBoolean(allowsUnknown); } // ----------------------------------------------------------------------- // member functions for class BiArith // ----------------------------------------------------------------------- const NAType *BiArith::synthesizeType() { // // Type cast any params. // ValueId vid1 = child(0)->getValueId(); ValueId vid2 = child(1)->getValueId(); if (vid1.getType().getTypeQualifier() == NA_UNKNOWN_TYPE && vid2.getType().getTypeQualifier() == NA_NUMERIC_TYPE) { // if op1 is a param with unknown type and op2 // is an exact numeric, type cast op1 to the default // numeric type const NumericType& op2 = (NumericType&)vid2.getType(); if (op2.isExact()) vid1.coerceType(NA_NUMERIC_TYPE); } else if (vid2.getType().getTypeQualifier() == NA_UNKNOWN_TYPE && vid1.getType().getTypeQualifier() == NA_NUMERIC_TYPE) { // if op2 is a param with unknown type and op1 // is an exact numeric, type cast op2 to the default // numeric type const NumericType& op1 = (NumericType&)vid1.getType(); if (op1.isExact()) vid2.coerceType(NA_NUMERIC_TYPE); }; vid1.coerceType(vid2.getType(), NA_NUMERIC_TYPE); vid2.coerceType(vid1.getType()); UInt32 flags = ((CmpCommon::getDefault(LIMIT_MAX_NUMERIC_PRECISION) == DF_ON) ? 
NAType::LIMIT_MAX_NUMERIC_PRECISION : 0); if (CmpCommon::getDefault(MODE_SPECIAL_1) == DF_ON) { flags |= NAType::MODE_SPECIAL_1; } if (CmpCommon::getDefault(MODE_SPECIAL_2) == DF_ON) { flags |= NAType::MODE_SPECIAL_2; } if (CmpCommon::getDefault(MODE_SPECIAL_4) == DF_ON) { flags |= NAType::MODE_SPECIAL_4; } NABoolean limitPrecision = ((flags & NAType::LIMIT_MAX_NUMERIC_PRECISION) != 0); // // Synthesize the result. // const NAType& operand1 = vid1.getType(); const NAType& operand2 = vid2.getType(); const NAType *result; switch (getOperatorType()) { case ITM_PLUS: result = operand1.synthesizeType(SYNTH_RULE_ADD, operand1, operand2, HEAP, &flags); break; case ITM_MINUS: result = (getIntervalQualifier() == NULL) ? operand1.synthesizeType(SYNTH_RULE_SUB, operand1, operand2, HEAP, &flags) : operand1.synthesizeTernary(SYNTH_RULE_SUB, operand1, operand2, *getIntervalQualifier(), HEAP); break; case ITM_TIMES: result = operand1.synthesizeType(SYNTH_RULE_MUL, operand1, operand2, HEAP, &flags); break; case ITM_DIVIDE: { // if roundingMode is already set in this node, use it. // ignoreSpecialRounding() == TRUE indicates rounding disabled, in // which case follow default roundingMode = 0. short roundingMode = getRoundingMode(); if (roundingMode == 0 && ! ignoreSpecialRounding() ) roundingMode = (short)CmpCommon::getDefaultLong(ROUNDING_MODE); if (roundingMode != 0) { flags |= NAType::ROUND_RESULT; // also limit precision, if rounding is to be done. // Rounding is only supported using division rounding mechanism // for exact and simple (no BigNums) numerics. flags |= NAType::LIMIT_MAX_NUMERIC_PRECISION; } result = operand1.synthesizeType(SYNTH_RULE_DIV, operand1, operand2, HEAP, &flags); if ((roundingMode != 0) && (result) && ((flags & NAType::RESULT_ROUNDED) != 0)) { // if rounding was requested and done, set that info in // the BiArith node. setRoundingMode(roundingMode); } else { setRoundingMode(0); } } break; case ITM_EXPONENT: result = operand1.synthesizeType(SYNTH_RULE_EXP, operand1, operand2, HEAP, &flags); break; default: result = ItemExpr::synthesizeType(); break; } if (!result) { if (operand1.getTypeQualifier() == NA_RECORD_TYPE || operand2.getTypeQualifier() == NA_RECORD_TYPE) { // 4020 arith operation not allowed on row-value-constructor. *CmpCommon::diags() << DgSqlCode(-4020); } else { const char *intervalQ; if (getIntervalQualifier()) intervalQ = getIntervalQualifier()->getTypeSQLname(TRUE /*terse*/); else intervalQ = ""; // 4034 The operation (~op1 ~operator ~op2) ~iq is not allowed. 
emitDyadicTypeSQLnameMsg(-4034, operand1, operand2, getTextUpper(), intervalQ); } } return result; } // ----------------------------------------------------------------------- // member functions for class BiLogic // ----------------------------------------------------------------------- const NAType *BiLogic::synthesizeType() { const SQLBoolean& operand0 = (SQLBoolean&) child(0).getValueId().getType(); const SQLBoolean& operand1 = (SQLBoolean&) child(1).getValueId().getType(); NABoolean allowsUnknown = operand0.canBeSQLUnknown() OR operand1.canBeSQLUnknown(); return new HEAP SQLBoolean(allowsUnknown); } // ----------------------------------------------------------------------- // member functions for class BiRelat // ----------------------------------------------------------------------- const NAType *BiRelat::synthesizeType() { ItemExpr *ie1 = child(0); ItemExpr *ie2 = child(1); if (ie1->getOperatorType() == ITM_ONE_ROW) ie1 = ie1->child(0); if (ie2->getOperatorType() == ITM_ONE_ROW) ie2 = ie2->child(0); ItemExprList exprList1(ie1, HEAP); ItemExprList exprList2(ie2, HEAP); // in some cases, we allow comparisons between 'incompatible' datatypes. // This is allowed if CQD is set, and it is a single valued scaler // predicate (a <op> b), and the comparison is done between a char/varhar // and numeric type. // In these conditions, the char type is converted to numeric by putting // a CAST node on top of it. // This incompatible comparison is not allowed if the statement is a DDL NABoolean allowIncompatibleComparison = FALSE; if ((((!CmpCommon::statement()->isDDL()) && ((CmpCommon::getDefault(ALLOW_INCOMPATIBLE_COMPARISON) == DF_ON) || (CmpCommon::getDefault(MODE_SPECIAL_1) == DF_ON) || (CmpCommon::getDefault(MODE_SPECIAL_3) == DF_ON))) || (CmpCommon::getDefault(MODE_SPECIAL_4) == DF_ON)) && (child(0)->castToItemExpr()->getOperatorType() != ITM_ONE_ROW) && (child(1)->castToItemExpr()->getOperatorType() != ITM_ONE_ROW) && (exprList1.entries() == 1) && (exprList2.entries() == 1)) allowIncompatibleComparison = TRUE; NABoolean allowsUnknown; if (!synthItemExprLists(exprList1, exprList2, allowIncompatibleComparison, allowsUnknown, this)) return NULL; return new HEAP SQLBoolean(allowsUnknown); } // ----------------------------------------------------------------------- // member functions for class BoolResult // ----------------------------------------------------------------------- const NAType *BoolResult::synthesizeType() { return new HEAP SQLBoolean(getOperatorType() == ITM_RETURN_NULL); } // ----------------------------------------------------------------------- // member functions for class BoolVal // ----------------------------------------------------------------------- const NAType *BoolVal::synthesizeType() { return new HEAP SQLBoolean(getOperatorType() == ITM_RETURN_NULL); } //------------------------------------------------------------------ // member functions for class RaiseError //------------------------------------------------------------------ const NAType *RaiseError::synthesizeType() { // -- Triggers if (getArity() == 1) { // Verify the string expression is of character type. if (child(0)->getValueId().getType().getTypeQualifier() != NA_CHARACTER_TYPE) { // parameter 3 must be of type string. 
*CmpCommon::diags() << DgSqlCode(-3185); return NULL; } } return new HEAP SQLBoolean(FALSE); // can be overridden in IfThenElse } // ----------------------------------------------------------------------- // member functions for class IfThenElse // ----------------------------------------------------------------------- const NAType *IfThenElse::synthesizeType() { // // The ELSE clause may be a NULL pointer if this is part of a CASE statement // created by the generator. // ValueId thenId = child(1)->getValueId(); if (child(2).getPtr() == NULL) return &thenId.getType(); ValueId elseId = child(2)->getValueId(); // // Type cast any params. // thenId.coerceType(elseId.getType(), NA_NUMERIC_TYPE); elseId.coerceType(thenId.getType()); // infer the charset if unknown. if ( thenId.getType().getTypeQualifier() == NA_CHARACTER_TYPE && elseId.getType().getTypeQualifier() == NA_CHARACTER_TYPE ) { const CharType *thenCharType = (CharType*)&thenId.getType(); const CharType *elseCharType = (CharType*)&elseId.getType(); if (CmpCommon::wantCharSetInference()) { const CharType* desiredType = CharType::findPushDownCharType(getDefaultCharSet, thenCharType, elseCharType, 0); if ( desiredType ) { // just push down the charset field. All other fields are // meaningless. thenId.coerceType((NAType&)*desiredType, NA_CHARACTER_TYPE); elseId.coerceType((NAType&)*desiredType, NA_CHARACTER_TYPE); } } } // // Synthesize the result. // UInt32 flags = ((CmpCommon::getDefault(LIMIT_MAX_NUMERIC_PRECISION) == DF_ON) ? NAType::LIMIT_MAX_NUMERIC_PRECISION : 0); if (CmpCommon::getDefault(MODE_SPECIAL_1) == DF_ON) { flags |= NAType::MODE_SPECIAL_1; } if (CmpCommon::getDefault(MODE_SPECIAL_2) == DF_ON) { flags |= NAType::MODE_SPECIAL_2; } if (CmpCommon::getDefault(TYPE_UNIONED_CHAR_AS_VARCHAR) == DF_ON) { flags |= NAType::MAKE_RESULT_VARCHAR; } const NAType& thenType = thenId.getType(); const NAType& elseType = elseId.getType(); const NAType *result = thenType.synthesizeType(SYNTH_RULE_UNION, thenType, elseType, HEAP, &flags); if (result == NULL) { // Ignore the RaiseError's type and pass thru the other operand's type if (thenId.getItemExpr()->getOperatorType() == ITM_RAISE_ERROR) return &elseType; if (elseId.getItemExpr()->getOperatorType() == ITM_RAISE_ERROR) return &thenType; // 4049 CASE can't have result types that are mixed emitDyadicTypeSQLnameMsg(-4049, thenType, elseType); } return result; } // ----------------------------------------------------------------------- // member functions for class Cast // ----------------------------------------------------------------------- // Exact numeric can be cast to a single-field interval, and vice versa. // In special_1 mode, numerics can be cast to multi-field intervals. 
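// Illustrative examples (hypothetical casts): CAST(30 AS INTERVAL DAY) passes the
// check below because INTERVAL DAY is a single-field interval, whereas
// CAST(30 AS INTERVAL YEAR TO MONTH) passes only when MODE_SPECIAL_1 is ON.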
static NABoolean numericCastIsCompatible(const NAType &src, const NAType &tgt) { if (src.getTypeQualifier() == NA_NUMERIC_TYPE && tgt.getTypeQualifier() == NA_INTERVAL_TYPE && tgt.isSupportedType()) { NumericType& numeric = (NumericType&)src; IntervalType& interval = (IntervalType&)tgt; if (numeric.isExact()) { if (CmpCommon::getDefault(MODE_SPECIAL_1) == DF_ON) return TRUE; else if (interval.getStartField() == interval.getEndField()) return TRUE; } } //check for numeric to date conversion else if ((CmpCommon::getDefault(MODE_SPECIAL_1) == DF_ON) && (tgt.getTypeQualifier() == NA_DATETIME_TYPE) && (src.getTypeQualifier() == NA_NUMERIC_TYPE)) { DatetimeType &dtType = (DatetimeType&)tgt; NumericType &numeric = (NumericType&)src; if ((dtType.getPrecision() == SQLDTCODE_DATE) && (numeric.isExact()) && (NOT numeric.isBigNum()) && (numeric.getScale() == 0)) { return TRUE; } } return FALSE; } // Begin_Fix 10-040114-2431 // 02/18/2004 // Added as part of above mentioned fix // synthesizeType for Narrow ensures that // if we match the nullability of the child // if that is required. This is done by setting // Cast::matchChildType_ flag, if it is not already // set. If Cast::matchChildType_ is not set, we set // it and then unset it after Cast::synthesizeType(). // Setting Cast::matchChildType_ does more than just // matching my child's nullability (please see class // Cast in ItemFunc.h), therefore if it is not set // initially we just unset it after calling // Cast::synthesizeType(). const NAType *Narrow::synthesizeType() { //check if we Cast::matchChildType_ is set NABoolean matchChildType = Cast::matchChildType(); //if Cast::matchChildType_ is not set and we //want to force our nullability to be the same //as the child's nullability, set Cast::matchChildType_ if ((!matchChildType) && (matchChildNullability_)) { //setting this flag will force our nullability //to be the same as the child's nullability Cast::setMatchChildType(TRUE); } //call Cast::synthesizeType() to do the real type synthesis const NAType * result = Cast::synthesizeType(); //if Cast::matchChildType_ was not initially set //then just unset it again. if ((!matchChildType) && (matchChildNullability_)) { Cast::setMatchChildType(FALSE); } return result; } // End_Fix 10-040114-2431 const NAType *Cast::synthesizeType() { // // Type cast any params. // Assert that we are bound, or created by Generator, so we have type info. // ValueId vid = child(0)->getValueId(); CMPASSERT(vid != NULL_VALUE_ID); NABoolean untypedParam = ((child(0)->getOperatorType() == ITM_DYN_PARAM) && (vid.getType().getTypeQualifier() == NA_UNKNOWN_TYPE)); NAType * result = NULL; NABoolean typeChanged = FALSE; NABoolean sensitiveChanged = FALSE; NABoolean charsetChanged = FALSE; vid.coerceType(*getType()); if (untypedParam) { // an untyped param is being typed using CAST. if (vid.getType().supportsSQLnull() != getType()->supportsSQLnull()) { // Set the null attribute to be the same as that of the cast node. NAType * newType = vid.getType().newCopy(HEAP); newType->setNullable(getType()->supportsSQLnull()); vid.changeType(newType); } // mark this cast node so code for it is not generated at code // generation time. setMatchChildType(TRUE); } // Fix for CR 10-010426-2464: If its child supports NULL but itself // does not AND the node's nullability is changable (i.e. not specified // explicitly in the application), set it same as its child. // NOTE: the new copy is necessary only because that the type_ is a const // member. 
If the const is ever removed, setNullable can be called // directly else if (vid.getType().supportsSQLnull() != getType()->supportsSQLnull()) { if (matchChildType()) // NOT NULL phrase not specified { result = getType()->newCopy(HEAP); result->setNullable(vid.getType()); typeChanged = TRUE; } } const NAType &src = vid.getType(); const NAType &tgt = (typeChanged)? *result: *getType(); NABuiltInTypeEnum srcQual = src.getTypeQualifier(); NABuiltInTypeEnum tgtQual = tgt.getTypeQualifier(); if ((src.getTypeQualifier() == NA_CHARACTER_TYPE) && (tgt.getTypeQualifier() == NA_CHARACTER_TYPE)) { const CharType &cSrc = (CharType&)src; CharType &cTgt = (CharType&)tgt; if (cSrc.isCaseinsensitive() && (NOT cTgt.isCaseinsensitive())) sensitiveChanged = TRUE; if ( cSrc.getCharSet() != CharInfo::UnknownCharSet && cTgt.getCharSet() == CharInfo::UnknownCharSet) charsetChanged = TRUE; if (sensitiveChanged || charsetChanged) { result = tgt.newCopy(HEAP); typeChanged = TRUE; if (sensitiveChanged) ((CharType*)result)->setCaseinsensitive(TRUE); if (charsetChanged) ((CharType*)result)->setCharSet(cSrc.getCharSet()); } } const NAType &res = (typeChanged)? *result: *getType(); // // The NULL constant can be cast to any type. // if (getExpr()->getOperatorType() == ITM_CONSTANT) if (((ConstValue*)getExpr())->isNull()) return (typeChanged)? result: getType(); // // See the chart in ANSI 6.10, a rather symmetrical piece of work. // Currently the "M" (Maybe) general subrules are being interpreted // as "Y" (Yes, legally castable). // Also, the Bitstring datatypes are not currently supported. // Internally, we use SQLBooleans for some predicate results (=ANY, e.g.). // // The diagonal of compatible types is fine. // Character types can be cast from or to with impunity. // Numeric can be cast to our internal SQLBoolean. // Exact numeric can be cast to a single-field interval, and vice versa. // Timestamp can be cast to time or date; date or time can cast to timestamp. // NABoolean legal = FALSE; // If both operands are char, they must be compatible (i.e., same charset); // they do NOT have to be comparable (i.e., collation/coercibility ok)! // // The result type takes the charset from the target, and: // - if target is a standard Ansi data type -- i.e., no COLLATE-clause -- // then DEFAULT collation and COERCIBLE coercibility are used, // per Ansi 6.10 SR 8; // - if target is a Tandem-extension data type declaration -- // i.e., with an explicit COLLATE-clause, such as // CAST(a AS CHAR(n) COLLATE SJIS) -- // then the specified collation and coercibility (EXPLICIT) are used. // // Note that both of these come "for free": // - if no COLLATE-clause was specified, // SqlParser.y and the CharType-ctor-defaults will give // DEFAULT/COERCIBLE to the unadorned data type; // - if a COLLATE-clause was specified by user // or if we are doing internal-expr casts -- // e.g., if our caller is // propagateCoAndCoToXXX(), or CodeGen, or ColReference::bindNode -- // we simply use that collate/coerc. // // In fact, if these DIDN'T come for free, we would break INTERNAL casts: // if (srcQual == NA_CHARACTER_TYPE && tgtQ == NA_CHARACTER_TYPE)[ // if (((const CharType&)src).getCharSet() == tgt.getCharSet())[ // CharType* newType = (CharType*)(tgtCT.newCopy(HEAP)); // newType->setCoAndCo(CharInfo::DefaultCollation, COERCIBLE); // return newType; // // But if in future we support Ansi "domains", // then need to revisit this, per Ansi 6.10 SR 1a + 8. 
// // In other words, a) use isCompatible(), not isComparable(), // and b) just pass the tgt's collation/coercibility along! // if ((srcQual == NA_LOB_TYPE) && (tgtQual != NA_LOB_TYPE)) legal = FALSE; else if (charsetChanged && src.isCompatible(res)) legal = TRUE; else if (src.isCompatible(tgt)) legal = TRUE; else if (srcQual == NA_CHARACTER_TYPE || tgtQual == NA_CHARACTER_TYPE) { legal = (srcQual != tgtQual); // if BOTH are CHAR: isCompatible() failed // disable casting KANJI/KSC5601 from/to any other data types. Same behavior as MP. if ( (srcQual == NA_CHARACTER_TYPE && CharInfo::is_NCHAR_MP(((const CharType&)src).getCharSet())) || (tgtQual == NA_CHARACTER_TYPE && CharInfo::is_NCHAR_MP(((const CharType&)tgt).getCharSet())) ) legal = FALSE; // if BOTH are CHAR: make legal if both unknown charset if ( (srcQual == NA_CHARACTER_TYPE && (((const CharType&)src).getCharSet())==CharInfo::UnknownCharSet) && (tgtQual == NA_CHARACTER_TYPE && (((const CharType&)tgt).getCharSet())==CharInfo::UnknownCharSet) ) legal = TRUE; if ( srcQual == tgtQual ) // if BOTH are CHAR { if ( CmpCommon::getDefault(ALLOW_IMPLICIT_CHAR_CASTING) == DF_ON ) { legal = TRUE; // // NOTE: The Generator has code to throw in a Translate node if an // incompatible character set comparison is attempted. // } } } else if (srcQual == NA_NUMERIC_TYPE) legal = numericCastIsCompatible(src, tgt) || tgtQual == NA_BOOLEAN_TYPE; else if (srcQual == NA_INTERVAL_TYPE) legal = numericCastIsCompatible(tgt, src); else if (srcQual == NA_DATETIME_TYPE && tgtQual == NA_NUMERIC_TYPE) { legal = (((DatetimeType&)src).getSubtype() == DatetimeType::SUBTYPE_SQLDate); } else if (srcQual == NA_DATETIME_TYPE && tgtQual == NA_DATETIME_TYPE) { legal = ((DatetimeType&)src).getSubtype() == DatetimeType::SUBTYPE_SQLTimestamp || ((DatetimeType&)tgt).getSubtype() == DatetimeType::SUBTYPE_SQLTimestamp || ((DatetimeType&)tgt).fieldsOverlap((DatetimeType &)src); } if (!src.isSupportedType() || !tgt.isSupportedType()) { if (src == tgt) { legal = TRUE; } else { legal = FALSE; } } if (legal) return (typeChanged)? result: getType(); // 4035 can't cast type from src to tgt emitDyadicTypeSQLnameMsg(-4035, src, tgt); return NULL; } const NAType *CastConvert::synthesizeType() { const NAType * type = Cast::synthesizeType(); if (type == NULL) return NULL; NABuiltInTypeEnum qual = type->getTypeQualifier(); if (qual != NA_CHARACTER_TYPE) return type; // return a char type that is large enough to hold the ascii // representation of the operand. 
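  // Illustrative example (the exact display length is an assumption): converting a
  // signed INT, whose display length is taken to be 11 characters, would yield
  // CHAR(11) or VARCHAR(11) below, depending on whether the original target type
  // was fixed- or varying-length.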
  const NAType &childType = child(0)->castToItemExpr()->getValueId().getType();
  Lng32 maxLength = childType.getDisplayLength(childType.getFSDatatype(),
                                               childType.getNominalSize(),
                                               childType.getPrecision(),
                                               childType.getScale(),
                                               0);
  CharType * origType = (CharType *)getType();
  if (DFS2REC::isAnyVarChar(origType->getFSDatatype()) == FALSE)
    type = new HEAP SQLChar(maxLength,
                            childType.supportsSQLnull(),
                            origType->isUpshifted());
  else
    type = new HEAP SQLVarChar(maxLength,
                               childType.supportsSQLnull(),
                               origType->isUpshifted());

  return type;
}

const NAType *CastType::synthesizeType()
{
  //  NABuiltInTypeEnum qual = child(0)->getValueId().getType().getTypeQualifier();
  //  if (qual != NA_CHARACTER_TYPE)
  //    return NULL; // source must be a character type

  return getType();
}

// -----------------------------------------------------------------------
// member functions for class CharFunc
// -----------------------------------------------------------------------

const NAType *CharFunc::synthesizeType()
{
  // The expression is CHAR(<num>) or UNICODE_CHAR(<num>) or NCHAR(<num>)
  // The result is the character that has the
  // ASCII or UNICODE or <NATIONAL_CHARSET> code of <num>.
  //
  // Type cast any params.
  //
  SQLInt nType(FALSE);
  ValueId vid1 = child(0)->getValueId();
  vid1.coerceType(nType, NA_NUMERIC_TYPE);

  const NAType &typ1 = child(0)->getValueId().getType();
  if (typ1.getTypeQualifier() != NA_NUMERIC_TYPE)
    {
      // 4045 Operand must be numeric.
      *CmpCommon::diags() << DgSqlCode(-4045) << DgString0(getTextUpper());
      return NULL;
    }

  // now it's safe to cast the type to numeric type
  const NumericType &ntyp1 = (NumericType &) typ1;

  if (! ntyp1.isExact())
    {
      // 4046 Operand must be exact.
      *CmpCommon::diags() << DgSqlCode(-4046) << DgString0(getTextUpper());
      return NULL;
    }

  if (typ1.getScale() != 0)
    {
      // 4047 Operand must not have a scale.
      *CmpCommon::diags() << DgSqlCode(-4047) << DgString0(getTextUpper());
      return NULL;
    }

  CharInfo::CharSet cs_to_use = charSet_ ;
  CharType *result;
  if (charSet_ == CharInfo::UCS2 || charSet_ < 0) // UCS2, kanji and KSC5601_MP
    result = new (HEAP) SQLChar ( 1,
                                  typ1.supportsSQLnullLogical(),
                                  FALSE/*not upshift*/,
                                  FALSE/*case sensitive*/,
                                  FALSE/*not varchar*/,
                                  charSet_);
  else
    result = new (HEAP) SQLVarChar( CharInfo::maxBytesPerChar( cs_to_use )
                                  , typ1.supportsSQLnullLogical()
                                  , FALSE /*not upshift*/
                                  , FALSE /*case sensitive*/
                                  , cs_to_use
                                  , CharInfo::DefaultCollation
                                  , CharInfo::COERCIBLE
                                  );

  return result;
}

// -----------------------------------------------------------------------
// member functions for class ConvertHex
// -----------------------------------------------------------------------

const NAType *ConvertHex::synthesizeType()
{
  //
  // Type cast any params.
  //
  ValueId vid = child(0)->getValueId();
  vid.coerceType(NA_CHARACTER_TYPE);
  //
  // Check that the operands are compatible.
  //
  const NAType* operand = &vid.getType();
  if (getOperatorType() == ITM_CONVERTFROMHEX)
    {
      if (operand->getTypeQualifier() != NA_CHARACTER_TYPE)
        {
          // 4043 The operand of a ConvertHex function must be character.
          *CmpCommon::diags() << DgSqlCode(-4043) << DgString0(getTextUpper());
          return NULL;
        }

      const CharType* charType = (CharType*)operand;
      if ( charType->getCharSet() == CharInfo::UnknownCharSet )
        {
          const CharType* desiredType = CharType::desiredCharType(CharInfo::ISO88591);
          vid.coerceType(*desiredType, NA_CHARACTER_TYPE);
          operand = &vid.getType();
        }

      // operand's size must be an even number since two hex characters make
      // up one result byte.
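      // Illustrative example: CONVERTFROMHEX('1F2A') produces a 2-byte result
      // (nominal size / 2), whereas an odd-length input such as '1F2' fails the
      // even-size check below with error 4068.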
const CharType* chartype1 = (CharType*)operand; if (NOT chartype1->sizeIsEven()) { *CmpCommon::diags() << DgSqlCode(-4068) << DgString0(getTextUpper()); return NULL; } } Int32 maxLength; if (getOperatorType() == ITM_CONVERTTOHEX) maxLength = operand->getNominalSize() * 2; else maxLength = operand->getNominalSize() / 2; NAType * type; if ( operand -> getTypeQualifier() == NA_CHARACTER_TYPE && ( (operand -> isVaryingLen() == TRUE) || ( (const CharType*)operand)->getCharSet()==CharInfo::UTF8 ) ) type = new HEAP SQLVarChar(maxLength, operand->supportsSQLnull()); else type = new HEAP SQLChar(maxLength, operand->supportsSQLnull()); // // Return the result. // return type; } // ----------------------------------------------------------------------- // member functions for class CharLength // ----------------------------------------------------------------------- const NAType *CharLength::synthesizeType() { // // Type cast any params. // ValueId vid = child(0)->getValueId(); vid.coerceType(NA_CHARACTER_TYPE); // // Check that the operands are compatible. // const NAType& operand = vid.getType(); if (operand.getTypeQualifier() != NA_CHARACTER_TYPE) { // 4043 The operand of a CHAR_LENGTH function must be character. *CmpCommon::diags() << DgSqlCode(-4043) << DgString0(getTextForError()); return NULL; } const CharType* charOperand = (CharType*)&(vid.getType()); NAString defVal; NABoolean charsetInference = (CmpCommon::getDefault(INFER_CHARSET, defVal) == DF_ON); if(charsetInference) { const CharType* desiredType = CharType::findPushDownCharType(getDefaultCharSet, charOperand, 0); if ( desiredType ) { // just push down the charset field. All other fields are // ignored. //coerceChildType((NAType&)*desiredType, NA_CHARACTER_TYPE); vid.coerceType(*desiredType, NA_CHARACTER_TYPE); // get the newly pushed-down types charOperand = (CharType*)&(vid.getType()); } } if ( charOperand -> getCharSet() == CharInfo::UnknownCharSet ) { *CmpCommon::diags() << DgSqlCode(-4127); return NULL; } // // Return the result. // return new HEAP SQLInt(FALSE // unsigned ,operand.supportsSQLnullLogical() ); } // ----------------------------------------------------------------------- // member functions for class Concat // ----------------------------------------------------------------------- const NAType *Concat::synthesizeType() { // // Type cast any params. // ValueId vid1 = child(0)->getValueId(); ValueId vid2 = child(1)->getValueId(); // these first two extra calls handle any parameters // operands must be gotten twice because they could change types. CharInfo::CharSet new_cs = getFirstKnownCharSet(vid1, vid2, vid2); // If vid not aleady of NA_CHARACTER_TYPE, make varchar(255) of character set = new_cs vid1.coerceType(NA_CHARACTER_TYPE, new_cs); vid2.coerceType(NA_CHARACTER_TYPE, new_cs); vid1.coerceType(vid2.getType(), NA_CHARACTER_TYPE); vid2.coerceType(vid1.getType()); // // Synthesize the result. // const NAType* operand1 = &vid1.getType(); const NAType* operand2 = &vid2.getType(); NABoolean isCaseInsensitive = FALSE; if ( operand1 -> getTypeQualifier() == NA_CHARACTER_TYPE && operand2 -> getTypeQualifier() == NA_CHARACTER_TYPE ) { const CharType *op1 = (CharType *)operand1; const CharType *op2 = (CharType *)operand2; if (CmpCommon::wantCharSetInference()) { const CharType* desiredType = CharType::findPushDownCharType(getDefaultCharSet, op1, op2, 0); if ( desiredType ) { // just push down the charset field. All other fields are // meaningless. 
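          // (Sketch only: e.g. when a UTF8 column is concatenated with a
          //  character literal whose charset is still unknown, the inferred
          //  UTF8 type is pushed down to the literal so both operands agree
          //  on charset before the result type is synthesized.)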
vid1.coerceType((NAType&)*desiredType, NA_CHARACTER_TYPE); vid2.coerceType((NAType&)*desiredType, NA_CHARACTER_TYPE); } } if (op1->isCaseinsensitive() || op2->isCaseinsensitive()) { isCaseInsensitive = TRUE; } } const NAType *result = operand1->synthesizeType(SYNTH_RULE_CONCAT, *operand1, *operand2, HEAP); if (result == NULL) { // 4034 The operation (~op1 ~operator ~op2) is not allowed. emitDyadicTypeSQLnameMsg(-4034, *operand1, *operand2, getTextUpper()); return result; } if ((result->getTypeQualifier() == NA_CHARACTER_TYPE) && (isCaseInsensitive)) { CharType *ct = (CharType *)result; ct->setCaseinsensitive(TRUE); } return result; } // ----------------------------------------------------------------------- // member functions for class ConstValue // ----------------------------------------------------------------------- const NAType * ConstValue::synthesizeType() { return getType(); } // ----------------------------------------------------------------------- // member functions for class ConvertTimestamp // ----------------------------------------------------------------------- const NAType *ConvertTimestamp::synthesizeType() { // // Type cast any params. // ValueId vid = child(0)->getValueId(); SQLLargeInt largeintType; vid.coerceType(largeintType); // // Check that the operands are compatible. // const NAType& operand = vid.getType(); if (operand.getTypeQualifier() != NA_NUMERIC_TYPE OR NOT ((NumericType&) operand).isExact()) { // 4070 The operand of a CONVERTTIMESTAMP function must be exact numeric. *CmpCommon::diags() << DgSqlCode(-4070) << DgString0(getTextUpper()); return NULL; } // // Return the result. // return new HEAP SQLTimestamp (operand.supportsSQLnullLogical(), SQLTimestamp::DEFAULT_FRACTION_PRECISION, HEAP); } // ----------------------------------------------------------------------- // member functions for class CurrentTimestamp // ----------------------------------------------------------------------- const NAType *CurrentTimestamp::synthesizeType() { return new HEAP SQLTimestamp (FALSE, SQLTimestamp::DEFAULT_FRACTION_PRECISION, HEAP); } // ----------------------------------------------------------------------- // member functions for class InternalTimestamp // ----------------------------------------------------------------------- const NAType *InternalTimestamp::synthesizeType() { return new SQLTimestamp(FALSE); } // ----------------------------------------------------------------------- // member functions for class CurrentTimestampRunning // ----------------------------------------------------------------------- const NAType *CurrentTimestampRunning::synthesizeType() { return new HEAP SQLTimestamp(FALSE); } // ----------------------------------------------------------------------- // member functions for class DateFormat // ----------------------------------------------------------------------- #pragma nowarn(1506) // warning elimination const NAType *DateFormat::synthesizeType() { // // Type cast any params. // ValueId vid = child(0)->getValueId(); SQLTimestamp timestampType; vid.coerceType(timestampType); // // Check that the operands are compatible. // if (!vid.getType().isSupportedType()) { // 4071 The operand of a DATEFORMAT function must be a datetime. 
// LCOV_EXCL_START - mp *CmpCommon::diags() << DgSqlCode(-4071) << DgString0(getTextUpper()); return NULL; // LCOV_EXCL_STOP } if (((getDateFormat() == DEFAULT) || (getDateFormat() == USA) || (getDateFormat() == EUROPEAN)) && (vid.getType().getTypeQualifier() != NA_DATETIME_TYPE)) { // 4071 The operand of a DATEFORMAT function must be a datetime. *CmpCommon::diags() << DgSqlCode(-4071) << DgString0(getTextUpper()); return NULL; } if (getDateFormat() == DATE_FORMAT_STR) { if ((CmpCommon::getDefault(MODE_SPECIAL_2) == DF_ON) && ((vid.getType().getTypeQualifier() != NA_DATETIME_TYPE) && (vid.getType().getTypeQualifier() != NA_CHARACTER_TYPE))) { // 4071 The operand of a DATEFORMAT function must be a datetime. *CmpCommon::diags() << DgSqlCode(-4071) << DgString0(getTextUpper()); return NULL; } } if ((getDateFormat() == TIME_FORMAT_STR) && ((vid.getType().getTypeQualifier() != NA_NUMERIC_TYPE) && (vid.getType().getTypeQualifier() != NA_CHARACTER_TYPE) && (vid.getType().getTypeQualifier() != NA_DATETIME_TYPE))) { // 4071 The operand of a DATEFORMAT function must be a datetime. *CmpCommon::diags() << DgSqlCode(-4071) << DgString0(getTextUpper()); return NULL; } Lng32 length = 0; NABoolean formatAsDate = FALSE; NABoolean formatAsTimestamp = FALSE; NABoolean formatAsTime = FALSE; if (vid.getType().getTypeQualifier() == NA_DATETIME_TYPE) { // This code is now identical for all DatetimeTypes const DatetimeType& operand = (DatetimeType &)vid.getType(); // // Return the result. // if (getDateFormat() == DATE_FORMAT_STR) { if (child(1)->castToItemExpr()->getOperatorType() == ITM_CONSTANT) { ConstValue * cv = (ConstValue*)(child(1)->castToItemExpr()); length = cv->getStorageSize(); } else { // must be a const for now. CMPASSERT(child(1)->castToItemExpr()->getOperatorType() == ITM_CONSTANT); } } else if (getDateFormat() == TIMESTAMP_FORMAT_STR) { if (child(1)->castToItemExpr()->getOperatorType() == ITM_CONSTANT) { ConstValue * cv = (ConstValue*)(child(1)->castToItemExpr()); length = cv->getStorageSize(); if ((NAString((char*)(cv->getConstValue()), cv->getStorageSize()) == "YYYYMMDDHH24MISS") || (NAString((char*)(cv->getConstValue()), cv->getStorageSize()) == "YYYYMMDD:HH24:MI:SS") || (NAString((char*)(cv->getConstValue()), cv->getStorageSize()) == "DD.MM.YYYY:HH24:MI:SS") || (NAString((char*)(cv->getConstValue()), cv->getStorageSize()) == "YYYY-MM-DD HH24:MI:SS")) { // length includes 2 extra bytes for "24" that was specified // in the format. length -= 2; } } else if (getDateFormat() == TIME_FORMAT_STR) { if (child(1)->castToItemExpr()->getOperatorType() == ITM_CONSTANT) { ConstValue * cv = (ConstValue*)(child(1)->castToItemExpr()); length = cv->getStorageSize(); if (NAString((char*)(cv->getConstValue()), cv->getStorageSize()) == "HH24:MI:SS") { // length includes 2 extra bytes for "24" that was specified // in the format. length -= 2; } } } else { // must be a const for now. CMPASSERT(child(1)->castToItemExpr()->getOperatorType() == ITM_CONSTANT); } } else { length = operand.getDisplayLength(); if(operand.containsField(REC_DATE_HOUR) && (getDateFormat() == USA)) length += 3; // add 3 for a blank and "am" or "pm" } } else if (vid.getType().getTypeQualifier() == NA_CHARACTER_TYPE) { // // Return the result. // if (getDateFormat() == DATE_FORMAT_STR) { formatAsDate = TRUE; if (child(1)->castToItemExpr()->getOperatorType() == ITM_CONSTANT) { ConstValue * cv = (ConstValue*)(child(1)->castToItemExpr()); length = cv->getStorageSize(); } else { // must be a const for now. 
CMPASSERT(child(1)->castToItemExpr()->getOperatorType() == ITM_CONSTANT); } } else if (getDateFormat() == TIMESTAMP_FORMAT_STR) { formatAsTimestamp = TRUE; if (child(1)->castToItemExpr()->getOperatorType() == ITM_CONSTANT) { ConstValue * cv = (ConstValue*)(child(1)->castToItemExpr()); length = cv->getStorageSize(); } else { // must be a const for now. CMPASSERT(child(1)->castToItemExpr()->getOperatorType() == ITM_CONSTANT); } } else if (getDateFormat() == TIME_FORMAT_STR) { formatAsTime = TRUE; if (child(1)->castToItemExpr()->getOperatorType() == ITM_CONSTANT) { ConstValue * cv = (ConstValue*)(child(1)->castToItemExpr()); length = cv->getStorageSize(); } else { // must be a const for now. CMPASSERT(child(1)->castToItemExpr()->getOperatorType() == ITM_CONSTANT); } } } else if (vid.getType().getTypeQualifier() == NA_NUMERIC_TYPE) { const NumericType &numeric = (NumericType&)vid.getType(); if ((numeric.isExact()) && (NOT numeric.isBigNum()) && (numeric.getScale() == 0)) { if (child(1)->castToItemExpr()->getOperatorType() == ITM_CONSTANT) { ConstValue * cv = (ConstValue*)(child(1)->castToItemExpr()); length = cv->getStorageSize(); } else { // must be a const for now. CMPASSERT(child(1)->castToItemExpr()->getOperatorType() == ITM_CONSTANT); } } else { // 4047 Arguments of USER function must have a scale of 0. *CmpCommon::diags() << DgSqlCode(-4047) << DgString0(getTextUpper()); return NULL; } } else { // 4071 The operand of a DATEFORMAT function must be a datetime. *CmpCommon::diags() << DgSqlCode(-4071) << DgString0(getTextUpper()); return NULL; } if (formatAsDate) return new HEAP SQLDate(vid.getType().supportsSQLnullLogical()); else if (formatAsTimestamp) return new HEAP SQLTimestamp(vid.getType().supportsSQLnullLogical()); else if (formatAsTime) return new HEAP SQLTime(vid.getType().supportsSQLnullLogical()); else return new HEAP SQLChar(length, vid.getType().supportsSQLnullLogical()); } #pragma warn(1506) // warning elimination // ----------------------------------------------------------------------- // member functions for class DayOfWeek // ----------------------------------------------------------------------- const NAType *DayOfWeek::synthesizeType() { // // Type cast any params. // ValueId vid = child(0)->getValueId(); SQLTimestamp timestampType; vid.coerceType(timestampType); // // Check that the operand contains a DAY field // const NAType& operand = vid.getType(); if ((operand.getTypeQualifier() != NA_DATETIME_TYPE) || (!((DatetimeType&) operand).containsField (REC_DATE_YEAR)) || (!((DatetimeType&) operand).containsField (REC_DATE_MONTH)) || (!((DatetimeType&) operand).containsField (REC_DATE_DAY))) { // Need to reword: // 4072 The operand of function DAYOFWEEK must be a Datetime containing a DAY field. *CmpCommon::diags() << DgSqlCode(-4072) << DgString0(getTextUpper()) << DgString1("YEAR, MONTH and DAY"); return NULL; } // // Return the result. // const Int16 DisAmbiguate = 0; // added for 64bit project return new HEAP SQLNumeric(FALSE, 1, 0, DisAmbiguate, operand.supportsSQLnullLogical()); } // ----------------------------------------------------------------------- // member functions for class DynamicParam // ----------------------------------------------------------------------- const NAType *DynamicParam::synthesizeType() { // dynamic params are always nullable. 
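  // (Sketch of the intent only: SQLUnknown(TRUE) below is a nullable
  //  placeholder; the real type is assigned later, when coerceType() is
  //  invoked on the parameter from the expression that uses it.)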
return new HEAP SQLUnknown(TRUE); } // LCOV_EXCL_START - cnu const NAType *ExplodeVarchar::synthesizeType() { return getType(); } // LCOV_EXCL_STOP const NAType *Format::synthesizeType() { NAType * retType = NULL; retType = (NAType *)ItemExpr::synthesizeType(); return retType; } // ----------------------------------------------------------------------- // member functions for class RoutineParam // ----------------------------------------------------------------------- const NAType *RoutineParam::synthesizeType() { return getType(); } // ----------------------------------------------------------------------- // member functions for class Function -- a catchall for those funx which // don't have their own virtual synthType() // ----------------------------------------------------------------------- const NAType *Function::synthesizeType() { // Function derives directly from ItemExpr, so safe to do this const NAType *result = ItemExpr::synthesizeType(); if (0) { if (result->getTypeQualifier() == NA_CHARACTER_TYPE) { Int32 n = getNumCHARACTERArgs(this); if (n > 1) { #ifndef NDEBUG // if (NCHAR_DEBUG > 0) { NAString unparsed(CmpCommon::statementHeap()); unparse(unparsed, DEFAULT_PHASE, USER_FORMAT_DELUXE); cerr << "## FUNCTION " << (Int32)getOperatorType() << " (" << n << " char-type args)\t" << unparsed << "\t *might* not be computing its result collation/coercibility properly and/or pushing co/co back down to its children..." << endl; // Also emit a warning so you can catch this in regression results unparsed.prepend("## FUNCTION, co/co issue: "); *CmpCommon::diags() << DgSqlCode(+1110) << DgString0(unparsed); } #endif } // WE DO *NOT* SYNTHESIZE A RESULT CoAndCo here // nor push it back down to children via propagateCoAndCoToChildren, // because that might not be the right thing. // If the above CERR msg appears and you see a problem, // you should add a synthesizeType method for that particular // Function-derived class. // CharType *ct = (CharType *)result; // propagateCoAndCoToChildren(this, // ct->getCollation(), ct->getCoercibility()); } } return result; } // ----------------------------------------------------------------------- // member functions for class Hash // ----------------------------------------------------------------------- const NAType *Hash::synthesizeType() { // result of hash function is always a non-nullable, unsigned 32 bit integer return new HEAP SQLInt(FALSE, FALSE); } // ----------------------------------------------------------------------- // member functions for class HashComb // ----------------------------------------------------------------------- NABoolean HashCommon::areChildrenExactNumeric(Lng32 left, Lng32 right) { const NAType &typ1 = child(left)->getValueId().getType(); const NAType &typ2 = child(right)->getValueId().getType(); if (typ1.getTypeQualifier() != NA_NUMERIC_TYPE || typ2.getTypeQualifier() != NA_NUMERIC_TYPE) return FALSE; // now it's safe to cast the types to numeric type const NumericType &ntyp1 = (NumericType &) typ1; const NumericType &ntyp2 = (NumericType &) typ2; return (ntyp1.isExact() AND ntyp2.isExact() AND ntyp1.binaryPrecision() AND ntyp2.binaryPrecision() AND ntyp1.getPrecision() == ntyp2.getPrecision() AND ntyp1.isUnsigned() AND ntyp2.isUnsigned()); } const NAType *HashComb::synthesizeType() { // Both dividend and divisor must be exact numeric with scale 0. // The result has values from 0 to <divisor> - 1 and therefore // can always fit into the data type of the divisor. 
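  // (Illustration only: in practice both children are the unsigned 32-bit
  //  values produced by Hash or other HashComb nodes, and the combined
  //  result below is again a non-nullable unsigned 32-bit SQLInt.)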
// HashComb is an internal operator and errors are fatal CMPASSERT(areChildrenExactNumeric(0, 1)); // result of hashcomb function is always a non-nullable, // unsigned 32 bit integer return new HEAP SQLInt(FALSE, FALSE); } const NAType *HiveHashComb::synthesizeType() { // Both dividend and divisor must be exact numeric with scale 0. // The result has values from 0 to <divisor> - 1 and therefore // can always fit into the data type of the divisor. // HashComb is an internal operator and errors are fatal CMPASSERT(areChildrenExactNumeric(0, 1)); // result of hashcomb function is always a non-nullable, // unsigned 32 bit integer return new HEAP SQLInt(FALSE, FALSE); } // ----------------------------------------------------------------------- // member functions for class HashDistHash // Hash Function used by Hash Partitioning. This function cannot change // once Hash Partitioning is released! Defined for all data types, // returns a 32 bit non-nullable hash value for the data item. // ----------------------------------------------------------------------- const NAType *HashDistPartHash::synthesizeType() { // result of hash function is always a non-nullable, unsigned 32 bit integer return new HEAP SQLInt(FALSE, FALSE); } // ----------------------------------------------------------------------- // member functions for class HiveHash // ----------------------------------------------------------------------- const NAType *HiveHash::synthesizeType() { // result of hivehash function is always a non-nullable, unsigned 32 bit integer return new HEAP SQLInt(FALSE, FALSE); } // ----------------------------------------------------------------------- // member functions for class HashDistHashComb // This function is used to combine two hash values to produce a new // hash value. Used by Hash Partitioning. This function cannot change // once Hash Partitioning is released! Defined for all data types, // returns a 32 bit non-nullable hash value for the data item. // ----------------------------------------------------------------------- const NAType *HashDistPartHashComb::synthesizeType() { // Both dividend and divisor must be exact numeric with scale 0. // The result has values from 0 to <divisor> - 1 and therefore // can always fit into the data type of the divisor. const NAType &typ1 = child(0)->getValueId().getType(); const NAType &typ2 = child(1)->getValueId().getType(); // HashDistHashComb is an internal operator and errors are fatal CMPASSERT(typ1.getTypeQualifier() == NA_NUMERIC_TYPE AND typ2.getTypeQualifier() == NA_NUMERIC_TYPE); // now it's safe to cast the types to numeric type const NumericType &ntyp1 = (NumericType &) typ1; const NumericType &ntyp2 = (NumericType &) typ2; // Make sure both operands are SQLInt. // CMPASSERT(ntyp1.getFSDatatype() == REC_BIN32_UNSIGNED AND ntyp2.getFSDatatype() == REC_BIN32_UNSIGNED AND ntyp1.isAnyUnsignedInt() AND ntyp2.isAnyUnsignedInt() AND ntyp1.getPrecision() == SQL_UINT_PRECISION AND ntyp2.getPrecision() == SQL_UINT_PRECISION); // result of hashcomb function is always a non-nullable, // unsigned 32 bit integer return new HEAP SQLInt(FALSE, FALSE); } // ----------------------------------------------------------------------- // member functions for class ReplaceNull // ----------------------------------------------------------------------- const NAType *ReplaceNull::synthesizeType() { // result of ReplaceNull is always the same as the second argument // except it is non nullable. 
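  // (Descriptive note: as the code below currently stands, the copied type
  //  keeps the child's nullability, since the setNullable(FALSE) call is
  //  commented out; the result is simply a heap copy of the second
  //  argument's type.)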
ValueId childId = child(1)->getValueId(); NAType *newType = childId.getType().newCopy(HEAP); //newType->setNullable(FALSE); return newType; } // ----------------------------------------------------------------------- // member functions for class JulianTimestamp // ----------------------------------------------------------------------- const NAType *JulianTimestamp::synthesizeType() { // // Type cast any params. // ValueId vid = child(0)->getValueId(); SQLTimestamp timestampType; vid.coerceType(timestampType); // // Check that the operands are compatible. // const NAType& operand = vid.getType(); if (operand.getTypeQualifier() != NA_DATETIME_TYPE) { // 4071 The operand of a JULIANTIMESTAMP function must be a datetime. *CmpCommon::diags() << DgSqlCode(-4071) << DgString0(getTextUpper()); return NULL; } // // Return the result. // return new HEAP SQLLargeInt(TRUE, operand.supportsSQLnullLogical()); } // ----------------------------------------------------------------------- // member functions for class StatementExecutionCount // ----------------------------------------------------------------------- const NAType * StatementExecutionCount::synthesizeType() { return new HEAP SQLLargeInt(TRUE,FALSE); } // ----------------------------------------------------------------------- // member functions for class CurrentTransId // ----------------------------------------------------------------------- const NAType * CurrentTransId::synthesizeType() { return new HEAP SQLLargeInt(TRUE,FALSE); } // ----------------------------------------------------------------------- // member functions for class BitOperFunc // ----------------------------------------------------------------------- const NAType *BitOperFunc::synthesizeType() { NABoolean nullable = FALSE; for (Int32 i = 0; i < getArity(); i++) { // type cast any params ValueId vid = child(i)->getValueId(); // untyped param operands are typed as Int32 Unsigned. SQLInt dp(FALSE); vid.coerceType(dp, NA_NUMERIC_TYPE); const NAType &typ = vid.getType(); if (typ.getTypeQualifier() != NA_NUMERIC_TYPE) { // 4045 operand must be numeric. // 4052 2nd operand must be numeric. // 4059 1st operand must be numeric. if (getArity() == 1) *CmpCommon::diags() << DgSqlCode(-4045) << DgString0(getTextUpper()); else { if (i == 0) *CmpCommon::diags() << DgSqlCode(-4059) << DgString0(getTextUpper()); else if (i == 1) *CmpCommon::diags() << DgSqlCode(-4052) << DgString0(getTextUpper()); else *CmpCommon::diags() << DgSqlCode(-4053) << DgString0(getTextUpper()); } return NULL; } if (typ.supportsSQLnullLogical()) nullable = TRUE; } const NAType *result = NULL; switch (getOperatorType()) { case ITM_BITAND: case ITM_BITOR: case ITM_BITXOR: { CMPASSERT(getArity() == 2); // now it's safe to cast the types to numeric type const NumericType &ntyp1 = (NumericType &) child(0)->getValueId().getType(); const NumericType &ntyp2 = (NumericType &) child(1)->getValueId().getType(); if (NOT ntyp1.isExact() OR NOT ntyp2.isExact() OR ntyp1.isBigNum() OR ntyp2.isBigNum()) { // 4046 BIT operation is only defined for exact numeric types. *CmpCommon::diags() << DgSqlCode(-4046) << DgString0(getTextUpper()); return NULL; } if (ntyp1.getScale() != 0 OR ntyp2.getScale() != 0) { // 4047 Arguments of BIT operation must both have a scale of 0. 
*CmpCommon::diags() << DgSqlCode(-4047) << DgString0(getTextUpper()); return NULL; } UInt32 flags = NAType::MAKE_UNION_RESULT_BINARY; result = ntyp1.synthesizeType( SYNTH_RULE_UNION, ntyp1, ntyp2, HEAP, &flags); } break; case ITM_BITNOT: { CMPASSERT(getArity() == 1); // now it's safe to cast the types to numeric type const NumericType &ntyp1 = (NumericType &) child(0)->getValueId().getType(); if (NOT ntyp1.isExact() OR ntyp1.isBigNum()) { // 4046 BIT operation is only defined for exact numeric types. *CmpCommon::diags() << DgSqlCode(-4046) << DgString0(getTextUpper()); return NULL; } if (ntyp1.getScale() != 0) { // 4047 Arguments of BIT operation must both have a scale of 0. *CmpCommon::diags() << DgSqlCode(-4047) << DgString0(getTextUpper()); return NULL; } // result of BITNOT is the same type as the operand, if the // operand is binary. // If operand is decimal, then convert it to equivalent binary. NAType * result1 = NULL; if (ntyp1.binaryPrecision()) result1 = (NumericType*)ntyp1.newCopy(HEAP); else { const Int16 DisAmbiguate = 0; result1 = new HEAP SQLNumeric(NOT ntyp1.isUnsigned(), ntyp1.getPrecision(), ntyp1.getScale(), DisAmbiguate); // added for 64bit proj. } result1->setNullable(nullable); result = result1; } break; case ITM_BITEXTRACT: { CMPASSERT(getArity() == 3); // now it's safe to cast the types to numeric type const NumericType &ntyp1 = (NumericType &) child(0)->getValueId().getType(); const NumericType &ntyp2 = (NumericType &) child(1)->getValueId().getType(); const NumericType &ntyp3 = (NumericType &) child(2)->getValueId().getType(); if (ntyp1.isBigNum() || ntyp1.isDecimal()) { // 4046 BIT operation is only defined for exact numeric types. *CmpCommon::diags() << DgSqlCode(-4046) << DgString0(getTextUpper()); return NULL; } if ((NOT ntyp2.isExact()) || (NOT ntyp3.isExact())) { // 4046 BIT operation is only defined for exact numeric types. *CmpCommon::diags() << DgSqlCode(-4046) << DgString0(getTextUpper()); return NULL; } if ((ntyp2.getScale() != 0) || (ntyp3.getScale() != 0)) { // 4047 Arguments of BIT operation must both have a scale of 0. *CmpCommon::diags() << DgSqlCode(-4047) << DgString0(getTextUpper()); return NULL; } // result can contain as many bits as the length of the operand. // Make the result an Int32 or Int64. NAType * result1 = NULL; if (ntyp1.getNominalSize() <= 9) result = new HEAP SQLInt(TRUE, nullable); else result = new HEAP SQLLargeInt(TRUE, nullable); } break; default: { // LCOV_EXCL_START - rfi // 4000 Internal Error. This function not supported. *CmpCommon::diags() << DgSqlCode(-4000); result = NULL; // LCOV_EXCL_STOP } break; } return result; } // ----------------------------------------------------------------------- // member functions for class MathFunc // ----------------------------------------------------------------------- const NAType *MathFunc::synthesizeType() { CMPASSERT(getArity() <= 2); NABoolean nullable = FALSE; for (Int32 i = 0; i < getArity(); i++) { // type cast any params ValueId vid = child(i)->getValueId(); SQLDoublePrecision dp(TRUE); vid.coerceType(dp, NA_NUMERIC_TYPE); const NAType &typ = vid.getType(); if (typ.getTypeQualifier() != NA_NUMERIC_TYPE) { // 4045 operand must be numeric. // 4052 2nd operand must be numeric. // 4059 1st operand must be numeric. if (getArity() == 1) *CmpCommon::diags() << DgSqlCode(-4045) << DgString0(getTextUpper()); else *CmpCommon::diags() << DgSqlCode(i ? 
-4052 : -4059) << DgString0(getTextUpper()); return NULL; } if (typ.supportsSQLnullLogical()) nullable = TRUE; } const NAType *result = NULL; switch (getOperatorType()) { case ITM_ABS: case ITM_ACOS: case ITM_ASIN: case ITM_ATAN: case ITM_ATAN2: case ITM_CEIL: case ITM_COS: case ITM_COSH: case ITM_DEGREES: case ITM_EXP: case ITM_EXPONENT: case ITM_FLOOR: case ITM_LOG: case ITM_LOG10: case ITM_PI: case ITM_POWER: case ITM_RADIANS: case ITM_ROUND: case ITM_SCALE_TRUNC: case ITM_SIN: case ITM_SINH: case ITM_SQRT: case ITM_TAN: case ITM_TANH: { result = new HEAP SQLDoublePrecision(nullable); } break; default: { // LCOV_EXCL_START - rfi // 4000 Internal Error. This function not supported. *CmpCommon::diags() << DgSqlCode(-4000); result = NULL; // LCOV_EXCL_STOP } break; } return result; } // ----------------------------------------------------------------------- // member functions for class Modulus // ----------------------------------------------------------------------- const NAType *Modulus::synthesizeType() { // The expression is <dividend> mod <divisor>. // Both dividend and divisor must be exact numeric with scale 0. // The result has values from 0 to <divisor> - 1 and therefore // can always fit into the data type of the divisor. // // Type cast any params. // ValueId vid1 = child(0)->getValueId(); ValueId vid2 = child(1)->getValueId(); SQLInt si(TRUE); vid1.coerceType(si, NA_NUMERIC_TYPE); vid2.coerceType(si, NA_NUMERIC_TYPE); const NAType &typ1 = child(0)->getValueId().getType(); const NAType &typ2 = child(1)->getValueId().getType(); if (typ1.getTypeQualifier() != NA_NUMERIC_TYPE OR typ2.getTypeQualifier() != NA_NUMERIC_TYPE) { // 4046 Modulus function is only defined for exact numeric types. *CmpCommon::diags() << DgSqlCode(-4046) << DgString0(getTextUpper()); return NULL; } // now it's safe to cast the types to numeric type const NumericType &ntyp1 = (NumericType &) typ1; const NumericType &ntyp2 = (NumericType &) typ2; if (NOT ntyp1.isExact() OR NOT ntyp2.isExact()) { // 4046 Modulus function is only defined for exact numeric types. *CmpCommon::diags() << DgSqlCode(-4046) << DgString0(getTextUpper()); return NULL; } if (ntyp1.getScale() != 0 OR ntyp2.getScale() != 0) { // 4047 Arguments of modulus function must both have a scale of 0. *CmpCommon::diags() << DgSqlCode(-4047) << DgString0(getTextUpper()); return NULL; } if (ntyp1.decimalPrecision() && ntyp1.getPrecision() > MAX_NUMERIC_PRECISION) { // 3037: precision of dividend cannot exceed 18. *CmpCommon::diags() << DgSqlCode(-3037) << DgString0(child(0)->getTextUpper()); return NULL; } if (ntyp2.decimalPrecision() && ntyp2.getPrecision() > MAX_NUMERIC_PRECISION) { // 3037: precision of divisor cannot exceed 18. *CmpCommon::diags() << DgSqlCode(-3037) << DgString0(child(1)->getTextUpper()); return NULL; } NumericType * result = (NumericType*)ntyp2.newCopy(HEAP); result->setNullable(typ1.supportsSQLnullLogical() || typ2.supportsSQLnullLogical()); if (ntyp1.isUnsigned()) result->makeUnsigned(); else result->makeSigned(); return result; } // ----------------------------------------------------------------------- // member functions for class Repeat // ----------------------------------------------------------------------- const NAType *Repeat::synthesizeType() { // The expression is REPEAT(<value1>, <value2>) // The result is string <value1> repeated <value2> times. // // Type cast any params. 
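  // (Illustration only: REPEAT('ab', 3) yields 'ababab'; when the repeat
  //  count is a positive constant, the code below sizes the result as the
  //  operand length times the count, rejecting fixed CHAR results larger
  //  than 100K bytes and capping VARCHAR results at 100K bytes.)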
// ValueId vid1 = child(0)->getValueId(); ValueId vid2 = child(1)->getValueId(); vid1.coerceType(NA_CHARACTER_TYPE); const SQLInt t(FALSE); vid2.coerceType(t, NA_NUMERIC_TYPE); const NAType &typ1 = child(0)->getValueId().getType(); if (typ1.getTypeQualifier() != NA_CHARACTER_TYPE) { // 4051 Operand 1 must be character. *CmpCommon::diags() << DgSqlCode(-4051) << DgString0(getTextUpper()); return NULL; } const NAType &typ2 = child(1)->getValueId().getType(); if (typ2.getTypeQualifier() != NA_NUMERIC_TYPE) { // 4052 Operand 2 must be numeric. *CmpCommon::diags() << DgSqlCode(-4052) << DgString0(getTextUpper()); return NULL; } const CharType &ctyp1 = (CharType &) typ1; // now it's safe to cast the type to numeric type const NumericType &ntyp2 = (NumericType &) typ2; if (ntyp2.getScale() != 0) { // 4047 Operand must be not have scale. *CmpCommon::diags() << DgSqlCode(-4047) << DgString0(getTextUpper()); return NULL; } if (! ntyp2.isExact()) { // 4046 Operand 2 must be exact. *CmpCommon::diags() << DgSqlCode(-4046) << DgString0(getTextUpper()); return NULL; } Int64 size_in_bytes; Int64 size_in_chars; // figure out the max length of result. NABoolean negate; if ((child(1)->getOperatorType() == ITM_CONSTANT) && (child(1)->castToConstValue(negate))) { ConstValue * cv = child(1)->castToConstValue(negate); Int64 repeatCount; if (! cv->isNull()) { if (cv->canGetExactNumericValue()) { repeatCount = cv->getExactNumericValue(); if (repeatCount <= 0) repeatCount = 1; } else { // 4116 The 2nd operand of REPEAT(o1, o2) is invalid *CmpCommon::diags() << DgSqlCode(-4116) << DgString0(getTextUpper()); return NULL; } } else { repeatCount = 1; } size_in_bytes = typ1.getNominalSize() * repeatCount; size_in_chars = ctyp1.getStrCharLimit() * repeatCount; // check size limit only for fixed character type if ( ! typ1.isVaryingLen() ) { if ( size_in_bytes > CONST_100K ) { *CmpCommon::diags() << DgSqlCode(-4129) << DgString0(getTextUpper()); return NULL; } } else // varchar. The nominal size of the result is // the min of (size, CONST_100K). { size_in_bytes = MINOF(CONST_100K, size_in_bytes); size_in_chars = size_in_bytes / CharInfo::minBytesPerChar(ctyp1.getCharSet()); } } else if (getMaxLength() > -1) { size_in_bytes = MINOF(CmpCommon::getDefaultNumeric(TRAF_MAX_CHARACTER_COL_LENGTH), getMaxLength() * typ1.getNominalSize()); size_in_chars = size_in_bytes / CharInfo::minBytesPerChar(ctyp1.getCharSet()); } else { // Assign some arbitrary max result size since we can't // figure out the actual max size. size_in_bytes = CmpCommon::getDefaultNumeric(TRAF_MAX_CHARACTER_COL_LENGTH); size_in_chars = size_in_bytes / CharInfo::minBytesPerChar(ctyp1.getCharSet()); } NAType *result = new (HEAP) SQLVarChar(CharLenInfo((Lng32)size_in_chars, (Lng32)size_in_bytes), (typ1.supportsSQLnullLogical() || typ2.supportsSQLnullLogical()), ctyp1.isUpshifted(), ctyp1.isCaseinsensitive(), ctyp1.getCharSet(), ctyp1.getCollation(), ctyp1.getCoercibility()); return result; } // ----------------------------------------------------------------------- // member functions for class Replace // ----------------------------------------------------------------------- const NAType *Replace::synthesizeType() { // // Type cast any params. 
// ValueId vid1 = child(0)->getValueId(); ValueId vid2 = child(1)->getValueId(); ValueId vid3 = child(2)->getValueId(); CharInfo::CharSet new_cs = getFirstKnownCharSet(vid1, vid2, vid3); vid1.coerceType(NA_CHARACTER_TYPE, new_cs); vid2.coerceType(NA_CHARACTER_TYPE, new_cs); vid3.coerceType(NA_CHARACTER_TYPE, new_cs); const NAType *typ1 = &(child(0)->getValueId().getType()); const NAType *typ2 = &(child(1)->getValueId().getType()); const NAType *typ3 = &(child(2)->getValueId().getType()); /* Soln-10-050426-7137 begin */ NAString defVal; NABoolean charsetInference = (CmpCommon::getDefault(INFER_CHARSET, defVal) == DF_ON); if(charsetInference) { const CharType *replaceSource = (CharType *)typ1; const CharType *replaceChar= (CharType *)typ2; const CharType *replacingChar = (CharType *)typ3; const CharType* desiredType = CharType::findPushDownCharType(getDefaultCharSet, replaceSource, replaceChar, replacingChar, 0); if ( desiredType ) { // push down charset and re-synthesize vid1.coerceType((NAType&)*desiredType, NA_CHARACTER_TYPE); vid2.coerceType((NAType&)*desiredType, NA_CHARACTER_TYPE); vid3.coerceType((NAType&)*desiredType, NA_CHARACTER_TYPE); // get the newly pushed-down types typ1 = (CharType*)&vid1.getType(); typ2 = (CharType*)&vid2.getType(); typ3 = (CharType*)&vid3.getType(); } } /* Soln-10-050426-7137 end */ // typ3 does not need to be comparable, only compatible! // if (typ1->getTypeQualifier() != NA_CHARACTER_TYPE OR NOT typ1->isCompatible(*typ3)) { // 4064 The operands of a $0 function must be compatible character types. // ##Should say "The FIRST and THIRD operands must be compatible..." *CmpCommon::diags() << DgSqlCode(-4064) << DgString0(getTextForError());; return NULL; } if (NOT typ1->isComparable(*typ2, this)) { // 4063 The operands of a $0 function must be comparable character types. // ##Should say "The FIRST and SECOND operands must be comparable..." *CmpCommon::diags() << DgSqlCode(-4063) << DgString0(getTextForError()); return NULL; } const CharType *ctyp1 = (CharType *)typ1; Lng32 minLength_in_bytes = ctyp1->getDataStorageSize(); Lng32 minLength_in_chars = ctyp1->getStrCharLimit(); Lng32 ctype2Length_in_bytes = ((CharType *)typ2)->getDataStorageSize(); Lng32 ctype3Length_in_bytes = ((CharType *)typ3)->getDataStorageSize(); Lng32 ctype2Length_in_chars = ((CharType *)typ2)->getStrCharLimit(); Lng32 ctype3Length_in_chars = ((CharType *)typ3)->getStrCharLimit(); if ( ctype2Length_in_bytes == 0 ) { *CmpCommon::diags() << DgSqlCode(-8428) << DgString0(getTextForError()); return NULL; } // Fix for CR 10-000724-1369 // figure out result size. Lng32 size_in_bytes = minLength_in_bytes;; Lng32 size_in_chars = minLength_in_chars; // NOTE: We are trying to find the MAX result size! 
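  // (For example, a sketch only: REPLACE(col CHAR(8), 'a', 'bb') can at
  //  most double the data, so the estimate below allows 16 characters;
  //  when the search string is longer than the replacement, the original
  //  size is already the maximum.)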
if ( ((CharType *)typ2)->isVaryingLen() ) { ctype2Length_in_chars = 1; // Use *minimum* possible length ctype2Length_in_bytes = 1; // Use *minimum* possible length } if ( ctyp1->getCharSet() == CharInfo::UNICODE ) { if (ctype2Length_in_chars < ctype3Length_in_chars) { Int32 maxOccurrences = size_in_chars / ctype2Length_in_chars; Int32 remainder = size_in_chars - (maxOccurrences * ctype2Length_in_chars); size_in_chars = maxOccurrences * ctype3Length_in_chars + remainder; size_in_bytes = size_in_chars * ctyp1->getBytesPerChar(); } } else { if (ctype2Length_in_chars < ctype3Length_in_chars) { Int32 maxOccurrences = size_in_chars / ctype2Length_in_chars; Int32 remainder = size_in_chars - (maxOccurrences * ctype2Length_in_chars); size_in_chars = maxOccurrences * ctype3Length_in_chars + remainder; } if (ctype2Length_in_bytes < ctype3Length_in_bytes) { Int32 maxOccurrences = size_in_bytes / ctype2Length_in_bytes; Int32 remainder = size_in_bytes - (maxOccurrences * ctype2Length_in_bytes); size_in_bytes = maxOccurrences * ctype3Length_in_bytes + remainder; } } if ( size_in_chars > CONST_32K ) size_in_chars = CONST_32K ; if ( size_in_bytes > CONST_32K ) size_in_bytes = CONST_32K ; CharLenInfo CLInfo( size_in_chars, size_in_bytes ); NAType *result = new (HEAP) SQLVarChar(CLInfo, (typ1->supportsSQLnullLogical() || typ2->supportsSQLnullLogical() || typ3->supportsSQLnullLogical()), ctyp1->isUpshifted(), ctyp1->isCaseinsensitive(), ctyp1->getCharSet(), ctyp1->getCollation(), ctyp1->getCoercibility()); return result; } // ----------------------------------------------------------------------- // member functions for class HashDistrib // ----------------------------------------------------------------------- const NAType *HashDistrib::synthesizeType() { // Both operands (hash of the partitioning keys and number of partitions) // must be exact numeric with scale 0. The result has values from 0 to // <number of partitions> - 1 and therefore can always fit into the data // type of the number of partitions. const NAType &typ1 = child(0)->getValueId().getType(); const NAType &typ2 = child(1)->getValueId().getType(); if (typ1.getTypeQualifier() != NA_NUMERIC_TYPE OR typ2.getTypeQualifier() != NA_NUMERIC_TYPE) { // 4045 Progressive Distribution function is only defined // for numeric types. *CmpCommon::diags() << DgSqlCode(-4045); return NULL; } // now it's safe to cast the types to numeric type const NumericType &ntyp1 = (NumericType &) typ1; const NumericType &ntyp2 = (NumericType &) typ2; if (NOT ntyp1.isExact() OR NOT ntyp2.isExact()) { // 4046 Progessive Distribution function is only defined for // exact numeric types. *CmpCommon::diags() << DgSqlCode(-4046); return NULL; } if (ntyp1.getScale() != 0 OR ntyp2.getScale() != 0) { // 4047 Arguments of Progessive Distribution function must both // have a scale of 0. *CmpCommon::diags() << DgSqlCode(-4047); return NULL; } NAType *result = typ2.newCopy(HEAP); // the only thing the LHS contributes is that the result may become nullable if (typ1.supportsSQLnullLogical()) result->setNullable(TRUE); return result; } const NAType *ProgDistribKey::synthesizeType() { // return: Large Int. return new HEAP SQLLargeInt(TRUE, FALSE); } // ----------------------------------------------------------------------- // member functions for class PAGroup // ----------------------------------------------------------------------- const NAType *PAGroup::synthesizeType() { // Both operands (pre-grouped number of partitions and number of groups) // must be exact numeric with scale 0. 
The result has values from 0 to // <number of groups> - 1 and therefore can always fit into the data // type of the number of groups. const NAType &typ1 = child(0)->getValueId().getType(); const NAType &typ2 = child(1)->getValueId().getType(); const NAType &typ3 = child(2)->getValueId().getType(); if (typ1.getTypeQualifier() != NA_NUMERIC_TYPE OR typ2.getTypeQualifier() != NA_NUMERIC_TYPE OR typ3.getTypeQualifier() != NA_NUMERIC_TYPE) { // 4045 PA Group function is only defined // for numeric types. *CmpCommon::diags() << DgSqlCode(-4045); return NULL; } // now it's safe to cast the types to numeric type const NumericType &ntyp1 = (NumericType &) typ1; const NumericType &ntyp2 = (NumericType &) typ2; const NumericType &ntyp3 = (NumericType &) typ3; if (NOT ntyp1.isExact() OR NOT ntyp2.isExact() OR NOT ntyp3.isExact()) { // 4046 PA Group function is only defined for // exact numeric types. *CmpCommon::diags() << DgSqlCode(-4046); return NULL; } if (ntyp1.getScale() != 0 OR ntyp2.getScale() != 0 OR ntyp3.getScale() != 0) { // 4047 Arguments of the PA Group function must both // have a scale of 0. *CmpCommon::diags() << DgSqlCode(-4047); return NULL; } NAType *result = typ1.newCopy(HEAP); return result; } // ----------------------------------------------------------------------- // member functions for class Encode // ----------------------------------------------------------------------- const NAType *CompEncode::synthesizeType() { ValueId vid = child(0)->getValueId(); const NAType &src = vid.getType(); // result of encode function is a non-nullable fixed char. // Result is not nullable // because null values are encoded too. // Length of encode function is equal to length_ field, if it // is set to a positive number. Otherwise, it is equal to the // total size of operand. Lng32 keyLength = 0; NABoolean supportsSQLnull = FALSE; if (regularNullability_) { // should not be common for CompEncode, preserve nullability of child keyLength = src.getNominalSize(); supportsSQLnull = src.supportsSQLnull(); } else { if (length_ < 0) // common case for encode, include prefix fields but leave // out the var len header (if any), which is not order-preserving keyLength = src.getTotalSize() - src.getVarLenHdrSize(); else keyLength = length_; } if (src.getTypeQualifier() != NA_CHARACTER_TYPE) { return new HEAP SQLChar(keyLength, supportsSQLnull); } else { const CharType &cSrc = (CharType&)src; CharInfo::Collation collation = cSrc.getCollation(); // set casesensitivity of encoding based on child's type. // This may get overwritten by the caller (for example, to // build a key for a predicate of the form: where keycol = 'val', // both sides of the predicate must be caseinsensitive. 
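    // (Sketch only: when the source column is case-insensitive, the encode
    //  node is marked case-insensitive below, so that values differing only
    //  in case produce the same encoded key for equality predicates.)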
if (cSrc.isCaseinsensitive()) { setCaseinsensitiveEncode(TRUE); } setEncodedCollation(cSrc.getCollation()); if (CollationInfo::isSystemCollation(collation)) { keyLength = CompEncode::getEncodedLength( collation, collationType_, child(0)->getValueId().getType().getNominalSize(), cSrc.supportsSQLnull()); switch (collationType_) { case CollationInfo::Sort: { // in this case the encode is non nullable if not regularNullability return new HEAP SQLChar(keyLength, supportsSQLnull); } case CollationInfo::Compare: { return new HEAP SQLChar(keyLength, cSrc.supportsSQLnull()); } case CollationInfo::Search: { return new HEAP SQLVarChar(keyLength, cSrc.supportsSQLnull()); } default: { CMPASSERT(0); return NULL; } } } else { return new HEAP SQLChar(keyLength, supportsSQLnull); } } } const NAType *CompDecode::synthesizeType() { if (unencodedType_) return unencodedType_; else return CompEncode::synthesizeType(); } // ----------------------------------------------------------------------- // member functions for class Extract // ----------------------------------------------------------------------- #pragma nowarn(1506) // warning elimination const NAType *Extract::synthesizeType() { // Assert that we are bound, or created by Generator, so we have type info. ValueId vid = child(0)->getValueId(); CMPASSERT(vid != NULL_VALUE_ID); const DatetimeIntervalCommonType &dti = (DatetimeIntervalCommonType &)vid.getType(); NABuiltInTypeEnum type = dti.getTypeQualifier(); if (type != NA_DATETIME_TYPE && type != NA_INTERVAL_TYPE) { // 4036 The source field must be of DateTime or Interval type. *CmpCommon::diags() << DgSqlCode(-4036); if (getFieldFunction()) { // 4062 The preceding error actually occurred in function $0~String0. *CmpCommon::diags() << DgSqlCode(-4062) << DgString0(dti.getFieldName(getExtractField())); } return NULL; } // ANSI 6.6 SR 3a. enum rec_datetime_field extractStartField = getExtractField(); enum rec_datetime_field extractEndField = extractStartField; if (extractStartField > REC_DATE_MAX_SINGLE_FIELD) { // YEARQUARTER, YEARMONTH, or YEARWEEK extractStartField = REC_DATE_YEAR; if (extractEndField > REC_DATE_YEARMONTH_EXTRACT) extractEndField = REC_DATE_DAY; // extracting week requires the day else extractEndField = REC_DATE_MONTH; // months/quarters need only the month } if (dti.getStartField() > extractStartField || dti.getEndField() < extractEndField || !dti.isSupportedType()) { // 4037 cannot extract field from type *CmpCommon::diags() << DgSqlCode(-4037) << DgString0(dti.getFieldName(getExtractField())) << DgString1(dti.getTypeSQLname(TRUE /*terse*/)); return NULL; } // ANSI 6.6 SR 4. Precision is implementation-defined: // EXTRACT(YEAR from datetime): result precision is 4 + scale // EXTRACT(other from datetime): result precision is 2 + scale // EXTRACT(startfield from interval): result precision is leading prec + scal // EXTRACT(other from interval): result precision is 2 + scale // where scale is 0 if extract field is not SECOND, else at least fract prec. 
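  // Examples (illustration only):
  //   EXTRACT(YEAR  FROM DATE '2016-05-01')          -> NUMERIC(4)
  //   EXTRACT(MONTH FROM DATE '2016-05-01')          -> NUMERIC(2)
  //   EXTRACT(SECOND FROM a TIMESTAMP(6) value)      -> NUMERIC(8,6)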
// Lng32 prec, scale = 0; if (type == NA_INTERVAL_TYPE && getExtractField() == dti.getStartField()) prec = dti.getLeadingPrecision(); else if (getExtractField() == REC_DATE_YEAR) prec = 4; // YEAR field can be 9999 max else if (getExtractField() == REC_DATE_YEARQUARTER_EXTRACT || getExtractField() == REC_DATE_YEARQUARTER_D_EXTRACT) prec = 5; // YEARQUARTER is yyyyq else if (getExtractField() == REC_DATE_YEARMONTH_EXTRACT || getExtractField() == REC_DATE_YEARMONTH_D_EXTRACT) prec = 6; // YEARMONTH is yyyymm else if (getExtractField() == REC_DATE_YEARWEEK_EXTRACT || getExtractField() == REC_DATE_YEARWEEK_D_EXTRACT) prec = 6; // YEARMWEEK is yyyyww else prec = 2; // else max of 12, 31, 24, 59 if (getExtractField() == REC_DATE_SECOND) { prec += dti.getFractionPrecision(); scale += dti.getFractionPrecision(); } const Int16 disAmbiguate = 0; // added for 64bit project return new HEAP SQLNumeric(type == NA_INTERVAL_TYPE, /*allowNegValues*/ prec, scale, disAmbiguate, dti.supportsSQLnull()); } #pragma warn(1506) // warning elimination // ----------------------------------------------------------------------- // member functions for class Increment // ----------------------------------------------------------------------- const NAType *Increment::synthesizeType() { // It should get the type of its child return &child(0)->getValueId().getType(); } // ----------------------------------------------------------------------- // member functions for class Decrement // ----------------------------------------------------------------------- const NAType *Decrement::synthesizeType() { return &child(0)->getValueId().getType(); } // ----------------------------------------------------------------------- // member functions for class TriRelational // ----------------------------------------------------------------------- const NAType *TriRelational::synthesizeType() { ItemExprList exprList1(child(0).getPtr(), HEAP); ItemExprList exprList2(child(1).getPtr(), HEAP); if (exprList1.entries() != exprList2.entries()) { // 4042 The operands of a comparison predicate must be of equal degree. *CmpCommon::diags() << DgSqlCode(-4042); return NULL; } NABoolean allowsUnknown = FALSE; for (CollIndex i = 0; i < exprList1.entries(); i++) { // // Type cast any params. // ValueId vid1 = exprList1[i]->getValueId(); ValueId vid2 = exprList2[i]->getValueId(); vid1.coerceType(vid2.getType(), NA_NUMERIC_TYPE); vid2.coerceType(vid1.getType()); // // Check that the operands are compatible. // const NAType& operand1 = vid1.getType(); const NAType& operand2 = vid2.getType(); if (NOT operand1.isCompatible(operand2)) { // 4041 comparison between these two types is not allowed emitDyadicTypeSQLnameMsg(-4041, operand1, operand2); return NULL; } allowsUnknown = allowsUnknown OR operand1.supportsSQLnullLogical() OR operand2.supportsSQLnullLogical(); } // // is the third operand a boolean? // if (child(2)->getValueId().getType().getTypeQualifier() != NA_BOOLEAN_TYPE) { // 4048 third arg of ternary comparison must be boolean *CmpCommon::diags() << DgSqlCode(-4048) << DgString0(child(2)->getValueId().getType().getTypeSQLname(TRUE /*terse*/)); return NULL; } // // Return the result. 
// return new HEAP SQLBoolean(allowsUnknown); } // ----------------------------------------------------------------------- // member functions for class RangeLookup // ----------------------------------------------------------------------- const NAType *RangeLookup::synthesizeType() { // the result is a signed 32 bit number return new HEAP SQLInt(TRUE,FALSE); } // ----------------------------------------------------------------------- // member functions for class HostVar // ----------------------------------------------------------------------- const NAType *HostVar::synthesizeType() { return getType(); } // ----------------------------------------------------------------------- // member functions for class InverseOrder // ----------------------------------------------------------------------- const NAType *InverseOrder::synthesizeType() { return &child(0)->getValueId().getType(); } // member functions for class Like // ----------------------------------------------------------------------- const NAType *Like::synthesizeType() { // // Type cast any params. // ValueId vid1 = child(0)->getValueId(); ValueId vid2 = child(1)->getValueId(); ValueId vid3; CharType * cType1 = 0; CharType * cType2 = 0; // if either side of LIKE was an untyped param, then assign it // the same casesensitive attr, collation, and character set as the other side. if ( vid1.getType().getTypeQualifier() != NA_UNKNOWN_TYPE && vid2.getType().getTypeQualifier() == NA_UNKNOWN_TYPE) { vid1.coerceType(NA_CHARACTER_TYPE); cType1 = (CharType*)&vid1.getType(); vid2.coerceType(NA_CHARACTER_TYPE, cType1->getCharSet()); cType2 = (CharType*)&vid2.getType(); cType2->setCollation(cType1->getCollation()); cType2->setCaseinsensitive(cType1->isCaseinsensitive()); } else if ( vid2.getType().getTypeQualifier() != NA_UNKNOWN_TYPE && vid1.getType().getTypeQualifier() == NA_UNKNOWN_TYPE) { vid2.coerceType(NA_CHARACTER_TYPE); cType2 = (CharType*)&vid2.getType(); vid1.coerceType(NA_CHARACTER_TYPE, cType2->getCharSet()); cType1 = (CharType*)&vid1.getType(); cType1->setCollation(cType2->getCollation()); cType1->setCaseinsensitive(cType2->isCaseinsensitive()); } else { vid1.coerceType(NA_CHARACTER_TYPE); vid2.coerceType(NA_CHARACTER_TYPE); } const NAType *typ1 = &vid1.getType(); const NAType *typ2 = &vid2.getType(); const NAType *typ3 = NULL; if (getArity() > 2) { // Escape clause was specified vid3 = child(2)->getValueId(); const SQLChar charType(1); vid3.coerceType(charType); typ3 = &vid3.getType(); } // 2/13/98: make sure like pattern and source types are comparable. const NAType& operand1 = vid1.getType(); const NAType& operand2 = vid2.getType(); const CharType *likeSource = (CharType*)&operand1; const CharType *likePat = (CharType*)&operand2; const CharType *escapeChar = ( getArity() > 2 ) ? (CharType*)&(vid3.getType()) : 0; NAString defVal; NABoolean charsetInference = (CmpCommon::getDefault(INFER_CHARSET, defVal) == DF_ON); if(charsetInference) { const CharType* desiredType = CharType::findPushDownCharType(getDefaultCharSet, likeSource, likePat, escapeChar, 0); if ( desiredType ) { // push down charset and re-synthesize vid1.coerceType((NAType&)*desiredType, NA_CHARACTER_TYPE); vid2.coerceType((NAType&)*desiredType, NA_CHARACTER_TYPE); if ( getArity() > 2 ) vid3.coerceType((NAType&)*desiredType, NA_CHARACTER_TYPE); // get the newly pushed-down types typ1 = &vid1.getType(); typ2 = &vid2.getType(); typ3 = ( getArity() > 2 ) ? &vid3.getType() : 0; } } // Check that the operands are comparable. 
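  // (Illustration only: if any operand is nullable, the result type
  //  synthesized below is a SQLBoolean that allows UNKNOWN; e.g. a NULL
  //  match value makes the LIKE predicate evaluate to UNKNOWN rather than
  //  TRUE or FALSE.)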
// if (NOT typ1->isComparable(*typ2, this, NAType::EmitErrIfAnyChar) OR (typ3 AND NOT typ1->isComparable(*typ3, this, NAType::EmitErrIfAnyChar)) OR typ1->getTypeQualifier() != NA_CHARACTER_TYPE) { // 4050 The operands of a LIKE predicate must be comparable character types. *CmpCommon::diags() << DgSqlCode(-4050) << DgString0("LIKE"); return NULL; } // If any of the arguments can be nullable then LIKE can evaluate to Unknown NABoolean allowsUnknown = typ1->supportsSQLnull() OR typ2->supportsSQLnull() OR (typ3 AND typ3->supportsSQLnull()); return new HEAP SQLBoolean(allowsUnknown); } // ----------------------------------------------------------------------- // member functions for classes Lower and Upper` // ----------------------------------------------------------------------- const NAType *Lower::synthesizeType() { // // Type cast any params. // ValueId vid = child(0)->getValueId(); vid.coerceType(NA_CHARACTER_TYPE); // // Check that the operands are compatible. // const NAType& operand = vid.getType(); if (operand.getTypeQualifier() != NA_CHARACTER_TYPE) { // 4043 The operand of a LOWER function must be character. *CmpCommon::diags() << DgSqlCode(-4043) << DgString0(getTextForError()); return NULL; } CharType *ct = (CharType *)&operand; if ( CharInfo::is_NCHAR_MP(ct->getCharSet()) ) { // LCOV_EXCL_START - mp // 3217: Character set KANJI/KSC5601 is not allowed in the LOWER function. *CmpCommon::diags() << DgSqlCode(-3217) << DgString0(CharInfo::getCharSetName(ct->getCharSet())) << DgString1("LOWER"); // LCOV_EXCL_STOP } if ((ct->isUpshifted()) || (ct->isCaseinsensitive())) { ct = (CharType *)ct->newCopy(HEAP); if (ct->isUpshifted()) ct->setUpshifted(FALSE); if (ct->isCaseinsensitive()) ct->setCaseinsensitive(TRUE); } // // For UTF8 strings, we must make the TYPE be a VARCHAR because there are certain // UCS2 characters (e.g. 0x0130) where the value of LOWER is actually fewer bytes in // length than the original character! // if (ct->getCharSet() == CharInfo::UTF8) { // NOTE: See comment near end of Upper::synthesizeType() for reason we don't multiply by 3 here. ct = new (HEAP) SQLVarChar(CharLenInfo(ct->getStrCharLimit(), (ct->getDataStorageSize())) ,ct->supportsSQLnull() ,ct->isUpshifted() ,ct->isCaseinsensitive() ,ct->getCharSet() ,ct->getCollation() ,ct->getCoercibility() ); } return ct; } const NAType *Upper::synthesizeType() { // // Type cast any params. // ValueId vid = child(0)->getValueId(); vid.coerceType(NA_CHARACTER_TYPE); // // Check that the operands are compatible. // const NAType& operand = vid.getType(); if (operand.getTypeQualifier() != NA_CHARACTER_TYPE) { // 4043 The operand of an UPPER function must be character. 
*CmpCommon::diags() << DgSqlCode(-4043) << DgString0(getTextForError()); return NULL; } CharType *ct = (CharType *)&operand; if ( CharInfo::is_NCHAR_MP(ct->getCharSet()) ) { // LCOV_EXCL_START - mp *CmpCommon::diags() << DgSqlCode(-3217) << DgString0(CharInfo::getCharSetName(ct->getCharSet())) << DgString1("UPPER"); // LCOV_EXCL_STOP } if (NOT ct->isUpshifted()) { ct = (CharType *)ct->newCopy(HEAP); ct->setUpshifted(TRUE); } if (ct->getCharSet() == CharInfo::UNICODE) { ct = new (HEAP) SQLVarChar(3*(ct->getStrCharLimit()) ,ct->supportsSQLnull() ,ct->isUpshifted() ,ct->isCaseinsensitive() ,ct->getCharSet() ,ct->getCollation() ,ct->getCoercibility() ); } if (ct->getCharSet() == CharInfo::UTF8) { // // NOTE: For some UCS2 characters, the UPPER function can produce *three* UCS2 characters // and for that reason, the UPPER function provides for 3 times as much output as the // input string is long. HOWEVER, such is never the case for the LOWER function. // ct = new (HEAP) SQLVarChar(CharLenInfo(3*ct->getStrCharLimit(), 3*(ct->getDataStorageSize())) ,ct->supportsSQLnull() ,ct->isUpshifted() ,ct->isCaseinsensitive() ,ct->getCharSet() ,ct->getCollation() ,ct->getCoercibility() ); } return ct; } // ----------------------------------------------------------------------- // member functions for class NATypeToItem // ----------------------------------------------------------------------- const NAType * NATypeToItem::synthesizeType() { return natype_pointer; }; const NAType* NATypeToItem::pushDownType(NAType& newType, enum NABuiltInTypeEnum defaultQualifier) { return &newType; } // ----------------------------------------------------------------------- // member functions for class OctetLength // ----------------------------------------------------------------------- const NAType *OctetLength::synthesizeType() { // // Type cast any params. // ValueId vid = child(0)->getValueId(); vid.coerceType(NA_CHARACTER_TYPE); // // Check that the operands are compatible. // const NAType& operand = vid.getType(); if (operand.getTypeQualifier() != NA_CHARACTER_TYPE) { // 4043 The operand of an OCTET_LENGTH function must be character. *CmpCommon::diags() << DgSqlCode(-4043) << DgString0(getTextUpper()); return NULL; } const CharType* charOperand = (CharType*)&(vid.getType()); NAString defVal; NABoolean charsetInference = (CmpCommon::getDefault(INFER_CHARSET, defVal) == DF_ON); if(charsetInference) { const CharType* desiredType = CharType::findPushDownCharType(getDefaultCharSet, charOperand, 0); if ( desiredType ) { // push down charset and re-synthesize vid.coerceType((NAType&)*desiredType, NA_CHARACTER_TYPE); // get the newly pushed-down types charOperand = (CharType*)&(vid.getType()); } } if ( charOperand -> getCharSet() == CharInfo::UnknownCharSet ) { *CmpCommon::diags() << DgSqlCode(-4127); return NULL; } // // Return the result. // return new HEAP SQLInt(FALSE // unsigned ,operand.supportsSQLnullLogical() ); } // ----------------------------------------------------------------------- // member functions for class PositionFunc // ----------------------------------------------------------------------- const NAType *PositionFunc::synthesizeType() { NABoolean JDBC = (CmpCommon::getDefault(JDBC_PROCESS) == DF_ON); if ((NOT JDBC) && (getArity() == 3)) { // third argument not supported for non-JDBC callers. *CmpCommon::diags() << DgSqlCode(-3131); return NULL; } else { // third argument is only supported for JDBC_PROCESS callers and // is ignored. 
This is done for WLS/JDBC project who only want // to not get a syntax error if a third argument is passed in. // Go figure. // They need this to get through some certification tests. } // // Type cast any params. // ValueId vid1 = child(0)->getValueId(); ValueId vid2 = child(1)->getValueId(); CharInfo::CharSet new_cs = getFirstKnownCharSet(vid1, vid2, vid2); vid1.coerceType(NA_CHARACTER_TYPE, new_cs); vid2.coerceType(NA_CHARACTER_TYPE, new_cs); const NAType *operand1 = &vid1.getType(); const NAType *operand2 = &vid2.getType(); const NAType *operand3 = NULL; if (getArity() == 3) { ValueId vid3 = child(2)->getValueId(); SQLInt si; vid3.coerceType(si, NA_NUMERIC_TYPE); operand3 = &vid3.getType(); } // // Check that the operands are comparable. // ##Hmm, Ansi 6.6 does NOT say they need to be comparable, // ##just compatible (same char repertoire, i.e. same charset), // ##but that must be a mistake (we need string =ity testing // ##to do POSITION, so the collations will need to be the same)... // /* if (NOT operand1.isComparable(operand2, this, NAType::EmitErrIfAnyChar) || operand1.getTypeQualifier() != NA_CHARACTER_TYPE || operand2.getTypeQualifier() != NA_CHARACTER_TYPE) { // 4063 The operands of a $0 function must be comparable character types. *CmpCommon::diags() << DgSqlCode(-4063) << DgString0(getTextForError()); return NULL; } */ if (operand1->getTypeQualifier() != NA_CHARACTER_TYPE OR operand2->getTypeQualifier() != NA_CHARACTER_TYPE) { // 4063 The operands of a POSITION function must be character. *CmpCommon::diags() << DgSqlCode(-4063) << DgString0(getTextForError()); return NULL; } if (operand3) { if (operand3->getTypeQualifier() != NA_NUMERIC_TYPE) { // 4053 The third operand of a POSITION function must be numeric. *CmpCommon::diags() << DgSqlCode(-4053) << DgString0(getTextUpper()); return NULL; } if (((NumericType*)operand3)->getScale() != 0) { // 4047 The third operand of a POSITION function must have a scale of 0. *CmpCommon::diags() << DgSqlCode(-4047) << DgString0(getTextUpper()); return NULL; } } // 1/5/98: make sure position pattern and source types are comparable. const CharType *posPat = (CharType*)operand1; const CharType *posSource = (CharType*)operand2; NAString defVal; NABoolean charsetInference = (CmpCommon::getDefault(INFER_CHARSET, defVal) == DF_ON); if(charsetInference) { // 9/24/98: charset inference const CharType* desiredType = CharType::findPushDownCharType(getDefaultCharSet, posPat, posSource, 0); if ( desiredType ) { // push down charset and re-synthesize vid1.coerceType((NAType&)*desiredType, NA_CHARACTER_TYPE); vid2.coerceType((NAType&)*desiredType, NA_CHARACTER_TYPE); // get the newly pushed-down types /* posPat = (CharType*)&(vid1.getType()); posSource = (CharType*)&(vid2.getType()); */ operand1 = &vid1.getType(); operand2 = &vid2.getType(); } } /* if ( ! (posPat->isComparable(*posSource, TRUE)) ) { // } */ if ( ! (operand1->isComparable(*operand2, this, NAType::EmitErrIfAnyChar)) ) { // 4063 The operands of a POSITION function must be character. *CmpCommon::diags() << DgSqlCode(-4063) << DgString0(getTextForError()); return NULL; } // // Return the result. // return new HEAP SQLInt(FALSE, // unsigned operand1->supportsSQLnullLogical() || operand2->supportsSQLnullLogical() ); } // ----------------------------------------------------------------------- // member functions for class Substring // ----------------------------------------------------------------------- const NAType *Substring::synthesizeType() { // // Type cast any params. 
// ValueId vid1 = child(0)->getValueId(); ValueId vid2 = child(1)->getValueId(); vid1.coerceType(NA_CHARACTER_TYPE); SQLInt si; vid2.coerceType(si, NA_NUMERIC_TYPE); if (getArity() == 3) { ValueId vid3 = child(2)->getValueId(); vid3.coerceType(si, NA_NUMERIC_TYPE); } const NAType *operand1 = &child(0)->getValueId().getType(); const NAType *operand2 = &child(1)->getValueId().getType(); const NAType *operand3 = NULL; if (getArity() == 3) { operand3 = &child(2)->getValueId().getType(); } if ((operand1->getTypeQualifier() != NA_CHARACTER_TYPE) && (operand1->getFSDatatype() != REC_CLOB)) { // 4051 The first operand of a SUBSTRING function must be character. *CmpCommon::diags() << DgSqlCode(-4051) << DgString0(getTextUpper()); return NULL; } if (operand2->getTypeQualifier() != NA_NUMERIC_TYPE) { // 4052 The second operand of a SUBSTRING function must be numeric. *CmpCommon::diags() << DgSqlCode(-4052) << DgString0(getTextUpper()); return NULL; } if (((NumericType*)operand2)->getScale() != 0) { // 4047 The second operand of a SUBSTRING function must have a scale of 0. *CmpCommon::diags() << DgSqlCode(-4047) << DgString0(getTextUpper()); return NULL; } if (operand3) { if (operand3->getTypeQualifier() != NA_NUMERIC_TYPE) { // 4053 The third operand of a SUBSTRING function must be numeric. *CmpCommon::diags() << DgSqlCode(-4053) << DgString0(getTextUpper()); return NULL; } if (((NumericType*)operand3)->getScale() != 0) { // 4047 The third operand of a SUBSTR function must have a scale of 0. *CmpCommon::diags() << DgSqlCode(-4047) << DgString0(getTextUpper()); return NULL; } } CharInfo::CharSet op1_cs = operand1->getCharSet(); /* ((operand1->getFSDatatype() == REC_CLOB) ? ((SQLClob*)operand1)->getCharSet() : ((CharType *)operand1)->getCharSet()); */ const CharType *charOperand = (CharType *) operand1; Lng32 maxLength_bytes = charOperand->getDataStorageSize(); Lng32 maxLength_chars = charOperand->getPrecisionOrMaxNumChars(); if ( maxLength_chars <= 0 ) // If unlimited maxLength_chars = maxLength_bytes / CharInfo::minBytesPerChar(op1_cs); NABoolean negate; Lng32 pos = 0; { // The position arg is allowed to be negative (see Ansi 6.7 GR 1). ConstValue *cv = child(1)->castToConstValue(negate); // adjust the max length for the result only if it is a positive start position. // solu 10-030603-6815. if (cv && negate == FALSE) { if (cv->canGetExactNumericValue()) { Int64 pos64 = cv->getExactNumericValue(); if (pos64 <= MINOF(maxLength_chars,INT_MAX)) { pos = int64ToInt32(pos64); if ((pos-1) > 0) { maxLength_chars -= (pos-1); // shorten max maxLength_bytes -= (pos-1) * CharInfo::minBytesPerChar(op1_cs); if ( maxLength_bytes > charOperand->getDataStorageSize() ) maxLength_bytes = charOperand->getDataStorageSize() ; } } // value is in bounds } // can get exact numeric } // constant pos op } // position operand NABoolean resultIsFixedChar = FALSE; Lng32 length = 0; Int64 length64 = 0; if (operand3) { ConstValue *cv = child(2)->castToConstValue(negate); if (cv) { if (negate) { // 8403 The length arg of a SUBSTRING function cannot be less than zero. 
*CmpCommon::diags() << DgSqlCode(-8403); return NULL; } if (cv->canGetExactNumericValue()) { length64 = cv->getExactNumericValue(); if (length64 <= INT_MAX) { length = int64ToInt32(length64); if (maxLength_chars > length) { maxLength_chars = length; maxLength_bytes = MINOF(maxLength_bytes, length * CharInfo::maxBytesPerChar(op1_cs)); } } // value is in bounds } // can get exact numeric } // constant length op } // length operand specified /* length64 = length; if ((NOT DFS2REC::isAnyVarChar(operand1->getFSDatatype())) && (pos > 0) && (length64 > 0) && ((pos + length64 - 1) <= maxLength)) resultIsFixedChar = TRUE; */ // 12/22/97: the substring inherits the charset, collation and // coercibility from the source string. /* if (resultIsFixedChar) return new HEAP SQLChar(maxLength, operand1->supportsSQLnull() OR operand2->supportsSQLnull() OR ((operand3 != NULL) AND operand3->supportsSQLnull()) ,charOperand->isUpshifted() ,charOperand->isCaseinsensitive() ,FALSE ,charOperand->getCharSet() ,charOperand->getCollation() ,charOperand->getCoercibility() ); else */ if (operand1->getFSDatatype() == REC_CLOB) { return new HEAP SQLClob(maxLength_bytes, Lob_Invalid_Storage, operand1->supportsSQLnull() OR operand2->supportsSQLnull() OR ((operand3 != NULL) AND operand3->supportsSQLnull())); } else { return new HEAP SQLVarChar(CharLenInfo(maxLength_chars, maxLength_bytes), // OLD: maxLength operand1->supportsSQLnull() OR operand2->supportsSQLnull() OR ((operand3 != NULL) AND operand3->supportsSQLnull()) ,charOperand->isUpshifted() ,charOperand->isCaseinsensitive() ,operand1->getCharSet() ,charOperand->getCollation() ,charOperand->getCoercibility() ); } } // ----------------------------------------------------------------------- // member functions for class Trim // ----------------------------------------------------------------------- const NAType *Trim::synthesizeType() { // // Type cast any params. // ValueId vid1 = child(0)->getValueId(); ValueId vid2 = child(1)->getValueId(); CharInfo::CharSet new_cs = getFirstKnownCharSet(vid1, vid2, vid2); vid1.coerceType(NA_CHARACTER_TYPE, new_cs); vid2.coerceType(NA_CHARACTER_TYPE, new_cs); if (vid1.getType().getTypeQualifier() != NA_CHARACTER_TYPE || vid2.getType().getTypeQualifier() != NA_CHARACTER_TYPE) { //4133: Both trim character and source have to be CHARACTER typed. *CmpCommon::diags() << DgSqlCode(-4133); return NULL; } // // Check that the operands are compatible. // const CharType *trimChar = (const CharType *)&vid1.getType(); const CharType *trimSource = (const CharType *)&vid2.getType(); // charset inference if ( trimChar->getCharSet() == CharInfo::UnknownCharSet && trimSource->getCharSet() != CharInfo::UnknownCharSet ) { // Special case for MP NCHAR when the default trim character is // a single single-byte character like ' '. Here we prepend a space // character to the local string copy (locale_string) inside // the constant value holding the sinlge byte trim character. During // invocation of vid1.coerceType(), the newly fabricated double-byte // trim character will be instantiated in the constant value object. 
if (CharInfo::is_NCHAR_MP(trimSource->getCharSet()) && vid1.getItemExpr()-> getOperator() == ITM_CONSTANT) { ConstValue* trimCharValue = (ConstValue*)vid1.getItemExpr(); if ( trimCharValue -> getStorageSize() == 1) { trimCharValue->getLocaleString()->prepend(' '); } } vid1.coerceType(*trimSource, NA_CHARACTER_TYPE); trimChar = (CharType*)&vid1.getType(); } else if ( trimChar->getCharSet() != CharInfo::UnknownCharSet && trimSource->getCharSet() == CharInfo::UnknownCharSet ) { vid2.coerceType(*trimChar, NA_CHARACTER_TYPE); trimSource = (CharType*)&vid2.getType(); } else if ( trimChar->getCharSet() == CharInfo::UnknownCharSet && trimSource->getCharSet() == CharInfo::UnknownCharSet ) { const CharType* desiredType = CharType::findPushDownCharType(getDefaultCharSet, 0); vid1.coerceType(*desiredType, NA_CHARACTER_TYPE); trimChar = (CharType*)&vid1.getType(); vid2.coerceType(*trimChar, NA_CHARACTER_TYPE); trimSource = (CharType*)&vid2.getType(); } if (NOT trimChar->isComparable(*trimSource, this, NAType::EmitErrIfAnyChar)) { // Per Ansi 6.7 SR 6(f), trim source and trim char must be comparable. // // 4063 The operands of a $0 function must be comparable character types. *CmpCommon::diags() << DgSqlCode(-4063) << DgString0(getTextForError()); return NULL; } // Per Ansi 6.7 SR 6(g,h), the result // takes the collation and coercibility of the trim source. // Int32 size = trimSource->getDataStorageSize(); return new HEAP SQLVarChar(CharLenInfo(trimSource->getStrCharLimit(), size ) ,trimChar->supportsSQLnull() OR trimSource->supportsSQLnull() ,trimSource->isUpshifted() ,trimSource->isCaseinsensitive() ,trimSource->getCharSet() ,trimSource->getCollation() ,trimSource->getCoercibility() ); } // ----------------------------------------------------------------------- // member functions for class UnLogic // ----------------------------------------------------------------------- const NAType *UnLogic::synthesizeType() { NABoolean allowsUnknown = FALSE; // All Unary Ops evaluate to TRUE/FALSE except NOT which can also // evaluate to UNKNOWN switch(getOperatorType()) { case ITM_NOT: { CMPASSERT(child(0).getValueId().getType().getTypeQualifier() == NA_BOOLEAN_TYPE); const SQLBoolean& operand0 = (SQLBoolean &) child(0).getValueId().getType(); allowsUnknown = operand0.canBeSQLUnknown(); break; } case ITM_IS_UNKNOWN: case ITM_IS_NOT_UNKNOWN: case ITM_IS_FALSE: case ITM_IS_TRUE: CMPASSERT(child(0).getValueId().getType().getTypeQualifier() == NA_BOOLEAN_TYPE); // Falling throuuuuuuuuuu case ITM_IS_NULL: case ITM_IS_NOT_NULL: allowsUnknown = FALSE; break; default: CMPASSERT(0); // Case not handled break; } return new HEAP SQLBoolean(allowsUnknown); } // ----------------------------------------------------------------------- // member functions for class Translate // ----------------------------------------------------------------------- const NAType *Translate::synthesizeType() { // // Type cast any params. // ValueId vid = child(0)->getValueId(); vid.coerceType(NA_CHARACTER_TYPE); // // Check that the operands are compatible. // const NAType& operand = vid.getType(); if (operand.getTypeQualifier() != NA_CHARACTER_TYPE) { // 4043 The operand of an UPPER function must be character. *CmpCommon::diags() << DgSqlCode(-4043) << DgString0(getTextUpper()); return NULL; } const CharType *translateSource = (CharType*)&operand; // pushdown ISO88591 if the charset is unknown at this time. 
switch ( getTranslateMapTableId() ) { case ISO88591_TO_UNICODE: case SJIS_TO_UCS2: case UTF8_TO_UCS2: case UTF8_TO_ISO88591: case UTF8_TO_SJIS: case SJIS_TO_UNICODE: case ISO88591_TO_UTF8: case SJIS_TO_UTF8: if ( translateSource->getCharSet() == CharInfo::UnknownCharSet) { CharInfo::CharSet assumedSrcCS = CharInfo::ISO88591; switch (getTranslateMapTableId()) { case SJIS_TO_UCS2: case SJIS_TO_UNICODE: case SJIS_TO_UTF8: assumedSrcCS = CharInfo::SJIS; break; case UTF8_TO_UCS2: case UTF8_TO_ISO88591: case UTF8_TO_SJIS: assumedSrcCS = CharInfo::UTF8; break; default: break; } vid.coerceType(*CharType::desiredCharType(assumedSrcCS), NA_CHARACTER_TYPE ); translateSource = (CharType*)&vid.getType(); } break; case UCS2_TO_SJIS: case UCS2_TO_UTF8: case UNICODE_TO_SJIS: case UNICODE_TO_ISO88591: case KANJI_MP_TO_ISO88591: case KSC5601_MP_TO_ISO88591: if ( translateSource->getCharSet() == CharInfo::UnknownCharSet ) { vid.coerceType(*CharType::desiredCharType(CharInfo::UNICODE), NA_CHARACTER_TYPE ); translateSource = (CharType*)&vid.getType(); } break; } CharInfo::CharSet charsetTarget = CharInfo::UnknownCharSet; NAString err4106arg(CmpCommon::statementHeap()); switch (getTranslateMapTableId()) { case ISO88591_TO_UNICODE: case SJIS_TO_UCS2: case UTF8_TO_UCS2: case SJIS_TO_UNICODE: if (translateSource->getCharSet() == CharInfo::ISO88591 || translateSource->getCharSet() == CharInfo::UTF8 || translateSource->getCharSet() == CharInfo::SJIS ) { charsetTarget = CharInfo::UNICODE; } else switch (getTranslateMapTableId()) { case UTF8_TO_UCS2: err4106arg = SQLCHARSETSTRING_UTF8; break; case SJIS_TO_UCS2: case SJIS_TO_UNICODE: err4106arg = SQLCHARSETSTRING_SJIS; break; case ISO88591_TO_UNICODE: default: err4106arg = SQLCHARSETSTRING_ISO88591; break; } break; case UNICODE_TO_SJIS: if (translateSource->getCharSet() == CharInfo::UNICODE) charsetTarget = CharInfo::SJIS; else err4106arg = SQLCHARSETSTRING_UNICODE; break; case UNICODE_TO_ISO88591: if (translateSource->getCharSet() == CharInfo::UNICODE) charsetTarget = CharInfo::ISO88591; else err4106arg = SQLCHARSETSTRING_UNICODE; break; case UCS2_TO_SJIS: if (translateSource->getCharSet() == CharInfo::UNICODE) charsetTarget = CharInfo::SJIS; else err4106arg = SQLCHARSETSTRING_UNICODE; break; case UCS2_TO_UTF8: if (translateSource->getCharSet() == CharInfo::UNICODE) charsetTarget = CharInfo::UTF8; else err4106arg = SQLCHARSETSTRING_UNICODE; break; case KANJI_MP_TO_ISO88591: if (translateSource->getCharSet() == CharInfo::KANJI_MP) charsetTarget = CharInfo::ISO88591; else err4106arg = SQLCHARSETSTRING_KANJI; break; case KSC5601_MP_TO_ISO88591: if (translateSource->getCharSet() == CharInfo::KSC5601_MP) charsetTarget = CharInfo::ISO88591; else err4106arg = SQLCHARSETSTRING_KSC5601; break; case UTF8_TO_SJIS: if ( (translateSource->getCharSet() == CharInfo::UTF8) || (translateSource->getCharSet() == CharInfo::ISO88591) ) charsetTarget = CharInfo::SJIS; else err4106arg = SQLCHARSETSTRING_UTF8; break; case ISO88591_TO_UTF8: if (translateSource->getCharSet() == CharInfo::ISO88591) { charsetTarget = CharInfo::UTF8; } else err4106arg = SQLCHARSETSTRING_ISO88591; break; case UTF8_TO_ISO88591: if ( (translateSource->getCharSet() == CharInfo::UTF8) || (translateSource->getCharSet() == CharInfo::ISO88591) ) charsetTarget = CharInfo::ISO88591; else err4106arg = SQLCHARSETSTRING_UTF8; break; default: // 4105 Unknown translation *CmpCommon::diags() << DgSqlCode(-4105); return NULL; } if (charsetTarget != CharInfo::UnknownCharSet) { Lng32 resultLen = 
CharInfo::getMaxConvertedLenInBytes(translateSource->getCharSet(), translateSource->getNominalSize(), charsetTarget); return new HEAP SQLVarChar(CharLenInfo(translateSource->getStrCharLimit(), resultLen), TRUE, FALSE, translateSource->isCaseinsensitive(), charsetTarget, CharInfo::DefaultCollation, CharInfo::IMPLICIT); // ANSI 6.7 SR 5b } *CmpCommon::diags() << DgSqlCode(-4106) << DgString0(getTextUpper()) << DgString1(err4106arg); return NULL; } // ----------------------------------------------------------------------- // member functions for class ValueIdUnion // ----------------------------------------------------------------------- #pragma nowarn(1506) // warning elimination const NAType *ValueIdUnion::synthesizeType() { const NAType *result = 0; CollIndex i = 0; for (i = 0; i < entries(); i++) { result = &getSource(i).getType(); if (result->getTypeQualifier() != NA_UNKNOWN_TYPE) break; } CMPASSERT(result); if (result->getTypeQualifier() == NA_UNKNOWN_TYPE) return result->newCopy(HEAP); CollIndex r = i; // the r'th source was the first non-unknown for (i = 0; i < entries(); i++) { if (i != r) { // r'th source started it all, we did it already getSource(i).coerceType(*result); ValueId vidi = getSource(i); const NAType& opR = *result; // save operand BEFORE synth const NAType& opI = vidi.getType(); UInt32 flags = ((CmpCommon::getDefault(LIMIT_MAX_NUMERIC_PRECISION) == DF_ON) ? NAType::LIMIT_MAX_NUMERIC_PRECISION : 0); if (CmpCommon::getDefault(TYPE_UNIONED_CHAR_AS_VARCHAR) == DF_ON) flags |= NAType::MAKE_RESULT_VARCHAR; if (CmpCommon::getDefault(MODE_SPECIAL_4) == DF_ON) { flags |= NAType::MODE_SPECIAL_4; } result = result->synthesizeType(SYNTH_RULE_UNION, opR, opI, HEAP, &flags); if (!result) { // 4055 The select lists or tuples must have compatible data types. emitDyadicTypeSQLnameMsg(-4055, opR, opI); //## Here, also emit errmsg 4034 w/ unparse? return NULL; } else if (getUnionFlags() == Union::UNION_DISTINCT) { if (NOT opR.isComparable(opI, this)) { // 4134 The operation (x UNION y) is not allowed. Try UNION ALL. *CmpCommon::diags() << DgSqlCode(-4134) << DgString0(getText(USER_FORMAT_DELUXE)); return NULL; } } } } return result; } #pragma warn(1506) // warning elimination // ValueIdUnion::pushDownType() ----------------------------------- // Propagate type information down the ItemExpr tree. This method // is called by coerceType(). It will attempt to coerce (a recursive call) // the type of each member of the ValueIdUnion to the desired type. // This only has an effect when none of the members of the ValueIdUnion // could be typed bottom up. An example query that illustrates this // is: // Select NULL from t1 // Union all // Select NULL from t2 // Union all // Select c from t3; // // This results in a tree of ValueIdUnion nodes: // ValueIdUnion(c (ValueIdUnion(Null, Null))); // // The inner ValueIdUnion node can not be typed bottom up, but // the outer ValueIdUnion node will attempt to coerce the type of // the inner node. This will in turn (through pushDownType) coerce // the types of the members (NULLs) of the inner ValueIdUnion node // and re-synthesize the Type of the inner ValueIdUnion node. 
// // #pragma nowarn(1506) // warning elimination const NAType * ValueIdUnion::pushDownType(NAType& desiredType, enum NABuiltInTypeEnum defaultQualifier) { for(CollIndex i = 0; i < entries(); i++) { getSource(i).coerceType(desiredType, defaultQualifier); } return (NAType *)synthesizeType(); } #pragma warn(1506) // warning elimination const NAType * RowsetArrayScan::pushDownType(NAType& desiredType, enum NABuiltInTypeEnum defaultQualifier) { elemType_ = &desiredType; const NAType *newType = child(0)->pushDownType(desiredType, desiredType.getTypeQualifier()); child(0)->getValueId().changeType(newType); //BEGIN 10-050523-8022 //When datatype has constraint NOT_NULL_DROPPABLE , getting the //null indicator from //supportsSQLnullogical was leading to truncation of host data in //ExRowsetArrayScan::eval function of file exp_function.cpp. elemNullInd_ = desiredType.supportsSQLnull(); //End 10-050523-8022 return &desiredType; } const NAType * HostVar::pushDownType(NAType& desiredType, enum NABuiltInTypeEnum defaultQualifier) { // If this is a rowset host var, we need to propagate the desired type into it if (varType_->getTypeQualifier() == NA_ROWSET_TYPE) { SQLRowset *rw1 = (SQLRowset *) varType_; SQLRowset *rw2 = new HEAP SQLRowset(&desiredType, rw1->getMaxNumElements(), rw1->getNumElements()); NAType *tempType = &desiredType; rw2->setNullable(*tempType); varType_ = rw2; return rw2; } return &desiredType; } // ----------------------------------------------------------------------- // member functions for class ValueIdProxy // ----------------------------------------------------------------------- const NAType *ValueIdProxy::synthesizeType() { const NAType *proxyType = &getOutputId().getType(); return proxyType->newCopy(HEAP); } // Propagate type information down the node we are Proxy for. // Called by coerceType(). // #pragma nowarn(1506) // warning elimination #pragma warning (disable : 4018) //warning elimination const NAType * ValueIdProxy::pushDownType(NAType& desiredType, enum NABuiltInTypeEnum defaultQualifier) { outputValueId_.coerceType(desiredType, defaultQualifier); return (NAType *)synthesizeType(); } // ----------------------------------------------------------------------- // member functions for class VEGPredicate // ----------------------------------------------------------------------- const NAType *VEGPredicate::synthesizeType() { return new HEAP SQLBoolean(); } // ----------------------------------------------------------------------- // member functions for class VEGReference // $$$ WORK REMAINING TO BE DONE: // $$$ compute the intersection of the datatypes // $$$ of the members of the VEG and assign it as the type // $$$ for the VEGReference. // ----------------------------------------------------------------------- const NAType *VEGReference::synthesizeType() { if (getVEG()->seenBefore()) return NULL; getVEG()->markAsSeenBefore(); NAType *type = NULL; if (NOT getVEG()->getAllValues().isEmpty()) { // return the type of any one expression from the VEG. ValueId exprId; const ValueIdSet & vegValues = getVEG()->getAllValues(); for (exprId = vegValues.init(); vegValues.next(exprId); vegValues.advance(exprId)) { if (exprId.getItemExpr()->getOperatorType() == ITM_VEG_REFERENCE) { // Saw a VEGReference inside the VEG which is not type synthesized // yet. Drive its synthesis if it's not a VEGReference to a VEG // which we are in the process of type synthesizing. 
In that case, // we could ignore the VEGRef, since (1) we want to avoid infinite // recursion; (2) it will get its type after the completion of an // earlier call to synthesizeType(). // if (exprId.getValueDesc()->getDomainDesc() == NULL) { VEGReference *vegref = (VEGReference *)(exprId.getItemExpr()); if (NOT vegref->getVEG()->seenBefore()) { vegref->synthTypeAndValueId(FALSE); // Remember the first non-null type seen. if (type == NULL) type = (NAType *) &(exprId.getType()); } } else { // Remember the first non-null type seen. if (type == NULL) type = (NAType *) &(exprId.getType()); } } else { // Remember the first non-null type seen. if (type == NULL) type = (NAType *) &(exprId.getType()); } } } else { type = new HEAP SQLUnknown(); } getVEG()->markAsNotSeenBefore(); CMPASSERT(type); return type; } const NAType *ScalarVariance::synthesizeType() { return new HEAP SQLDoublePrecision(TRUE); // Variance is always Nullable } // UnPackCol::synthesizeType() -------------------------------- // The type of the UnPackCol is the type of the original unpacked // column. This type is store within the UnPackCol node. // const NAType *UnPackCol::synthesizeType() { // The type of the original unpacked column. // return getType(); } // RowsetArrayScan::synthesizeType() -------------------------------- // The type of the RowsetArrayScan is the type of the original unpacked // element. This type is store within the RowsetArrayScan node. // const NAType *RowsetArrayScan::synthesizeType() { // The element type return getType(); } const NAType *RowsetArrayInto::synthesizeType() { // The element type return getType(); } const NAType *RandomNum::synthesizeType() { NAType *result = NULL; if (getArity() == 1) { // // Type cast any params. // SQLInt nType(FALSE); ValueId vid = child(0)->getValueId(); vid.coerceType(nType, NA_NUMERIC_TYPE); const NAType& operand = vid.getType(); if (operand.getTypeQualifier() != NA_NUMERIC_TYPE) { // 4045 The operand of a Random function must be numeric. *CmpCommon::diags() << DgSqlCode(-4045) << DgString0(getTextUpper()); return NULL; } // now it's safe to cast the types to numeric type const NumericType &ntyp1 = (NumericType &) operand; if (NOT ntyp1.isExact()) { // 4070 Random function is only defined for exact numeric types. *CmpCommon::diags() << DgSqlCode(-4070) << DgString0(getTextUpper()); return NULL; } if (ntyp1.getScale() != 0) { // 4047 Arguments of random function must have a scale of 0. *CmpCommon::diags() << DgSqlCode(-4047) << DgString0(getTextUpper()); return NULL; } // return: int unsigned result = (NAType *) new HEAP SQLInt(FALSE, ntyp1.supportsSQLnullLogical()); } else { // return: int unsigned not null result = (NAType *) new HEAP SQLInt(FALSE,FALSE); } return result; } const NAType *Mask::synthesizeType() { // The expression is <op1> Mask <op2>. // Both operands must be exact numeric with scale 0. // The result can always fit into the data type of the first child. 
const NAType &typ1 = child(0)->getValueId().getType(); const NAType &typ2 = child(1)->getValueId().getType(); // Mask is an internal operator and errors are fatal CMPASSERT(typ1.getTypeQualifier() == NA_NUMERIC_TYPE AND typ2.getTypeQualifier() == NA_NUMERIC_TYPE); // now it's safe to cast the types to numeric type const NumericType &ntyp1 = (NumericType &) typ1; const NumericType &ntyp2 = (NumericType &) typ2; // for now make sure both operands basically have the same binary // type, but this may be changed in the future CMPASSERT(ntyp1.isExact() AND ntyp2.isExact() AND ntyp1.getScale() == 0 AND ntyp2.getScale() == 0 AND ntyp1.getPrecision() == ntyp2.getPrecision() AND ntyp1.binaryPrecision() AND ntyp2.binaryPrecision()); const NAType *result = typ1.newCopy(HEAP); return result; } const NAType *Shift::synthesizeType() { // The expression is <op1> Shift <op2>. // Both operands must be exact numeric with scale 0. // The result can always fit into the data type of the first child. const NAType &typ1 = child(0)->getValueId().getType(); const NAType &typ2 = child(1)->getValueId().getType(); // Mask is an internal operator and errors are fatal CMPASSERT(typ1.getTypeQualifier() == NA_NUMERIC_TYPE AND typ2.getTypeQualifier() == NA_NUMERIC_TYPE); // now it's safe to cast the types to numeric type const NumericType &ntyp1 = (NumericType &) typ1; const NumericType &ntyp2 = (NumericType &) typ2; // for now make sure both operands basically have the same binary // type, but this may be changed in the future CMPASSERT(ntyp1.isExact() AND ntyp2.isExact() AND ntyp1.getScale() == 0 AND ntyp2.getScale() == 0 AND ntyp1.binaryPrecision() AND ntyp2.binaryPrecision()); const NAType *result = typ1.newCopy(HEAP); return result; } // ----------------------------------------------------------------------- // member functions for class PackFunc // ----------------------------------------------------------------------- const NAType* PackFunc::synthesizeType() { // --------------------------------------------------------------------- // If format information is valid, type is already available. Otherwise, // compute type information for the result of the PackFunc from the type // information of its operand. // --------------------------------------------------------------------- if(NOT isFormatInfoValid_) { // Type of column to be packed. const NAType* columnType = &child(0)->getValueId().getType(); deriveFormatInfoFromUnpackType(columnType); } return type_; } const NAType * ZZZBinderFunction::synthesizeType() { // the synthesizeType method is needed only when we process an item // expression at DDL time. 
For DML the function gets transformed into // another function in the binder before we reach type synthesis switch (getOperatorType()) { case ITM_DATEDIFF_YEAR: case ITM_DATEDIFF_MONTH: case ITM_DATEDIFF_QUARTER: case ITM_DATEDIFF_WEEK: return new HEAP SQLInt(TRUE, child(0)->getValueId().getType().supportsSQLnull() || child(1)->getValueId().getType().supportsSQLnull()); case ITM_LEFT: { // make a temporary transformation for synthesizing the right type Substring *temp = new HEAP Substring(child(0).getPtr(), new HEAP ConstValue((Lng32) 1, (NAMemory *) HEAP), child(1)); temp->synthTypeAndValueId(); return temp->getValueId().getType().newCopy(HEAP); } case ITM_YEARWEEK: case ITM_YEARWEEKD: return new HEAP SQLNumeric(4, 6, 0, TRUE, child(0)->getValueId().getType().supportsSQLnull()); default: // use the parent class implementation by default return BuiltinFunction::synthesizeType(); } } const NAType *Subquery::synthesizeType() { return new HEAP SQLBoolean(); } const NAType *RowSubquery::synthesizeType() { const NAType *rowType = &getSubquery()->selectList()->getValueId().getType(); return rowType->newCopy(HEAP); } // Propagate type information down the compExpr of the RowSubquery // Called by coerceType(). We only change type if the selectList of // the RowSubquery is of degree 1 and the returned value in the select // list is of unknown or character type. // #pragma nowarn(1506) // warning elimination #pragma warning (disable : 4018) //warning elimination const NAType * RowSubquery::pushDownType(NAType& desiredType, enum NABuiltInTypeEnum defaultQualifier) { // In the case where the select list of the rowSubquery contains // a dynamic parameter, we need to change its type.. if ( getDegree() == 1 ) { RelRoot *sq_root = (RelRoot *) getSubquery(); ValueId outVid = sq_root->compExpr()[0]; if ( outVid.getType().getTypeQualifier() == NA_UNKNOWN_TYPE || outVid.getType().getTypeQualifier() == NA_CHARACTER_TYPE ) outVid.coerceType(desiredType, defaultQualifier); } return (NAType *)synthesizeType(); } const NAType *Exists::synthesizeType() { // EXISTS predicate can never evaluate to Unknown return new HEAP SQLBoolean(FALSE); } const NAType *QuantifiedComp::synthesizeType() { // Genesis 10-980305-3294 ItemExprList exprList1(child(0).getPtr(), HEAP); ItemExprList exprList2(getSubquery()->selectList(), HEAP); NABoolean allowsUnknown; NABoolean allowIncompatibleComparison = FALSE; if (CmpCommon::getDefault(MODE_SPECIAL_1) == DF_ON) allowIncompatibleComparison = TRUE; if (!synthItemExprLists(exprList1, exprList2, allowIncompatibleComparison, allowsUnknown, this)) return NULL; return new HEAP SQLBoolean(allowsUnknown); } // MV, const NAType *GenericUpdateOutputFunction::synthesizeType() { const NAType *type = NULL; if( getOperator().match(ITM_JULIANTIMESTAMP) ) { // // Type cast any params. // ValueId vid = child(0)->getValueId(); SQLTimestamp timestampType; vid.coerceType(timestampType); // // Check that the operands are compatible. // const NAType& operand = vid.getType(); if (operand.getTypeQualifier() != NA_DATETIME_TYPE) { // 4071 The operand of a JULIANTIMESTAMP function must be a datetime. 
*CmpCommon::diags() << DgSqlCode(-4071) << DgString0(getTextUpper()); return NULL; } type = new HEAP SQLLargeInt(TRUE, operand.supportsSQLnullLogical()); } else { type = new HEAP SQLInt(TRUE, FALSE); } return type; } //++Triggers, const NAType *UniqueExecuteId::synthesizeType() { return new HEAP SQLChar(SIZEOF_UNIQUE_EXECUTE_ID, FALSE); } const NAType *GetTriggersStatus::synthesizeType() { return new HEAP SQLChar(TRIGGERS_STATUS_VECTOR_SIZE, FALSE); } const NAType *GetBitValueAt::synthesizeType() { const NAType *operand1 = &child(0)->getValueId().getType(); const NAType *operand2 = &child(1)->getValueId().getType(); if (operand1->getTypeQualifier() != NA_CHARACTER_TYPE) { // 4051 Operand 1 must be character. *CmpCommon::diags() << DgSqlCode(-4051) << DgString0(getTextUpper()); return NULL; } if (operand2->getTypeQualifier() != NA_NUMERIC_TYPE) { // 4052 Operand 2 must be numeric. *CmpCommon::diags() << DgSqlCode(-4052) << DgString0(getTextUpper()); return NULL; } return new HEAP SQLInt(FALSE, FALSE); } //--Triggers, const NAType *IsBitwiseAndTrue::synthesizeType() { const NAType *operand1 = &child(0)->getValueId().getType(); const NAType *operand2 = &child(1)->getValueId().getType(); if (operand1->getTypeQualifier() != NA_CHARACTER_TYPE) { // 4051 Operand 1 must be character. *CmpCommon::diags() << DgSqlCode(-4051) << DgString0(getTextUpper()); return NULL; } if (operand1->getTypeQualifier() != NA_CHARACTER_TYPE) { // 4051 Operand 1 must be character. *CmpCommon::diags() << DgSqlCode(-4051) << DgString0(getTextUpper()); return NULL; } return new HEAP SQLBoolean(FALSE); } //--MV const NAType *ItemList::synthesizeType() { const NAType * elementType = &child(0)->getValueId().getType(); SQLRecord *restOfRecord; if (child(1)->getOperatorType() == ITM_ITEM_LIST) { restOfRecord = (SQLRecord *)&child(1)->getValueId().getType(); CMPASSERT(restOfRecord->getTypeQualifier() == NA_RECORD_TYPE); } else { restOfRecord = new HEAP SQLRecord(&child(1)->getValueId().getType(),NULL); } return new HEAP SQLRecord(elementType,restOfRecord); } // ----------------------------------------------------------------------- // member functions for class ItmSeqOffset // ----------------------------------------------------------------------- const NAType *ItmSeqOffset::synthesizeType() { // Verify that child 1 is numeric. // Return the type of child 0. const NAType &operand1 = child(0)->getValueId().getType(); if (getArity() > 1) { const NAType &operand2 = child(1)->getValueId().getType(); if (operand2.getTypeQualifier() != NA_NUMERIC_TYPE) { // The second operand of an OFFSET function must be numeric. *CmpCommon::diags() << DgSqlCode(-4052) << DgString0(getTextUpper()); return NULL; } } if (getArity() > 2) { const NAType &operand3 = child(2)->getValueId().getType(); if (operand3.getTypeQualifier() != NA_NUMERIC_TYPE) { // The third operand of an OFFSET function must be numeric. 
*CmpCommon::diags() << DgSqlCode(-4053) << DgString0(getTextUpper()); return NULL; } } NAType *result = operand1.newCopy(HEAP); if(nullRowIsZero()) { result->setNullable(FALSE); CMPASSERT(result->getTypeQualifier() == NA_NUMERIC_TYPE || result->getTypeQualifier() == NA_INTERVAL_TYPE); } else { result->setNullable(TRUE); } if (isOLAP()) { result->setNullable(TRUE); } return result; } // ----------------------------------------------------------------------- // member functions for class ItmSeqDiff1 // ----------------------------------------------------------------------- const NAType *ItmSeqDiff1::synthesizeType() { // Verify that children are numeric. // Return the result type of child(0) - child(0). for (Int32 i = 0; i < getArity(); i++) { const NAType &operand = child(i)->getValueId().getType(); NABuiltInTypeEnum opType = operand.getTypeQualifier() ; if ((opType != NA_NUMERIC_TYPE && opType != NA_DATETIME_TYPE && opType != NA_INTERVAL_TYPE) || !operand.isSupportedType()) { if (i == 0) { // The first operand of a DIFF1 function must be numeric. *CmpCommon::diags() << DgSqlCode(-4059) << DgString0(getTextUpper()); } else { // The second operand of a DIFF1 function must be numeric. *CmpCommon::diags() << DgSqlCode(-4052) << DgString0(getTextUpper()); } return NULL; } // if not numeric } const NAType &operand1 = child(0)->getValueId().getType(); const NAType *result1 = operand1.synthesizeType(SYNTH_RULE_SUB, operand1, operand1, HEAP); NAType *result; if (getArity()==2) // will be transformed into: DIFF1(child(0)) / DIFF1(child(1)) { const NAType &operand2 = child(1)->getValueId().getType(); const NAType *result2 = operand2.synthesizeType(SYNTH_RULE_SUB, operand2, operand2, HEAP); if (result2->getTypeQualifier() == NA_INTERVAL_TYPE) { result2 = new HEAP SQLLargeInt(TRUE, FALSE ); } result = (NAType *)result2->synthesizeType(SYNTH_RULE_DIV, *result1, *result2, HEAP); } else { result = (NAType *)result1; } result->setNullable(TRUE); return result; } // ----------------------------------------------------------------------- // member functions for class ItmSeqDiff2 // ----------------------------------------------------------------------- const NAType *ItmSeqDiff2::synthesizeType() { // Verify that children are numeric. // Return the result type of child(0) - child(0). for (Int32 i = 0; i < getArity(); i++) { const NAType &operand = child(i)->getValueId().getType(); NABuiltInTypeEnum opType = operand.getTypeQualifier() ; if ((opType != NA_NUMERIC_TYPE && opType != NA_DATETIME_TYPE && opType != NA_INTERVAL_TYPE ) || !operand.isSupportedType()) { if (i == 0) { // The first operand of a DIFF1 function must be numeric. *CmpCommon::diags() << DgSqlCode(-4059) << DgString0(getTextUpper()); } else { // The second operand of a DIFF1 function must be numeric. 
*CmpCommon::diags() << DgSqlCode(-4052) << DgString0(getTextUpper()); } return NULL; } // if not numeric } const NAType &operand1 = child(0)->getValueId().getType(); const NAType *result1 = operand1.synthesizeType(SYNTH_RULE_SUB, operand1, operand1, HEAP); NAType *result = (NAType *)result1->synthesizeType(SYNTH_RULE_SUB, *result1, *result1, HEAP); if (getArity()==2) // will be transformed into: DIFF2(child(0)) / DIFF1(child(1)) { const NAType &operand2 = child(1)->getValueId().getType(); const NAType *result2 = operand2.synthesizeType(SYNTH_RULE_SUB, operand2, operand2, HEAP); if (result2->getTypeQualifier() == NA_INTERVAL_TYPE) { result2 = new HEAP SQLLargeInt(TRUE, FALSE ); } result = (NAType *)result2->synthesizeType(SYNTH_RULE_DIV, *result, *result2, HEAP); } result->setNullable(TRUE); return result; } // ----------------------------------------------------------------------- // member functions for class ItmSeqRunningFunction // ----------------------------------------------------------------------- const NAType *ItmSeqRunningFunction::synthesizeType() { const NAType *result = NULL; if ((getOperatorType() == ITM_RUNNING_COUNT) || (getOperatorType() == ITM_RUNNING_RANK) || (getOperatorType() == ITM_RUNNING_DRANK) || (getOperatorType() == ITM_RUNNING_CHANGE)) { result = new HEAP SQLLargeInt(TRUE /* 'long long' on NSK can't be unsigned */, FALSE /*not null*/); } else { const NAType& operand = child(0)->castToItemExpr()->getValueId().getType(); switch (getOperatorType()) { case ITM_RUNNING_AVG: { // needs to mimic what will happen after transformation const NAType *operand1 = synthAvgSum(operand, FALSE); const NAType *newInt = new HEAP SQLLargeInt(TRUE,FALSE); if (operand1){ result = operand1->synthesizeType(SYNTH_RULE_DIV, *operand1, *newInt, HEAP); } } break; case ITM_RUNNING_SUM: result = synthAvgSum(operand, FALSE); break; case ITM_LAST_NOT_NULL: case ITM_RUNNING_MAX: case ITM_RUNNING_MIN: result = operand.newCopy(HEAP); break; case ITM_RUNNING_SDEV: case ITM_RUNNING_VARIANCE: result = new HEAP SQLDoublePrecision(TRUE); // See ScalarVariance::synthesizeType() break; default: CMPASSERT("Unknown running function in synthesizeType()."); break; } // end switch getOperatorType() if (result){ ((NAType *)result)->setNullable(TRUE); } } // end else not RUNNINGCOUNT return result; } const NAType *ItmSeqOlapFunction::synthesizeType() { const NAType *result = NULL; if (getOperatorType() == ITM_OLAP_COUNT) { result = new HEAP SQLLargeInt(TRUE /* 'long long' on NSK can't be unsigned */, TRUE /* null*/); } else if (/*(getOperatorType() == ITM_OLAP_COUNT) || */ //-- causes runtime error: ERROR[8421] NULL cannot be assigned to a NOT NULL column. 
(getOperatorType() == ITM_OLAP_RANK) || (getOperatorType() == ITM_OLAP_DRANK)) { result = new HEAP SQLLargeInt(TRUE /* 'long long' on NSK can't be unsigned */, FALSE /*not null*/); } else { const NAType& operand = child(0)->castToItemExpr()->getValueId().getType(); switch (getOperatorType()) { case ITM_OLAP_AVG: { // needs to mimic what will happen after transformation const NAType *operand1 = synthAvgSum(operand, FALSE); const NAType *newInt = new HEAP SQLLargeInt(TRUE, TRUE /*FALSE*/); if (operand1) { result = operand1->synthesizeType(SYNTH_RULE_DIV, *operand1, *newInt, HEAP); } } break; case ITM_OLAP_SUM: result = synthAvgSum(operand, FALSE); break; case ITM_OLAP_MAX: case ITM_OLAP_MIN: result = operand.newCopy(HEAP); break; case ITM_OLAP_SDEV: case ITM_OLAP_VARIANCE: result = new HEAP SQLDoublePrecision(TRUE); break; default: CMPASSERT("Unknown running function in synthesizeType()."); break; } // end switch getOperatorType() if (result) { ((NAType *)result)->setNullable(TRUE); } } // end else not RUNNINGCOUNT return result; } // ----------------------------------------------------------------------- // member functions for class ItmSeqRowsSince // ----------------------------------------------------------------------- const NAType *ItmSeqRowsSince::synthesizeType() { if (getArity() == 2) { const NAType &operand2 = child(1)->getValueId().getType(); if (operand2.getTypeQualifier() != NA_NUMERIC_TYPE) { // The second operand of a ROWS SINCE function must be numeric. *CmpCommon::diags() << DgSqlCode(-4052) << DgString0(getTextUpper()); return NULL; } } return new HEAP SQLInt(TRUE /* 'long long' on NSK can't be unsigned */, TRUE /* nullable */); } // ----------------------------------------------------------------------- // member functions for class ItmSeqMovingFunction // ----------------------------------------------------------------------- #pragma nowarn(262) // warning elimination const NAType *ItmSeqMovingFunction::synthesizeType() { const NAType *result=NULL; // // Verify that moving window sizes are numeric values // NABoolean child2isOK = TRUE; const NAType &operand1 = child(1)->getValueId().getType(); if (operand1.getTypeQualifier() != NA_NUMERIC_TYPE) { // The second operand of a MOVING sequence function must be numeric. *CmpCommon::diags() << DgSqlCode(-4052) << DgString0(getTextUpper()); return NULL; } if (getArity() > 2 ) // check child(2) type { const NAType &operand2 = child(2)->getValueId().getType(); if (operand2.getTypeQualifier() != NA_NUMERIC_TYPE) { // The third operand of a MOVING sequence function must be numeric. 
*CmpCommon::diags() << DgSqlCode(-4053) << DgString0(getTextUpper()); return NULL; } } if ((getOperatorType() == ITM_MOVING_COUNT) || (getOperatorType() == ITM_MOVING_RANK) || (getOperatorType() == ITM_MOVING_DRANK)) { result = new HEAP SQLLargeInt(TRUE /* 'long long' on NSK can't be unsigned */, FALSE /*not null*/); } else { const NAType& operand = child(0)->castToItemExpr()->getValueId().getType(); switch (getOperatorType()) { case ITM_MOVING_AVG: { // needs to mimic what will happen after transformation const NAType *operand1 = synthAvgSum(operand, FALSE); const NAType *newInt = new HEAP SQLLargeInt(TRUE,FALSE); if (operand1) { result = operand1->synthesizeType(SYNTH_RULE_DIV, *operand1, *newInt, HEAP); } } break; case ITM_MOVING_SUM: result = synthAvgSum(operand, FALSE); break; case ITM_MOVING_MAX: case ITM_MOVING_MIN: result = operand.newCopy(HEAP); break; case ITM_MOVING_SDEV: case ITM_MOVING_VARIANCE: result = new HEAP SQLDoublePrecision(TRUE); // See ScalarVariance::synthesizeType() break; default: CMPASSERT("Unknown moving function in synthesizeType()."); break; } // end switch getOperatorType() if (result) { ((NAType *)result)->setNullable(TRUE); } } // end else not MOVING_COUNT return result; } #pragma warn(262) // warning elimination // ----------------------------------------------------------------------- // member functions for class ItmSeqThisFunction // ----------------------------------------------------------------------- const NAType *ItmSeqThisFunction::synthesizeType() { // Return the type of child const NAType &operand = child(0)->getValueId().getType(); NAType *result = operand.newCopy(HEAP); result->setNullable(TRUE); return result; } // ----------------------------------------------------------------------- // member functions for class ItmScalarMinMax // ----------------------------------------------------------------------- const NAType *ItmScalarMinMax::synthesizeType() { // The expression is SCALAR_MIN(<val1>, <val2>) or SCALAR_MAX(<val1>, <val2>) // The result is the min or max value of the operands. ValueId valId1 = child(0)->getValueId(); ValueId valId2 = child(1)->getValueId(); // // Type cast any params. // valId1.coerceType(valId2.getType(), NA_NUMERIC_TYPE); valId2.coerceType(valId1.getType()); // // Synthesize the result. // const NAType& op1 = valId1.getType(); const NAType& op2 = valId2.getType(); UInt32 flags = ((CmpCommon::getDefault(LIMIT_MAX_NUMERIC_PRECISION) == DF_ON) ? NAType::LIMIT_MAX_NUMERIC_PRECISION : 0); const NAType *result = op1.synthesizeType(SYNTH_RULE_UNION, op1, op2, HEAP, &flags); if (result == NULL) { // 4041 Type $1 cannot be compared with type $2. 
emitDyadicTypeSQLnameMsg(-4041, op1, op2); return NULL; } if (result->getTypeQualifier() == NA_CHARACTER_TYPE) { CharType *ct = (CharType *)result; propagateCoAndCoToChildren(this, ct->getCollation(), ct->getCoercibility()); } return result; } // ----------------------------------------------------------------------- // member functions for class ItmSeqNotTHISFunction // ----------------------------------------------------------------------- const NAType *ItmSeqNotTHISFunction::synthesizeType() { // Return the type of child const NAType &operand = child(0)->getValueId().getType(); NAType *result = operand.newCopy(HEAP); result->setNullable(TRUE); return result; } // ----------------------------------------------------------------------- // member functions for class AuditImage // ----------------------------------------------------------------------- const NAType *AuditImage::synthesizeType() { const NAType * type = NULL; const NATable *naTable = getNATable(); // The data types of the columns (in order) in the object // must match the data types of columns specified in the // expression list for AUDIT_IMAGE. The columns in the expression // list form the children of AUDIT_IMAGE node. const NAColumnArray &naColumns = naTable->getNAColumnArray(); for (UInt32 i=0; i < naColumns.entries(); i++) { const NAColumn *tableNACol = naColumns[i]; const NAType *tableColumnType = tableNACol->getType(); // Populate member varaible columnTypeList_ (to be used // during codeGen. See AuditImage::codeGen()) with the // column types of the object. columnTypeList_.insert(tableColumnType); // Actual datatype checking is done only for non-Constants. // Compatiblity type checking is done for Constants. // Note: Constants are used only for internal testing. const NAType *childType = &children()[i].getValueId().getType(); if (children()[i].getPtr()->getOperatorType() == ITM_CONSTANT || childType->getTypeQualifier() == NA_UNKNOWN_TYPE) { children()[i].getValueId().coerceType(*tableColumnType); // the coerceType method above might have changed // the childType. So, get it one more time. childType = &children()[i].getValueId().getType(); if (NOT childType->isCompatible(*tableColumnType)) { *CmpCommon::diags() << DgSqlCode(-4316) << DgTableName(getObjectName().getQualifiedNameAsString()) << DgColumnName(tableNACol->getColName()); return NULL; } } else { // Not a constant, so enforce type checking; but w.r.t NULL - check // only if it's physical and not if the value of // has the exact enum NAType::SupportsSQLnull. 
if (!(tableColumnType->equalPhysical(*childType))) { *CmpCommon::diags() << DgSqlCode(-4316) << DgTableName(getObjectName().getQualifiedNameAsString()) << DgColumnName(tableNACol->getColName()); return NULL; } } } const Lng32 recordLength = naTable->getRecordLength(); type = new HEAP SQLVarChar(recordLength); return type; } const NAType * HbaseColumnLookup::synthesizeType() { NAType * type = NULL; if (naType_) type = (NAType*)naType_; else type = new HEAP SQLVarChar(100000); return type; } const NAType * HbaseColumnsDisplay::synthesizeType() { NAType * type = new HEAP SQLVarChar(displayWidth_); return type; } const NAType * HbaseColumnCreate::synthesizeType() { NAType * type = NULL; if (resultType_) type = (NAType*)resultType_; else type = new HEAP SQLVarChar(100000); return type; } const NAType * SequenceValue::synthesizeType() { NAType * type = NULL; type = new HEAP SQLLargeInt(TRUE, FALSE); return type; } const NAType * HbaseTimestamp::synthesizeType() { NAType * type = NULL; type = new HEAP SQLLargeInt(TRUE, col_->getValueId().getType().supportsSQLnull()); return type; } const NAType * HbaseVersion::synthesizeType() { NAType * type = NULL; type = new HEAP SQLLargeInt(TRUE, FALSE); return type; } const NAType * RowNumFunc::synthesizeType() { NAType * type = NULL; type = new HEAP SQLLargeInt(TRUE, FALSE); return type; } const NAType *LOBoper::synthesizeType() { // Return blob or clob type NAType *result = new HEAP SQLBlob(1000); if (child(0)) { ValueId vid1 = child(0)->getValueId(); const NAType &typ1 = (NAType&)vid1.getType(); if (typ1.getFSDatatype() == REC_BLOB) { result = new HEAP SQLBlob(1000, Lob_Local_File, typ1.supportsSQLnull()); } else if (typ1.getFSDatatype() == REC_CLOB) { result = new HEAP SQLClob(1000, Lob_Invalid_Storage, typ1.supportsSQLnull()); } } return result; } const NAType *LOBinsert::synthesizeType() { // Return blob type ValueId vid1 = child(0)->getValueId(); const NAType &typ1 = (NAType&)vid1.getType(); if ((obj_ == STRING_) || (obj_ == FILE_) || (obj_ == EXTERNAL_) || (obj_ == LOAD_)) { if (typ1.getTypeQualifier() != NA_CHARACTER_TYPE) { // 4221 The operand of a $0~String0 function must be character. *CmpCommon::diags() << DgSqlCode(-4221) << DgString0("LOBINSERT") << DgString1("CHARACTER"); return NULL; } } else if (obj_ == LOB_) { if (typ1.getTypeQualifier() != NA_LOB_TYPE) { // 4043 The operand of a $0~String0 function must be blob *CmpCommon::diags() << DgSqlCode(-4221) << DgString0("LOBINSERT") << DgString1("LOB"); return NULL; } } else if (obj_ == BUFFER_) { if (typ1.getTypeQualifier() != NA_NUMERIC_TYPE) { // 4043 The operand of a $0~String0 function must be blob *CmpCommon::diags() << DgSqlCode(-4221) << DgString0("LOBINSERT") << DgString1("LARGEINT"); return NULL; } } else { // 4221 The operand of a $0~String0 function must be character. 
*CmpCommon::diags() << DgSqlCode(-4221) << DgString0("LOBINSERT") << DgString1("BLOB"); return NULL; } NAType * result = NULL; if (lobFsType() == REC_BLOB) { result = new HEAP SQLBlob(lobSize(), Lob_Invalid_Storage, typ1.supportsSQLnull()); } else if (lobFsType() == REC_CLOB) { result = new HEAP SQLClob(lobSize(), Lob_Invalid_Storage, typ1.supportsSQLnull()); } return result; } const NAType *LOBupdate::synthesizeType() { // Return blob type ValueId vid1 = child(0)->getValueId(); const NAType &typ1 = (NAType&)vid1.getType(); ValueId vid2 = child(1)->getValueId(); const NAType &typ2 = (NAType&)vid2.getType(); if ((obj_ == STRING_) || (obj_ == FILE_) || (obj_ == EXTERNAL_)) { if (typ1.getTypeQualifier() != NA_CHARACTER_TYPE) { // 4221 The operand of a $0~String0 function must be character. *CmpCommon::diags() << DgSqlCode(-4221) << DgString0("LOBUPDATE") << DgString1("CHARACTER"); return NULL; } } else if (obj_ == LOB_) { if (typ1.getTypeQualifier() != NA_LOB_TYPE) { // 4043 The operand of a $0~String0 function must be blob *CmpCommon::diags() << DgSqlCode(-4221) << DgString0("LOBUPDATE") << DgString1("LOB"); return NULL; } } else if (obj_ == BUFFER_) { if (typ1.getTypeQualifier() != NA_NUMERIC_TYPE) { // 4043 The operand of a $0~String0 function must be blob *CmpCommon::diags() << DgSqlCode(-4221) << DgString0("LOBUPDATE") << DgString1("LARGEINT"); return NULL; } ValueId vid3 = child(2)->getValueId(); const NAType &typ3 = (NAType&)vid3.getType(); if (typ3.getTypeQualifier() != NA_NUMERIC_TYPE) { // 4043 The operand of a $0~String0 function must be blob *CmpCommon::diags() << DgSqlCode(-4221) << DgString0("LOBUPDATE") << DgString1("LARGEINT"); return NULL; } } else { // 4221 The operand of a $0~String0 function must be character. *CmpCommon::diags() << DgSqlCode(-4221) << DgString0("LOBUPDATE") << DgString1("BLOB"); return NULL; } NAType * result = NULL; if (typ2.getFSDatatype() == REC_BLOB) { SQLBlob &blob = (SQLBlob&)typ2; result = new HEAP SQLBlob(blob.getLobLength(), Lob_Invalid_Storage, typ2.supportsSQLnull()); } else if (typ2.getFSDatatype() == REC_CLOB) { SQLClob &clob = (SQLClob&)typ2; result = new HEAP SQLClob(clob.getLobLength(), Lob_Invalid_Storage, typ2.supportsSQLnull()); } return result; } const NAType *LOBconvertHandle::synthesizeType() { // Return blob type ValueId vid1 = child(0)->getValueId(); const NAType &typ1 = (NAType&)vid1.getType(); NAType *result = NULL; if (obj_ == STRING_) { if (typ1.getTypeQualifier() != NA_LOB_TYPE) { // 4043 The operand of a $0~String0 function must be character. *CmpCommon::diags() << DgSqlCode(-4221) << DgString0("LOBCONVERTHANDLE") << DgString1("LOB"); return NULL; } if (typ1.getFSDatatype() == REC_BLOB) { SQLBlob& op1 = (SQLBlob&)vid1.getType(); result = new HEAP SQLBlob(op1.getLobLength(), Lob_Invalid_Storage, typ1.supportsSQLnull(), FALSE, TRUE); } else if (typ1.getFSDatatype() == REC_CLOB) { SQLClob& op1 = (SQLClob&)vid1.getType(); result = new HEAP SQLClob(op1.getLobLength(), Lob_Invalid_Storage, typ1.supportsSQLnull(), FALSE, TRUE); } return result; } else if (obj_ == LOB_) { if (typ1.getTypeQualifier() != NA_CHARACTER_TYPE) { // 4221 The operand of a $0~String0 function must be character. 
*CmpCommon::diags() << DgSqlCode(-4221) << DgString0("LOBCONVERTHANDLE") << DgString1("CHARACTER"); return NULL; } result = new HEAP SQLBlob(1000, Lob_Invalid_Storage, typ1.supportsSQLnull(), FALSE, FALSE); return result; } else { *CmpCommon::diags() << DgSqlCode(-4221) << DgString0("LOBCONVERTHANDLE") << DgString1("CHARACTER"); return NULL; } } const NAType *LOBconvert::synthesizeType() { // Return blob type ValueId vid1 = child(0)->getValueId(); const NAType &typ1 = (NAType&)vid1.getType(); if (obj_ == STRING_ || obj_ == FILE_) { if (typ1.getTypeQualifier() != NA_LOB_TYPE) { // 4043 The operand of a $0~String0 function must be blob *CmpCommon::diags() << DgSqlCode(-4221) << DgString0("LOBCONVERT") << DgString1("LOB"); return NULL; } SQLlob& op1 = (SQLlob&)vid1.getType(); Lng32 tgtSize = MINOF((Lng32)op1.getLobLength(), tgtSize_); NAType *result = new HEAP SQLVarChar(tgtSize, Lob_Invalid_Storage, typ1.supportsSQLnull()); return result; } else { // 4221 The operand of a $0~String0 function must be character. *CmpCommon::diags() << DgSqlCode(-4221) << DgString0("LOBCONVERT") << DgString1("LOB"); return NULL; } } const NAType *LOBextract::synthesizeType() { // Return blob type ValueId vid1 = child(0)->getValueId(); const NAType &typ1 = (NAType&)vid1.getType(); if (typ1.getTypeQualifier() != NA_LOB_TYPE) { // 4043 The operand of a $0~String0 function must be blob *CmpCommon::diags() << DgSqlCode(-4221) << DgString0("LOBEXTRACT") << DgString1("LOB"); return NULL; } SQLlob& op1 = (SQLlob&)vid1.getType(); Lng32 tgtSize = MINOF((Lng32)op1.getLobLength(), tgtSize_); NAType *result = new HEAP SQLVarChar(tgtSize, Lob_Invalid_Storage, typ1.supportsSQLnull()); return result; }
1
9,643
Why do we have || CharInfo::UnknownCharSet here? I do not see it in neighbouring statements. This is just for my understanding.
apache-trafodion
cpp
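The synthesizeType() implementations in the apache-trafodion file above all follow one shape: coerce the child's type, reject an operand whose type qualifier is wrong by raising a diagnostic (the -40xx codes written to CmpCommon::diags()) and returning NULL, and otherwise return a freshly allocated result type. What follows is only a minimal, self-contained sketch of that control flow; Type, emitDiag and synthesizeUpperType are invented stand-ins for illustration, not the real NAType/CmpCommon/Upper classes.

#include <iostream>
#include <memory>

// Hypothetical stand-in for NAType's type qualifier.
enum TypeQualifier { CHARACTER_TYPE, NUMERIC_TYPE, UNKNOWN_TYPE };

// Hypothetical stand-in for NAType: just a qualifier and nullability.
struct Type {
  TypeQualifier qual;
  bool nullable;
};

// Stand-in for writing an error into the diagnostics area
// (the real code streams DgSqlCode(-4043) etc. into CmpCommon::diags()).
void emitDiag(int sqlcode, const char* fn) {
  std::cerr << "ERROR[" << -sqlcode << "] the operand of " << fn
            << " must be a character type\n";
}

// Sketch of the recurring pattern: validate the operand's qualifier,
// report and return null on error, otherwise allocate the result type.
std::unique_ptr<Type> synthesizeUpperType(const Type& operand) {
  if (operand.qual != CHARACTER_TYPE) {
    emitDiag(-4043, "UPPER");   // 4043: operand must be character
    return nullptr;             // mirrors "return NULL" in the file above
  }
  // The result keeps the operand's nullability, as the real functions do.
  return std::unique_ptr<Type>(new Type{CHARACTER_TYPE, operand.nullable});
}

int main() {
  Type charOp{CHARACTER_TYPE, true};
  Type numOp{NUMERIC_TYPE, false};
  std::cout << (synthesizeUpperType(charOp) ? "ok" : "error") << "\n"; // ok
  std::cout << (synthesizeUpperType(numOp)  ? "ok" : "error") << "\n"; // error
  return 0;
}

Returning a null result after populating the diagnostics area is how the real synthesizeType() functions signal a typing failure to their callers, which then abort binding for that expression.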
@@ -180,6 +180,7 @@ namespace NLog.Targets /// <docgen category='Web Service Options' order='10' /> public string XmlRootNamespace { get; set; } + private readonly Dictionary<HttpWebRequest, KeyValuePair<DateTime,AsyncContinuation>> _pendingRequests = new Dictionary<HttpWebRequest, KeyValuePair<DateTime,AsyncContinuation>>(); /// <summary> /// Calls the target method. Must be implemented in concrete classes.
1
// // Copyright (c) 2004-2016 Jaroslaw Kowalski <[email protected]>, Kim Christensen, Julian Verdurmen // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * Neither the name of Jaroslaw Kowalski nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF // THE POSSIBILITY OF SUCH DAMAGE. // namespace NLog.Targets { using System; using System.ComponentModel; using System.Collections.Generic; using System.Globalization; using System.IO; using System.Net; using System.Text; using System.Xml; using NLog.Common; using NLog.Internal; using NLog.Config; /// <summary> /// Calls the specified web service on each log message. /// </summary> /// <seealso href="https://github.com/nlog/nlog/wiki/WebService-target">Documentation on NLog Wiki</seealso> /// <remarks> /// The web service must implement a method that accepts a number of string parameters. /// </remarks> /// <example> /// <p> /// To set up the target in the <a href="config.html">configuration file</a>, /// use the following syntax: /// </p> /// <code lang="XML" source="examples/targets/Configuration File/WebService/NLog.config" /> /// <p> /// This assumes just one target and a single rule. More configuration /// options are described <a href="config.html">here</a>. /// </p> /// <p> /// To set up the log target programmatically use code like this: /// </p> /// <code lang="C#" source="examples/targets/Configuration API/WebService/Simple/Example.cs" /> /// <p>The example web service that works with this example is shown below</p> /// <code lang="C#" source="examples/targets/Configuration API/WebService/Simple/WebService1/Service1.asmx.cs" /> /// </example> [Target("WebService")] public sealed class WebServiceTarget : MethodCallTargetBase { private const string SoapEnvelopeNamespaceUri = "http://schemas.xmlsoap.org/soap/envelope/"; private const string Soap12EnvelopeNamespaceUri = "http://www.w3.org/2003/05/soap-envelope"; /// <summary> /// dictionary that maps a concrete <see cref="HttpPostFormatterBase"/> implementation /// to a specific <see cref="WebServiceProtocol"/>-value. 
/// </summary> private static Dictionary<WebServiceProtocol, Func<WebServiceTarget, HttpPostFormatterBase>> _postFormatterFactories = new Dictionary<WebServiceProtocol, Func<WebServiceTarget, HttpPostFormatterBase>>() { { WebServiceProtocol.Soap11, t => new HttpPostSoap11Formatter(t)}, { WebServiceProtocol.Soap12, t => new HttpPostSoap12Formatter(t)}, { WebServiceProtocol.HttpPost, t => new HttpPostFormEncodedFormatter(t)}, { WebServiceProtocol.JsonPost, t => new HttpPostJsonFormatter(t)}, { WebServiceProtocol.XmlPost, t => new HttpPostXmlDocumentFormatter(t)}, }; /// <summary> /// Initializes a new instance of the <see cref="WebServiceTarget" /> class. /// </summary> public WebServiceTarget() { this.Protocol = WebServiceProtocol.Soap11; //default NO utf-8 bom const bool writeBOM = false; this.Encoding = new UTF8Encoding(writeBOM); this.IncludeBOM = writeBOM; } /// <summary> /// Initializes a new instance of the <see cref="WebServiceTarget" /> class. /// </summary> /// <param name="name">Name of the target</param> public WebServiceTarget(string name) : this() { this.Name = name; } /// <summary> /// Gets or sets the web service URL. /// </summary> /// <docgen category='Web Service Options' order='10' /> public Uri Url { get; set; } /// <summary> /// Gets or sets the Web service method name. Only used with Soap. /// </summary> /// <docgen category='Web Service Options' order='10' /> public string MethodName { get; set; } /// <summary> /// Gets or sets the Web service namespace. Only used with Soap. /// </summary> /// <docgen category='Web Service Options' order='10' /> public string Namespace { get; set; } /// <summary> /// Gets or sets the protocol to be used when calling web service. /// </summary> /// <docgen category='Web Service Options' order='10' /> [DefaultValue("Soap11")] public WebServiceProtocol Protocol { get { return _activeProtocol.Key; } set { _activeProtocol = new KeyValuePair<WebServiceProtocol, HttpPostFormatterBase>(value, null); } } private KeyValuePair<WebServiceProtocol, HttpPostFormatterBase> _activeProtocol = new KeyValuePair<WebServiceProtocol, HttpPostFormatterBase>(); /// <summary> /// Should we include the BOM (Byte-order-mark) for UTF? Influences the <see cref="Encoding"/> property. /// /// This will only work for UTF-8. /// </summary> public bool? IncludeBOM { get; set; } /// <summary> /// Gets or sets the encoding. /// </summary> /// <docgen category='Web Service Options' order='10' /> public Encoding Encoding { get; set; } /// <summary> /// Gets or sets a value whether escaping be done according to Rfc3986 (Supports Internationalized Resource Identifiers - IRIs) /// </summary> /// <value>A value of <c>true</c> if Rfc3986; otherwise, <c>false</c> for legacy Rfc2396.</value> /// <docgen category='Web Service Options' order='10' /> public bool EscapeDataRfc3986 { get; set; } /// <summary> /// Gets or sets a value whether escaping be done according to the old NLog style (Very non-standard) /// </summary> /// <value>A value of <c>true</c> if legacy encoding; otherwise, <c>false</c> for standard UTF8 encoding.</value> /// <docgen category='Web Service Options' order='10' /> public bool EscapeDataNLogLegacy { get; set; } /// <summary> /// Gets or sets the name of the root XML element, /// if POST of XML document chosen. /// If so, this property must not be <c>null</c>. /// (see <see cref="Protocol"/> and <see cref="WebServiceProtocol.XmlPost"/>). 
/// </summary> /// <docgen category='Web Service Options' order='10' /> public string XmlRoot { get; set; } /// <summary> /// Gets or sets the (optional) root namespace of the XML document, /// if POST of XML document chosen. /// (see <see cref="Protocol"/> and <see cref="WebServiceProtocol.XmlPost"/>). /// </summary> /// <docgen category='Web Service Options' order='10' /> public string XmlRootNamespace { get; set; } /// <summary> /// Calls the target method. Must be implemented in concrete classes. /// </summary> /// <param name="parameters">Method call parameters.</param> protected override void DoInvoke(object[] parameters) { // method is not used, instead asynchronous overload will be used throw new NotImplementedException(); } /// <summary> /// Invokes the web service method. /// </summary> /// <param name="parameters">Parameters to be passed.</param> /// <param name="continuation">The continuation.</param> protected override void DoInvoke(object[] parameters, AsyncContinuation continuation) { var request = (HttpWebRequest)WebRequest.Create(BuildWebServiceUrl(parameters)); Func<AsyncCallback, IAsyncResult> begin = (r) => request.BeginGetRequestStream(r, null); Func<IAsyncResult, Stream> getStream = request.EndGetRequestStream; DoInvoke(parameters, continuation, request, begin, getStream); } internal void DoInvoke(object[] parameters, AsyncContinuation continuation, HttpWebRequest request, Func<AsyncCallback, IAsyncResult> beginFunc, Func<IAsyncResult, Stream> getStreamFunc) { Stream postPayload = null; if (Protocol == WebServiceProtocol.HttpGet) { PrepareGetRequest(request); } else { if (_activeProtocol.Value == null) _activeProtocol = new KeyValuePair<WebServiceProtocol, HttpPostFormatterBase>(this.Protocol, _postFormatterFactories[this.Protocol](this)); postPayload = _activeProtocol.Value.PrepareRequest(request, parameters); } AsyncContinuation sendContinuation = ex => { if (ex != null) { continuation(ex); return; } request.BeginGetResponse( r => { try { using (var response = request.EndGetResponse(r)) { } continuation(null); } catch (Exception ex2) { InternalLogger.Error(ex2, "Error when sending to Webservice."); if (ex2.MustBeRethrown()) { throw; } continuation(ex2); } }, null); }; if (postPayload != null && postPayload.Length > 0) { postPayload.Position = 0; beginFunc( result => { try { using (Stream stream = getStreamFunc(result)) { WriteStreamAndFixPreamble(postPayload, stream, this.IncludeBOM, this.Encoding); postPayload.Dispose(); } sendContinuation(null); } catch (Exception ex) { postPayload.Dispose(); InternalLogger.Error(ex, "Error when sending to Webservice."); if (ex.MustBeRethrown()) { throw; } continuation(ex); } }); } else { sendContinuation(null); } } /// <summary> /// Builds the URL to use when calling the web service for a message, depending on the WebServiceProtocol. 
/// </summary> /// <param name="parameterValues"></param> /// <returns></returns> private Uri BuildWebServiceUrl(object[] parameterValues) { if (this.Protocol != WebServiceProtocol.HttpGet) { return this.Url; } UrlHelper.EscapeEncodingFlag encodingFlags = UrlHelper.GetUriStringEncodingFlags(EscapeDataNLogLegacy, false, EscapeDataRfc3986); //if the protocol is HttpGet, we need to add the parameters to the query string of the url var queryParameters = new StringBuilder(); string separator = string.Empty; for (int i = 0; i < this.Parameters.Count; i++) { queryParameters.Append(separator); queryParameters.Append(this.Parameters[i].Name); queryParameters.Append("="); string parameterValue = Convert.ToString(parameterValues[i], CultureInfo.InvariantCulture); UrlHelper.EscapeDataEncode(parameterValue, queryParameters, encodingFlags); separator = "&"; } var builder = new UriBuilder(this.Url); //append our query string to the URL following //the recommendations at https://msdn.microsoft.com/en-us/library/system.uribuilder.query.aspx if (builder.Query != null && builder.Query.Length > 1) { builder.Query = string.Concat(builder.Query.Substring(1), "&", queryParameters.ToString()); } else { builder.Query = queryParameters.ToString(); } return builder.Uri; } private void PrepareGetRequest(HttpWebRequest request) { request.Method = "GET"; } /// <summary> /// Write from input to output. Fix the UTF-8 bom /// </summary> /// <param name="input"></param> /// <param name="output"></param> /// <param name="writeUtf8BOM"></param> /// <param name="encoding"></param> private static void WriteStreamAndFixPreamble(Stream input, Stream output, bool? writeUtf8BOM, Encoding encoding) { //only when utf-8 encoding is used, the Encoding preamble is optional var nothingToDo = writeUtf8BOM == null || !(encoding is UTF8Encoding); const int preambleSize = 3; if (!nothingToDo) { //it's UTF-8 var hasBomInEncoding = encoding.GetPreamble().Length == preambleSize; //BOM already in Encoding. nothingToDo = writeUtf8BOM.Value && hasBomInEncoding; //Bom already not in Encoding nothingToDo = nothingToDo || !writeUtf8BOM.Value && !hasBomInEncoding; } var offset = nothingToDo ? 
0 : preambleSize; input.CopyWithOffset(output, offset); } /// <summary> /// base class for POST formatters, that /// implement former <c>PrepareRequest()</c> method, /// that creates the content for /// the requested kind of HTTP request /// </summary> private abstract class HttpPostFormatterBase { protected HttpPostFormatterBase(WebServiceTarget target) { Target = target; } protected abstract string ContentType { get; } protected WebServiceTarget Target { get; private set; } public MemoryStream PrepareRequest(HttpWebRequest request, object[] parameterValues) { InitRequest(request); var ms = new MemoryStream(); WriteContent(ms, parameterValues); return ms; } protected virtual void InitRequest(HttpWebRequest request) { request.Method = "POST"; request.ContentType = string.Format("{1}; charset={0}", Target.Encoding.WebName, ContentType); } protected abstract void WriteContent(MemoryStream ms, object[] parameterValues); } private class HttpPostFormEncodedFormatter : HttpPostTextFormatterBase { readonly UrlHelper.EscapeEncodingFlag encodingFlags; public HttpPostFormEncodedFormatter(WebServiceTarget target) : base(target) { encodingFlags = UrlHelper.GetUriStringEncodingFlags(target.EscapeDataNLogLegacy, true, target.EscapeDataRfc3986); } protected override string ContentType { get { return "application/x-www-form-urlencoded"; } } protected override string Separator { get { return "&"; } } protected override string GetFormattedContent(string parametersContent) { return parametersContent; } protected override string GetFormattedParameter(MethodCallParameter parameter, object value) { string parameterValue = Convert.ToString(value, CultureInfo.InvariantCulture); if (string.IsNullOrEmpty(parameterValue)) { return string.Concat(parameter.Name, "="); } var sb = new StringBuilder(parameter.Name.Length + parameterValue.Length + 20); sb.Append(parameter.Name).Append("="); UrlHelper.EscapeDataEncode(parameterValue, sb, encodingFlags); return sb.ToString(); } } private class HttpPostJsonFormatter : HttpPostTextFormatterBase { public HttpPostJsonFormatter(WebServiceTarget target) : base(target) { } protected override string ContentType { get { return "application/json"; } } protected override string Separator { get { return ","; } } protected override string GetFormattedContent(string parametersContent) { return string.Concat("{", parametersContent, "}"); } protected override string GetFormattedParameter(MethodCallParameter parameter, object value) { return string.Concat("\"", parameter.Name, "\":", GetJsonValueString(value)); } private string GetJsonValueString(object value) { return ConfigurationItemFactory.Default.JsonSerializer.SerializeObject(value); } } private class HttpPostSoap11Formatter : HttpPostSoapFormatterBase { public HttpPostSoap11Formatter(WebServiceTarget target) : base(target) { } protected override string SoapEnvelopeNamespace { get { return WebServiceTarget.SoapEnvelopeNamespaceUri; } } protected override string SoapName { get { return "soap"; } } protected override void InitRequest(HttpWebRequest request) { base.InitRequest(request); string soapAction; if (Target.Namespace.EndsWith("/", StringComparison.Ordinal)) { soapAction = string.Concat(Target.Namespace, Target.MethodName); } else { soapAction = string.Concat(Target.Namespace, "/", Target.MethodName); } request.Headers["SOAPAction"] = soapAction; } } private class HttpPostSoap12Formatter : HttpPostSoapFormatterBase { public HttpPostSoap12Formatter(WebServiceTarget target) : base(target) { } protected override string 
SoapEnvelopeNamespace { get { return WebServiceTarget.Soap12EnvelopeNamespaceUri; } } protected override string SoapName { get { return "soap12"; } } } private abstract class HttpPostSoapFormatterBase : HttpPostXmlFormatterBase { private readonly XmlWriterSettings _xmlWriterSettings; protected HttpPostSoapFormatterBase(WebServiceTarget target) : base(target) { _xmlWriterSettings = new XmlWriterSettings { Encoding = target.Encoding }; } protected abstract string SoapEnvelopeNamespace { get; } protected abstract string SoapName { get; } protected override void WriteContent(MemoryStream ms, object[] parameterValues) { XmlWriter xtw = XmlWriter.Create(ms, _xmlWriterSettings); xtw.WriteStartElement(SoapName, "Envelope", SoapEnvelopeNamespace); xtw.WriteStartElement("Body", SoapEnvelopeNamespace); xtw.WriteStartElement(Target.MethodName, Target.Namespace); WriteAllParametersToCurrenElement(xtw, parameterValues); xtw.WriteEndElement(); // methodname xtw.WriteEndElement(); // Body xtw.WriteEndElement(); // soap:Envelope xtw.Flush(); } } private abstract class HttpPostTextFormatterBase : HttpPostFormatterBase { protected HttpPostTextFormatterBase(WebServiceTarget target) : base(target) { } protected abstract string Separator { get; } protected abstract string GetFormattedContent(string parametersContent); protected abstract string GetFormattedParameter(MethodCallParameter parameter, object value); protected override void WriteContent(MemoryStream ms, object[] parameterValues) { var sw = new StreamWriter(ms, Target.Encoding); sw.Write(string.Empty); var sb = new StringBuilder(); for (int i = 0; i < Target.Parameters.Count; i++) { if (sb.Length > 0) sb.Append(Separator); sb.Append(GetFormattedParameter(Target.Parameters[i], parameterValues[i])); } string content = GetFormattedContent(sb.ToString()); sw.Write(content); sw.Flush(); } } private class HttpPostXmlDocumentFormatter : HttpPostXmlFormatterBase { private readonly XmlWriterSettings _xmlWriterSettings; protected override string ContentType { get { return "application/xml"; } } public HttpPostXmlDocumentFormatter(WebServiceTarget target) : base(target) { if (string.IsNullOrEmpty(target.XmlRoot)) throw new InvalidOperationException("WebServiceProtocol.Xml requires WebServiceTarget.XmlRoot to be set."); _xmlWriterSettings = new XmlWriterSettings { Encoding = target.Encoding, OmitXmlDeclaration = true, Indent = false }; } protected override void WriteContent(MemoryStream ms, object[] parameterValues) { XmlWriter xtw = XmlWriter.Create(ms, _xmlWriterSettings); xtw.WriteStartElement(Target.XmlRoot, Target.XmlRootNamespace); WriteAllParametersToCurrenElement(xtw, parameterValues); xtw.WriteEndElement(); xtw.Flush(); } } private abstract class HttpPostXmlFormatterBase : HttpPostFormatterBase { protected HttpPostXmlFormatterBase(WebServiceTarget target) : base(target) { } protected override string ContentType { get { return "text/xml"; } } protected void WriteAllParametersToCurrenElement(XmlWriter currentXmlWriter, object[] parameterValues) { for (int i = 0; i < Target.Parameters.Count; i++) { currentXmlWriter.WriteElementString(Target.Parameters[i].Name, Convert.ToString(parameterValues[i], CultureInfo.InvariantCulture)); } } } } }
1
14,515
isn't `HttpWebRequest` an expensive key value?
NLog-NLog
.cs
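One way to read the reviewer's question two lines above: keying the pending-request map on the `HttpWebRequest` itself keeps every heavyweight request object (headers, buffers, streams) rooted for as long as its entry stays in the dictionary, even though lookups only rely on reference identity. The following is a minimal, purely hypothetical C# sketch of a lighter-weight alternative — none of this is NLog code, and the names (`PendingRequestDemo`, `Track`, `Complete`, `Continuation`) are invented for illustration — that keys the map on a cheap correlation id instead:

```csharp
using System;
using System.Collections.Concurrent;
using System.Collections.Generic;
using System.Net;
using System.Threading;

internal static class PendingRequestDemo
{
    // Stand-in for NLog's AsyncContinuation delegate.
    internal delegate void Continuation(Exception exception);

    // Keyed on a cheap long instead of the HttpWebRequest object itself,
    // so the map entry only holds what a timeout sweep actually needs.
    private static readonly ConcurrentDictionary<long, KeyValuePair<DateTime, Continuation>> Pending =
        new ConcurrentDictionary<long, KeyValuePair<DateTime, Continuation>>();

    private static long _nextId;

    internal static long Track(Continuation continuation)
    {
        long id = Interlocked.Increment(ref _nextId);
        Pending[id] = new KeyValuePair<DateTime, Continuation>(DateTime.UtcNow, continuation);
        return id;
    }

    internal static void Complete(long id, Exception exception)
    {
        KeyValuePair<DateTime, Continuation> entry;
        if (Pending.TryRemove(id, out entry))
        {
            entry.Value(exception);
        }
    }

    private static void Main()
    {
        var request = (HttpWebRequest)WebRequest.Create("http://example.invalid/");
        long id = Track(ex => Console.WriteLine(ex == null ? "completed" : ex.Message));
        // ... the request would be sent here; the completion callback only needs the id.
        Complete(id, null);
        GC.KeepAlive(request);
    }
}
```

Whether the trade-off matters depends on how long entries live in the map; the sketch only shows the shape of an alternative, not what the patch should have done.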
@@ -148,6 +148,14 @@ class Proposal < ActiveRecord::Base observations.find_by(user: user) end + def eligible_observers + if observations.count > 0 + User.where(client_slug: client).where('id not in (?)', observations.pluck('user_id')) + else + User.where(client_slug: client) + end + end + def add_observer(email_or_user, adder=nil, reason=nil) user = find_user(email_or_user)
1
class Proposal < ActiveRecord::Base include WorkflowModel include ValueHelper has_paper_trail class_name: 'C2Version' CLIENT_MODELS = [] # this gets populated later FLOWS = %w(parallel linear).freeze workflow do state :pending do event :approve, :transitions_to => :approved event :restart, :transitions_to => :pending event :cancel, :transitions_to => :cancelled end state :approved do event :restart, :transitions_to => :pending event :cancel, :transitions_to => :cancelled event :approve, :transitions_to => :approved do halt # no need to trigger a state transition end end state :cancelled do event :approve, :transitions_to => :cancelled do halt # can't escape end end end has_many :approvals has_many :individual_approvals, ->{ individual }, class_name: 'Approvals::Individual' has_many :approvers, through: :individual_approvals, source: :user has_many :api_tokens, through: :individual_approvals has_many :attachments, dependent: :destroy has_many :approval_delegates, through: :approvers, source: :outgoing_delegations has_many :comments, dependent: :destroy has_many :delegates, through: :approval_delegates, source: :assignee has_many :observations, -> { where("proposal_roles.role_id in (select roles.id from roles where roles.name='observer')") } has_many :observers, through: :observations, source: :user belongs_to :client_data, polymorphic: true, dependent: :destroy belongs_to :requester, class_name: 'User' # The following list also servers as an interface spec for client_datas # Note: clients may implement: # :fields_for_display # :version # Note: clients should also implement :version delegate :client, to: :client_data, allow_nil: true validates :client_data_type, inclusion: { in: ->(_) { self.client_model_names }, message: "%{value} is not a valid client model type. Valid client model types are: #{CLIENT_MODELS.inspect}", allow_blank: true } validates :flow, presence: true, inclusion: {in: FLOWS} validates :requester_id, presence: true validates :public_id, uniqueness: true, allow_nil: true self.statuses.each do |status| scope status, -> { where(status: status) } end scope :closed, -> { where(status: ['approved', 'cancelled']) } #TODO: Backfill to change approvals in 'reject' status to 'cancelled' status scope :cancelled, -> { where(status: 'cancelled') } # @todo - this should probably be the only entry into the approval system def root_approval self.approvals.where(parent: nil).first end def parallel? self.flow == 'parallel' end def linear? self.flow == 'linear' end def delegate?(user) self.approval_delegates.exists?(assignee_id: user.id) end def existing_approval_for(user) where_clause = <<-SQL user_id = :user_id OR user_id IN (SELECT assigner_id FROM approval_delegates WHERE assignee_id = :user_id) OR user_id IN (SELECT assignee_id FROM approval_delegates WHERE assigner_id = :user_id) SQL self.approvals.where(where_clause, user_id: user.id).first end # Returns a list of all users involved with the Proposal. def users # TODO use SQL results = self.approvers + self.observers + self.delegates + [self.requester] results.compact.uniq end alias_method :subscribers, :users def root_approval=(root) old_approvals = self.approvals.to_a approval_list = root.pre_order_tree_traversal approval_list.each { |a| a.proposal = self } self.approvals = approval_list # position may be out of whack, so we reset it approval_list.each_with_index do |approval, idx| approval.set_list_position(idx + 1) # start with 1 end self.clean_up_old_approvals(old_approvals, approval_list) root.initialize! 
self.reset_status() end def clean_up_old_approvals(old_approvals, approval_list) # destroy any old approvals that are not a part of approval_list (old_approvals - approval_list).each do |appr| appr.destroy() if Approval.exists?(appr.id) end end # convenience wrapper for setting a single approver def approver=(approver) # Don't recreate the approval existing = self.existing_approval_for(approver) if existing.nil? self.root_approval = Approvals::Individual.new(user: approver) end end def reset_status() unless self.cancelled? # no escape from cancelled if self.root_approval.nil? || self.root_approval.approved? self.update(status: 'approved') else self.update(status: 'pending') end end end def has_subscriber?(user) users.include?(user) end def existing_observation_for(user) observations.find_by(user: user) end def add_observer(email_or_user, adder=nil, reason=nil) user = find_user(email_or_user) # this authz check is here instead of in a Policy because the Policy classes # are applied to the current_user, not (as in this case) the user being acted upon. if client_data && !client_data.slug_matches?(user) fail Pundit::NotAuthorizedError.new("May not add observer belonging to a different organization.") end unless existing_observation_for(user) create_new_observation(user, adder, reason) end end def add_requester(email) user = User.for_email(email) self.set_requester(user) end def set_requester(user) self.update_attributes!(requester_id: user.id) end # Approvals in which someone can take action def currently_awaiting_approvals self.individual_approvals.actionable end def currently_awaiting_approvers self.approvers.merge(self.currently_awaiting_approvals) end def awaiting_approver?(user) self.currently_awaiting_approvers.include?(user) end # delegated, with a fallback # TODO refactor to class method in a module def delegate_with_default(method) data = self.client_data result = nil if data && data.respond_to?(method) result = data.public_send(method) end if result.present? result elsif block_given? yield else result end end ## delegated methods ## def name self.delegate_with_default(:name) { "Request #{public_id}" } end def fields_for_display # TODO better default self.delegate_with_default(:fields_for_display) { [] } end # Be careful if altering the identifier. You run the risk of "expiring" all # pending approval emails def version [ self.updated_at.to_i, self.client_data.try(:version) ].compact.max end ####################### def restart # Note that none of the state machine's history is stored self.api_tokens.update_all(expires_at: Time.zone.now) self.approvals.update_all(status: 'pending') if self.root_approval self.root_approval.initialize! end Dispatcher.deliver_new_proposal_emails(self) end # Returns True if the user is an "active" approver and has acted on the proposal def is_active_approver?(user) self.individual_approvals.non_pending.exists?(user_id: user.id) end def self.client_model_names CLIENT_MODELS.map(&:to_s) end def self.client_slugs CLIENT_MODELS.map(&:client) end protected def create_new_observation(user, adder, reason) ObservationCreator.new( observer: user, proposal_id: id, reason: reason, observer_adder: adder ).run end private def find_user(email_or_user) if email_or_user.is_a?(User) email_or_user else User.for_email_with_slug(email_or_user, client) end end end
1
15,328
couldn't we run this query whether there are observations or not?
18F-C2
rb
@@ -125,12 +125,12 @@ class CmdlineTest(unittest.TestCase): @mock.patch("logging.getLogger") def test_cmdline_other_task(self, logger): - luigi.run(['--local-scheduler', '--no-lock', 'SomeTask', '--n', '1000']) + luigi.run(['SomeTask', '--local-scheduler', '--no-lock', '--n', '1000']) self.assertEqual(dict(MockTarget.fs.get_all_data()), {'/tmp/test_1000': b'done'}) @mock.patch("logging.getLogger") def test_cmdline_ambiguous_class(self, logger): - self.assertRaises(Exception, luigi.run, ['--local-scheduler', '--no-lock', 'AmbiguousClass']) + self.assertRaises(Exception, luigi.run, ['AmbiguousClass', '--local-scheduler', '--no-lock']) @mock.patch("logging.getLogger") @mock.patch("logging.StreamHandler")
1
# -*- coding: utf-8 -*- # # Copyright 2012-2015 Spotify AB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from __future__ import print_function try: import ConfigParser except ImportError: import configparser as ConfigParser import mock import os import subprocess from helpers import unittest from luigi import six import luigi import luigi.cmdline from luigi.mock import MockTarget class SomeTask(luigi.Task): n = luigi.IntParameter() def output(self): return MockTarget('/tmp/test_%d' % self.n) def run(self): f = self.output().open('w') f.write('done') f.close() class AmbiguousClass(luigi.Task): pass class AmbiguousClass(luigi.Task): # NOQA pass class TaskWithSameName(luigi.Task): def run(self): self.x = 42 class TaskWithSameName(luigi.Task): # NOQA # there should be no ambiguity def run(self): self.x = 43 class WriteToFile(luigi.Task): filename = luigi.Parameter() def output(self): return luigi.LocalTarget(self.filename) def run(self): f = self.output().open('w') print('foo', file=f) f.close() class FooBaseClass(luigi.Task): x = luigi.Parameter(default='foo_base_default') class FooSubClass(FooBaseClass): pass class ATaskThatFails(luigi.Task): def run(self): raise ValueError() class RequiredConfig(luigi.Config): required_test_param = luigi.Parameter() class TaskThatRequiresConfig(luigi.WrapperTask): def requires(self): if RequiredConfig().required_test_param == 'A': return SubTaskThatFails() class SubTaskThatFails(luigi.Task): def complete(self): return False def run(self): raise Exception() class CmdlineTest(unittest.TestCase): def setUp(self): MockTarget.fs.clear() @mock.patch("logging.getLogger") def test_cmdline_main_task_cls(self, logger): luigi.run(['--local-scheduler', '--no-lock', '--n', '100'], main_task_cls=SomeTask) self.assertEqual(dict(MockTarget.fs.get_all_data()), {'/tmp/test_100': b'done'}) @mock.patch("logging.getLogger") def test_cmdline_local_scheduler(self, logger): luigi.run(['SomeTask', '--no-lock', '--n', '101'], local_scheduler=True) self.assertEqual(dict(MockTarget.fs.get_all_data()), {'/tmp/test_101': b'done'}) @mock.patch("logging.getLogger") def test_cmdline_other_task(self, logger): luigi.run(['--local-scheduler', '--no-lock', 'SomeTask', '--n', '1000']) self.assertEqual(dict(MockTarget.fs.get_all_data()), {'/tmp/test_1000': b'done'}) @mock.patch("logging.getLogger") def test_cmdline_ambiguous_class(self, logger): self.assertRaises(Exception, luigi.run, ['--local-scheduler', '--no-lock', 'AmbiguousClass']) @mock.patch("logging.getLogger") @mock.patch("logging.StreamHandler") def test_setup_interface_logging(self, handler, logger): handler.return_value = mock.Mock(name="stream_handler") with mock.patch("luigi.interface.setup_interface_logging.has_run", new=False): luigi.interface.setup_interface_logging() self.assertEqual([mock.call(handler.return_value)], logger.return_value.addHandler.call_args_list) with mock.patch("luigi.interface.setup_interface_logging.has_run", new=False): if six.PY2: error = ConfigParser.NoSectionError else: error = KeyError self.assertRaises(error, 
luigi.interface.setup_interface_logging, '/blah') @mock.patch("warnings.warn") @mock.patch("luigi.interface.setup_interface_logging") def test_cmdline_logger(self, setup_mock, warn): with mock.patch("luigi.interface.core") as env_params: env_params.return_value.logging_conf_file = '' env_params.return_value.log_level = 'DEBUG' env_params.return_value.parallel_scheduling_processes = 1 luigi.run(['SomeTask', '--n', '7', '--local-scheduler', '--no-lock']) self.assertEqual([mock.call('', 'DEBUG')], setup_mock.call_args_list) with mock.patch("luigi.configuration.get_config") as getconf: getconf.return_value.get.side_effect = ConfigParser.NoOptionError(section='foo', option='bar') getconf.return_value.getint.return_value = 0 luigi.interface.setup_interface_logging.call_args_list = [] luigi.run(['SomeTask', '--n', '42', '--local-scheduler', '--no-lock']) self.assertEqual([], setup_mock.call_args_list) @mock.patch('argparse.ArgumentParser.print_usage') def test_non_existent_class(self, print_usage): self.assertRaises(luigi.task_register.TaskClassNotFoundException, luigi.run, ['--local-scheduler', '--no-lock', 'XYZ']) @mock.patch('argparse.ArgumentParser.print_usage') def test_no_task(self, print_usage): self.assertRaises(SystemExit, luigi.run, ['--local-scheduler', '--no-lock']) def test_luigid_logging_conf(self): with mock.patch('luigi.server.run') as server_run, \ mock.patch('logging.config.fileConfig') as fileConfig: luigi.cmdline.luigid([]) self.assertTrue(server_run.called) # the default test configuration specifies a logging conf file fileConfig.assert_called_with("test/testconfig/logging.cfg") def test_luigid_no_configure_logging(self): with mock.patch('luigi.server.run') as server_run, \ mock.patch('logging.basicConfig') as basicConfig, \ mock.patch('luigi.configuration.get_config') as get_config: get_config.return_value.getboolean.return_value = True # no_configure_logging=True luigi.cmdline.luigid([]) self.assertTrue(server_run.called) self.assertTrue(basicConfig.called) def test_luigid_no_logging_conf(self): with mock.patch('luigi.server.run') as server_run, \ mock.patch('logging.basicConfig') as basicConfig, \ mock.patch('luigi.configuration.get_config') as get_config: get_config.return_value.getboolean.return_value = False # no_configure_logging=False get_config.return_value.get.return_value = None # logging_conf_file=None luigi.cmdline.luigid([]) self.assertTrue(server_run.called) self.assertTrue(basicConfig.called) def test_luigid_missing_logging_conf(self): with mock.patch('luigi.server.run') as server_run, \ mock.patch('logging.basicConfig') as basicConfig, \ mock.patch('luigi.configuration.get_config') as get_config: get_config.return_value.getboolean.return_value = False # no_configure_logging=False get_config.return_value.get.return_value = "nonexistent.cfg" # logging_conf_file=None self.assertRaises(Exception, luigi.cmdline.luigid, []) self.assertFalse(server_run.called) self.assertFalse(basicConfig.called) class InvokeOverCmdlineTest(unittest.TestCase): def _run_cmdline(self, args): env = os.environ.copy() env['PYTHONPATH'] = env.get('PYTHONPATH', '') + ':.:test' print('Running: ' + ' '.join(args)) # To simplify rerunning failing tests p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env) stdout, stderr = p.communicate() # Unfortunately subprocess.check_output is 2.7+ return p.returncode, stdout, stderr def test_bin_luigi(self): t = luigi.LocalTarget(is_tmp=True) args = ['./bin/luigi', '--module', 'cmdline_test', 'WriteToFile', '--filename', 
t.path, '--local-scheduler', '--no-lock'] self._run_cmdline(args) self.assertTrue(t.exists()) def test_direct_python(self): t = luigi.LocalTarget(is_tmp=True) args = ['python', 'test/cmdline_test.py', 'WriteToFile', '--filename', t.path, '--local-scheduler', '--no-lock'] self._run_cmdline(args) self.assertTrue(t.exists()) def test_python_module(self): t = luigi.LocalTarget(is_tmp=True) args = ['python', '-m', 'luigi', '--module', 'cmdline_test', 'WriteToFile', '--filename', t.path, '--local-scheduler', '--no-lock'] self._run_cmdline(args) self.assertTrue(t.exists()) def test_direct_python_help(self): returncode, stdout, stderr = self._run_cmdline(['python', 'test/cmdline_test.py', '--help-all']) self.assertTrue(stdout.find(b'--FooBaseClass-x') != -1) self.assertFalse(stdout.find(b'--x') != -1) def test_direct_python_help_class(self): returncode, stdout, stderr = self._run_cmdline(['python', 'test/cmdline_test.py', 'FooBaseClass', '--help']) self.assertTrue(stdout.find(b'--FooBaseClass-x') != -1) self.assertTrue(stdout.find(b'--x') != -1) def test_bin_luigi_help(self): returncode, stdout, stderr = self._run_cmdline(['./bin/luigi', '--module', 'cmdline_test', '--help-all']) self.assertTrue(stdout.find(b'--FooBaseClass-x') != -1) self.assertFalse(stdout.find(b'--x') != -1) def test_python_module_luigi_help(self): returncode, stdout, stderr = self._run_cmdline(['python', '-m', 'luigi', '--module', 'cmdline_test', '--help-all']) self.assertTrue(stdout.find(b'--FooBaseClass-x') != -1) self.assertFalse(stdout.find(b'--x') != -1) def test_bin_luigi_help_no_module(self): returncode, stdout, stderr = self._run_cmdline(['./bin/luigi', '--help']) self.assertTrue(stdout.find(b'usage:') != -1) def test_bin_luigi_help_not_spammy(self): """ Test that `luigi --help` fits on one screen """ returncode, stdout, stderr = self._run_cmdline(['./bin/luigi', '--help']) self.assertLessEqual(len(stdout.splitlines()), 15) def test_bin_luigi_all_help_spammy(self): """ Test that `luigi --help-all` doesn't fit on a screen Naturally, I don't mind this test breaking, but it convinces me that the "not spammy" test is actually testing what it claims too. 
""" returncode, stdout, stderr = self._run_cmdline(['./bin/luigi', '--help-all']) self.assertGreater(len(stdout.splitlines()), 15) def test_error_mesage_on_misspelled_task(self): returncode, stdout, stderr = self._run_cmdline(['./bin/luigi', 'RangeDaili']) self.assertTrue(stderr.find(b'RangeDaily') != -1) def test_bin_luigi_no_parameters(self): returncode, stdout, stderr = self._run_cmdline(['./bin/luigi']) self.assertTrue(stderr.find(b'No task specified') != -1) def test_python_module_luigi_no_parameters(self): returncode, stdout, stderr = self._run_cmdline(['python', '-m', 'luigi']) self.assertTrue(stderr.find(b'No task specified') != -1) def test_bin_luigi_help_class(self): returncode, stdout, stderr = self._run_cmdline(['./bin/luigi', '--module', 'cmdline_test', 'FooBaseClass', '--help']) self.assertTrue(stdout.find(b'--FooBaseClass-x') != -1) self.assertTrue(stdout.find(b'--x') != -1) def test_python_module_help_class(self): returncode, stdout, stderr = self._run_cmdline(['python', '-m', 'luigi', '--module', 'cmdline_test', 'FooBaseClass', '--help']) self.assertTrue(stdout.find(b'--FooBaseClass-x') != -1) self.assertTrue(stdout.find(b'--x') != -1) def test_bin_luigi_options_before_task(self): args = ['./bin/luigi', '--module', 'cmdline_test', '--no-lock', '--local-scheduler', '--FooBaseClass-x', 'hello', 'FooBaseClass'] returncode, stdout, stderr = self._run_cmdline(args) self.assertEqual(0, returncode) def test_bin_fail_on_unrecognized_args(self): returncode, stdout, stderr = self._run_cmdline(['./bin/luigi', '--no-lock', '--local-scheduler', 'Task', '--unknown-param', 'hiiii']) self.assertNotEqual(0, returncode) def test_deps_py_script(self): """ Test the deps.py script. """ args = 'python luigi/tools/deps.py --module examples.top_artists ArtistToplistToDatabase --date-interval 2015-W10'.split() returncode, stdout, stderr = self._run_cmdline(args) self.assertEqual(0, returncode) self.assertTrue(stdout.find(b'[FileSystem] data/streams_2015_03_04_faked.tsv') != -1) self.assertTrue(stdout.find(b'[DB] localhost') != -1) def test_deps_tree_py_script(self): """ Test the deps_tree.py script. """ args = 'python luigi/tools/deps_tree.py --module examples.top_artists AggregateArtists --date-interval 2012-06'.split() returncode, stdout, stderr = self._run_cmdline(args) self.assertEqual(0, returncode) for i in range(1, 30): self.assertTrue(stdout.find(("-[Streams-{{'date': '2012-06-{0}'}}".format(str(i).zfill(2))).encode('utf-8')) != -1) def test_bin_mentions_misspelled_task(self): """ Test that the error message is informative when a task is misspelled. In particular it should say that the task is misspelled and not that the local parameters do not exist. """ returncode, stdout, stderr = self._run_cmdline(['./bin/luigi', '--module', 'cmdline_test', 'HooBaseClass', '--x 5']) self.assertTrue(stderr.find(b'FooBaseClass') != -1) self.assertTrue(stderr.find(b'--x') != 0) def test_stack_trace_has_no_inner(self): """ Test that the stack trace for failing tasks are short The stack trace shouldn't contain unreasonably much implementation details of luigi In particular it should say that the task is misspelled and not that the local parameters do not exist. 
""" returncode, stdout, stderr = self._run_cmdline(['./bin/luigi', '--module', 'cmdline_test', 'ATaskThatFails', '--local-scheduler', '--no-lock']) print(stdout) self.assertFalse(stdout.find(b"run() got an unexpected keyword argument 'tracking_url_callback'") != -1) self.assertFalse(stdout.find(b'During handling of the above exception, another exception occurred') != -1) def test_cmd_line_params_are_available_for_execution_summary(self): """ Test that config parameters specified on the command line are available while generating the execution summary. """ returncode, stdout, stderr = self._run_cmdline([ './bin/luigi', '--module', 'cmdline_test', 'TaskThatRequiresConfig', '--local-scheduler', '--no-lock' '--RequiredConfig-required-test-param', 'A', ]) print(stdout) print(stderr) self.assertNotEquals(returncode, 1) self.assertFalse(b'required_test_param' in stderr) if __name__ == '__main__': # Needed for one of the tests luigi.run()
1
17,969
This shouldn't need to change, right?
spotify-luigi
py
@@ -18,14 +18,14 @@ module.exports = { "port": 8080 } }, - "files": [ "examples/cdn/*"], + "files": [ "examples/bundle/*"], "index": "index.html", "watchOptions": {}, "server": true, "proxy": false, "port": 3000, "middleware": false, - "serveStatic": ["examples/cdn"], + "serveStatic": ["examples/bundle"], "ghostMode": { "clicks": true, "scroll": true,
1
/* |-------------------------------------------------------------------------- | Browser-sync config file |-------------------------------------------------------------------------- | | For up-to-date information about the options: | http://www.browsersync.io/docs/options/ | | There are more options than you see here, these are just the ones that are | set internally. See the website for more info. | | */ module.exports = { "ui": { "port": 3001, "weinre": { "port": 8080 } }, "files": [ "examples/cdn/*"], "index": "index.html", "watchOptions": {}, "server": true, "proxy": false, "port": 3000, "middleware": false, "serveStatic": ["examples/cdn"], "ghostMode": { "clicks": true, "scroll": true, "forms": { "submit": true, "inputs": true, "toggles": true } }, "logLevel": "info", "logPrefix": "BS", "logConnections": false, "logFileChanges": true, "logSnippet": true, "rewriteRules": false, "open": "local", "browser": "default", "xip": false, "hostnameSuffix": false, "reloadOnRestart": false, "notify": true, "scrollProportionally": true, "scrollThrottle": 0, "scrollRestoreTechnique": "window.name", "reloadDelay": 0, "reloadDebounce": 0, "plugins": [], "injectChanges": true, "startPath": null, "minify": true, "host": null, "codeSync": true, "timestamps": true, "clientEvents": [ "scroll", "input:text", "input:toggles", "form:submit", "form:reset", "click" ], "socket": { "socketIoOptions": { "log": false }, "path": "/browser-sync/socket.io", "clientPath": "/browser-sync", "namespace": "/browser-sync", "clients": { "heartbeatTimeout": 5000 } }, "tagNames": { "less": "link", "scss": "link", "css": "link", "jpg": "img", "jpeg": "img", "png": "img", "svg": "img", "gif": "img", "js": "script" } };
1
9,293
This seemed broken to me. Why would browserify only check the (previously `cdn`, now) `bundle` example? And even so, the path is incorrect here. Fixing this is unrelated, so I feel it should not go into this PR. But when we fix this in master, perhaps that will solve the reload issues you experienced, @hedgerh?
transloadit-uppy
js
@@ -11,13 +11,12 @@ namespace Datadog.Trace.AppSec.Waf.NativeBindings [StructLayout(LayoutKind.Sequential)] internal struct DdwafResultStruct { - [Obsolete("This member will be removed from then ddwaf library by a future PR")] - public DDWAF_RET_CODE Action; + public bool Timeout; + + public int PerfTotalRuntime; public IntPtr Data; public IntPtr PerfData; - - public int PerfTotalRuntime; } }
1
// <copyright file="DdwafResultStruct.cs" company="Datadog"> // Unless explicitly stated otherwise all files in this repository are licensed under the Apache 2 License. // This product includes software developed at Datadog (https://www.datadoghq.com/). Copyright 2017 Datadog, Inc. // </copyright> using System; using System.Runtime.InteropServices; namespace Datadog.Trace.AppSec.Waf.NativeBindings { [StructLayout(LayoutKind.Sequential)] internal struct DdwafResultStruct { [Obsolete("This member will be removed from then ddwaf library by a future PR")] public DDWAF_RET_CODE Action; public IntPtr Data; public IntPtr PerfData; public int PerfTotalRuntime; } }
1
25,888
Same question, do we have potential version-conflict crashing scenarios here?
DataDog-dd-trace-dotnet
.cs
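The struct change in this row is what the "version-conflict" question is pointing at: with `[StructLayout(LayoutKind.Sequential)]`, the managed field order is the ABI, so a managed assembly carrying the new layout against an older native library (or the reverse) silently reinterprets the result fields. Below is a self-contained, purely hypothetical C# sketch — no Datadog or libddwaf code, both struct names invented — that shows the effect by writing memory through one layout and reading it back through the other:

```csharp
using System;
using System.Runtime.InteropServices;

// Two incompatible views of the same native result memory.
[StructLayout(LayoutKind.Sequential)]
internal struct ResultOldLayout
{
    public int ReturnCode;        // stand-in for the old enum field
    public IntPtr Data;
    public IntPtr PerfData;
    public int PerfTotalRuntime;
}

[StructLayout(LayoutKind.Sequential)]
internal struct ResultNewLayout
{
    public bool Timeout;          // marshals as a 4-byte BOOL
    public int PerfTotalRuntime;
    public IntPtr Data;
    public IntPtr PerfData;
}

internal static class LayoutMismatchDemo
{
    private static void Main()
    {
        var written = new ResultOldLayout { ReturnCode = 2, PerfTotalRuntime = 1234 };
        int size = Math.Max(Marshal.SizeOf<ResultOldLayout>(), Marshal.SizeOf<ResultNewLayout>());
        IntPtr buffer = Marshal.AllocHGlobal(size);
        try
        {
            for (int i = 0; i < size; i++)
            {
                Marshal.WriteByte(buffer, i, 0); // zero the buffer so padding bytes are deterministic
            }

            Marshal.StructureToPtr(written, buffer, false);

            // Reading the same bytes through the other layout reinterprets every field.
            var misread = Marshal.PtrToStructure<ResultNewLayout>(buffer);
            Console.WriteLine($"Timeout={misread.Timeout}, PerfTotalRuntime={misread.PerfTotalRuntime}");
        }
        finally
        {
            Marshal.FreeHGlobal(buffer);
        }
    }
}
```

Here the old return-code bytes come back as `Timeout = true` and the real `PerfTotalRuntime` value (1234) is lost; when a layout change also moves pointer-sized fields, dereferencing the misread pointers is where the actual crashes would come from.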
@@ -89,6 +89,18 @@ int RGroupDecomposition::add(const ROMol &inmol) { const bool addCoords = true; MolOps::addHs(mol, explicitOnly, addCoords); + // mark any wildcards in input molecule: + for (auto &atom : mol.atoms()) { + if (atom->getAtomicNum() == 0) { + atom->setProp("INPUT_DUMMY", true); + // clean any existing R group numbers + atom->setIsotope(0); + atom->setAtomMapNum(0); + if (atom->hasProp(common_properties::_MolFileRLabel)) { + atom->clearProp(common_properties::_MolFileRLabel); + } + } + } int core_idx = 0; const RCore *rcore = nullptr; std::vector<MatchVectType> tmatches;
1
// // Copyright (c) 2017-2021, Novartis Institutes for BioMedical Research Inc. // and other RDKit contributors // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following // disclaimer in the documentation and/or other materials provided // with the distribution. // * Neither the name of Novartis Institutes for BioMedical Research Inc. // nor the names of its contributors may be used to endorse or promote // products derived from this software without specific prior written // permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // #include "RGroupDecomp.h" #include "RGroupDecompData.h" #include <GraphMol/RDKitBase.h> #include <GraphMol/Substruct/SubstructMatch.h> #include <GraphMol/SmilesParse/SmilesWrite.h> #include <GraphMol/SmilesParse/SmartsWrite.h> #include <GraphMol/SmilesParse/SmilesParse.h> #include <GraphMol/ChemTransforms/ChemTransforms.h> #include <GraphMol/FMCS/FMCS.h> #include <boost/scoped_ptr.hpp> #include <boost/dynamic_bitset.hpp> #include <set> #include <utility> #include <vector> // #define VERBOSE 1 namespace RDKit { // Attachment Points // labeled cores => isotopes // atom mappings // atom indices => use -1 - atom index, range is [-1, ...., -num_atoms] const std::string RLABEL = "tempRlabel"; const std::string RLABEL_TYPE = "tempRlabelType"; const std::string RLABEL_CORE_INDEX = "rLabelCoreIndex"; const std::string SIDECHAIN_RLABELS = "sideChainRlabels"; const std::string done = "RLABEL_PROCESSED"; const std::string CORE = "Core"; const std::string RPREFIX = "R"; namespace { void ADD_MATCH(R_DECOMP &match, int rlabel) { if (match.find(rlabel) == match.end()) { match[rlabel] = boost::make_shared<RGroupData>(); } } } // namespace RGroupDecomposition::RGroupDecomposition( const ROMol &inputCore, const RGroupDecompositionParameters &params) : data(new RGroupDecompData(inputCore, params)) {} RGroupDecomposition::RGroupDecomposition( const std::vector<ROMOL_SPTR> &cores, const RGroupDecompositionParameters &params) : data(new RGroupDecompData(cores, params)) {} RGroupDecomposition::~RGroupDecomposition() { delete data; } int RGroupDecomposition::add(const ROMol &inmol) { // get the sidechains if possible // Add hs for better symmetrization RWMol mol(inmol); const bool explicitOnly = false; const bool addCoords = true; MolOps::addHs(mol, explicitOnly, addCoords); int core_idx = 0; const RCore *rcore = nullptr; std::vector<MatchVectType> tmatches; 
std::vector<MatchVectType> tmatches_filtered; // Find the first matching core (onlyMatchAtRGroups) // or the first core that requires the smallest number // of newly added labels int global_min_heavy_nbrs = -1; SubstructMatchParameters sssparams(params().substructmatchParams); sssparams.uniquify = false; sssparams.recursionPossible = true; for (const auto &core : data->cores) { { // matching the core to the molecule is a two step process // First match to a reduced representation (the core minus terminal // R-groups). Next, match the R-groups. We do this as the core may not be // a substructure match for the molecule if a single molecule atom matches // 2 RGroup attachments (see https://github.com/rdkit/rdkit/pull/4002) // match the reduced represenation: std::vector<MatchVectType> baseMatches = SubstructMatch(mol, *core.second.matchingMol, sssparams); tmatches.clear(); for (const auto &baseMatch : baseMatches) { // Match the R Groups auto matchesWithDummy = core.second.matchTerminalUserRGroups(mol, baseMatch, sssparams); tmatches.insert(tmatches.end(), matchesWithDummy.cbegin(), matchesWithDummy.cend()); } } if (tmatches.empty()) { continue; } std::vector<int> tmatches_heavy_nbrs(tmatches.size(), 0); size_t i = 0; for (const auto &mv : tmatches) { bool passes_filter = data->params.onlyMatchAtRGroups; boost::dynamic_bitset<> target_match_indices(mol.getNumAtoms()); for (const auto &match : mv) { target_match_indices[match.second] = 1; } // target atoms that map to user defined R-groups std::vector<int> targetAttachments; for (const auto &match : mv) { const Atom *atm = mol.getAtomWithIdx(match.second); // is this a labelled rgroup or not? if (!core.second.isCoreAtomUserLabelled(match.first)) { // nope... if any neighbor is not part of the substructure // make sure we are a hydrogen, otherwise, skip the match for (const auto &nbri : boost::make_iterator_range(mol.getAtomNeighbors(atm))) { const auto &nbr = mol[nbri]; if (nbr->getAtomicNum() != 1 && !target_match_indices[nbr->getIdx()]) { if (data->params.onlyMatchAtRGroups) { passes_filter = false; break; } else { ++tmatches_heavy_nbrs[i]; } } } } else { // labelled R-group if (core.second.isTerminalRGroupWithUserLabel(match.first)) { targetAttachments.push_back(match.second); } } if (!passes_filter && data->params.onlyMatchAtRGroups) { break; } if (passes_filter && data->params.onlyMatchAtRGroups) { for (auto attachmentIdx : targetAttachments) { if (!core.second.checkAllBondsToAttachmentPointPresent( mol, attachmentIdx, mv)) { passes_filter = false; break; } } } } if (passes_filter) { tmatches_filtered.push_back(mv); } ++i; } if (!data->params.onlyMatchAtRGroups) { int min_heavy_nbrs = *std::min_element(tmatches_heavy_nbrs.begin(), tmatches_heavy_nbrs.end()); if (global_min_heavy_nbrs == -1 || min_heavy_nbrs < global_min_heavy_nbrs) { i = 0; tmatches_filtered.clear(); for (const auto heavy_nbrs : tmatches_heavy_nbrs) { if (heavy_nbrs <= min_heavy_nbrs) { tmatches_filtered.push_back(std::move(tmatches[i])); } ++i; } global_min_heavy_nbrs = min_heavy_nbrs; rcore = &core.second; core_idx = core.first; if (global_min_heavy_nbrs == 0) { break; } } } else if (!tmatches_filtered.empty()) { rcore = &core.second; core_idx = core.first; break; } } tmatches = std::move(tmatches_filtered); if (tmatches.size() > 1) { if (data->params.matchingStrategy == NoSymmetrization) { tmatches.resize(1); } else if (data->matches.size() == 0) { // Greedy strategy just grabs the first match and // takes the best matches from the rest if 
(data->params.matchingStrategy == Greedy) { tmatches.resize(1); } } } if (rcore == nullptr) { BOOST_LOG(rdDebugLog) << "No core matches" << std::endl; return -1; } // strategies // ========== // Exhaustive - saves all matches and optimizes later exhaustive // May never finish due to combinatorial complexity // Greedy - matches to *FIRST* available match // GreedyChunks - default - process every N chunks // Should probably scan all mols first to find match with // smallest number of matches... std::vector<RGroupMatch> potentialMatches; std::unique_ptr<ROMol> tMol; for (const auto &tmatche : tmatches) { const bool replaceDummies = false; const bool labelByIndex = true; const bool requireDummyMatch = false; bool hasCoreDummies = false; auto coreCopy = rcore->replaceCoreAtomsWithMolMatches(hasCoreDummies, mol, tmatche); tMol.reset(replaceCore(mol, *coreCopy, tmatche, replaceDummies, labelByIndex, requireDummyMatch)); #ifdef VERBOSE std::cerr << "Core Match core_idx " << core_idx << " idx " << data->matches.size() << ": " << MolToSmarts(*coreCopy) << std::endl; #endif if (tMol) { #ifdef VERBOSE std::cerr << "All Fragments " << MolToSmiles(*tMol) << std::endl; #endif R_DECOMP match; // rlabel rgroups MOL_SPTR_VECT fragments = MolOps::getMolFrags(*tMol, false); std::set<int> coreAtomAnyMatched; for (size_t i = 0; i < fragments.size(); ++i) { std::vector<int> attachments; boost::shared_ptr<ROMol> &newMol = fragments[i]; newMol->setProp<int>("core", core_idx); newMol->setProp<int>("idx", data->matches.size()); newMol->setProp<int>("frag_idx", i); #ifdef VERBOSE std::cerr << "Fragment " << MolToSmiles(*newMol) << std::endl; #endif for (auto at : newMol->atoms()) { unsigned int elno = at->getAtomicNum(); if (elno == 0) { unsigned int index = at->getIsotope(); // this is the index into the core // it messes up when there are multiple ? int rlabel; auto coreAtom = rcore->core->getAtomWithIdx(index); coreAtomAnyMatched.insert(index); if (coreAtom->getPropIfPresent(RLABEL, rlabel)) { std::vector<int> rlabelsOnSideChain; at->getPropIfPresent(SIDECHAIN_RLABELS, rlabelsOnSideChain); rlabelsOnSideChain.push_back(rlabel); at->setProp(SIDECHAIN_RLABELS, rlabelsOnSideChain); data->labels.insert(rlabel); // keep track of all labels used attachments.push_back(rlabel); } } } if (attachments.size() > 0) { // reject multiple attachments? // what to do with labelled cores ? std::string newCoreSmi = MolToSmiles(*newMol, true); for (size_t attach_idx = 0; attach_idx < attachments.size(); ++attach_idx) { int rlabel = attachments[attach_idx]; ADD_MATCH(match, rlabel); match[rlabel]->add(newMol, attachments); #ifdef VERBOSE std::cerr << "Fragment " << i << " R" << rlabel << " " << MolToSmiles(*newMol) << std::endl; #endif } } else { // special case, only one fragment if (fragments.size() == 1) { // need to make a new core // remove the sidechains // GJ I think if we ever get here that it's really an error and I // believe that I've fixed the case where this code was called. // Still, I'm too scared to delete the block. 
RWMol newCore(mol); for (const auto &mvpair : tmatche) { const Atom *coreAtm = rcore->core->getAtomWithIdx(mvpair.first); Atom *newCoreAtm = newCore.getAtomWithIdx(mvpair.second); int rlabel; if (coreAtm->getPropIfPresent(RLABEL, rlabel)) { newCoreAtm->setProp<int>(RLABEL, rlabel); } newCoreAtm->setProp<bool>("keep", true); } newCore.beginBatchEdit(); for (const auto atom : newCore.atoms()) { if (!atom->hasProp("keep")) { newCore.removeAtom(atom); } } newCore.commitBatchEdit(); if (newCore.getNumAtoms()) { std::string newCoreSmi = MolToSmiles(newCore, true); // add a new core if possible auto newcore = data->newCores.find(newCoreSmi); int core_idx = 0; if (newcore == data->newCores.end()) { core_idx = data->newCores[newCoreSmi] = data->newCoreLabel--; data->cores[core_idx] = RCore(newCore); return add(inmol); } } } } } if (match.size()) { auto numberUserGroupsInMatch = std::accumulate( match.begin(), match.end(), 0, [](int sum, std::pair<int, boost::shared_ptr<RGroupData>> p) { return p.first > 0 && !p.second->is_hydrogen ? ++sum : sum; }); int numberMissingUserGroups = rcore->numberUserRGroups - numberUserGroupsInMatch; CHECK_INVARIANT(numberMissingUserGroups >= 0, "Data error in missing user rgroup count"); potentialMatches.emplace_back( core_idx, numberMissingUserGroups, match, hasCoreDummies || !data->params.onlyMatchAtRGroups ? coreCopy : nullptr); } } } if (potentialMatches.size() == 0) { BOOST_LOG(rdDebugLog) << "No attachment points in side chains" << std::endl; return -2; } // in case the value ends up being changed in a future version of the code: if (data->prunePermutations) { data->permutationProduct = 1; } if (data->params.matchingStrategy != GA) { size_t N = data->permutationProduct; for (auto matche = data->matches.begin() + data->previousMatchSize; matche != data->matches.end(); ++matche) { size_t sz = matche->size(); N *= sz; } // oops, exponential is a pain if (N * potentialMatches.size() > 100000) { data->permutationProduct = N; data->process(data->prunePermutations); } } data->matches.push_back(potentialMatches); if (data->matches.size()) { if (data->params.matchingStrategy & Greedy || (data->params.matchingStrategy & GreedyChunks && data->matches.size() > 1 && data->matches.size() % data->params.chunkSize == 0)) { data->process(data->prunePermutations); } } return data->matches.size() - 1; } bool RGroupDecomposition::process() { return processAndScore().success; } RGroupDecompositionProcessResult RGroupDecomposition::processAndScore() { try { const bool finalize = true; return data->process(data->prunePermutations, finalize); } catch (...) 
{ return RGroupDecompositionProcessResult(false, -1); } } std::vector<std::string> RGroupDecomposition::getRGroupLabels() const { // this is a bit of a cheat RGroupColumns cols = getRGroupsAsColumns(); std::vector<std::string> labels; for (auto it : cols) { labels.push_back(it.first); } std::sort(labels.begin(), labels.end()); return labels; } RWMOL_SPTR RGroupDecomposition::outputCoreMolecule( const RGroupMatch &match, const UsedLabelMap &usedLabelMap) const { const auto &core = data->cores[match.core_idx]; if (!match.matchedCore) { return core.labelledCore; } auto coreWithMatches = core.coreWithMatches(*match.matchedCore); for (auto atomIdx = coreWithMatches->getNumAtoms(); atomIdx--;) { auto atom = coreWithMatches->getAtomWithIdx(atomIdx); if (atom->getAtomicNum()) { continue; } auto label = atom->getAtomMapNum(); Atom *nbrAtom = nullptr; for (const auto &nbri : boost::make_iterator_range(coreWithMatches->getAtomNeighbors(atom))) { nbrAtom = (*coreWithMatches)[nbri]; break; } if (nbrAtom) { bool isUserDefinedLabel = usedLabelMap.isUserDefined(label); auto numExplicitHs = nbrAtom->getNumExplicitHs(); if (usedLabelMap.getIsUsed(label)) { if (numExplicitHs) { nbrAtom->setNumExplicitHs(numExplicitHs - 1); } } else if (!isUserDefinedLabel || data->params.removeAllHydrogenRGroupsAndLabels) { coreWithMatches->removeAtom(atomIdx); // if we remove an unused label from an aromatic atom, // we need to check whether we need to adjust its explicit // H count, or it will fail to kekulize if (isUserDefinedLabel && nbrAtom->getIsAromatic()) { nbrAtom->updatePropertyCache(false); if (!numExplicitHs) { nbrAtom->setNumExplicitHs(nbrAtom->getExplicitValence() - nbrAtom->getDegree()); } } } nbrAtom->updatePropertyCache(false); } } return coreWithMatches; } RGroupRows RGroupDecomposition::getRGroupsAsRows() const { std::vector<RGroupMatch> permutation = data->GetCurrentBestPermutation(); RGroupRows groups; auto usedLabelMap = UsedLabelMap(data->finalRlabelMapping); for (auto it = permutation.begin(); it != permutation.end(); ++it) { auto Rs_seen(usedLabelMap); // make a new rgroup entry groups.push_back(RGroupRow()); RGroupRow &out_rgroups = groups.back(); const R_DECOMP &in_rgroups = it->rgroups; for (const auto &rgroup : in_rgroups) { const auto realLabel = data->finalRlabelMapping.find(rgroup.first); CHECK_INVARIANT(realLabel != data->finalRlabelMapping.end(), "unprocessed rlabel, please call process() first."); Rs_seen.setIsUsed(realLabel->second); out_rgroups[RPREFIX + std::to_string(realLabel->second)] = rgroup.second->combinedMol; } out_rgroups[CORE] = outputCoreMolecule(*it, Rs_seen); } return groups; } //! return rgroups in column order group[attachment_point][molidx] = ROMol RGroupColumns RGroupDecomposition::getRGroupsAsColumns() const { std::vector<RGroupMatch> permutation = data->GetCurrentBestPermutation(); RGroupColumns groups; std::unordered_set<std::string> rGroupWithRealMol{CORE}; auto usedLabelMap = UsedLabelMap(data->finalRlabelMapping); unsigned int molidx = 0; for (auto it = permutation.begin(); it != permutation.end(); ++it, ++molidx) { auto Rs_seen(usedLabelMap); const R_DECOMP &in_rgroups = it->rgroups; for (const auto &rgroup : in_rgroups) { const auto realLabel = data->finalRlabelMapping.find(rgroup.first); CHECK_INVARIANT(realLabel != data->finalRlabelMapping.end(), "unprocessed rlabel, please call process() first."); CHECK_INVARIANT(rgroup.second->combinedMol->hasProp(done), "Not done! 
Call process()"); CHECK_INVARIANT(!Rs_seen.getIsUsed(realLabel->second), "R group label appears multiple times!"); Rs_seen.setIsUsed(realLabel->second); std::string r = RPREFIX + std::to_string(realLabel->second); RGroupColumn &col = groups[r]; if (molidx && col.size() < molidx - 1) { col.resize(molidx - 1); } col.push_back(rgroup.second->combinedMol); rGroupWithRealMol.insert(r); } groups[CORE].push_back(outputCoreMolecule(*it, Rs_seen)); // add empty entries to columns where this molecule didn't appear for (const auto &realLabel : data->finalRlabelMapping) { if (!Rs_seen.getIsUsed(realLabel.second)) { std::string r = RPREFIX + std::to_string(realLabel.second); groups[r].push_back(boost::make_shared<RWMol>()); } } } // purge R-group entries that have no mols for (auto it = groups.begin(); it != groups.end();) { auto itToErase = groups.end(); if (!rGroupWithRealMol.count(it->first)) { itToErase = it; } ++it; if (itToErase != groups.end()) { groups.erase(itToErase); } } return groups; } const RGroupDecompositionParameters &RGroupDecomposition::params() const { return data->params; } namespace { std::vector<unsigned int> Decomp(RGroupDecomposition &decomp, const std::vector<ROMOL_SPTR> &mols) { auto t0 = std::chrono::steady_clock::now(); std::vector<unsigned int> unmatched; for (size_t i = 0; i < mols.size(); ++i) { int v = decomp.add(*mols[i].get()); if (v == -1) { unmatched.push_back(i); } checkForTimeout(t0, decomp.params().timeout); } decomp.process(); return unmatched; } } // namespace unsigned int RGroupDecompose(const std::vector<ROMOL_SPTR> &cores, const std::vector<ROMOL_SPTR> &mols, RGroupRows &rows, std::vector<unsigned int> *unmatchedIndices, const RGroupDecompositionParameters &options) { RGroupDecomposition decomp(cores, options); std::vector<unsigned int> unmatched = Decomp(decomp, mols); if (unmatchedIndices) { *unmatchedIndices = unmatched; } rows = decomp.getRGroupsAsRows(); return mols.size() - unmatched.size(); } unsigned int RGroupDecompose(const std::vector<ROMOL_SPTR> &cores, const std::vector<ROMOL_SPTR> &mols, RGroupColumns &columns, std::vector<unsigned int> *unmatchedIndices, const RGroupDecompositionParameters &options) { RGroupDecomposition decomp(cores, options); std::vector<unsigned int> unmatched = Decomp(decomp, mols); if (unmatchedIndices) { *unmatchedIndices = unmatched; } columns = decomp.getRGroupsAsColumns(); return mols.size() - unmatched.size(); } } // namespace RDKit
1
23962
Definitely not required, but it would be better if you had a constexpr for `"INPUT_DUMMY"`
rdkit-rdkit
cpp
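The review comment above suggests hoisting the repeated "INPUT_DUMMY" string literal into a named constexpr constant. A minimal sketch of that refactoring follows; the constant name, the anonymous namespace, and the setProp call site shown are illustrative assumptions, not the actual RDKit declarations.

// Hypothetical sketch: declare the tag once so every call site refers to the
// same compile-time constant instead of repeating the quoted literal.
namespace {
constexpr const char INPUT_DUMMY[] = "INPUT_DUMMY";
}  // anonymous namespace

// A call site would then read, for example:
//   atom->setProp(INPUT_DUMMY, true);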
@@ -1831,11 +1831,7 @@ namespace NLog.Targets #endif try { - archiveFile = this.GetArchiveFileName(fileName, ev, upcomingWriteSize); - if (!string.IsNullOrEmpty(archiveFile)) - { - this.DoAutoArchive(archiveFile, ev); - } + this.DoAutoArchive(archiveFile, ev); } finally {
1
// // Copyright (c) 2004-2016 Jaroslaw Kowalski <[email protected]>, Kim Christensen, Julian Verdurmen // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * Neither the name of Jaroslaw Kowalski nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF // THE POSSIBILITY OF SUCH DAMAGE. // #if !SILVERLIGHT && !__ANDROID__ && !__IOS__ // Unfortunately, Xamarin Android and Xamarin iOS don't support mutexes (see https://github.com/mono/mono/blob/3a9e18e5405b5772be88bfc45739d6a350560111/mcs/class/corlib/System.Threading/Mutex.cs#L167) so the BaseFileAppender class now throws an exception in the constructor. #define SupportsMutex #endif namespace NLog.Targets { using System; using System.Collections.Generic; using System.ComponentModel; using System.Globalization; using System.IO; #if !SILVERLIGHT using System.IO.Compression; #endif using System.Linq; using System.Text; using System.Threading; using Common; using Config; using Internal; using Internal.FileAppenders; using Layouts; using Time; /// <summary> /// Writes log messages to one or more files. /// </summary> /// <seealso href="https://github.com/nlog/nlog/wiki/File-target">Documentation on NLog Wiki</seealso> [Target("File")] public class FileTarget : TargetWithLayoutHeaderAndFooter, ICreateFileParameters { /// <summary> /// Default clean up period of the initilized files. When a file exceeds the clean up period is removed from the list. /// </summary> /// <remarks>Clean up period is defined in days.</remarks> private const int InitializedFilesCleanupPeriod = 2; /// <summary> /// The maximum number of initialised files at any one time. Once this number is exceeded clean up procedures /// are initiated to reduce the number of initialised files. /// </summary> private const int InitializedFilesCounterMax = 100; /// <summary> /// This value disables file archiving based on the size. /// </summary> private const int ArchiveAboveSizeDisabled = -1; /// <summary> /// Holds the initialised files each given time by the <see cref="FileTarget"/> instance. Against each file, the last write time is stored. 
/// </summary> /// <remarks>Last write time is store in local time (no UTC).</remarks> private readonly Dictionary<string, DateTime> initializedFiles = new Dictionary<string, DateTime>(); private LineEndingMode lineEndingMode = LineEndingMode.Default; /// <summary> /// Factory used to create the file appenders in the <see cref="FileTarget"/> instance. /// </summary> /// <remarks>File appenders are stored in an instance of <see cref="FileAppenderCache"/>.</remarks> private IFileAppenderFactory appenderFactory; /// <summary> /// List of the associated file appenders with the <see cref="FileTarget"/> instance. /// </summary> private FileAppenderCache fileAppenderCache; private Timer autoClosingTimer; #if !SILVERLIGHT && !__IOS__ && !__ANDROID__ private Thread appenderInvalidatorThread = null; private EventWaitHandle stopAppenderInvalidatorThreadWaitHandle = new ManualResetEvent(false); #endif /// <summary> /// The number of initialised files at any one time. /// </summary> private int initializedFilesCounter; /// <summary> /// The maximum number of archive files that should be kept. /// </summary> private int maxArchiveFiles; private readonly DynamicFileArchive fileArchive; /// <summary> /// It holds the file names of existing archives in order for the oldest archives to be removed when the list of /// filenames becomes too long. /// </summary> private Queue<string> previousFileNames; /// <summary> /// The filename as target /// </summary> private FilePathLayout fullFileName; /// <summary> /// The archive file name as target /// </summary> private FilePathLayout fullarchiveFileName; private FileArchivePeriod archiveEvery; private long archiveAboveSize; private bool enableArchiveFileCompression; /// <summary> /// The date of the previous log event. /// </summary> private DateTime? previousLogEventTimestamp; /// <summary> /// The file name of the previous log event. /// </summary> private string previousLogFileName; private bool concurrentWrites; private bool keepFileOpen; private bool cleanupFileName; private FilePathKind fileNameKind; private FilePathKind archiveFileKind; /// <summary> /// Initializes a new instance of the <see cref="FileTarget" /> class. /// </summary> /// <remarks> /// The default value of the layout is: <code>${longdate}|${level:uppercase=true}|${logger}|${message}</code> /// </remarks> public FileTarget() { this.ArchiveNumbering = ArchiveNumberingMode.Sequence; this.maxArchiveFiles = 0; this.ConcurrentWriteAttemptDelay = 1; this.ArchiveEvery = FileArchivePeriod.None; this.ArchiveAboveSize = FileTarget.ArchiveAboveSizeDisabled; this.ConcurrentWriteAttempts = 10; this.ConcurrentWrites = true; #if SILVERLIGHT this.Encoding = Encoding.UTF8; #else this.Encoding = Encoding.Default; #endif this.BufferSize = 32768; this.AutoFlush = true; #if !SILVERLIGHT this.FileAttributes = Win32FileAttributes.Normal; #endif this.LineEnding = LineEndingMode.Default; this.EnableFileDelete = true; this.OpenFileCacheTimeout = -1; this.OpenFileCacheSize = 5; this.CreateDirs = true; this.fileArchive = new DynamicFileArchive(this, MaxArchiveFiles); this.ForceManaged = false; this.ArchiveDateFormat = string.Empty; this.maxLogFilenames = 20; this.previousFileNames = new Queue<string>(this.maxLogFilenames); this.fileAppenderCache = FileAppenderCache.Empty; this.CleanupFileName = true; this.WriteFooterOnArchivingOnly = false; } #if NET4_5 static FileTarget() { FileCompressor = new ZipArchiveFileCompressor(); } #endif /// <summary> /// Initializes a new instance of the <see cref="FileTarget" /> class. 
/// </summary> /// <remarks> /// The default value of the layout is: <code>${longdate}|${level:uppercase=true}|${logger}|${message}</code> /// </remarks> /// <param name="name">Name of the target.</param> public FileTarget(string name) : this() { this.Name = name; } /// <summary> /// Gets or sets the name of the file to write to. /// </summary> /// <remarks> /// This FileName string is a layout which may include instances of layout renderers. /// This lets you use a single target to write to multiple files. /// </remarks> /// <example> /// The following value makes NLog write logging events to files based on the log level in the directory where /// the application runs. /// <code>${basedir}/${level}.log</code> /// All <c>Debug</c> messages will go to <c>Debug.log</c>, all <c>Info</c> messages will go to <c>Info.log</c> and so on. /// You can combine as many of the layout renderers as you want to produce an arbitrary log file name. /// </example> /// <docgen category='Output Options' order='1' /> [RequiredParameter] public Layout FileName { get { if (fullFileName == null) return null; return fullFileName.GetLayout(); } set { fullFileName = CreateFileNameLayout(value); if (IsInitialized) { //don't call before initialized because this could lead to stackoverflows. RefreshFileArchive(); RefreshArchiveFilePatternToWatch(); } } } private FilePathLayout CreateFileNameLayout(Layout value) { if (value == null) return null; return new FilePathLayout(value, CleanupFileName, FileNameKind); } /// <summary> /// Cleanup invalid values in a filename, e.g. slashes in a filename. If set to <c>true</c>, this can impact the performance of massive writes. /// If set to <c>false</c>, nothing gets written when the filename is wrong. /// </summary> [DefaultValue(true)] public bool CleanupFileName { get { return cleanupFileName; } set { cleanupFileName = value; fullFileName = CreateFileNameLayout(FileName); fullarchiveFileName = CreateFileNameLayout(ArchiveFileName); } } /// <summary> /// Is the <see cref="FileName"/> an absolute or relative path? /// </summary> [DefaultValue(FilePathKind.Unknown)] public FilePathKind FileNameKind { get { return fileNameKind; } set { fileNameKind = value; fullFileName = CreateFileNameLayout(FileName); } } /// <summary> /// Gets or sets a value indicating whether to create directories if they do not exist. /// </summary> /// <remarks> /// Setting this to false may improve performance a bit, but you'll receive an error /// when attempting to write to a directory that's not present. /// </remarks> /// <docgen category='Output Options' order='10' /> [DefaultValue(true)] [Advanced] public bool CreateDirs { get; set; } /// <summary> /// Gets or sets a value indicating whether to delete old log file on startup. /// </summary> /// <remarks> /// This option works only when the "FileName" parameter denotes a single file. /// </remarks> /// <docgen category='Output Options' order='10' /> [DefaultValue(false)] public bool DeleteOldFileOnStartup { get; set; } /// <summary> /// Gets or sets a value indicating whether to replace file contents on each write instead of appending log message at the end. /// </summary> /// <docgen category='Output Options' order='10' /> [DefaultValue(false)] [Advanced] public bool ReplaceFileContentsOnEachWrite { get; set; } /// <summary> /// Gets or sets a value indicating whether to keep log file open instead of opening and closing it on each logging event. /// </summary> /// <remarks> /// Setting this property to <c>True</c> helps improve performance. 
/// </remarks> /// <docgen category='Performance Tuning Options' order='10' /> [DefaultValue(false)] public bool KeepFileOpen { get { return keepFileOpen; } set { keepFileOpen = value; if (IsInitialized) { RefreshArchiveFilePatternToWatch(); } } } /// <summary> /// Gets or sets the maximum number of log filenames that should be stored as existing. /// </summary> /// <remarks> /// The bigger this number is the longer it will take to write each log record. The smaller the number is /// the higher the chance that the clean function will be run when no new files have been opened. /// </remarks> /// <docgen category='Performance Tuning Options' order='10' /> [DefaultValue(20)] //NLog5: todo rename correct for text case public int maxLogFilenames { get; set; } /// <summary> /// Gets or sets a value indicating whether to enable log file(s) to be deleted. /// </summary> /// <docgen category='Output Options' order='10' /> [DefaultValue(true)] public bool EnableFileDelete { get; set; } #if !SILVERLIGHT /// <summary> /// Gets or sets the file attributes (Windows only). /// </summary> /// <docgen category='Output Options' order='10' /> [Advanced] public Win32FileAttributes FileAttributes { get; set; } #endif /// <summary> /// Should we capture the last write time of a file? /// </summary> bool ICreateFileParameters.CaptureLastWriteTime { get { return ArchiveNumbering == ArchiveNumberingMode.Date || ArchiveNumbering == ArchiveNumberingMode.DateAndSequence; } } /// <summary> /// Gets or sets the line ending mode. /// </summary> /// <docgen category='Layout Options' order='10' /> [Advanced] public LineEndingMode LineEnding { get { return this.lineEndingMode; } set { this.lineEndingMode = value; } } /// <summary> /// Gets or sets a value indicating whether to automatically flush the file buffers after each log message. /// </summary> /// <docgen category='Performance Tuning Options' order='10' /> [DefaultValue(true)] public bool AutoFlush { get; set; } /// <summary> /// Gets or sets the number of files to be kept open. Setting this to a higher value may improve performance /// in a situation where a single File target is writing to many files /// (such as splitting by level or by logger). /// </summary> /// <remarks> /// The files are managed on a LRU (least recently used) basis, which flushes /// the files that have not been used for the longest period of time should the /// cache become full. As a rule of thumb, you shouldn't set this parameter to /// a very high value. A number like 10-15 shouldn't be exceeded, because you'd /// be keeping a large number of files open which consumes system resources. /// </remarks> /// <docgen category='Performance Tuning Options' order='10' /> [DefaultValue(5)] [Advanced] public int OpenFileCacheSize { get; set; } /// <summary> /// Gets or sets the maximum number of seconds that files are kept open. If this number is negative the files are /// not automatically closed after a period of inactivity. /// </summary> /// <docgen category='Performance Tuning Options' order='10' /> [DefaultValue(-1)] [Advanced] public int OpenFileCacheTimeout { get; set; } /// <summary> /// Gets or sets the log file buffer size in bytes. /// </summary> /// <docgen category='Performance Tuning Options' order='10' /> [DefaultValue(32768)] public int BufferSize { get; set; } /// <summary> /// Gets or sets the file encoding. 
/// </summary> /// <docgen category='Layout Options' order='10' /> public Encoding Encoding { get; set; } /// <summary> /// Gets or sets a value indicating whether concurrent writes to the log file by multiple processes on the same host. /// </summary> /// <remarks> /// This makes multi-process logging possible. NLog uses a special technique /// that lets it keep the files open for writing. /// </remarks> /// <docgen category='Performance Tuning Options' order='10' /> [DefaultValue(true)] public bool ConcurrentWrites { get { return concurrentWrites; } set { concurrentWrites = value; if (IsInitialized) { RefreshArchiveFilePatternToWatch(); } } } /// <summary> /// Gets or sets a value indicating whether concurrent writes to the log file by multiple processes on different network hosts. /// </summary> /// <remarks> /// This effectively prevents files from being kept open. /// </remarks> /// <docgen category='Performance Tuning Options' order='10' /> [DefaultValue(false)] public bool NetworkWrites { get; set; } /// <summary> /// Gets or sets the number of times the write is appended on the file before NLog /// discards the log message. /// </summary> /// <docgen category='Performance Tuning Options' order='10' /> [DefaultValue(10)] [Advanced] public int ConcurrentWriteAttempts { get; set; } /// <summary> /// Gets or sets the delay in milliseconds to wait before attempting to write to the file again. /// </summary> /// <remarks> /// The actual delay is a random value between 0 and the value specified /// in this parameter. On each failed attempt the delay base is doubled /// up to <see cref="ConcurrentWriteAttempts" /> times. /// </remarks> /// <example> /// Assuming that ConcurrentWriteAttemptDelay is 10 the time to wait will be:<p/> /// a random value between 0 and 10 milliseconds - 1st attempt<br/> /// a random value between 0 and 20 milliseconds - 2nd attempt<br/> /// a random value between 0 and 40 milliseconds - 3rd attempt<br/> /// a random value between 0 and 80 milliseconds - 4th attempt<br/> /// ...<p/> /// and so on. /// </example> /// <docgen category='Performance Tuning Options' order='10' /> [DefaultValue(1)] [Advanced] public int ConcurrentWriteAttemptDelay { get; set; } /// <summary> /// Gets or sets a value indicating whether to archive old log file on startup. /// </summary> /// <remarks> /// This option works only when the "FileName" parameter denotes a single file. /// After archiving the old file, the current log file will be empty. /// </remarks> /// <docgen category='Output Options' order='10' /> [DefaultValue(false)] public bool ArchiveOldFileOnStartup { get; set; } /// <summary> /// Gets or sets a value specifying the date format to use when archiving files. /// </summary> /// <remarks> /// This option works only when the "ArchiveNumbering" parameter is set either to Date or DateAndSequence. /// </remarks> /// <docgen category='Output Options' order='10' /> [DefaultValue("")] public string ArchiveDateFormat { get; set; } /// <summary> /// Gets or sets the size in bytes above which log files will be automatically archived. /// /// Warning: combining this with <see cref="ArchiveNumberingMode.Date"/> isn't supported. We cannot create multiple archive files, if they should have the same name. /// Choose: <see cref="ArchiveNumberingMode.DateAndSequence"/> /// </summary> /// <remarks> /// Caution: Enabling this option can considerably slow down your file /// logging in multi-process scenarios. 
If only one process is going to /// be writing to the file, consider setting <c>ConcurrentWrites</c> /// to <c>false</c> for maximum performance. /// </remarks> /// <docgen category='Archival Options' order='10' /> public long ArchiveAboveSize { get { return archiveAboveSize; } set { archiveAboveSize = value; if (IsInitialized) { RefreshArchiveFilePatternToWatch(); } } } /// <summary> /// Gets or sets a value indicating whether to automatically archive log files every time the specified time passes. /// </summary> /// <remarks> /// Files are moved to the archive as part of the write operation if the current period of time changes. For example /// if the current <c>hour</c> changes from 10 to 11, the first write that will occur /// on or after 11:00 will trigger the archiving. /// <p> /// Caution: Enabling this option can considerably slow down your file /// logging in multi-process scenarios. If only one process is going to /// be writing to the file, consider setting <c>ConcurrentWrites</c> /// to <c>false</c> for maximum performance. /// </p> /// </remarks> /// <docgen category='Archival Options' order='10' /> public FileArchivePeriod ArchiveEvery { get { return archiveEvery; } set { archiveEvery = value; if (IsInitialized) { RefreshArchiveFilePatternToWatch(); } } } /// <summary> /// Is the <see cref="ArchiveFileName"/> an absolute or relative path? /// </summary> public FilePathKind ArchiveFileKind { get { return archiveFileKind; } set { archiveFileKind = value; fullarchiveFileName = CreateFileNameLayout(ArchiveFileName); } } /// <summary> /// Gets or sets the name of the file to be used for an archive. /// </summary> /// <remarks> /// It may contain a special placeholder {#####} /// that will be replaced with a sequence of numbers depending on /// the archiving strategy. The number of hash characters used determines /// the number of numerical digits to be used for numbering files. /// </remarks> /// <docgen category='Archival Options' order='10' /> public Layout ArchiveFileName { get { if (fullarchiveFileName == null) return null; return fullarchiveFileName.GetLayout(); } set { fullarchiveFileName = CreateFileNameLayout(value); if (IsInitialized) { //don't call before initialized because this could lead to stackoverflows. RefreshFileArchive(); RefreshArchiveFilePatternToWatch(); } } } /// <summary> /// Gets or sets the maximum number of archive files that should be kept. /// </summary> /// <docgen category='Archival Options' order='10' /> [DefaultValue(0)] public int MaxArchiveFiles { get { return maxArchiveFiles; } set { maxArchiveFiles = value; fileArchive.MaxArchiveFileToKeep = value; } } /// <summary> /// Gets or sets the way file archives are numbered. /// </summary> /// <docgen category='Archival Options' order='10' /> public ArchiveNumberingMode ArchiveNumbering { get; set; } /// <summary> /// Used to compress log files during archiving. /// This may be used to provide your own implementation of a zip file compressor, /// on platforms other than .Net4.5. /// Defaults to ZipArchiveFileCompressor on .Net4.5 and to null otherwise. /// </summary> public static IFileCompressor FileCompressor { get; set; } /// <summary> /// Gets or sets a value indicating whether to compress archive files into the zip archive format. 
/// </summary> /// <docgen category='Archival Options' order='10' /> [DefaultValue(false)] public bool EnableArchiveFileCompression { get { return enableArchiveFileCompression && FileCompressor != null; } set { enableArchiveFileCompression = value; if (IsInitialized) { RefreshArchiveFilePatternToWatch(); } } } /// <summary> /// Gets or set a value indicating whether a managed file stream is forced, instead of used the native implementation. /// </summary> [DefaultValue(false)] public bool ForceManaged { get; set; } /// <summary> /// Gets or sets a value indicating whether the footer should be written only when the file is archived. /// </summary> [DefaultValue(false)] public bool WriteFooterOnArchivingOnly { get; set; } /// <summary> /// Gets the characters that are appended after each line. /// </summary> protected internal string NewLineChars { get { return lineEndingMode.NewLineCharacters; } } private void RefreshFileArchive() { var nullEvent = LogEventInfo.CreateNullEvent(); string fileNamePattern = GetArchiveFileNamePattern(GetFullFileName(nullEvent), nullEvent); if (fileNamePattern == null) { InternalLogger.Debug("no RefreshFileArchive because fileName is NULL"); return; } if (!ContainsFileNamePattern(fileNamePattern)) { try { fileArchive.InitializeForArchiveFolderPath(Path.GetDirectoryName(fileNamePattern)); } catch (Exception exception) { if (exception.MustBeRethrownImmediately()) { throw; } //TODO NLog 5, check MustBeRethrown() InternalLogger.Warn(exception, "Error while initializing archive folder."); } } } /// <summary> /// Refresh the ArchiveFilePatternToWatch option of the <see cref="FileAppenderCache" />. /// The log file must be watched for archiving when multiple processes are writing to the same /// open file. /// </summary> private void RefreshArchiveFilePatternToWatch() { #if !SILVERLIGHT && !__IOS__ && !__ANDROID__ if (this.fileAppenderCache != null) { bool mustWatchArchiving = IsArchivingEnabled() && ConcurrentWrites && KeepFileOpen; if (mustWatchArchiving) { var nullEvent = LogEventInfo.CreateNullEvent(); string fileNamePattern = GetArchiveFileNamePattern(GetFullFileName(nullEvent), nullEvent); if (!string.IsNullOrEmpty(fileNamePattern)) { fileNamePattern = Path.Combine(Path.GetDirectoryName(fileNamePattern), ReplaceFileNamePattern(fileNamePattern, "*")); //fileNamePattern is absolute this.fileAppenderCache.ArchiveFilePatternToWatch = fileNamePattern; if ((EnableArchiveFileCompression) && (this.appenderInvalidatorThread == null)) { // EnableArchiveFileCompression creates a new file for the archive, instead of just moving the log file. // The log file is deleted instead of moved. This process may be holding a lock to that file which will // avoid the file from being deleted. Therefore we must periodically close appenders for files that // were archived so that the file can be deleted. 
this.appenderInvalidatorThread = new Thread(() => { while (true) { try { if (this.stopAppenderInvalidatorThreadWaitHandle.WaitOne(200)) break; lock (SyncRoot) { this.fileAppenderCache.InvalidateAppendersForInvalidFiles(); } } catch (Exception ex) { InternalLogger.Debug(ex, "Exception in FileTarget appender-invalidator thread."); } } }); this.appenderInvalidatorThread.IsBackground = true; this.appenderInvalidatorThread.Start(); } } } else { this.fileAppenderCache.ArchiveFilePatternToWatch = null; this.StopAppenderInvalidatorThread(); } } #endif } private void StopAppenderInvalidatorThread() { #if !SILVERLIGHT && !__IOS__ && !__ANDROID__ if (this.appenderInvalidatorThread != null) { this.stopAppenderInvalidatorThreadWaitHandle.Set(); this.appenderInvalidatorThread = null; } #endif } /// <summary> /// Removes records of initialized files that have not been /// accessed in the last two days. /// </summary> /// <remarks> /// Files are marked 'initialized' for the purpose of writing footers when the logging finishes. /// </remarks> public void CleanupInitializedFiles() { this.CleanupInitializedFiles(DateTime.UtcNow.AddDays(-FileTarget.InitializedFilesCleanupPeriod)); } /// <summary> /// Removes records of initialized files that have not been /// accessed after the specified date. /// </summary> /// <param name="cleanupThreshold">The cleanup threshold.</param> /// <remarks> /// Files are marked 'initialized' for the purpose of writing footers when the logging finishes. /// </remarks> public void CleanupInitializedFiles(DateTime cleanupThreshold) { var filesToFinalize = new List<string>(); // Select the files require to be finalized. foreach (var file in this.initializedFiles) { if (file.Value < cleanupThreshold) { filesToFinalize.Add(file.Key); } } // Finalize the files. foreach (string fileName in filesToFinalize) { this.FinalizeFile(fileName); } } /// <summary> /// Flushes all pending file operations. /// </summary> /// <param name="asyncContinuation">The asynchronous continuation.</param> /// <remarks> /// The timeout parameter is ignored, because file APIs don't provide /// the needed functionality. /// </remarks> protected override void FlushAsync(AsyncContinuation asyncContinuation) { try { fileAppenderCache.FlushAppenders(); asyncContinuation(null); } catch (Exception exception) { if (exception.MustBeRethrown()) { throw; } asyncContinuation(exception); } } /// <summary> /// Returns the suitable appender factory ( <see cref="IFileAppenderFactory"/>) to be used to generate the file /// appenders associated with the <see cref="FileTarget"/> instance. /// /// The type of the file appender factory returned depends on the values of various <see cref="FileTarget"/> properties. 
/// </summary> /// <returns><see cref="IFileAppenderFactory"/> suitable for this instance.</returns> private IFileAppenderFactory GetFileAppenderFactory() { if (!this.KeepFileOpen) { return RetryingMultiProcessFileAppender.TheFactory; } else if (this.NetworkWrites) { return RetryingMultiProcessFileAppender.TheFactory; } else if (this.ConcurrentWrites) { #if !SupportsMutex return RetryingMultiProcessFileAppender.TheFactory; #elif MONO // // mono on Windows uses mutexes, on Unix - special appender // if (PlatformDetector.IsUnix) { return UnixMultiProcessFileAppender.TheFactory; } else { return MutexMultiProcessFileAppender.TheFactory; } #else return MutexMultiProcessFileAppender.TheFactory; #endif } else if (IsArchivingEnabled()) return CountingSingleProcessFileAppender.TheFactory; else return SingleProcessFileAppender.TheFactory; } private bool IsArchivingEnabled() { return this.ArchiveAboveSize != FileTarget.ArchiveAboveSizeDisabled || this.ArchiveEvery != FileArchivePeriod.None; } /// <summary> /// Initializes file logging by creating data structures that /// enable efficient multi-file logging. /// </summary> protected override void InitializeTarget() { base.InitializeTarget(); RefreshFileArchive(); this.appenderFactory = GetFileAppenderFactory(); this.fileAppenderCache = new FileAppenderCache(this.OpenFileCacheSize, this.appenderFactory, this); RefreshArchiveFilePatternToWatch(); if ((this.OpenFileCacheSize > 0 || this.EnableFileDelete) && this.OpenFileCacheTimeout > 0) { this.autoClosingTimer = new Timer( this.AutoClosingTimerCallback, null, this.OpenFileCacheTimeout*1000, this.OpenFileCacheTimeout*1000); } } /// <summary> /// Closes the file(s) opened for writing. /// </summary> protected override void CloseTarget() { base.CloseTarget(); foreach (string fileName in new List<string>(this.initializedFiles.Keys)) { this.FinalizeFile(fileName); } if (this.autoClosingTimer != null) { this.autoClosingTimer.Change(Timeout.Infinite, Timeout.Infinite); this.autoClosingTimer.Dispose(); this.autoClosingTimer = null; } this.StopAppenderInvalidatorThread(); this.fileAppenderCache.CloseAppenders(); } /// <summary> /// Writes the specified logging event to a file specified in the FileName /// parameter. /// </summary> /// <param name="logEvent">The logging event.</param> protected override void Write(LogEventInfo logEvent) { var fullFileName = this.GetFullFileName(logEvent); byte[] bytes = this.GetBytesToWrite(logEvent); ProcessLogEvent(logEvent, fullFileName, bytes); } /// <summary> /// Get full filename (=absolute) and cleaned if needed. /// </summary> /// <param name="logEvent"></param> /// <returns></returns> internal string GetFullFileName(LogEventInfo logEvent) { if (this.fullFileName == null) { return null; } return this.fullFileName.Render(logEvent); } /// <summary> /// Writes the specified array of logging events to a file specified in the FileName /// parameter. /// </summary> /// <param name="logEvents">An array of <see cref="AsyncLogEventInfo"/> objects.</param> /// <remarks> /// This function makes use of the fact that the events are batched by sorting /// the requests by filename. This optimizes the number of open/close calls /// and can help improve performance. 
/// </remarks> protected override void Write(AsyncLogEventInfo[] logEvents) { var buckets = logEvents.BucketSort(c => this.GetFullFileName(c.LogEvent)); using (var ms = new MemoryStream()) { var pendingContinuations = new List<AsyncContinuation>(); foreach (var bucket in buckets) { string fileName = bucket.Key; ms.SetLength(0); ms.Position = 0; LogEventInfo firstLogEvent = null; foreach (AsyncLogEventInfo ev in bucket.Value) { if (firstLogEvent == null) { firstLogEvent = ev.LogEvent; } byte[] bytes = this.GetBytesToWrite(ev.LogEvent); ms.Write(bytes, 0, bytes.Length); pendingContinuations.Add(ev.Continuation); } this.FlushCurrentFileWrites(fileName, firstLogEvent, ms, pendingContinuations); } } } private void ProcessLogEvent(LogEventInfo logEvent, string fileName, byte[] bytesToWrite) { #if !SILVERLIGHT && !__IOS__ && !__ANDROID__ this.fileAppenderCache.InvalidateAppendersForInvalidFiles(); #endif TryArchiveFile(fileName, logEvent, bytesToWrite.Length); // Clean up old archives if this is the first time a log record is being written to // this log file and the archiving system is date/time based. if (this.ArchiveNumbering == ArchiveNumberingMode.Date && this.ArchiveEvery != FileArchivePeriod.None && ShouldDeleteOldArchives()) { if (!previousFileNames.Contains(fileName)) { if (this.previousFileNames.Count > this.maxLogFilenames) { this.previousFileNames.Dequeue(); } string fileNamePattern = this.GetArchiveFileNamePattern(fileName, logEvent); if (fileNamePattern != null) { this.DeleteOldDateArchives(fileNamePattern); } this.previousFileNames.Enqueue(fileName); } } this.WriteToFile(fileName, logEvent, bytesToWrite, false); previousLogFileName = fileName; previousLogEventTimestamp = logEvent.TimeStamp; } /// <summary> /// Formats the log event for write. /// </summary> /// <param name="logEvent">The log event to be formatted.</param> /// <returns>A string representation of the log event.</returns> protected virtual string GetFormattedMessage(LogEventInfo logEvent) { return this.Layout.Render(logEvent); } /// <summary> /// Gets the bytes to be written to the file. /// </summary> /// <param name="logEvent">Log event.</param> /// <returns>Array of bytes that are ready to be written.</returns> protected virtual byte[] GetBytesToWrite(LogEventInfo logEvent) { string renderedText = this.GetFormattedMessage(logEvent) + this.NewLineChars; return this.TransformBytes(this.Encoding.GetBytes(renderedText)); } /// <summary> /// Modifies the specified byte array before it gets sent to a file. /// </summary> /// <param name="value">The byte array.</param> /// <returns>The modified byte array. The function can do the modification in-place.</returns> protected virtual byte[] TransformBytes(byte[] value) { return value; } /// <summary> /// Replaces the numeric pattern i.e. {#} in a file name with the <paramref name="value"/> parameter value. 
/// </summary> /// <param name="pattern">File name which contains the numeric pattern.</param> /// <param name="value">Value which will replace the numeric pattern.</param> /// <returns>File name with the value of <paramref name="value"/> in the position of the numeric pattern.</returns> private static string ReplaceNumberPattern(string pattern, int value) { int firstPart = pattern.IndexOf("{#", StringComparison.Ordinal); int lastPart = pattern.IndexOf("#}", StringComparison.Ordinal) + 2; int numDigits = lastPart - firstPart - 2; return pattern.Substring(0, firstPart) + Convert.ToString(value, 10).PadLeft(numDigits, '0') + pattern.Substring(lastPart); } private void FlushCurrentFileWrites(string currentFileName, LogEventInfo firstLogEvent, MemoryStream ms, List<AsyncContinuation> pendingContinuations) { Exception lastException = null; try { if (currentFileName != null) ProcessLogEvent(firstLogEvent, currentFileName, ms.ToArray()); } catch (Exception exception) { if (exception.MustBeRethrown()) { throw; } lastException = exception; } foreach (AsyncContinuation cont in pendingContinuations) { cont(lastException); } pendingContinuations.Clear(); } /// <summary> /// Determines if the file name as <see cref="String"/> contains a numeric pattern i.e. {#} in it. /// /// Example: /// trace{#}.log Contains the numeric pattern. /// trace{###}.log Contains the numeric pattern. /// trace{#X#}.log Contains the numeric pattern (See remarks). /// trace.log Does not contain the pattern. /// </summary> /// <remarks>Occasionally, this method can identify the existence of the {#} pattern incorrectly.</remarks> /// <param name="fileName">File name to be checked.</param> /// <returns><see langword="true"/> when the pattern is found; <see langword="false"/> otherwise.</returns> private static bool ContainsFileNamePattern(string fileName) { int startingIndex = fileName.IndexOf("{#", StringComparison.Ordinal); int endingIndex = fileName.IndexOf("#}", StringComparison.Ordinal); return (startingIndex != -1 && endingIndex != -1 && startingIndex < endingIndex); } /// <summary> /// Archives the <paramref name="fileName"/> using a rolling style numbering (the most recent is always #0 then /// #1, ..., #N. When the number of archive files exceed <see cref="P:MaxArchiveFiles"/> the obsolete archives /// are deleted. /// </summary> /// <remarks> /// This method is called recursively. This is the reason the <paramref name="archiveNumber"/> is required. /// </remarks> /// <param name="fileName">File name to be archived.</param> /// <param name="pattern">File name template which contains the numeric pattern to be replaced.</param> /// <param name="archiveNumber">Value which will replace the numeric pattern.</param> private void RollArchivesForward(string fileName, string pattern, int archiveNumber) { if (ShouldDeleteOldArchives() && archiveNumber >= this.MaxArchiveFiles) { File.Delete(fileName); return; } if (!File.Exists(fileName)) { return; } string newFileName = ReplaceNumberPattern(pattern, archiveNumber); RollArchivesForward(newFileName, pattern, archiveNumber + 1); if (archiveNumber == 0) ArchiveFile(fileName, newFileName); else { InternalLogger.Info("Roll archive {0} to {1}", fileName, newFileName); File.Move(fileName, newFileName); } } /// <summary> /// Archives the <paramref name="fileName"/> using a sequence style numbering. The most recent archive has the /// highest number. When the number of archive files exceed <see cref="P:MaxArchiveFiles"/> the obsolete /// archives are deleted. 
/// </summary> /// <param name="fileName">File name to be archived.</param> /// <param name="pattern">File name template which contains the numeric pattern to be replaced.</param> private void ArchiveBySequence(string fileName, string pattern) { FileNameTemplate fileTemplate = new FileNameTemplate(Path.GetFileName(pattern)); int trailerLength = fileTemplate.Template.Length - fileTemplate.EndAt; string fileNameMask = fileTemplate.ReplacePattern("*"); string dirName = Path.GetDirectoryName(Path.GetFullPath(pattern)); int nextNumber = -1; int minNumber = -1; var number2Name = new Dictionary<int, string>(); try { #if SILVERLIGHT && !WINDOWS_PHONE foreach (string s in Directory.EnumerateFiles(dirName, fileNameMask)) #else foreach (string s in Directory.GetFiles(dirName, fileNameMask)) #endif { string baseName = Path.GetFileName(s); string number = baseName.Substring(fileTemplate.BeginAt, baseName.Length - trailerLength - fileTemplate.BeginAt); int num; try { num = Convert.ToInt32(number, CultureInfo.InvariantCulture); } catch (FormatException) { continue; } nextNumber = Math.Max(nextNumber, num); minNumber = minNumber != -1 ? Math.Min(minNumber, num) : num; number2Name[num] = s; } nextNumber++; } catch (DirectoryNotFoundException) { Directory.CreateDirectory(dirName); nextNumber = 0; } if (minNumber != -1 && ShouldDeleteOldArchives()) { int minNumberToKeep = nextNumber - this.MaxArchiveFiles + 1; for (int i = minNumber; i < minNumberToKeep; ++i) { string s; if (number2Name.TryGetValue(i, out s)) { InternalLogger.Info("Deleting old archive {0}", s); File.Delete(s); } } } string newFileName = ReplaceNumberPattern(pattern, nextNumber); ArchiveFile(fileName, newFileName); } /// <summary> /// Archives fileName to archiveFileName. /// </summary> /// <param name="fileName">File name to be archived.</param> /// <param name="archiveFileName">Name of the archive file.</param> private void ArchiveFile(string fileName, string archiveFileName) { FinalizeFile(fileName, isArchiving: true); string archiveFolderPath = Path.GetDirectoryName(archiveFileName); if (!Directory.Exists(archiveFolderPath)) Directory.CreateDirectory(archiveFolderPath); if (EnableArchiveFileCompression) { InternalLogger.Info("Archiving {0} to compressed {1}", fileName, archiveFileName); FileCompressor.CompressFile(fileName, archiveFileName); DeleteAndWaitForFileDelete(fileName); } else { InternalLogger.Info("Archiving {0} to {1}", fileName, archiveFileName); if (File.Exists(archiveFileName)) { //todo handle double footer InternalLogger.Info("Already exists, append to {0}", archiveFileName); //todo maybe needs a better filelock behaviour //copy to archive file. using (FileStream fileStream = File.Open(fileName, FileMode.Open)) using (FileStream archiveFileStream = File.Open(archiveFileName, FileMode.Append )) { fileStream.CopyAndSkipBom(archiveFileStream, Encoding); //clear old content fileStream.SetLength(0); fileStream.Close(); // This flushes the content, too. 
#if NET3_5 archiveFileStream.Flush(); #else archiveFileStream.Flush(true); #endif } } else { File.Move(fileName, archiveFileName); } } } private static void DeleteAndWaitForFileDelete(string fileName) { var originalFileCreationTime = (new FileInfo(fileName)).CreationTime; File.Delete(fileName); if (File.Exists(fileName)) { FileInfo currentFileInfo; do { Thread.Sleep(100); currentFileInfo = new FileInfo(fileName); } while ((currentFileInfo.Exists) && (currentFileInfo.CreationTime == originalFileCreationTime)); } } #if !NET_CF /// <summary> /// <para> /// Archives the <paramref name="fileName"/> using a date and sequence style numbering. Archives will be stamped /// with the prior period (Year, Month, Day) datetime. The most recent archive has the highest number (in /// combination with the date). /// </para> /// <para> /// When the number of archive files exceed <see cref="P:MaxArchiveFiles"/> the obsolete archives are deleted. /// </para> /// </summary> /// <param name="fileName">File name to be archived.</param> /// <param name="pattern">File name template which contains the numeric pattern to be replaced.</param> /// <param name="logEvent">Log event that the <see cref="FileTarget"/> instance is currently processing.</param> private void ArchiveByDateAndSequence(string fileName, string pattern, LogEventInfo logEvent) { string baseNamePattern = Path.GetFileName(pattern); if (string.IsNullOrEmpty(baseNamePattern)) { return; } FileNameTemplate fileTemplate = new FileNameTemplate(baseNamePattern); string fileNameMask = fileTemplate.ReplacePattern("*"); string dateFormat = GetArchiveDateFormatString(this.ArchiveDateFormat); string dirName = Path.GetDirectoryName(Path.GetFullPath(pattern)); if (string.IsNullOrEmpty(dirName)) { return; } int minSequenceLength = fileTemplate.EndAt - fileTemplate.BeginAt - 2; int nextSequenceNumber; DateTime archiveDate = GetArchiveDate(fileName, logEvent); List<string> archiveFileNames; if (Directory.Exists(dirName)) { List<DateAndSequenceArchive> archives = FindDateAndSequenceArchives(dirName, fileName, fileNameMask, minSequenceLength, dateFormat, fileTemplate) .ToList(); // Find out the next sequence number among existing archives having the same date part as the current date. int? lastSequenceNumber = archives .Where(a => a.HasSameFormattedDate(archiveDate)) .Max(a => (int?)a.Sequence); nextSequenceNumber = (int)(lastSequenceNumber != null ? lastSequenceNumber + 1 : 0); archiveFileNames = archives .OrderBy(a => a.Date) .ThenBy(a => a.Sequence) .Select(a => a.FileName) .ToList(); } else { Directory.CreateDirectory(dirName); nextSequenceNumber = 0; archiveFileNames = new List<string>(); } string paddedSequence = nextSequenceNumber.ToString().PadLeft(minSequenceLength, '0'); string archiveFileNameWithoutPath = fileNameMask.Replace("*", string.Format("{0}.{1}", archiveDate.ToString(dateFormat), paddedSequence)); string archiveFileName = Path.Combine(dirName, archiveFileNameWithoutPath); ArchiveFile(fileName, archiveFileName); archiveFileNames.Add(archiveFileName); EnsureArchiveCount(archiveFileNames); } /// <summary> /// Deletes files among a given list, and stops as soon as the remaining files are fewer than the <see /// cref="P:FileTarget.MaxArchiveFiles"/> setting. /// </summary> /// <param name="oldArchiveFileNames">List of the file archives.</param> /// <remarks> /// Items are deleted in the same order as in <paramref name="oldArchiveFileNames"/>. No file is deleted if <see /// cref="P:FileTarget.MaxArchiveFiles"/> property is zero. 
/// </remarks> private void EnsureArchiveCount(List<string> oldArchiveFileNames) { if (!ShouldDeleteOldArchives()) { return; } int numberToDelete = oldArchiveFileNames.Count - this.MaxArchiveFiles; for (int fileIndex = 0; fileIndex < numberToDelete; fileIndex++) { InternalLogger.Info("Deleting old archive {0}.", oldArchiveFileNames[fileIndex]); File.Delete(oldArchiveFileNames[fileIndex]); } } /// <summary> /// Searches a given directory for archives that comply with the current archive pattern. /// </summary> /// <returns>An enumeration of archive infos, ordered by their file creation date.</returns> private IEnumerable<DateAndSequenceArchive> FindDateAndSequenceArchives(string dirName, string logFileName, string fileNameMask, int minSequenceLength, string dateFormat, FileNameTemplate fileTemplate) { var directoryInfo = new DirectoryInfo(dirName); int archiveFileNameMinLength = fileNameMask.Length + minSequenceLength; var archiveFileNames = GetFiles(directoryInfo, fileNameMask) .Where(n => n.Name.Length >= archiveFileNameMinLength) .OrderBy(n => n.CreationTime) .Select(n => n.FullName); foreach (string archiveFileName in archiveFileNames) { //Get the archive file name or empty string if it's null string archiveFileNameWithoutPath = Path.GetFileName(archiveFileName) ?? ""; DateTime date; int sequence; if ( !TryParseDateAndSequence(archiveFileNameWithoutPath, dateFormat, fileTemplate, out date, out sequence)) { continue; } //It's possible that the log file itself has a name that will match the archive file mask. if (string.IsNullOrEmpty(archiveFileNameWithoutPath) || archiveFileNameWithoutPath.Equals(Path.GetFileName(logFileName))) { continue; } yield return new DateAndSequenceArchive(archiveFileName, date, dateFormat, sequence); } } /// <summary> /// Parse filename with date and sequence pattern /// </summary> /// <param name="archiveFileNameWithoutPath"></param> /// <param name="dateFormat">dateformat for archive</param> /// <param name="fileTemplate"></param> /// <param name="date">the found pattern. When failed, then default</param> /// <param name="sequence">the found pattern. When failed, then default</param> /// <returns></returns> private static bool TryParseDateAndSequence(string archiveFileNameWithoutPath, string dateFormat, FileNameTemplate fileTemplate, out DateTime date, out int sequence) { int trailerLength = fileTemplate.Template.Length - fileTemplate.EndAt; int dateAndSequenceIndex = fileTemplate.BeginAt; int dateAndSequenceLength = archiveFileNameWithoutPath.Length - trailerLength - dateAndSequenceIndex; if (dateAndSequenceLength < 0) { date = default(DateTime); sequence = 0; return false; } string dateAndSequence = archiveFileNameWithoutPath.Substring(dateAndSequenceIndex, dateAndSequenceLength); int sequenceIndex = dateAndSequence.LastIndexOf('.') + 1; string sequencePart = dateAndSequence.Substring(sequenceIndex); if (!Int32.TryParse(sequencePart, NumberStyles.None, CultureInfo.CurrentCulture, out sequence)) { date = default(DateTime); return false; } var dateAndSequenceLength2 = dateAndSequence.Length - sequencePart.Length - 1; if (dateAndSequenceLength2 < 0) { date = default(DateTime); return false; } string datePart = dateAndSequence.Substring(0, dateAndSequenceLength2); if (!DateTime.TryParseExact(datePart, dateFormat, CultureInfo.CurrentCulture, DateTimeStyles.None, out date)) { return false; } return true; } /// <summary> /// Gets the collection of files in the specified directory which they match the <paramref name="fileNameMask"/>. 
/// </summary> /// <param name="directoryInfo">Directory to searched.</param> /// <param name="fileNameMask">Pattern which the files will be searched against.</param> /// <returns>List of files matching the pattern.</returns> private static IEnumerable<FileInfo> GetFiles(DirectoryInfo directoryInfo, string fileNameMask) { #if SILVERLIGHT && !WINDOWS_PHONE return directoryInfo.EnumerateFiles(fileNameMask); #else return directoryInfo.GetFiles(fileNameMask); #endif } /// <summary> /// Replaces the string-based pattern i.e. {#} in a file name with the value passed in <paramref /// name="replacementValue"/> parameter. /// </summary> /// <param name="pattern">File name which contains the string-based pattern.</param> /// <param name="replacementValue">Value which will replace the string-based pattern.</param> /// <returns> /// File name with the value of <paramref name="replacementValue"/> in the position of the string-based pattern. /// </returns> private static string ReplaceFileNamePattern(string pattern, string replacementValue) { // // TODO: ReplaceFileNamePattern() method is nearly identical to ReplaceNumberPattern(). Consider merging. // return new FileNameTemplate(Path.GetFileName(pattern)).ReplacePattern(replacementValue); } /// <summary> /// Archives the <paramref name="fileName"/> using a date style numbering. Archives will be stamped with the /// prior period (Year, Month, Day, Hour, Minute) datetime. When the number of archive files exceed <see /// cref="P:MaxArchiveFiles"/> the obsolete archives are deleted. /// </summary> /// <param name="fileName">File name to be archived.</param> /// <param name="pattern">File name template which contains the numeric pattern to be replaced.</param> /// <param name="logEvent">Log event that the <see cref="FileTarget"/> instance is currently processing.</param> private void ArchiveByDate(string fileName, string pattern, LogEventInfo logEvent) { string fileNameMask = ReplaceFileNamePattern(pattern, "*"); string dirName = Path.GetDirectoryName(Path.GetFullPath(pattern)); string dateFormat = GetArchiveDateFormatString(this.ArchiveDateFormat); DateTime archiveDate = GetArchiveDate(fileName, logEvent); if (dirName != null) { string archiveFileName = Path.Combine(dirName, fileNameMask.Replace("*", archiveDate.ToString(dateFormat))); ArchiveFile(fileName, archiveFileName); } DeleteOldDateArchives(pattern); } /// <summary> /// Deletes archive files in reverse chronological order until only the /// MaxArchiveFiles number of archive files remain. 
/// </summary> /// <param name="pattern">The pattern that archive filenames will match</param> private void DeleteOldDateArchives(string pattern) { if (!ShouldDeleteOldArchives()) { return; } string fileNameMask = ReplaceFileNamePattern(pattern, "*"); string dirName = Path.GetDirectoryName(Path.GetFullPath(pattern)); string dateFormat = GetArchiveDateFormatString(this.ArchiveDateFormat); if (dirName != null) { DirectoryInfo directoryInfo = new DirectoryInfo(dirName); if (!directoryInfo.Exists) { Directory.CreateDirectory(dirName); return; } #if SILVERLIGHT && !WINDOWS_PHONE var files = directoryInfo.EnumerateFiles(fileNameMask).OrderBy(n => n.CreationTime).Select(n => n.FullName); #else var files = directoryInfo.GetFiles(fileNameMask).OrderBy(n => n.CreationTime).Select(n => n.FullName); #endif List<string> filesByDate = new List<string>(); foreach (string nextFile in files) { string archiveFileName = Path.GetFileName(nextFile); int lastIndexOfStar = fileNameMask.LastIndexOf('*'); if (lastIndexOfStar + dateFormat.Length <= archiveFileName.Length) { string datePart = archiveFileName.Substring(lastIndexOfStar, dateFormat.Length); DateTime fileDate = DateTime.MinValue; if (DateTime.TryParseExact(datePart, dateFormat, CultureInfo.InvariantCulture, DateTimeStyles.None, out fileDate)) { filesByDate.Add(nextFile); } } } EnsureArchiveCount(filesByDate); } } #endif /// <summary> /// Gets the correct formatting <see langword="String"/> to be used based on the value of <see /// cref="P:ArchiveEvery"/> for converting <see langword="DateTime"/> values which will be inserting into file /// names during archiving. /// /// This value will be computed only when a empty value or <see langword="null"/> is passed into <paramref name="defaultFormat"/> /// </summary> /// <param name="defaultFormat">Date format to used irrespectively of <see cref="P:ArchiveEvery"/> value.</param> /// <returns>Formatting <see langword="String"/> for dates.</returns> private string GetArchiveDateFormatString(string defaultFormat) { // If archiveDateFormat is not set in the config file, use a default // date format string based on the archive period. string formatString = defaultFormat; if (string.IsNullOrEmpty(formatString)) { switch (this.ArchiveEvery) { case FileArchivePeriod.Year: formatString = "yyyy"; break; case FileArchivePeriod.Month: formatString = "yyyyMM"; break; default: formatString = "yyyyMMdd"; break; case FileArchivePeriod.Hour: formatString = "yyyyMMddHH"; break; case FileArchivePeriod.Minute: formatString = "yyyyMMddHHmm"; break; } } return formatString; } private DateTime GetArchiveDate(string fileName, LogEventInfo logEvent) { var lastWriteTimeUtc = this.fileAppenderCache.GetFileLastWriteTimeUtc(fileName, true); //todo null check var lastWriteTime = TimeSource.Current.FromSystemTime(lastWriteTimeUtc.Value); InternalLogger.Trace("Calculating archive date. 
Last write time: {0}; Previous log event time: {1}", lastWriteTime, previousLogEventTimestamp); bool previousLogIsMoreRecent = previousLogEventTimestamp.HasValue && (previousLogEventTimestamp.Value > lastWriteTime); if (previousLogIsMoreRecent) { InternalLogger.Trace("Using previous log event time (is more recent)"); return previousLogEventTimestamp.Value; } if (previousLogEventTimestamp.HasValue && PreviousLogOverlappedPeriod(logEvent, lastWriteTime)) { InternalLogger.Trace("Using previous log event time (previous log overlapped period)"); return previousLogEventTimestamp.Value; } InternalLogger.Trace("Using last write time"); return lastWriteTime; } private bool PreviousLogOverlappedPeriod(LogEventInfo logEvent, DateTime lastWrite) { if (!previousLogEventTimestamp.HasValue) return false; string formatString = GetArchiveDateFormatString(string.Empty); string lastWriteTimeString = lastWrite.ToString(formatString, CultureInfo.InvariantCulture); string logEventTimeString = logEvent.TimeStamp.ToString(formatString, CultureInfo.InvariantCulture); if (lastWriteTimeString != logEventTimeString) return false; DateTime periodAfterPreviousLogEventTime; switch (this.ArchiveEvery) { case FileArchivePeriod.Year: periodAfterPreviousLogEventTime = previousLogEventTimestamp.Value.AddYears(1); break; case FileArchivePeriod.Month: periodAfterPreviousLogEventTime = previousLogEventTimestamp.Value.AddMonths(1); break; case FileArchivePeriod.Day: periodAfterPreviousLogEventTime = previousLogEventTimestamp.Value.AddDays(1); break; case FileArchivePeriod.Hour: periodAfterPreviousLogEventTime = previousLogEventTimestamp.Value.AddHours(1); break; case FileArchivePeriod.Minute: periodAfterPreviousLogEventTime = previousLogEventTimestamp.Value.AddMinutes(1); break; default: return false; } string periodAfterPreviousLogEventTimeString = periodAfterPreviousLogEventTime.ToString(formatString, CultureInfo.InvariantCulture); return lastWriteTimeString == periodAfterPreviousLogEventTimeString; } /// <summary> /// Invokes the archiving process after determining when and which type of archiving is required. 
/// </summary> /// <param name="fileName">File name to be checked and archived.</param> /// <param name="eventInfo">Log event that the <see cref="FileTarget"/> instance is currently processing.</param> private void DoAutoArchive(string fileName, LogEventInfo eventInfo) { var fileInfo = new FileInfo(fileName); if (!fileInfo.Exists) { return; } string fileNamePattern = GetArchiveFileNamePattern(fileName, eventInfo); if (fileNamePattern == null) { InternalLogger.Warn("Skip auto archive because fileName is NULL"); return; } if (!ContainsFileNamePattern(fileNamePattern)) { if (fileArchive.Archive(fileNamePattern, fileInfo.FullName, CreateDirs)) { if (this.initializedFiles.ContainsKey(fileInfo.FullName)) { this.initializedFiles.Remove(fileInfo.FullName); } } } else { switch (this.ArchiveNumbering) { case ArchiveNumberingMode.Rolling: this.RollArchivesForward(fileInfo.FullName, fileNamePattern, 0); break; case ArchiveNumberingMode.Sequence: this.ArchiveBySequence(fileInfo.FullName, fileNamePattern); break; #if !NET_CF case ArchiveNumberingMode.Date: this.ArchiveByDate(fileInfo.FullName, fileNamePattern, eventInfo); break; case ArchiveNumberingMode.DateAndSequence: this.ArchiveByDateAndSequence(fileInfo.FullName, fileNamePattern, eventInfo); break; #endif } } } /// <summary> /// Gets the pattern that archive files will match /// </summary> /// <param name="fileName">Filename of the log file</param> /// <param name="eventInfo">Log event that the <see cref="FileTarget"/> instance is currently processing.</param> /// <returns>A string with a pattern that will match the archive filenames</returns> private string GetArchiveFileNamePattern(string fileName, LogEventInfo eventInfo) { if (this.fullarchiveFileName == null) { string ext = EnableArchiveFileCompression ? ".zip" : Path.GetExtension(fileName); return Path.ChangeExtension(fileName, ".{#}" + ext); } else { //The archive file name is given. There are two possibilities //(1) User supplied the Filename with pattern //(2) User supplied the normal filename string archiveFileName = this.fullarchiveFileName.Render(eventInfo); return archiveFileName; } } /// <summary> /// Determine if old archive files should be deleted. /// </summary> /// <returns><see langword="true"/> when old archives should be deleted; <see langword="false"/> otherwise.</returns> private bool ShouldDeleteOldArchives() { return MaxArchiveFiles > 0; } /// <summary> /// Archives the file if it should be archived. 
/// </summary> /// <param name="fileName">The file name to check for.</param> /// <param name="ev">Log event that the <see cref="FileTarget"/> instance is currently processing.</param> /// <param name="upcomingWriteSize">The size in bytes of the next chunk of data to be written in the file.</param> private void TryArchiveFile(string fileName, LogEventInfo ev, int upcomingWriteSize) { var archiveFile = this.GetArchiveFileName(fileName, ev, upcomingWriteSize); if (!string.IsNullOrEmpty(archiveFile)) { #if SupportsMutex Mutex archiveMutex = this.fileAppenderCache.GetArchiveMutex(fileName); try { if (archiveMutex != null) archiveMutex.WaitOne(); } catch (AbandonedMutexException) { // ignore the exception, another process was killed without properly releasing the mutex // the mutex has been acquired, so proceed to writing // See: http://msdn.microsoft.com/en-us/library/system.threading.abandonedmutexexception.aspx } #endif try { archiveFile = this.GetArchiveFileName(fileName, ev, upcomingWriteSize); if (!string.IsNullOrEmpty(archiveFile)) { this.DoAutoArchive(archiveFile, ev); } } finally { #if SupportsMutex if (archiveMutex != null) archiveMutex.ReleaseMutex(); #endif } } } /// <summary> /// Indicates if the automatic archiving process should be executed. /// </summary> /// <param name="fileName">File name to be written.</param> /// <param name="ev">Log event that the <see cref="FileTarget"/> instance is currently processing.</param> /// <param name="upcomingWriteSize">The size in bytes of the next chunk of data to be written in the file.</param> /// <returns>Filename to archive. If <c>null</c>, then nothing to archive.</returns> private string GetArchiveFileName(string fileName, LogEventInfo ev, int upcomingWriteSize) { var hasFileName = !(fileName == null && previousLogFileName == null); if (hasFileName) { return GetArchiveFileNameBasedOnFileSize(fileName, upcomingWriteSize) ?? GetArchiveFileNameBasedOnTime(fileName, ev); } return null; } /// <summary> /// Returns the correct filename to archive /// </summary> /// <returns></returns> private string GetPotentialFileForArchiving(string fileName) { if (fileName == previousLogFileName) { //both the same, so don't care return fileName; } if (string.IsNullOrEmpty(previousLogFileName)) { return fileName; } if (string.IsNullOrEmpty(fileName)) { return previousLogFileName; } //this is an expensive call var fileLength = this.fileAppenderCache.GetFileLength(fileName, true); string fileToArchive = fileLength != null ? fileName : previousLogFileName; return fileToArchive; } /// <summary> /// Gets the file name for archiving, or null if archiving should not occur based on file size. /// </summary> /// <param name="fileName">File name to be written.</param> /// <param name="upcomingWriteSize">The size in bytes of the next chunk of data to be written in the file.</param> /// <returns>Filename to archive. If <c>null</c>, then nothing to archive.</returns> private string GetArchiveFileNameBasedOnFileSize(string fileName, int upcomingWriteSize) { if (this.ArchiveAboveSize == ArchiveAboveSizeDisabled) { return null; } fileName = GetPotentialFileForArchiving(fileName); if (fileName == null) { return null; } var length = this.fileAppenderCache.GetFileLength(fileName, true); if (length == null) { return null; } var shouldArchive = length.Value + upcomingWriteSize > this.ArchiveAboveSize; if (shouldArchive) { return fileName; } return null; } /// <summary> /// Returns the file name for archiving, or null if archiving should not occur based on date/time. 
/// </summary> /// <param name="fileName">File name to be written.</param> /// <param name="logEvent">Log event that the <see cref="FileTarget"/> instance is currently processing.</param> /// <returns>Filename to archive. If <c>null</c>, then nothing to archive.</returns> private string GetArchiveFileNameBasedOnTime(string fileName, LogEventInfo logEvent) { if (this.ArchiveEvery == FileArchivePeriod.None) { return null; } fileName = GetPotentialFileForArchiving(fileName); if (fileName == null) { return null; } var creationTimeUtc = this.fileAppenderCache.GetFileCreationTimeUtc(fileName, true); if (creationTimeUtc == null) { return null; } // file creation time is in Utc and logEvent's timestamp is originated from TimeSource.Current, // so we should ask the TimeSource to convert file time to TimeSource time: DateTime creationTime = TimeSource.Current.FromSystemTime(creationTimeUtc.Value); string formatString = GetArchiveDateFormatString(string.Empty); string fileCreated = creationTime.ToString(formatString, CultureInfo.InvariantCulture); string logEventRecorded = logEvent.TimeStamp.ToString(formatString, CultureInfo.InvariantCulture); var shouldArchive = fileCreated != logEventRecorded; if (shouldArchive) { return fileName; } return null; } private void AutoClosingTimerCallback(object state) { lock (this.SyncRoot) { if (!this.IsInitialized) { return; } try { DateTime expireTime = DateTime.UtcNow.AddSeconds(-this.OpenFileCacheTimeout); this.fileAppenderCache.CloseAppenders(expireTime); } catch (Exception exception) { InternalLogger.Warn(exception, "Exception in AutoClosingTimerCallback."); if (exception.MustBeRethrown()) { throw; } } } } /// <summary> /// The sequence of <see langword="byte"/> to be written for the file header. /// </summary> /// <returns>Sequence of <see langword="byte"/> to be written.</returns> private byte[] GetHeaderBytes() { return this.GetLayoutBytes(this.Header); } /// <summary> /// The sequence of <see langword="byte"/> to be written for the file footer. /// </summary> /// <returns>Sequence of <see langword="byte"/> to be written.</returns> private byte[] GetFooterBytes() { return this.GetLayoutBytes(this.Footer); } /// <summary> /// Evaluates which parts of a file should be written (header, content, footer) based on various properties of /// <see cref="FileTarget"/> instance and writes them. /// </summary> /// <param name="fileName">File name to be written.</param> /// <param name="logEvent">Log event that the <see cref="FileTarget"/> instance is currently processing.</param> /// <param name="bytes">Raw sequence of <see langword="byte"/> to be written into the content part of the file.</param> /// <param name="justData">Indicates that only content section should be written in the file.</param> private void WriteToFile(string fileName, LogEventInfo logEvent, byte[] bytes, bool justData) { if (this.ReplaceFileContentsOnEachWrite) { ReplaceFileContent(fileName, bytes, true); return; } bool writeHeader = InitializeFile(fileName, logEvent, justData); BaseFileAppender appender = this.fileAppenderCache.AllocateAppender(fileName); if (writeHeader) { this.WriteHeader(appender); } appender.Write(bytes); if (this.AutoFlush) { appender.Flush(); } } /// <summary> /// Initialise a file to be used by the <see cref="FileTarget"/> instance. Based on the number of initialised /// files and the values of various instance properties clean up and/or archiving processes can be invoked. 
/// </summary> /// <param name="fileName">File name to be written.</param> /// <param name="logEvent">Log event that the <see cref="FileTarget"/> instance is currently processing.</param> /// <param name="justData">Indicates that only content section should be written in the file.</param> /// <returns><see langword="true"/> when file header should be written; <see langword="false"/> otherwise.</returns> private bool InitializeFile(string fileName, LogEventInfo logEvent, bool justData) { bool writeHeader = false; if (!justData) { //UtcNow is much faster then .now. This was a bottleneck in writing a lot of files after CPU test. var now = DateTime.UtcNow; if (!this.initializedFiles.ContainsKey(fileName)) { ProcessOnStartup(fileName, logEvent); this.initializedFiles[fileName] = now; this.initializedFilesCounter++; writeHeader = true; if (this.initializedFilesCounter >= FileTarget.InitializedFilesCounterMax) { this.initializedFilesCounter = 0; this.CleanupInitializedFiles(); } } this.initializedFiles[fileName] = now; } return writeHeader; } /// <summary> /// Writes the file footer and finalizes the file in <see cref="FileTarget"/> instance internal structures. /// </summary> /// <param name="fileName">File name to close.</param> /// <param name="isArchiving">Indicates if the file is being finalized for archiving.</param> private void FinalizeFile(string fileName, bool isArchiving = false) { if ((isArchiving) || (!this.WriteFooterOnArchivingOnly)) WriteFooter(fileName); this.fileAppenderCache.InvalidateAppender(fileName); this.initializedFiles.Remove(fileName); } /// <summary> /// Writes the footer information to a file. /// </summary> /// <param name="fileName">The file path to write to.</param> private void WriteFooter(string fileName) { byte[] footerBytes = this.GetFooterBytes(); if (footerBytes != null) { if (File.Exists(fileName)) { this.WriteToFile(fileName, null, footerBytes, true); } } } /// <summary> /// Invokes the archiving and clean up of older archive file based on the values of <see /// cref="P:NLog.Targets.FileTarget.ArchiveOldFileOnStartup"/> and <see /// cref="P:NLog.Targets.FileTarget.DeleteOldFileOnStartup"/> properties respectively. /// </summary> /// <param name="fileName">File name to be written.</param> /// <param name="logEvent">Log event that the <see cref="FileTarget"/> instance is currently processing.</param> private void ProcessOnStartup(string fileName, LogEventInfo logEvent) { if (this.ArchiveOldFileOnStartup) { try { this.DoAutoArchive(fileName, logEvent); } catch (Exception exception) { InternalLogger.Warn(exception, "Unable to archive old log file '{0}'.", fileName); if (exception.MustBeRethrown()) { throw; } } } if (this.DeleteOldFileOnStartup) { try { File.Delete(fileName); } catch (DirectoryNotFoundException exception) { //never rethrow this, as this isn't an exceptional case. InternalLogger.Debug(exception, "Unable to delete old log file '{0}' as directory is missing.", fileName); } catch (Exception exception) { InternalLogger.Warn(exception, "Unable to delete old log file '{0}'.", fileName); if (exception.MustBeRethrown()) { throw; } } } } /// <summary> /// Creates the file specified in <paramref name="fileName"/> and writes the file content in each entirety i.e. /// Header, Content and Footer. 
/// </summary> /// <param name="fileName">The name of the file to be written.</param> /// <param name="bytes">Sequence of <see langword="byte"/> to be written in the content section of the file.</param> /// <param name="firstAttempt">First attempt to write?</param> /// <remarks>This method is used when the content of the log file is re-written on every write.</remarks> private void ReplaceFileContent(string fileName, byte[] bytes, bool firstAttempt) { try { using (FileStream fs = File.Create(fileName)) { byte[] headerBytes = this.GetHeaderBytes(); if (headerBytes != null) { fs.Write(headerBytes, 0, headerBytes.Length); } fs.Write(bytes, 0, bytes.Length); byte[] footerBytes = this.GetFooterBytes(); if (footerBytes != null) { fs.Write(footerBytes, 0, footerBytes.Length); } } } catch (DirectoryNotFoundException) { if (!this.CreateDirs || !firstAttempt) { throw; } Directory.CreateDirectory(Path.GetDirectoryName(fileName)); //retry. ReplaceFileContent(fileName, bytes, false); } } /// <summary> /// Writes the header information to a file. /// </summary> /// <param name="appender">File appender associated with the file.</param> private void WriteHeader(BaseFileAppender appender) { //performance: cheap check before checking file info if (Header == null) return; //todo replace with hasWritten? var length = appender.GetFileLength(); // Write header only on empty files or if file info cannot be obtained. if (length == null || length == 0) { byte[] headerBytes = this.GetHeaderBytes(); if (headerBytes != null) { appender.Write(headerBytes); } } } /// <summary> /// The sequence of <see langword="byte"/> to be written in a file after applying any formating and any /// transformations required from the <see cref="Layout"/>. /// </summary> /// <param name="layout">The layout used to render output message.</param> /// <returns>Sequence of <see langword="byte"/> to be written.</returns> /// <remarks>Usually it is used to render the header and hooter of the files.</remarks> private byte[] GetLayoutBytes(Layout layout) { if (layout == null) { return null; } //todo remove string renderedText = layout.Render(LogEventInfo.CreateNullEvent()) + this.NewLineChars; return this.TransformBytes(this.Encoding.GetBytes(renderedText)); } private class DynamicFileArchive { private readonly Queue<string> archiveFileQueue = new Queue<string>(); private readonly FileTarget fileTarget; /// <summary> /// Creates an instance of <see cref="DynamicFileArchive"/> class. /// </summary> /// <param name="fileTarget">The file target instance whose files to archive.</param> /// <param name="maxArchivedFiles">Maximum number of archive files to be kept.</param> public DynamicFileArchive(FileTarget fileTarget, int maxArchivedFiles) { this.fileTarget = fileTarget; this.MaxArchiveFileToKeep = maxArchivedFiles; } /// <summary> /// Gets or sets the maximum number of archive files that should be kept. /// </summary> public int MaxArchiveFileToKeep { get; set; } /// <summary> /// Adds the files in the specified path to the archive file queue. 
/// </summary> /// <param name="archiveFolderPath">The folder where the archive files are stored.</param> public void InitializeForArchiveFolderPath(string archiveFolderPath) { archiveFileQueue.Clear(); if (Directory.Exists(archiveFolderPath)) { #if SILVERLIGHT && !WINDOWS_PHONE var files = Directory.EnumerateFiles(archiveFolderPath); #else var files = Directory.GetFiles(archiveFolderPath); #endif foreach (string nextFile in files.OrderBy(f => ExtractArchiveNumberFromFileName(f))) archiveFileQueue.Enqueue(nextFile); } } /// <summary> /// Adds a file into archive. /// </summary> /// <param name="archiveFileName">File name of the archive</param> /// <param name="fileName">Original file name</param> /// <param name="createDirectory">Create a directory, if it does not exist</param> /// <returns><see langword="true"/> if the file has been moved successfully; <see langword="false"/> otherwise.</returns> [System.Diagnostics.CodeAnalysis.SuppressMessage("Microsoft.Design", "CA1031:DoNotCatchGeneralExceptionTypes")] public bool Archive(string archiveFileName, string fileName, bool createDirectory) { if (MaxArchiveFileToKeep < 1) { InternalLogger.Warn("Archive is called. Even though the MaxArchiveFiles is set to less than 1"); return false; } if (!File.Exists(fileName)) { InternalLogger.Error("Error while archiving, Source File : {0} Not found.", fileName); return false; } DeleteOldArchiveFiles(); AddToArchive(archiveFileName, fileName, createDirectory); return true; } /// <summary> /// Archives the file, either by copying it to a new file system location or by compressing it, and add the file name into the list of archives. /// </summary> /// <param name="archiveFileName">Target file name.</param> /// <param name="fileName">Original file name.</param> /// <param name="createDirectory">Create a directory, if it does not exist.</param> private void AddToArchive(string archiveFileName, string fileName, bool createDirectory) { if (archiveFileQueue.Count != 0) archiveFileName = GetNextArchiveFileName(archiveFileName); try { fileTarget.ArchiveFile(fileName, archiveFileName); archiveFileQueue.Enqueue(archiveFileName); } catch (Exception ex) { InternalLogger.Error(ex, "Cannot archive file '{0}'.", fileName); throw; } } /// <summary> /// Remove old archive files when the files on the queue are more than the <see cref="P:MaxArchiveFilesToKeep"/>. /// </summary> private void DeleteOldArchiveFiles() { if (MaxArchiveFileToKeep == 1 && archiveFileQueue.Any()) { var archiveFileName = archiveFileQueue.Dequeue(); try { File.Delete(archiveFileName); } catch (Exception ex) { InternalLogger.Warn(ex, "Cannot delete old archive file : '{0}'.", archiveFileName); } } while (archiveFileQueue.Count >= MaxArchiveFileToKeep) { string oldestArchivedFileName = archiveFileQueue.Dequeue(); try { File.Delete(oldestArchivedFileName); } catch (Exception ex) { InternalLogger.Warn(ex, "Cannot delete old archive file : '{0}'.", oldestArchivedFileName); } } } /// <summary> /// Gets the file name for the next archive file by appending a number to the provided /// "base"-filename. /// /// Example: /// Original Filename trace.log /// Target Filename trace.15.log /// </summary> /// <param name="fileName">Original file name.</param> /// <returns>File name suitable for archiving</returns> private string GetNextArchiveFileName(string fileName) { int currentArchiveNumber = archiveFileQueue.Count == 0 ? 
0 : ExtractArchiveNumberFromFileName(archiveFileQueue.Last()); string archiveFileName = string.Format("{0}.{1}{2}", Path.GetFileNameWithoutExtension(fileName), currentArchiveNumber + 1, Path.GetExtension(fileName)); return Path.Combine(Path.GetDirectoryName(fileName), archiveFileName); } private static int ExtractArchiveNumberFromFileName(string archiveFileName) { archiveFileName = Path.GetFileName(archiveFileName); int lastDotIdx = archiveFileName.LastIndexOf('.'); if (lastDotIdx == -1) return 0; int previousToLastDotIdx = archiveFileName.LastIndexOf('.', lastDotIdx - 1); string numberPart = previousToLastDotIdx == -1 ? archiveFileName.Substring(lastDotIdx + 1) : archiveFileName.Substring(previousToLastDotIdx + 1, lastDotIdx - previousToLastDotIdx - 1); int archiveNumber; return Int32.TryParse(numberPart, out archiveNumber) ? archiveNumber : 0; } } private sealed class FileNameTemplate { /// <summary> /// Characters determining the start of the <see cref="P:FileNameTemplate.Pattern"/>. /// </summary> public const string PatternStartCharacters = "{#"; /// <summary> /// Characters determining the end of the <see cref="P:FileNameTemplate.Pattern"/>. /// </summary> public const string PatternEndCharacters = "#}"; /// <summary> /// File name which is used as template for matching and replacements. /// It is expected to contain a pattern to match. /// </summary> public string Template { get { return this.template; } } /// <summary> /// The begging position of the <see cref="P:FileNameTemplate.Pattern"/> /// within the <see cref="P:FileNameTemplate.Template"/>. -1 is returned /// when no pattern can be found. /// </summary> public int BeginAt { get { return startIndex; } } /// <summary> /// The ending position of the <see cref="P:FileNameTemplate.Pattern"/> /// within the <see cref="P:FileNameTemplate.Template"/>. -1 is returned /// when no pattern can be found. /// </summary> public int EndAt { get { return endIndex; } } private bool FoundPattern { get { return startIndex != -1 && endIndex != -1; } } private readonly string template; private readonly int startIndex; private readonly int endIndex; public FileNameTemplate(string template) { this.template = template; this.startIndex = template.IndexOf(PatternStartCharacters, StringComparison.Ordinal); if (this.startIndex != -1) this.endIndex = template.IndexOf(PatternEndCharacters, StringComparison.Ordinal) + PatternEndCharacters.Length; } /// <summary> /// Replace the pattern with the specified String. /// </summary> /// <param name="replacementValue"></param> /// <returns></returns> public string ReplacePattern(string replacementValue) { return !FoundPattern || String.IsNullOrEmpty(replacementValue) ? this.Template : template.Substring(0, this.BeginAt) + replacementValue + template.Substring(this.EndAt); } } } }
1
13,948
It seems ok, because the file name is already created at line 1815 and already checked for null or empty at line 1816. If the file name is null, this line will never be executed.
NLog-NLog
.cs
@@ -71,8 +71,9 @@ public interface GenesisConfigOptions { OptionalLong getLondonBlockNumber(); - // TODO EIP-1559 change for the actual fork name when known - OptionalLong getAleutBlockNumber(); + OptionalLong getArrowGlacierBlockNumber(); + + OptionalLong getBaseFeePerGas(); OptionalLong getEIP1559BlockNumber();
1
/* * Copyright ConsenSys AG. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. * * SPDX-License-Identifier: Apache-2.0 */ package org.hyperledger.besu.config; import java.math.BigInteger; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.OptionalInt; import java.util.OptionalLong; public interface GenesisConfigOptions { boolean isEthHash(); boolean isKeccak256(); boolean isIbftLegacy(); boolean isIbft2(); boolean isQbft(); boolean isClique(); String getConsensusEngine(); IbftLegacyConfigOptions getIbftLegacyConfigOptions(); CliqueConfigOptions getCliqueConfigOptions(); BftConfigOptions getBftConfigOptions(); QbftConfigOptions getQbftConfigOptions(); EthashConfigOptions getEthashConfigOptions(); Keccak256ConfigOptions getKeccak256ConfigOptions(); OptionalLong getHomesteadBlockNumber(); OptionalLong getDaoForkBlock(); OptionalLong getTangerineWhistleBlockNumber(); OptionalLong getSpuriousDragonBlockNumber(); OptionalLong getByzantiumBlockNumber(); OptionalLong getConstantinopleBlockNumber(); OptionalLong getPetersburgBlockNumber(); OptionalLong getIstanbulBlockNumber(); OptionalLong getMuirGlacierBlockNumber(); OptionalLong getBerlinBlockNumber(); OptionalLong getLondonBlockNumber(); // TODO EIP-1559 change for the actual fork name when known OptionalLong getAleutBlockNumber(); OptionalLong getEIP1559BlockNumber(); List<Long> getForks(); /** * Block number for the Dao Fork, this value is used to tell node to connect with peer that did * NOT accept the Dao Fork and instead continued as what is now called the classic network * * @return block number to activate the classic fork block */ OptionalLong getClassicForkBlock(); /** * Block number for ECIP-1015 fork on Classic network ECIP-1015: Long-term gas cost changes for * IO-heavy operations to mitigate transaction spam attacks In reference to EIP-150 (ETH Tangerine * Whistle) Note, this fork happens after Homestead (Mainnet definition) and before DieHard fork * * @see <a * href="https://ecips.ethereumclassic.org/ECIPs/ecip-1015">https://ecips.ethereumclassic.org/ECIPs/ecip-1015</a> * @return block number to activate ECIP-1015 code */ OptionalLong getEcip1015BlockNumber(); /** * Block number for DieHard fork on Classic network The DieHard fork includes changes to meet * specification for ECIP-1010 and EIP-160 Note, this fork happens after ECIP-1015 (classic * tangerine whistle) and before Gotham fork ECIP-1010: Delay Difficulty Bomb Explosion * * @see <a * href="https://ecips.ethereumclassic.org/ECIPs/ecip-1010">https://ecips.ethereumclassic.org/ECIPs/ecip-1010</a> * EIP-160: EXP cost increase * @see <a * href="https://eips.ethereum.org/EIPS/eip-160">https://eips.ethereum.org/EIPS/eip-160</a> * @return block number to activate Classic DieHard fork */ OptionalLong getDieHardBlockNumber(); /** * Block number for Gotham fork on Classic network, the Gotham form includes changes to meet * specification for ECIP-1017 and ECIP-1039 both regarding Monetary Policy (rewards). 
* * @see <a * href="https://ecips.ethereumclassic.org/ECIPs/ecip-1017">https://ecips.ethereumclassic.org/ECIPs/ecip-1017</a> * ECIP-1017: Monetary Policy and Final Modification to the Ethereum Classic Emission Schedule * @see <a * href="https://ecips.ethereumclassic.org/ECIPs/ecip-1039">https://ecips.ethereumclassic.org/ECIPs/ecip-1039</a> * ECIP-1039: Monetary policy rounding specification * @return block to activate Classic Gotham fork */ OptionalLong getGothamBlockNumber(); /** * Block number to remove difficulty bomb, to meet specification for ECIP-1041. * * @see <a * href="https://ecips.ethereumclassic.org/ECIPs/ecip-1041">https://ecips.ethereumclassic.org/ECIPs/ecip-1041</a> * ECIP-1041: Remove Difficulty Bomb * @return block number to remove difficulty bomb on classic network */ OptionalLong getDefuseDifficultyBombBlockNumber(); /** * Block number for Atlantis fork on Classic network Note, this fork happen after Defuse * Difficulty Bomb fork and before Agharta fork ECIP-1054: Atlantis EVM and Protocol Upgrades * Enable the outstanding Ethereum Foundation Spurious Dragon and Byzantium network protocol * upgrades for the Ethereum Classic network. * * @see <a * href="https://ecips.ethereumclassic.org/ECIPs/ecip-1054">https://ecips.ethereumclassic.org/ECIPs/ecip-1054</a> * @return block number for Atlantis fork on Classic network * @see <a * href="https://ecips.ethereumclassic.org/ECIPs/ecip-1054">https://ecips.ethereumclassic.org/ECIPs/ecip-1054</a> */ OptionalLong getAtlantisBlockNumber(); /** * Block number for Agharta fork on Classic network. Enable the outstanding Ethereum Foundation * Constaninople and Petersburg network protocol upgrades on the Ethereum Classic network in a * hard-fork code-named Agharta to enable maximum compatibility across these networks. * * @see <a * href="https://ecips.ethereumclassic.org/ECIPs/ecip-1056">https://ecips.ethereumclassic.org/ECIPs/ecip-1056</a> * @return block number for Agharta fork on Classic network * @see <a * href="https://ecips.ethereumclassic.org/ECIPs/ecip-1056">https://ecips.ethereumclassic.org/ECIPs/ecip-1056</a> */ OptionalLong getAghartaBlockNumber(); /** * Block number for Phoenix fork on Classic networks. Enable the outstanding Ethereum Foundation * Istanbul network protocol upgrades on the Ethereum Classic network in a hard-fork code-named * Phoenix to enable maximum compatibility across these networks. * * @return block number of Phoenix fork on Classic networks * @see <a * href="https://ecips.ethereumclassic.org/ECIPs/ecip-1088">https://ecips.ethereumclassic.org/ECIPs/ecip-1088</a> */ OptionalLong getPhoenixBlockNumber(); /** * Block number to activate ECIP-1099 (Thanos) on Classic networks. Doubles the length of the * Ethash epoch, with the impact being a reduced DAG size. * * @return block number of ECIP-1099 fork on Classic networks * @see <a * href="https://ecips.ethereumclassic.org/ECIPs/ecip-1099">https://ecips.ethereumclassic.org/ECIPs/ecip-1099</a> */ OptionalLong getThanosBlockNumber(); /** * Block number to activate Magneto on Classic networks. * * @return block number of Magneto fork on Classic networks * @see <a * href="https://github.com/ethereumclassic/ECIPs/issues/424">https://github.com/ethereumclassic/ECIPs/issues/424</a> */ OptionalLong getMagnetoBlockNumber(); /** * Block number to activate ECIP-1049 on Classic networks. Changes the hashing algorithm to * keccak-256. 
* * @return block number of ECIP-1049 fork on Classic networks * @see <a * href="https://ecips.ethereumclassic.org/ECIPs/ecip-1049">https://ecips.ethereumclassic.org/ECIPs/ecip-1049</a> */ OptionalLong getEcip1049BlockNumber(); Optional<BigInteger> getChainId(); OptionalInt getContractSizeLimit(); OptionalInt getEvmStackSize(); /** * Number of rounds contained within an Era for calculating Ethereum Classic Emission Schedule, * ECIP defines this as 5,000,000 however this config option allows for adjusting (for using with * other networks, for example Mordor testnet uses 2,000,000). The values defaults to 5,000,000 if * not set. * * @return number of rounds pre Era * @see <a * href="https://ecips.ethereumclassic.org/ECIPs/ecip-1017">https://ecips.ethereumclassic.org/ECIPs/ecip-1017</a> */ OptionalLong getEcip1017EraRounds(); Map<String, Object> asMap(); TransitionsConfigOptions getTransitions(); /** * Set Besu in Quorum-compatibility mode * * @return true, if Besu is running on Quorum-compatibility mode, false, otherwise. */ boolean isQuorum(); /** * Block number to activate Quorum Permissioning. This option is used on Quorum-compatibility * mode. * * @return block number to activate Quorum Permissioning */ OptionalLong getQip714BlockNumber(); /** * The PoW algorithm associated with the genesis file. * * @return the PoW algorithm in use. */ PowAlgorithm getPowAlgorithm(); /** * The elliptic curve which should be used in SignatureAlgorithm. * * @return the name of the elliptic curve. */ Optional<String> getEcCurve(); }
1
26,519
Can all the BaseFeePerGas methods that are added be removed? They're not needed for the bomb and not referenced anywhere else in this PR.
hyperledger-besu
java
@@ -172,7 +172,7 @@ func handleMainConfigArgs(cmd *cobra.Command, args []string, app *ddevapp.DdevAp if docrootRelPath != "" { app.Docroot = docrootRelPath if _, err = os.Stat(docrootRelPath); os.IsNotExist(err) { - util.Failed("The docroot provided (%v) does not exist", docrootRelPath) + output.UserOut.Warnf("Warning: the provided docroot at %s does not currently exist.", docrootRelPath) } } else if !cmd.Flags().Changed("docroot") { app.Docroot = ddevapp.DiscoverDefaultDocroot(app)
1
package cmd import ( "fmt" "os" "strings" "path/filepath" "github.com/drud/ddev/pkg/ddevapp" "github.com/drud/ddev/pkg/output" "github.com/drud/ddev/pkg/util" "github.com/spf13/cobra" ) // docrootRelPath is the relative path to the docroot where index.php is var docrootRelPath string // siteName is the name of the site var siteName string // appType is the ddev app type, like drupal7/drupal8/wordpress var appType string // showConfigLocation if set causes the command to show the config locatio var showConfigLocation bool // extraFlagsHandlingFunc does specific handling for additional flags, and is different per provider. var extraFlagsHandlingFunc func(cmd *cobra.Command, args []string, app *ddevapp.DdevApp) error var providerName = ddevapp.DefaultProviderName // ConfigCommand represents the `ddev config` command var ConfigCommand *cobra.Command = &cobra.Command{ Use: "config [provider]", Short: "Create or modify a ddev project configuration in the current directory", Example: `"ddev config" or "ddev config --docroot=. --projectname=d7-kickstart --projecttype=drupal7"`, Args: cobra.ExactArgs(0), Run: handleConfigRun, } // handleConfigRun handles all the flag processing for any provider func handleConfigRun(cmd *cobra.Command, args []string) { app, err := getConfigApp(providerName) if err != nil { util.Failed(err.Error()) } if cmd.Flags().NFlag() == 0 { err = app.PromptForConfig() if err != nil { util.Failed("There was a problem configuring your project: %v", err) } } else { err = handleMainConfigArgs(cmd, args, app) if err != nil { util.Failed(err.Error()) } if extraFlagsHandlingFunc != nil { err = extraFlagsHandlingFunc(cmd, args, app) if err != nil { util.Failed("failed to handle per-provider extra flags: %v", err) } } } provider, err := app.GetProvider() if err != nil { util.Failed("Failed to get provider: %v", err) } err = provider.Validate() if err != nil { util.Failed("Failed to validate project name %v: %v", app.Name, err) } err = app.WriteConfig() if err != nil { util.Failed("Failed to write config: %v", err) } _, err = app.CreateSettingsFile() if err != nil { util.Warning("Could not write settings file: %v", err) } err = provider.Write(app.GetConfigPath("import.yaml")) if err != nil { util.Failed("Failed to write provider config: %v", err) } util.Success("Configuration complete. You may now run 'ddev start'.") } func init() { validAppTypes := strings.Join(ddevapp.GetValidAppTypes(), ", ") apptypeUsage := fmt.Sprintf("Provide the project type (one of %s). This is autodetected and this flag is necessary only to override the detection.", validAppTypes) projectNameUsage := fmt.Sprintf("Provide the project name of project to configure (normally the same as the last part of directory name)") ConfigCommand.Flags().StringVarP(&siteName, "projectname", "", "", projectNameUsage) ConfigCommand.Flags().StringVarP(&docrootRelPath, "docroot", "", "", "Provide the relative docroot of the project, like 'docroot' or 'htdocs' or 'web', defaults to empty, the current directory") ConfigCommand.Flags().StringVarP(&appType, "projecttype", "", "", apptypeUsage) // apptype flag is there for backwards compatibility. 
ConfigCommand.Flags().StringVarP(&appType, "apptype", "", "", apptypeUsage+" This is the same as --projecttype and is included only for backwards compatibility.") ConfigCommand.Flags().BoolVarP(&showConfigLocation, "show-config-location", "", false, "Output the location of the config.yaml file if it exists, or error that it doesn't exist.") ConfigCommand.Flags().StringVarP(&siteName, "sitename", "", "", projectNameUsage+" This is the same as projectname and is included only for backwards compatibility") err := ConfigCommand.Flags().MarkDeprecated("sitename", "The sitename flag is deprecated in favor of --projectname") util.CheckErr(err) err = ConfigCommand.Flags().MarkDeprecated("apptype", "The apptype flag is deprecated in favor of --projecttype") util.CheckErr(err) RootCmd.AddCommand(ConfigCommand) } // getConfigApp() does the basic setup of the app (with provider) and returns it. func getConfigApp(providerName string) (*ddevapp.DdevApp, error) { appRoot, err := os.Getwd() if err != nil { return nil, fmt.Errorf("could not determine current working directory: %v", err) } // TODO: Handle case where config may be in parent directories. app, err := ddevapp.NewApp(appRoot, providerName) if err != nil { return nil, fmt.Errorf("could not create new config: %v", err) } return app, nil } // handleMainConfigArgs() validates and processes the main config args (docroot, etc.) func handleMainConfigArgs(cmd *cobra.Command, args []string, app *ddevapp.DdevApp) error { var err error // Support the show-config-location flag. if showConfigLocation { // nolint: vetshadow activeApp, err := ddevapp.GetActiveApp("") if err != nil { if strings.Contains(err.Error(), "Have you run 'ddev config'") { util.Failed("No project configuration currently exists") } else { util.Failed("Failed to access project configuration: %v", err) } } if activeApp.ConfigPath != "" && activeApp.ConfigExists() { rawResult := make(map[string]interface{}) rawResult["configpath"] = activeApp.ConfigPath rawResult["approot"] = activeApp.AppRoot friendlyMsg := fmt.Sprintf("The project config location is %s", activeApp.ConfigPath) output.UserOut.WithField("raw", rawResult).Print(friendlyMsg) return nil } } // Let them know if we're replacing the config.yaml app.WarnIfConfigReplace() // app.Name gets set to basename if not provided, or set to siteName if provided if app.Name != "" && siteName == "" { // If we already have a c.Name and no siteName, leave c.Name alone // Sorry this is empty but it makes the logic clearer. 
} else if siteName != "" { // if we have a siteName passed in, use it for c.Name app.Name = siteName } else { // No siteName passed, c.Name not set: use c.Name from the directory // nolint: vetshadow pwd, err := os.Getwd() util.CheckErr(err) app.Name = filepath.Base(pwd) } // docrootRelPath must exist if docrootRelPath != "" { app.Docroot = docrootRelPath if _, err = os.Stat(docrootRelPath); os.IsNotExist(err) { util.Failed("The docroot provided (%v) does not exist", docrootRelPath) } } else if !cmd.Flags().Changed("docroot") { app.Docroot = ddevapp.DiscoverDefaultDocroot(app) } if appType != "" && !ddevapp.IsValidAppType(appType) { validAppTypes := strings.Join(ddevapp.GetValidAppTypes(), ", ") util.Failed("apptype must be one of %s", validAppTypes) } detectedApptype := app.DetectAppType() fullPath, pathErr := filepath.Abs(app.Docroot) if pathErr != nil { util.Failed("Failed to get absolute path to Docroot %s: %v", app.Docroot, pathErr) } if appType == "" || appType == detectedApptype { // Found an app, matches passed-in or no apptype passed appType = detectedApptype util.Success("Found a %s codebase at %s", detectedApptype, fullPath) } else if appType != "" { // apptype was passed, but we found no app at all util.Warning("You have specified a project type of %s but no project of that type is found in %s", appType, fullPath) } else if appType != "" && detectedApptype != appType { // apptype was passed, app was found, but not the same type util.Warning("You have specified a project type of %s but a project of type %s was discovered in %s", appType, detectedApptype, fullPath) } app.Type = appType err = app.ConfigFileOverrideAction() if err != nil { util.Failed("failed to run ConfigFileOverrideAction: %v", err) } err = app.WriteConfig() if err != nil { return fmt.Errorf("could not write ddev config file %s: %v", app.ConfigPath, err) } return nil }
1
13,056
util.Warning()? That would be an easier way to say it.
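A minimal sketch of what the reviewer is suggesting, assuming the patch above: replace the output.UserOut.Warnf call with the util.Warning helper that is already imported and used elsewhere in this file. This is an illustration of the suggestion, not the merged change.

	if docrootRelPath != "" {
		app.Docroot = docrootRelPath
		// Warn instead of failing when the docroot does not exist yet.
		// util.Warning takes a printf-style format string, as in its other uses in this file.
		if _, err = os.Stat(docrootRelPath); os.IsNotExist(err) {
			util.Warning("The provided docroot at %s does not currently exist.", docrootRelPath)
		}
	}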
drud-ddev
php
@@ -64,8 +64,6 @@ type ( rootHash hash.Hash32B toRoot *list.List // stores the path from root to diverging node bucket string // bucket name to store the nodes - clpsK []byte // path if the node can collapse after deleting an entry - clpsV []byte // value if the node can collapse after deleting an entry numEntry uint64 // number of entries added to the trie numBranch uint64 numExt uint64
1
// Copyright (c) 2018 IoTeX // This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no // warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent // permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache // License 2.0 that can be found in the LICENSE file. package trie import ( "container/list" "context" "sync" "github.com/pkg/errors" "github.com/iotexproject/iotex-core/db" "github.com/iotexproject/iotex-core/logger" "github.com/iotexproject/iotex-core/pkg/hash" "github.com/iotexproject/iotex-core/pkg/lifecycle" ) var ( // AccountKVNameSpace is the bucket name for account trie AccountKVNameSpace = "Account" // CodeKVNameSpace is the bucket name for code CodeKVNameSpace = "Code" // ContractKVNameSpace is the bucket name for contract data storage ContractKVNameSpace = "Contract" // CandidateKVNameSpace is the bucket name for candidate data storage CandidateKVNameSpace = "Candidate" // ErrInvalidTrie indicates something wrong causing invalid operation ErrInvalidTrie = errors.New("invalid trie operation") // ErrNotExist indicates entry does not exist ErrNotExist = errors.New("not exist in trie") // EmptyRoot is the root hash of an empty trie EmptyRoot = hash.Hash32B{0xe, 0x57, 0x51, 0xc0, 0x26, 0xe5, 0x43, 0xb2, 0xe8, 0xab, 0x2e, 0xb0, 0x60, 0x99, 0xda, 0xa1, 0xd1, 0xe5, 0xdf, 0x47, 0x77, 0x8f, 0x77, 0x87, 0xfa, 0xab, 0x45, 0xcd, 0xf1, 0x2f, 0xe3, 0xa8} ) type ( // Trie is the interface of Merkle Patricia Trie Trie interface { lifecycle.StartStopper TrieDB() db.KVStore // return the underlying DB instance Upsert([]byte, []byte) error // insert a new entry Get([]byte) ([]byte, error) // retrieve an existing entry Delete([]byte) error // delete an entry Commit() error // commit the state changes in a batch RootHash() hash.Hash32B // returns trie's root hash SetRoot(hash.Hash32B) error // set a new root to trie } // trie implements the Trie interface trie struct { lifecycle lifecycle.Lifecycle mutex sync.RWMutex root patricia rootHash hash.Hash32B toRoot *list.List // stores the path from root to diverging node bucket string // bucket name to store the nodes clpsK []byte // path if the node can collapse after deleting an entry clpsV []byte // value if the node can collapse after deleting an entry numEntry uint64 // number of entries added to the trie numBranch uint64 numExt uint64 numLeaf uint64 cb db.CachedBatch // cached batch for pending writes dao db.KVStore // the underlying storage DB } ) // NewTrie creates a trie with DB filename func NewTrie(kvStore db.KVStore, name string, root hash.Hash32B) (Trie, error) { if kvStore == nil { return nil, errors.New("try to create trie with empty KV store") } return newTrie(kvStore, name, root), nil } // NewTrieSharedBatch creates a trie with a shared batch func NewTrieSharedBatch(kvStore db.KVStore, batch db.CachedBatch, name string, root hash.Hash32B) (Trie, error) { if kvStore == nil || batch == nil { return nil, errors.New("try to create trie with empty KV store") } return newTrieSharedBatch(kvStore, batch, name, root), nil } func (t *trie) Start(ctx context.Context) error { t.lifecycle.OnStart(ctx) return t.loadRoot() } func (t *trie) Stop(ctx context.Context) error { return t.lifecycle.OnStop(ctx) } // TrieDB return the underlying DB instance func (t *trie) TrieDB() db.KVStore { return t.dao } // Upsert a new entry func (t *trie) Upsert(key, value []byte) error { t.mutex.Lock() 
defer t.mutex.Unlock() return t.upsert(key, value) } // Get an existing entry func (t *trie) Get(key []byte) ([]byte, error) { // Use write lock because t.clear() will mutate toRoot t.mutex.Lock() defer t.mutex.Unlock() ptr, size, err := t.query(key) t.clear() if size != len(key) { return nil, errors.Wrapf(ErrNotExist, "key = %x", key) } if err != nil { return nil, err } // retrieve the value from terminal patricia node size = len(key) return t.getValue(ptr, key[size-1]) } // Delete an entry func (t *trie) Delete(key []byte) error { t.mutex.Lock() defer t.mutex.Unlock() var ptr patricia var size int var err error ptr, size, err = t.query(key) if size != len(key) { return errors.Wrapf(ErrNotExist, "key = %x not exist", key) } if err != nil { return errors.Wrap(err, "failed to query") } var index byte var childClps bool var clpsType byte t.clpsK, t.clpsV = nil, nil if _, ok := ptr.(*branch); ok { // for branch, the entry to delete is the leaf matching last byte of path size = len(key) index = key[size-1] if ptr, err = t.getPatricia(ptr.(*branch).Path[index]); err != nil { return errors.Wrap(err, "failed to getPatricia") } } else { ptr, index = t.popToRoot() } // delete the entry and update if it can collapse if childClps, clpsType, err = t.delete(ptr, index); err != nil { return errors.Wrap(err, "failed to delete") } if t.numEntry == 1 { return errors.Wrapf(ErrInvalidTrie, "trie has more entries than ever added") } t.numEntry-- if t.numEntry == 2 { // only 1 entry left (the other being the root), collapse into leaf clpsType = 0 } // update upstream nodes on path ascending to root return t.updateDelete(ptr, childClps, clpsType) } // Commit local cached <k, v> in a batch func (t *trie) Commit() error { t.mutex.Lock() defer t.mutex.Unlock() return t.dao.Commit(t.cb) } // RootHash returns the root hash of merkle patricia trie func (t *trie) RootHash() hash.Hash32B { t.mutex.RLock() defer t.mutex.RUnlock() return t.rootHash } // SetRoot sets the root trie func (t *trie) SetRoot(rootHash hash.Hash32B) (err error) { t.mutex.RLock() defer t.mutex.RUnlock() var root patricia if root, err = t.getPatricia(rootHash[:]); err != nil { return errors.Wrapf(err, "failed to set root %x", rootHash[:]) } t.root = root t.rootHash = rootHash return err } //====================================== // private functions //====================================== // newTrie creates a trie func newTrie(dao db.KVStore, name string, root hash.Hash32B) *trie { t := &trie{ cb: db.NewCachedBatch(), dao: dao, rootHash: root, toRoot: list.New(), bucket: name, numEntry: 1, numBranch: 1, } t.lifecycle.Add(dao) return t } // newTrieSharedBatch creates a trie with shared DB func newTrieSharedBatch(dao db.KVStore, batch db.CachedBatch, name string, root hash.Hash32B) *trie { t := &trie{ cb: batch, dao: dao, rootHash: root, toRoot: list.New(), bucket: name, numEntry: 1, numBranch: 1} t.lifecycle.Add(dao) return t } // loadRoot loads the root patricia from DB func (t *trie) loadRoot() error { t.mutex.RLock() defer t.mutex.RUnlock() if t.rootHash != EmptyRoot { var err error t.root, err = t.getPatricia(t.rootHash[:]) return err } // initial empty trie t.root = &branch{} return t.putPatricia(t.root) } // upsert a new entry func (t *trie) upsert(key, value []byte) error { var hashChild hash.Hash32B ptr, size, err := t.query(key) if ptr == nil { return errors.Wrapf(err, "failed to parse key %x", key) } if err != nil { nb, ne, nl := ptr.increase(key[size:]) addNode := list.New() if err := ptr.insert(key[size:], value, addNode); err != nil { 
return errors.Wrapf(err, "failed to insert key = %x", key) } // update newly added patricia node into DB for addNode.Len() > 0 { n := addNode.Back() ptr, ok := n.Value.(patricia) if !ok { return errors.Wrapf(ErrInvalidPatricia, "cannot decode node = %v", n.Value) } hashChild = ptr.hash() // hash of new node should NOT exist in DB if err := t.putPatricia(ptr); err != nil { return err } addNode.Remove(n) } t.numBranch += uint64(nb) t.numExt += uint64(ne) t.numLeaf += uint64(nl) t.numEntry++ // if the diverging node is leaf, delete it n := t.toRoot.Back() if _, ok := n.Value.(patricia).(*leaf); ok { logger.Debug().Msg("delete leaf") t.toRoot.Remove(n) } } else { // key already exists, update with new value if size != len(key) { return errors.Wrapf(ErrNotExist, "key = %x not exist", key) } if err != nil { return err } var index byte t.clpsK, t.clpsV = nil, nil if _, ok := ptr.(*branch); ok { // for branch, the entry to delete is the leaf matching last byte of path size = len(key) index = key[size-1] if ptr, err = t.getPatricia(ptr.(*branch).Path[index]); err != nil { return err } } else { ptr, index = t.popToRoot() } // delete the entry and update if it can collapse if _, _, err = t.delete(ptr, index); err != nil { return err } // update with new value err := ptr.set(value, index) if err != nil { return err } if err := t.putPatricia(ptr); err != nil { return err } hashChild = ptr.hash() } // update upstream nodes on path ascending to root return t.updateInsert(hashChild[:]) } // query returns the diverging patricia node, and length of matching path in bytes func (t *trie) query(key []byte) (patricia, int, error) { ptr := t.root if ptr == nil { return nil, 0, errors.Wrap(ErrNotExist, "failed to load root") } size := 0 for len(key) > 0 { // keep descending the trie hashn, match, err := ptr.descend(key) logger.Debug().Hex("key", hashn).Msg("access") if _, b := ptr.(*branch); b { // for branch node, need to save first byte of path to traceback to branch[key[0]] later t.toRoot.PushBack(key[0]) } t.toRoot.PushBack(ptr) // path diverges, return the diverging node if err != nil { // patricia.insert() will be called later to insert <key, value> pair into trie return ptr, size, err } // path matching entire key, return ptr that holds the value if match == len(key) { return ptr, size + match, nil } if ptr, err = t.getPatricia(hashn); err != nil { return nil, 0, err } size += match key = key[match:] } return ptr, size, nil } // delete removes the entry stored in patricia node, and returns if the node can collapse func (t *trie) delete(ptr patricia, index byte) (bool, byte, error) { var childClps bool var clpsType byte // delete the node from DB if err := t.delPatricia(ptr); err != nil { return childClps, clpsType, err } // by default assuming collapse to leaf node switch ptr.(type) { case *branch: // check if the branch can collapse, and if yes get the leaf node value if t.clpsK, t.clpsV, childClps = ptr.collapse(t.clpsK, t.clpsV, index, true); childClps { l, err := t.getPatricia(t.clpsV) if err != nil { return childClps, clpsType, err } // the original branch collapse to its single remaining leaf var k []byte if k, t.clpsV, err = l.blob(); err != nil { return childClps, clpsType, err } // remaining leaf path != nil means it is extension node if k != nil { clpsType = 1 } t.clpsK = append(t.clpsK, k...) 
ptr.(*branch).print() } case *leaf: if ptr.(*leaf).Ext == 1 { return childClps, clpsType, errors.Wrap(ErrInvalidPatricia, "extension cannot be terminal node") } // deleting a leaf, upstream node must be extension so collapse into extension childClps, clpsType = true, 1 } return childClps, clpsType, nil } // updateInsert rewinds the path back to root and updates nodes along the way func (t *trie) updateInsert(hashChild []byte) error { for t.toRoot.Len() > 0 { curr, index := t.popToRoot() if curr == nil { return errors.Wrap(ErrInvalidPatricia, "patricia pushed on stack is not valid") } // update the patricia node if err := curr.ascend(hashChild[:], index); err != nil { return err } hashCurr := curr.hash() hashChild = hashCurr[:] // when adding an entry, hash of nodes along the path changes and is expected NOT to exist in DB if err := t.putPatricia(curr); err != nil { return err } } // update root hash t.rootHash = t.root.hash() return nil } // updateDelete rewinds the path back to root and updates nodes along the way func (t *trie) updateDelete(curr patricia, currClps bool, clpsType byte) error { contClps := false for t.toRoot.Len() > 0 { logger.Debug().Int("stack size", t.toRoot.Len()).Msg("clps") next, index := t.popToRoot() if next == nil { return errors.Wrap(ErrInvalidPatricia, "patricia pushed on stack is not valid") } if err := t.delPatricia(next); err != nil { return errors.Wrap(err, "failed to delete patricia") } // we attempt to collapse in 2 cases: // 1. the current node is not root // 2. the current node is root, but <v> is nil meaning no more entries exist on the incoming path isRoot := t.toRoot.Len() == 0 noEntry := t.clpsV == nil var nextClps bool t.clpsK, t.clpsV, nextClps = next.collapse(t.clpsK, t.clpsV, index, currClps && (!isRoot || noEntry)) logger.Debug().Bool("curr", currClps).Msg("clps") logger.Debug().Bool("next", nextClps).Msg("clps") if nextClps { // current node can also collapse, concatenate the path and keep going contClps = true if !isRoot { currClps = nextClps curr = next continue } } logger.Debug().Bool("cont", contClps).Msg("clps") if contClps && !noEntry { curr = &leaf{clpsType, t.clpsK, t.clpsV} logger.Info().Hex("k", t.clpsK).Hex("v", t.clpsV).Msg("clps") // after collapsing, the trie might rollback to an earlier state in the history (before adding the deleted entry) // so the node we try to put may already exist in DB if err := t.putPatricia(curr); err != nil { return errors.Wrap(err, "failed to put patricia") } } contClps = false // update current with new child hash := curr.hash() err := next.ascend(hash[:], index) if err != nil { return errors.Wrap(err, "failed to ascend") } // for the same reason above, the trie might rollback to an earlier state in the history // so the node we try to put may already exist in DB if err := t.putPatricia(next); err != nil { return errors.Wrap(err, "failed to put patricia") } currClps = nextClps curr = next } // update root hash t.rootHash = t.root.hash() return nil } //====================================== // helper functions to operate patricia //====================================== // getPatricia retrieves the patricia node from DB according to key func (t *trie) getPatricia(key []byte) (patricia, error) { // search in cache first node, err := t.cb.Get(t.bucket, key) if err != nil { node, err = t.dao.Get(t.bucket, key) } if err != nil { return nil, errors.Wrapf(err, "failed to get key %x", key[:8]) } var ptr patricia // first byte of serialized data is type switch node[0] { case 2: ptr = &branch{} case 1: ptr = 
&leaf{} case 0: ptr = &leaf{} default: return nil, errors.Wrapf(ErrInvalidPatricia, "invalid node type = %v", node[0]) } if err := ptr.deserialize(node); err != nil { return nil, err } return ptr, nil } // putPatricia stores the patricia node into DB // the node may already exist in DB func (t *trie) putPatricia(ptr patricia) error { value, err := ptr.serialize() if err != nil { return errors.Wrapf(err, "failed to encode patricia node") } key := ptr.hash() logger.Debug().Hex("key", key[:8]).Msg("put") t.cb.Put(t.bucket, key[:], value, "failed to put key = %x", key) return nil } // putPatriciaNew stores a new patricia node into DB // it is expected the node does not exist yet, will return error if already exist func (t *trie) putPatriciaNew(ptr patricia) error { value, err := ptr.serialize() if err != nil { return errors.Wrap(err, "failed to encode patricia node") } key := ptr.hash() logger.Debug().Hex("key", key[:8]).Msg("putnew") return t.cb.PutIfNotExists(t.bucket, key[:], value, "failed to put non-existing key = %x", key) } // delPatricia deletes the patricia node from DB func (t *trie) delPatricia(ptr patricia) error { key := ptr.hash() logger.Debug().Hex("key", key[:8]).Msg("del") t.cb.Delete(t.bucket, key[:], "failed to delete key = %x", key) return nil } // getValue returns the actual value stored in patricia node func (t *trie) getValue(ptr patricia, index byte) ([]byte, error) { br, isBranch := ptr.(*branch) var err error if isBranch { if ptr, err = t.getPatricia(br.Path[index]); err != nil { return nil, err } } _, v, e := ptr.blob() return v, e } // clear the stack func (t *trie) clear() { for t.toRoot.Len() > 0 { n := t.toRoot.Back() t.toRoot.Remove(n) } } // pop the stack func (t *trie) popToRoot() (patricia, byte) { if t.toRoot.Len() > 0 { n := t.toRoot.Back() ptr, _ := n.Value.(patricia) t.toRoot.Remove(n) var index byte _, isBranch := ptr.(*branch) if isBranch { // for branch node, the index is pushed onto stack in query() n := t.toRoot.Back() index, _ = n.Value.(byte) t.toRoot.Remove(n) } return ptr, index } return nil, 0 }
1
12,768
These 2 are no longer needed after the refactor.
iotexproject-iotex-core
go
@@ -63,6 +63,8 @@ const ( SpecExportOptionsEmpty = "empty_export_options" SpecMountOptions = "mount_options" SpecCSIMountOptions = "csi_mount_options" + SpecCSIRawBlock = "fadirectRawBlock" + SpecCSIFsType = "fsType" SpecSharedv4MountOptions = "sharedv4_mount_options" SpecProxyProtocolS3 = "s3" SpecProxyProtocolPXD = "pxd"
1
package api import ( "context" "fmt" "math" "strconv" "strings" "time" "github.com/golang/protobuf/ptypes" "github.com/libopenstorage/openstorage/pkg/auth" "github.com/mohae/deepcopy" protoimpl "google.golang.org/protobuf/runtime/protoimpl" ) // Strings for VolumeSpec const ( Name = "name" Token = "token" TokenSecret = "token_secret" TokenSecretNamespace = "token_secret_namespace" SpecNodes = "nodes" SpecParent = "parent" SpecEphemeral = "ephemeral" SpecShared = "shared" SpecJournal = "journal" SpecSharedv4 = "sharedv4" SpecCascaded = "cascaded" SpecSticky = "sticky" SpecSecure = "secure" SpecCompressed = "compressed" SpecSize = "size" SpecScale = "scale" SpecFilesystem = "fs" SpecBlockSize = "block_size" SpecQueueDepth = "queue_depth" SpecHaLevel = "repl" SpecPriority = "io_priority" SpecSnapshotInterval = "snap_interval" SpecSnapshotSchedule = "snap_schedule" SpecAggregationLevel = "aggregation_level" SpecDedupe = "dedupe" SpecPassphrase = "secret_key" SpecAutoAggregationValue = "auto" SpecGroup = "group" SpecGroupEnforce = "fg" SpecZones = "zones" SpecRacks = "racks" SpecRack = "rack" SpecRegions = "regions" SpecLabels = "labels" SpecPriorityAlias = "priority_io" SpecIoProfile = "io_profile" SpecAsyncIo = "async_io" SpecEarlyAck = "early_ack" SpecExportProtocol = "export" SpecExportProtocolISCSI = "iscsi" SpecExportProtocolPXD = "pxd" SpecExportProtocolNFS = "nfs" SpecExportProtocolCustom = "custom" SpecExportOptions = "export_options" SpecExportOptionsEmpty = "empty_export_options" SpecMountOptions = "mount_options" SpecCSIMountOptions = "csi_mount_options" SpecSharedv4MountOptions = "sharedv4_mount_options" SpecProxyProtocolS3 = "s3" SpecProxyProtocolPXD = "pxd" SpecProxyProtocolNFS = "nfs" SpecProxyEndpoint = "proxy_endpoint" SpecProxyNFSSubPath = "proxy_nfs_subpath" SpecProxyNFSExportPath = "proxy_nfs_exportpath" SpecProxyS3Bucket = "proxy_s3_bucket" // SpecBestEffortLocationProvisioning default is false. If set provisioning request will succeed // even if specified data location parameters could not be satisfied. SpecBestEffortLocationProvisioning = "best_effort_location_provisioning" // SpecForceUnsuppportedFsType is of type boolean and if true it sets // the VolumeSpec.force_unsupported_fs_type. When set to true it asks // the driver to use an unsupported value of VolumeSpec.format if possible SpecForceUnsupportedFsType = "force_unsupported_fs_type" // SpecMatchSrcVolProvision defaults to false. Applicable to cloudbackup restores only. // If set to "true", cloudbackup restore volume gets provisioned on same pools as // backup, allowing for inplace restore after. 
SpecMatchSrcVolProvision = "match_src_vol_provision" SpecNodiscard = "nodiscard" StoragePolicy = "storagepolicy" SpecCowOnDemand = "cow_ondemand" SpecDirectIo = "direct_io" SpecScanPolicyTrigger = "scan_policy_trigger" SpecScanPolicyAction = "scan_policy_action" SpecProxyWrite = "proxy_write" SpecSharedv4ServiceType = "sharedv4_svc_type" SpecSharedv4ServiceName = "sharedv4_svc_name" SpecSharedv4FailoverStrategy = "sharedv4_failover_strategy" SpecSharedv4FailoverStrategyNormal = "normal" SpecSharedv4FailoverStrategyAggressive = "aggressive" SpecSharedv4FailoverStrategyUnspecified = "" SpecSharedv4ExternalAccess = "sharedv4_external_access" SpecFastpath = "fastpath" SpecAutoFstrim = "auto_fstrim" SpecBackendType = "backend" SpecBackendPureBlock = "pure_block" SpecBackendPureFile = "pure_file" SpecPureFileExportRules = "pure_export_rules" SpecIoThrottleRdIOPS = "io_throttle_rd_iops" SpecIoThrottleWrIOPS = "io_throttle_wr_iops" SpecIoThrottleRdBW = "io_throttle_rd_bw" SpecIoThrottleWrBW = "io_throttle_wr_bw" ) // OptionKey specifies a set of recognized query params. const ( // OptName query parameter used to lookup volume by name. OptName = "Name" // OptVolumeID query parameter used to lookup volume by ID. OptVolumeID = "VolumeID" // OptSnapID query parameter used to lookup snapshot by ID. OptSnapID = "SnapID" // OptLabel query parameter used to lookup volume by set of labels. OptLabel = "Label" // OptConfigLabel query parameter used to lookup volume by set of labels. OptConfigLabel = "ConfigLabel" // OptCumulative query parameter used to request cumulative stats. OptCumulative = "Cumulative" // OptTimeout query parameter used to indicate timeout seconds OptTimeoutSec = "TimeoutSec" // OptQuiesceID query parameter use for quiesce OptQuiesceID = "QuiesceID" // OptCredUUID is the UUID of the credential OptCredUUID = "CredUUID" // OptCredName indicates unique name of credential OptCredName = "CredName" // OptCredType indicates type of credential OptCredType = "CredType" // OptCredEncrKey is the key used to encrypt data OptCredEncrKey = "CredEncrypt" // OptCredRegion indicates the region for s3 OptCredRegion = "CredRegion" // OptCredDisableSSL indicated if SSL should be disabled OptCredDisableSSL = "CredDisableSSL" // OptCredDisablePathStyle does not enforce path style for s3 OptCredDisablePathStyle = "CredDisablePathStyle" // OptCredStorageClass indicates the storage class to be used for puts // allowed values are STANDARD, STANDARD_IA,ONEZONE_IA, REDUCED_REDUNDANCY OptCredStorageClass = "CredStorageClass" // OptCredEndpoint indicate the cloud endpoint OptCredEndpoint = "CredEndpoint" // OptCredAccKey for s3 OptCredAccessKey = "CredAccessKey" // OptCredSecretKey for s3 OptCredSecretKey = "CredSecretKey" // OptCredBucket is the optional bucket name OptCredBucket = "CredBucket" // OptCredGoogleProjectID projectID for google cloud OptCredGoogleProjectID = "CredProjectID" // OptCredGoogleJsonKey for google cloud OptCredGoogleJsonKey = "CredJsonKey" // OptCredAzureAccountName is the account name for // azure as the cloud provider OptCredAzureAccountName = "CredAccountName" // OptOptCredAzureAccountKey is the accountkey for // azure as the cloud provider OptCredAzureAccountKey = "CredAccountKey" // Credential ownership key in params OptCredOwnership = "CredOwnership" // OptCredProxy proxy key in params OptCredProxy = "CredProxy" // OptCredIAMPolicy if "true", indicates IAM creds to be used OptCredIAMPolicy = "CredIAMPolicy" // OptRemoteCredUUID is the UUID of the remote cluster credential 
OptRemoteCredUUID = "RemoteCredUUID" // OptCloudBackupID is the backID in the cloud OptCloudBackupID = "CloudBackID" // OptCloudBackupIgnoreCreds ignores credentials for incr backups OptCloudBackupIgnoreCreds = "CloudBackupIgnoreCreds" // OptSrcVolID is the source volume ID of the backup OptSrcVolID = "SrcVolID" // OptBkupOpState is the desired operational state // (stop/pause/resume) of backup/restore OptBkupOpState = "OpState" // OptBackupSchedUUID is the UUID of the backup-schedule OptBackupSchedUUID = "BkupSchedUUID" // OptVolumeSubFolder query parameter used to catalog a particular path inside a volume OptCatalogSubFolder = "subfolder" // OptCatalogMaxDepth query parameter used to limit the depth we return OptCatalogMaxDepth = "depth" // OptVolumeService query parameter used to request background volume services OptVolService = "volservice" ) // Api clientserver Constants const ( OsdVolumePath = "osd-volumes" OsdSnapshotPath = "osd-snapshot" OsdCredsPath = "osd-creds" OsdBackupPath = "osd-backup" OsdMigratePath = "osd-migrate" OsdMigrateStartPath = OsdMigratePath + "/start" OsdMigrateCancelPath = OsdMigratePath + "/cancel" OsdMigrateStatusPath = OsdMigratePath + "/status" TimeLayout = "Jan 2 15:04:05 UTC 2006" ) const ( // AutoAggregation value indicates driver to select aggregation level. AutoAggregation = math.MaxUint32 ) const ( // gRPC root path used to extract service and API information SdkRootPath = "openstorage.api.OpenStorage" ) // Node describes the state of a node. // It includes the current physical state (CPU, memory, storage, network usage) as // well as the containers running on the system. // // swagger:model type Node struct { // Id of the node. Id string // SchedulerNodeName is name of the node in scheduler context. It can be // empty if unable to get the name from the scheduler. SchedulerNodeName string // Cpu usage of the node. Cpu float64 // percentage. // Total Memory of the node MemTotal uint64 // Used Memory of the node MemUsed uint64 // Free Memory of the node MemFree uint64 // Average load (percentage) Avgload int // Node Status see (Status object) Status Status // GenNumber of the node GenNumber uint64 // List of disks on this node. Disks map[string]StorageResource // List of storage pools this node supports Pools []StoragePool // Management IP MgmtIp string // Data IP DataIp string // Timestamp Timestamp time.Time // Start time of this node StartTime time.Time // Hostname of this node Hostname string // Node data for this node (EX: Public IP, Provider, City..) NodeData map[string]interface{} // User defined labels for node. Key Value pairs NodeLabels map[string]string // GossipPort is the port used by the gossip protocol GossipPort string // HWType is the type of the underlying hardware used by the node HWType HardwareType // Determine if the node is secure with authentication and authorization SecurityStatus StorageNode_SecurityStatus } // FluentDConfig describes ip and port of a fluentdhost. // DEPRECATED // // swagger:model type FluentDConfig struct { IP string `json:"ip"` Port string `json:"port"` } // Cluster represents the state of the cluster. // // swagger:model type Cluster struct { Status Status // Id of the cluster. // // required: true Id string // Id of the node on which this cluster object is initialized NodeId string // array of all the nodes in the cluster. 
Nodes []*Node // Management url for the cluster ManagementURL string // FluentD Host for the cluster FluentDConfig FluentDConfig } // CredCreateRequest is the input for CredCreate command type CredCreateRequest struct { // InputParams is map describing cloud provide InputParams map[string]string } // CredCreateResponse is returned for CredCreate command type CredCreateResponse struct { // UUID of the credential that was just created UUID string } // CredUpdateRequest is the input for CredsUpdate command type CredUpdateRequest struct { // Name or the UUID of the credential being updated Name string // InputParams is map describing cloud provide InputParams map[string]string } // StatPoint represents the basic structure of a single Stat reported // TODO: This is the first step to introduce stats in openstorage. // Follow up task is to introduce an API for logging stats type StatPoint struct { // Name of the Stat Name string // Tags for the Stat Tags map[string]string // Fields and values of the stat Fields map[string]interface{} // Timestamp in Unix format Timestamp int64 } type CloudBackupCreateRequest struct { // VolumeID of the volume for which cloudbackup is requested VolumeID string // CredentialUUID is cloud credential to be used for backup CredentialUUID string // Full indicates if full backup is desired even though incremental is possible Full bool // Name is optional unique id to be used for this backup // If not specified backup creates this by default Name string // Labels are list of key value pairs to tag the cloud backup. These labels // are stored in the metadata associated with the backup. Labels map[string]string // FullBackupFrequency indicates number of incremental backup after whcih // a fullbackup must be created. This is to override the default value for // manual/user triggerred backups and not applicable for scheduled backups. // Value of 0 retains the default behavior. FullBackupFrequency uint32 // DeleteLocal indicates if local snap must be deleted after the // backup is complete DeleteLocal bool } type CloudBackupCreateResponse struct { // Name of the task performing this backup Name string } type CloudBackupGroupCreateRequest struct { // GroupID indicates backup request for a volumegroup with this group id GroupID string // Labels indicates backup request for a volume group with these labels Labels map[string]string // VolumeIDs are a list of volume IDs to use for the backup request // If multiple of GroupID, Labels or VolumeIDs are specified, volumes matching all of // them are backed up to cloud VolumeIDs []string // CredentialUUID is cloud credential to be used for backup CredentialUUID string // Full indicates if full backup is desired even though incremental is possible Full bool // DeleteLocal indicates if local snap must be deleted after the // backup is complete DeleteLocal bool } type CloudBackupRestoreRequest struct { // ID is the backup ID being restored ID string // RestoreVolumeName is optional volume Name of the new volume to be created // in the cluster for restoring the cloudbackup RestoreVolumeName string // CredentialUUID is the credential to be used for restore operation CredentialUUID string // NodeID is the optional NodeID for provisioning restore // volume (ResoreVolumeName should not be specified) NodeID string // Name is optional unique id to be used for this restore op // restore creates this by default Name string // Optional RestoreVolumeSpec allows some of the restoreVolume fields to be modified. 
// These fields default to the volume spec stored with cloudbackup. // The request fails if both RestoreVolSpec and NodeID are specified. Spec *RestoreVolumeSpec // Optional Locator for restoreVolume. Request fails if both Name and // locator are specified Locator *VolumeLocator } type CloudBackupGroupCreateResponse struct { // ID for this group of backups GroupCloudBackupID string // Names of the tasks performing this group backup Names []string } type CloudBackupRestoreResponse struct { // RestoreVolumeID is the volumeID to which the backup is being restored RestoreVolumeID string // Name of the task performing this restore Name string } type CloudBackupGenericRequest struct { // SrcVolumeID is optional Source VolumeID for the request SrcVolumeID string // ClusterID is the optional clusterID for the request ClusterID string // CredentialUUID is the credential for cloud to be used for the request CredentialUUID string // All if set to true, backups for all clusters in the cloud are processed All bool // StatusFilter indicates backups based on status StatusFilter CloudBackupStatusType // MetadataFilter indicates backups whose metadata has these kv pairs MetadataFilter map[string]string // CloudBackupID must be specified if one needs to enumerate known single // backup (format is clusteruuidORBucketName/srcVolId-SnapId(-incr). If t\ // this is specified, everything else n the command is ignored CloudBackupID string // MissingSrcVol set to true enumerates cloudbackups for which srcVol is not // present in the cluster. Either the source volume is deleted or the // cloudbackup belongs to other cluster.( with older version this // information may be missing, and in such a case these will list as // missing cluster info field in enumeration). Specifying SrcVolumeID and // this flag at the same time is an error MissingSrcVolumes bool } type CloudBackupInfo struct { // ID is the ID of the cloud backup ID string // SrcVolumeID is Source volumeID of the backup SrcVolumeID string // SrcvolumeName is name of the sourceVolume of the backup SrcVolumeName string // Timestamp is the timestamp at which the source volume // was backed up to cloud Timestamp time.Time // Metadata associated with the backup Metadata map[string]string // Status indicates the status of the backup Status string // ClusterType indicates if the cloudbackup was uploaded by this // cluster. 
Could be unknown with older version cloudbackups ClusterType SdkCloudBackupClusterType // Namespace to which this cloudbackup belongs to Namespace string } type CloudBackupEnumerateRequest struct { CloudBackupGenericRequest // MaxBackups indicates maxBackups to return in this enumerate list MaxBackups uint64 // ContinuationToken returned in the enumerate response if all of the // requested backups could not be returned in one response ContinuationToken string } type CloudBackupEnumerateResponse struct { // Backups is list of backups in cloud for given volume/cluster/s Backups []CloudBackupInfo ContinuationToken string } type CloudBackupDeleteRequest struct { // ID is the ID of the cloud backup ID string // CredentialUUID is the credential for cloud to be used for the request CredentialUUID string // Force Delete cloudbackup even if there are dependencies Force bool } type CloudBackupDeleteAllRequest struct { CloudBackupGenericRequest } type CloudBackupStatusRequest struct { // SrcVolumeID optional volumeID to list status of backup/restore SrcVolumeID string // Local indicates if only those backups/restores that are // active on current node must be returned Local bool // ID of the backup/restore task. If this is specified, SrcVolumeID is // ignored. This could be GroupCloudBackupId too, and in that case multiple // statuses belonging to the groupCloudBackupID is returned. ID string } type CloudBackupStatusRequestOld struct { // Old field for task ID Name string // New structure CloudBackupStatusRequest } type CloudBackupOpType string const ( CloudBackupOp = CloudBackupOpType("Backup") CloudRestoreOp = CloudBackupOpType("Restore") ) // Allowed storage classes s3 const ( S3StorageClassStandard = "STANDARD" S3StorageClassStandardIa = "STANDARD_IA" ) type CloudBackupStatusType string const ( CloudBackupStatusNotStarted = CloudBackupStatusType("NotStarted") CloudBackupStatusDone = CloudBackupStatusType("Done") CloudBackupStatusAborted = CloudBackupStatusType("Aborted") CloudBackupStatusPaused = CloudBackupStatusType("Paused") CloudBackupStatusStopped = CloudBackupStatusType("Stopped") CloudBackupStatusActive = CloudBackupStatusType("Active") CloudBackupStatusQueued = CloudBackupStatusType("Queued") CloudBackupStatusFailed = CloudBackupStatusType("Failed") // Invalid includes Failed, Stopped, and Aborted used as filter to enumerate // cloud backups CloudBackupStatusInvalid = CloudBackupStatusType("Invalid") ) const ( CloudBackupRequestedStatePause = "pause" CloudBackupRequestedStateResume = "resume" CloudBackupRequestedStateStop = "stop" ) type CloudBackupStatus struct { // ID is the ID for the operation ID string // OpType indicates if this is a backup or restore OpType CloudBackupOpType // State indicates if the op is currently active/done/failed Status CloudBackupStatusType // BytesDone indicates Bytes uploaded/downloaded so far BytesDone uint64 // BytesTotal is the total number of bytes being transferred BytesTotal uint64 // EtaSeconds estimated time in seconds for backup/restore completion EtaSeconds int64 // StartTime indicates Op's start time StartTime time.Time // CompletedTime indicates Op's completed time CompletedTime time.Time // NodeID is the ID of the node where this Op is active NodeID string // SrcVolumeID is either the volume being backed-up or target volume to // which a cloud backup is being restored SrcVolumeID string // Info currently indicates only failure cause in case of failed backup/restore Info []string // CredentialUUID used for this backup/restore op CredentialUUID 
string // GroupCloudBackupID is valid for backups that were started as part of group // cloudbackup request GroupCloudBackupID string } type CloudBackupStatusResponse struct { // statuses is list of currently active/failed/done backup/restores // map key is the id of the task Statuses map[string]CloudBackupStatus } type CloudBackupCatalogRequest struct { // ID is Backup ID in the cloud ID string // CredentialUUID is the credential for cloud CredentialUUID string } type CloudBackupCatalogResponse struct { // Contents is listing of backup contents Contents []string } type CloudBackupHistoryRequest struct { // SrcVolumeID is volumeID for which history of backup/restore // is being requested SrcVolumeID string } type CloudBackupHistoryItem struct { // SrcVolumeID is volume ID which was backedup SrcVolumeID string // TimeStamp is the time at which either backup completed/failed Timestamp time.Time // Status indicates whether backup was completed/failed Status string } type CloudBackupHistoryResponse struct { // HistoryList is list of past backup/restores in the cluster HistoryList []CloudBackupHistoryItem } type CloudBackupStateChangeRequest struct { // Name of the backup/restore task for which state change // is being requested Name string // RequestedState is desired state of the op // can be pause/resume/stop RequestedState string } type CloudBackupScheduleInfo struct { // SrcVolumeID is the schedule's source volume SrcVolumeID string // CredentialUUID is the cloud credential used with this schedule CredentialUUID string // Schedule is the frequence of backup Schedule string // MaxBackups are the maximum number of backups retained // in cloud.Older backups are deleted MaxBackups uint // GroupID indicates the group of volumes for this cloudbackup schedule GroupID string // Labels indicates a volume group for this cloudsnap schedule Labels map[string]string // Full indicates if scheduled backups must be full always Full bool // RetentionDays is the number of days that the scheduled backups will be kept // and after these number of days it will be deleted RetentionDays uint32 } type CloudBackupSchedCreateRequest struct { CloudBackupScheduleInfo } // Callers must read the existing schedule and modify // required fields type CloudBackupSchedUpdateRequest struct { CloudBackupScheduleInfo // SchedUUID for which the schedule is being updated SchedUUID string } type CloudBackupGroupSchedCreateRequest struct { // GroupID indicates the group of volumes for which cloudbackup schedule is // being created GroupID string // Labels indicates a volume group for which this group cloudsnap schedule is // being created. If this is provided GroupId is not needed and vice-versa. 
Labels map[string]string // VolumeIDs are a list of volume IDs to use for the backup request // If multiple of GroupID, Labels or VolumeIDs are specified, volumes matching all of // them are backed up to cloud VolumeIDs []string // CredentialUUID is cloud credential to be used with this schedule CredentialUUID string // Schedule is the frequency of backup Schedule string // MaxBackups are the maximum number of backups retained // in cloud.Older backups are deleted MaxBackups uint // Full indicates if scheduled backups must be full always Full bool // RetentionDays is the number of days that the scheduled backups will be kept // and after these number of days it will be deleted RetentionDays uint32 } type CloudBackupGroupSchedUpdateRequest struct { // Any parameters in this can be updated CloudBackupGroupSchedCreateRequest // UUID of the group schedule being upated SchedUUID string } type CloudBackupSchedCreateResponse struct { // UUID is the UUID of the newly created schedule UUID string } type CloudBackupSchedDeleteRequest struct { // UUID is UUID of the schedule to be deleted UUID string } type CloudBackupSchedEnumerateResponse struct { // Schedule is map of schedule uuid to scheduleInfo Schedules map[string]CloudBackupScheduleInfo } // Defines the response for CapacityUsage request type CapacityUsageResponse struct { CapacityUsageInfo *CapacityUsageInfo // Describes the err if all of the usage details could not be obtained Error error } // // DriverTypeSimpleValueOf returns the string format of DriverType func DriverTypeSimpleValueOf(s string) (DriverType, error) { obj, err := simpleValueOf("driver_type", DriverType_value, s) return DriverType(obj), err } // SimpleString returns the string format of DriverType func (x DriverType) SimpleString() string { return simpleString("driver_type", DriverType_name, int32(x)) } // FSTypeSimpleValueOf returns the string format of FSType func FSTypeSimpleValueOf(s string) (FSType, error) { obj, err := simpleValueOf("fs_type", FSType_value, s) return FSType(obj), err } // SimpleString returns the string format of DriverType func (x FSType) SimpleString() string { return simpleString("fs_type", FSType_name, int32(x)) } // CosTypeSimpleValueOf returns the string format of CosType func CosTypeSimpleValueOf(s string) (CosType, error) { obj, exists := CosType_value[strings.ToUpper(s)] if !exists { return -1, fmt.Errorf("Invalid cos value: %s", s) } return CosType(obj), nil } // SimpleString returns the string format of CosType func (x CosType) SimpleString() string { return simpleString("cos_type", CosType_name, int32(x)) } // GraphDriverChangeTypeSimpleValueOf returns the string format of GraphDriverChangeType func GraphDriverChangeTypeSimpleValueOf(s string) (GraphDriverChangeType, error) { obj, err := simpleValueOf("graph_driver_change_type", GraphDriverChangeType_value, s) return GraphDriverChangeType(obj), err } // SimpleString returns the string format of GraphDriverChangeType func (x GraphDriverChangeType) SimpleString() string { return simpleString("graph_driver_change_type", GraphDriverChangeType_name, int32(x)) } // VolumeActionParamSimpleValueOf returns the string format of VolumeAction func VolumeActionParamSimpleValueOf(s string) (VolumeActionParam, error) { obj, err := simpleValueOf("volume_action_param", VolumeActionParam_value, s) return VolumeActionParam(obj), err } // SimpleString returns the string format of VolumeAction func (x VolumeActionParam) SimpleString() string { return simpleString("volume_action_param", VolumeActionParam_name, 
int32(x)) } // VolumeStateSimpleValueOf returns the string format of VolumeState func VolumeStateSimpleValueOf(s string) (VolumeState, error) { obj, err := simpleValueOf("volume_state", VolumeState_value, s) return VolumeState(obj), err } // SimpleString returns the string format of VolumeState func (x VolumeState) SimpleString() string { return simpleString("volume_state", VolumeState_name, int32(x)) } // VolumeStatusSimpleValueOf returns the string format of VolumeStatus func VolumeStatusSimpleValueOf(s string) (VolumeStatus, error) { obj, err := simpleValueOf("volume_status", VolumeStatus_value, s) return VolumeStatus(obj), err } // SimpleString returns the string format of VolumeStatus func (x VolumeStatus) SimpleString() string { return simpleString("volume_status", VolumeStatus_name, int32(x)) } // IoProfileSimpleValueOf returns the string format of IoProfile func IoProfileSimpleValueOf(s string) (IoProfile, error) { obj, err := simpleValueOf("io_profile", IoProfile_value, s) return IoProfile(obj), err } // SimpleString returns the string format of IoProfile func (x IoProfile) SimpleString() string { return simpleString("io_profile", IoProfile_name, int32(x)) } // ProxyProtocolSimpleValueOf returns the string format of ProxyProtocol func ProxyProtocolSimpleValueOf(s string) (ProxyProtocol, error) { obj, err := simpleValueOf("proxy_protocol", ProxyProtocol_value, s) return ProxyProtocol(obj), err } // SimpleString returns the string format of ProxyProtocol func (x ProxyProtocol) SimpleString() string { return simpleString("proxy_protocol", ProxyProtocol_name, int32(x)) } func simpleValueOf(typeString string, valueMap map[string]int32, s string) (int32, error) { obj, ok := valueMap[strings.ToUpper(fmt.Sprintf("%s_%s", typeString, s))] if !ok { return 0, fmt.Errorf("no openstorage.%s for %s", strings.ToUpper(typeString), s) } return obj, nil } func simpleString(typeString string, nameMap map[int32]string, v int32) string { s, ok := nameMap[v] if !ok { return strconv.Itoa(int(v)) } return strings.TrimPrefix(strings.ToLower(s), fmt.Sprintf("%s_", strings.ToLower(typeString))) } // ScanPolicyTriggerValueof returns value of string func ScanPolicy_ScanTriggerSimpleValueOf(s string) (ScanPolicy_ScanTrigger, error) { obj, err := simpleValueOf("scan_trigger", ScanPolicy_ScanTrigger_value, s) return ScanPolicy_ScanTrigger(obj), err } // SimpleString returns the string format of ScanPolicy_ScanTrigger func (x ScanPolicy_ScanTrigger) SimpleString() string { return simpleString("scan_trigger", ScanPolicy_ScanTrigger_name, int32(x)) } // ScanPolicyActioinValueof returns value of string func ScanPolicy_ScanActionSimpleValueOf(s string) (ScanPolicy_ScanAction, error) { obj, err := simpleValueOf("scan_action", ScanPolicy_ScanAction_value, s) return ScanPolicy_ScanAction(obj), err } // SimpleString returns the string format of ScanPolicy_ScanAction func (x ScanPolicy_ScanAction) SimpleString() string { return simpleString("scan_action", ScanPolicy_ScanAction_name, int32(x)) } func toSec(ms uint64) uint64 { return ms / 1000 } // WriteThroughput returns the write throughput func (v *Stats) WriteThroughput() uint64 { intv := toSec(v.IntervalMs) if intv == 0 { return 0 } return (v.WriteBytes) / intv } // ReadThroughput returns the read throughput func (v *Stats) ReadThroughput() uint64 { intv := toSec(v.IntervalMs) if intv == 0 { return 0 } return (v.ReadBytes) / intv } // Latency returns latency func (v *Stats) Latency() uint64 { ops := v.Writes + v.Reads if ops == 0 { return 0 } return (uint64)((v.IoMs * 
1000) / ops) } // Read latency returns avg. time required for read operation to complete func (v *Stats) ReadLatency() uint64 { if v.Reads == 0 { return 0 } return (uint64)((v.ReadMs * 1000) / v.Reads) } // Write latency returns avg. time required for write operation to complete func (v *Stats) WriteLatency() uint64 { if v.Writes == 0 { return 0 } return (uint64)((v.WriteMs * 1000) / v.Writes) } // Iops returns iops func (v *Stats) Iops() uint64 { intv := toSec(v.IntervalMs) if intv == 0 { return 0 } return (v.Writes + v.Reads) / intv } // Scaled returns true if the volume is scaled. func (v *Volume) Scaled() bool { return v.Spec.Scale > 1 } // Contains returns true if locationConstraint is a member of volume's replication set. func (m *Volume) Contains(locationConstraint string) bool { rsets := m.GetReplicaSets() for _, rset := range rsets { for _, node := range rset.Nodes { if node == locationConstraint { return true } } } // also check storage pool UUIDs for _, replSet := range m.ReplicaSets { for _, uid := range replSet.PoolUuids { if uid == locationConstraint { return true } } } return false } // Copy makes a deep copy of VolumeSpec func (s *VolumeSpec) Copy() *VolumeSpec { spec := *s if s.ReplicaSet != nil { spec.ReplicaSet = &ReplicaSet{Nodes: make([]string, len(s.ReplicaSet.Nodes))} copy(spec.ReplicaSet.Nodes, s.ReplicaSet.Nodes) } return &spec } // Copy makes a deep copy of Node func (s *Node) Copy() *Node { localCopy := deepcopy.Copy(*s) nodeCopy := localCopy.(Node) return &nodeCopy } func (v Volume) IsClone() bool { return v.Source != nil && len(v.Source.Parent) != 0 && !v.Readonly } func (v Volume) IsSnapshot() bool { return v.Source != nil && len(v.Source.Parent) != 0 && v.Readonly } func (v Volume) DisplayId() string { if v.Locator != nil { return fmt.Sprintf("%s (%s)", v.Locator.Name, v.Id) } else { return v.Id } } // ToStorageNode converts a Node structure to an exported gRPC StorageNode struct func (s *Node) ToStorageNode() *StorageNode { node := &StorageNode{ Id: s.Id, SchedulerNodeName: s.SchedulerNodeName, Cpu: s.Cpu, MemTotal: s.MemTotal, MemUsed: s.MemUsed, MemFree: s.MemFree, AvgLoad: int64(s.Avgload), Status: s.Status, MgmtIp: s.MgmtIp, DataIp: s.DataIp, Hostname: s.Hostname, HWType: s.HWType, SecurityStatus: s.SecurityStatus, } node.Disks = make(map[string]*StorageResource) for k, v := range s.Disks { // need to take the address of a local variable and not of v // since its address does not change vv := v node.Disks[k] = &vv } node.NodeLabels = make(map[string]string) for k, v := range s.NodeLabels { node.NodeLabels[k] = v } node.Pools = make([]*StoragePool, len(s.Pools)) for i, v := range s.Pools { // need to take the address of a local variable and not of v // since its address does not change vv := v node.Pools[i] = &vv } return node } // ToStorageCluster converts a Cluster structure to an exported gRPC StorageCluster struct func (c *Cluster) ToStorageCluster() *StorageCluster { cluster := &StorageCluster{ Status: c.Status, // Due to history, the cluster ID is normally the name of the cluster, not the // unique identifier Name: c.Id, } return cluster } func CloudBackupStatusTypeToSdkCloudBackupStatusType( t CloudBackupStatusType, ) SdkCloudBackupStatusType { switch t { case CloudBackupStatusNotStarted: return SdkCloudBackupStatusType_SdkCloudBackupStatusTypeNotStarted case CloudBackupStatusDone: return SdkCloudBackupStatusType_SdkCloudBackupStatusTypeDone case CloudBackupStatusAborted: return SdkCloudBackupStatusType_SdkCloudBackupStatusTypeAborted case 
CloudBackupStatusPaused: return SdkCloudBackupStatusType_SdkCloudBackupStatusTypePaused case CloudBackupStatusStopped: return SdkCloudBackupStatusType_SdkCloudBackupStatusTypeStopped case CloudBackupStatusActive: return SdkCloudBackupStatusType_SdkCloudBackupStatusTypeActive case CloudBackupStatusFailed: return SdkCloudBackupStatusType_SdkCloudBackupStatusTypeFailed case CloudBackupStatusQueued: return SdkCloudBackupStatusType_SdkCloudBackupStatusTypeQueued case CloudBackupStatusInvalid: return SdkCloudBackupStatusType_SdkCloudBackupStatusTypeInvalid default: return SdkCloudBackupStatusType_SdkCloudBackupStatusTypeUnknown } } func SdkCloudBackupStatusTypeToCloudBackupStatusString( t SdkCloudBackupStatusType, ) string { switch t { case SdkCloudBackupStatusType_SdkCloudBackupStatusTypeNotStarted: return string(CloudBackupStatusNotStarted) case SdkCloudBackupStatusType_SdkCloudBackupStatusTypeDone: return string(CloudBackupStatusDone) case SdkCloudBackupStatusType_SdkCloudBackupStatusTypeAborted: return string(CloudBackupStatusAborted) case SdkCloudBackupStatusType_SdkCloudBackupStatusTypePaused: return string(CloudBackupStatusPaused) case SdkCloudBackupStatusType_SdkCloudBackupStatusTypeStopped: return string(CloudBackupStatusStopped) case SdkCloudBackupStatusType_SdkCloudBackupStatusTypeActive: return string(CloudBackupStatusActive) case SdkCloudBackupStatusType_SdkCloudBackupStatusTypeFailed: return string(CloudBackupStatusFailed) case SdkCloudBackupStatusType_SdkCloudBackupStatusTypeQueued: return string(CloudBackupStatusQueued) case SdkCloudBackupStatusType_SdkCloudBackupStatusTypeInvalid: return string(CloudBackupStatusInvalid) default: return string(CloudBackupStatusFailed) } } func StringToSdkCloudBackupStatusType(s string) SdkCloudBackupStatusType { return CloudBackupStatusTypeToSdkCloudBackupStatusType(CloudBackupStatusType(s)) } func (b *CloudBackupInfo) ToSdkCloudBackupInfo() *SdkCloudBackupInfo { info := &SdkCloudBackupInfo{ Id: b.ID, SrcVolumeId: b.SrcVolumeID, SrcVolumeName: b.SrcVolumeName, Metadata: b.Metadata, ClusterType: b.ClusterType, Namespace: b.Namespace, } info.Timestamp, _ = ptypes.TimestampProto(b.Timestamp) info.Status = StringToSdkCloudBackupStatusType(b.Status) return info } func (r *CloudBackupEnumerateResponse) ToSdkCloudBackupEnumerateWithFiltersResponse() *SdkCloudBackupEnumerateWithFiltersResponse { resp := &SdkCloudBackupEnumerateWithFiltersResponse{ Backups: make([]*SdkCloudBackupInfo, len(r.Backups)), } for i, v := range r.Backups { resp.Backups[i] = v.ToSdkCloudBackupInfo() } resp.ContinuationToken = r.ContinuationToken return resp } func CloudBackupOpTypeToSdkCloudBackupOpType(t CloudBackupOpType) SdkCloudBackupOpType { switch t { case CloudBackupOp: return SdkCloudBackupOpType_SdkCloudBackupOpTypeBackupOp case CloudRestoreOp: return SdkCloudBackupOpType_SdkCloudBackupOpTypeRestoreOp default: return SdkCloudBackupOpType_SdkCloudBackupOpTypeUnknown } } func StringToSdkCloudBackupOpType(s string) SdkCloudBackupOpType { return CloudBackupOpTypeToSdkCloudBackupOpType(CloudBackupOpType(s)) } func SdkCloudBackupOpTypeToCloudBackupOpType(t SdkCloudBackupOpType) CloudBackupOpType { switch t { case SdkCloudBackupOpType_SdkCloudBackupOpTypeBackupOp: return CloudBackupOp case SdkCloudBackupOpType_SdkCloudBackupOpTypeRestoreOp: return CloudRestoreOp default: return CloudBackupOpType("Unknown") } } func (s CloudBackupStatus) ToSdkCloudBackupStatus() *SdkCloudBackupStatus { status := &SdkCloudBackupStatus{ BackupId: s.ID, Optype: 
CloudBackupOpTypeToSdkCloudBackupOpType(s.OpType), Status: CloudBackupStatusTypeToSdkCloudBackupStatusType(s.Status), BytesDone: s.BytesDone, NodeId: s.NodeID, Info: s.Info, CredentialId: s.CredentialUUID, SrcVolumeId: s.SrcVolumeID, EtaSeconds: s.EtaSeconds, BytesTotal: s.BytesTotal, } status.StartTime, _ = ptypes.TimestampProto(s.StartTime) status.CompletedTime, _ = ptypes.TimestampProto(s.CompletedTime) return status } func (r *CloudBackupStatusResponse) ToSdkCloudBackupStatusResponse() *SdkCloudBackupStatusResponse { resp := &SdkCloudBackupStatusResponse{ Statuses: make(map[string]*SdkCloudBackupStatus), } for k, v := range r.Statuses { resp.Statuses[k] = v.ToSdkCloudBackupStatus() } return resp } func (h CloudBackupHistoryItem) ToSdkCloudBackupHistoryItem() *SdkCloudBackupHistoryItem { item := &SdkCloudBackupHistoryItem{ SrcVolumeId: h.SrcVolumeID, Status: StringToSdkCloudBackupStatusType(h.Status), } item.Timestamp, _ = ptypes.TimestampProto(h.Timestamp) return item } func (r *CloudBackupHistoryResponse) ToSdkCloudBackupHistoryResponse() *SdkCloudBackupHistoryResponse { resp := &SdkCloudBackupHistoryResponse{ HistoryList: make([]*SdkCloudBackupHistoryItem, len(r.HistoryList)), } for i, v := range r.HistoryList { resp.HistoryList[i] = v.ToSdkCloudBackupHistoryItem() } return resp } func (l *VolumeLocator) MergeVolumeSpecLabels(s *VolumeSpec) *VolumeLocator { if l.VolumeLabels == nil && len(s.GetVolumeLabels()) > 0 { l.VolumeLabels = make(map[string]string) } for k, v := range s.GetVolumeLabels() { l.VolumeLabels[k] = v } return l } func (v *Volume) IsPermitted(ctx context.Context, accessType Ownership_AccessType) bool { return v.GetSpec().IsPermitted(ctx, accessType) } func (v *VolumeSpec) IsPermitted(ctx context.Context, accessType Ownership_AccessType) bool { return v.GetOwnership().IsPermittedByContext(ctx, accessType) } func (v *VolumeSpec) IsPermittedFromUserInfo(user *auth.UserInfo, accessType Ownership_AccessType) bool { if v.IsPublic(accessType) { return true } if v.GetOwnership() != nil { return v.GetOwnership().IsPermitted(user, accessType) } return true } func (v *VolumeSpec) IsPublic(accessType Ownership_AccessType) bool { return v.GetOwnership() == nil || v.GetOwnership().IsPublic(accessType) } func (v *VolumeSpec) IsPureVolume() bool { return v.GetProxySpec() != nil && v.GetProxySpec().IsPureBackend() } // GetCloneCreatorOwnership returns the appropriate ownership for the // new snapshot and if an update is required func (v *VolumeSpec) GetCloneCreatorOwnership(ctx context.Context) (*Ownership, bool) { o := v.GetOwnership() // If there is user information, then auth is enabled if userinfo, ok := auth.NewUserInfoFromContext(ctx); ok { // Check if the owner is the one who cloned it if o != nil && o.IsOwner(userinfo) { return o, false } // Not the same owner, we now need new ownership. // This works for public volumes also. 
return OwnershipSetUsernameFromContext(ctx, nil), true } return o, false } // Check access permission of SdkStoragePolicy Objects func (s *SdkStoragePolicy) IsPermitted(ctx context.Context, accessType Ownership_AccessType) bool { if s.IsPublic(accessType) { return true } // Storage Policy is not public, check permission if userinfo, ok := auth.NewUserInfoFromContext(ctx); ok { // Check Access return s.IsPermittedFromUserInfo(userinfo, accessType) } else { // There is no user information in the context so // authorization is not running return true } } func (s *SdkStoragePolicy) IsPermittedFromUserInfo(user *auth.UserInfo, accessType Ownership_AccessType) bool { if s.IsPublic(accessType) { return true } if s.GetOwnership() != nil { return s.GetOwnership().IsPermitted(user, accessType) } return true } func (s *SdkStoragePolicy) IsPublic(accessType Ownership_AccessType) bool { return s.GetOwnership() == nil || s.GetOwnership().IsPublic(accessType) } func CloudBackupRequestedStateToSdkCloudBackupRequestedState( t string, ) SdkCloudBackupRequestedState { switch t { case CloudBackupRequestedStateStop: return SdkCloudBackupRequestedState_SdkCloudBackupRequestedStateStop case CloudBackupRequestedStatePause: return SdkCloudBackupRequestedState_SdkCloudBackupRequestedStatePause case CloudBackupRequestedStateResume: return SdkCloudBackupRequestedState_SdkCloudBackupRequestedStateResume default: return SdkCloudBackupRequestedState_SdkCloudBackupRequestedStateUnknown } } // Helpers for volume state action func (m *VolumeStateAction) IsAttach() bool { return m.GetAttach() == VolumeActionParam_VOLUME_ACTION_PARAM_ON } func (m *VolumeStateAction) IsDetach() bool { return m.GetAttach() == VolumeActionParam_VOLUME_ACTION_PARAM_OFF } func (m *VolumeStateAction) IsMount() bool { return m.GetMount() == VolumeActionParam_VOLUME_ACTION_PARAM_ON } func (m *VolumeStateAction) IsUnMount() bool { return m.GetMount() == VolumeActionParam_VOLUME_ACTION_PARAM_OFF } // IsAttached checks if a volume is attached func (v *Volume) IsAttached() bool { return len(v.AttachedOn) > 0 && v.State == VolumeState_VOLUME_STATE_ATTACHED && v.AttachedState != AttachState_ATTACH_STATE_INTERNAL } // TokenSecretContext contains all nessesary information to get a // token secret from any provider type TokenSecretContext struct { SecretName string SecretNamespace string } // ParseProxyEndpoint parses the proxy endpoint and returns the // proxy protocol and the endpoint func ParseProxyEndpoint(proxyEndpoint string) (ProxyProtocol, string) { if len(proxyEndpoint) == 0 { return ProxyProtocol_PROXY_PROTOCOL_INVALID, "" } tokens := strings.Split(proxyEndpoint, "://") if len(tokens) == 1 { return ProxyProtocol_PROXY_PROTOCOL_INVALID, tokens[0] } else if len(tokens) == 2 { switch tokens[0] { case SpecProxyProtocolS3: return ProxyProtocol_PROXY_PROTOCOL_S3, tokens[1] case SpecProxyProtocolNFS: return ProxyProtocol_PROXY_PROTOCOL_NFS, tokens[1] case SpecProxyProtocolPXD: return ProxyProtocol_PROXY_PROTOCOL_PXD, tokens[1] default: return ProxyProtocol_PROXY_PROTOCOL_INVALID, tokens[1] } } return ProxyProtocol_PROXY_PROTOCOL_INVALID, "" } func (s *ProxySpec) IsPureBackend() bool { return s.ProxyProtocol == ProxyProtocol_PROXY_PROTOCOL_PURE_BLOCK || s.ProxyProtocol == ProxyProtocol_PROXY_PROTOCOL_PURE_FILE } // GetAllEnumInfo returns an EnumInfo for every proto enum func GetAllEnumInfo() []protoimpl.EnumInfo { return file_api_api_proto_enumTypes }
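The `ParseProxyEndpoint` helper near the end of the Go file above splits a proxy endpoint of the form `protocol://endpoint` into its protocol enum and the remaining endpoint string. A minimal usage sketch, assuming the standard `github.com/libopenstorage/openstorage/api` import path; the NFS address is made up purely for illustration:

```go
package main

import (
	"fmt"

	"github.com/libopenstorage/openstorage/api"
)

func main() {
	// "10.0.0.1:/exports/vol1" is an invented endpoint used only for illustration.
	proto, endpoint := api.ParseProxyEndpoint("nfs://10.0.0.1:/exports/vol1")

	// proto is ProxyProtocol_PROXY_PROTOCOL_NFS, endpoint is "10.0.0.1:/exports/vol1".
	fmt.Println(proto, endpoint)

	// An endpoint without a "protocol://" prefix falls back to PROXY_PROTOCOL_INVALID.
	proto, endpoint = api.ParseProxyEndpoint("10.0.0.1:/exports/vol1")
	fmt.Println(proto, endpoint)
}
```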
1
8993
Most spec options appear to be snake case (`fa_direct_raw_block`); let's stick to that convention.
libopenstorage-openstorage
go
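The review note above asks new volume spec options to follow the snake_case value convention already used by the existing keys in the file (for example `io_throttle_rd_iops`). A minimal sketch of what a conforming constant could look like; `SpecFaDirectRawBlock` is a hypothetical identifier invented here for illustration, not the name from the actual patch:

```go
package api

// Hypothetical constant following the snake_case value convention the reviewer
// asks for; the real identifier and its placement in the patch are not shown
// in this excerpt.
const (
	SpecFaDirectRawBlock = "fa_direct_raw_block"
)
```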
@@ -4,7 +4,6 @@ var globby = require('globby'); var WebDriver = require('selenium-webdriver'); var chrome = require('selenium-webdriver/chrome'); var chromedriver = require('chromedriver'); -var isCI = require('is-ci'); var args = process.argv.slice(2);
1
/*global window, Promise */ var globby = require('globby'); var WebDriver = require('selenium-webdriver'); var chrome = require('selenium-webdriver/chrome'); var chromedriver = require('chromedriver'); var isCI = require('is-ci'); var args = process.argv.slice(2); // allow running certain browsers through command line args // (only one browser supported, run multiple times for more browsers) var browser = 'chrome'; args.forEach(function(arg) { // pattern: browsers=Chrome var parts = arg.split('='); if (parts[0] === 'browser') { browser = parts[1].toLowerCase(); } }); // circle has everything configured to run chrome but local install // may not if (browser === 'chrome' && !isCI) { var service = new chrome.ServiceBuilder(chromedriver.path).build(); chrome.setDefaultService(service); } /** * Keep injecting scripts until window.mochaResults is set */ function collectTestResults(driver) { // inject a script that waits half a second return driver .executeAsyncScript(function() { var callback = arguments[arguments.length - 1]; setTimeout(function() { // return the mocha results (or undefined if not finished) callback(window.mochaResults); }, 500); }) .then(function(result) { // If there are no results, listen a little longer if (!result) { return collectTestResults(driver); // if there are, return them } else { return Promise.resolve(result); } }); } /** * Test each URL */ function runTestUrls(driver, isMobile, urls, errors) { var url = urls.shift(); errors = errors || []; return ( driver .get(url) // Get results .then(function() { return Promise.all([ driver.getCapabilities(), collectTestResults(driver) ]); }) // And process them .then(function(promiseResults) { var capabilities = promiseResults[0]; var result = promiseResults[1]; var browserName = capabilities.get('browserName') + (capabilities.get('mobileEmulationEnabled') ? '-mobile' : ''); console.log(url + ' [' + browserName + ']'); // Remember the errors (result.reports || []).forEach(function(err) { console.log(err.message); err.url = url; err.browser = browserName; errors.push(err); }); // Log the result of the page tests console[result.failures ? 'error' : 'log']( 'passes: ' + result.passes + ', ' + 'failures: ' + result.failures + ', ' + 'duration: ' + result.duration / 1000 + 's' ); console.log(); }) .then(function() { // Start the next job, if any if (urls.length > 0) { return runTestUrls(driver, isMobile, urls, errors); } else { driver.quit(); return Promise.resolve(errors); } }) ); } /* * Build web driver depends whether REMOTE_SELENIUM_URL is set */ function buildWebDriver(browser) { var capabilities; var mobileBrowser = browser.split('-mobile'); if (mobileBrowser.length > 1) { browser = mobileBrowser[0]; capabilities = { browserName: mobileBrowser[0], chromeOptions: { mobileEmulation: { deviceMetrics: { width: 320, height: 568, pixelRatio: 2 } } } }; } var webdriver = new WebDriver.Builder() .withCapabilities(capabilities) .forBrowser(browser); if (process.env.REMOTE_SELENIUM_URL) { webdriver.usingServer(process.env.REMOTE_SELENIUM_URL); } // @see https://github.com/SeleniumHQ/selenium/issues/6026 if (browser === 'safari') { var safari = require('selenium-webdriver/safari'); var server = new safari.ServiceBuilder() .addArguments('--legacy') .build() .start(); webdriver.usingServer(server); } return { driver: webdriver.build(), isMobile: mobileBrowser.length > 1 }; } function start(options) { var driver; var isMobile = false; // yes, really, and this isn't documented anywhere either. options.browser = options.browser === 'edge' ? 
'MicrosoftEdge' : options.browser; var testUrls = globby .sync(['test/integration/full/**/*.html', '!**/frames/**/*.html']) .map(function(url) { return 'http://localhost:9876/' + url; }); if ( (process.platform === 'win32' && options.browser === 'safari') || (process.platform === 'darwin' && ['ie', 'MicrosoftEdge'].indexOf(options.browser) !== -1) || ((process.platform === 'linux' || process.env.REMOTE_SELENIUM_URL) && ['ie', 'MicrosoftEdge', 'safari'].indexOf(options.browser) !== -1) ) { console.log(); console.log( 'Skipped ' + options.browser + ' as it is not supported on this platform' ); return process.exit(); } // try to load the browser try { var webDriver = buildWebDriver(options.browser); driver = webDriver.driver; isMobile = webDriver.isMobile; // If load fails, warn user and move to the next task } catch (err) { console.log(); console.log(err.message); console.log('Aborted testing using ' + options.browser); return process.exit(); } // Give driver timeout options for scripts driver .manage() .timeouts() .setScriptTimeout(!isMobile ? 60000 * 5 : 60000 * 10); // allow to wait for page load implicitly driver .manage() .timeouts() .implicitlyWait(50000); // Test all pages runTestUrls(driver, isMobile, testUrls) .then(function(testErrors) { // log each error and abort testErrors.forEach(function(err) { console.log(); console.log('URL: ' + err.url); console.log('Browser: ' + err.browser); console.log('Describe: ' + err.titles.join(' > ')); console.log('it ' + err.name); console.log(err.stack); console.log(); }); process.exit(testErrors.length); // catch any potential problems }) .catch(function(err) { console.log(err); process.exit(1); }); } start({ browser: browser });
1
16114
Looks like we can then drop this dependency.
dequelabs-axe-core
js
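The patch for this row removes the `is-ci` require, and the review comment notes that the package itself can then be dropped. A rough sketch of the resulting local setup, assuming the accompanying change also removes the `!isCI` guard around the chromedriver configuration shown in the test runner above; this is not the actual follow-up commit:

```js
// Sketch only: with the is-ci check removed, the chromedriver service is always
// configured explicitly for local Chrome runs, so the `is-ci` package can also
// be dropped from package.json.
var chrome = require('selenium-webdriver/chrome');
var chromedriver = require('chromedriver');

// In the real runner `browser` is parsed from the command-line arguments.
var browser = 'chrome';

if (browser === 'chrome') {
  var service = new chrome.ServiceBuilder(chromedriver.path).build();
  chrome.setDefaultService(service);
}
```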
@@ -667,6 +667,10 @@ class FlowMaster(controller.Master): self.add_event("Script error:\n" + str(e), "error") self.scripts.remove(script_obj) + def reload_scripts(self): + for s in self.scripts[:]: + s.load() + def load_script(self, command): """ Loads a script. Returns an error description if something went
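The patch above adds a `reload_scripts` method that re-runs `load()` on every script currently registered with the FlowMaster. A minimal usage sketch; the caller and the trigger are assumptions for illustration, not part of the patch:

```python
# Sketch: hypothetical caller, not part of the patch. After script files have
# been edited on disk, reload_scripts() re-executes load() on every registered
# Script object, picking up the changes.
def on_script_files_changed(master):
    # `master` is assumed to be an existing FlowMaster instance with loaded scripts.
    master.reload_scripts()
```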
1
""" This module provides more sophisticated flow tracking and provides filtering and interception facilities. """ from __future__ import absolute_import from abc import abstractmethod, ABCMeta import hashlib import Cookie import cookielib import os import re import urlparse from netlib import wsgi from netlib.exceptions import HttpException from netlib.http import CONTENT_MISSING, Headers, http1 import netlib.http from . import controller, tnetstring, filt, script, version from .onboarding import app from .proxy.config import HostMatcher from .protocol.http_replay import RequestReplayThread from .protocol import Kill from .models import ClientConnection, ServerConnection, HTTPResponse, HTTPFlow, HTTPRequest class AppRegistry: def __init__(self): self.apps = {} def add(self, app, domain, port): """ Add a WSGI app to the registry, to be served for requests to the specified domain, on the specified port. """ self.apps[(domain, port)] = wsgi.WSGIAdaptor( app, domain, port, version.NAMEVERSION ) def get(self, request): """ Returns an WSGIAdaptor instance if request matches an app, or None. """ if (request.host, request.port) in self.apps: return self.apps[(request.host, request.port)] if "host" in request.headers: host = request.headers["host"] return self.apps.get((host, request.port), None) class ReplaceHooks: def __init__(self): self.lst = [] def set(self, r): self.clear() for i in r: self.add(*i) def add(self, fpatt, rex, s): """ add a replacement hook. fpatt: a string specifying a filter pattern. rex: a regular expression. s: the replacement string returns true if hook was added, false if the pattern could not be parsed. """ cpatt = filt.parse(fpatt) if not cpatt: return False try: re.compile(rex) except re.error: return False self.lst.append((fpatt, rex, s, cpatt)) return True def get_specs(self): """ Retrieve the hook specifcations. Returns a list of (fpatt, rex, s) tuples. """ return [i[:3] for i in self.lst] def count(self): return len(self.lst) def run(self, f): for _, rex, s, cpatt in self.lst: if cpatt(f): if f.response: f.response.replace(rex, s) else: f.request.replace(rex, s) def clear(self): self.lst = [] class SetHeaders: def __init__(self): self.lst = [] def set(self, r): self.clear() for i in r: self.add(*i) def add(self, fpatt, header, value): """ Add a set header hook. fpatt: String specifying a filter pattern. header: Header name. value: Header value string Returns True if hook was added, False if the pattern could not be parsed. """ cpatt = filt.parse(fpatt) if not cpatt: return False self.lst.append((fpatt, header, value, cpatt)) return True def get_specs(self): """ Retrieve the hook specifcations. Returns a list of (fpatt, rex, s) tuples. """ return [i[:3] for i in self.lst] def count(self): return len(self.lst) def clear(self): self.lst = [] def run(self, f): for _, header, value, cpatt in self.lst: if cpatt(f): if f.response: f.response.headers.pop(header, None) else: f.request.headers.pop(header, None) for _, header, value, cpatt in self.lst: if cpatt(f): if f.response: f.response.headers.fields.append((header, value)) else: f.request.headers.fields.append((header, value)) class StreamLargeBodies(object): def __init__(self, max_size): self.max_size = max_size def run(self, flow, is_request): r = flow.request if is_request else flow.response expected_size = http1.expected_http_body_size( flow.request, flow.response if not is_request else None ) if not (0 <= expected_size <= self.max_size): # r.stream may already be a callable, which we want to preserve. 
r.stream = r.stream or True class ClientPlaybackState: def __init__(self, flows, exit): self.flows, self.exit = flows, exit self.current = None self.testing = False # Disables actual replay for testing. def count(self): return len(self.flows) def done(self): if len(self.flows) == 0 and not self.current: return True return False def clear(self, flow): """ A request has returned in some way - if this is the one we're servicing, go to the next flow. """ if flow is self.current: self.current = None def tick(self, master): if self.flows and not self.current: self.current = self.flows.pop(0).copy() if not self.testing: master.replay_request(self.current) else: self.current.reply = controller.DummyReply() master.handle_request(self.current) if self.current.response: master.handle_response(self.current) class ServerPlaybackState: def __init__( self, headers, flows, exit, nopop, ignore_params, ignore_content, ignore_payload_params, ignore_host): """ headers: Case-insensitive list of request headers that should be included in request-response matching. """ self.headers = headers self.exit = exit self.nopop = nopop self.ignore_params = ignore_params self.ignore_content = ignore_content self.ignore_payload_params = ignore_payload_params self.ignore_host = ignore_host self.fmap = {} for i in flows: if i.response: l = self.fmap.setdefault(self._hash(i), []) l.append(i) def count(self): return sum(len(i) for i in self.fmap.values()) def _hash(self, flow): """ Calculates a loose hash of the flow request. """ r = flow.request _, _, path, _, query, _ = urlparse.urlparse(r.url) queriesArray = urlparse.parse_qsl(query, keep_blank_values=True) key = [ str(r.port), str(r.scheme), str(r.method), str(path), ] if not self.ignore_content: form_contents = r.urlencoded_form or r.multipart_form if self.ignore_payload_params and form_contents: key.extend( p for p in form_contents if p[0] not in self.ignore_payload_params ) else: key.append(str(r.content)) if not self.ignore_host: key.append(r.host) filtered = [] ignore_params = self.ignore_params or [] for p in queriesArray: if p[0] not in ignore_params: filtered.append(p) for p in filtered: key.append(p[0]) key.append(p[1]) if self.headers: headers = [] for i in self.headers: v = r.headers.get(i) headers.append((i, v)) key.append(headers) return hashlib.sha256(repr(key)).digest() def next_flow(self, request): """ Returns the next flow object, or None if no matching flow was found. """ l = self.fmap.get(self._hash(request)) if not l: return None if self.nopop: return l[0] else: return l.pop(0) class StickyCookieState: def __init__(self, flt): """ flt: Compiled filter. """ self.jar = {} self.flt = flt def ckey(self, m, f): """ Returns a (domain, port, path) tuple. """ return ( m["domain"] or f.request.host, f.request.port, m["path"] or "/" ) def domain_match(self, a, b): if cookielib.domain_match(a, b): return True elif cookielib.domain_match(a, b.strip(".")): return True return False def handle_response(self, f): for i in f.response.headers.get_all("set-cookie"): # FIXME: We now know that Cookie.py screws up some cookies with # valid RFC 822/1123 datetime specifications for expiry. Sigh. 
c = Cookie.SimpleCookie(str(i)) for m in c.values(): k = self.ckey(m, f) if self.domain_match(f.request.host, k[0]): self.jar[k] = m def handle_request(self, f): l = [] if f.match(self.flt): for i in self.jar.keys(): match = [ self.domain_match(f.request.host, i[0]), f.request.port == i[1], f.request.path.startswith(i[2]) ] if all(match): l.append(self.jar[i].output(header="").strip()) if l: f.request.stickycookie = True f.request.headers.set_all("cookie",l) class StickyAuthState: def __init__(self, flt): """ flt: Compiled filter. """ self.flt = flt self.hosts = {} def handle_request(self, f): host = f.request.host if "authorization" in f.request.headers: self.hosts[host] = f.request.headers["authorization"] elif f.match(self.flt): if host in self.hosts: f.request.headers["authorization"] = self.hosts[host] class FlowList(object): __metaclass__ = ABCMeta def __iter__(self): return iter(self._list) def __contains__(self, item): return item in self._list def __getitem__(self, item): return self._list[item] def __nonzero__(self): return bool(self._list) def __len__(self): return len(self._list) def index(self, f): return self._list.index(f) @abstractmethod def _add(self, f): return @abstractmethod def _update(self, f): return @abstractmethod def _remove(self, f): return class FlowView(FlowList): def __init__(self, store, filt=None): self._list = [] if not filt: filt = lambda flow: True self._build(store, filt) self.store = store self.store.views.append(self) def _close(self): self.store.views.remove(self) def _build(self, flows, filt=None): if filt: self.filt = filt self._list = list(filter(self.filt, flows)) def _add(self, f): if self.filt(f): self._list.append(f) def _update(self, f): if f not in self._list: self._add(f) elif not self.filt(f): self._remove(f) def _remove(self, f): if f in self._list: self._list.remove(f) def _recalculate(self, flows): self._build(flows) class FlowStore(FlowList): """ Responsible for handling flows in the state: Keeps a list of all flows and provides views on them. """ def __init__(self): self._list = [] self._set = set() # Used for O(1) lookups self.views = [] self._recalculate_views() def get(self, flow_id): for f in self._list: if f.id == flow_id: return f def __contains__(self, f): return f in self._set def _add(self, f): """ Adds a flow to the state. The flow to add must not be present in the state. """ self._list.append(f) self._set.add(f) for view in self.views: view._add(f) def _update(self, f): """ Notifies the state that a flow has been updated. The flow must be present in the state. """ if f in self: for view in self.views: view._update(f) def _remove(self, f): """ Deletes a flow from the state. The flow must be present in the state. """ self._list.remove(f) self._set.remove(f) for view in self.views: view._remove(f) # Expensive bulk operations def _extend(self, flows): """ Adds a list of flows to the state. The list of flows to add must not contain flows that are already in the state. """ self._list.extend(flows) self._set.update(flows) self._recalculate_views() def _clear(self): self._list = [] self._set = set() self._recalculate_views() def _recalculate_views(self): """ Expensive operation: Recalculate all the views after a bulk change. """ for view in self.views: view._recalculate(self) # Utility functions. # There are some common cases where we need to argue about all flows # irrespective of filters on the view etc (i.e. on shutdown). 
def active_count(self): c = 0 for i in self._list: if not i.response and not i.error: c += 1 return c # TODO: Should accept_all operate on views or on all flows? def accept_all(self, master): for f in self._list: f.accept_intercept(master) def kill_all(self, master): for f in self._list: f.kill(master) class State(object): def __init__(self): self.flows = FlowStore() self.view = FlowView(self.flows, None) # These are compiled filt expressions: self.intercept = None @property def limit_txt(self): return getattr(self.view.filt, "pattern", None) def flow_count(self): return len(self.flows) # TODO: All functions regarding flows that don't cause side-effects should # be moved into FlowStore. def index(self, f): return self.flows.index(f) def active_flow_count(self): return self.flows.active_count() def add_flow(self, f): """ Add a request to the state. """ self.flows._add(f) return f def update_flow(self, f): """ Add a response to the state. """ self.flows._update(f) return f def delete_flow(self, f): self.flows._remove(f) def load_flows(self, flows): self.flows._extend(flows) def set_limit(self, txt): if txt == self.limit_txt: return if txt: f = filt.parse(txt) if not f: return "Invalid filter expression." self.view._close() self.view = FlowView(self.flows, f) else: self.view._close() self.view = FlowView(self.flows, None) def set_intercept(self, txt): if txt: f = filt.parse(txt) if not f: return "Invalid filter expression." self.intercept = f else: self.intercept = None @property def intercept_txt(self): return getattr(self.intercept, "pattern", None) def clear(self): self.flows._clear() def accept_all(self, master): self.flows.accept_all(master) def backup(self, f): f.backup() self.update_flow(f) def revert(self, f): f.revert() self.update_flow(f) def killall(self, master): self.flows.kill_all(master) class FlowMaster(controller.Master): def __init__(self, server, state): controller.Master.__init__(self, server) self.state = state self.server_playback = None self.client_playback = None self.kill_nonreplay = False self.scripts = [] self.pause_scripts = False self.stickycookie_state = False self.stickycookie_txt = None self.stickyauth_state = False self.stickyauth_txt = None self.anticache = False self.anticomp = False self.stream_large_bodies = False self.refresh_server_playback = False self.replacehooks = ReplaceHooks() self.setheaders = SetHeaders() self.replay_ignore_params = False self.replay_ignore_content = None self.replay_ignore_host = False self.stream = None self.apps = AppRegistry() def start_app(self, host, port): self.apps.add( app.mapp, host, port ) def add_event(self, e, level="info"): """ level: debug, info, error """ pass def unload_scripts(self): for s in self.scripts[:]: self.unload_script(s) def unload_script(self, script_obj): try: script_obj.unload() except script.ScriptError as e: self.add_event("Script error:\n" + str(e), "error") self.scripts.remove(script_obj) def load_script(self, command): """ Loads a script. Returns an error description if something went wrong. 
""" try: s = script.Script(command, self) except script.ScriptError as v: return v.args[0] self.scripts.append(s) def _run_single_script_hook(self, script_obj, name, *args, **kwargs): if script_obj and not self.pause_scripts: try: script_obj.run(name, *args, **kwargs) except script.ScriptError as e: self.add_event("Script error:\n" + str(e), "error") def run_script_hook(self, name, *args, **kwargs): for script_obj in self.scripts: self._run_single_script_hook(script_obj, name, *args, **kwargs) def get_ignore_filter(self): return self.server.config.check_ignore.patterns def set_ignore_filter(self, host_patterns): self.server.config.check_ignore = HostMatcher(host_patterns) def get_tcp_filter(self): return self.server.config.check_tcp.patterns def set_tcp_filter(self, host_patterns): self.server.config.check_tcp = HostMatcher(host_patterns) def set_stickycookie(self, txt): if txt: flt = filt.parse(txt) if not flt: return "Invalid filter expression." self.stickycookie_state = StickyCookieState(flt) self.stickycookie_txt = txt else: self.stickycookie_state = None self.stickycookie_txt = None def set_stream_large_bodies(self, max_size): if max_size is not None: self.stream_large_bodies = StreamLargeBodies(max_size) else: self.stream_large_bodies = False def set_stickyauth(self, txt): if txt: flt = filt.parse(txt) if not flt: return "Invalid filter expression." self.stickyauth_state = StickyAuthState(flt) self.stickyauth_txt = txt else: self.stickyauth_state = None self.stickyauth_txt = None def start_client_playback(self, flows, exit): """ flows: List of flows. """ self.client_playback = ClientPlaybackState(flows, exit) def stop_client_playback(self): self.client_playback = None def start_server_playback( self, flows, kill, headers, exit, nopop, ignore_params, ignore_content, ignore_payload_params, ignore_host): """ flows: List of flows. kill: Boolean, should we kill requests not part of the replay? ignore_params: list of parameters to ignore in server replay ignore_content: true if request content should be ignored in server replay ignore_payload_params: list of content params to ignore in server replay ignore_host: true if request host should be ignored in server replay """ self.server_playback = ServerPlaybackState( headers, flows, exit, nopop, ignore_params, ignore_content, ignore_payload_params, ignore_host) self.kill_nonreplay = kill def stop_server_playback(self): if self.server_playback.exit: self.shutdown() self.server_playback = None def do_server_playback(self, flow): """ This method should be called by child classes in the handle_request handler. Returns True if playback has taken place, None if not. 
""" if self.server_playback: rflow = self.server_playback.next_flow(flow) if not rflow: return None response = HTTPResponse.from_state(rflow.response.get_state()) response.is_replay = True if self.refresh_server_playback: response.refresh() flow.reply(response) if self.server_playback.count() == 0: self.stop_server_playback() return True return None def tick(self, q, timeout): if self.client_playback: e = [ self.client_playback.done(), self.client_playback.exit, self.state.active_flow_count() == 0 ] if all(e): self.shutdown() self.client_playback.tick(self) if self.client_playback.done(): self.client_playback = None return super(FlowMaster, self).tick(q, timeout) def duplicate_flow(self, f): return self.load_flow(f.copy()) def create_request(self, method, scheme, host, port, path): """ this method creates a new artificial and minimalist request also adds it to flowlist """ c = ClientConnection.from_state(dict( address=dict(address=(host, port), use_ipv6=False), clientcert=None )) s = ServerConnection.from_state(dict( address=dict(address=(host, port), use_ipv6=False), state=[], source_address=None, # source_address=dict(address=(host, port), use_ipv6=False), cert=None, sni=host, ssl_established=True )) f = HTTPFlow(c, s) headers = Headers() req = HTTPRequest( "absolute", method, scheme, host, port, path, b"HTTP/1.1", headers, None, None, None, None) f.request = req return self.load_flow(f) def load_flow(self, f): """ Loads a flow, and returns a new flow object. """ if self.server and self.server.config.mode == "reverse": f.request.host = self.server.config.upstream_server.address.host f.request.port = self.server.config.upstream_server.address.port f.request.scheme = re.sub("^https?2", "", self.server.config.upstream_server.scheme) f.reply = controller.DummyReply() if f.request: self.handle_request(f) if f.response: self.handle_responseheaders(f) self.handle_response(f) if f.error: self.handle_error(f) return f def load_flows(self, fr): """ Load flows from a FlowReader object. """ cnt = 0 for i in fr.stream(): cnt += 1 self.load_flow(i) return cnt def load_flows_file(self, path): path = os.path.expanduser(path) try: f = file(path, "rb") freader = FlowReader(f) except IOError as v: raise FlowReadError(v.strerror) return self.load_flows(freader) def process_new_request(self, f): if self.stickycookie_state: self.stickycookie_state.handle_request(f) if self.stickyauth_state: self.stickyauth_state.handle_request(f) if self.anticache: f.request.anticache() if self.anticomp: f.request.anticomp() if self.server_playback: pb = self.do_server_playback(f) if not pb: if self.kill_nonreplay: f.kill(self) else: f.reply() def process_new_response(self, f): if self.stickycookie_state: self.stickycookie_state.handle_response(f) def replay_request(self, f, block=False, run_scripthooks=True): """ Returns None if successful, or error message if not. """ if f.live and run_scripthooks: return "Can't replay live request." if f.intercepted: return "Can't replay while intercepting..." if f.request.content == CONTENT_MISSING: return "Can't replay request with missing content..." 
if f.request: f.backup() f.request.is_replay = True if "Content-Length" in f.request.headers: f.request.headers["Content-Length"] = str(len(f.request.content)) f.response = None f.error = None self.process_new_request(f) rt = RequestReplayThread( self.server.config, f, self.masterq if run_scripthooks else False, self.should_exit ) rt.start() # pragma: no cover if block: rt.join() def handle_log(self, l): self.add_event(l.msg, l.level) l.reply() def handle_clientconnect(self, root_layer): self.run_script_hook("clientconnect", root_layer) root_layer.reply() def handle_clientdisconnect(self, root_layer): self.run_script_hook("clientdisconnect", root_layer) root_layer.reply() def handle_serverconnect(self, server_conn): self.run_script_hook("serverconnect", server_conn) server_conn.reply() def handle_serverdisconnect(self, server_conn): self.run_script_hook("serverdisconnect", server_conn) server_conn.reply() def handle_next_layer(self, top_layer): self.run_script_hook("next_layer", top_layer) top_layer.reply() def handle_error(self, f): self.state.update_flow(f) self.run_script_hook("error", f) if self.client_playback: self.client_playback.clear(f) f.reply() return f def handle_request(self, f): if f.live: app = self.apps.get(f.request) if app: err = app.serve( f, f.client_conn.wfile, **{"mitmproxy.master": self} ) if err: self.add_event("Error in wsgi app. %s" % err, "error") f.reply(Kill) return if f not in self.state.flows: # don't add again on replay self.state.add_flow(f) self.replacehooks.run(f) self.setheaders.run(f) self.run_script_hook("request", f) self.process_new_request(f) return f def handle_responseheaders(self, f): self.run_script_hook("responseheaders", f) try: if self.stream_large_bodies: self.stream_large_bodies.run(f, False) except HttpException: f.reply(Kill) return f.reply() return f def handle_response(self, f): self.state.update_flow(f) self.replacehooks.run(f) self.setheaders.run(f) self.run_script_hook("response", f) if self.client_playback: self.client_playback.clear(f) self.process_new_response(f) if self.stream: self.stream.add(f) return f def handle_intercept(self, f): self.state.update_flow(f) def handle_accept_intercept(self, f): self.state.update_flow(f) def shutdown(self): self.unload_scripts() controller.Master.shutdown(self) if self.stream: for i in self.state.flows: if not i.response: self.stream.add(i) self.stop_stream() def start_stream(self, fp, filt): self.stream = FilteredFlowWriter(fp, filt) def stop_stream(self): self.stream.fo.close() self.stream = None def read_flows_from_paths(paths): """ Given a list of filepaths, read all flows and return a list of them. From a performance perspective, streaming would be advisable - however, if there's an error with one of the files, we want it to be raised immediately. If an error occurs, a FlowReadError will be raised. """ try: flows = [] for path in paths: path = os.path.expanduser(path) with file(path, "rb") as f: flows.extend(FlowReader(f).stream()) except IOError as e: raise FlowReadError(e.strerror) return flows class FlowWriter: def __init__(self, fo): self.fo = fo def add(self, flow): d = flow.get_state() tnetstring.dump(d, self.fo) class FlowReadError(Exception): @property def strerror(self): return self.args[0] class FlowReader: def __init__(self, fo): self.fo = fo def stream(self): """ Yields Flow objects from the dump. 
""" off = 0 try: while True: data = tnetstring.load(self.fo) if tuple(data["version"][:2]) != version.IVERSION[:2]: v = ".".join(str(i) for i in data["version"]) raise FlowReadError( "Incompatible serialized data version: %s" % v ) off = self.fo.tell() yield HTTPFlow.from_state(data) except ValueError as v: # Error is due to EOF if self.fo.tell() == off and self.fo.read() == '': return raise FlowReadError("Invalid data format.") class FilteredFlowWriter: def __init__(self, fo, filt): self.fo = fo self.filt = filt def add(self, f): if self.filt and not f.match(self.filt): return d = f.get_state() tnetstring.dump(d, self.fo)
1
10,889
3) Subscribe to the script change signal in `FlowMaster.__init__`. The event handler should call `self.masterq.put(("script_change", script))`. 4) Add a `handle_script_change` function that, once called, takes the script object and calls `script.reload()`. (A minimal sketch of both steps follows this record.)
mitmproxy-mitmproxy
py
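A minimal sketch of steps 3) and 4) from the review above. It assumes a blinker-style `script_change` signal exposed by the `script` module; that signal name, the `connect` call, and the `_sig_script_change` helper are illustrative assumptions, not the existing mitmproxy API. The queued ("script_change", script) event would be dispatched to `handle_script_change` by the master loop's usual message handling.

class FlowMaster(controller.Master):
    def __init__(self, server, state):
        controller.Master.__init__(self, server)
        self.state = state
        # ... existing initialisation unchanged ...
        # 3) Subscribe to the script change signal; the handler only queues an
        #    event so that the actual reload runs on the master thread.
        script.script_change.connect(self._sig_script_change)  # assumed signal

    def _sig_script_change(self, script_obj):
        # Hand the changed script over to the master queue.
        self.masterq.put(("script_change", script_obj))

    # 4) Called when a ("script_change", script) event is read from masterq;
    #    reloads the script that changed on disk.
    def handle_script_change(self, script_obj):
        try:
            script_obj.reload()
        except script.ScriptError as e:
            self.add_event("Script error:\n" + str(e), "error")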
@@ -287,4 +287,13 @@ public class EmailMessage {
     return this;
   }
 
+
+  public String getBody() {
+    return _body.toString();
+  }
+
+  public String getSubject() {
+    return _subject;
+  }
+
 }
1
/* * Copyright 2012 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.utils; import java.io.File; import java.io.InputStream; import java.net.SocketTimeoutException; import java.util.ArrayList; import java.util.Collection; import java.util.Date; import java.util.List; import java.util.Properties; import javax.activation.DataHandler; import javax.activation.DataSource; import javax.activation.FileDataSource; import javax.mail.BodyPart; import javax.mail.Message; import javax.mail.MessagingException; import javax.mail.Session; import javax.mail.internet.InternetAddress; import javax.mail.internet.MimeBodyPart; import javax.mail.internet.MimeMessage; import javax.mail.internet.MimeMultipart; import org.apache.log4j.Logger; import com.sun.mail.smtp.SMTPTransport; public class EmailMessage { private final Logger logger = Logger.getLogger(EmailMessage.class); private static String protocol = "smtp"; private List<String> _toAddress = new ArrayList<String>(); private String _mailHost; private String _mailUser; private String _mailPassword; private String _subject; private String _fromAddress; private String _mimeType = "text/plain"; private String _tls; private long _totalAttachmentSizeSoFar; private boolean _usesAuth = true; private boolean _enableAttachementEmbedment = true; private StringBuffer _body = new StringBuffer(); private static int _mailTimeout = 10000; private static int _connectionTimeout = 10000; private static long _totalAttachmentMaxSizeInByte = 1024 * 1024 * 1024; // 1 // GB private ArrayList<BodyPart> _attachments = new ArrayList<BodyPart>(); public EmailMessage() { this("localhost", "", ""); } public EmailMessage(String host, String user, String password) { _mailUser = user; _mailHost = host; _mailPassword = password; } public static void setTimeout(int timeoutMillis) { _mailTimeout = timeoutMillis; } public static void setConnectionTimeout(int timeoutMillis) { _connectionTimeout = timeoutMillis; } public static void setTotalAttachmentMaxSize(long sizeInBytes) { if (sizeInBytes < 1) { throw new IllegalArgumentException( "attachment max size can't be 0 or negative"); } _totalAttachmentMaxSizeInByte = sizeInBytes; } public EmailMessage setMailHost(String host) { _mailHost = host; return this; } public EmailMessage setMailUser(String user) { _mailUser = user; return this; } public EmailMessage enableAttachementEmbedment(boolean toEnable) { _enableAttachementEmbedment = toEnable; return this; } public EmailMessage setMailPassword(String password) { _mailPassword = password; return this; } public EmailMessage addAllToAddress(Collection<? 
extends String> addresses) { _toAddress.addAll(addresses); return this; } public EmailMessage addToAddress(String address) { _toAddress.add(address); return this; } public EmailMessage setSubject(String subject) { _subject = subject; return this; } public EmailMessage setFromAddress(String fromAddress) { _fromAddress = fromAddress; return this; } public EmailMessage setTLS(String tls) { _tls = tls; return this; } public EmailMessage setAuth(boolean auth) { _usesAuth = auth; return this; } public EmailMessage addAttachment(File file) throws MessagingException { return addAttachment(file.getName(), file); } public EmailMessage addAttachment(String attachmentName, File file) throws MessagingException { _totalAttachmentSizeSoFar += file.length(); if (_totalAttachmentSizeSoFar > _totalAttachmentMaxSizeInByte) { throw new MessageAttachmentExceededMaximumSizeException( "Adding attachment '" + attachmentName + "' will exceed the allowed maximum size of " + _totalAttachmentMaxSizeInByte); } BodyPart attachmentPart = new MimeBodyPart(); DataSource fileDataSource = new FileDataSource(file); attachmentPart.setDataHandler(new DataHandler(fileDataSource)); attachmentPart.setFileName(attachmentName); _attachments.add(attachmentPart); return this; } public EmailMessage addAttachment(String attachmentName, InputStream stream) throws MessagingException { BodyPart attachmentPart = new MimeBodyPart(stream); attachmentPart.setFileName(attachmentName); _attachments.add(attachmentPart); return this; } private void checkSettings() { if (_mailHost == null) { throw new RuntimeException("Mail host not set."); } if (_fromAddress == null || _fromAddress.length() == 0) { throw new RuntimeException("From address not set."); } if (_subject == null) { throw new RuntimeException("Subject cannot be null"); } if (_toAddress.size() == 0) { throw new RuntimeException("T"); } } public void sendEmail() throws MessagingException { checkSettings(); Properties props = new Properties(); if (_usesAuth) { props.put("mail." + protocol + ".auth", "true"); props.put("mail.user", _mailUser); props.put("mail.password", _mailPassword); } else { props.put("mail." + protocol + ".auth", "false"); } props.put("mail." + protocol + ".host", _mailHost); props.put("mail." + protocol + ".timeout", _mailTimeout); props.put("mail." + protocol + ".connectiontimeout", _connectionTimeout); props.put("mail.smtp.starttls.enable", _tls); props.put("mail.smtp.ssl.trust", _mailHost); Session session = Session.getInstance(props, null); Message message = new MimeMessage(session); InternetAddress from = new InternetAddress(_fromAddress, false); message.setFrom(from); for (String toAddr : _toAddress) message.addRecipient(Message.RecipientType.TO, new InternetAddress( toAddr, false)); message.setSubject(_subject); message.setSentDate(new Date()); if (_attachments.size() > 0) { MimeMultipart multipart = this._enableAttachementEmbedment ? 
new MimeMultipart("related") : new MimeMultipart(); BodyPart messageBodyPart = new MimeBodyPart(); messageBodyPart.setContent(_body.toString(), _mimeType); multipart.addBodyPart(messageBodyPart); // Add attachments for (BodyPart part : _attachments) { multipart.addBodyPart(part); } message.setContent(multipart); } else { message.setContent(_body.toString(), _mimeType); } // Transport transport = session.getTransport(); SMTPTransport t = (SMTPTransport) session.getTransport(protocol); try { connectToSMTPServer(t); } catch (MessagingException ste) { if (ste.getCause() instanceof SocketTimeoutException) { try { // retry on SocketTimeoutException connectToSMTPServer(t); logger.info("Email retry on SocketTimeoutException succeeded"); } catch (MessagingException me) { logger.error("Email retry on SocketTimeoutException failed", me); throw me; } } else { logger.error("Encountered issue while connecting to email server", ste); throw ste; } } t.sendMessage(message, message.getRecipients(Message.RecipientType.TO)); t.close(); } private void connectToSMTPServer(SMTPTransport t) throws MessagingException { if (_usesAuth) { t.connect(_mailHost, _mailUser, _mailPassword); } else { t.connect(); } } public void setBody(String body) { setBody(body, _mimeType); } public void setBody(String body, String mimeType) { _body = new StringBuffer(body); _mimeType = mimeType; } public EmailMessage setMimeType(String mimeType) { _mimeType = mimeType; return this; } public EmailMessage println(Object str) { _body.append(str); return this; } }
1
11,250
This method is for unit testing only, right? How about making it package private? That way readers would know that it is not a public API outside this package, and it would reduce the search space.
azkaban-azkaban
java
@@ -0,0 +1 @@
+package project
1
1
11,308
Accidental? I guess it's the same as any other boilerplate.
lyft-clutch
go
@@ -1,7 +1,10 @@
+from concurrent.futures import ThreadPoolExecutor, TimeoutError
 from pyramid.security import NO_PERMISSION_REQUIRED
 
+from kinto import logger
 from kinto.core import Service
+
 
 
 heartbeat = Service(name="heartbeat", path='/__heartbeat__',
                     description="Server health")
1
from pyramid.security import NO_PERMISSION_REQUIRED

from kinto.core import Service


heartbeat = Service(name="heartbeat", path='/__heartbeat__',
                    description="Server health")


@heartbeat.get(permission=NO_PERMISSION_REQUIRED)
def get_heartbeat(request):
    """Return information about server health."""
    status = {}

    heartbeats = request.registry.heartbeats
    for name, callable in heartbeats.items():
        status[name] = callable(request)

    has_error = not all([v or v is None for v in status.values()])
    if has_error:
        request.response.status = 503

    return status


lbheartbeat = Service(name="lbheartbeat", path='/__lbheartbeat__',
                      description="Web head health")


@lbheartbeat.get(permission=NO_PERMISSION_REQUIRED)
def get_lbheartbeat(request):
    """Return successful healthy response.

    If the load-balancer tries to access this URL and fails, this means the
    Web head is not operational and should be dropped.
    """
    status = {}
    return status
1
9,295
This shadows the `callable` builtin; let's use `function`, `func`, or `callable_` instead. (A minimal sketch follows this record.)
Kinto-kinto
py
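A minimal sketch of the rename suggested in the review above. The body is taken from `get_heartbeat` in the file shown in this record; only the loop variable changes, and `func` is just one of the names the reviewer proposed.

@heartbeat.get(permission=NO_PERMISSION_REQUIRED)
def get_heartbeat(request):
    """Return information about server health."""
    status = {}

    heartbeats = request.registry.heartbeats
    # Renamed from `callable` to `func` so the `callable()` builtin is not shadowed.
    for name, func in heartbeats.items():
        status[name] = func(request)

    has_error = not all([v or v is None for v in status.values()])
    if has_error:
        request.response.status = 503

    return status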
@@ -0,0 +1,8 @@
+# pylint: disable=missing-docstring,expression-not-assigned,too-few-public-methods,pointless-statement
+
+class Unhashable(object):
+    __hash__ = list.__hash__
+
+{}[[]]  # [unhashable-dict-key]
+{}[{}]  # [unhashable-dict-key]
+{}[Unhashable()]  # [unhashable-dict-key]
1
1
10,211
Can you add some good examples as well, i.e. hashable keys such as integers and strings that should not be flagged? (A minimal sketch follows this record.)
PyCQA-pylint
py
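A sketch of how the functional test from the patch above could include "good" (non-flagged) cases alongside the flagged ones; the particular hashable keys chosen here are illustrative, not the committed test. The file is meant to be linted, not executed, so the bare dictionary lookups are intentional.

# pylint: disable=missing-docstring,expression-not-assigned,too-few-public-methods,pointless-statement

class Unhashable(object):
    __hash__ = list.__hash__

# Good examples: hashable keys are not reported.
{}[1]
{}["a string"]
{}[(1, 2)]
{}[frozenset()]

# Bad examples: unhashable keys are reported.
{}[[]]  # [unhashable-dict-key]
{}[{}]  # [unhashable-dict-key]
{}[Unhashable()]  # [unhashable-dict-key]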
@@ -1,3 +1,17 @@
+// Copyright © 2017-2019 The OpenEBS Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
 package command
 
 import (
1
package command

import (
	"reflect"
	"testing"

	"github.com/spf13/cobra"
)

// TestNewCStorPoolMgmt is to test cstor-pool-mgmt command.
func TestNewCStorPoolMgmt(t *testing.T) {
	cases := []struct {
		use string
	}{
		{"start"},
	}
	cmd, err := NewCStorPoolMgmt()
	if err != nil {
		t.Errorf("Unable to Instantiate cstor-pool-mgmt")
	}
	cmds := cmd.Commands()
	if len(cmds) != len(cases) {
		t.Errorf("ExpectedCommands: %d ActualCommands: '%d'", len(cases), len(cmds))
	}
	for i, c := range cases {
		if c.use != cmds[i].Use {
			t.Errorf("ExpectedCommand: '%s' ActualCommand: '%s'", c.use, cmds[i].Use)
		}
	}
}

// TestRun is to test running cstor-pool-mgmt without sub-commands.
func TestRun(t *testing.T) {
	var cmd *cobra.Command
	err := Run(cmd)
	if err != nil {
		t.Errorf("Expected: '%s' Actual: '%s'", "nil", err)
	}
}

// TestNewCmdOptions is to test type of CLI command.
func TestNewCmdOptions(t *testing.T) {
	var expectedCmd *cobra.Command
	gotCmd := NewCmdOptions()
	if reflect.TypeOf(gotCmd) != reflect.TypeOf(expectedCmd) {
		t.Errorf("Expected: '%s' Actual: '%v'", reflect.TypeOf(gotCmd), reflect.TypeOf(expectedCmd))
	}
}
1
14,784
Can we just have 2017 here @kmova, if possible? As I've seen in other projects as well (Kubernetes etc.), they mention only the year when the file was created.
openebs-maya
go
@@ -460,8 +460,7 @@ std::vector<DPFSubstring> DebugPrintf::ParseFormatString(const std::string forma
 
 std::string DebugPrintf::FindFormatString(std::vector<unsigned int> pgm, uint32_t string_id) {
     std::string format_string;
-    SHADER_MODULE_STATE shader;
-    shader.words = pgm;
+    SHADER_MODULE_STATE shader(pgm);
     if (shader.words.size() > 0) {
         for (const auto &insn : shader) {
             if (insn.opcode() == spv::OpString) {
1
/* Copyright (c) 2020-2021 The Khronos Group Inc. * Copyright (c) 2020-2021 Valve Corporation * Copyright (c) 2020-2021 LunarG, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * Author: Tony Barbour <[email protected]> */ #include "debug_printf.h" #include "spirv-tools/optimizer.hpp" #include "spirv-tools/instrument.hpp" #include <iostream> #include "layer_chassis_dispatch.h" #include "sync_utils.h" #include "cmd_buffer_state.h" static const VkShaderStageFlags kShaderStageAllRayTracing = VK_SHADER_STAGE_ANY_HIT_BIT_NV | VK_SHADER_STAGE_CALLABLE_BIT_NV | VK_SHADER_STAGE_CLOSEST_HIT_BIT_NV | VK_SHADER_STAGE_INTERSECTION_BIT_NV | VK_SHADER_STAGE_MISS_BIT_NV | VK_SHADER_STAGE_RAYGEN_BIT_NV; // Convenience function for reporting problems with setting up Debug Printf. template <typename T> void DebugPrintf::ReportSetupProblem(T object, const char *const specific_message) const { LogError(object, "UNASSIGNED-DEBUG-PRINTF ", "Detail: (%s)", specific_message); } // Turn on necessary device features. void DebugPrintf::PreCallRecordCreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *create_info, const VkAllocationCallbacks *pAllocator, VkDevice *pDevice, void *modified_create_info) { DispatchGetPhysicalDeviceFeatures(gpu, &supported_features); VkPhysicalDeviceFeatures features = {}; features.vertexPipelineStoresAndAtomics = true; features.fragmentStoresAndAtomics = true; UtilPreCallRecordCreateDevice(gpu, reinterpret_cast<safe_VkDeviceCreateInfo *>(modified_create_info), supported_features, features); } // Perform initializations that can be done at Create Device time. void DebugPrintf::PostCallRecordCreateDevice(VkPhysicalDevice physicalDevice, const VkDeviceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkDevice *pDevice, VkResult result) { ValidationStateTracker::PostCallRecordCreateDevice(physicalDevice, pCreateInfo, pAllocator, pDevice, result); ValidationObject *device_object = GetLayerDataPtr(get_dispatch_key(*pDevice), layer_data_map); ValidationObject *validation_data = GetValidationObject(device_object->object_dispatch, this->container_type); DebugPrintf *device_debug_printf = static_cast<DebugPrintf *>(validation_data); device_debug_printf->physicalDevice = physicalDevice; device_debug_printf->device = *pDevice; const char *size_string = getLayerOption("khronos_validation.printf_buffer_size"); device_debug_printf->output_buffer_size = *size_string ? atoi(size_string) : 1024; std::string verbose_string = getLayerOption("khronos_validation.printf_verbose"); transform(verbose_string.begin(), verbose_string.end(), verbose_string.begin(), ::tolower); device_debug_printf->verbose = verbose_string.length() ? !verbose_string.compare("true") : false; std::string stdout_string = getLayerOption("khronos_validation.printf_to_stdout"); transform(stdout_string.begin(), stdout_string.end(), stdout_string.begin(), ::tolower); device_debug_printf->use_stdout = stdout_string.length() ? 
!stdout_string.compare("true") : false; if (getenv("DEBUG_PRINTF_TO_STDOUT")) device_debug_printf->use_stdout = true; if (device_debug_printf->phys_dev_props.apiVersion < VK_API_VERSION_1_1) { ReportSetupProblem(device, "Debug Printf requires Vulkan 1.1 or later. Debug Printf disabled."); device_debug_printf->aborted = true; return; } if (!supported_features.fragmentStoresAndAtomics || !supported_features.vertexPipelineStoresAndAtomics) { ReportSetupProblem(device, "Debug Printf requires fragmentStoresAndAtomics and vertexPipelineStoresAndAtomics. " "Debug Printf disabled."); device_debug_printf->aborted = true; return; } if (enabled[gpu_validation]) { ReportSetupProblem(device, "Debug Printf cannot be enabled when gpu assisted validation is enabled. " "Debug Printf disabled."); device_debug_printf->aborted = true; return; } std::vector<VkDescriptorSetLayoutBinding> bindings; VkDescriptorSetLayoutBinding binding = {3, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1, VK_SHADER_STAGE_ALL_GRAPHICS | VK_SHADER_STAGE_MESH_BIT_NV | VK_SHADER_STAGE_TASK_BIT_NV | VK_SHADER_STAGE_COMPUTE_BIT | kShaderStageAllRayTracing, NULL}; bindings.push_back(binding); UtilPostCallRecordCreateDevice(pCreateInfo, bindings, device_debug_printf, device_debug_printf->phys_dev_props); } void DebugPrintf::PreCallRecordDestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator) { UtilPreCallRecordDestroyDevice(this); ValidationStateTracker::PreCallRecordDestroyDevice(device, pAllocator); // State Tracker can end up making vma calls through callbacks - don't destroy allocator until ST is done if (vmaAllocator) { vmaDestroyAllocator(vmaAllocator); } desc_set_manager.reset(); } // Modify the pipeline layout to include our debug descriptor set and any needed padding with the dummy descriptor set. void DebugPrintf::PreCallRecordCreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkPipelineLayout *pPipelineLayout, void *cpl_state_data) { if (aborted) { return; } create_pipeline_layout_api_state *cpl_state = reinterpret_cast<create_pipeline_layout_api_state *>(cpl_state_data); if (cpl_state->modified_create_info.setLayoutCount >= adjusted_max_desc_sets) { std::ostringstream strm; strm << "Pipeline Layout conflict with validation's descriptor set at slot " << desc_set_bind_index << ". " << "Application has too many descriptor sets in the pipeline layout to continue with debug printf. " << "Not modifying the pipeline layout. " << "Instrumented shaders are replaced with non-instrumented shaders."; ReportSetupProblem(device, strm.str().c_str()); } else { UtilPreCallRecordCreatePipelineLayout(cpl_state, this, pCreateInfo); } } void DebugPrintf::PostCallRecordCreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkPipelineLayout *pPipelineLayout, VkResult result) { ValidationStateTracker::PostCallRecordCreatePipelineLayout(device, pCreateInfo, pAllocator, pPipelineLayout, result); if (result != VK_SUCCESS) { ReportSetupProblem(device, "Unable to create pipeline layout. Device could become unstable."); aborted = true; } } // Free the device memory and descriptor set associated with a command buffer. 
void DebugPrintf::DestroyBuffer(DPFBufferInfo &buffer_info) { vmaDestroyBuffer(vmaAllocator, buffer_info.output_mem_block.buffer, buffer_info.output_mem_block.allocation); if (buffer_info.desc_set != VK_NULL_HANDLE) { desc_set_manager->PutBackDescriptorSet(buffer_info.desc_pool, buffer_info.desc_set); } } // Just gives a warning about a possible deadlock. bool DebugPrintf::PreCallValidateCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers, uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) const { if (srcStageMask & VK_PIPELINE_STAGE_HOST_BIT) { ReportSetupProblem(commandBuffer, "CmdWaitEvents recorded with VK_PIPELINE_STAGE_HOST_BIT set. " "Debug Printf waits on queue completion. " "This wait could block the host's signaling of this event, resulting in deadlock."); } return false; } bool DebugPrintf::PreCallValidateCmdWaitEvents2KHR(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents, const VkDependencyInfoKHR *pDependencyInfos) const { VkPipelineStageFlags2KHR srcStageMask = 0; for (uint32_t i = 0; i < eventCount; i++) { auto stage_masks = sync_utils::GetGlobalStageMasks(pDependencyInfos[i]); srcStageMask = stage_masks.src; } if (srcStageMask & VK_PIPELINE_STAGE_HOST_BIT) { ReportSetupProblem(commandBuffer, "CmdWaitEvents2KHR recorded with VK_PIPELINE_STAGE_HOST_BIT set. " "Debug Printf waits on queue completion. " "This wait could block the host's signaling of this event, resulting in deadlock."); } return false; } void DebugPrintf::PreCallRecordCreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count, const VkGraphicsPipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines, void *cgpl_state_data) { if (aborted) return; std::vector<safe_VkGraphicsPipelineCreateInfo> new_pipeline_create_infos; create_graphics_pipeline_api_state *cgpl_state = reinterpret_cast<create_graphics_pipeline_api_state *>(cgpl_state_data); UtilPreCallRecordPipelineCreations(count, pCreateInfos, pAllocator, pPipelines, cgpl_state->pipe_state, &new_pipeline_create_infos, VK_PIPELINE_BIND_POINT_GRAPHICS, this); cgpl_state->printf_create_infos = new_pipeline_create_infos; cgpl_state->pCreateInfos = reinterpret_cast<VkGraphicsPipelineCreateInfo *>(cgpl_state->printf_create_infos.data()); } void DebugPrintf::PreCallRecordCreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count, const VkComputePipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines, void *ccpl_state_data) { if (aborted) return; std::vector<safe_VkComputePipelineCreateInfo> new_pipeline_create_infos; auto *ccpl_state = reinterpret_cast<create_compute_pipeline_api_state *>(ccpl_state_data); UtilPreCallRecordPipelineCreations(count, pCreateInfos, pAllocator, pPipelines, ccpl_state->pipe_state, &new_pipeline_create_infos, VK_PIPELINE_BIND_POINT_COMPUTE, this); ccpl_state->printf_create_infos = new_pipeline_create_infos; ccpl_state->pCreateInfos = reinterpret_cast<VkComputePipelineCreateInfo *>(ccpl_state->printf_create_infos.data()); } void DebugPrintf::PreCallRecordCreateRayTracingPipelinesNV(VkDevice device, VkPipelineCache pipelineCache, uint32_t count, const VkRayTracingPipelineCreateInfoNV 
*pCreateInfos, const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines, void *crtpl_state_data) { if (aborted) return; std::vector<safe_VkRayTracingPipelineCreateInfoCommon> new_pipeline_create_infos; auto *crtpl_state = reinterpret_cast<create_ray_tracing_pipeline_api_state *>(crtpl_state_data); UtilPreCallRecordPipelineCreations(count, pCreateInfos, pAllocator, pPipelines, crtpl_state->pipe_state, &new_pipeline_create_infos, VK_PIPELINE_BIND_POINT_RAY_TRACING_NV, this); crtpl_state->printf_create_infos = new_pipeline_create_infos; crtpl_state->pCreateInfos = reinterpret_cast<VkRayTracingPipelineCreateInfoNV *>(crtpl_state->printf_create_infos.data()); } void DebugPrintf::PreCallRecordCreateRayTracingPipelinesKHR(VkDevice device, VkDeferredOperationKHR deferredOperation, VkPipelineCache pipelineCache, uint32_t count, const VkRayTracingPipelineCreateInfoKHR *pCreateInfos, const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines, void *crtpl_state_data) { if (aborted) return; std::vector<safe_VkRayTracingPipelineCreateInfoCommon> new_pipeline_create_infos; auto *crtpl_state = reinterpret_cast<create_ray_tracing_pipeline_khr_api_state *>(crtpl_state_data); UtilPreCallRecordPipelineCreations(count, pCreateInfos, pAllocator, pPipelines, crtpl_state->pipe_state, &new_pipeline_create_infos, VK_PIPELINE_BIND_POINT_RAY_TRACING_KHR, this); crtpl_state->printf_create_infos = new_pipeline_create_infos; crtpl_state->pCreateInfos = reinterpret_cast<VkRayTracingPipelineCreateInfoKHR *>(crtpl_state->printf_create_infos.data()); } void DebugPrintf::PostCallRecordCreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count, const VkGraphicsPipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines, VkResult result, void *cgpl_state_data) { ValidationStateTracker::PostCallRecordCreateGraphicsPipelines(device, pipelineCache, count, pCreateInfos, pAllocator, pPipelines, result, cgpl_state_data); if (aborted) return; create_graphics_pipeline_api_state *cgpl_state = reinterpret_cast<create_graphics_pipeline_api_state *>(cgpl_state_data); UtilCopyCreatePipelineFeedbackData(count, pCreateInfos, cgpl_state->printf_create_infos.data()); UtilPostCallRecordPipelineCreations(count, pCreateInfos, pAllocator, pPipelines, VK_PIPELINE_BIND_POINT_GRAPHICS, this); } void DebugPrintf::PostCallRecordCreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count, const VkComputePipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines, VkResult result, void *ccpl_state_data) { ValidationStateTracker::PostCallRecordCreateComputePipelines(device, pipelineCache, count, pCreateInfos, pAllocator, pPipelines, result, ccpl_state_data); if (aborted) return; create_compute_pipeline_api_state *ccpl_state = reinterpret_cast<create_compute_pipeline_api_state *>(ccpl_state_data); UtilCopyCreatePipelineFeedbackData(count, pCreateInfos, ccpl_state->printf_create_infos.data()); UtilPostCallRecordPipelineCreations(count, pCreateInfos, pAllocator, pPipelines, VK_PIPELINE_BIND_POINT_COMPUTE, this); } void DebugPrintf::PostCallRecordCreateRayTracingPipelinesNV(VkDevice device, VkPipelineCache pipelineCache, uint32_t count, const VkRayTracingPipelineCreateInfoNV *pCreateInfos, const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines, VkResult result, void *crtpl_state_data) { auto *crtpl_state = reinterpret_cast<create_ray_tracing_pipeline_khr_api_state *>(crtpl_state_data); 
ValidationStateTracker::PostCallRecordCreateRayTracingPipelinesNV(device, pipelineCache, count, pCreateInfos, pAllocator, pPipelines, result, crtpl_state_data); if (aborted) return; UtilCopyCreatePipelineFeedbackData(count, pCreateInfos, crtpl_state->printf_create_infos.data()); UtilPostCallRecordPipelineCreations(count, pCreateInfos, pAllocator, pPipelines, VK_PIPELINE_BIND_POINT_RAY_TRACING_NV, this); } void DebugPrintf::PostCallRecordCreateRayTracingPipelinesKHR(VkDevice device, VkDeferredOperationKHR deferredOperation, VkPipelineCache pipelineCache, uint32_t count, const VkRayTracingPipelineCreateInfoKHR *pCreateInfos, const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines, VkResult result, void *crtpl_state_data) { auto *crtpl_state = reinterpret_cast<create_ray_tracing_pipeline_khr_api_state *>(crtpl_state_data); ValidationStateTracker::PostCallRecordCreateRayTracingPipelinesKHR( device, deferredOperation, pipelineCache, count, pCreateInfos, pAllocator, pPipelines, result, crtpl_state_data); if (aborted) return; UtilCopyCreatePipelineFeedbackData(count, pCreateInfos, crtpl_state->printf_create_infos.data()); UtilPostCallRecordPipelineCreations(count, pCreateInfos, pAllocator, pPipelines, VK_PIPELINE_BIND_POINT_RAY_TRACING_KHR, this); } // Remove all the shader trackers associated with this destroyed pipeline. void DebugPrintf::PreCallRecordDestroyPipeline(VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks *pAllocator) { for (auto it = shader_map.begin(); it != shader_map.end();) { if (it->second.pipeline == pipeline) { it = shader_map.erase(it); } else { ++it; } } ValidationStateTracker::PreCallRecordDestroyPipeline(device, pipeline, pAllocator); } // Call the SPIR-V Optimizer to run the instrumentation pass on the shader. bool DebugPrintf::InstrumentShader(const VkShaderModuleCreateInfo *pCreateInfo, std::vector<unsigned int> &new_pgm, uint32_t *unique_shader_id) { if (aborted) return false; if (pCreateInfo->pCode[0] != spv::MagicNumber) return false; // Load original shader SPIR-V uint32_t num_words = static_cast<uint32_t>(pCreateInfo->codeSize / 4); new_pgm.clear(); new_pgm.reserve(num_words); new_pgm.insert(new_pgm.end(), &pCreateInfo->pCode[0], &pCreateInfo->pCode[num_words]); // Call the optimizer to instrument the shader. // Use the unique_shader_module_id as a shader ID so we can look up its handle later in the shader_map. 
// If descriptor indexing is enabled, enable length checks and updated descriptor checks using namespace spvtools; spv_target_env target_env = PickSpirvEnv(api_version, IsExtEnabled(device_extensions.vk_khr_spirv_1_4)); spvtools::ValidatorOptions val_options; AdjustValidatorOptions(device_extensions, enabled_features, val_options); spvtools::OptimizerOptions opt_options; opt_options.set_run_validator(true); opt_options.set_validator_options(val_options); Optimizer optimizer(target_env); const spvtools::MessageConsumer debug_printf_console_message_consumer = [this](spv_message_level_t level, const char *, const spv_position_t &position, const char *message) -> void { switch (level) { case SPV_MSG_FATAL: case SPV_MSG_INTERNAL_ERROR: case SPV_MSG_ERROR: this->LogError(this->device, "UNASSIGNED-Debug-Printf", "Error during shader instrumentation: line %zu: %s", position.index, message); break; default: break; } }; optimizer.SetMessageConsumer(debug_printf_console_message_consumer); optimizer.RegisterPass(CreateInstDebugPrintfPass(desc_set_bind_index, unique_shader_module_id)); bool pass = optimizer.Run(new_pgm.data(), new_pgm.size(), &new_pgm, opt_options); if (!pass) { ReportSetupProblem(device, "Failure to instrument shader. Proceeding with non-instrumented shader."); } *unique_shader_id = unique_shader_module_id++; return pass; } // Create the instrumented shader data to provide to the driver. void DebugPrintf::PreCallRecordCreateShaderModule(VkDevice device, const VkShaderModuleCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkShaderModule *pShaderModule, void *csm_state_data) { create_shader_module_api_state *csm_state = reinterpret_cast<create_shader_module_api_state *>(csm_state_data); bool pass = InstrumentShader(pCreateInfo, csm_state->instrumented_pgm, &csm_state->unique_shader_id); if (pass) { csm_state->instrumented_create_info.pCode = csm_state->instrumented_pgm.data(); csm_state->instrumented_create_info.codeSize = csm_state->instrumented_pgm.size() * sizeof(unsigned int); } } vartype vartype_lookup(char intype) { switch (intype) { case 'd': case 'i': return varsigned; break; case 'f': case 'F': case 'a': case 'A': case 'e': case 'E': case 'g': case 'G': return varfloat; break; case 'u': case 'x': case 'o': default: return varunsigned; break; } } std::vector<DPFSubstring> DebugPrintf::ParseFormatString(const std::string format_string) { const char types[] = {'d', 'i', 'o', 'u', 'x', 'X', 'a', 'A', 'e', 'E', 'f', 'F', 'g', 'G', 'v', '\0'}; std::vector<DPFSubstring> parsed_strings; size_t pos = 0; size_t begin = 0; size_t percent = 0; while (begin < format_string.length()) { DPFSubstring substring; // Find a percent sign pos = percent = format_string.find_first_of('%', pos); if (pos == std::string::npos) { // End of the format string Push the rest of the characters substring.string = format_string.substr(begin, format_string.length()); substring.needs_value = false; parsed_strings.push_back(substring); break; } pos++; if (format_string[pos] == '%') { pos++; continue; // %% - skip it } // Find the type of the value pos = format_string.find_first_of(types, pos); if (pos == format_string.npos) { // This really shouldn't happen with a legal value string pos = format_string.length(); } else { char tempstring[32]; int count = 0; std::string specifier = {}; if (format_string[pos] == 'v') { // Vector must be of size 2, 3, or 4 // and format %v<size><type> specifier = format_string.substr(percent, pos - percent); count = atoi(&format_string[pos + 1]); pos += 2; // skip 
v<count>, handle long specifier.push_back(format_string[pos]); if (format_string[pos + 1] == 'l') { specifier.push_back('l'); pos++; } // Take the preceding characters, and the percent through the type substring.string = format_string.substr(begin, percent - begin); substring.string += specifier; substring.needs_value = true; substring.type = vartype_lookup(specifier.back()); parsed_strings.push_back(substring); // Continue with a comma separated list sprintf(tempstring, ", %s", specifier.c_str()); substring.string = tempstring; for (int i = 0; i < (count - 1); i++) { parsed_strings.push_back(substring); } } else { // Single non-vector value if (format_string[pos + 1] == 'l') pos++; // Save long size substring.string = format_string.substr(begin, pos - begin + 1); substring.needs_value = true; substring.type = vartype_lookup(format_string[pos]); parsed_strings.push_back(substring); } begin = pos + 1; } } return parsed_strings; } std::string DebugPrintf::FindFormatString(std::vector<unsigned int> pgm, uint32_t string_id) { std::string format_string; SHADER_MODULE_STATE shader; shader.words = pgm; if (shader.words.size() > 0) { for (const auto &insn : shader) { if (insn.opcode() == spv::OpString) { uint32_t offset = insn.offset(); if (pgm[offset + 1] == string_id) { format_string = reinterpret_cast<char *>(&pgm[offset + 2]); break; } } } } return format_string; } // GCC and clang don't like using variables as format strings in sprintf. // #pragma GCC is recognized by both compilers #if defined(__GNUC__) || defined(__clang__) #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wformat-security" #endif void snprintf_with_malloc(std::stringstream &shader_message, DPFSubstring substring, size_t needed, void *values) { char *buffer = static_cast<char *>(malloc((needed + 1) * sizeof(char))); // Add 1 for terminator if (substring.longval) { snprintf(buffer, needed, substring.string.c_str(), substring.longval); } else if (!substring.needs_value) { snprintf(buffer, needed, substring.string.c_str()); } else { switch (substring.type) { case varunsigned: needed = snprintf(buffer, needed, substring.string.c_str(), *static_cast<uint32_t *>(values) - 1); break; case varsigned: needed = snprintf(buffer, needed, substring.string.c_str(), *static_cast<int32_t *>(values) - 1); break; case varfloat: needed = snprintf(buffer, needed, substring.string.c_str(), *static_cast<float *>(values) - 1); break; } } shader_message << buffer; free(buffer); } void DebugPrintf::AnalyzeAndGenerateMessages(VkCommandBuffer command_buffer, VkQueue queue, DPFBufferInfo &buffer_info, uint32_t operation_index, uint32_t *const debug_output_buffer) { // Word Content // 0 Size of output record, including this word // 1 Shader ID // 2 Instruction Position // 3 Stage Ordinal // 4 Stage - specific Info Word 0 // 5 Stage - specific Info Word 1 // 6 Stage - specific Info Word 2 // 7 Printf Format String Id // 8 Printf Values Word 0 (optional) // 9 Printf Values Word 1 (optional) uint32_t expect = debug_output_buffer[0]; if (!expect) return; uint32_t index = 1; while (debug_output_buffer[index]) { std::stringstream shader_message; VkShaderModule shader_module_handle = VK_NULL_HANDLE; VkPipeline pipeline_handle = VK_NULL_HANDLE; std::vector<unsigned int> pgm; DPFOutputRecord *debug_record = reinterpret_cast<DPFOutputRecord *>(&debug_output_buffer[index]); // Lookup the VkShaderModule handle and SPIR-V code used to create the shader, using the unique shader ID value returned // by the instrumented shader. 
auto it = shader_map.find(debug_record->shader_id); if (it != shader_map.end()) { shader_module_handle = it->second.shader_module; pipeline_handle = it->second.pipeline; pgm = it->second.pgm; } // Search through the shader source for the printf format string for this invocation auto format_string = FindFormatString(pgm, debug_record->format_string_id); // Break the format string into strings with 1 or 0 value auto format_substrings = ParseFormatString(format_string); void *values = static_cast<void *>(&debug_record->values); const uint32_t static_size = 1024; // Sprintf each format substring into a temporary string then add that to the message for (auto &substring : format_substrings) { char temp_string[static_size]; size_t needed = 0; std::vector<std::string> format_strings = { "%ul", "%lu", "%lx" }; size_t ul_pos = 0; bool print_hex = true; for (auto ul_string : format_strings) { ul_pos = substring.string.find(ul_string); if (ul_pos != std::string::npos) { if (ul_string == "%lu") print_hex = false; break; } } if (ul_pos != std::string::npos) { // Unsigned 64 bit value substring.longval = *static_cast<uint64_t *>(values); values = static_cast<uint64_t *>(values) + 1; if (print_hex) { substring.string.replace(ul_pos + 1, 2, PRIx64); } else { substring.string.replace(ul_pos + 1, 2, PRIu64); } needed = snprintf(temp_string, static_size, substring.string.c_str(), substring.longval); } else { if (substring.needs_value) { switch (substring.type) { case varunsigned: needed = snprintf(temp_string, static_size, substring.string.c_str(), *static_cast<uint32_t *>(values)); break; case varsigned: needed = snprintf(temp_string, static_size, substring.string.c_str(), *static_cast<int32_t *>(values)); break; case varfloat: needed = snprintf(temp_string, static_size, substring.string.c_str(), *static_cast<float *>(values)); break; } values = static_cast<uint32_t *>(values) + 1; } else { needed = snprintf(temp_string, static_size, substring.string.c_str()); } } if (needed < static_size) { shader_message << temp_string; } else { // Static buffer not big enough for message, use malloc to get enough snprintf_with_malloc(shader_message, substring, needed, values); } } if (verbose) { std::string stage_message; std::string common_message; std::string filename_message; std::string source_message; UtilGenerateStageMessage(&debug_output_buffer[index], stage_message); UtilGenerateCommonMessage(report_data, command_buffer, &debug_output_buffer[index], shader_module_handle, pipeline_handle, buffer_info.pipeline_bind_point, operation_index, common_message); UtilGenerateSourceMessages(pgm, &debug_output_buffer[index], true, filename_message, source_message); if (use_stdout) { std::cout << "UNASSIGNED-DEBUG-PRINTF " << common_message.c_str() << " " << stage_message.c_str() << " " << shader_message.str().c_str() << " " << filename_message.c_str() << " " << source_message.c_str(); } else { LogInfo(queue, "UNASSIGNED-DEBUG-PRINTF", "%s %s %s %s%s", common_message.c_str(), stage_message.c_str(), shader_message.str().c_str(), filename_message.c_str(), source_message.c_str()); } } else { if (use_stdout) { std::cout << shader_message.str(); } else { // Don't let LogInfo process any '%'s in the string LogInfo(device, "UNASSIGNED-DEBUG-PRINTF", "%s", shader_message.str().c_str()); } } index += debug_record->size; } if ((index - 1) != expect) { LogWarning(device, "UNASSIGNED-DEBUG-PRINTF", "WARNING - Debug Printf message was truncated, likely due to a buffer size that was too small for the message"); } memset(debug_output_buffer, 
0, 4 * (debug_output_buffer[0] + 1)); } #if defined(__GNUC__) #pragma GCC diagnostic pop #endif bool DebugPrintf::CommandBufferNeedsProcessing(VkCommandBuffer command_buffer) { bool buffers_present = false; auto cb_node = GetCBState(command_buffer); if (GetBufferInfo(cb_node).size()) { buffers_present = true; } for (const auto *secondaryCmdBuffer : cb_node->linkedCommandBuffers) { if (GetBufferInfo(secondaryCmdBuffer).size()) { buffers_present = true; } } return buffers_present; } void DebugPrintf::ProcessCommandBuffer(VkQueue queue, VkCommandBuffer command_buffer) { auto cb_node = GetCBState(command_buffer); UtilProcessInstrumentationBuffer(queue, cb_node, this); for (auto *secondary_cmd_buffer : cb_node->linkedCommandBuffers) { UtilProcessInstrumentationBuffer(queue, secondary_cmd_buffer, this); } } // Issue a memory barrier to make GPU-written data available to host. // Wait for the queue to complete execution. // Check the debug buffers for all the command buffers that were submitted. void DebugPrintf::PostCallRecordQueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence, VkResult result) { ValidationStateTracker::PostCallRecordQueueSubmit(queue, submitCount, pSubmits, fence, result); if (aborted || (result != VK_SUCCESS)) return; bool buffers_present = false; // Don't QueueWaitIdle if there's nothing to process for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) { const VkSubmitInfo *submit = &pSubmits[submit_idx]; for (uint32_t i = 0; i < submit->commandBufferCount; i++) { buffers_present |= CommandBufferNeedsProcessing(submit->pCommandBuffers[i]); } } if (!buffers_present) return; UtilSubmitBarrier(queue, this); DispatchQueueWaitIdle(queue); for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) { const VkSubmitInfo *submit = &pSubmits[submit_idx]; for (uint32_t i = 0; i < submit->commandBufferCount; i++) { ProcessCommandBuffer(queue, submit->pCommandBuffers[i]); } } } void DebugPrintf::PostCallRecordQueueSubmit2KHR(VkQueue queue, uint32_t submitCount, const VkSubmitInfo2KHR *pSubmits, VkFence fence, VkResult result) { ValidationStateTracker::PostCallRecordQueueSubmit2KHR(queue, submitCount, pSubmits, fence, result); if (aborted || (result != VK_SUCCESS)) return; bool buffers_present = false; // Don't QueueWaitIdle if there's nothing to process for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) { const auto *submit = &pSubmits[submit_idx]; for (uint32_t i = 0; i < submit->commandBufferInfoCount; i++) { buffers_present |= CommandBufferNeedsProcessing(submit->pCommandBufferInfos[i].commandBuffer); } } if (!buffers_present) return; UtilSubmitBarrier(queue, this); DispatchQueueWaitIdle(queue); for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) { const VkSubmitInfo2KHR *submit = &pSubmits[submit_idx]; for (uint32_t i = 0; i < submit->commandBufferInfoCount; i++) { ProcessCommandBuffer(queue, submit->pCommandBufferInfos[i].commandBuffer); } } } void DebugPrintf::PreCallRecordCmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount, uint32_t firstVertex, uint32_t firstInstance) { AllocateDebugPrintfResources(commandBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS); } void DebugPrintf::PreCallRecordCmdDrawMultiEXT(VkCommandBuffer commandBuffer, uint32_t drawCount, const VkMultiDrawInfoEXT *pVertexInfo, uint32_t instanceCount, uint32_t firstInstance, uint32_t stride) { for(uint32_t i = 0; i < drawCount; i++) { AllocateDebugPrintfResources(commandBuffer, 
VK_PIPELINE_BIND_POINT_GRAPHICS); } } void DebugPrintf::PreCallRecordCmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount, uint32_t instanceCount, uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance) { AllocateDebugPrintfResources(commandBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS); } void DebugPrintf::PreCallRecordCmdDrawMultiIndexedEXT(VkCommandBuffer commandBuffer, uint32_t drawCount, const VkMultiDrawIndexedInfoEXT *pIndexInfo, uint32_t instanceCount, uint32_t firstInstance, uint32_t stride, const int32_t *pVertexOffset) { for (uint32_t i = 0; i < drawCount; i++) { AllocateDebugPrintfResources(commandBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS); } } void DebugPrintf::PreCallRecordCmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count, uint32_t stride) { AllocateDebugPrintfResources(commandBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS); } void DebugPrintf::PreCallRecordCmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count, uint32_t stride) { AllocateDebugPrintfResources(commandBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS); } void DebugPrintf::PreCallRecordCmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z) { AllocateDebugPrintfResources(commandBuffer, VK_PIPELINE_BIND_POINT_COMPUTE); } void DebugPrintf::PreCallRecordCmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset) { AllocateDebugPrintfResources(commandBuffer, VK_PIPELINE_BIND_POINT_COMPUTE); } void DebugPrintf::PreCallRecordCmdDispatchBase(VkCommandBuffer commandBuffer, uint32_t baseGroupX, uint32_t baseGroupY, uint32_t baseGroupZ, uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ) { AllocateDebugPrintfResources(commandBuffer, VK_PIPELINE_BIND_POINT_COMPUTE); } void DebugPrintf::PreCallRecordCmdDispatchBaseKHR(VkCommandBuffer commandBuffer, uint32_t baseGroupX, uint32_t baseGroupY, uint32_t baseGroupZ, uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ) { AllocateDebugPrintfResources(commandBuffer, VK_PIPELINE_BIND_POINT_COMPUTE); } void DebugPrintf::PreCallRecordCmdDrawIndirectCountKHR(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride) { ValidationStateTracker::PreCallRecordCmdDrawIndirectCountKHR(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride); AllocateDebugPrintfResources(commandBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS); } void DebugPrintf::PreCallRecordCmdDrawIndirectCount(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride) { ValidationStateTracker::PreCallRecordCmdDrawIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride); AllocateDebugPrintfResources(commandBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS); } void DebugPrintf::PreCallRecordCmdDrawIndexedIndirectCountKHR(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride) { ValidationStateTracker::PreCallRecordCmdDrawIndexedIndirectCountKHR(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride); AllocateDebugPrintfResources(commandBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS); } void DebugPrintf::PreCallRecordCmdDrawIndexedIndirectCount(VkCommandBuffer 
commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride) { ValidationStateTracker::PreCallRecordCmdDrawIndexedIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride); AllocateDebugPrintfResources(commandBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS); } void DebugPrintf::PreCallRecordCmdDrawIndirectByteCountEXT(VkCommandBuffer commandBuffer, uint32_t instanceCount, uint32_t firstInstance, VkBuffer counterBuffer, VkDeviceSize counterBufferOffset, uint32_t counterOffset, uint32_t vertexStride) { ValidationStateTracker::PreCallRecordCmdDrawIndirectByteCountEXT(commandBuffer, instanceCount, firstInstance, counterBuffer, counterBufferOffset, counterOffset, vertexStride); AllocateDebugPrintfResources(commandBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS); } void DebugPrintf::PreCallRecordCmdDrawMeshTasksNV(VkCommandBuffer commandBuffer, uint32_t taskCount, uint32_t firstTask) { ValidationStateTracker::PreCallRecordCmdDrawMeshTasksNV(commandBuffer, taskCount, firstTask); AllocateDebugPrintfResources(commandBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS); } void DebugPrintf::PreCallRecordCmdDrawMeshTasksIndirectNV(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t drawCount, uint32_t stride) { ValidationStateTracker::PreCallRecordCmdDrawMeshTasksIndirectNV(commandBuffer, buffer, offset, drawCount, stride); AllocateDebugPrintfResources(commandBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS); } void DebugPrintf::PreCallRecordCmdDrawMeshTasksIndirectCountNV(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride) { ValidationStateTracker::PreCallRecordCmdDrawMeshTasksIndirectCountNV(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride); AllocateDebugPrintfResources(commandBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS); } void DebugPrintf::PreCallRecordCmdTraceRaysNV(VkCommandBuffer commandBuffer, VkBuffer raygenShaderBindingTableBuffer, VkDeviceSize raygenShaderBindingOffset, VkBuffer missShaderBindingTableBuffer, VkDeviceSize missShaderBindingOffset, VkDeviceSize missShaderBindingStride, VkBuffer hitShaderBindingTableBuffer, VkDeviceSize hitShaderBindingOffset, VkDeviceSize hitShaderBindingStride, VkBuffer callableShaderBindingTableBuffer, VkDeviceSize callableShaderBindingOffset, VkDeviceSize callableShaderBindingStride, uint32_t width, uint32_t height, uint32_t depth) { AllocateDebugPrintfResources(commandBuffer, VK_PIPELINE_BIND_POINT_RAY_TRACING_NV); } void DebugPrintf::PostCallRecordCmdTraceRaysNV(VkCommandBuffer commandBuffer, VkBuffer raygenShaderBindingTableBuffer, VkDeviceSize raygenShaderBindingOffset, VkBuffer missShaderBindingTableBuffer, VkDeviceSize missShaderBindingOffset, VkDeviceSize missShaderBindingStride, VkBuffer hitShaderBindingTableBuffer, VkDeviceSize hitShaderBindingOffset, VkDeviceSize hitShaderBindingStride, VkBuffer callableShaderBindingTableBuffer, VkDeviceSize callableShaderBindingOffset, VkDeviceSize callableShaderBindingStride, uint32_t width, uint32_t height, uint32_t depth) { CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); cb_state->hasTraceRaysCmd = true; } void DebugPrintf::PreCallRecordCmdTraceRaysKHR(VkCommandBuffer commandBuffer, const VkStridedDeviceAddressRegionKHR *pRaygenShaderBindingTable, const VkStridedDeviceAddressRegionKHR *pMissShaderBindingTable, const VkStridedDeviceAddressRegionKHR 
*pHitShaderBindingTable, const VkStridedDeviceAddressRegionKHR *pCallableShaderBindingTable, uint32_t width, uint32_t height, uint32_t depth) { AllocateDebugPrintfResources(commandBuffer, VK_PIPELINE_BIND_POINT_RAY_TRACING_KHR); } void DebugPrintf::PostCallRecordCmdTraceRaysKHR(VkCommandBuffer commandBuffer, const VkStridedDeviceAddressRegionKHR *pRaygenShaderBindingTable, const VkStridedDeviceAddressRegionKHR *pMissShaderBindingTable, const VkStridedDeviceAddressRegionKHR *pHitShaderBindingTable, const VkStridedDeviceAddressRegionKHR *pCallableShaderBindingTable, uint32_t width, uint32_t height, uint32_t depth) { CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); cb_state->hasTraceRaysCmd = true; } void DebugPrintf::PreCallRecordCmdTraceRaysIndirectKHR(VkCommandBuffer commandBuffer, const VkStridedDeviceAddressRegionKHR *pRaygenShaderBindingTable, const VkStridedDeviceAddressRegionKHR *pMissShaderBindingTable, const VkStridedDeviceAddressRegionKHR *pHitShaderBindingTable, const VkStridedDeviceAddressRegionKHR *pCallableShaderBindingTable, VkDeviceAddress indirectDeviceAddress) { AllocateDebugPrintfResources(commandBuffer, VK_PIPELINE_BIND_POINT_RAY_TRACING_KHR); } void DebugPrintf::PostCallRecordCmdTraceRaysIndirectKHR(VkCommandBuffer commandBuffer, const VkStridedDeviceAddressRegionKHR *pRaygenShaderBindingTable, const VkStridedDeviceAddressRegionKHR *pMissShaderBindingTable, const VkStridedDeviceAddressRegionKHR *pHitShaderBindingTable, const VkStridedDeviceAddressRegionKHR *pCallableShaderBindingTable, VkDeviceAddress indirectDeviceAddress) { CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); cb_state->hasTraceRaysCmd = true; } void DebugPrintf::AllocateDebugPrintfResources(const VkCommandBuffer cmd_buffer, const VkPipelineBindPoint bind_point) { if (bind_point != VK_PIPELINE_BIND_POINT_GRAPHICS && bind_point != VK_PIPELINE_BIND_POINT_COMPUTE && bind_point != VK_PIPELINE_BIND_POINT_RAY_TRACING_NV) { return; } VkResult result; if (aborted) return; std::vector<VkDescriptorSet> desc_sets; VkDescriptorPool desc_pool = VK_NULL_HANDLE; result = desc_set_manager->GetDescriptorSets(1, &desc_pool, debug_desc_layout, &desc_sets); assert(result == VK_SUCCESS); if (result != VK_SUCCESS) { ReportSetupProblem(device, "Unable to allocate descriptor sets. Device could become unstable."); aborted = true; return; } VkDescriptorBufferInfo output_desc_buffer_info = {}; output_desc_buffer_info.range = output_buffer_size; auto cb_node = GetCBState(cmd_buffer); if (!cb_node) { ReportSetupProblem(device, "Unrecognized command buffer"); aborted = true; return; } // Allocate memory for the output block that the gpu will use to return values for printf DPFDeviceMemoryBlock output_block = {}; VkBufferCreateInfo buffer_info = {VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO}; buffer_info.size = output_buffer_size; buffer_info.usage = VK_BUFFER_USAGE_STORAGE_BUFFER_BIT; VmaAllocationCreateInfo alloc_info = {}; alloc_info.usage = VMA_MEMORY_USAGE_GPU_TO_CPU; result = vmaCreateBuffer(vmaAllocator, &buffer_info, &alloc_info, &output_block.buffer, &output_block.allocation, nullptr); if (result != VK_SUCCESS) { ReportSetupProblem(device, "Unable to allocate device memory. 
Device could become unstable."); aborted = true; return; } // Clear the output block to zeros so that only printf values from the gpu will be present uint32_t *data; result = vmaMapMemory(vmaAllocator, output_block.allocation, reinterpret_cast<void **>(&data)); if (result == VK_SUCCESS) { memset(data, 0, output_buffer_size); vmaUnmapMemory(vmaAllocator, output_block.allocation); } auto desc_writes = LvlInitStruct<VkWriteDescriptorSet>(); const uint32_t desc_count = 1; // Write the descriptor output_desc_buffer_info.buffer = output_block.buffer; output_desc_buffer_info.offset = 0; desc_writes.descriptorCount = 1; desc_writes.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER; desc_writes.pBufferInfo = &output_desc_buffer_info; desc_writes.dstSet = desc_sets[0]; desc_writes.dstBinding = 3; DispatchUpdateDescriptorSets(device, desc_count, &desc_writes, 0, NULL); const auto lv_bind_point = ConvertToLvlBindPoint(bind_point); const auto *pipeline_state = cb_node->lastBound[lv_bind_point].pipeline_state; if (pipeline_state) { if (pipeline_state->pipeline_layout->set_layouts.size() <= desc_set_bind_index) { DispatchCmdBindDescriptorSets(cmd_buffer, bind_point, pipeline_state->pipeline_layout->layout(), desc_set_bind_index, 1, desc_sets.data(), 0, nullptr); } // Record buffer and memory info in CB state tracking cb_node->buffer_infos.emplace_back(output_block, desc_sets[0], desc_pool, bind_point); } else { ReportSetupProblem(device, "Unable to find pipeline state"); vmaDestroyBuffer(vmaAllocator, output_block.buffer, output_block.allocation); aborted = true; return; } } std::shared_ptr<CMD_BUFFER_STATE> DebugPrintf::CreateCmdBufferState(VkCommandBuffer cb, const VkCommandBufferAllocateInfo *pCreateInfo, std::shared_ptr<COMMAND_POOL_STATE> &pool) { return std::static_pointer_cast<CMD_BUFFER_STATE>(std::make_shared<CMD_BUFFER_STATE_PRINTF>(this, cb, pCreateInfo, pool)); } CMD_BUFFER_STATE_PRINTF::CMD_BUFFER_STATE_PRINTF(DebugPrintf *dp, VkCommandBuffer cb, const VkCommandBufferAllocateInfo *pCreateInfo, std::shared_ptr<COMMAND_POOL_STATE> &pool) : CMD_BUFFER_STATE(dp, cb, pCreateInfo, pool) {} void CMD_BUFFER_STATE_PRINTF::Reset() { CMD_BUFFER_STATE::Reset(); auto debug_printf = static_cast<DebugPrintf *>(dev_data); // Free the device memory and descriptor set(s) associated with a command buffer. if (debug_printf->aborted) { return; } for (auto &buffer_info : buffer_infos) { debug_printf->DestroyBuffer(buffer_info); } buffer_infos.clear(); }
1
21,494
@Tony-LunarG I just realized that this differs from the previous behavior: spirv-opt will now run on the bytecode if there are any "group decorations." If this is a problem, I can add an additional constructor to keep the pre-existing behavior.
KhronosGroup-Vulkan-ValidationLayers
cpp
@@ -86,9 +86,9 @@ public class ParquetMetrics implements Serializable { Types.NestedField field = fileSchema.asStruct().field(fieldId); if (field != null && stats.hasNonNullValue()) { updateMin(lowerBounds, fieldId, - fromParquetPrimitive(field.type(), stats.genericGetMin())); + fromParquetPrimitive(field.type(), column.getPrimitiveType(), stats.genericGetMin())); updateMax(upperBounds, fieldId, - fromParquetPrimitive(field.type(), stats.genericGetMax())); + fromParquetPrimitive(field.type(), column.getPrimitiveType(), stats.genericGetMax())); } } }
1
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package com.netflix.iceberg.parquet; import com.google.common.collect.Maps; import com.google.common.collect.Sets; import com.netflix.iceberg.Metrics; import com.netflix.iceberg.Schema; import com.netflix.iceberg.exceptions.RuntimeIOException; import com.netflix.iceberg.expressions.Literal; import com.netflix.iceberg.io.InputFile; import com.netflix.iceberg.types.Conversions; import com.netflix.iceberg.types.Types; import org.apache.parquet.column.statistics.Statistics; import org.apache.parquet.hadoop.ParquetFileReader; import org.apache.parquet.hadoop.metadata.BlockMetaData; import org.apache.parquet.hadoop.metadata.ColumnChunkMetaData; import org.apache.parquet.hadoop.metadata.ParquetMetadata; import org.apache.parquet.schema.MessageType; import java.io.IOException; import java.io.Serializable; import java.nio.ByteBuffer; import java.util.List; import java.util.Map; import java.util.Set; import static com.netflix.iceberg.parquet.ParquetConversions.fromParquetPrimitive; public class ParquetMetrics implements Serializable { private ParquetMetrics() { } public static Metrics fromInputFile(InputFile file) { try (ParquetFileReader reader = ParquetFileReader.open(ParquetIO.file(file))) { return fromMetadata(reader.getFooter()); } catch (IOException e) { throw new RuntimeIOException(e, "Failed to read footer of file: %s", file); } } public static Metrics fromMetadata(ParquetMetadata metadata) { long rowCount = 0; Map<Integer, Long> columnSizes = Maps.newHashMap(); Map<Integer, Long> valueCounts = Maps.newHashMap(); Map<Integer, Long> nullValueCounts = Maps.newHashMap(); Map<Integer, Literal<?>> lowerBounds = Maps.newHashMap(); Map<Integer, Literal<?>> upperBounds = Maps.newHashMap(); Set<Integer> missingStats = Sets.newHashSet(); MessageType parquetType = metadata.getFileMetaData().getSchema(); Schema fileSchema = ParquetSchemaUtil.convert(parquetType); List<BlockMetaData> blocks = metadata.getBlocks(); for (BlockMetaData block : blocks) { rowCount += block.getRowCount(); for (ColumnChunkMetaData column : block.getColumns()) { int fieldId = fileSchema.aliasToId(column.getPath().toDotString()); increment(columnSizes, fieldId, column.getTotalSize()); increment(valueCounts, fieldId, column.getValueCount()); Statistics stats = column.getStatistics(); if (stats == null) { missingStats.add(fieldId); } else if (!stats.isEmpty()) { increment(nullValueCounts, fieldId, stats.getNumNulls()); // only add min/max stats for top-level fields // TODO: allow struct nesting, but not maps or arrays Types.NestedField field = fileSchema.asStruct().field(fieldId); if (field != null && stats.hasNonNullValue()) { updateMin(lowerBounds, fieldId, fromParquetPrimitive(field.type(), stats.genericGetMin())); updateMax(upperBounds, fieldId, 
fromParquetPrimitive(field.type(), stats.genericGetMax())); } } } } // discard accumulated values if any stats were missing for (Integer fieldId : missingStats) { nullValueCounts.remove(fieldId); lowerBounds.remove(fieldId); upperBounds.remove(fieldId); } return new Metrics(rowCount, columnSizes, valueCounts, nullValueCounts, toBufferMap(fileSchema, lowerBounds), toBufferMap(fileSchema, upperBounds)); } private static void increment(Map<Integer, Long> columns, int fieldId, long amount) { if (columns != null) { if (columns.containsKey(fieldId)) { columns.put(fieldId, columns.get(fieldId) + amount); } else { columns.put(fieldId, amount); } } } @SuppressWarnings("unchecked") private static <T> void updateMin(Map<Integer, Literal<?>> lowerBounds, int id, Literal<T> min) { Literal<T> currentMin = (Literal<T>) lowerBounds.get(id); if (currentMin == null || min.comparator().compare(min.value(), currentMin.value()) < 0) { lowerBounds.put(id, min); } } @SuppressWarnings("unchecked") private static <T> void updateMax(Map<Integer, Literal<?>> upperBounds, int id, Literal<T> max) { Literal<T> currentMax = (Literal<T>) upperBounds.get(id); if (currentMax == null || max.comparator().compare(max.value(), currentMax.value()) > 0) { upperBounds.put(id, max); } } private static Map<Integer, ByteBuffer> toBufferMap(Schema schema, Map<Integer, Literal<?>> map) { Map<Integer, ByteBuffer> bufferMap = Maps.newHashMap(); for (Map.Entry<Integer, Literal<?>> entry : map.entrySet()) { bufferMap.put(entry.getKey(), Conversions.toByteBuffer(schema.findType(entry.getKey()), entry.getValue().value())); } return bufferMap; } }
1
12,956
Nit: continuation lines should be indented 4 spaces from the start of the statement.
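For clarity, this is how the two updated call sites from the patch above would read with the requested style applied. It is the same Java code as in the diff; only the continuation indentation (4 spaces from the start of the statement) is changed, purely as an illustration:

updateMin(lowerBounds, fieldId,
    fromParquetPrimitive(field.type(), column.getPrimitiveType(), stats.genericGetMin()));
updateMax(upperBounds, fieldId,
    fromParquetPrimitive(field.type(), column.getPrimitiveType(), stats.genericGetMax()));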
apache-iceberg
java
@@ -487,6 +487,7 @@ int sysfs_finalize(void) sysfs_region_destroy(&_regions[i]); } _sysfs_region_count = 0; + _sysfs_format_ptr = NULL; return FPGA_OK; }
1
// Copyright(c) 2017-2018, Intel Corporation // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Intel Corporation nor the names of its contributors // may be used to endorse or promote products derived from this software // without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE // POSSIBILITY OF SUCH DAMAGE. #ifdef HAVE_CONFIG_H #include <config.h> #endif // HAVE_CONFIG_H #define _GNU_SOURCE #include <pthread.h> #include <glob.h> #include <dirent.h> #include <unistd.h> #include <string.h> #include <stdlib.h> #include <stdio.h> #include <fcntl.h> #include <errno.h> #include <sys/stat.h> #include <regex.h> #undef _GNU_SOURCE #include <opae/types.h> #include <opae/log.h> #include <opae/types_enum.h> #include "safe_string/safe_string.h" #include "types_int.h" #include "sysfs_int.h" #include "common_int.h" // substring that identifies a sysfs directory as the FME device. #define FPGA_SYSFS_FME "fme" #define FPGA_SYSFS_FME_LEN 3 // substring that identifies a sysfs directory as the AFU device. #define FPGA_SYSFS_PORT "port" #define FPGA_SYSFS_PORT_LEN 4 #define OPAE_KERNEL_DRIVERS 2 typedef struct _sysfs_formats { const char *sysfs_class_path; const char *sysfs_region_fmt; const char *sysfs_resource_fmt; const char *sysfs_compat_id; } sysfs_formats; static sysfs_formats sysfs_path_table[OPAE_KERNEL_DRIVERS] = { // upstream driver sysfs formats {"/sys/class/fpga_region", "region([0-9])+", "dfl-(fme|port)\\.([0-9]+)", "/dfl-fme-region.*/fpga_region/region*/compat_id"}, // intel driver sysfs formats {"/sys/class/fpga", "intel-fpga-dev\\.([0-9]+)", "intel-fpga-(fme|port)\\.([0-9]+)", "pr/interface_id"} }; static sysfs_formats *_sysfs_format_ptr; static uint32_t _sysfs_region_count; /* mutex to protect sysfs region data structures */ pthread_mutex_t _sysfs_region_lock = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP; #define SYSFS_FORMAT(s) (_sysfs_format_ptr ? 
_sysfs_format_ptr->s : NULL) #define SYSFS_MAX_REGIONS 128 static sysfs_fpga_region _regions[SYSFS_MAX_REGIONS]; #define PCIE_PATH_PATTERN "([0-9a-fA-F]{4}):([0-9a-fA-F]{2}):([0-9]{2})\\.([0-9])/fpga" #define PCIE_PATH_PATTERN_GROUPS 5 #define PARSE_MATCH_INT(_p, _m, _v, _b, _l) \ do { \ errno = 0; \ _v = strtoul(_p + _m.rm_so, NULL, _b); \ if (errno) { \ FPGA_MSG("error parsing int"); \ goto _l; \ } \ } while (0); STATIC int parse_pcie_info(sysfs_fpga_region *region, char *buffer) { char err[128] = {0}; regex_t re; regmatch_t matches[PCIE_PATH_PATTERN_GROUPS] = { {0} }; int res = FPGA_EXCEPTION; int reg_res = regcomp(&re, PCIE_PATH_PATTERN, REG_EXTENDED | REG_ICASE); if (reg_res) { FPGA_ERR("Error compling regex"); return FPGA_EXCEPTION; } reg_res = regexec(&re, buffer, PCIE_PATH_PATTERN_GROUPS, matches, 0); if (reg_res) { regerror(reg_res, &re, err, 128); FPGA_ERR("Error executing regex: %s", err); res = FPGA_EXCEPTION; goto out; } else { PARSE_MATCH_INT(buffer, matches[1], region->segment, 16, out); PARSE_MATCH_INT(buffer, matches[2], region->bus, 16, out); PARSE_MATCH_INT(buffer, matches[3], region->device, 16, out); PARSE_MATCH_INT(buffer, matches[4], region->function, 10, out); } res = FPGA_OK; out: regfree(&re); return res; } int sysfs_parse_attribute64(const char *root, const char *attr_path, uint64_t *value) { uint64_t pg_size = (uint64_t)sysconf(_SC_PAGE_SIZE); char path[SYSFS_PATH_MAX]; char buffer[pg_size]; int fd = -1; ssize_t bytes_read = 0; int len = snprintf_s_ss(path, SYSFS_PATH_MAX, "%s/%s", root, attr_path); if (len < 0) { FPGA_ERR("error concatenating strings (%s, %s)", root, attr_path); return FPGA_EXCEPTION; } fd = open(path, O_RDONLY); if (fd < 0) { FPGA_ERR("Error opening %s: %s", path, strerror(errno)); return FPGA_EXCEPTION; } bytes_read = eintr_read(fd, buffer, pg_size); if (bytes_read < 0) { FPGA_ERR("Error reading from %s: %s", path, strerror(errno)); close(fd); return FPGA_EXCEPTION; } *value = strtoull(buffer, NULL, 0); close(fd); return FPGA_OK; } STATIC int parse_device_vendor_id(sysfs_fpga_region *region) { uint64_t value = 0; int res = sysfs_parse_attribute64(region->region_path, "device/device", &value); if (res) { FPGA_ERR("Error parsing device_id for region: %s", region->region_path); return res; } region->device_id = value; res = sysfs_parse_attribute64(region->region_path, "device/vendor", &value); if (res) { FPGA_ERR("Error parsing vendor_id for region: %s", region->region_path); return res; } region->vendor_id = value; return FPGA_OK; } STATIC sysfs_fpga_resource *make_resource(sysfs_fpga_region *region, char *name, int num, fpga_objtype type) { sysfs_fpga_resource *resource = malloc(sizeof(sysfs_fpga_resource)); if (resource == NULL) { FPGA_ERR("error creating resource"); return NULL; } resource->region = region; resource->type = type; resource->num = num; // copy the full path to the parent region object strcpy_s(resource->res_path, SYSFS_PATH_MAX, region->region_path); // add a trailing path seperator '/' int len = strlen(resource->res_path); char *ptr = resource->res_path + len; *ptr = '/'; ptr++; *ptr = '\0'; // append the name to get the full path to the resource if (cat_sysfs_path(resource->res_path, name)) { FPGA_ERR("error concatenating path"); free(resource); return NULL; } if (snprintf_s_s(resource->res_name, SYSFS_PATH_MAX, "%s", name) < 0) { FPGA_ERR("Error formatting sysfs name"); free(resource); return NULL; } return resource; } STATIC int find_resources(sysfs_fpga_region *region) { DIR *dir = NULL; struct dirent *dirent = NULL; 
regex_t re; int reg_res = -1; int num = -1; char err[128] = {0}; regmatch_t matches[SYSFS_MAX_RESOURCES]; if (SYSFS_FORMAT(sysfs_resource_fmt)) { reg_res = regcomp(&re, SYSFS_FORMAT(sysfs_resource_fmt), REG_EXTENDED); if (reg_res) { regerror(reg_res, &re, err, 128); FPGA_MSG("Error compiling regex: %s", err); return FPGA_EXCEPTION; } } dir = opendir(region->region_path); if (!dir) { FPGA_MSG("failed to open region path: %s", region->region_path); regfree(&re); return FPGA_EXCEPTION; } while ((dirent = readdir(dir)) != NULL) { if (!strcmp(dirent->d_name, ".")) continue; if (!strcmp(dirent->d_name, "..")) continue; reg_res = regexec(&re, dirent->d_name, SYSFS_MAX_RESOURCES, matches, 0); if (!reg_res) { int type_beg = matches[1].rm_so; // int type_end = matches[1].rm_eo; int num_beg = matches[2].rm_so; // int num_end = matches[2].rm_eo; if (type_beg < 1 || num_beg < 1) { FPGA_MSG("Invalid sysfs resource format"); continue; } num = strtoul(dirent->d_name + num_beg, NULL, 10); if (!strncmp(FPGA_SYSFS_FME, dirent->d_name + type_beg, FPGA_SYSFS_FME_LEN)) { region->fme = make_resource( region, dirent->d_name, num, FPGA_DEVICE); } else if (!strncmp(FPGA_SYSFS_PORT, dirent->d_name + type_beg, FPGA_SYSFS_PORT_LEN)) { region->port = make_resource(region, dirent->d_name, num, FPGA_ACCELERATOR); } } } regfree(&re); if (dir) closedir(dir); if (!region->fme && !region->port) { FPGA_MSG("did not find fme/port in region: %s", region->region_path); return FPGA_NOT_FOUND; } return FPGA_OK; } STATIC int make_region(sysfs_fpga_region *region, const char *sysfs_class_fpga, char *dir_name, int num) { int res = FPGA_OK; char buffer[SYSFS_PATH_MAX] = {0}; ssize_t sym_link_len = 0; if (snprintf_s_ss(region->region_path, SYSFS_PATH_MAX, "%s/%s", sysfs_class_fpga, dir_name) < 0) { FPGA_ERR("Error formatting sysfs paths"); return FPGA_EXCEPTION; } if (snprintf_s_s(region->region_name, SYSFS_PATH_MAX, "%s", dir_name) < 0) { FPGA_ERR("Error formatting sysfs name"); return FPGA_EXCEPTION; } sym_link_len = readlink(region->region_path, buffer, SYSFS_PATH_MAX); if (sym_link_len < 0) { FPGA_ERR("Error reading sysfs link: %s", region->region_path); return FPGA_EXCEPTION; } region->number = num; res = parse_pcie_info(region, buffer); if (res) { FPGA_ERR("Could not parse symlink"); return res; } res = parse_device_vendor_id(region); if (res) { FPGA_ERR("Could not parse vendor/device id"); return res; } return find_resources(region); } STATIC int sysfs_region_destroy(sysfs_fpga_region *region) { ASSERT_NOT_NULL(region); if (region->fme) { free(region->fme); region->fme = NULL; } if (region->port) { free(region->port); region->port = NULL; } return FPGA_OK; } int sysfs_region_count(void) { int res = 0, count = 0; if (!opae_mutex_lock(res, &_sysfs_region_lock)) { count = _sysfs_region_count; } if (opae_mutex_unlock(res, &_sysfs_region_lock)) { count = 0; } return count; } void sysfs_foreach_region(region_cb cb, void *context) { uint32_t i = 0; int res = 0; if (!opae_mutex_lock(res, &_sysfs_region_lock)) { for ( ; i < _sysfs_region_count; ++i) { cb(&_regions[i], context); } opae_mutex_unlock(res, &_sysfs_region_lock); } } int sysfs_initialize(void) { int stat_res = -1; int reg_res = -1; int res = FPGA_OK; uint32_t i = 0; struct stat st; DIR *dir = NULL; char err[128] = {0}; struct dirent *dirent = NULL; regex_t region_re; regmatch_t matches[SYSFS_MAX_REGIONS]; for (i = 0; i < OPAE_KERNEL_DRIVERS; ++i) { errno = 0; stat_res = stat(sysfs_path_table[i].sysfs_class_path, &st); if (!stat_res) { _sysfs_format_ptr = &sysfs_path_table[i]; 
break; } if (errno != ENOENT) { FPGA_ERR("Error while inspecting sysfs: %s", strerror(errno)); return FPGA_EXCEPTION; } } if (i == OPAE_KERNEL_DRIVERS) { FPGA_ERR( "No valid sysfs class files found - a suitable driver may not be loaded"); return FPGA_NO_DRIVER; } _sysfs_region_count = 0; if (SYSFS_FORMAT(sysfs_region_fmt)) { reg_res = regcomp(&region_re, SYSFS_FORMAT(sysfs_region_fmt), REG_EXTENDED); if (reg_res) { regerror(reg_res, &region_re, err, 128); FPGA_ERR("Error compling regex: %s", err); return FPGA_EXCEPTION; } } const char *sysfs_class_fpga = SYSFS_FORMAT(sysfs_class_path); if (!sysfs_class_fpga) { FPGA_ERR("Invalid fpga class path: %s", sysfs_class_fpga); res = FPGA_EXCEPTION; goto out_free; } // open the root sysfs class directory // look in the directory and get region (device) objects dir = opendir(sysfs_class_fpga); if (!dir) { FPGA_MSG("failed to open region path: %s", sysfs_class_fpga); res = FPGA_EXCEPTION; goto out_free; } while ((dirent = readdir(dir))) { if (!strcmp(dirent->d_name, ".")) continue; if (!strcmp(dirent->d_name, "..")) continue; // if the current directory matches the region (device) regex reg_res = regexec(&region_re, dirent->d_name, SYSFS_MAX_REGIONS, matches, 0); if (!reg_res) { int num_begin = matches[1].rm_so; if (num_begin < 0) { FPGA_ERR("sysfs format invalid: %s", dirent->d_name); continue; } int num = strtoul(dirent->d_name + num_begin, NULL, 10); // increment our region count after filling out details // of the discovered region in our _regions array if (opae_mutex_lock(res, &_sysfs_region_lock)) { goto out_free; } if (make_region(&_regions[_sysfs_region_count++], sysfs_class_fpga, dirent->d_name, num)) { FPGA_MSG("Error processing region: %s", dirent->d_name); _sysfs_region_count--; } if (opae_mutex_unlock(res, &_sysfs_region_lock)) { goto out_free; } } } if (!_sysfs_region_count) { FPGA_ERR("Error discovering fpga regions"); res = FPGA_NO_DRIVER; } out_free: regfree(&region_re); if (dir) closedir(dir); return res; } int sysfs_finalize(void) { uint32_t i = 0; for (; i < _sysfs_region_count; ++i) { sysfs_region_destroy(&_regions[i]); } _sysfs_region_count = 0; return FPGA_OK; } const sysfs_fpga_region *sysfs_get_region(size_t num) { const sysfs_fpga_region *ptr = NULL; int res = 0; if (!opae_mutex_lock(res, &_sysfs_region_lock)) { if (num >= _sysfs_region_count) { FPGA_ERR("No such region with index: %d", num); } else { ptr = &_regions[num]; } if (opae_mutex_unlock(res, &_sysfs_region_lock)) { ptr = NULL; } } return ptr; } fpga_result sysfs_get_interface_id(fpga_token token, fpga_guid guid) { fpga_result res = FPGA_OK; char path[SYSFS_PATH_MAX]; struct _fpga_token *_token = (struct _fpga_token *)token; ASSERT_NOT_NULL(_token); res = cat_token_sysfs_path(path, token, SYSFS_FORMAT(sysfs_compat_id)); if (res) { return res; } res = opae_glob_path(path); if (res) { return res; } return sysfs_read_guid(path, guid); } fpga_result sysfs_get_fme_pr_interface_id(const char *sysfs_res_path, fpga_guid guid) { fpga_result res = FPGA_OK; char sysfs_path[SYSFS_PATH_MAX]; int len = snprintf_s_ss(sysfs_path, SYSFS_PATH_MAX, "%s/%s", sysfs_res_path, SYSFS_FORMAT(sysfs_compat_id)); if (len < 0) { FPGA_ERR("error concatenating strings (%s, %s)", sysfs_res_path, sysfs_path); return FPGA_EXCEPTION; } res = opae_glob_path(sysfs_path); if (res) { return res; } return sysfs_read_guid(sysfs_path, guid); } fpga_result sysfs_get_guid(fpga_token token, const char *sysfspath, fpga_guid guid) { fpga_result res = FPGA_OK; char sysfs_path[SYSFS_PATH_MAX]; struct _fpga_token 
*_token = (struct _fpga_token *)token; if (_token == NULL || sysfspath == NULL) return FPGA_EXCEPTION; int len = snprintf_s_ss(sysfs_path, SYSFS_PATH_MAX, "%s/%s", _token->sysfspath, sysfspath); if (len < 0) { FPGA_ERR("error concatenating strings (%s, %s)", _token->sysfspath, sysfs_path); return FPGA_EXCEPTION; } res = opae_glob_path(sysfs_path); if (res) { return res; } return sysfs_read_guid(sysfs_path, guid); } int sysfs_filter(const struct dirent *de) { return de->d_name[0] != '.'; } fpga_result sysfs_get_fme_path(int dev, int subdev, char *path) { fpga_result result = FPGA_OK; char spath[SYSFS_PATH_MAX]; char sysfs_path[SYSFS_PATH_MAX]; errno_t e; int len = snprintf_s_ss(sysfs_path, SYSFS_PATH_MAX, "%s/%s", SYSFS_FORMAT(sysfs_class_path), SYSFS_FME_PATH); if (len < 0) { FPGA_ERR("Error formatting sysfs path"); return FPGA_EXCEPTION; } snprintf_s_ii(spath, SYSFS_PATH_MAX, sysfs_path, dev, subdev); result = opae_glob_path(spath); if (result) { return result; } e = strncpy_s(path, SYSFS_PATH_MAX, spath, SYSFS_PATH_MAX); if (EOK != e) { return FPGA_EXCEPTION; } return result; } // // sysfs access (read/write) functions // fpga_result sysfs_read_int(const char *path, int *i) { int fd; int res; char buf[SYSFS_PATH_MAX]; int b; if (path == NULL) { FPGA_ERR("Invalid input path"); return FPGA_INVALID_PARAM; } fd = open(path, O_RDONLY); if (fd < 0) { FPGA_MSG("open(%s) failed", path); return FPGA_NOT_FOUND; } if ((off_t)-1 == lseek(fd, 0, SEEK_SET)) { FPGA_MSG("seek failed"); goto out_close; } b = 0; do { res = read(fd, buf + b, sizeof(buf) - b); if (res <= 0) { FPGA_MSG("Read from %s failed", path); goto out_close; } b += res; if (((unsigned)b > sizeof(buf)) || (b <= 0)) { FPGA_MSG("Unexpected size reading from %s", path); goto out_close; } } while (buf[b - 1] != '\n' && buf[b - 1] != '\0' && (unsigned)b < sizeof(buf)); // erase \n buf[b - 1] = 0; *i = atoi(buf); close(fd); return FPGA_OK; out_close: close(fd); return FPGA_NOT_FOUND; } fpga_result sysfs_read_u32(const char *path, uint32_t *u) { int fd; int res; char buf[SYSFS_PATH_MAX]; int b; if (path == NULL) { FPGA_ERR("Invalid input path"); return FPGA_INVALID_PARAM; } fd = open(path, O_RDONLY); if (fd < 0) { FPGA_MSG("open(%s) failed", path); return FPGA_NOT_FOUND; } if ((off_t)-1 == lseek(fd, 0, SEEK_SET)) { FPGA_MSG("seek failed"); goto out_close; } b = 0; do { res = read(fd, buf + b, sizeof(buf) - b); if (res <= 0) { FPGA_MSG("Read from %s failed", path); goto out_close; } b += res; if (((unsigned)b > sizeof(buf)) || (b <= 0)) { FPGA_MSG("Unexpected size reading from %s", path); goto out_close; } } while (buf[b - 1] != '\n' && buf[b - 1] != '\0' && (unsigned)b < sizeof(buf)); // erase \n buf[b - 1] = 0; *u = strtoul(buf, NULL, 0); close(fd); return FPGA_OK; out_close: close(fd); return FPGA_NOT_FOUND; } // read tuple separated by 'sep' character fpga_result sysfs_read_u32_pair(const char *path, uint32_t *u1, uint32_t *u2, char sep) { int fd; int res; char buf[SYSFS_PATH_MAX]; int b; char *c; uint32_t x1, x2; if (sep == '\0') { FPGA_MSG("invalid separation character"); return FPGA_INVALID_PARAM; } if (path == NULL) { FPGA_ERR("Invalid input path"); return FPGA_INVALID_PARAM; } fd = open(path, O_RDONLY); if (fd < 0) { FPGA_MSG("open(%s) failed", path); return FPGA_NOT_FOUND; } if ((off_t)-1 == lseek(fd, 0, SEEK_SET)) { FPGA_MSG("seek failed"); goto out_close; } b = 0; do { res = read(fd, buf + b, sizeof(buf) - b); if (res <= 0) { FPGA_MSG("Read from %s failed", path); goto out_close; } b += res; if (((unsigned)b > sizeof(buf)) || (b <= 
0)) { FPGA_MSG("Unexpected size reading from %s", path); goto out_close; } } while (buf[b - 1] != '\n' && buf[b - 1] != '\0' && (unsigned)b < sizeof(buf)); // erase \n buf[b - 1] = 0; // read first value x1 = strtoul(buf, &c, 0); if (*c != sep) { FPGA_MSG("couldn't find separation character '%c' in '%s'", sep, path); goto out_close; } // read second value x2 = strtoul(c + 1, &c, 0); if (*c != '\0') { FPGA_MSG("unexpected character '%c' in '%s'", *c, path); goto out_close; } *u1 = x1; *u2 = x2; close(fd); return FPGA_OK; out_close: close(fd); return FPGA_NOT_FOUND; } fpga_result __FIXME_MAKE_VISIBLE__ sysfs_read_u64(const char *path, uint64_t *u) { int fd = -1; int res = 0; char buf[SYSFS_PATH_MAX] = {0}; int b = 0; if (path == NULL) { FPGA_ERR("Invalid input path"); return FPGA_INVALID_PARAM; } fd = open(path, O_RDONLY); if (fd < 0) { FPGA_MSG("open(%s) failed", path); return FPGA_NOT_FOUND; } if ((off_t)-1 == lseek(fd, 0, SEEK_SET)) { FPGA_MSG("seek failed"); goto out_close; } do { res = read(fd, buf + b, sizeof(buf) - b); if (res <= 0) { FPGA_MSG("Read from %s failed", path); goto out_close; } b += res; if (((unsigned)b > sizeof(buf)) || (b <= 0)) { FPGA_MSG("Unexpected size reading from %s", path); goto out_close; } } while (buf[b - 1] != '\n' && buf[b - 1] != '\0' && (unsigned)b < sizeof(buf)); // erase \n buf[b - 1] = 0; *u = strtoull(buf, NULL, 0); close(fd); return FPGA_OK; out_close: close(fd); return FPGA_NOT_FOUND; } fpga_result __FIXME_MAKE_VISIBLE__ sysfs_write_u64(const char *path, uint64_t u) { int fd = -1; int res = 0; char buf[SYSFS_PATH_MAX] = {0}; int b = 0; int len; if (path == NULL) { FPGA_ERR("Invalid input path"); return FPGA_INVALID_PARAM; } fd = open(path, O_WRONLY); if (fd < 0) { FPGA_MSG("open(%s) failed: %s", path, strerror(errno)); return FPGA_NOT_FOUND; } if ((off_t)-1 == lseek(fd, 0, SEEK_SET)) { FPGA_MSG("seek: %s", strerror(errno)); goto out_close; } len = snprintf_s_l(buf, sizeof(buf), "0x%lx\n", u); do { res = write(fd, buf + b, len - b); if (res <= 0) { FPGA_ERR("Failed to write"); goto out_close; } b += res; if (b > len || b <= 0) { FPGA_MSG("Unexpected size writing to %s", path); goto out_close; } } while (buf[b - 1] != '\n' && buf[b - 1] != '\0' && b < len); close(fd); return FPGA_OK; out_close: close(fd); return FPGA_NOT_FOUND; } fpga_result __FIXME_MAKE_VISIBLE__ sysfs_write_u64_decimal(const char *path, uint64_t u) { int fd = -1; int res = 0; char buf[SYSFS_PATH_MAX] = {0}; int b = 0; int len; if (path == NULL) { FPGA_ERR("Invalid input path"); return FPGA_INVALID_PARAM; } fd = open(path, O_WRONLY); if (fd < 0) { FPGA_MSG("open(%s) failed: %s", path, strerror(errno)); return FPGA_NOT_FOUND; } if ((off_t)-1 == lseek(fd, 0, SEEK_SET)) { FPGA_MSG("seek: %s", strerror(errno)); goto out_close; } len = snprintf_s_l(buf, sizeof(buf), "%ld\n", u); do { res = write(fd, buf + b, len - b); if (res <= 0) { FPGA_ERR("Failed to write"); goto out_close; } b += res; if (b > len || b <= 0) { FPGA_MSG("Unexpected size writing to %s", path); goto out_close; } } while (buf[b - 1] != '\n' && buf[b - 1] != '\0' && b < len); close(fd); return FPGA_OK; out_close: close(fd); return FPGA_NOT_FOUND; } fpga_result sysfs_read_guid(const char *path, fpga_guid guid) { int fd; int res; char buf[SYSFS_PATH_MAX]; int b; int i; char tmp; unsigned octet; if (path == NULL) { FPGA_ERR("Invalid input path"); return FPGA_INVALID_PARAM; } fd = open(path, O_RDONLY); if (fd < 0) { FPGA_MSG("open(%s) failed", path); return FPGA_NOT_FOUND; } if ((off_t)-1 == lseek(fd, 0, SEEK_SET)) { 
FPGA_MSG("seek failed"); goto out_close; } b = 0; do { res = read(fd, buf + b, sizeof(buf) - b); if (res <= 0) { FPGA_MSG("Read from %s failed", path); goto out_close; } b += res; if (((unsigned)b > sizeof(buf)) || (b <= 0)) { FPGA_MSG("Unexpected size reading from %s", path); goto out_close; } } while (buf[b - 1] != '\n' && buf[b - 1] != '\0' && (unsigned)b < sizeof(buf)); // erase \n buf[b - 1] = 0; for (i = 0; i < 32; i += 2) { tmp = buf[i + 2]; buf[i + 2] = 0; octet = 0; sscanf_s_u(&buf[i], "%x", &octet); guid[i / 2] = (uint8_t)octet; buf[i + 2] = tmp; } close(fd); return FPGA_OK; out_close: close(fd); return FPGA_NOT_FOUND; } fpga_result sysfs_path_is_valid(const char *root, const char *attr_path) { char path[SYSFS_PATH_MAX] = {0}; fpga_result result = FPGA_OK; struct stat stats; int len = snprintf_s_ss(path, SYSFS_PATH_MAX, "%s/%s", root, attr_path); if (len < 0) { FPGA_ERR("error concatenating strings (%s, %s)", root, attr_path); return FPGA_EXCEPTION; } result = opae_glob_path(path); if (result) { return result; } if (stat(path, &stats) != 0) { FPGA_ERR("stat failed: %s", strerror(errno)); return FPGA_NOT_FOUND; } if (S_ISDIR(stats.st_mode) || S_ISREG(stats.st_mode)) { return FPGA_OK; } return FPGA_EXCEPTION; } // // sysfs convenience functions to access device components by device number // fpga_result sysfs_get_socket_id(int dev, int subdev, uint8_t *socket_id) { fpga_result result; char spath[SYSFS_PATH_MAX]; int i; snprintf_s_ii(spath, SYSFS_PATH_MAX, SYSFS_FPGA_CLASS_PATH SYSFS_FME_PATH_FMT "/" FPGA_SYSFS_SOCKET_ID, dev, subdev); i = 0; result = sysfs_read_int(spath, &i); if (FPGA_OK != result) return result; *socket_id = (uint8_t)i; return FPGA_OK; } fpga_result sysfs_get_afu_id(int dev, int subdev, fpga_guid guid) { char spath[SYSFS_PATH_MAX]; snprintf_s_ii(spath, SYSFS_PATH_MAX, SYSFS_FPGA_CLASS_PATH SYSFS_AFU_PATH_FMT "/" FPGA_SYSFS_AFU_GUID, dev, subdev); return sysfs_read_guid(spath, guid); } fpga_result sysfs_get_pr_id(int dev, int subdev, fpga_guid guid) { char spath[SYSFS_PATH_MAX]; snprintf_s_ii(spath, SYSFS_PATH_MAX, SYSFS_FPGA_CLASS_PATH SYSFS_FME_PATH_FMT "/" FPGA_SYSFS_FME_INTERFACE_ID, dev, subdev); return sysfs_read_guid(spath, guid); } fpga_result sysfs_get_slots(int dev, int subdev, uint32_t *slots) { char spath[SYSFS_PATH_MAX]; snprintf_s_ii(spath, SYSFS_PATH_MAX, SYSFS_FPGA_CLASS_PATH SYSFS_FME_PATH_FMT "/" FPGA_SYSFS_NUM_SLOTS, dev, subdev); return sysfs_read_u32(spath, slots); } fpga_result sysfs_get_bitstream_id(int dev, int subdev, uint64_t *id) { char spath[SYSFS_PATH_MAX]; snprintf_s_ii(spath, SYSFS_PATH_MAX, SYSFS_FPGA_CLASS_PATH SYSFS_FME_PATH_FMT "/" FPGA_SYSFS_BITSTREAM_ID, dev, subdev); return sysfs_read_u64(spath, id); } // Get port syfs path fpga_result get_port_sysfs(fpga_handle handle, char *sysfs_port) { struct _fpga_token *_token; struct _fpga_handle *_handle = (struct _fpga_handle *)handle; char *p = 0; char sysfs_path[SYSFS_PATH_MAX] = {0}; fpga_result result = FPGA_OK; errno_t e; if (sysfs_port == NULL) { FPGA_ERR("Invalid output pointer"); return FPGA_INVALID_PARAM; } if (_handle == NULL) { FPGA_ERR("Invalid handle"); return FPGA_INVALID_PARAM; } _token = (struct _fpga_token *)_handle->token; if (_token == NULL) { FPGA_ERR("Token not found"); return FPGA_INVALID_PARAM; } p = strstr(_token->sysfspath, FPGA_SYSFS_FME); if (NULL == p) { FPGA_ERR("Invalid sysfspath in token"); return FPGA_INVALID_PARAM; } int len = snprintf_s_s(sysfs_path, SYSFS_PATH_MAX, "%s/../*-port.*", _token->sysfspath); if (len < 0) { FPGA_ERR("Error formatting sysfs 
path"); return FPGA_EXCEPTION; } result = opae_glob_path(sysfs_path); if (result) { return result; } e = strncpy_s(sysfs_port, SYSFS_PATH_MAX, sysfs_path, SYSFS_PATH_MAX); if (EOK != e) { return FPGA_EXCEPTION; } return FPGA_OK; } // get fpga device id fpga_result get_fpga_deviceid(fpga_handle handle, uint64_t *deviceid) { struct _fpga_token *_token = NULL; struct _fpga_handle *_handle = (struct _fpga_handle *)handle; char sysfs_path[SYSFS_PATH_MAX] = {0}; char *p = NULL; fpga_result result = FPGA_OK; int err = 0; if (_handle == NULL) { FPGA_ERR("Invalid handle"); return FPGA_INVALID_PARAM; } if (deviceid == NULL) { FPGA_ERR("Invalid input Parameters"); return FPGA_INVALID_PARAM; } if (pthread_mutex_lock(&_handle->lock)) { FPGA_MSG("Failed to lock handle mutex"); return FPGA_EXCEPTION; } _token = (struct _fpga_token *)_handle->token; if (_token == NULL) { FPGA_ERR("Token not found"); result = FPGA_INVALID_PARAM; goto out_unlock; } p = strstr(_token->sysfspath, FPGA_SYSFS_FME); if (p == NULL) { FPGA_ERR("Failed to read sysfs path"); result = FPGA_NOT_SUPPORTED; goto out_unlock; } snprintf_s_s(sysfs_path, SYSFS_PATH_MAX, "%s/../device/device", _token->sysfspath); result = sysfs_read_u64(sysfs_path, deviceid); if (result != 0) { FPGA_ERR("Failed to read device ID"); goto out_unlock; } out_unlock: err = pthread_mutex_unlock(&_handle->lock); if (err) FPGA_ERR("pthread_mutex_unlock() failed: %s", strerror(err)); return result; } /* * The rlpath path is assumed to be of the form: * ../../devices/pci0000:5e/0000:5e:00.0/fpga/intel-fpga-dev.0 */ fpga_result sysfs_sbdf_from_path(const char *sysfspath, int *s, int *b, int *d, int *f) { int res; char rlpath[SYSFS_PATH_MAX]; char *p; res = readlink(sysfspath, rlpath, sizeof(rlpath)); if (-1 == res) { FPGA_MSG("Can't read link %s (no driver?)", sysfspath); return FPGA_NO_DRIVER; } // Find the BDF from the link path. 
rlpath[res] = 0; p = strrchr(rlpath, '/'); if (!p) { FPGA_MSG("Invalid link %s (no driver?)", rlpath); return FPGA_NO_DRIVER; } *p = 0; p = strrchr(rlpath, '/'); if (!p) { FPGA_MSG("Invalid link %s (no driver?)", rlpath); return FPGA_NO_DRIVER; } *p = 0; p = strrchr(rlpath, '/'); if (!p) { FPGA_MSG("Invalid link %s (no driver?)", rlpath); return FPGA_NO_DRIVER; } ++p; // 11 // 012345678901 // ssss:bb:dd.f *f = (int)strtoul(p + 11, NULL, 16); *(p + 10) = 0; *d = (int)strtoul(p + 8, NULL, 16); *(p + 7) = 0; *b = (int)strtoul(p + 5, NULL, 16); *(p + 4) = 0; *s = (int)strtoul(p, NULL, 16); return FPGA_OK; } fpga_result sysfs_objectid_from_path(const char *sysfspath, uint64_t *object_id) { char sdevpath[SYSFS_PATH_MAX]; uint32_t major = 0; uint32_t minor = 0; fpga_result result; snprintf_s_s(sdevpath, SYSFS_PATH_MAX, "%s/dev", sysfspath); result = sysfs_read_u32_pair(sdevpath, &major, &minor, ':'); if (FPGA_OK != result) return result; *object_id = ((major & 0xFFF) << 20) | (minor & 0xFFFFF); return FPGA_OK; } ssize_t eintr_read(int fd, void *buf, size_t count) { ssize_t bytes_read = 0, total_read = 0; char *ptr = buf; while (total_read < (ssize_t)count) { bytes_read = read(fd, ptr + total_read, count - total_read); if (bytes_read < 0) { if (errno == EINTR) { continue; } return bytes_read; } else if (bytes_read == 0) { return lseek(fd, 0, SEEK_CUR); } else { total_read += bytes_read; } } return total_read; } ssize_t eintr_write(int fd, void *buf, size_t count) { ssize_t bytes_written = 0, total_written = 0; char *ptr = buf; if (!buf) { return -1; } while (total_written < (ssize_t)count) { bytes_written = write(fd, ptr + total_written, count - total_written); if (bytes_written < 0) { if (errno == EINTR) { continue; } return bytes_written; } total_written += bytes_written; } return total_written; } fpga_result cat_token_sysfs_path(char *dest, fpga_token token, const char *path) { if (!dest) { FPGA_ERR("destination str is NULL"); return FPGA_EXCEPTION; } struct _fpga_token *_token = (struct _fpga_token *)token; int len = snprintf_s_ss(dest, SYSFS_PATH_MAX, "%s/%s", _token->sysfspath, path); if (len < 0) { FPGA_ERR("error concatenating strings (%s, %s)", _token->sysfspath, path); return FPGA_EXCEPTION; } return FPGA_OK; } fpga_result cat_sysfs_path(char *dest, const char *path) { errno_t err; err = strcat_s(dest, SYSFS_PATH_MAX, path); switch (err) { case EOK: return FPGA_OK; case ESNULLP: FPGA_ERR("NULL pointer in name"); return FPGA_INVALID_PARAM; break; case ESZEROL: FPGA_ERR("Zero length"); break; case ESLEMAX: FPGA_ERR("Length exceeds max"); break; case ESUNTERM: FPGA_ERR("Destination not termindated"); break; }; return FPGA_EXCEPTION; } fpga_result cat_handle_sysfs_path(char *dest, fpga_handle handle, const char *path) { struct _fpga_handle *_handle = (struct _fpga_handle *)(handle); return cat_token_sysfs_path(dest, _handle->token, path); } STATIC char *cstr_dup(const char *str) { size_t s = strlen(str); char *p = malloc(s+1); if (strncpy_s(p, s+1, str, s)) { FPGA_ERR("Error copying string"); return NULL; } p[s] = '\0'; return p; } struct _fpga_object *alloc_fpga_object(const char *sysfspath, const char *name) { struct _fpga_object *obj = calloc(1, sizeof(struct _fpga_object)); if (obj) { obj->handle = NULL; obj->path = cstr_dup(sysfspath); obj->name = cstr_dup(name); obj->perm = 0; obj->size = 0; obj->max_size = 0; obj->buffer = NULL; obj->objects = NULL; } return obj; } fpga_result opae_glob_path(char *path) { fpga_result res = FPGA_OK; glob_t pglob; pglob.gl_pathc = 0; pglob.gl_pathv = 
NULL; int globres = glob(path, 0, NULL, &pglob); if (!globres) { if (pglob.gl_pathc > 1) { FPGA_MSG("Ambiguous object key - using first one"); } if (strcpy_s(path, FILENAME_MAX, pglob.gl_pathv[0])) { FPGA_ERR("Could not copy globbed path"); res = FPGA_EXCEPTION; } globfree(&pglob); } else { switch (globres) { case GLOB_NOSPACE: res = FPGA_NO_MEMORY; break; case GLOB_NOMATCH: res = FPGA_NOT_FOUND; break; default: res = FPGA_EXCEPTION; } if (pglob.gl_pathv) { globfree(&pglob); } } return res; } fpga_result sync_object(fpga_object obj) { struct _fpga_object *_obj; int fd = -1; ssize_t bytes_read = 0; ASSERT_NOT_NULL(obj); _obj = (struct _fpga_object *)obj; fd = open(_obj->path, _obj->perm); if (fd < 0) { FPGA_ERR("Error opening %s: %s", _obj->path, strerror(errno)); return FPGA_EXCEPTION; } bytes_read = eintr_read(fd, _obj->buffer, _obj->max_size); if (bytes_read < 0) { FPGA_ERR("Error reading from %s: %s", _obj->path, strerror(errno)); close(fd); return FPGA_EXCEPTION; } _obj->size = bytes_read; close(fd); return FPGA_OK; } fpga_result make_sysfs_group(char *sysfspath, const char *name, fpga_object *object, int flags, fpga_handle handle) { struct dirent **namelist; int n; size_t pathlen = strlen(sysfspath); char *ptr = NULL; errno_t err; fpga_object subobj; fpga_result res = FPGA_OK; struct _fpga_object *group; if (flags & FPGA_OBJECT_GLOB) { res = opae_glob_path(sysfspath); } if (res != FPGA_OK) { return res; } n = scandir(sysfspath, &namelist, sysfs_filter, alphasort); if (n < 0) { FPGA_ERR("Error calling scandir: %s", strerror(errno)); switch (errno) { case ENOMEM: return FPGA_NO_MEMORY; case ENOENT: return FPGA_NOT_FOUND; } return FPGA_EXCEPTION; } if (n == 0) { FPGA_ERR("Group is empty"); return FPGA_EXCEPTION; } group = alloc_fpga_object(sysfspath, name); if (!group) { res = FPGA_NO_MEMORY; goto out_free_namelist; } group->handle = handle; group->type = FPGA_SYSFS_DIR; if (flags & FPGA_OBJECT_RECURSE_ONE || flags & FPGA_OBJECT_RECURSE_ALL) { ptr = sysfspath + pathlen; *ptr++ = '/'; group->objects = calloc(n, sizeof(fpga_object)); if (!group->objects) { res = FPGA_NO_MEMORY; goto out_free_group; } group->size = 0; while (n--) { err = strcpy_s(ptr, SYSFS_PATH_MAX - pathlen + 1, namelist[n]->d_name); if (err == EOK) { if (flags & FPGA_OBJECT_RECURSE_ONE) { flags &= ~FPGA_OBJECT_RECURSE_ONE; } if (!make_sysfs_object( sysfspath, namelist[n]->d_name, &subobj, flags, handle)) { group->objects[group->size++] = subobj; } } free(namelist[n]); } free(namelist); } else { while (n--) { free(namelist[n]); } free(namelist); } *object = (fpga_object)group; return FPGA_OK; out_free_group: if (group->path) free(group->path); if (group->name) free(group->name); free(group); out_free_namelist: while (n--) free(namelist[n]); free(namelist); return res; } fpga_result make_sysfs_object(char *sysfspath, const char *name, fpga_object *object, int flags, fpga_handle handle) { uint64_t pg_size = (uint64_t)sysconf(_SC_PAGE_SIZE); struct _fpga_object *obj = NULL; struct stat objstat; int statres; fpga_result res = FPGA_OK; if (flags & FPGA_OBJECT_GLOB) { res = opae_glob_path(sysfspath); } statres = stat(sysfspath, &objstat); if (statres < 0) { FPGA_MSG("Error accessing %s: %s", sysfspath, strerror(errno)); switch (errno) { case ENOENT: res = FPGA_NOT_FOUND; goto out_free; case ENOMEM: res = FPGA_NO_MEMORY; goto out_free; case EACCES: res = FPGA_NO_ACCESS; goto out_free; } res = FPGA_EXCEPTION; goto out_free; } if (S_ISDIR(objstat.st_mode)) { return make_sysfs_group(sysfspath, name, object, flags, handle); } obj = 
alloc_fpga_object(sysfspath, name); if (!obj) { return FPGA_NO_MEMORY; } obj->handle = handle; obj->type = FPGA_SYSFS_FILE; obj->buffer = calloc(pg_size, sizeof(uint8_t)); obj->max_size = pg_size; if (handle && (objstat.st_mode & (S_IWUSR | S_IWGRP | S_IWOTH))) { if ((objstat.st_mode & (S_IRUSR | S_IRGRP | S_IROTH))) { obj->perm = O_RDWR; } else { obj->perm = O_WRONLY; } } else { obj->perm = O_RDONLY; } *object = (fpga_object)obj; if (obj->perm == O_RDONLY || obj->perm == O_RDWR) { return sync_object((fpga_object)obj); } return FPGA_OK; out_free: free(obj); return res; }
1
17,801
Are these protected by any kind of lock?
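One way to answer this question in code: the globals touched by sysfs_finalize (_regions, _sysfs_region_count, and the newly cleared _sysfs_format_ptr) are accessed elsewhere in this file under _sysfs_region_lock via the opae_mutex_lock/opae_mutex_unlock helpers, so a guarded teardown could reuse that pattern. The sketch below is only an illustration of that idea, assuming those helpers behave as they do in sysfs_initialize and sysfs_region_count; it is not the project's actual implementation.

int sysfs_finalize(void)
{
    int res = 0;
    uint32_t i = 0;
    /* Sketch: take the same recursive mutex used by sysfs_initialize() and
     * sysfs_region_count() so teardown cannot race with readers of the
     * region table or the format pointer. */
    if (opae_mutex_lock(res, &_sysfs_region_lock)) {
        return FPGA_EXCEPTION;
    }
    for (; i < _sysfs_region_count; ++i) {
        sysfs_region_destroy(&_regions[i]);
    }
    _sysfs_region_count = 0;
    _sysfs_format_ptr = NULL;
    if (opae_mutex_unlock(res, &_sysfs_region_lock)) {
        return FPGA_EXCEPTION;
    }
    return FPGA_OK;
}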
OPAE-opae-sdk
c
@@ -66,6 +66,7 @@ class FirewallRule(object): self.resource_id = kwargs.get('id') self.create_time = kwargs.get('firewall_rule_create_time') self.name = kwargs.get('firewall_rule_name') + self.hierarchical_name = kwargs.get('firewall_rule_hierarchical_name') self.kind = kwargs.get('firewall_rule_kind') self.network = kwargs.get('firewall_rule_network') self._priority = kwargs.get('firewall_rule_priority')
1
# Copyright 2017 The Forseti Security Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """A Firewall. See: https://cloud.google.com/compute/docs/reference/latest/firewalls """ import json import netaddr from google.cloud.forseti.common.util import log_util from google.cloud.forseti.common.util import parser LOGGER = log_util.get_logger(__name__) # pylint: disable=too-many-instance-attributes ALL_REPRESENTATIONS = ('all', '0-65355', '1-65535') ALLOWED_RULE_ITEMS = frozenset(('allowed', 'denied', 'description', 'direction', 'name', 'network', 'priority', 'sourceRanges', 'destinationRanges', 'sourceTags', 'targetTags', 'sourceServiceAccounts', 'targetServiceAccounts')) class Error(Exception): """Base error class for the module.""" class InvalidFirewallRuleError(Error): """Raised if a firewall rule doesn't look like a firewall rule should.""" class InvalidFirewallActionError(Error): """Raised if a firewall action doesn't look like a firewall rule should.""" class FirewallRule(object): """Represents Firewall resource.""" MYSQL_DATETIME_FORMAT = '%Y-%m-%d %H:%M:%S' def __init__(self, validate=False, **kwargs): """Firewall resource. Args: validate (bool): Whether to validate this rule. kwargs (dict): Object properties Raises: InvalidFirewallRuleError: If allowed and denied rules aren't valid. """ self.project_id = kwargs.get('project_id') self.resource_id = kwargs.get('id') self.create_time = kwargs.get('firewall_rule_create_time') self.name = kwargs.get('firewall_rule_name') self.kind = kwargs.get('firewall_rule_kind') self.network = kwargs.get('firewall_rule_network') self._priority = kwargs.get('firewall_rule_priority') self.direction = kwargs.get('firewall_rule_direction') if self.direction: self.direction = self.direction.upper() self._source_ranges = frozenset(parser.json_unstringify( kwargs.get('firewall_rule_source_ranges'), default=list())) self._destination_ranges = frozenset(parser.json_unstringify( kwargs.get('firewall_rule_destination_ranges'), default=list())) self._source_tags = frozenset(parser.json_unstringify( kwargs.get('firewall_rule_source_tags'), default=list())) self._target_tags = frozenset(parser.json_unstringify( kwargs.get('firewall_rule_target_tags'), default=list())) self._source_service_accounts = frozenset(parser.json_unstringify( kwargs.get('firewall_rule_source_service_accounts'), default=list())) self._target_service_accounts = frozenset(parser.json_unstringify( kwargs.get('firewall_rule_target_service_accounts'), default=list())) self.allowed = parser.json_unstringify( kwargs.get('firewall_rule_allowed')) self.denied = parser.json_unstringify( kwargs.get('firewall_rule_denied')) if self.allowed and self.denied: raise InvalidFirewallRuleError( 'Cannot have allowed and denied rules (%s, %s)' % ( self.allowed, self.denied)) if self.allowed is None and self.denied is None: raise InvalidFirewallRuleError('Must have allowed or denied rules') self._firewall_action = None if validate: self.validate() def __str__(self): """String representation. 
Returns: str: A string representation of FirewallRule. """ string = ('FirewallRule(' 'project_id=%s\n' 'name=%s\n' 'network=%s\n' 'priority=%s\n' 'direction=%s\n' 'action=%s\n') % ( self.project_id, self.name, self.network, self._priority, self.direction, self._firewall_action) for field_name, value in [ ('sourceRanges', self._source_ranges), ('destinationRanges', self._destination_ranges), ('sourceTags', self._source_tags), ('targetTags', self._target_tags), ('sourceServiceAccounts', self._source_service_accounts), ('targetServiceAccounts', self._target_service_accounts), ]: if value: string += '%s=%s\n' % (field_name, value) return string.strip() @staticmethod def _transform(firewall_dict, project_id=None, validate=None): """Transforms firewall dictionary into FirewallRule. Args: firewall_dict (dict): A dictionary with firewall field names matching the API field names. project_id (str): A project id string. validate (bool): Whether to validate this FirewallRule or not. Returns: FirewallRule: A FirewallRule created from the input dictionary. """ in_dict = { 'firewall_rule_id': firewall_dict.get('id'), 'firewall_rule_name': firewall_dict.get('name'), 'firewall_rule_description': firewall_dict.get('description'), 'firewall_rule_kind': firewall_dict.get('kind'), 'firewall_rule_network': firewall_dict.get('network'), 'firewall_rule_priority': firewall_dict.get('priority'), 'firewall_rule_direction': firewall_dict.get('direction'), 'firewall_rule_source_ranges': parser.json_stringify( firewall_dict.get('sourceRanges')), 'firewall_rule_destination_ranges': parser.json_stringify( firewall_dict.get('destinationRanges')), 'firewall_rule_source_tags': parser.json_stringify( firewall_dict.get('sourceTags')), 'firewall_rule_target_tags': parser.json_stringify( firewall_dict.get('targetTags')), 'firewall_rule_source_service_accounts': parser.json_stringify( firewall_dict.get('sourceServiceAccounts')), 'firewall_rule_target_service_accounts': parser.json_stringify( firewall_dict.get('targetServiceAccounts')), 'firewall_rule_allowed': parser.json_stringify( firewall_dict.get('allowed')), 'firewall_rule_denied': parser.json_stringify( firewall_dict.get('denied')), 'firewall_rule_self_link': parser.json_stringify( firewall_dict.get('selfLink')), 'firewall_rule_create_time': parser.format_timestamp( parser.json_stringify(firewall_dict.get('creationTimestamp')), FirewallRule.MYSQL_DATETIME_FORMAT), } if project_id: in_dict['project_id'] = project_id return FirewallRule(validate=validate, **in_dict) @classmethod def from_json(cls, json_string, project_id=None): """Creates a validated FirewallRule from a valid firewall JSON. Args: json_string (str): A valid firewall JSON string. project_id (str): A string project id. Returns: FirewallRule: A validated FirewallRule from the JSON string. Raises: InvalidFirewallRuleError: If the firewall rule is invalid. InvalidFirewallActionError: If the firewall action is invalid. """ json_dict = json.loads(json_string) return FirewallRule._transform( json_dict, project_id=project_id, validate=True) @classmethod def from_dict(cls, firewall_dict, project_id=None, validate=False): """Creates an unvalidated FirewallRule from a dictionary. Args: firewall_dict (dict): A dict with firewall keys and values. project_id (str): A string project id. validate (bool): Whether to validate this rule or not. Returns: FirewallRule: A validated FirewallRule from the JSON string. Raises: InvalidFirewallRuleError: If the firewall rule is invalid. 
InvalidFirewallActionError: If the firewall action is invalid. """ return FirewallRule._transform( firewall_dict, project_id=project_id, validate=validate) def as_json(self): """Returns a valid JSON representation of this firewall rule. This rule must be valid to return the representation. Returns: str: A string JSON dump of the firewall rule. Raises: InvalidFirewallRuleError: If the firewall rule is invalid. InvalidFirewallActionError: If the firewall action is invalid. """ self.validate() firewall_dict = { 'direction': self.direction, 'network': self.network, 'name': self.name, } for key, value in [ self.firewall_action.json_dict(), ('sourceRanges', self.source_ranges), ('sourceTags', self.source_tags), ('targetTags', self.target_tags), ('destinationRanges', self.destination_ranges), ('priority', self._priority), ('sourceServiceAccounts', self.source_service_accounts), ('targetServiceAccounts', self.target_service_accounts)]: if value: firewall_dict[key] = value return json.dumps(firewall_dict, sort_keys=True) def validate(self): """Validates that a rule is valid. Validation is based on reference: https://cloud.google.com/compute/docs/reference/beta/firewalls and https://cloud.google.com/compute/docs/vpc/firewalls#gcp_firewall_rule_summary_table Returns: bool: If rule is valid. Raises: InvalidFirewallRuleError: One or more rules failed validation. """ self._validate_keys() self._validate_direction() self._validate_priority() if not self.firewall_action: raise InvalidFirewallRuleError('Rule missing action "%s"' % self) else: self.firewall_action.validate() # TODO: Verify rule name matches regex of allowed # names from reference return True def _validate_keys(self): """Checks that required keys and value restrictions. Required fields: name and network Length restrictions: * name <= 63 characters * <= 256 values: sourceRanges, sourceTags, targetTags, destinationRanges Raises: InvalidFirewallRuleError: If keys don't meet requirements. """ if not self.name: raise InvalidFirewallRuleError( 'Rule missing required field "%s"' % 'name') if not self.network: raise InvalidFirewallRuleError( 'Rule missing required field "%s"' % 'network') if len(self.name) > 63: raise InvalidFirewallRuleError( 'Rule name exceeds length limit of 63 chars: "%s".' % self.name) max_256_value_keys = [ ('sourceRanges', self._source_ranges), ('sourceTags', self._source_tags), ('targetTags', self._target_tags), ('destinationRanges', self._destination_ranges) ] for key, value in max_256_value_keys: if value and len(value) > 256: raise InvalidFirewallRuleError( 'Rule entry "%s" must contain 256 or fewer values: "%s".' % (key, value)) if self._source_tags: if self._source_service_accounts or self._target_service_accounts: raise InvalidFirewallRuleError( 'sourceTags cannot be set when source/targetServiceAccounts' ' are set') if self._target_tags: if self._source_service_accounts or self._target_service_accounts: raise InvalidFirewallRuleError( 'targetTags cannot be set when source/targetServiceAccounts' ' are set') max_1_value_keys = [ ('sourceServiceAccount', self.source_service_accounts), ('targetServiceAccount', self.target_service_accounts), ] for key, value in max_1_value_keys: if value and len(value) > 1: raise InvalidFirewallRuleError( 'Rule entry "%s" may contain at most 1 value: "%s".' % (key, value)) def _validate_direction(self): """Checks that the direction and associated fields are valid. 
Raises: InvalidFirewallRuleError: If: * Direction is 'ingress' and * there are no source ranges or tags * _destination_ranges is not set * Direction is 'egress' and * there are no source ranges or tags * _destination_ranges is set """ if self.direction == 'INGRESS': if (not self._source_ranges and not self._source_tags and not self.source_service_accounts): raise InvalidFirewallRuleError( 'Ingress rule missing required field oneof "sourceRanges" ' 'or "sourceTags" or "sourceServiceAccounts": "%s".' % self) if self._destination_ranges: raise InvalidFirewallRuleError( 'Ingress rules cannot include "destinationRanges": "%s".' % self) elif self.direction == 'EGRESS': if not self._destination_ranges: raise InvalidFirewallRuleError( 'Egress rule missing required field "destinationRanges":' '"%s".'% self) if (self._source_ranges or self._source_tags or self._source_service_accounts): raise InvalidFirewallRuleError( 'Egress rules cannot include "sourceRanges", "sourceTags"' ' or "sourceServiceAccounts": "%s".' % self) else: raise InvalidFirewallRuleError( 'Rule "direction" must be either "ingress" or "egress": "%s".' % self) def _validate_priority(self): """Checks that the priority of the rule is a valid value. Raises: InvalidFirewallRuleError: If the priority can't be converted to an int or if it is outside the allowed range. """ if self._priority: try: priority = int(self._priority) except ValueError as err: raise InvalidFirewallRuleError( 'Rule "priority" could not be converted to an integer: ' '"%s".' % err) if priority < 0 or priority > 65535: raise InvalidFirewallRuleError( 'Rule "priority" out of range 0-65535: "%s".' % priority) @property def source_ranges(self): """The sorted source ranges for this policy. Returns: list: Sorted source ips ranges. """ return sorted(self._source_ranges) @property def destination_ranges(self): """The sorted destination ranges for this policy. Returns: list: Sorted destination ips ranges. """ return sorted(self._destination_ranges) @property def source_tags(self): """The sorted source tags for this policy. Returns: list: Sorted source tags. """ return sorted(self._source_tags) @property def target_tags(self): """The sorted target tags for this policy. Returns: list: Sorted target tags. """ return sorted(self._target_tags) @property def source_service_accounts(self): """The sorted source tags for this policy. Returns: list: Sorted source tags. """ return sorted(self._source_service_accounts) @property def target_service_accounts(self): """The sorted target tags for this policy. Returns: list: Sorted target tags. """ return sorted(self._target_service_accounts) @property def priority(self): """The effective priority of the firewall rule. Per https://cloud.google.com/compute/docs/reference/latest/firewalls the default priority is 1000. Returns: int: Rule priority (lower is more important) """ if self._priority is None: return 1000 return self._priority @property def firewall_action(self): """The protocols and ports allowed or denied by this policy. https://cloud.google.com/compute/docs/reference/beta/firewalls Returns: FirewallAction: An object that represents what ports and protocols are allowed or denied. Raises: ValueError: If there are both allow and deny actions for a rule. 
""" if not self._firewall_action: if self.allowed: self._firewall_action = FirewallAction( firewall_rules=self.allowed, firewall_rule_action='allowed') else: self._firewall_action = FirewallAction( firewall_rules=self.denied, firewall_rule_action='denied') return self._firewall_action def __lt__(self, other): """Test whether this policy is contained in another policy. Checks if this rule is a subset of the allowed/denied ports and protocols that are in the other rule. Args: other(FirewallRule): object to compare to Returns: bool: comparison result """ LOGGER.debug('Checking %s < %s', self, other) return ((self.direction == other.direction or self.direction is None or other.direction is None) and (self.network == other.network or other.network is None) and set(self.source_tags).issubset(other.source_tags) and set(self.target_tags).issubset(other.target_tags) and self.firewall_action < other.firewall_action and ips_in_list(self.source_ranges, other.source_ranges) and ips_in_list(self.destination_ranges, other.destination_ranges)) def __gt__(self, other): """Test whether this policy contains the other policy. Checks if this rule is a superset of the allowed/denied ports and protocols that are in the other rule. Args: other(FirewallRule): object to compare to Returns: bool: comparison result """ LOGGER.debug('Checking %s > %s', self, other) return ((self.direction is None or other.direction is None or self.direction == other.direction) and (self.network is None or other.network is None or self.network == other.network) and set(other.source_tags).issubset(self.source_tags) and set(other.target_tags).issubset(self.target_tags) and self.firewall_action > other.firewall_action and ips_in_list(other.source_ranges, self.source_ranges) and ips_in_list(other.destination_ranges, self.destination_ranges)) # pylint: disable=protected-access def __eq__(self, other): """Test whether this policy is the same as the other policy. Args: other(FirewallRule): object to compare to Returns: bool: comparison result """ LOGGER.debug('Checking %s == %s', self, other) return (self.direction == other.direction and self.network == other.network and self._source_tags == other._source_tags and self._target_tags == other._target_tags and self.source_ranges == other.source_ranges and self.destination_ranges == other.destination_ranges and self.firewall_action == other.firewall_action) # pylint: enable=protected-access # pylint: disable=protected-access def is_equivalent(self, other): """Test whether this policy is equivalent to the other policy. Args: other(FirewallRule): object to compare to Returns: bool: comparison result """ return (self.direction == other.direction and self.network == other.network and self._source_tags == other._source_tags and self._target_tags == other._target_tags and self.source_ranges == other.source_ranges and self.destination_ranges == other.destination_ranges and self.firewall_action.is_equivalent(other.firewall_action)) # pylint: enable=protected-access class FirewallAction(object): """An association of allowed or denied ports and protocols.""" VALID_ACTIONS = frozenset(['allowed', 'denied']) MATCH_ANY = '*' def __init__(self, firewall_rules=None, firewall_rule_action='allowed'): """Initialize. Args: firewall_rules (list): A list of dictionaries of allowed ports and protocols. firewall_rule_action (str): The action, either allow or deny. Raises: InvalidFirewallActionError: If there are both allow and deny rules. 
""" if firewall_rule_action not in self.VALID_ACTIONS: raise InvalidFirewallActionError( 'Firewall rule action must be either allowed or denied' ' got: %s' % (firewall_rule_action)) self.action = firewall_rule_action self._any_value = None if firewall_rules: assert isinstance(firewall_rules, list) self.rules = sort_rules(firewall_rules) else: self.rules = [] self._applies_to_all = None self._expanded_rules = None def __str__(self): """String representation. Returns: str: A string representation of FirewallAction. """ return 'FirewallAction(action=%s, rules=%s)' % (self.action, self.rules) def json_dict(self): """Gets the JSON key and values for the firewall action. Returns: tuple: Of key ('allowed' or 'denied') and the firewall rules. Raises: InvalidFirewallActionError: If a rule is not formatted for the API. """ self.validate() return (self.action, self.rules) def validate(self): """Validates that the firewall rules are valid for use in the API. Raises: InvalidFirewallActionError: If a rule is not formatted for the API. """ for rule in self.rules: if 'IPProtocol' not in rule: raise InvalidFirewallActionError( 'Action must have field IPProtocol') if 'ports' in rule: if rule['IPProtocol'] not in ['tcp', 'udp']: raise InvalidFirewallActionError( 'Only "tcp" and "udp" can have ports specified: %s' % rule) for port in rule['ports']: if '-' in port: validate_port_range(port) else: validate_port(port) invalid_keys = set(rule.keys()) - set(['IPProtocol', 'ports']) if invalid_keys: raise InvalidFirewallActionError( 'Action can only have "IPProtocol" and "ports": %s' % invalid_keys) @property def applies_to_all(self): """Returns whether this applies to all ports and protocols or not. Returns: bool: Whether this applies to all ports and protocols or not. """ if self._applies_to_all is None: self._applies_to_all = False for rule in self.rules: protocol = rule.get('IPProtocol') if protocol == 'all': self._applies_to_all = True break return self._applies_to_all @property def any_value(self): """Returns whether this rule matches any value. Returns: bool: Whether this rule matches any value. """ if self._any_value is None: self._any_value = all(rule == self.MATCH_ANY for rule in self.rules) return self._any_value @property def expanded_rules(self): """Returns an expanded set of ports. Returns: dict: A dict of protocol to all port numbers. """ if self._expanded_rules is None: self._expanded_rules = {} if not self.any_value: for rule in self.rules: protocol = rule.get('IPProtocol') ports = rule.get('ports', ['all']) expanded_ports = set(expand_ports(ports)) current_ports = self._expanded_rules.get(protocol, set([])) current_ports.update(expanded_ports) self._expanded_rules[protocol] = current_ports return self._expanded_rules @staticmethod def ports_are_subset(ports_1, ports_2): """Returns whether one port list is a subset of another. Args: ports_1 (list): A list of string port numbers. ports_2 (list): A list of string port numbers. Returns: bool: Whether ports_1 are a subset of ports_2 or not. """ if any([a in ports_2 for a in ALL_REPRESENTATIONS]): return True return set(ports_1).issubset(ports_2) @staticmethod def ports_are_equal(ports_1, ports_2): """Returns whether two port lists are the same. Args: ports_1 (list): A list of string port numbers. ports_2 (list): A list of string port numbers. Returns: bool: Whether ports_1 have the same ports as ports_2. 
""" if (any([a in ports_1 for a in ALL_REPRESENTATIONS]) and any([a in ports_2 for a in ALL_REPRESENTATIONS])): return True return set(ports_1) == set(ports_2) def is_equivalent(self, other): """Returns whether this action and another are functionally equivalent. Args: other (FirewallAction): Another FirewallAction. Returns: bool: Whether these two FirewallActions are functionally equivalent. """ return (self.action == other.action and (self.any_value or other.any_value or self.expanded_rules.keys() == other.expanded_rules.keys() and all([ self.ports_are_equal( self.expanded_rules.get(protocol, []), other.expanded_rules.get(protocol, [])) for protocol in self.expanded_rules ]))) def __lt__(self, other): """Less than. Args: other (FirewallAction): The FirewallAction to compare to. Returns: bool: Whether this action is a subset of the other action. """ return (self.action == other.action and (self.any_value or other.any_value or other.applies_to_all or not other.expanded_rules or all([ self.ports_are_subset( self.expanded_rules.get(protocol, []), other.expanded_rules.get(protocol, [])) for protocol in self.expanded_rules]))) def __gt__(self, other): """Greater than. Args: other (FirewallAction): The FirewallAction to compare to. Returns: bool: Whether this action is a superset of the other action. """ return (self.action == other.action and (self.any_value or other.any_value or self.applies_to_all or not self.expanded_rules or all([ self.ports_are_subset( other.expanded_rules.get(protocol, []), self.expanded_rules.get(protocol, [])) for protocol in other.expanded_rules]))) def __eq__(self, other): """Equals. Args: other (FirewallAction): The FirewallAction to compare to. Returns: bool: If this action is the exact same as the other FirewallAction. """ return self.action == other.action and self.rules == other.rules def sort_rules(rules): """Sorts firewall rules by protocol and sorts ports. Args: rules (list): A list of firewall rule dictionaries. Returns: list: A list of sorted firewall rules. """ sorted_rules = [] if FirewallAction.MATCH_ANY in rules: return rules for rule in sorted(rules, key=lambda k: k.get('IPProtocol', '')): if 'ports' in rule: # If the ports contains 'all', don't care about the other ports # or sorting. Otherwise, sort ports numerically, and handle ranges # through sorting by start port. if 'all' in rule['ports']: rule['ports'] = 'all' else: rule['ports'] = sorted(rule['ports'], key=lambda k: int(k.split('-')[0])) sorted_rules.append(rule) return sorted_rules def ips_in_list(ips, ips_list): """Checks whether the ips and ranges are all in a list. Examples: ips_in_list([1.1.1.1], [0.0.0.0/0]) = True ips_in_list([1.1.1.1/24], [0.0.0.0/0]) = True ips_in_list([1.1.1.1, 1.1.1.2], [0.0.0.0/0]) = True ips_in_list([1.1.1.1, 2.2.2.2], [1.1.1.0/24, 2.2.2.0/24]) = True ips_in_list([0.0.0.0/0], [1.1.1.1]) = False Args: ips (list): A list of string IP addresses. ips_list (list): A list of string IP addresses. Returns: bool: Whether the ips are all in the given ips_list. """ if not ips or not ips_list: return True for ip_addr in ips: if not ips_list: return False if not any([ip_in_range(ip_addr, ips) for ips in ips_list]): return False return True def ip_in_range(ip_addr, ip_range): """Checks whether the ip/ip range is in another ip range. Examples: ip_in_range(1.1.1.1, 0.0.0.0/0) = True ip_in_range(1.1.1.1/24, 0.0.0.0/0) = True ip_in_range(0.0.0.0/0, 1.1.1.1) = False Args: ip_addr (string): A list of string IP addresses. ip_range (string): A list of string IP addresses. 
Returns: bool: Whether the ip / ip range is in another ip range. """ ip_network = netaddr.IPNetwork(ip_addr) ip_range_network = netaddr.IPNetwork(ip_range) return ip_network in ip_range_network def expand_port_range(port_range): """Expands a port range. From https://cloud.google.com/compute/docs/reference/beta/firewalls, ports can be of the form "<number>-<number>". Args: port_range (string): A string of format "<number_1>-<number_2>". Returns: list: A list of string integers from number_1 to number_2. """ start, end = port_range.split('-') return [str(i) for i in xrange(int(start), int(end) + 1)] def expand_ports(ports): """Expands all ports in a list. From https://cloud.google.com/compute/docs/reference/beta/firewalls, ports can be of the form "<number" or "<number>-<number>". Args: ports (list): A list of strings of format "<number>" or "<number_1>-<number_2>". Returns: list: A list of all port number strings with the ranges expanded. """ expanded_ports = [] if not ports: return [] for port_str in ports: if '-' in port_str: expanded_ports.extend(expand_port_range(port_str)) else: expanded_ports.append(port_str) return expanded_ports def validate_port(port): """Validates that a string is a valid port number. Args: port (str): A port number string. Returns: int: The integer port number. Raises: InvalidFirewallActionError: If the port string isn't a valid port. """ try: iport = int(port) except ValueError: raise InvalidFirewallActionError( 'Port not a valid int: %s' % port) if iport < 0: raise InvalidFirewallActionError( 'Port must be >= 0: %s' % port) if iport > 65535: raise InvalidFirewallActionError( 'Port must be <= 65535: %s' % port) return iport def validate_port_range(port_range): """Validates that a string is a valid port number. Args: port_range (str): A port range string. Raises: InvalidFirewallActionError: If the port range isn't a valid range. """ split_ports = port_range.split('-') if len(split_ports) > 2: raise InvalidFirewallActionError( 'Invalid port range: %s' % port_range) start = validate_port(split_ports[0]) end = validate_port(split_ports[1]) if start > end: raise InvalidFirewallActionError( 'Start port range > end port range: %s' % port_range)
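The port and CIDR helpers at the bottom of this file are easiest to follow from a couple of concrete calls. Below is a minimal, standalone sketch of their behaviour, restated in Python 3 (so range replaces the module's xrange); the only assumption is the netaddr dependency the module itself already uses.

# Standalone sketch (not part of the module above) of the port-expansion and
# CIDR-containment behaviour. netaddr is the same third-party library used above.
import netaddr


def expand_ports(ports):
    """Expand 'N' and 'N-M' port strings into individual port strings."""
    expanded = []
    for port_str in ports or []:
        if '-' in port_str:
            start, end = port_str.split('-')
            expanded.extend(str(i) for i in range(int(start), int(end) + 1))
        else:
            expanded.append(port_str)
    return expanded


def ip_in_range(ip_addr, ip_range):
    """Return True if ip_addr (an address or CIDR) falls inside ip_range."""
    return netaddr.IPNetwork(ip_addr) in netaddr.IPNetwork(ip_range)


print(expand_ports(['22', '8080-8082']))        # ['22', '8080', '8081', '8082']
print(ip_in_range('10.0.1.5', '10.0.0.0/16'))   # True
print(ip_in_range('0.0.0.0/0', '10.0.0.0/16'))  # False: a /0 is wider than a /16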
1
28,040
This should probably default to a string, as get_resource_ancestors is causing the tests to fail due to the rsplit on a None hierarchical_name.
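A minimal sketch of the failure mode this comment describes, under the assumption that the ancestor walk splits a hierarchical_name with rsplit; the names here are illustrative, not the actual Forseti code.

# Hypothetical reconstruction of the failure described in the comment above;
# the real get_resource_ancestors() and hierarchical_name attribute may differ.
hierarchical_name = None

try:
    # An ancestor walk over 'org/123/project/abc' style names typically does
    # something like this, which blows up when the field defaults to None.
    parent = hierarchical_name.rsplit('/', 2)[0]
except AttributeError as err:
    print(err)  # 'NoneType' object has no attribute 'rsplit'

# Defaulting the field to a string keeps rsplit() well defined:
hierarchical_name = ''
print(repr(hierarchical_name.rsplit('/', 2)[0]))  # '' -- i.e. no ancestors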
forseti-security-forseti-security
py
@@ -170,6 +170,7 @@ type ACMEChallengeSolverHTTP01Ingress struct { // The ingress class to use when creating Ingress resources to solve ACME // challenges that use this challenge solver. + // A reference by name to a Kubernetes [IngressClass](https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-class) object // Only one of 'class' or 'name' may be specified. Class *string
1
/* Copyright 2019 The Jetstack cert-manager contributors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package acme import ( corev1 "k8s.io/api/core/v1" apiext "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" cmmeta "github.com/jetstack/cert-manager/pkg/internal/apis/meta" ) // ACMEIssuer contains the specification for an ACME issuer. // This uses the RFC8555 specification to obtain certificates by completing // 'challenges' to prove ownership of domain identifiers. // Earlier draft versions of the ACME specification are not supported. type ACMEIssuer struct { // Email is the email address to be associated with the ACME account. // This field is optional, but it is strongly recommended to be set. // It will be used to contact you in case of issues with your account or // certificates, including expiry notification emails. // This field may be updated after the account is initially registered. Email string // Server is the URL used to access the ACME server's 'directory' endpoint. // For example, for Let's Encrypt's staging endpoint, you would use: // "https://acme-staging-v02.api.letsencrypt.org/directory". // Only ACME v2 endpoints (i.e. RFC 8555) are supported. Server string // Enables or disables validation of the ACME server TLS certificate. // If true, requests to the ACME server will not have their TLS certificate // validated (i.e. insecure connections will be allowed). // Only enable this option in development environments. // The cert-manager system installed roots will be used to verify connections // to the ACME server if this is false. // Defaults to false. SkipTLSVerify bool // ExternalAccountBinding is a reference to a CA external account of the ACME // server. // If set, upon registration cert-manager will attempt to associate the given // external account credentials with the registered ACME account. ExternalAccountBinding *ACMEExternalAccountBinding // PrivateKey is the name of a Kubernetes Secret resource that will be used to // store the automatically generated ACME account private key. // Optionally, a `key` may be specified to select a specific entry within // the named Secret resource. // If `key` is not specified, a default of `tls.key` will be used. PrivateKey cmmeta.SecretKeySelector // Solvers is a list of challenge solvers that will be used to solve // ACME challenges for the matching domains. // Solver configurations must be provided in order to obtain certificates // from an ACME server. // For more information, see: https://cert-manager.io/docs/configuration/acme/ Solvers []ACMEChallengeSolver } // ACMEExternalAccountBinding is a reference to a CA external account of the ACME // server. type ACMEExternalAccountBinding struct { // keyID is the ID of the CA key that the External Account is bound to. KeyID string // keySecretRef is a Secret Key Selector referencing a data item in a Kubernetes // Secret which holds the symmetric MAC key of the External Account Binding. 
// The `key` is the index string that is paired with the key data in the // Secret and should not be confused with the key data itself, or indeed with // the External Account Binding keyID above. // The secret key stored in the Secret **must** be un-padded, base64 URL // encoded data. Key cmmeta.SecretKeySelector // keyAlgorithm is the MAC key algorithm that the key is used for. // Valid values are "HS256", "HS384" and "HS512". KeyAlgorithm HMACKeyAlgorithm } // HMACKeyAlgorithm is the name of a key algorithm used for HMAC encryption type HMACKeyAlgorithm string const ( HS256 HMACKeyAlgorithm = "HS256" HS384 HMACKeyAlgorithm = "HS384" HS512 HMACKeyAlgorithm = "HS512" ) // Configures an issuer to solve challenges using the specified options. // Only one of HTTP01 or DNS01 may be provided. type ACMEChallengeSolver struct { // Selector selects a set of DNSNames on the Certificate resource that // should be solved using this challenge solver. // If not specified, the solver will be treated as the 'default' solver // with the lowest priority, i.e. if any other solver has a more specific // match, it will be used instead. Selector *CertificateDNSNameSelector // Configures cert-manager to attempt to complete authorizations by // performing the HTTP01 challenge flow. // It is not possible to obtain certificates for wildcard domain names // (e.g. `*.example.com`) using the HTTP01 challenge mechanism. HTTP01 *ACMEChallengeSolverHTTP01 // Configures cert-manager to attempt to complete authorizations by // performing the DNS01 challenge flow. DNS01 *ACMEChallengeSolverDNS01 } // CertificateDomainSelector selects certificates using a label selector, and // can optionally select individual DNS names within those certificates. // If both MatchLabels and DNSNames are empty, this selector will match all // certificates and DNS names within them. type CertificateDNSNameSelector struct { // A label selector that is used to refine the set of certificate's that // this challenge solver will apply to. MatchLabels map[string]string // List of DNSNames that this solver will be used to solve. // If specified and a match is found, a dnsNames selector will take // precedence over a dnsZones selector. // If multiple solvers match with the same dnsNames value, the solver // with the most matching labels in matchLabels will be selected. // If neither has more matches, the solver defined earlier in the list // will be selected. DNSNames []string // List of DNSZones that this solver will be used to solve. // The most specific DNS zone match specified here will take precedence // over other DNS zone matches, so a solver specifying sys.example.com // will be selected over one specifying example.com for the domain // www.sys.example.com. // If multiple solvers match with the same dnsZones value, the solver // with the most matching labels in matchLabels will be selected. // If neither has more matches, the solver defined earlier in the list // will be selected. DNSZones []string } // ACMEChallengeSolverHTTP01 contains configuration detailing how to solve // HTTP01 challenges within a Kubernetes cluster. // Typically this is accomplished through creating 'routes' of some description // that configure ingress controllers to direct traffic to 'solver pods', which // are responsible for responding to the ACME server's HTTP requests. 
type ACMEChallengeSolverHTTP01 struct { // The ingress based HTTP01 challenge solver will solve challenges by // creating or modifying Ingress resources in order to route requests for // '/.well-known/acme-challenge/XYZ' to 'challenge solver' pods that are // provisioned by cert-manager for each Challenge to be completed. Ingress *ACMEChallengeSolverHTTP01Ingress } type ACMEChallengeSolverHTTP01Ingress struct { // Optional service type for Kubernetes solver service ServiceType corev1.ServiceType // The ingress class to use when creating Ingress resources to solve ACME // challenges that use this challenge solver. // Only one of 'class' or 'name' may be specified. Class *string // The name of the ingress resource that should have ACME challenge solving // routes inserted into it in order to solve HTTP01 challenges. // This is typically used in conjunction with ingress controllers like // ingress-gce, which maintains a 1:1 mapping between external IPs and // ingress resources. Name string // Optional pod template used to configure the ACME challenge solver pods // used for HTTP01 challenges PodTemplate *ACMEChallengeSolverHTTP01IngressPodTemplate // Optional ingress template used to configure the ACME challenge solver // ingress used for HTTP01 challenges IngressTemplate *ACMEChallengeSolverHTTP01IngressTemplate } type ACMEChallengeSolverHTTP01IngressPodTemplate struct { // ObjectMeta overrides for the pod used to solve HTTP01 challenges. // Only the 'labels' and 'annotations' fields may be set. // If labels or annotations overlap with in-built values, the values here // will override the in-built values. ACMEChallengeSolverHTTP01IngressPodObjectMeta // PodSpec defines overrides for the HTTP01 challenge solver pod. // Only the 'priorityClassName', 'nodeSelector', 'affinity', // 'serviceAccountName' and 'tolerations' fields are supported currently. // All other fields will be ignored. // +optional Spec ACMEChallengeSolverHTTP01IngressPodSpec } type ACMEChallengeSolverHTTP01IngressPodObjectMeta struct { // Annotations that should be added to the create ACME HTTP01 solver pods. Annotations map[string]string // Labels that should be added to the created ACME HTTP01 solver pods. Labels map[string]string } type ACMEChallengeSolverHTTP01IngressPodSpec struct { // NodeSelector is a selector which must be true for the pod to fit on a node. // Selector which must match a node's labels for the pod to be scheduled on that node. // More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ NodeSelector map[string]string // If specified, the pod's scheduling constraints Affinity *corev1.Affinity // If specified, the pod's tolerations. Tolerations []corev1.Toleration // If specified, the pod's priorityClassName. PriorityClassName string `json:"priorityClassName,omitempty"` // If specified, the pod's service account // +optional ServiceAccountName string `json:"serviceAccountName,omitempty"` } type ACMEChallengeSolverHTTP01IngressTemplate struct { // ObjectMeta overrides for the ingress used to solve HTTP01 challenges. // Only the 'labels' and 'annotations' fields may be set. // If labels or annotations overlap with in-built values, the values here // will override the in-built values. ACMEChallengeSolverHTTP01IngressObjectMeta } type ACMEChallengeSolverHTTP01IngressObjectMeta struct { // Annotations that should be added to the created ACME HTTP01 solver ingress. Annotations map[string]string // Labels that should be added to the created ACME HTTP01 solver ingress. 
Labels map[string]string } // Used to configure a DNS01 challenge provider to be used when solving DNS01 // challenges. // Only one DNS provider may be configured per solver. type ACMEChallengeSolverDNS01 struct { // CNAMEStrategy configures how the DNS01 provider should handle CNAME // records when found in DNS zones. CNAMEStrategy CNAMEStrategy // Use the Akamai DNS zone management API to manage DNS01 challenge records. Akamai *ACMEIssuerDNS01ProviderAkamai // Use the Google Cloud DNS API to manage DNS01 challenge records. CloudDNS *ACMEIssuerDNS01ProviderCloudDNS // Use the Cloudflare API to manage DNS01 challenge records. Cloudflare *ACMEIssuerDNS01ProviderCloudflare // Use the AWS Route53 API to manage DNS01 challenge records. Route53 *ACMEIssuerDNS01ProviderRoute53 // Use the Microsoft Azure DNS API to manage DNS01 challenge records. AzureDNS *ACMEIssuerDNS01ProviderAzureDNS // Use the DigitalOcean DNS API to manage DNS01 challenge records. DigitalOcean *ACMEIssuerDNS01ProviderDigitalOcean // Use the 'ACME DNS' (https://github.com/joohoi/acme-dns) API to manage // DNS01 challenge records. AcmeDNS *ACMEIssuerDNS01ProviderAcmeDNS // Use RFC2136 ("Dynamic Updates in the Domain Name System") (https://datatracker.ietf.org/doc/rfc2136/) // to manage DNS01 challenge records. RFC2136 *ACMEIssuerDNS01ProviderRFC2136 // Configure an external webhook based DNS01 challenge solver to manage // DNS01 challenge records. Webhook *ACMEIssuerDNS01ProviderWebhook } // CNAMEStrategy configures how the DNS01 provider should handle CNAME records // when found in DNS zones. // By default, the None strategy will be applied (i.e. do not follow CNAMEs). type CNAMEStrategy string const ( // NoneStrategy indicates that no CNAME resolution strategy should be used // when determining which DNS zone to update during DNS01 challenges. NoneStrategy = "None" // FollowStrategy will cause cert-manager to recurse through CNAMEs in // order to determine which DNS zone to update during DNS01 challenges. // This is useful if you do not want to grant cert-manager access to your // root DNS zone, and instead delegate the _acme-challenge.example.com // subdomain to some other, less privileged domain. FollowStrategy = "Follow" ) // ACMEIssuerDNS01ProviderAkamai is a structure containing the DNS // configuration for Akamai DNS—Zone Record Management API type ACMEIssuerDNS01ProviderAkamai struct { ServiceConsumerDomain string ClientToken cmmeta.SecretKeySelector ClientSecret cmmeta.SecretKeySelector AccessToken cmmeta.SecretKeySelector } // ACMEIssuerDNS01ProviderCloudDNS is a structure containing the DNS // configuration for Google Cloud DNS type ACMEIssuerDNS01ProviderCloudDNS struct { ServiceAccount *cmmeta.SecretKeySelector Project string HostedZoneName string } // ACMEIssuerDNS01ProviderCloudflare is a structure containing the DNS // configuration for Cloudflare. // One of `apiKeySecretRef` or `apiTokenSecretRef` must be provided. type ACMEIssuerDNS01ProviderCloudflare struct { // Email of the account, only required when using API key based authentication. Email string // API key to use to authenticate with Cloudflare. // Note: using an API token to authenticate is now the recommended method // as it allows greater control of permissions. APIKey *cmmeta.SecretKeySelector // API token used to authenticate with Cloudflare. 
APIToken *cmmeta.SecretKeySelector } // ACMEIssuerDNS01ProviderDigitalOcean is a structure containing the DNS // configuration for DigitalOcean Domains type ACMEIssuerDNS01ProviderDigitalOcean struct { Token cmmeta.SecretKeySelector } // ACMEIssuerDNS01ProviderRoute53 is a structure containing the Route 53 // configuration for AWS type ACMEIssuerDNS01ProviderRoute53 struct { // The AccessKeyID is used for authentication. If not set we fall-back to using env vars, shared credentials file or AWS Instance metadata // see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials AccessKeyID string // The SecretAccessKey is used for authentication. If not set we fall-back to using env vars, shared credentials file or AWS Instance metadata // https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials SecretAccessKey cmmeta.SecretKeySelector // Role is a Role ARN which the Route53 provider will assume using either the explicit credentials AccessKeyID/SecretAccessKey // or the inferred credentials from environment variables, shared credentials file or AWS Instance metadata Role string // If set, the provider will manage only this zone in Route53 and will not do an lookup using the route53:ListHostedZonesByName api call. HostedZoneID string // Always set the region when using AccessKeyID and SecretAccessKey Region string } // ACMEIssuerDNS01ProviderAzureDNS is a structure containing the // configuration for Azure DNS type ACMEIssuerDNS01ProviderAzureDNS struct { // if both this and ClientSecret are left unset MSI will be used ClientID string // if both this and ClientID are left unset MSI will be used ClientSecret *cmmeta.SecretKeySelector SubscriptionID string // when specifying ClientID and ClientSecret then this field is also needed TenantID string ResourceGroupName string HostedZoneName string Environment AzureDNSEnvironment } type AzureDNSEnvironment string const ( AzurePublicCloud AzureDNSEnvironment = "AzurePublicCloud" AzureChinaCloud AzureDNSEnvironment = "AzureChinaCloud" AzureGermanCloud AzureDNSEnvironment = "AzureGermanCloud" AzureUSGovernmentCloud AzureDNSEnvironment = "AzureUSGovernmentCloud" ) // ACMEIssuerDNS01ProviderAcmeDNS is a structure containing the // configuration for ACME-DNS servers type ACMEIssuerDNS01ProviderAcmeDNS struct { Host string AccountSecret cmmeta.SecretKeySelector } // ACMEIssuerDNS01ProviderRFC2136 is a structure containing the // configuration for RFC2136 DNS type ACMEIssuerDNS01ProviderRFC2136 struct { // The IP address or hostname of an authoritative DNS server supporting // RFC2136 in the form host:port. If the host is an IPv6 address it must be // enclosed in square brackets (e.g [2001:db8::1]) ; port is optional. // This field is required. Nameserver string // The name of the secret containing the TSIG value. // If ``tsigKeyName`` is defined, this field is required. TSIGSecret cmmeta.SecretKeySelector // The TSIG Key name configured in the DNS. // If ``tsigSecretSecretRef`` is defined, this field is required. TSIGKeyName string // The TSIG Algorithm configured in the DNS supporting RFC2136. Used only // when ``tsigSecretSecretRef`` and ``tsigKeyName`` are defined. // Supported values are (case-insensitive): ``HMACMD5`` (default), // ``HMACSHA1``, ``HMACSHA256`` or ``HMACSHA512``. TSIGAlgorithm string } // ACMEIssuerDNS01ProviderWebhook specifies configuration for a webhook DNS01 // provider, including where to POST ChallengePayload resources. 
type ACMEIssuerDNS01ProviderWebhook struct { // The API group name that should be used when POSTing ChallengePayload // resources to the webhook apiserver. // This should be the same as the GroupName specified in the webhook // provider implementation. GroupName string // The name of the solver to use, as defined in the webhook provider // implementation. // This will typically be the name of the provider, e.g. 'cloudflare'. SolverName string // Additional configuration that should be passed to the webhook apiserver // when challenges are processed. // This can contain arbitrary JSON data. // Secret values should not be specified in this stanza. // If secret values are needed (e.g. credentials for a DNS service), you // should use a SecretKeySelector to reference a Secret resource. // For details on the schema of this field, consult the webhook provider // implementation's documentation. Config *apiext.JSON } type ACMEIssuerStatus struct { // URI is the unique account identifier, which can also be used to retrieve // account details from the CA URI string // LastRegisteredEmail is the email associated with the latest registered // ACME account, in order to track changes made to registered account // associated with the Issuer LastRegisteredEmail string }
1
22,959
I prefer not to have markdown here; while it is nice for the site, this comment is also shown by `kubectl explain`, where the markup will look weird.
jetstack-cert-manager
go
@@ -26,7 +26,7 @@ describe('monitoring', function () { mockServer.setMessageHandler(request => { const doc = request.document; if (doc.ismaster || doc.hello) { - request.reply(Object.assign({}, mock.DEFAULT_ISMASTER)); + request.reply(Object.assign({}, mock.DEFAULT_ISMASTER_36)); } else if (doc.endSessions) { request.reply({ ok: 1 }); }
1
'use strict'; const mock = require('../../tools/mock'); const { ServerType } = require('../../../src/sdam/common'); const { Topology } = require('../../../src/sdam/topology'); const { Monitor } = require('../../../src/sdam/monitor'); const { expect } = require('chai'); const { ServerDescription } = require('../../../src/sdam/server_description'); class MockServer { constructor(options) { this.s = { pool: { generation: 1 } }; this.description = new ServerDescription(`${options.host}:${options.port}`); this.description.type = ServerType.Unknown; } } describe('monitoring', function () { let mockServer; after(() => mock.cleanup()); beforeEach(function () { return mock.createServer().then(server => (mockServer = server)); }); it('should record roundTripTime', function (done) { mockServer.setMessageHandler(request => { const doc = request.document; if (doc.ismaster || doc.hello) { request.reply(Object.assign({}, mock.DEFAULT_ISMASTER)); } else if (doc.endSessions) { request.reply({ ok: 1 }); } }); // set `heartbeatFrequencyMS` to 250ms to force a quick monitoring check, and wait 500ms to validate below const topology = new Topology(mockServer.hostAddress(), { heartbeatFrequencyMS: 250 }); topology.connect(err => { expect(err).to.not.exist; setTimeout(() => { expect(topology).property('description').property('servers').to.have.length(1); const serverDescription = Array.from(topology.description.servers.values())[0]; expect(serverDescription).property('roundTripTime').to.be.greaterThan(0); topology.close(done); }, 500); }); }); // TODO(NODE-3600): Unskip flaky test it.skip('should recover on error during initial connect', function (done) { // This test should take ~1s because initial server selection fails and an immediate check // is requested. If the behavior of the immediate check is broken, the test will take ~10s // to complete. We want to ensure validation of the immediate check behavior, and therefore // hardcode the test timeout to 2s. 
this.timeout(2000); let acceptConnections = false; mockServer.setMessageHandler(request => { if (!acceptConnections) { request.connection.destroy(); return; } const doc = request.document; if (doc.ismaster || doc.hello) { request.reply(Object.assign({}, mock.DEFAULT_ISMASTER)); } else if (doc.endSessions) { request.reply({ ok: 1 }); } }); setTimeout(() => { acceptConnections = true; }, 250); const topology = new Topology(mockServer.hostAddress(), {}); topology.connect(err => { expect(err).to.not.exist; expect(topology).property('description').property('servers').to.have.length(1); const serverDescription = Array.from(topology.description.servers.values())[0]; expect(serverDescription).property('roundTripTime').to.be.greaterThan(0); topology.close(done); }); }); describe('Monitor', function () { it('should connect and issue an initial server check', function (done) { mockServer.setMessageHandler(request => { const doc = request.document; if (doc.ismaster || doc.hello) { request.reply(Object.assign({}, mock.DEFAULT_ISMASTER)); } }); const server = new MockServer(mockServer.address()); const monitor = new Monitor(server, {}); this.defer(() => monitor.close()); monitor.on('serverHeartbeatFailed', () => done(new Error('unexpected heartbeat failure'))); monitor.on('serverHeartbeatSucceeded', () => done()); monitor.connect(); }); it('should ignore attempts to connect when not already closed', function (done) { mockServer.setMessageHandler(request => { const doc = request.document; if (doc.ismaster || doc.hello) { request.reply(Object.assign({}, mock.DEFAULT_ISMASTER)); } }); const server = new MockServer(mockServer.address()); const monitor = new Monitor(server, {}); this.defer(() => monitor.close()); monitor.on('serverHeartbeatFailed', () => done(new Error('unexpected heartbeat failure'))); monitor.on('serverHeartbeatSucceeded', () => done()); monitor.connect(); monitor.connect(); }); it('should not initiate another check if one is in progress', function (done) { mockServer.setMessageHandler(request => { const doc = request.document; if (doc.ismaster || doc.hello) { setTimeout(() => request.reply(Object.assign({}, mock.DEFAULT_ISMASTER)), 250); } }); const server = new MockServer(mockServer.address()); const monitor = new Monitor(server, {}); const startedEvents = []; monitor.on('serverHeartbeatStarted', event => startedEvents.push(event)); monitor.on('close', () => { expect(startedEvents).to.have.length(2); done(); }); monitor.connect(); monitor.once('serverHeartbeatSucceeded', () => { monitor.requestCheck(); monitor.requestCheck(); monitor.requestCheck(); monitor.requestCheck(); monitor.requestCheck(); const minHeartbeatFrequencyMS = 500; setTimeout(() => { // wait for minHeartbeatFrequencyMS, then request a check and verify another check occurred monitor.once('serverHeartbeatSucceeded', () => { monitor.close(); }); monitor.requestCheck(); }, minHeartbeatFrequencyMS); }); }); it('should not close the monitor on a failed heartbeat', function (done) { let isMasterCount = 0; mockServer.setMessageHandler(request => { const doc = request.document; if (doc.ismaster || doc.hello) { isMasterCount++; if (isMasterCount === 2) { request.reply({ ok: 0, errmsg: 'forced from mock server' }); return; } if (isMasterCount === 3) { request.connection.destroy(); return; } request.reply(mock.DEFAULT_ISMASTER_36); } }); const server = new MockServer(mockServer.address()); server.description = new ServerDescription(server.description.hostAddress); const monitor = new Monitor(server, { heartbeatFrequencyMS: 250, 
minHeartbeatFrequencyMS: 50 }); const events = []; monitor.on('serverHeartbeatFailed', event => events.push(event)); let successCount = 0; monitor.on('serverHeartbeatSucceeded', () => { if (successCount++ === 2) { monitor.close(); } }); monitor.on('close', () => { expect(events).to.have.length(2); done(); }); monitor.connect(); }); it('should upgrade to hello from legacy hello when initial handshake contains helloOk', function (done) { const docs = []; mockServer.setMessageHandler(request => { const doc = request.document; docs.push(doc); if (docs.length === 2) { expect(docs[0]).to.have.property('ismaster', true); expect(docs[0]).to.have.property('helloOk', true); expect(docs[1]).to.have.property('hello', true); done(); } else if (doc.ismaster || doc.hello) { setTimeout( () => request.reply(Object.assign({ helloOk: true }, mock.DEFAULT_ISMASTER)), 250 ); } }); const server = new MockServer(mockServer.address()); const monitor = new Monitor(server, {}); this.defer(() => monitor.close()); monitor.connect(); monitor.once('serverHeartbeatSucceeded', () => { const minHeartbeatFrequencyMS = 500; setTimeout(() => { // wait for minHeartbeatFrequencyMS, then request a check and verify another check occurred monitor.once('serverHeartbeatSucceeded', () => { monitor.close(); }); monitor.requestCheck(); }, minHeartbeatFrequencyMS); }); }); }); });
1
21,292
@durran Was this change intended to be included in this PR?
mongodb-node-mongodb-native
js
@@ -110,7 +110,7 @@ namespace pwiz.SkylineTestFunctional Assert.AreEqual(z, key.Charge); Assert.AreEqual(adduct, key.Adduct); Assert.AreEqual(caffeineInChiKey, key.Target.ToString()); - var viewLibPepInfo = new ViewLibraryPepInfo(key); + var viewLibPepInfo = new ViewLibraryPepInfo(key, null); Assert.AreEqual(key, viewLibPepInfo.Key); var smallMolInfo = viewLibPepInfo.GetSmallMoleculeLibraryAttributes(); Assert.AreEqual(moleculeName, smallMolInfo.MoleculeName);
1
/* * Original author: Brendan MacLean <brendanx .at. u.washington.edu>, * MacCoss Lab, Department of Genome Sciences, UW * * Copyright 2009 University of Washington - Seattle, WA * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ using System.Collections.Generic; using System.Diagnostics; using System.IO; using System.Linq; using Microsoft.VisualStudio.TestTools.UnitTesting; using pwiz.BiblioSpec; using pwiz.Skyline.Alerts; using pwiz.Skyline.Controls.SeqNode; using pwiz.Skyline.EditUI; using pwiz.Skyline.Model; using pwiz.Skyline.Model.DocSettings; using pwiz.Skyline.Model.Irt; using pwiz.Skyline.Model.Lib; using pwiz.Skyline.Properties; using pwiz.Skyline.SettingsUI; using pwiz.Skyline.SettingsUI.Irt; using pwiz.Skyline.Util; using pwiz.SkylineTestUtil; namespace pwiz.SkylineTestFunctional { /// <summary> /// Summary description for LibraryBuildTest /// </summary> [TestClass] public class LibraryBuildTest : AbstractFunctionalTest { private string _libraryName; public LibraryBuildTest() { _libraryName = "library_test_试验"; } private PeptideSettingsUI PeptideSettingsUI { get; set; } private bool ReportLibraryBuildFailures { get; set; } [TestMethod] public void TestLibraryBuild() { TestFilesZip = @"TestFunctional\LibraryBuildTest.zip"; RunFunctionalTest(); } protected override void DoTest() { MainTest(); CirtLibraryBuildTest(); } private void MainTest() { // Clean-up before running the test RunUI(() => SkylineWindow.ModifyDocument("Set default settings", doc => doc.ChangeSettings(SrmSettingsList.GetDefault()))); // Check using libkey with small molecules var adduct = Adduct.FromStringAssumeProtonated("M+3Na"); var z = adduct.AdductCharge; const string caffeineFormula = "C8H10N4O2"; const string caffeineInChiKey = "RYYVLZVUVIJVGH-UHFFFAOYSA-N"; const string caffeineHMDB = "HMDB01847"; const string caffeineInChi = "InChI=1S/C8H10N4O2/c1-10-4-9-6-5(10)7(13)12(3)8(14)11(6)2/h4H,1-3H3"; const string caffeineCAS = "58-08-2"; const string caffeineSMILES = "Cn1cnc2n(C)c(=O)n(C)c(=O)c12"; const string caffeineKEGG = "C07481"; var mId = new MoleculeAccessionNumbers(string.Join("\t", MoleculeAccessionNumbers.TagHMDB + ":" + caffeineHMDB, MoleculeAccessionNumbers.TagInChI + ":" + caffeineInChi, MoleculeAccessionNumbers.TagCAS + ":" + caffeineCAS, MoleculeAccessionNumbers.TagInChiKey + ":" + caffeineInChiKey, MoleculeAccessionNumbers.TagSMILES + ":" + caffeineSMILES, MoleculeAccessionNumbers.TagKEGG + ":" + caffeineKEGG)); Assert.AreEqual(caffeineInChiKey, mId.GetInChiKey()); Assert.AreEqual(caffeineCAS, mId.GetCAS()); Assert.AreEqual(caffeineSMILES, mId.GetSMILES()); Assert.AreEqual(caffeineKEGG, mId.GetKEGG()); var moleculeName = "caffeine"; var smallMolAttributes = SmallMoleculeLibraryAttributes.Create(moleculeName, caffeineFormula, caffeineInChiKey, string.Join("\t", MoleculeAccessionNumbers.TagHMDB + ":" + caffeineHMDB, MoleculeAccessionNumbers.TagInChI + ":" + caffeineInChi, MoleculeAccessionNumbers.TagCAS + ":" + caffeineCAS, MoleculeAccessionNumbers.TagSMILES + ":" + caffeineSMILES, 
MoleculeAccessionNumbers.TagKEGG + ":" + caffeineKEGG)); LibKey key; for (var loop = 0; loop++ < 2;) { key = new LibKey(smallMolAttributes, adduct); Assert.IsFalse(key.IsPrecursorKey); Assert.IsFalse(key.IsProteomicKey); Assert.IsTrue(key.IsSmallMoleculeKey); Assert.IsFalse(key.IsModified); Assert.AreEqual(0, key.ModificationCount); Assert.AreEqual(z, key.Charge); Assert.AreEqual(adduct, key.Adduct); Assert.AreEqual(caffeineInChiKey, key.Target.ToString()); var viewLibPepInfo = new ViewLibraryPepInfo(key); Assert.AreEqual(key, viewLibPepInfo.Key); var smallMolInfo = viewLibPepInfo.GetSmallMoleculeLibraryAttributes(); Assert.AreEqual(moleculeName, smallMolInfo.MoleculeName); Assert.AreEqual(caffeineInChiKey, smallMolInfo.InChiKey); Assert.AreEqual(caffeineFormula, smallMolInfo.ChemicalFormula); Assert.IsTrue(smallMolInfo.OtherKeys.Contains(caffeineCAS)); Assert.IsTrue(smallMolInfo.OtherKeys.Contains(caffeineInChi)); Assert.IsTrue(smallMolInfo.OtherKeys.Contains(caffeineHMDB)); Assert.IsTrue(smallMolInfo.OtherKeys.Contains(caffeineSMILES)); Assert.IsTrue(smallMolInfo.OtherKeys.Contains(caffeineKEGG)); adduct = Adduct.FromString("M+3Si", Adduct.ADDUCT_TYPE.non_proteomic, z = -17); // Not realistic, but let's see if it's handled consistently } // Check general libkey operation var seq = "YTQSNSVC[+57.0]YAK"; key = new LibKey(seq, Adduct.DOUBLY_PROTONATED); Assert.IsFalse(key.IsPrecursorKey); Assert.IsTrue(key.IsProteomicKey); Assert.IsFalse(key.IsSmallMoleculeKey); Assert.IsTrue(key.IsModified); Assert.AreEqual(2, key.Charge); Assert.AreEqual(1, key.ModificationCount); Assert.AreEqual(Adduct.DOUBLY_PROTONATED, key.Adduct); Assert.AreEqual(seq, key.Target.ToString()); // Test error conditions BuildLibraryError("missing_charge.pep.XML", TestFilesDir.FullPath); BuildLibraryError("non_int_charge.pep.XML", null); BuildLibraryError("zero_charge.pep.XML", null); BuildLibraryError("truncated.pep.XML", null); BuildLibraryError("no_such_file.pep.XML", null, "Failed to open"); BuildLibraryError("missing_mzxml.pep.XML", null, "Could not find spectrum file"); // Check for proper handling of labeled addducts in small molecule files // (formerly this would throw on a null object, fixed with the use of ExplicitMods.EMPTY) BuildLibraryValid("heavy_adduct.ssl", true, false, false, 1); // Make sure explorer handles this adduct type var viewLibUI = ShowDialog<ViewLibraryDlg>(SkylineWindow.ViewSpectralLibraries); RunUI(() => AssertEx.IsTrue(viewLibUI.GraphItem.IonLabels.Any())); RunUI(viewLibUI.CancelDialog); // Barbara added code to ProteoWizard to rebuild a missing or invalid mzXML index // BuildLibraryError("bad_mzxml.pep.XML", "<index> not found"); BuildLibraryValid(TestFilesDir.GetTestPath("library_errors"), new[] { "bad_mzxml.pep.XML" }, false, false, false, 1); string libraryBaseName = _libraryName; // Test mascot parser _libraryName = libraryBaseName + "mascot"; string libraryMascot = _libraryName + BiblioSpecLiteSpec.EXT; BuildLibraryValid(TestFilesDir.GetTestPath("mascot"), new[] { "F027319.dat" }, true, false, false, 121, 4); Assert.IsTrue(File.Exists(TestFilesDir.GetTestPath(libraryMascot))); // Test successful builds _libraryName = libraryBaseName + "a"; string libraryA = _libraryName + BiblioSpecLiteSpec.EXT; string libraryARedundant = _libraryName + BiblioSpecLiteSpec.EXT_REDUNDANT; BuildLibraryValid("CPTAC_Set4_725_091509.pep.XML", true, false, false, 1); BuildLibraryValid("CPTAC_Set4_610_080509.pep.XML", true, false, true, 2); _libraryName = libraryBaseName + "b"; string libraryB = _libraryName + 
BiblioSpecLiteSpec.EXT; BuildLibraryValid("CPTAC_Set4_624_072409.pep.XML", false, false, false, 6); _libraryName = libraryBaseName + "c"; string libraryC = _libraryName + BiblioSpecLiteSpec.EXT; BuildLibraryValid(TestFilesDir.FullPath, new[] {libraryA, libraryB}, false, false, false, 8); Assert.IsTrue(File.Exists(TestFilesDir.GetTestPath(libraryA))); Assert.IsTrue(File.Exists(TestFilesDir.GetTestPath(libraryARedundant))); Assert.IsTrue(File.Exists(TestFilesDir.GetTestPath(libraryB))); Assert.IsTrue(File.Exists(TestFilesDir.GetTestPath(libraryC))); // Test peptide filter const string filterList = "ACARPIISVYSEK\n" + // TODO: Having the modified sequence as the first line causes an error with European number formats "ADRDESSPYAAM[+{0:F01}]IAAQDVAQR\n" + "ADAIQAGASQFETSAAK"; PastePeptideList(string.Format(filterList, 16.0), true, 0, 3, true); _libraryName = libraryBaseName + "filter"; string libraryFilter = _libraryName + BiblioSpecLiteSpec.EXT; BuildLibraryValid(TestFilesDir.GetTestPath("maxquant"), new[] { "test.msms.txt" }, false, true, false, 2); Assert.IsTrue(File.Exists(TestFilesDir.GetTestPath(libraryFilter))); RunUI(SkylineWindow.Undo); RunUI(SkylineWindow.Undo); // Test AddPathsDlg (file not found) EnsurePeptideSettings(); var buildLibraryDlg = ShowDialog<BuildLibraryDlg>(PeptideSettingsUI.ShowBuildLibraryDlg); string[] invalidPaths = { Path.Combine(TestFilesDir.GetTestPath("maxquant"), "test.msms.xml"), Path.Combine(TestFilesDir.GetTestPath("library_valid"), "CPTAC_Set4_624_072409.pep.XML") }; TestAddPaths(buildLibraryDlg, invalidPaths, true); // Test AddPathsDlg (file invalid type) string[] invalidTypes = { Path.Combine(TestFilesDir.GetTestPath("maxquant"), "test.msms.txt"), Path.Combine(TestFilesDir.GetTestPath("maxquant"), "mqpar.xml") }; TestAddPaths(buildLibraryDlg, invalidTypes, true); // Test AddPathsDlg (valid files) string[] goodPaths = { Path.Combine(TestFilesDir.GetTestPath("maxquant"), "test.msms.txt"), Path.Combine(TestFilesDir.GetTestPath("library_valid"), "CPTAC_Set4_624_072409.pep.XML") }; TestAddPaths(buildLibraryDlg, goodPaths, false); OkDialog(buildLibraryDlg, buildLibraryDlg.CancelDialog); const string heavyRPeptide = "TPAQFDADELR"; const string oxidizedMPeptide = "LVGNMHGDETVSR"; const string peptideList = heavyRPeptide + "\n" + oxidizedMPeptide + "\n" + "ALSIGFETCR\n" + "GNMHGDETVSR\n" + "VGNMHGDETVSR"; PastePeptideList(peptideList, true, 0, 1); // Set modifications on peptides to verify they connect with library spectra. 
const LabelAtoms labelAtoms = LabelAtoms.C13 | LabelAtoms.N15; const string heavyR = "Heavy R"; Settings.Default.HeavyModList.Add(new StaticMod(heavyR, "R", ModTerminus.C, null, labelAtoms, null, null)); const string oMeth = "Oxidized Methionine"; Settings.Default.StaticModList.Add(new StaticMod(oMeth, "M", null, "O")); var sequenceTree = SkylineWindow.SequenceTree; var docCurrent = SkylineWindow.Document; // Select the heavyR peptide PeptideTreeNode nodePepTree = null; IdentityPath pathPep = docCurrent.GetPathTo((int) SrmDocument.Level.Molecules, 0); RunUI(() => { sequenceTree.SelectedPath = pathPep; nodePepTree = sequenceTree.SelectedNode as PeptideTreeNode; }); Assert.IsNotNull(nodePepTree); Assert.AreEqual(heavyRPeptide, nodePepTree.DocNode.Peptide.Sequence); // Set the Heavy R modification explicitly var editPepModsDlg = ShowDialog<EditPepModsDlg>(SkylineWindow.ModifyPeptide); RunUI(() => { editPepModsDlg.SetModification(heavyRPeptide.Length - 1, IsotopeLabelType.heavy, heavyR); editPepModsDlg.OkDialog(); }); WaitForCondition(() => (SkylineWindow.Document.Molecules.First().TransitionGroupCount == 2)); // The peptide should now match the spectrum in the library, and have // both heavy and light precursors, with ranked transitions PeptideDocNode nodePep = null; RunUI(() => nodePep = nodePepTree.DocNode); Assert.IsNotNull(nodePep); Debug.Assert(nodePep != null); Assert.AreEqual(2, nodePep.Children.Count, "Missing precursor for heavy R peptide."); docCurrent = SkylineWindow.Document; foreach (TransitionGroupDocNode nodeGroup in nodePep.Children) AssertLibInfo(docCurrent, nodeGroup); // Which means all transition groups should now have spectrum info foreach (var nodeGroup in docCurrent.PeptideTransitionGroups) AssertLibInfo(docCurrent, nodeGroup); // New document var docNew = new SrmDocument(SrmSettingsList.GetDefault()); var docNewCopy = docNew; RunUI(() => SkylineWindow.SwitchDocument(docNewCopy, null)); const string idpList3 = "FHYKTDQGIK\n" + "WCAIGHQER\n" + "WCTISTHEANK"; int idpCount3 = idpList3.Split('\n').Length; const string idpList = "ADVTLGGGAK\n" + "AGFAGDDAPR\n" + "ALEFAKK\n" + "CCTESLVNR\n" + "DSYVGDEAQSK\n" + "YICDNQDTISSK\n" + // charge 3 peptides all have 2 also idpList3; int idpCount = idpList.Split('\n').Length; _libraryName = libraryBaseName + "_idp"; string libraryIdp = _libraryName + BiblioSpecLiteSpec.EXT; BuildLibraryValid(TestFilesDir.GetTestPath("idp_xml"), new[] { "orbi-small-eg.idpXML" }, false, false, false, idpCount + idpCount3); Assert.IsTrue(File.Exists(TestFilesDir.GetTestPath(libraryIdp))); // Add peptides expected to have charge 2 spectra in the library PastePeptideList(idpList, true, 0, 0); // Undo the paste RunUI(SkylineWindow.Undo); // Try filtering for only charge 3 spectra var transitionSettingsUI = ShowDialog<TransitionSettingsUI>( SkylineWindow.ShowTransitionSettingsUI); RunUI(() => { transitionSettingsUI.PrecursorCharges = "3"; transitionSettingsUI.OkDialog(); }); PastePeptideList(idpList, false, idpCount - idpCount3 + 1 /* missing cleavage*/, 0); // New document var docNewCopy2 = docNew; RunUI(() => SkylineWindow.SwitchDocument(docNewCopy2, null)); _libraryName = libraryBaseName + "_cpas1"; string libraryCpas1 = _libraryName + BiblioSpecLiteSpec.EXT; BuildLibraryValid(TestFilesDir.GetTestPath("cpas"), null, false, false, false, 3); Assert.IsTrue(File.Exists(TestFilesDir.GetTestPath(libraryCpas1))); // These are very poor searches, so repeat with no filter Settings.Default.LibraryResultCutOff = 0; _libraryName = libraryBaseName + "_cpas2"; 
BuildLibraryValid(TestFilesDir.GetTestPath("cpas"), null, false, false, false, 100, 100); // And, since the spectra are really poor, allow lots of // possibilities for fragment ions. var transitionSettingsCpas = ShowDialog<TransitionSettingsUI>( SkylineWindow.ShowTransitionSettingsUI); RunUI(() => { transitionSettingsCpas.PrecursorCharges = transitionSettingsCpas.ProductCharges = "1,2,3"; transitionSettingsCpas.FragmentTypes = "y,b"; transitionSettingsCpas.InstrumentMaxMz = 2000; transitionSettingsCpas.OkDialog(); }); EnsurePeptideSettings(); RunUI(() => { // Turn off carbamidomethyl cys, since not in these searches PeptideSettingsUI.PickedStaticMods = new string[0]; PeptideSettingsUI.OkDialog(); }); // Get the set of peptides to paste from the library, since there // are a lot. var setPeptides = new HashSet<Target>(); var library = SkylineWindow.Document.Settings.PeptideSettings.Libraries.Libraries[0]; foreach (var libKey in library.Keys) { if (!libKey.IsModified) setPeptides.Add(libKey.Target); } string cpasPeptides = string.Join("\n", setPeptides.Select(p => p.ToString()).ToArray()); var pasteFilteredPeptideDlg = ShowDialog<PasteFilteredPeptidesDlg>( () => SkylineWindow.Paste(cpasPeptides)); RunUI(pasteFilteredPeptideDlg.NoDialog); Assert.IsTrue(WaitForCondition(() => SkylineWindow.Document.PeptideCount == setPeptides.Count), string.Format("Expecting {0} peptides, found {1}.", setPeptides.Count, SkylineWindow.Document.PeptideCount)); Assert.AreEqual(setPeptides.Count, SkylineWindow.Document.PeptideTransitionGroupCount, "Expecting precursors for peptides matched to library spectrum."); // New document docNew = new SrmDocument(SrmSettingsList.GetDefault()); RunUI(() => SkylineWindow.SwitchDocument(docNew, null)); // Tests for adding iRTs to spectral library after building // 1. ask to recalibrate iRTs // 2. ask to add iRTs // 3. 
if added iRTs, ask to add RT predictor // no recalibrate, add iRTs, no add predictor _libraryName = libraryBaseName + "_irt1"; // library_test_irt1 BuildLibraryIrt(true, false, false); RunUI(() => Assert.IsTrue(PeptideSettingsUI.Prediction.RetentionTime == null)); // no recalibrate, add iRTs, add predictor _libraryName = libraryBaseName + "_irt2"; // library_test_irt2 BuildLibraryIrt(true, false, true); RunUI(() => Assert.IsTrue(PeptideSettingsUI.Prediction.RetentionTime.Name.Equals(_libraryName))); var editIrtDlg2 = ShowDialog<EditIrtCalcDlg>(PeptideSettingsUI.EditCalculator); RunUI(() => Assert.IsTrue(ReferenceEquals(editIrtDlg2.IrtStandards, IrtStandard.BIOGNOSYS_10))); OkDialog(editIrtDlg2, editIrtDlg2.CancelDialog); // recalibrate, add iRTs, no add predictor _libraryName = libraryBaseName + "_irt3"; // library_test_irt3 BuildLibraryIrt(true, true, false); RunUI(() => Assert.IsTrue(PeptideSettingsUI.Prediction.RetentionTime.Name.Equals(libraryBaseName + "_irt2"))); // recalibrate, add iRTs, add predictor _libraryName = libraryBaseName + "_irt4"; // library_test_irt4 BuildLibraryIrt(true, true, true); RunUI(() => Assert.IsTrue(PeptideSettingsUI.Prediction.RetentionTime.Name.Equals(_libraryName))); var editIrtDlg4 = ShowDialog<EditIrtCalcDlg>(PeptideSettingsUI.EditCalculator); RunUI(() => Assert.IsTrue(ReferenceEquals(editIrtDlg4.IrtStandards, IrtStandard.EMPTY))); OkDialog(editIrtDlg4, editIrtDlg4.CancelDialog); OkDialog(PeptideSettingsUI, PeptideSettingsUI.CancelDialog); } private void CirtLibraryBuildTest() { RunUI(() => { SkylineWindow.NewDocument(true); SkylineWindow.ModifyDocument("Set default settings", doc => doc.ChangeSettings(SrmSettingsList.GetDefault())); }); var peptideSettingsDlg = ShowDialog<PeptideSettingsUI>(SkylineWindow.ShowPeptideSettingsUI); // build a library with CiRT peptides BuildLibrary(TestFilesDir.GetTestPath("maxquant_cirt"), null, null, false, true, false, false, IrtStandard.CIRT_SHORT); var addIrtStandardsDlg = WaitForOpenForm<AddIrtStandardsDlg>(); // use 15 CiRT peptides as standards const int numStandards = 15; RunUI(() => addIrtStandardsDlg.StandardCount = numStandards); var addIrtPeptidesDlg = ShowDialog<AddIrtPeptidesDlg>(addIrtStandardsDlg.OkDialog); // don't recalibrate; add RT predictor var recalibrateDlg = ShowDialog<MultiButtonMsgDlg>(addIrtPeptidesDlg.OkDialog); var addRetentionTimePredictorDlg = ShowDialog<AddRetentionTimePredictorDlg>(recalibrateDlg.ClickNo); OkDialog(addRetentionTimePredictorDlg, addRetentionTimePredictorDlg.OkDialog); // verify that there are 15 CiRT peptides as standards in the calculator var editIrtCalcDlg = ShowDialog<EditIrtCalcDlg>(peptideSettingsDlg.EditCalculator); var cirtPeptides = new TargetMap<bool>(IrtStandard.CIRT.Peptides.Select(pep => new KeyValuePair<Target, bool>(pep.ModifiedTarget, true))); RunUI(() => { Assert.AreEqual(numStandards, editIrtCalcDlg.StandardPeptideCount); Assert.IsTrue(editIrtCalcDlg.StandardPeptides.All(pep => cirtPeptides.ContainsKey(pep.ModifiedTarget))); }); OkDialog(editIrtCalcDlg, editIrtCalcDlg.CancelDialog); OkDialog(peptideSettingsDlg, peptideSettingsDlg.CancelDialog); RunUI(() => SkylineWindow.SaveDocument(TestFilesDir.GetTestPath("cirt_test.sky"))); } private static void PastePeptideList(string peptideList, bool keep, int filteredPeptideCount, int missingSpectraCount, bool expectMessage = false) { int peptideCount = peptideList.Split('\n').Length; var pasteFilteredPeptideDlg = ShowDialog<PasteFilteredPeptidesDlg>( () => SkylineWindow.Paste(peptideList)); if (keep) { 
OkDialog(pasteFilteredPeptideDlg, pasteFilteredPeptideDlg.NoDialog); } else { OkDialog(pasteFilteredPeptideDlg, pasteFilteredPeptideDlg.YesDialog); peptideCount -= filteredPeptideCount; } if (expectMessage) { var messageDlg = WaitForOpenForm<MultiButtonMsgDlg>(); OkDialog(messageDlg, messageDlg.Btn1Click); } Assert.IsTrue(WaitForCondition(() => SkylineWindow.Document.PeptideCount == peptideCount), string.Format("Expecting {0} peptides, found {1}.", peptideCount, SkylineWindow.Document.PeptideCount)); if (peptideCount - missingSpectraCount != SkylineWindow.Document.PeptideTransitionGroupCount) { string peptideSeqs = string.Join(", ", (from nodeGroup in SkylineWindow.Document.PeptideTransitionGroups select nodeGroup.TransitionGroup.Peptide.Sequence).ToArray()); Assert.AreEqual(peptideCount - missingSpectraCount, SkylineWindow.Document.PeptideTransitionGroupCount, string.Format("Expecting precursors for peptides matched to library spectrum. Found precursors for {0}.", peptideSeqs)); } var docCurrent = SkylineWindow.Document; foreach (var nodeGroup in docCurrent.PeptideTransitionGroups) AssertLibInfo(docCurrent, nodeGroup); } private static void AssertLibInfo(SrmDocument docCurrent, TransitionGroupDocNode nodeGroup) { Assert.IsTrue(nodeGroup.HasLibInfo, string.Format("Precursor {0} found without library info", nodeGroup.TransitionGroup)); int ionCount = docCurrent.Settings.TransitionSettings.Libraries.IonCount; Assert.AreEqual(ionCount, nodeGroup.Children.Count); foreach (TransitionDocNode nodeTran in nodeGroup.Children) { Assert.IsTrue(nodeTran.HasLibInfo, string.Format("Transition {0} found without library info", nodeTran.Transition)); Assert.IsTrue(nodeTran.LibInfo.Rank <= ionCount); } } private void TestAddPaths(BuildLibraryDlg buildLibraryDlg, string[] paths, bool error) { RunDlg<AddPathsDlg>(buildLibraryDlg.ShowAddPathsDlg, addPathsDlg => { addPathsDlg.FileNames = paths; if (error) { string errorMsg = addPathsDlg.CheckForError(); Assert.AreNotEqual(string.Empty, errorMsg); } addPathsDlg.CancelDialog(); }); } private void BuildLibraryValid(string inputFile, bool keepRedundant, bool filterPeptides, bool append, int expectedSpectra) { BuildLibraryValid(TestFilesDir.GetTestPath("library_valid"), new[] { inputFile }, keepRedundant, filterPeptides, append, expectedSpectra); } private void BuildLibraryValid(string inputDir, IEnumerable<string> inputFiles, bool keepRedundant, bool filterPeptides, bool append, int expectedSpectra, int expectedAmbiguous = 0) { ReportLibraryBuildFailures = true; BuildLibrary(inputDir, inputFiles, null, keepRedundant, false, filterPeptides, append, null); if (expectedAmbiguous > 0) { var ambiguousDlg = WaitForOpenForm<MessageDlg>(); RunUI(() => Assert.AreEqual(expectedAmbiguous, ambiguousDlg.Message.Split('\n').Length - 1, ambiguousDlg.Message)); OkDialog(ambiguousDlg, ambiguousDlg.OkDialog); } if (!TryWaitForConditionUI(() => PeptideSettingsUI.AvailableLibraries.Contains(_libraryName))) { var messageDlg = FindOpenForm<MessageDlg>(); if (messageDlg != null) AssertEx.Fail("Unexpected MessageDlg: " + messageDlg.DetailedMessage); AssertEx.Fail("Failed waiting for the library {0} in Peptide Settings", _libraryName); } string nonRedundantBuildPath = TestFilesDir.GetTestPath(_libraryName + BiblioSpecLiteSpec.EXT); WaitForConditionUI(() => File.Exists(nonRedundantBuildPath), string.Format("Failed waiting for the non-redundant library {0}", nonRedundantBuildPath)); WaitForConditionUI(() => !PeptideSettingsUI.IsBuildingLibrary, string.Format("Failed waiting for library {0} 
build to complete", _libraryName)); RunUI(() => PeptideSettingsUI.PickedLibraries = new[] { _libraryName }); OkDialog(PeptideSettingsUI, PeptideSettingsUI.OkDialog); // Wait for the library to load AbstractFunctionalTestEx.WaitForLibrary(expectedSpectra); var librarySettings = SkylineWindow.Document.Settings.PeptideSettings.Libraries; Assert.IsTrue(librarySettings.IsLoaded); Assert.AreEqual(1, librarySettings.Libraries.Count); Assert.AreEqual(_libraryName, librarySettings.Libraries[0].Name); Assert.AreEqual(expectedSpectra, librarySettings.Libraries[0].Keys.Count()); } private void BuildLibraryError(string inputFile, string libraryPath, params string[] messageParts) { string redundantBuildPath = TestFilesDir.GetTestPath(_libraryName + BiblioSpecLiteSpec.EXT_REDUNDANT); FileEx.SafeDelete(redundantBuildPath); string nonredundantBuildPath = TestFilesDir.GetTestPath(_libraryName + BiblioSpecLiteSpec.EXT); FileEx.SafeDelete(nonredundantBuildPath); ReportLibraryBuildFailures = false; BuildLibrary(TestFilesDir.GetTestPath("library_errors"), new[] {inputFile}, libraryPath, false, false, false, false, null); var messageDlg = WaitForOpenForm<MessageDlg>(); Assert.IsNotNull(messageDlg, "No message box shown"); AssertEx.Contains(messageDlg.Message, "ERROR"); if (messageParts.Length == 0) AssertEx.Contains(messageDlg.Message, inputFile, "line"); else AssertEx.Contains(messageDlg.Message, messageParts); OkDialog(messageDlg, messageDlg.OkDialog); CheckLibraryExistence(redundantBuildPath, false); CheckLibraryExistence(nonredundantBuildPath, false); WaitForConditionUI(() => !PeptideSettingsUI.IsBuildingLibrary); } private void BuildLibraryIrt(bool addIrts, bool recalibrate, bool addPredictor) { BuildLibrary(TestFilesDir.GetTestPath("maxquant_irt"), new[] {"irt_test.msms.txt"}, null, false, false, false, false, IrtStandard.BIOGNOSYS_10); var addIrtDlg = WaitForOpenForm<AddIrtPeptidesDlg>(); if (!addIrts) { OkDialog(addIrtDlg, addIrtDlg.CancelDialog); return; } var recalibrateDlg = ShowDialog<MultiButtonMsgDlg>(addIrtDlg.OkDialog); var addPredictorDlg = recalibrate ? ShowDialog<AddRetentionTimePredictorDlg>(recalibrateDlg.BtnYesClick) : ShowDialog<AddRetentionTimePredictorDlg>(recalibrateDlg.BtnCancelClick); if (addPredictor) OkDialog(addPredictorDlg, addPredictorDlg.OkDialog); else OkDialog(addPredictorDlg, addPredictorDlg.NoDialog); } private void EnsurePeptideSettings() { PeptideSettingsUI = FindOpenForm<PeptideSettingsUI>() ?? 
ShowDialog<PeptideSettingsUI>(SkylineWindow.ShowPeptideSettingsUI); // Control console output on failure for diagnosing nightly test failures PeptideSettingsUI.ReportLibraryBuildFailure = ReportLibraryBuildFailures; // Allow a person watching to see what is going on in the Library tab RunUI(() => { if (PeptideSettingsUI.SelectedTab != PeptideSettingsUI.TABS.Library) PeptideSettingsUI.SelectedTab = PeptideSettingsUI.TABS.Library; }); } private void BuildLibrary(string inputDir, IEnumerable<string> inputFiles, string libraryPath, bool keepRedundant, bool includeAmbiguous, bool filterPeptides, bool append, IrtStandard irtStandard) { EnsurePeptideSettings(); var buildLibraryDlg = ShowDialog<BuildLibraryDlg>(PeptideSettingsUI.ShowBuildLibraryDlg); List<string> inputPaths = null; if (inputFiles != null) inputPaths = new List<string>(inputFiles).ConvertAll(f => Path.Combine(inputDir, f)); string autoLibPath = null; RunUI(() => { if (libraryPath != null) buildLibraryDlg.LibraryPath = libraryPath; buildLibraryDlg.LibraryName = _libraryName; autoLibPath = buildLibraryDlg.LibraryPath; buildLibraryDlg.LibraryKeepRedundant = keepRedundant; buildLibraryDlg.IncludeAmbiguousMatches = includeAmbiguous; buildLibraryDlg.LibraryFilterPeptides = filterPeptides; buildLibraryDlg.LibraryBuildAction = (append ? LibraryBuildAction.Append : LibraryBuildAction.Create); if (irtStandard != null && !irtStandard.Equals(IrtStandard.EMPTY)) buildLibraryDlg.IrtStandard = irtStandard; buildLibraryDlg.OkWizardPage(); if (inputPaths != null) buildLibraryDlg.AddInputFiles(inputPaths); else buildLibraryDlg.AddDirectory(inputDir); }); OkDialog(buildLibraryDlg, buildLibraryDlg.OkWizardPage); if (inputPaths != null) foreach (var inputFile in inputPaths) if (BiblioSpecLiteBuilder.HasEmbeddedSpectra(inputFile)) { var embeddedSpectraDlg = WaitForOpenForm<MultiButtonMsgDlg>(); OkDialog(embeddedSpectraDlg, embeddedSpectraDlg.BtnYesClick); } Assert.AreEqual(TestFilesDir.GetTestPath(_libraryName + BiblioSpecLiteSpec.EXT), autoLibPath); } private static void CheckLibraryExistence(string libPath, bool libExist) { // Wait for journal to be removed string libJournalPath = libPath + BlibBuild.EXT_SQLITE_JOURNAL; WaitForCondition(() => !File.Exists(libJournalPath)); Assert.IsFalse(File.Exists(libJournalPath), string.Format("Unexpected library journal {0} found", libJournalPath)); if (libExist) { Assert.IsTrue(File.Exists(libPath), string.Format("Expected library {0} not found", libPath)); } else { Assert.IsFalse(File.Exists(libPath), string.Format("Unexpected library {0} found", libPath)); } } } }
1
13,522
Feels like this could have a default null value to remove the need for this explicit "null" use.
ProteoWizard-pwiz
.cs
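The reviewer's suggestion on this row is to give the mod constructor a default value so call sites don't have to spell out `null`. Below is a rough conceptual sketch of that idea in Go (which has no default parameters, so a variadic optional argument stands in for one); the type and field names are illustrative, not taken from the Skyline codebase.

package main

import "fmt"

// staticMod stands in for the modification object discussed above;
// the fields are hypothetical, not the real Skyline type.
type staticMod struct {
	name    string
	aas     string
	formula string
}

// newStaticMod lets callers omit the optional formula entirely, which is the
// spirit of the "default null value" suggestion: the constructor supplies the
// default instead of every call site passing an explicit null.
func newStaticMod(name, aas string, formula ...string) staticMod {
	m := staticMod{name: name, aas: aas}
	if len(formula) > 0 {
		m.formula = formula[0]
	}
	return m
}

func main() {
	fmt.Println(newStaticMod("Heavy R", "R")) // formula omitted, defaults to empty
	fmt.Println(newStaticMod("Oxidized Methionine", "M", "O"))
}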
@@ -145,12 +145,12 @@ func (s *server) run(ctx context.Context, t cli.Telemetry) error { fs, err := s.createFilestore(ctx, cfg, t.Logger) if err != nil { - t.Logger.Error("failed creating firestore", zap.Error(err)) + t.Logger.Error("failed creating filestore", zap.Error(err)) return err } defer func() { if err := fs.Close(); err != nil { - t.Logger.Error("failed closing firestore client", zap.Error(err)) + t.Logger.Error("failed closing filestore client", zap.Error(err)) } }()
1
// Copyright 2020 The PipeCD Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package server import ( "context" "errors" "fmt" "net/http" "time" jwtgo "github.com/dgrijalva/jwt-go" "github.com/spf13/cobra" "go.uber.org/zap" "golang.org/x/sync/errgroup" "github.com/pipe-cd/pipe/pkg/admin" "github.com/pipe-cd/pipe/pkg/app/api/api" "github.com/pipe-cd/pipe/pkg/app/api/applicationlivestatestore" "github.com/pipe-cd/pipe/pkg/app/api/authhandler" "github.com/pipe-cd/pipe/pkg/app/api/commandstore" "github.com/pipe-cd/pipe/pkg/app/api/pipedtokenverifier" "github.com/pipe-cd/pipe/pkg/app/api/service/webservice" "github.com/pipe-cd/pipe/pkg/app/api/stagelogstore" "github.com/pipe-cd/pipe/pkg/cache/rediscache" "github.com/pipe-cd/pipe/pkg/cli" "github.com/pipe-cd/pipe/pkg/config" "github.com/pipe-cd/pipe/pkg/datastore" "github.com/pipe-cd/pipe/pkg/datastore/firestore" "github.com/pipe-cd/pipe/pkg/datastore/mongodb" "github.com/pipe-cd/pipe/pkg/filestore" "github.com/pipe-cd/pipe/pkg/filestore/gcs" "github.com/pipe-cd/pipe/pkg/filestore/minio" "github.com/pipe-cd/pipe/pkg/jwt" "github.com/pipe-cd/pipe/pkg/model" "github.com/pipe-cd/pipe/pkg/redis" "github.com/pipe-cd/pipe/pkg/rpc" "github.com/pipe-cd/pipe/pkg/version" ) var ( defaultSigningMethod = jwtgo.SigningMethodHS256 ) type httpHandler interface { Register(func(pattern string, handler func(http.ResponseWriter, *http.Request))) } type server struct { pipedAPIPort int webAPIPort int httpPort int adminPort int cacheAddress string gracePeriod time.Duration tls bool certFile string keyFile string tokenSigningKeyFile string configFile string useFakeResponse bool enableGRPCReflection bool } // NewCommand creates a new cobra command for executing api server. 
func NewCommand() *cobra.Command { s := &server{ pipedAPIPort: 9080, webAPIPort: 9081, httpPort: 9082, adminPort: 9085, cacheAddress: "cache:6379", gracePeriod: 30 * time.Second, } cmd := &cobra.Command{ Use: "server", Short: "Start running API server.", RunE: cli.WithContext(s.run), } cmd.Flags().IntVar(&s.pipedAPIPort, "piped-api-port", s.pipedAPIPort, "The port number used to run a grpc server that serving serves incoming piped requests.") cmd.Flags().IntVar(&s.webAPIPort, "web-api-port", s.webAPIPort, "The port number used to run a grpc server that serves incoming web requests.") cmd.Flags().IntVar(&s.httpPort, "http-port", s.httpPort, "The port number used to run a http server that serves incoming http requests such as auth callbacks or webhook events.") cmd.Flags().IntVar(&s.adminPort, "admin-port", s.adminPort, "The port number used to run a HTTP server for admin tasks such as metrics, healthz.") cmd.Flags().StringVar(&s.cacheAddress, "cache-address", s.cacheAddress, "The address to cache service.") cmd.Flags().DurationVar(&s.gracePeriod, "grace-period", s.gracePeriod, "How long to wait for graceful shutdown.") cmd.Flags().BoolVar(&s.tls, "tls", s.tls, "Whether running the gRPC server with TLS or not.") cmd.Flags().StringVar(&s.certFile, "cert-file", s.certFile, "The path to the TLS certificate file.") cmd.Flags().StringVar(&s.keyFile, "key-file", s.keyFile, "The path to the TLS key file.") cmd.Flags().StringVar(&s.tokenSigningKeyFile, "token-signing-key-file", s.tokenSigningKeyFile, "The path to key file used to sign ID token.") cmd.Flags().StringVar(&s.configFile, "config-file", s.configFile, "The path to the configuration file.") // For debugging early in development cmd.Flags().BoolVar(&s.useFakeResponse, "use-fake-response", s.useFakeResponse, "Whether the server responds fake response or not.") cmd.Flags().BoolVar(&s.enableGRPCReflection, "enable-grpc-reflection", s.enableGRPCReflection, "Whether to enable the reflection service or not.") return cmd } func (s *server) run(ctx context.Context, t cli.Telemetry) error { group, ctx := errgroup.WithContext(ctx) // Load control plane configuration from the specified file. cfg, err := s.loadConfig() if err != nil { t.Logger.Error("failed to load control-plane configuration", zap.String("config-file", s.configFile), zap.Error(err), ) return err } var ( pipedAPIServer *rpc.Server webAPIServer *rpc.Server ) ds, err := s.createDatastore(ctx, cfg, t.Logger) if err != nil { t.Logger.Error("failed creating datastore", zap.Error(err)) return err } defer func() { if err := ds.Close(); err != nil { t.Logger.Error("failed closing datastore client", zap.Error(err)) } }() fs, err := s.createFilestore(ctx, cfg, t.Logger) if err != nil { t.Logger.Error("failed creating firestore", zap.Error(err)) return err } defer func() { if err := fs.Close(); err != nil { t.Logger.Error("failed closing firestore client", zap.Error(err)) } }() rd := redis.NewRedis(s.cacheAddress, "") defer func() { if err := rd.Close(); err != nil { t.Logger.Error("failed closing redis client", zap.Error(err)) } }() cache := rediscache.NewTTLCache(rd, cfg.Cache.TTL.Duration()) sls := stagelogstore.NewStore(fs, cache, t.Logger) alss := applicationlivestatestore.NewStore(fs, cache, t.Logger) cmds := commandstore.NewStore(ds, cache, t.Logger) // Start a gRPC server for handling PipedAPI requests. 
{ var ( verifier = pipedtokenverifier.NewVerifier(ctx, cfg, ds) service = api.NewPipedAPI(ds, sls, alss, cmds, t.Logger) opts = []rpc.Option{ rpc.WithPort(s.pipedAPIPort), rpc.WithGracePeriod(s.gracePeriod), rpc.WithLogger(t.Logger), rpc.WithLogUnaryInterceptor(t.Logger), rpc.WithPipedTokenAuthUnaryInterceptor(verifier, t.Logger), rpc.WithRequestValidationUnaryInterceptor(), } ) if s.tls { opts = append(opts, rpc.WithTLS(s.certFile, s.keyFile)) } if s.enableGRPCReflection { opts = append(opts, rpc.WithGRPCReflection()) } pipedAPIServer = rpc.NewServer(service, opts...) group.Go(func() error { return pipedAPIServer.Run(ctx) }) } // Start a gRPC server for handling WebAPI requests. { verifier, err := jwt.NewVerifier(defaultSigningMethod, s.tokenSigningKeyFile) if err != nil { t.Logger.Error("failed to create a new JWT verifier", zap.Error(err)) return err } var service rpc.Service if s.useFakeResponse { service = api.NewFakeWebAPI() } else { service = api.NewWebAPI(ds, sls, alss, cmds, t.Logger) } opts := []rpc.Option{ rpc.WithPort(s.webAPIPort), rpc.WithGracePeriod(s.gracePeriod), rpc.WithLogger(t.Logger), rpc.WithJWTAuthUnaryInterceptor(verifier, webservice.NewRBACAuthorizer(), t.Logger), rpc.WithRequestValidationUnaryInterceptor(), } if s.tls { opts = append(opts, rpc.WithTLS(s.certFile, s.keyFile)) } if s.enableGRPCReflection { opts = append(opts, rpc.WithGRPCReflection()) } webAPIServer = rpc.NewServer(service, opts...) group.Go(func() error { return webAPIServer.Run(ctx) }) } // Start an http server for handling incoming http requests such as auth callbacks or webhook events. { signer, err := jwt.NewSigner(defaultSigningMethod, s.tokenSigningKeyFile) if err != nil { t.Logger.Error("failed to create a new signer", zap.Error(err)) return err } mux := http.NewServeMux() httpServer := &http.Server{ Addr: fmt.Sprintf(":%d", s.httpPort), Handler: mux, } handlers := []httpHandler{ authhandler.NewHandler(signer, cfg.APIURL, cfg.StateKey, datastore.NewProjectStore(ds), t.Logger), } for _, h := range handlers { h.Register(mux.HandleFunc) } group.Go(func() error { return runHTTPServer(ctx, httpServer, s.gracePeriod, t.Logger) }) } // Start running admin server. { var ( ver = []byte(version.Get().Version) admin = admin.NewAdmin(s.adminPort, s.gracePeriod, t.Logger) ) admin.HandleFunc("/version", func(w http.ResponseWriter, r *http.Request) { w.Write(ver) }) admin.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) { w.Write([]byte("ok")) }) admin.Handle("/metrics", t.PrometheusMetricsHandler()) group.Go(func() error { return admin.Run(ctx) }) } // Wait until all components have finished. // A terminating signal or a finish of any components // could trigger the finish of server. // This ensures that all components are good or no one. 
if err := group.Wait(); err != nil { t.Logger.Error("failed while running", zap.Error(err)) return err } return nil } func runHTTPServer(ctx context.Context, httpServer *http.Server, gracePeriod time.Duration, logger *zap.Logger) error { doneCh := make(chan error, 1) ctx, cancel := context.WithCancel(ctx) go func() { defer cancel() logger.Info("start running http server") if err := httpServer.ListenAndServe(); err != nil && err != http.ErrServerClosed { logger.Error("failed to listen and http server", zap.Error(err)) doneCh <- err } doneCh <- nil }() <-ctx.Done() ctx, _ = context.WithTimeout(context.Background(), gracePeriod) logger.Info("stopping http server") if err := httpServer.Shutdown(ctx); err != nil { logger.Error("failed to shutdown http server", zap.Error(err)) } return <-doneCh } func (s *server) loadConfig() (*config.ControlPlaneSpec, error) { cfg, err := config.LoadFromYAML(s.configFile) if err != nil { return nil, err } if cfg.Kind != config.KindControlPlane { return nil, fmt.Errorf("wrong configuration kind for control-plane: %v", cfg.Kind) } return cfg.ControlPlaneSpec, nil } func (s *server) createDatastore(ctx context.Context, cfg *config.ControlPlaneSpec, logger *zap.Logger) (datastore.DataStore, error) { switch cfg.Datastore.Type { case model.DataStoreFirestore: fsConfig := cfg.Datastore.FirestoreConfig options := []firestore.Option{ firestore.WithCredentialsFile(fsConfig.CredentialsFile), firestore.WithLogger(logger), } return firestore.NewFireStore(ctx, fsConfig.Project, fsConfig.Namespace, fsConfig.Environment, options...) case model.DataStoreDynamoDB: return nil, errors.New("dynamodb is unimplemented yet") case model.DataStoreMongoDB: mdConfig := cfg.Datastore.MongoDBConfig options := []mongodb.Option{ mongodb.WithLogger(logger), } return mongodb.NewMongoDB(ctx, mdConfig.URL, mdConfig.Database, options...) default: return nil, fmt.Errorf("unknown datastore type %q", cfg.Datastore.Type) } } func (s *server) createFilestore(ctx context.Context, cfg *config.ControlPlaneSpec, logger *zap.Logger) (filestore.Store, error) { ctx, cancel := context.WithTimeout(ctx, 5*time.Second) defer cancel() switch cfg.Filestore.Type { case model.FileStoreGCS: gcsCfg := cfg.Filestore.GCSConfig options := []gcs.Option{ gcs.WithLogger(logger), } if gcsCfg.CredentialsFile != "" { options = append(options, gcs.WithCredentialsFile(gcsCfg.CredentialsFile)) } return gcs.NewStore(ctx, gcsCfg.Bucket, options...) case model.FileStoreS3: return nil, errors.New("s3 is unimplemented yet") case model.FileStoreMINIO: minioCfg := cfg.Filestore.MinioConfig options := []minio.Option{ minio.WithLogger(logger), } return minio.NewStore(minioCfg.Endpoint, minioCfg.Bucket, minioCfg.AccessKeyFile, minioCfg.SecretKeyFile, options...) default: return nil, fmt.Errorf("unknown filestore type %q", cfg.Filestore.Type) } }
1
9,571
"failed to create ..."
pipe-cd-pipe
go
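Combining the patch above (firestore → filestore) with the review comment, the two log calls would presumably end up reading as below. A minimal self-contained sketch using the zap logger this file already imports; the error value is a stand-in, not a real failure from createFilestore.

package main

import (
	"errors"

	"go.uber.org/zap"
)

func main() {
	logger, _ := zap.NewDevelopment()
	defer logger.Sync()

	// Stand-in for the error returned by s.createFilestore / fs.Close.
	err := errors.New("bucket not reachable")

	// Wording after both fixes: "filestore" (per the patch) and the
	// "failed to create ..." phrasing the reviewer asks for.
	logger.Error("failed to create filestore", zap.Error(err))
	logger.Error("failed to close filestore client", zap.Error(err))
}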
@@ -69,7 +69,7 @@ func PodCreator(fakeKubeClient kubernetes.Interface, podName string) { } _, err := fakeKubeClient.CoreV1().Pods("openebs").Create(podObjet) if err != nil { - glog.Errorf("Fake pod object could not be created:", err) + glog.Error("Fake pod object could not be created:", err) } } }
1
/* Copyright 2018 The OpenEBS Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package spc import ( apis "github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1" "github.com/golang/glog" openebsFakeClientset "github.com/openebs/maya/pkg/client/generated/clientset/internalclientset/fake" env "github.com/openebs/maya/pkg/env/v1alpha1" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" k8sfake "k8s.io/client-go/kubernetes/fake" "os" "strconv" "testing" ) // SpcCreator will create fake spc objects func (focs *clientSet) SpcCreator(poolName string, SpcLeaseKeyPresent bool, SpcLeaseKeyValue string) *apis.StoragePoolClaim { var spcObject *apis.StoragePoolClaim if SpcLeaseKeyPresent { spcObject = &apis.StoragePoolClaim{ ObjectMeta: metav1.ObjectMeta{ Name: poolName, Annotations: map[string]string{ SpcLeaseKey: "{\"holder\":\"" + SpcLeaseKeyValue + "\",\"leaderTransition\":1}", }, }, } } else { spcObject = &apis.StoragePoolClaim{ ObjectMeta: metav1.ObjectMeta{ Name: poolName, }, } } spcGot, err := focs.oecs.OpenebsV1alpha1().StoragePoolClaims().Create(spcObject) if err != nil { glog.Error(err) } return spcGot } // Create 5 fake pods that will compete to acquire lease on spc func PodCreator(fakeKubeClient kubernetes.Interface, podName string) { for i := 1; i <= 5; i++ { podObjet := &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: podName + strconv.Itoa(i), }, Status: v1.PodStatus{ Phase: v1.PodRunning, }, } _, err := fakeKubeClient.CoreV1().Pods("openebs").Create(podObjet) if err != nil { glog.Errorf("Fake pod object could not be created:", err) } } } func TestHold(t *testing.T) { // Get a fake openebs client set focs := &clientSet{ oecs: openebsFakeClientset.NewSimpleClientset(), } fakeKubeClient := k8sfake.NewSimpleClientset() // Make a map of string(key) to struct(value). // Key of map describes test case behaviour. // Value of map is the test object. PodCreator(fakeKubeClient, "maya-apiserver") tests := map[string]struct { // fakestoragepoolclaim holds the fake storagepoolcalim object in test cases. fakestoragepoolclaim *apis.StoragePoolClaim storagePoolClaimName string podName string podNamespace string // expectedResult holds the expected error for the test case under run. expectedError bool // expectedResult holds the expected lease value the test case under run. 
expectedResult string }{ // TestCase#1 "SPC#1 Lease Not acquired": { fakestoragepoolclaim: focs.SpcCreator("pool1", false, ""), podName: "maya-apiserver1", podNamespace: "openebs", expectedError: false, expectedResult: "{\"holder\":\"openebs/maya-apiserver1\",\"leaderTransition\":1}", }, // TestCase#2 "SPC#2 Lease already acquired": { fakestoragepoolclaim: focs.SpcCreator("pool2", true, "openebs/maya-apiserver1"), podName: "maya-apiserver2", podNamespace: "openebs", expectedError: true, expectedResult: "{\"holder\":\"openebs/maya-apiserver1\",\"leaderTransition\":1}", }, // TestCase#3 "SPC#3 Lease already acquired": { fakestoragepoolclaim: focs.SpcCreator("pool3", true, "openebs/maya-apiserver6"), podName: "maya-apiserver2", podNamespace: "openebs", expectedError: false, expectedResult: "{\"holder\":\"openebs/maya-apiserver2\",\"leaderTransition\":2}", }, // TestCase#4 "SPC#4 Lease Not acquired": { fakestoragepoolclaim: focs.SpcCreator("pool4", true, ""), podName: "maya-apiserver3", podNamespace: "openebs", expectedError: false, expectedResult: "{\"holder\":\"openebs/maya-apiserver3\",\"leaderTransition\":2}", }, } // Iterate over whole map to run the test cases. for name, test := range tests { t.Run(name, func(t *testing.T) { var newSpcLease Lease var gotError bool os.Setenv(string(env.OpenEBSMayaPodName), test.podName) os.Setenv(string(env.OpenEBSNamespace), test.podNamespace) newSpcLease = Lease{test.fakestoragepoolclaim, SpcLeaseKey, focs.oecs, fakeKubeClient} // Hold is the function under test. err := newSpcLease.Hold() if err == nil { gotError = false } else { gotError = true } //If the result does not matches expectedResult, test case fails. if gotError != test.expectedError { t.Errorf("Test case failed:expected nil error but got error:'%v'", err) } // Check for lease value spcGot, err := focs.oecs.OpenebsV1alpha1().StoragePoolClaims().Get(test.fakestoragepoolclaim.Name, metav1.GetOptions{}) if spcGot.Annotations[SpcLeaseKey] != test.expectedResult { t.Errorf("Test case failed: expected lease value '%v' but got '%v' ", test.expectedResult, spcGot.Annotations[SpcLeaseKey]) } os.Unsetenv(string(env.OpenEBSMayaPodName)) os.Unsetenv(string(env.OpenEBSNamespace)) }) } }
1
10,207
No formatting directives, `glog.Error` will do just fine.
openebs-maya
go
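The point of this comment is that `glog.Errorf` formats like `fmt.Sprintf`, so a format string with no directive leaves the error argument dangling, while `glog.Error` concatenates its arguments like `fmt.Print`. A small sketch using the standard `fmt` package to mirror those behaviours (plus the `%v` alternative), since glog follows the same formatting rules:

package main

import (
	"errors"
	"fmt"
)

func main() {
	err := errors.New("connection refused")

	// What the original glog.Errorf call effectively yields: no directive in
	// the format string, so the error appears as an %!(EXTRA ...) artifact.
	fmt.Println(fmt.Sprintf("Fake pod object could not be created:", err))

	// The reviewer's fix: Error/Print-style concatenation needs no directive.
	fmt.Println(fmt.Sprint("Fake pod object could not be created:", err))

	// Equivalent fix if Errorf is kept: add the directive.
	fmt.Println(fmt.Sprintf("Fake pod object could not be created: %v", err))
}

Running it shows the %!(EXTRA ...) artifact from the first call and clean output from the other two.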
@@ -34,7 +34,7 @@ module Selenium driver.manage.timeouts.implicit_wait = 6 driver.find_element(id: 'adder').click - driver.find_element(id: 'box0') + expect { driver.find_element(id: 'box0') }.not_to raise_error(WebDriver::Error::NoSuchElementError) end it 'should still fail to find an element with implicit waits enabled' do
1
# frozen_string_literal: true # Licensed to the Software Freedom Conservancy (SFC) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The SFC licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. require_relative 'spec_helper' module Selenium module WebDriver describe Timeouts do context 'implicit waits' do before do driver.manage.timeouts.implicit_wait = 0 driver.navigate.to url_for('dynamic.html') end after { driver.manage.timeouts.implicit_wait = 0 } it 'should implicitly wait for a single element', except: {browser: :safari_preview} do driver.manage.timeouts.implicit_wait = 6 driver.find_element(id: 'adder').click driver.find_element(id: 'box0') end it 'should still fail to find an element with implicit waits enabled' do driver.manage.timeouts.implicit_wait = 0.5 expect { driver.find_element(id: 'box0') }.to raise_error(WebDriver::Error::NoSuchElementError) end it 'should return after first attempt to find one after disabling implicit waits' do driver.manage.timeouts.implicit_wait = 3 driver.manage.timeouts.implicit_wait = 0 expect { driver.find_element(id: 'box0') }.to raise_error(WebDriver::Error::NoSuchElementError) end it 'should implicitly wait until at least one element is found when searching for many' do add = driver.find_element(id: 'adder') driver.manage.timeouts.implicit_wait = 6 add.click add.click expect(driver.find_elements(class_name: 'redbox')).not_to be_empty end it 'should still fail to find elements when implicit waits are enabled' do driver.manage.timeouts.implicit_wait = 0.5 expect(driver.find_elements(class_name: 'redbox')).to be_empty end it 'should return after first attempt to find many after disabling implicit waits', except: {browser: :firefox, platform: :windows} do add = driver.find_element(id: 'adder') driver.manage.timeouts.implicit_wait = 3 driver.manage.timeouts.implicit_wait = 0 add.click expect(driver.find_elements(class_name: 'redbox')).to be_empty end end context 'page loads' do # w3c default is 300,000 after { driver.manage.timeouts.page_load = 300000 } it 'should be able to set the page load timeout' do expect { driver.manage.timeouts.page_load = 2 }.not_to raise_exception end end end end # WebDriver end # Selenium
1
16,703
This should just be `.not_to raise_error`, otherwise it potentially hides errors.
SeleniumHQ-selenium
java
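The RSpec detail aside, the pitfall the reviewer flags is language-agnostic: a negative assertion scoped to one specific error class still passes when a different error occurs, which hides real failures. A hedged Go translation of that distinction; findElement and errNoSuchElement are hypothetical stand-ins, not a real WebDriver binding.

package main

import (
	"errors"
	"fmt"
)

// errNoSuchElement is a hypothetical sentinel, standing in for
// WebDriver::Error::NoSuchElementError in the Ruby spec above.
var errNoSuchElement = errors.New("no such element")

// findElement pretends the lookup failed for an unrelated reason.
func findElement() error {
	return errors.New("session timed out")
}

func main() {
	err := findElement()

	// Over-specific negative check: it only rules out one error kind,
	// so this branch is taken even though the call actually failed.
	if !errors.Is(err, errNoSuchElement) {
		fmt.Println("narrow check passed, unrelated failure was hidden")
	}

	// The equivalent of a bare `.not_to raise_error`: any error fails it.
	if err != nil {
		fmt.Println("strict check caught the real failure:", err)
	}
}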
@@ -442,7 +442,18 @@ configRetry: log.Infof("Starting the Typha connection") err := typhaConnection.Start(context.Background()) if err != nil { - log.WithError(err).Fatal("Failed to connect to Typha") + log.WithError(err).Error("Failed to connect to Typha. Retrying...") + startTime := time.Now() + for err != nil && time.Since(startTime) < 30*time.Second { + // Set Ready to false and Live to true when unable to connect to typha + healthAggregator.Report(healthName, &health.HealthReport{Live: true, Ready: false}) + err = typhaConnection.Start(context.Background()) + log.WithError(err).Debug("Retrying to start Typha") + time.Sleep(1 * time.Second) + } + if err != nil { + log.WithError(err).Fatal("Failed to connect to Typha") + } } go func() { typhaConnection.Finished.Wait()
1
// Copyright (c) 2017-2019 Tigera, Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package daemon import ( "context" "errors" "fmt" "math/rand" "net/http" "os" "os/exec" "os/signal" "runtime" "runtime/debug" "syscall" "time" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" log "github.com/sirupsen/logrus" "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" "github.com/projectcalico/felix/buildinfo" "github.com/projectcalico/felix/calc" "github.com/projectcalico/felix/config" _ "github.com/projectcalico/felix/config" dp "github.com/projectcalico/felix/dataplane" "github.com/projectcalico/felix/logutils" "github.com/projectcalico/felix/policysync" "github.com/projectcalico/felix/proto" "github.com/projectcalico/felix/statusrep" "github.com/projectcalico/felix/usagerep" apiv3 "github.com/projectcalico/libcalico-go/lib/apis/v3" "github.com/projectcalico/libcalico-go/lib/backend" bapi "github.com/projectcalico/libcalico-go/lib/backend/api" "github.com/projectcalico/libcalico-go/lib/backend/model" "github.com/projectcalico/libcalico-go/lib/backend/syncersv1/felixsyncer" "github.com/projectcalico/libcalico-go/lib/backend/syncersv1/updateprocessors" "github.com/projectcalico/libcalico-go/lib/backend/watchersyncer" cerrors "github.com/projectcalico/libcalico-go/lib/errors" "github.com/projectcalico/libcalico-go/lib/health" lclogutils "github.com/projectcalico/libcalico-go/lib/logutils" "github.com/projectcalico/libcalico-go/lib/set" "github.com/projectcalico/pod2daemon/binder" "github.com/projectcalico/typha/pkg/syncclient" ) const usage = `Felix, the Calico per-host daemon. Usage: calico-felix [options] Options: -c --config-file=<filename> Config file to load [default: /etc/calico/felix.cfg]. --version Print the version and exit. ` const ( // Our default value for GOGC if it is not set. This is the percentage that heap usage must // grow by to trigger a garbage collection. Go's default is 100, meaning that 50% of the // heap can be lost to garbage. We reduce it to this value to trade increased CPU usage for // lower occupancy. defaultGCPercent = 20 // String sent on the failure report channel to indicate we're shutting down for config // change. reasonConfigChanged = "config changed" // Process return code used to report a config change. This is the same as the code used // by SIGHUP, which means that the wrapper script also restarts Felix on a SIGHUP. configChangedRC = 129 ) // Run is the entry point to run a Felix instance. // // Its main role is to sequence Felix's startup by: // // Initialising early logging config (log format and early debug settings). // // Parsing command line parameters. // // Loading datastore configuration from the environment or config file. // // Loading more configuration from the datastore (this is retried until success). // // Starting the configured internal (golang) or external dataplane driver. 
// // Starting the background processing goroutines, which load and keep in sync with the // state from the datastore, the "calculation graph". // // Starting the usage reporting and prometheus metrics endpoint threads (if configured). // // Then, it defers to monitorAndManageShutdown(), which blocks until one of the components // fails, then attempts a graceful shutdown. At that point, all the processing is in // background goroutines. // // To avoid having to maintain rarely-used code paths, Felix handles updates to its // main config parameters by exiting and allowing itself to be restarted by the init // daemon. func Run(configFile string) { // Go's RNG is not seeded by default. Do that now. rand.Seed(time.Now().UTC().UnixNano()) // Special-case handling for environment variable-configured logging: // Initialise early so we can trace out config parsing. logutils.ConfigureEarlyLogging() ctx := context.Background() if os.Getenv("GOGC") == "" { // Tune the GC to trade off a little extra CPU usage for significantly lower // occupancy at high scale. This is worthwhile because Felix runs per-host so // any occupancy improvement is multiplied by the number of hosts. log.Debugf("No GOGC value set, defaulting to %d%%.", defaultGCPercent) debug.SetGCPercent(defaultGCPercent) } buildInfoLogCxt := log.WithFields(log.Fields{ "version": buildinfo.GitVersion, "buildDate": buildinfo.BuildDate, "gitCommit": buildinfo.GitRevision, "GOMAXPROCS": runtime.GOMAXPROCS(0), }) buildInfoLogCxt.Info("Felix starting up") // Health monitoring, for liveness and readiness endpoints. The following loop can take a // while before the datastore reports itself as ready - for example when there is data that // needs to be migrated from a previous version - and we still want to Felix to report // itself as live (but not ready) while we are waiting for that. So we create the // aggregator upfront and will start serving health status over HTTP as soon as we see _any_ // config that indicates that. healthAggregator := health.NewHealthAggregator() const healthName = "felix-startup" // Register this function as a reporter of liveness and readiness, with no timeout. healthAggregator.RegisterReporter(healthName, &health.HealthReport{Live: true, Ready: true}, 0) // Load the configuration from all the different sources including the // datastore and merge. Keep retrying on failure. We'll sit in this // loop until the datastore is ready. log.Info("Loading configuration...") var backendClient bapi.Client var configParams *config.Config var typhaAddr string var numClientsCreated int configRetry: for { if numClientsCreated > 60 { // If we're in a restart loop, periodically exit (so we can be restarted) since // - it may solve the problem if there's something wrong with our process // - it prevents us from leaking connections to the datastore. exitWithCustomRC(configChangedRC, "Restarting to avoid leaking datastore connections") } // Make an initial report that says we're live but not yet ready. healthAggregator.Report(healthName, &health.HealthReport{Live: true, Ready: false}) // Load locally-defined config, including the datastore connection // parameters. First the environment variables. configParams = config.New() envConfig := config.LoadConfigFromEnvironment(os.Environ()) // Then, the config file. 
log.Infof("Loading config file: %v", configFile) fileConfig, err := config.LoadConfigFile(configFile) if err != nil { log.WithError(err).WithField("configFile", configFile).Error( "Failed to load configuration file") time.Sleep(1 * time.Second) continue configRetry } // Parse and merge the local config. configParams.UpdateFrom(envConfig, config.EnvironmentVariable) if configParams.Err != nil { log.WithError(configParams.Err).WithField("configFile", configFile).Error( "Failed to parse configuration environment variable") time.Sleep(1 * time.Second) continue configRetry } configParams.UpdateFrom(fileConfig, config.ConfigFile) if configParams.Err != nil { log.WithError(configParams.Err).WithField("configFile", configFile).Error( "Failed to parse configuration file") time.Sleep(1 * time.Second) continue configRetry } // Each time round this loop, check that we're serving health reports if we should // be, or cancel any existing server if we should not be serving any more. healthAggregator.ServeHTTP(configParams.HealthEnabled, configParams.HealthHost, configParams.HealthPort) // We should now have enough config to connect to the datastore // so we can load the remainder of the config. datastoreConfig := configParams.DatastoreConfig() // Can't dump the whole config because it may have sensitive information... log.WithField("datastore", datastoreConfig.Spec.DatastoreType).Info("Connecting to datastore") backendClient, err = backend.NewClient(datastoreConfig) if err != nil { log.WithError(err).Error("Failed to create datastore client") time.Sleep(1 * time.Second) continue configRetry } log.Info("Created datastore client") numClientsCreated++ for { globalConfig, hostConfig, err := loadConfigFromDatastore( ctx, backendClient, configParams.FelixHostname) if err == ErrNotReady { log.Warn("Waiting for datastore to be initialized (or migrated)") time.Sleep(1 * time.Second) healthAggregator.Report(healthName, &health.HealthReport{Live: true, Ready: true}) continue } else if err != nil { log.WithError(err).Error("Failed to get config from datastore") time.Sleep(1 * time.Second) continue configRetry } configParams.UpdateFrom(globalConfig, config.DatastoreGlobal) configParams.UpdateFrom(hostConfig, config.DatastorePerHost) break } configParams.Validate() if configParams.Err != nil { log.WithError(configParams.Err).Error( "Failed to parse/validate configuration from datastore.") time.Sleep(1 * time.Second) continue configRetry } // We now have some config flags that affect how we configure the syncer. // After loading the config from the datastore, reconnect, possibly with new // config. We don't need to re-load the configuration _again_ because the // calculation graph will spot if the config has changed since we were initialised. datastoreConfig = configParams.DatastoreConfig() backendClient, err = backend.NewClient(datastoreConfig) if err != nil { log.WithError(err).Error("Failed to (re)connect to datastore") time.Sleep(1 * time.Second) continue configRetry } numClientsCreated++ // If we're configured to discover Typha, do that now so we can retry if we fail. typhaAddr, err = discoverTyphaAddr(configParams) if err != nil { log.WithError(err).Error("Typha discovery enabled but discovery failed.") time.Sleep(1 * time.Second) continue configRetry } break configRetry } if numClientsCreated > 2 { // We don't have a way to close datastore connection so, if we reconnected after // a failure to load config, restart felix to avoid leaking connections. 
exitWithCustomRC(configChangedRC, "Restarting to avoid leaking datastore connections") } // We're now both live and ready. healthAggregator.Report(healthName, &health.HealthReport{Live: true, Ready: true}) // Enable or disable the health HTTP server according to coalesced config. healthAggregator.ServeHTTP(configParams.HealthEnabled, configParams.HealthHost, configParams.HealthPort) // If we get here, we've loaded the configuration successfully. // Update log levels before we do anything else. logutils.ConfigureLogging(configParams) // Since we may have enabled more logging, log with the build context // again. buildInfoLogCxt.WithField("config", configParams).Info( "Successfully loaded configuration.") // Start up the dataplane driver. This may be the internal go-based driver or an external // one. var dpDriver dp.DataplaneDriver var dpDriverCmd *exec.Cmd failureReportChan := make(chan string) configChangedRestartCallback := func() { failureReportChan <- reasonConfigChanged } dpDriver, dpDriverCmd = dp.StartDataplaneDriver(configParams, healthAggregator, configChangedRestartCallback) // Initialise the glue logic that connects the calculation graph to/from the dataplane driver. log.Info("Connect to the dataplane driver.") var connToUsageRepUpdChan chan map[string]string if configParams.UsageReportingEnabled { // Make a channel for the connector to use to send updates to the usage reporter. // (Otherwise, we pass in a nil channel, which disables such updates.) connToUsageRepUpdChan = make(chan map[string]string, 1) } dpConnector := newConnector(configParams, connToUsageRepUpdChan, backendClient, dpDriver, failureReportChan) // If enabled, create a server for the policy sync API. This allows clients to connect to // Felix over a socket and receive policy updates. var policySyncServer *policysync.Server var policySyncProcessor *policysync.Processor var policySyncAPIBinder binder.Binder calcGraphClientChannels := []chan<- interface{}{dpConnector.ToDataplane} if configParams.PolicySyncPathPrefix != "" { log.WithField("policySyncPathPrefix", configParams.PolicySyncPathPrefix).Info( "Policy sync API enabled. Creating the policy sync server.") toPolicySync := make(chan interface{}) policySyncUIDAllocator := policysync.NewUIDAllocator() policySyncProcessor = policysync.NewProcessor(toPolicySync) policySyncServer = policysync.NewServer( policySyncProcessor.JoinUpdates, policySyncUIDAllocator.NextUID, ) policySyncAPIBinder = binder.NewBinder(configParams.PolicySyncPathPrefix) policySyncServer.RegisterGrpc(policySyncAPIBinder.Server()) calcGraphClientChannels = append(calcGraphClientChannels, toPolicySync) } // Now create the calculation graph, which receives updates from the // datastore and outputs dataplane updates for the dataplane driver. // // The Syncer has its own thread and we use an extra thread for the // Validator, just to pipeline that part of the calculation then the // main calculation graph runs in a single thread for simplicity. // The output of the calculation graph arrives at the dataplane // connection via channel. // // Syncer -chan-> Validator -chan-> Calc graph -chan-> dataplane // KVPair KVPair protobufs // Get a Syncer from the datastore, or a connection to our remote sync daemon, Typha, // which will feed the calculation graph with updates, bringing Felix into sync. var syncer Startable var typhaConnection *syncclient.SyncerClient syncerToValidator := calc.NewSyncerCallbacksDecoupler() if typhaAddr != "" { // Use a remote Syncer, via the Typha server. 
log.WithField("addr", typhaAddr).Info("Connecting to Typha.") typhaConnection = syncclient.New( typhaAddr, buildinfo.GitVersion, configParams.FelixHostname, fmt.Sprintf("Revision: %s; Build date: %s", buildinfo.GitRevision, buildinfo.BuildDate), syncerToValidator, &syncclient.Options{ ReadTimeout: configParams.TyphaReadTimeout, WriteTimeout: configParams.TyphaWriteTimeout, KeyFile: configParams.TyphaKeyFile, CertFile: configParams.TyphaCertFile, CAFile: configParams.TyphaCAFile, ServerCN: configParams.TyphaCN, ServerURISAN: configParams.TyphaURISAN, }, ) } else { // Use the syncer locally. syncer = felixsyncer.New(backendClient, syncerToValidator) } log.WithField("syncer", syncer).Info("Created Syncer") // Create the ipsets/active policy calculation graph, which will // do the dynamic calculation of ipset memberships and active policies // etc. asyncCalcGraph := calc.NewAsyncCalcGraph( configParams, calcGraphClientChannels, healthAggregator, ) if configParams.UsageReportingEnabled { // Usage reporting enabled, add stats collector to graph. When it detects an update // to the stats, it makes a callback, which we use to send an update on a channel. // We use a buffered channel here to avoid blocking the calculation graph. statsChanIn := make(chan calc.StatsUpdate, 1) statsCollector := calc.NewStatsCollector(func(stats calc.StatsUpdate) error { statsChanIn <- stats return nil }) statsCollector.RegisterWith(asyncCalcGraph.CalcGraph) // Rather than sending the updates directly to the usage reporting thread, we // decouple with an extra goroutine. This prevents blocking the calculation graph // goroutine if the usage reporting goroutine is blocked on IO, for example. // Using a buffered channel wouldn't work here because the usage reporting // goroutine can block for a long time on IO so we could build up a long queue. statsChanOut := make(chan calc.StatsUpdate) go func() { var statsChanOutOrNil chan calc.StatsUpdate var stats calc.StatsUpdate for { select { case stats = <-statsChanIn: // Got a stats update, activate the output channel. log.WithField("stats", stats).Debug("Buffer: stats update received") statsChanOutOrNil = statsChanOut case statsChanOutOrNil <- stats: // Passed on the update, deactivate the output channel until // the next update. log.WithField("stats", stats).Debug("Buffer: stats update sent") statsChanOutOrNil = nil } } }() usageRep := usagerep.New( configParams.UsageReportingInitialDelaySecs, configParams.UsageReportingIntervalSecs, statsChanOut, connToUsageRepUpdChan, ) go usageRep.PeriodicallyReportUsage(context.Background()) } else { // Usage reporting disabled, but we still want a stats collector for the // felix_cluster_* metrics. Register a no-op function as the callback. statsCollector := calc.NewStatsCollector(func(stats calc.StatsUpdate) error { return nil }) statsCollector.RegisterWith(asyncCalcGraph.CalcGraph) } // Create the validator, which sits between the syncer and the // calculation graph. validator := calc.NewValidationFilter(asyncCalcGraph) // Start the background processing threads. 
if syncer != nil { log.Infof("Starting the datastore Syncer") syncer.Start() } else { log.Infof("Starting the Typha connection") err := typhaConnection.Start(context.Background()) if err != nil { log.WithError(err).Fatal("Failed to connect to Typha") } go func() { typhaConnection.Finished.Wait() failureReportChan <- "Connection to Typha failed" }() } go syncerToValidator.SendTo(validator) asyncCalcGraph.Start() log.Infof("Started the processing graph") var stopSignalChans []chan<- bool if configParams.EndpointReportingEnabled { delay := configParams.EndpointReportingDelaySecs log.WithField("delay", delay).Info( "Endpoint status reporting enabled, starting status reporter") dpConnector.statusReporter = statusrep.NewEndpointStatusReporter( configParams.FelixHostname, configParams.OpenstackRegion, dpConnector.StatusUpdatesFromDataplane, dpConnector.InSync, dpConnector.datastore, delay, delay*180, ) dpConnector.statusReporter.Start() } // Start communicating with the dataplane driver. dpConnector.Start() if policySyncProcessor != nil { log.WithField("policySyncPathPrefix", configParams.PolicySyncPathPrefix).Info( "Policy sync API enabled. Starting the policy sync server.") policySyncProcessor.Start() sc := make(chan bool) stopSignalChans = append(stopSignalChans, sc) go policySyncAPIBinder.SearchAndBind(sc) } // Send the opening message to the dataplane driver, giving it its // config. dpConnector.ToDataplane <- &proto.ConfigUpdate{ Config: configParams.RawValues(), } if configParams.PrometheusMetricsEnabled { log.Info("Prometheus metrics enabled. Starting server.") gaugeHost := prometheus.NewGauge(prometheus.GaugeOpts{ Name: "felix_host", Help: "Configured Felix hostname (as a label), typically used in grouping/aggregating stats; the label defaults to the hostname of the host but can be overridden by configuration. The value of the gauge is always set to 1.", ConstLabels: prometheus.Labels{"host": configParams.FelixHostname}, }) gaugeHost.Set(1) prometheus.MustRegister(gaugeHost) go servePrometheusMetrics(configParams) } // Register signal handlers to dump memory/CPU profiles. logutils.RegisterProfilingSignalHandlers(configParams) // Now monitor the worker process and our worker threads and shut // down the process gracefully if they fail. monitorAndManageShutdown(failureReportChan, dpDriverCmd, stopSignalChans) } func servePrometheusMetrics(configParams *config.Config) { for { log.WithField("port", configParams.PrometheusMetricsPort).Info("Starting prometheus metrics endpoint") if configParams.PrometheusGoMetricsEnabled && configParams.PrometheusProcessMetricsEnabled { log.Info("Including Golang & Process metrics") } else { if !configParams.PrometheusGoMetricsEnabled { log.Info("Discarding Golang metrics") prometheus.Unregister(prometheus.NewGoCollector()) } if !configParams.PrometheusProcessMetricsEnabled { log.Info("Discarding process metrics") prometheus.Unregister(prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{})) } } http.Handle("/metrics", promhttp.Handler()) err := http.ListenAndServe(fmt.Sprintf(":%v", configParams.PrometheusMetricsPort), nil) log.WithError(err).Error( "Prometheus metrics endpoint failed, trying to restart it...") time.Sleep(1 * time.Second) } } func monitorAndManageShutdown(failureReportChan <-chan string, driverCmd *exec.Cmd, stopSignalChans []chan<- bool) { // Ask the runtime to tell us if we get a term/int signal. 
signalChan := make(chan os.Signal, 1) signal.Notify(signalChan, syscall.SIGTERM) signal.Notify(signalChan, syscall.SIGINT) signal.Notify(signalChan, syscall.SIGHUP) // Start a background thread to tell us when the dataplane driver stops. // If the driver stops unexpectedly, we'll terminate this process. // If this process needs to stop, we'll kill the driver and then wait // for the message from the background thread. driverStoppedC := make(chan bool) go func() { if driverCmd == nil { log.Info("No driver process to monitor") return } err := driverCmd.Wait() log.WithError(err).Warn("Driver process stopped") driverStoppedC <- true }() // Wait for one of the channels to give us a reason to shut down. driverAlreadyStopped := driverCmd == nil receivedFatalSignal := false var reason string select { case <-driverStoppedC: reason = "Driver stopped" driverAlreadyStopped = true case sig := <-signalChan: if sig == syscall.SIGHUP { log.Warning("Received a SIGHUP, treating as a request to reload config") reason = reasonConfigChanged } else { reason = fmt.Sprintf("Received OS signal %v", sig) receivedFatalSignal = true } case reason = <-failureReportChan: } logCxt := log.WithField("reason", reason) logCxt.Warn("Felix is shutting down") // Notify other components to stop. for _, c := range stopSignalChans { select { case c <- true: default: } } if !driverAlreadyStopped { // Driver may still be running, just in case the driver is // unresponsive, start a thread to kill this process if we // don't manage to kill the driver. logCxt.Info("Driver still running, trying to shut it down...") giveUpOnSigTerm := make(chan bool) go func() { time.Sleep(4 * time.Second) giveUpOnSigTerm <- true time.Sleep(1 * time.Second) log.Fatal("Failed to wait for driver to exit, giving up.") }() // Signal to the driver to exit. driverCmd.Process.Signal(syscall.SIGTERM) select { case <-driverStoppedC: logCxt.Info("Driver shut down after SIGTERM") case <-giveUpOnSigTerm: logCxt.Error("Driver did not respond to SIGTERM, sending SIGKILL") driverCmd.Process.Kill() <-driverStoppedC logCxt.Info("Driver shut down after SIGKILL") } } if !receivedFatalSignal { // We're exiting due to a failure or a config change, wait // a couple of seconds to ensure that we don't go into a tight // restart loop (which would make the init daemon in calico/node give // up trying to restart us). logCxt.Info("Sleeping to avoid tight restart loop.") go func() { time.Sleep(2 * time.Second) if reason == reasonConfigChanged { exitWithCustomRC(configChangedRC, "Exiting for config change") return } logCxt.Fatal("Exiting.") }() for { sig := <-signalChan if sig == syscall.SIGHUP { logCxt.Warning("Ignoring SIGHUP because we're already shutting down") continue } logCxt.WithField("signal", sig).Fatal( "Signal received while shutting down, exiting immediately") } } logCxt.Fatal("Exiting immediately") } func exitWithCustomRC(rc int, message string) { // Since log writing is done a background thread, we set the force-flush flag on this log to ensure that // all the in-flight logs get written before we exit. 
log.WithFields(log.Fields{ "rc": rc, lclogutils.FieldForceFlush: true, }).Info(message) os.Exit(rc) } var ( ErrNotReady = errors.New("datastore is not ready or has not been initialised") ) func loadConfigFromDatastore( ctx context.Context, client bapi.Client, hostname string, ) (globalConfig, hostConfig map[string]string, err error) { // The configuration is split over 3 different resource types and 4 different resource // instances in the v3 data model: // - ClusterInformation (global): name "default" // - FelixConfiguration (global): name "default" // - FelixConfiguration (per-host): name "node.<hostname>" // - Node (per-host): name: <hostname> // Get the global values and host specific values separately. We re-use the updateprocessor // logic to convert the single v3 resource to a set of v1 key/values. hostConfig = make(map[string]string) globalConfig = make(map[string]string) var ready bool err = getAndMergeConfig( ctx, client, globalConfig, apiv3.KindClusterInformation, "default", updateprocessors.NewClusterInfoUpdateProcessor(), &ready, ) if err != nil { return } if !ready { // The ClusterInformation struct should contain the ready flag, if it is not set, abort. err = ErrNotReady return } err = getAndMergeConfig( ctx, client, globalConfig, apiv3.KindFelixConfiguration, "default", updateprocessors.NewFelixConfigUpdateProcessor(), &ready, ) if err != nil { return } err = getAndMergeConfig( ctx, client, hostConfig, apiv3.KindFelixConfiguration, "node."+hostname, updateprocessors.NewFelixConfigUpdateProcessor(), &ready, ) if err != nil { return } err = getAndMergeConfig( ctx, client, hostConfig, apiv3.KindNode, hostname, updateprocessors.NewFelixNodeUpdateProcessor(), &ready, ) if err != nil { return } return } // getAndMergeConfig gets the v3 resource configuration extracts the separate config values // (where each configuration value is stored in a field of the v3 resource Spec) and merges into // the supplied map, as required by our v1-style configuration loader. func getAndMergeConfig( ctx context.Context, client bapi.Client, config map[string]string, kind string, name string, configConverter watchersyncer.SyncerUpdateProcessor, ready *bool, ) error { logCxt := log.WithFields(log.Fields{"kind": kind, "name": name}) cfg, err := client.Get(ctx, model.ResourceKey{ Kind: kind, Name: name, Namespace: "", }, "") if err != nil { switch err.(type) { case cerrors.ErrorResourceDoesNotExist: logCxt.Info("No config of this type") return nil default: logCxt.WithError(err).Info("Failed to load config from datastore") return err } } // Re-use the update processor logic implemented for the Syncer. We give it a v3 config // object in a KVPair and it uses the annotations defined on it to split it into v1-style // KV pairs. Log any errors - but don't fail completely to avoid cyclic restarts. v1kvs, err := configConverter.Process(cfg) if err != nil { logCxt.WithError(err).Error("Failed to convert configuration") } // Loop through the converted values and update our config map with values from either the // Global or Host configs. 
for _, v1KV := range v1kvs { if _, ok := v1KV.Key.(model.ReadyFlagKey); ok { logCxt.WithField("ready", v1KV.Value).Info("Loaded ready flag") if v1KV.Value == true { *ready = true } } else if v1KV.Value != nil { switch k := v1KV.Key.(type) { case model.GlobalConfigKey: config[k.Name] = v1KV.Value.(string) case model.HostConfigKey: config[k.Name] = v1KV.Value.(string) default: logCxt.WithField("KV", v1KV).Debug("Skipping config - not required for initial loading") } } } return nil } type DataplaneConnector struct { config *config.Config configUpdChan chan<- map[string]string ToDataplane chan interface{} StatusUpdatesFromDataplane chan interface{} InSync chan bool failureReportChan chan<- string dataplane dp.DataplaneDriver datastore bapi.Client statusReporter *statusrep.EndpointStatusReporter datastoreInSync bool firstStatusReportSent bool } type Startable interface { Start() } func newConnector(configParams *config.Config, configUpdChan chan<- map[string]string, datastore bapi.Client, dataplane dp.DataplaneDriver, failureReportChan chan<- string, ) *DataplaneConnector { felixConn := &DataplaneConnector{ config: configParams, configUpdChan: configUpdChan, datastore: datastore, ToDataplane: make(chan interface{}), StatusUpdatesFromDataplane: make(chan interface{}), InSync: make(chan bool, 1), failureReportChan: failureReportChan, dataplane: dataplane, } return felixConn } func (fc *DataplaneConnector) readMessagesFromDataplane() { defer func() { fc.shutDownProcess("Failed to read messages from dataplane") }() log.Info("Reading from dataplane driver pipe...") ctx := context.Background() for { payload, err := fc.dataplane.RecvMessage() if err != nil { log.WithError(err).Error("Failed to read from front-end socket") fc.shutDownProcess("Failed to read from front-end socket") } log.WithField("payload", payload).Debug("New message from dataplane") switch msg := payload.(type) { case *proto.ProcessStatusUpdate: fc.handleProcessStatusUpdate(ctx, msg) case *proto.WorkloadEndpointStatusUpdate: if fc.statusReporter != nil { fc.StatusUpdatesFromDataplane <- msg } case *proto.WorkloadEndpointStatusRemove: if fc.statusReporter != nil { fc.StatusUpdatesFromDataplane <- msg } case *proto.HostEndpointStatusUpdate: if fc.statusReporter != nil { fc.StatusUpdatesFromDataplane <- msg } case *proto.HostEndpointStatusRemove: if fc.statusReporter != nil { fc.StatusUpdatesFromDataplane <- msg } default: log.WithField("msg", msg).Warning("Unknown message from dataplane") } log.Debug("Finished handling message from front-end") } } func (fc *DataplaneConnector) handleProcessStatusUpdate(ctx context.Context, msg *proto.ProcessStatusUpdate) { log.Debugf("Status update from dataplane driver: %v", *msg) statusReport := model.StatusReport{ Timestamp: msg.IsoTimestamp, UptimeSeconds: msg.Uptime, FirstUpdate: !fc.firstStatusReportSent, } kv := model.KVPair{ Key: model.ActiveStatusReportKey{Hostname: fc.config.FelixHostname, RegionString: model.RegionString(fc.config.OpenstackRegion)}, Value: &statusReport, TTL: fc.config.ReportingTTLSecs, } applyCtx, cancel := context.WithTimeout(ctx, 2*time.Second) _, err := fc.datastore.Apply(applyCtx, &kv) cancel() if err != nil { if _, ok := err.(cerrors.ErrorOperationNotSupported); ok { log.Debug("Datastore doesn't support status reports.") return // and it won't support the last status key either. 
} else { log.Warningf("Failed to write status to datastore: %v", err) } } else { fc.firstStatusReportSent = true } kv = model.KVPair{ Key: model.LastStatusReportKey{Hostname: fc.config.FelixHostname, RegionString: model.RegionString(fc.config.OpenstackRegion)}, Value: &statusReport, } applyCtx, cancel = context.WithTimeout(ctx, 2*time.Second) _, err = fc.datastore.Apply(applyCtx, &kv) cancel() if err != nil { log.Warningf("Failed to write status to datastore: %v", err) } } var handledConfigChanges = set.From("CalicoVersion", "ClusterGUID", "ClusterType") func (fc *DataplaneConnector) sendMessagesToDataplaneDriver() { defer func() { fc.shutDownProcess("Failed to send messages to dataplane") }() var config map[string]string for { msg := <-fc.ToDataplane switch msg := msg.(type) { case *proto.InSync: log.Info("Datastore now in sync.") if !fc.datastoreInSync { log.Info("Datastore in sync for first time, sending message to status reporter.") fc.datastoreInSync = true fc.InSync <- true } case *proto.ConfigUpdate: if config != nil { log.WithFields(log.Fields{ "old": config, "new": msg.Config, }).Info("Config updated, checking whether we need to restart") restartNeeded := false for kNew, vNew := range msg.Config { logCxt := log.WithFields(log.Fields{"key": kNew, "new": vNew}) if vOld, prs := config[kNew]; !prs { logCxt = logCxt.WithField("updateType", "add") } else if vNew != vOld { logCxt = logCxt.WithFields(log.Fields{"old": vOld, "updateType": "update"}) } else { continue } if handledConfigChanges.Contains(kNew) { logCxt.Info("Config change can be handled without restart") continue } logCxt.Warning("Config change requires restart") restartNeeded = true } for kOld, vOld := range config { logCxt := log.WithFields(log.Fields{"key": kOld, "old": vOld, "updateType": "delete"}) if _, prs := msg.Config[kOld]; prs { // Key was present in the message so we've handled above. continue } if handledConfigChanges.Contains(kOld) { logCxt.Info("Config change can be handled without restart") continue } logCxt.Warning("Config change requires restart") restartNeeded = true } if restartNeeded { fc.shutDownProcess("config changed") } } // Take a copy of the config to compare against next time. config = make(map[string]string) for k, v := range msg.Config { config[k] = v } if fc.configUpdChan != nil { // Send the config over to the usage reporter. fc.configUpdChan <- config } case *calc.DatastoreNotReady: log.Warn("Datastore became unready, need to restart.") fc.shutDownProcess("datastore became unready") } if err := fc.dataplane.SendMessage(msg); err != nil { fc.shutDownProcess("Failed to write to dataplane driver") } } } func (fc *DataplaneConnector) shutDownProcess(reason string) { // Send a failure report to the managed shutdown thread then give it // a few seconds to do the shutdown. fc.failureReportChan <- reason time.Sleep(5 * time.Second) // The graceful shutdown failed, terminate the process. log.Panic("Managed shutdown failed. Panicking.") } func (fc *DataplaneConnector) Start() { // Start a background thread to write to the dataplane driver. go fc.sendMessagesToDataplaneDriver() // Start background thread to read messages from dataplane driver. go fc.readMessagesFromDataplane() } var ErrServiceNotReady = errors.New("Kubernetes service missing IP or port.") func discoverTyphaAddr(configParams *config.Config) (string, error) { if configParams.TyphaAddr != "" { // Explicit address; trumps other sources of config. 
return configParams.TyphaAddr, nil } if configParams.TyphaK8sServiceName == "" { // No explicit address, and no service name, not using Typha. return "", nil } // If we get here, we need to look up the Typha service using the k8s API. // TODO Typha: support Typha lookup without using rest.InClusterConfig(). k8sconf, err := rest.InClusterConfig() if err != nil { log.WithError(err).Error("Unable to create Kubernetes config.") return "", err } clientset, err := kubernetes.NewForConfig(k8sconf) if err != nil { log.WithError(err).Error("Unable to create Kubernetes client set.") return "", err } svcClient := clientset.CoreV1().Services(configParams.TyphaK8sNamespace) svc, err := svcClient.Get(configParams.TyphaK8sServiceName, v1.GetOptions{}) if err != nil { log.WithError(err).Error("Unable to get Typha service from Kubernetes.") return "", err } host := svc.Spec.ClusterIP log.WithField("clusterIP", host).Info("Found Typha ClusterIP.") if host == "" { log.WithError(err).Error("Typha service had no ClusterIP.") return "", ErrServiceNotReady } for _, p := range svc.Spec.Ports { if p.Name == "calico-typha" { log.WithField("port", p).Info("Found Typha service port.") typhaAddr := fmt.Sprintf("%s:%v", host, p.Port) return typhaAddr, nil } } log.Error("Didn't find Typha service port.") return "", ErrServiceNotReady }
1
16,841
Need an `if err == nil { break }` above this line so that we don't log/sleep if the retry succeeds (see the sketch below this row).
projectcalico-felix
go
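The comment above refers to the retry loop in `servePrometheusMetrics`, where the error log and the one-second sleep currently run even when `http.ListenAndServe` returns without an error. Below is a minimal, self-contained Go sketch of the loop shape the reviewer is asking for — the port number, the stdlib logger, and the placeholder handler are illustrative assumptions, not Felix's actual code:

```go
package main

import (
	"fmt"
	"log"
	"net/http"
	"time"
)

// serveMetrics restarts the metrics endpoint whenever it fails, but exits the
// loop quietly when ListenAndServe returns nil, so a clean return is neither
// logged as a failure nor followed by the back-off sleep.
func serveMetrics(port int) {
	// Register the handler once, outside the retry loop.
	http.HandleFunc("/metrics", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "# metrics would be served here") // placeholder handler
	})
	for {
		log.Printf("starting metrics endpoint on :%d", port)
		err := http.ListenAndServe(fmt.Sprintf(":%d", port), nil)
		if err == nil {
			break // the reviewer's suggestion: don't log/sleep if the call succeeded
		}
		log.Printf("metrics endpoint failed: %v; restarting in 1s", err)
		time.Sleep(1 * time.Second)
	}
}

func main() {
	serveMetrics(9091)
}
```

The loop still retries on real listen failures exactly as before; the only behavioural change is that a nil return ends the loop instead of being reported as an error.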
@@ -10,17 +10,14 @@ declare(strict_types = 1); namespace Ergonode\Attribute\Persistence\Dbal\Projector\Attribute; use Doctrine\DBAL\Connection; +use Doctrine\DBAL\DBALException; use Ergonode\Attribute\Domain\Event\Attribute\AttributeCreatedEvent; -use Ergonode\Core\Domain\Entity\AbstractId; -use Ergonode\EventSourcing\Infrastructure\DomainEventInterface; -use Ergonode\EventSourcing\Infrastructure\Exception\UnsupportedEventException; -use Ergonode\EventSourcing\Infrastructure\Projector\DomainEventProjectorInterface; use JMS\Serializer\SerializerInterface; use Ramsey\Uuid\Uuid; /** */ -class AttributeCreatedEventProjector implements DomainEventProjectorInterface +class AttributeCreatedEventProjector { private const TABLE = 'attribute'; private const TABLE_PARAMETER = 'attribute_parameter';
1
<?php /** * Copyright © Bold Brand Commerce Sp. z o.o. All rights reserved. * See LICENSE.txt for license details. */ declare(strict_types = 1); namespace Ergonode\Attribute\Persistence\Dbal\Projector\Attribute; use Doctrine\DBAL\Connection; use Ergonode\Attribute\Domain\Event\Attribute\AttributeCreatedEvent; use Ergonode\Core\Domain\Entity\AbstractId; use Ergonode\EventSourcing\Infrastructure\DomainEventInterface; use Ergonode\EventSourcing\Infrastructure\Exception\UnsupportedEventException; use Ergonode\EventSourcing\Infrastructure\Projector\DomainEventProjectorInterface; use JMS\Serializer\SerializerInterface; use Ramsey\Uuid\Uuid; /** */ class AttributeCreatedEventProjector implements DomainEventProjectorInterface { private const TABLE = 'attribute'; private const TABLE_PARAMETER = 'attribute_parameter'; private const TABLE_VALUE = 'value'; private const TABLE_VALUE_TRANSLATION = 'value_translation'; /** * @var Connection */ private $connection; /** * @var SerializerInterface */ private $serializer; /** * @param Connection $connection * @param SerializerInterface $serializer */ public function __construct(Connection $connection, SerializerInterface $serializer) { $this->connection = $connection; $this->serializer = $serializer; } /** * {@inheritDoc} */ public function supports(DomainEventInterface $event): bool { return $event instanceof AttributeCreatedEvent; } /** * {@inheritDoc} * * @throws \Throwable */ public function projection(AbstractId $aggregateId, DomainEventInterface $event): void { if (!$this->supports($event)) { throw new UnsupportedEventException($event, AttributeCreatedEvent::class); } $this->connection->transactional(function () use ($aggregateId, $event) { $labelUuid = Uuid::uuid4(); $placeholderUuid = Uuid::uuid4(); $hintUuid = Uuid::uuid4(); $this->connection->insert( self::TABLE_VALUE, [ 'id' => $labelUuid->toString(), ] ); $this->connection->insert( self::TABLE_VALUE, [ 'id' => $placeholderUuid->toString(), ] ); $this->connection->insert( self::TABLE_VALUE, [ 'id' => $hintUuid->toString(), ] ); foreach ($event->getLabel()->getTranslations() as $language => $value) { $this->connection->insert( self::TABLE_VALUE_TRANSLATION, [ 'id' => Uuid::uuid4()->toString(), 'value_id' => $labelUuid, 'language' => $language, 'value' => $value, ] ); } foreach ($event->getHint()->getTranslations() as $language => $value) { $this->connection->insert( self::TABLE_VALUE_TRANSLATION, [ 'id' => Uuid::uuid4()->toString(), 'value_id' => $hintUuid, 'language' => $language, 'value' => $value, ] ); } foreach ($event->getPlaceholder()->getTranslations() as $language => $value) { $this->connection->insert( self::TABLE_VALUE_TRANSLATION, [ 'id' => Uuid::uuid4()->toString(), 'value_id' => $placeholderUuid, 'language' => $language, 'value' => $value, ] ); } $this->connection->insert( self::TABLE, [ 'id' => $aggregateId->getValue(), 'multilingual' => $event->isMultilingual(), 'code' => $event->getCode()->getValue(), 'type' => $event->getType(), 'label' => $labelUuid->toString(), 'placeholder' => $placeholderUuid->toString(), 'hint' => $hintUuid->toString(), 'system' => $event->isSystem(), 'editable' => $event->isEditable(), 'deletable' => $event->isDeletable(), ], [ 'multilingual' => \PDO::PARAM_BOOL, 'system' => \PDO::PARAM_BOOL, 'editable' => \PDO::PARAM_BOOL, 'deletable' => \PDO::PARAM_BOOL, ] ); foreach ($event->getParameters() as $name => $value) { if (!empty($value)) { $this->connection->insert( self::TABLE_PARAMETER, [ 'attribute_id' => $aggregateId->getValue(), 'type' => $name, 'value' => 
$this->serializer->serialize($value, 'json'), ] ); } } }); } }
1
8,460
Separate it into different methods :D The invoke method looks like an old-fashioned portal class :D
ergonode-backend
php
@@ -11,11 +11,7 @@ import 'emby-button'; elem.classList.remove('hide'); elem.classList.add('expanded'); elem.style.height = 'auto'; - const height = elem.offsetHeight + 'px'; - elem.style.height = '0'; - - // trigger reflow - const newHeight = elem.offsetHeight; + var height = elem.offsetHeight + 'px'; elem.style.height = height; setTimeout(function () {
1
import 'css!./emby-collapse'; import 'webcomponents'; import 'emby-button'; /* eslint-disable indent */ const EmbyButtonPrototype = Object.create(HTMLDivElement.prototype); function slideDownToShow(button, elem) { elem.classList.remove('hide'); elem.classList.add('expanded'); elem.style.height = 'auto'; const height = elem.offsetHeight + 'px'; elem.style.height = '0'; // trigger reflow const newHeight = elem.offsetHeight; elem.style.height = height; setTimeout(function () { if (elem.classList.contains('expanded')) { elem.classList.remove('hide'); } else { elem.classList.add('hide'); } elem.style.height = 'auto'; }, 300); const icon = button.querySelector('.material-icons'); //icon.innerHTML = 'expand_less'; icon.classList.add('emby-collapse-expandIconExpanded'); } function slideUpToHide(button, elem) { elem.style.height = elem.offsetHeight + 'px'; // trigger reflow const newHeight = elem.offsetHeight; elem.classList.remove('expanded'); elem.style.height = '0'; setTimeout(function () { if (elem.classList.contains('expanded')) { elem.classList.remove('hide'); } else { elem.classList.add('hide'); } }, 300); const icon = button.querySelector('.material-icons'); //icon.innerHTML = 'expand_more'; icon.classList.remove('emby-collapse-expandIconExpanded'); } function onButtonClick(e) { const button = this; const collapseContent = button.parentNode.querySelector('.collapseContent'); if (collapseContent.expanded) { collapseContent.expanded = false; slideUpToHide(button, collapseContent); } else { collapseContent.expanded = true; slideDownToShow(button, collapseContent); } } EmbyButtonPrototype.attachedCallback = function () { if (this.classList.contains('emby-collapse')) { return; } this.classList.add('emby-collapse'); const collapseContent = this.querySelector('.collapseContent'); if (collapseContent) { collapseContent.classList.add('hide'); } const title = this.getAttribute('title'); const html = '<button is="emby-button" type="button" on-click="toggleExpand" id="expandButton" class="emby-collapsible-button iconRight"><h3 class="emby-collapsible-title" title="' + title + '">' + title + '</h3><span class="material-icons emby-collapse-expandIcon expand_more"></span></button>'; this.insertAdjacentHTML('afterbegin', html); const button = this.querySelector('.emby-collapsible-button'); button.addEventListener('click', onButtonClick); if (this.getAttribute('data-expanded') === 'true') { onButtonClick.call(button); } }; document.registerElement('emby-collapse', { prototype: EmbyButtonPrototype, extends: 'div' }); /* eslint-enable indent */
1
16,758
The collapse/expand animation of the filter is broken - it is no longer smooth.
jellyfin-jellyfin-web
js
@@ -16,16 +16,10 @@ module Travis install_sdk_components(config[:android][:components]) unless config[:android][:components].empty? end - def install - self.if '-f gradlew', './gradlew assemble', fold: 'install', retry: true - self.elif '-f build.gradle', 'gradle assemble', fold: 'install', retry: true - self.elif '-f pom.xml', 'mvn install -DskipTests=true -B', fold: 'install', retry: true # Otherwise mvn install will run tests which. Suggestion from Charles Nutter. MK. - end - def script self.if '-f gradlew', './gradlew check connectedCheck' self.elif '-f build.gradle', 'gradle check connectedCheck' - self.elif '-f pom.xml', 'mvn test -B' + self.elif '-f pom.xml', 'mvn install -B' self.else 'ant debug installt test' end
1
module Travis module Build class Script class Android < Script include Jdk DEFAULTS = { android: { components: [], licenses: [] } } def setup super install_sdk_components(config[:android][:components]) unless config[:android][:components].empty? end def install self.if '-f gradlew', './gradlew assemble', fold: 'install', retry: true self.elif '-f build.gradle', 'gradle assemble', fold: 'install', retry: true self.elif '-f pom.xml', 'mvn install -DskipTests=true -B', fold: 'install', retry: true # Otherwise mvn install will run tests which. Suggestion from Charles Nutter. MK. end def script self.if '-f gradlew', './gradlew check connectedCheck' self.elif '-f build.gradle', 'gradle check connectedCheck' self.elif '-f pom.xml', 'mvn test -B' self.else 'ant debug installt test' end private def install_sdk_components(components) fold("android.install") do |script| echo "Installing Android dependencies" components.each do |component_name| install_sdk_component(script, component_name) end end end def install_sdk_component(script, component_name) install_cmd = "android-update-sdk --components=#{component_name}" unless config[:android][:licenses].empty? install_cmd += " --accept-licenses='#{config[:android][:licenses].join('|')}'" end script.cmd install_cmd end end end end end
1
11,479
so during the `script` stage we run `mvn install`?
travis-ci-travis-build
rb
@@ -19,6 +19,7 @@ import ( "github.com/iotexproject/iotex-core/action" "github.com/iotexproject/iotex-core/action/protocol" "github.com/iotexproject/iotex-core/pkg/log" + "github.com/iotexproject/iotex-core/state/factory" ) // protocolID is the protocol ID
1
// Copyright (c) 2020 IoTeX Foundation // This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no // warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent // permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache // License 2.0 that can be found in the LICENSE file. package staking import ( "context" "math/big" "github.com/pkg/errors" "go.uber.org/zap" "github.com/iotexproject/go-pkgs/hash" "github.com/iotexproject/iotex-address/address" "github.com/iotexproject/iotex-core/action" "github.com/iotexproject/iotex-core/action/protocol" "github.com/iotexproject/iotex-core/pkg/log" ) // protocolID is the protocol ID const protocolID = "staking" // Errors var ( ErrAlreadyExist = errors.New("candidate already exist") ) // Protocol defines the protocol of handling staking type Protocol struct { addr address.Address inMemCandidates CandidateCenter voteCal VoteWeightCalConsts depositGas DepositGas } // DepositGas deposits gas to some pool type DepositGas func(ctx context.Context, sm protocol.StateManager, amount *big.Int) error // NewProtocol instantiates the protocol of staking func NewProtocol(depositGas DepositGas) *Protocol { h := hash.Hash160b([]byte(protocolID)) addr, err := address.FromBytes(h[:]) if err != nil { log.L().Panic("Error when constructing the address of staking protocol", zap.Error(err)) } return &Protocol{addr: addr, depositGas: depositGas} } // Handle handles a staking message func (p *Protocol) Handle(ctx context.Context, act action.Action, sm protocol.StateManager) (*action.Receipt, error) { switch act := act.(type) { case *action.CreateStake: return p.handleCreateStake(ctx, act, sm) case *action.Unstake: return p.handleUnstake(ctx, act, sm) case *action.WithdrawStake: return p.handleWithdrawStake(ctx, act, sm) case *action.ChangeCandidate: return p.handleChangeCandidate(ctx, act, sm) case *action.TransferStake: return p.handleTransferStake(ctx, act, sm) case *action.DepositToStake: return p.handleDepositToStake(ctx, act, sm) case *action.Restake: return p.handleRestake(ctx, act, sm) case *action.CandidateRegister: return p.handleCandidateRegister(ctx, act, sm) case *action.CandidateUpdate: return p.handleCandidateUpdate(ctx, act, sm) } return nil, nil } // Validate validates a staking message func (p *Protocol) Validate(ctx context.Context, act action.Action) error { switch act := act.(type) { case *action.CreateStake: return p.validateCreateStake(ctx, act) case *action.Unstake: return p.validateUnstake(ctx, act) case *action.WithdrawStake: return p.validateWithdrawStake(ctx, act) case *action.ChangeCandidate: return p.validateChangeCandidate(ctx, act) case *action.TransferStake: return p.validateTransferStake(ctx, act) case *action.DepositToStake: return p.validateDepositToStake(ctx, act) case *action.Restake: return p.validateRestake(ctx, act) case *action.CandidateRegister: return p.validateCandidateRegister(ctx, act) case *action.CandidateUpdate: return p.validateCandidateUpdate(ctx, act) } return nil } // ReadState read the state on blockchain via protocol func (p *Protocol) ReadState(context.Context, protocol.StateReader, []byte, ...[]byte) ([]byte, error) { //TODO return nil, protocol.ErrUnimplemented } // Register registers the protocol with a unique ID func (p *Protocol) Register(r *protocol.Registry) error { return r.Register(protocolID, p) } // ForceRegister registers the protocol with a 
unique ID and force replacing the previous protocol if it exists func (p *Protocol) ForceRegister(r *protocol.Registry) error { return r.ForceRegister(protocolID, p) } func (p *Protocol) calculateVoteWeight(v *VoteBucket, selfStake bool) *big.Int { return calculateVoteWeight(p.voteCal, v, selfStake) }
1
21,116
We may need to move CandidateNamespace to this protocol, as it is only used here (see the sketch below this row).
iotexproject-iotex-core
go
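The `CandidateNamespace` constant mentioned above does not appear in this file, so the following is only a sketch of the relocation being suggested — the value, the doc comment, and the exact spelling are assumptions, not the real iotex-core definition:

```go
package staking

// protocolID is the protocol ID, as defined in the file above.
const protocolID = "staking"

// CandidateNamespace would be the state namespace under which candidate
// records are stored. Defining it here keeps it next to the only protocol
// that reads and writes it; the value below is illustrative only.
const CandidateNamespace = "candidates"
```

Any call sites in other packages would then import the staking package; if such a dependency is undesirable, the constant arguably belongs in a shared state package instead.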