Dataset columns (per-column statistics as reported by the dataset viewer):
patch — string, lengths 17 to 31.2k
y — int64, values 1 to 1
oldf — string, lengths 0 to 2.21M
idx — int64, values 1 to 1
id — int64, values 4.29k to 68.4k
msg — string, lengths 8 to 843
proj — string, 212 distinct values
lang — string, 9 distinct values
@@ -57,6 +57,10 @@ func (ctx *ChannelContext) Cleanup(module string) { // Send send msg to a module. Todo: do not stuck func (ctx *ChannelContext) Send(module string, message model.Message) { + // check if msg is sync + if message.Header.Sync { + klog.Warningf("Get a sync-msg when use beehive.Send(),msg.Header:{%v},msg.Route:{%v}",message.Header,message.Router) + } // avoid exception because of channel colsing // TODO: need reconstruction defer func() {
1
package context import ( "errors" "fmt" "sync" "sync/atomic" "time" "k8s.io/klog" "github.com/kubeedge/beehive/pkg/core/model" ) // constants for channel context const ( ChannelSizeDefault = 1024 MessageTimeoutDefault = 30 * time.Second TickerTimeoutDefault = 20 * time.Millisecond ) // ChannelContext is object for Context channel type ChannelContext struct { //ConfigFactory goarchaius.ConfigurationFactory channels map[string]chan model.Message chsLock sync.RWMutex typeChannels map[string]map[string]chan model.Message typeChsLock sync.RWMutex anonChannels map[string]chan model.Message anonChsLock sync.RWMutex } // NewChannelContext creates and returns object of new channel context // TODO: Singleton func NewChannelContext() *ChannelContext { channelMap := make(map[string]chan model.Message) moduleChannels := make(map[string]map[string]chan model.Message) anonChannels := make(map[string]chan model.Message) return &ChannelContext{ channels: channelMap, typeChannels: moduleChannels, anonChannels: anonChannels, } } // Cleanup close modules func (ctx *ChannelContext) Cleanup(module string) { if channel := ctx.getChannel(module); channel != nil { ctx.delChannel(module) // decrease probable exception of channel closing time.Sleep(20 * time.Millisecond) close(channel) } } // Send send msg to a module. Todo: do not stuck func (ctx *ChannelContext) Send(module string, message model.Message) { // avoid exception because of channel colsing // TODO: need reconstruction defer func() { if exception := recover(); exception != nil { klog.Warningf("Recover when send message, exception: %+v", exception) } }() if channel := ctx.getChannel(module); channel != nil { channel <- message return } klog.Warningf("Get bad module name :%s when send message, do nothing", module) } // Receive msg from channel of module func (ctx *ChannelContext) Receive(module string) (model.Message, error) { if channel := ctx.getChannel(module); channel != nil { content := <-channel return content, nil } klog.Warningf("Failed to get channel for module:%s when receive message", module) return model.Message{}, fmt.Errorf("failed to get channel for module(%s)", module) } func getAnonChannelName(msgID string) string { return msgID } // SendSync sends message in a sync way func (ctx *ChannelContext) SendSync(module string, message model.Message, timeout time.Duration) (model.Message, error) { // avoid exception because of channel colsing // TODO: need reconstruction defer func() { if exception := recover(); exception != nil { klog.Warningf("Recover when sendsync message, exception: %+v", exception) } }() if timeout <= 0 { timeout = MessageTimeoutDefault } deadline := time.Now().Add(timeout) // make sure to set sync flag message.Header.Sync = true // check req/resp channel reqChannel := ctx.getChannel(module) if reqChannel == nil { return model.Message{}, fmt.Errorf("bad request module name(%s)", module) } sendTimer := time.NewTimer(timeout) select { case reqChannel <- message: case <-sendTimer.C: return model.Message{}, errors.New("timeout to send message") } sendTimer.Stop() // new anonymous channel for response anonChan := make(chan model.Message) anonName := getAnonChannelName(message.GetID()) ctx.anonChsLock.Lock() ctx.anonChannels[anonName] = anonChan ctx.anonChsLock.Unlock() defer func() { ctx.anonChsLock.Lock() delete(ctx.anonChannels, anonName) close(anonChan) ctx.anonChsLock.Unlock() }() var resp model.Message respTimer := time.NewTimer(time.Until(deadline)) select { case resp = <-anonChan: case <-respTimer.C: return 
model.Message{}, errors.New("timeout to get response") } respTimer.Stop() return resp, nil } // SendResp send resp for this message when using sync mode func (ctx *ChannelContext) SendResp(message model.Message) { anonName := getAnonChannelName(message.GetParentID()) ctx.anonChsLock.RLock() defer ctx.anonChsLock.RUnlock() if channel, exist := ctx.anonChannels[anonName]; exist { channel <- message return } klog.V(4).Infof("Get bad anonName:%s when sendresp message, do nothing", anonName) } // SendToGroup send msg to modules. Todo: do not stuck func (ctx *ChannelContext) SendToGroup(moduleType string, message model.Message) { // avoid exception because of channel colsing // TODO: need reconstruction defer func() { if exception := recover(); exception != nil { klog.Warningf("Recover when sendToGroup message, exception: %+v", exception) } }() send := func(ch chan model.Message) { select { case ch <- message: default: klog.Warningf("the message channel is full, message: %+v", message) select { case ch <- message: } } } if channelList := ctx.getTypeChannel(moduleType); channelList != nil { for _, channel := range channelList { go send(channel) } return } klog.Warningf("Get bad module type:%s when sendToGroup message, do nothing", moduleType) } // SendToGroupSync : broadcast the message to echo module channel, the module send response back anon channel // check timeout and the size of anon channel func (ctx *ChannelContext) SendToGroupSync(moduleType string, message model.Message, timeout time.Duration) error { // avoid exception because of channel colsing // TODO: need reconstruction defer func() { if exception := recover(); exception != nil { klog.Warningf("Recover when sendToGroupsync message, exception: %+v", exception) } }() if timeout <= 0 { timeout = MessageTimeoutDefault } deadline := time.Now().Add(timeout) channelList := ctx.getTypeChannel(moduleType) if channelList == nil { return fmt.Errorf("failed to get module type(%s) channel list", moduleType) } // echo module must sync a response, // let anonchan size be module number channelNumber := len(channelList) anonChan := make(chan model.Message, channelNumber) anonName := getAnonChannelName(message.GetID()) ctx.anonChsLock.Lock() ctx.anonChannels[anonName] = anonChan ctx.anonChsLock.Unlock() cleanup := func() error { ctx.anonChsLock.Lock() delete(ctx.anonChannels, anonName) close(anonChan) ctx.anonChsLock.Unlock() var uninvitedGuests int // cleanup anonchan and check parentid for resp for resp := range anonChan { if resp.GetParentID() != message.GetID() { uninvitedGuests++ } } if uninvitedGuests != 0 { klog.Errorf("Get some unexpected:%d resp when sendToGroupsync message", uninvitedGuests) return fmt.Errorf("got some unexpected(%d) resp", uninvitedGuests) } return nil } // make sure to set sync flag before sending message.Header.Sync = true var timeoutCounter int32 send := func(ch chan model.Message) { sendTimer := time.NewTimer(time.Until(deadline)) select { case ch <- message: sendTimer.Stop() case <-sendTimer.C: atomic.AddInt32(&timeoutCounter, 1) } } for _, channel := range channelList { go send(channel) } sendTimer := time.NewTimer(time.Until(deadline)) ticker := time.NewTicker(TickerTimeoutDefault) for { // annonChan is full if len(anonChan) == channelNumber { break } select { case <-ticker.C: case <-sendTimer.C: cleanup() if timeoutCounter != 0 { errInfo := fmt.Sprintf("timeout to send message, several %d timeout when send", timeoutCounter) return fmt.Errorf(errInfo) } klog.Error("Timeout to sendToGroupsync message") return 
fmt.Errorf("Timeout to send message") } } return cleanup() } // New Channel func (ctx *ChannelContext) newChannel() chan model.Message { channel := make(chan model.Message, ChannelSizeDefault) return channel } // getChannel return chan func (ctx *ChannelContext) getChannel(module string) chan model.Message { ctx.chsLock.RLock() defer ctx.chsLock.RUnlock() if _, exist := ctx.channels[module]; exist { return ctx.channels[module] } klog.Warningf("Failed to get channel, type:%s", module) return nil } // addChannel return chan func (ctx *ChannelContext) addChannel(module string, moduleCh chan model.Message) { ctx.chsLock.Lock() defer ctx.chsLock.Unlock() ctx.channels[module] = moduleCh } // deleteChannel by module name func (ctx *ChannelContext) delChannel(module string) { // delete module channel from channels map ctx.chsLock.Lock() _, exist := ctx.channels[module] if !exist { klog.Warningf("Failed to get channel, module:%s", module) return } delete(ctx.channels, module) ctx.chsLock.Unlock() // delete module channel from typechannels map ctx.typeChsLock.Lock() for _, moduleMap := range ctx.typeChannels { if _, exist := moduleMap[module]; exist { delete(moduleMap, module) break } } ctx.typeChsLock.Unlock() } // getTypeChannel return chan func (ctx *ChannelContext) getTypeChannel(moduleType string) map[string]chan model.Message { ctx.typeChsLock.RLock() defer ctx.typeChsLock.RUnlock() if _, exist := ctx.typeChannels[moduleType]; exist { return ctx.typeChannels[moduleType] } klog.Warningf("Failed to get type channel, type:%s", moduleType) return nil } func (ctx *ChannelContext) getModuleByChannel(ch chan model.Message) string { ctx.chsLock.RLock() defer ctx.chsLock.RUnlock() for module, channel := range ctx.channels { if channel == ch { return module } } klog.Warning("Failed to get module by channel") return "" } // addTypeChannel put modules into moduleType map func (ctx *ChannelContext) addTypeChannel(module, group string, moduleCh chan model.Message) { ctx.typeChsLock.Lock() defer ctx.typeChsLock.Unlock() if _, exist := ctx.typeChannels[group]; !exist { ctx.typeChannels[group] = make(map[string]chan model.Message) } ctx.typeChannels[group][module] = moduleCh } // AddModule adds module into module context func (ctx *ChannelContext) AddModule(module string) { channel := ctx.newChannel() ctx.addChannel(module, channel) } // AddModuleGroup adds modules into module context group func (ctx *ChannelContext) AddModuleGroup(module, group string) { if channel := ctx.getChannel(module); channel != nil { ctx.addTypeChannel(module, group, channel) return } klog.Warningf("Get bad module name %s when addmodulegroup", module) }
1
18,887
In fact it would be useful for debugging; I prefer to combine them into one debug-level log.
kubeedge-kubeedge
go
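The review comment above asks for the new warning to become a single debug-level log instead. A minimal sketch of that suggestion as a drop-in replacement for the lines added in the patch, reusing the field names shown there (message.Header, message.Router); the exact verbosity level is an assumption, not taken from the source:

```go
// Sketch only: replaces the klog.Warningf added in the patch with one
// debug-level log line. V(4) is an assumed verbosity level.
if message.Header.Sync {
	klog.V(4).Infof("beehive.Send() got a sync message, header: %+v, route: %+v",
		message.Header, message.Router)
}
```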
@@ -24,7 +24,9 @@ var alertDefine = { crash: { target: [ { value: 'Total crashes', name: 'Total crashes' }, - { value: 'New crash occurence', name: 'New crash occurence' } + { value: 'New crash occurence', name: 'New crash occurence' }, + { value: 'None fatal crash per session', name: 'None fatal crash per session' }, + { value: 'Fatal crash per session', name: 'Fatal crash per session' }, ], condition: [ { value: 'increased by at least', name: 'increased by at least' },
1
var alertDefine = { metric: { target: [ { value: 'Total users', name: 'Total users' }, { value: 'New users', name: 'New users' }, { value: 'Total sessions', name: 'Total sessions' }, { value: 'Average session duration', name: 'Average session duration' }, { value: 'Bounce rate', name: 'Bounce rate (%)' }, { value: 'Number of page views', name: 'Number of page views' }, { value: 'Purchases', name: 'Purchases' } ], condition: [ { value: 'increased by at least', name: 'increased by at least' }, { value: 'decreased by at least', name: 'decreased by at least' }, ] }, event: { target: [], condition: [ { value: 'increased by at least', name: 'increased by at least' }, { value: 'decreased by at least', name: 'decreased by at least' }, ] }, crash: { target: [ { value: 'Total crashes', name: 'Total crashes' }, { value: 'New crash occurence', name: 'New crash occurence' } ], condition: [ { value: 'increased by at least', name: 'increased by at least' }, { value: 'decreased by at least', name: 'decreased by at least' }, ] } } // dynamic to get value for different settings properties. var dict = { crash: { 'New crash occurence': { compareDescribe: function (settings) {return settings.alertDataSubType ;}, period: function(){ return 'every 5 minutes'} }, }, }; window.AlertsView = countlyView.extend({ initialize: function () { statusChanged = {}; }, beforeRender: function () { var self = this; return $.when( $.get(countlyGlobal["path"] + '/alerts/templates/alert-widget-drawer.html', function (src) { Handlebars.registerPartial("alert-widget-drawer", src); }), $.get(countlyGlobal["path"] + '/alerts/templates/alert-types-config-template.html', function (src) { Handlebars.registerPartial("alert-types-config-template", src); }), $.get(countlyGlobal["path"] + '/alerts/templates/form.html', function (src) { self.template = Handlebars.compile(src); }), alertsPlugin.requestAlertsList() ).then(function () { }); }, prepareDrawer: function () { this.widgetDrawer.init(); var self = this; $("#create-alert").off("click").on("click", function () { self.widgetDrawer.init(); $("#current_alert_id").text(''); $("#alert-widget-drawer").removeClass("open editing"); $("#alert-widget-drawer").find("#widget-types .opt").removeClass("disabled"); $("#alert-widget-drawer").addClass("open"); $("#create-widget").removeClass("disabled"); $(($('#alert-data-types').find("[data-data-type='metric']"))).trigger("click"); }); $('#alert-widge-close').off("click").on("click", function () { $("#alert-widget-drawer").removeClass("open"); }); }, renderTable: function () { pluginsData = []; var self = this; var alertsList = alertsPlugin.getAlertsList(); app.alertsView.updateCount(); for (var i = 0; i < alertsList.length; i++) { var appNameList = []; if(alertsList[i].selectedApps){ appNameList = _.map(alertsList[i].selectedApps, function (appID) { return countlyGlobal.apps[appID] && countlyGlobal.apps[appID].name }); } pluginsData.push({ id: alertsList[i]._id, appNameList: appNameList.join(', '), alertName: alertsList[i].alertName || '', type: alertsList[i].alertDataSubType || '', condtionText: alertsList[i].compareDescribe || '', enabled: alertsList[i].enabled || false, createdByUser: alertsList[i].createdByUser || '' }); } var isAdmin = countlyGlobal.member.global_admin; var dataTableDefine = { "aaData": pluginsData, "aoColumns": [ { "mData": 'alertName', "sType": "string", "sTitle": 'Alert Name' }, { "mData": function (row, type) { if (type == "display") { var disabled = (row.prepackaged) ? 
'disabled' : ''; var input = '<div class="on-off-switch ' + disabled + '">'; if (row.enabled) { input += '<input type="checkbox" class="on-off-switch-checkbox alert-switcher" id="plugin-' + row.id + '" checked ' + disabled + '>'; } else { input += '<input type="checkbox" class="on-off-switch-checkbox alert-switcher" id="plugin-' + row.id + '" ' + disabled + '>'; } input += '<label class="on-off-switch-label" for="plugin-' + row.id + '"></label>'; input += '<span class="text">' + 'Enable' + '</span>'; return input; } else { return row.enabled; } }, "sType": "string", "sTitle": 'Status', "bSortable": false, }, { "mData": 'appNameList', "sType": "string", "sTitle": 'Application', "bSortable": false, }, { "mData": 'condtionText', "sType": "string", "sTitle": 'Condition', "bSortable": false, } ] }; if(isAdmin){ dataTableDefine.aoColumns.push({ "mData": 'createdByUser', "sType": "string", "sTitle": 'Created by', "bSortable": false }); }; dataTableDefine.aoColumns.push({ "mData": function (row) { return "<div class='options-item'>" + "<div class='edit'></div>" + "<div class='edit-menu'>" + "<div class='edit-alert item'" + " id='" + row.id + "'" + ">Edit</div>" + "<div class='delete-alert item'" + " id='" + row.id + "'" + ">Delete</div></div>" + "</div>"; }, "bSortable": false, }); this.dtable = $('#alerts-table').dataTable($.extend({}, $.fn.dataTable.defaults, dataTableDefine)); this.dtable.stickyTableHeaders(); this.dtable.fnSort([[0, 'asc']]); $(".alert-switcher").off("click").on("click", function (e) { var pluginId = this.id.toString().replace(/^plugin-/, ''); var newStatus = $(this).is(":checked"); var list = alertsPlugin.getAlertsList(); var alertRecord = _.filter(list, function (item) { return item._id === pluginId; }); if (alertRecord) { (alertRecord[0].enabled != newStatus) ? (statusChanged[pluginId] = newStatus) : (delete statusChanged[pluginId]); } var keys = _.keys(statusChanged); if (keys && keys.length > 0) { $(".data-save-bar-remind").text(' You made ' + keys.length + ( keys.length === 1 ? ' change.' 
: ' changes.') ); return $(".data-saver-bar").removeClass("data-saver-bar-hide"); } $(".data-saver-bar").addClass("data-saver-bar-hide"); }); $(".data-saver-cancel-button").off("click").on("click", function () { statusChanged = {}; self.renderTable(); return $(".data-saver-bar").addClass("data-saver-bar-hide"); }) $(".data-saver-button").off("click").on("click", function () { alertsPlugin.updateAlertStatus(statusChanged, function () { alertsPlugin.requestAlertsList(function () { self.renderTable(); }); }); return $(".data-saver-bar").addClass("data-saver-bar-hide"); }) // load menu $("body").off("click", ".options-item .edit").on("click", ".options-item .edit", function () { $(this).next(".edit-menu").fadeToggle(); event.stopPropagation(); }); $(window).click(function() { $(".options-item").find(".edit").next(".edit-menu").fadeOut(); }); $(".delete-alert").off("click").on("click", function (e) { var alertID = e.target.id; return CountlyHelpers.confirm("Delete this alert?", "red", function (result) { if (result) { alertsPlugin.deleteAlert(alertID, function () { alertsPlugin.requestAlertsList(function () { self.renderTable(); }); }); } }); }) $(".edit-alert").off("click").on("click", function (e) { var alertID = e.target.id; var formData = alertsPlugin.getAlert(alertID); $("#alert-widget-drawer").addClass("open editing"); self.widgetDrawer.loadData(formData); }); }, renderCommon: function (isRefresh) { $(this.el).html(this.template()); this.renderTable(); this.prepareDrawer(); }, updateCount: function () { var count = alertsPlugin.getCount(); $("#alerts-running-sum").text(count.r); $("#alerts-total-sum").text(count.t); $("#alerts-today-sum").text(count.today); }, widgetDrawer: { loadAppViewData: function(selectedView){ var appID = $("#single-app-dropdown").clySelectGetSelection(); if (appID) { alertsPlugin.getViewForApp(appID, function (viewList) { $("#single-target2-dropdown").clySelectSetItems(viewList); if(selectedView){ alertsPlugin.getViewForApp(appID, function (viewList) { $("#single-target2-dropdown").clySelectSetSelection(selectedView, selectedView); }); }else { $("#single-target2-dropdown").clySelectSetSelection("", "Select a View"); } }); }else{ $("#single-target2-dropdown").clySelectSetSelection("", "please select app first"); } }, init: function () { var self = this; var apps = []; // clear alertName $("#alert-name-input").val(''); // select alert data type : metric , event crash var metricClickListner = function(){ $("#single-target-dropdown").off("cly-select-change").on("cly-select-change", function (e, selected) { var dataType = $(($('#alert-data-types').find(".selected")[0])).data("dataType"); var source = $("#" + dataType + "-condition-template").html(); $('.alert-condition-block').html(source); if(selected === 'Number of page views'){ var source = $("#metric2-condition-template").html(); $('.alert-condition-block').html(source); $("#single-target-dropdown").clySelectSetItems(alertDefine[dataType].target); self.loadAppViewData(); }else if(selected === 'Bounce rate'){ var source = $("#metric2-condition-template").html(); $('.alert-condition-block').html(source); $("#single-target-dropdown").clySelectSetItems(alertDefine[dataType].target); self.loadAppViewData(); }else if (selected === 'New crash occurence') { $("#single-target-condition-dropdown").css("visibility","hidden"); $('#alert-compare-value').css("visibility","hidden"); } else { $("#single-target-condition-dropdown").css("visibility","visible"); $('#alert-compare-value').css("visibility","visible"); } 
$("#single-target-dropdown").clySelectSetItems(alertDefine[dataType].target); $("#single-target-condition-dropdown").clySelectSetItems(alertDefine[dataType].condition); for(var i = 0; i < alertDefine[dataType].target.length; i++){ var item = alertDefine[dataType].target[i]; if( item.value === selected){ $("#single-target-dropdown").clySelectSetSelection(item.value, item.name); } } metricClickListner(); app.localize(); }); } $(".alert-data-type").off("click").on("click", function () { var dataType = $(this).data("dataType"); $(".alert-data-type").removeClass('selected'); $(this).addClass('selected'); $("#widget-section-single-app").show(); $("#single-app-dropdown").clySelectSetSelection("", "Select App"); var source = $("#" + dataType + "-condition-template").html(); $('.alert-condition-block').html(source); $("#single-target-dropdown").clySelectSetItems(alertDefine[dataType].target); $("#single-target-condition-dropdown").clySelectSetItems(alertDefine[dataType].condition); app.localize(); switch (dataType) { case 'metric': case 'crash': metricClickListner() break; case 'event': break; } }) // init content $(".alert-condition-block").html(''); for (var appId in countlyGlobal.apps) { apps.push({ value: appId, name: countlyGlobal.apps[appId].name }); } // $("#multi-app-dropdown").clyMultiSelectSetItems(apps); $("#single-app-dropdown").clySelectSetItems(apps); $("#single-app-dropdown").off("cly-select-change").on("cly-select-change", function (e, selected) { var dataType = $(($('#alert-data-types').find(".selected")[0])).data("dataType"); var dataSubType = $("#single-target-dropdown").clySelectGetSelection(); if (selected && dataType === 'event') { alertsPlugin.getEventsForApps(selected, function (eventData) { $("#single-target-dropdown").clySelectSetItems(eventData); $("#single-target-dropdown").clySelectSetSelection("", "Select event"); }); } if(selected && (dataSubType === 'Number of page views' || dataSubType === 'Bounce rate')) { self.loadAppViewData(); } }); // clear app selected value // $("#multi-app-dropdown").clyMultiSelectClearSelection(); $("#single-app-dropdown").clySelectSetSelection({}); //alert by $("#email-alert-input").val(""); $("#alert-widget-drawer").find(".section.settings").hide(); // $("#alert-widget-drawer").trigger("cly-widget-section-complete"); $(".cly-drawer").find(".close").off("click").on("click", function () { $(".grid-stack-item").removeClass("marked-for-editing"); $(this).parents(".cly-drawer").removeClass("open"); }); $("#create-widget").off().on("click", function () { var alertConfig = self.getWidgetSettings(true); for (var key in alertConfig) { if (!alertConfig[key]) { return CountlyHelpers.alert("Please complete all required fields", "green", function (result) { }); } } $("#alert-widget-drawer").removeClass("open"); alertsPlugin.saveAlert(alertConfig, function callback() { alertsPlugin.requestAlertsList(function () { app.alertsView.renderTable() }) }); }); $("#save-widget").off("click").on("click", function () { var alertConfig = self.getWidgetSettings(); for (var key in alertConfig) { if (!alertConfig[key]) { return CountlyHelpers.confirm("Please input all the fields", "green", function (result) { }); } } $("#alert-widget-drawer").removeClass("open"); alertsPlugin.saveAlert(alertConfig, function callback() { alertsPlugin.requestAlertsList(function () { app.alertsView.renderTable(); }); }); }); }, loadData: function (data) { $(($('#alert-data-types').find("[data-data-type='" + data.alertDataType + "']"))).trigger("click"); 
$("#current_alert_id").text(data._id) $("#alert-name-input").val(data.alertName); switch (data.alertDataType) { case 'metric': case 'crash': var appSelected = []; for (var index in data.selectedApps) { var appId = data.selectedApps[index]; countlyGlobal.apps[appId] && appSelected.push({ value: appId, name: countlyGlobal.apps[appId].name }); } // $("#multi-app-dropdown").clyMultiSelectSetSelection(appSelected); for (var index in data.selectedApps) { var appId = data.selectedApps[index]; countlyGlobal.apps[appId] && $("#single-app-dropdown").clySelectSetSelection(appId, countlyGlobal.apps[appId].name); } var target = _.find(alertDefine[data.alertDataType]['target'], function (m) { return m.value === data.alertDataSubType }); if (target) { $("#single-target-dropdown").clySelectSetSelection(target.value, target.name); } if(data.alertDataSubType2 && (data.alertDataSubType === 'Number of page views' || data.alertDataSubType === 'Bounce rate')){ this.loadAppViewData(data.alertDataSubType2) } break; case 'event': $("#single-target-dropdown").off("cly-select-change").on("cly-select-change", function (e, selected) { $("#single-target-dropdown").off("cly-select-change"); $("#single-target-dropdown").clySelectSetSelection(data.alertDataSubType, data.alertDataSubType); }); for (var index in data.selectedApps) { var appId = data.selectedApps[index]; countlyGlobal.apps[appId] && $("#single-app-dropdown").clySelectSetSelection(appId, countlyGlobal.apps[appId].name); } break; } var condition = _.find(alertDefine[data.alertDataType]['condition'], function (m) { return m.value === data.compareType }); if (condition) { $("#single-target-condition-dropdown").clySelectSetSelection(condition.value, condition.name); } $('#alert-compare-value-input').val(data.compareValue); for (var key in dict[data.alertDataSubType]) { if (typeof dict[data.alertDataSubType][key] === 'string') { $("#" + dict[data.alertDataSubType][key]).val(data[key]); } } $("#save-widget").removeClass("disabled"); }, getWidgetSettings: function (enabled) { var dataType = $(($('#alert-data-types').find(".selected")[0])).data("dataType"); var settings = { alertName: $("#alert-name-input").val(), alertDataType: dataType, alertDataSubType: $("#single-target-dropdown").clySelectGetSelection(), compareType: $('#single-target-condition-dropdown').clySelectGetSelection(), compareValue: $('#alert-compare-value-input').val(), period: 'every 59 mins starting on the 59 min', // 'every 10 seconds', //'at 23:59 everyday', alertBy: 'email', }; if(enabled){ settings.enabled = true; } if($("#single-target2-dropdown").clySelectGetSelection()){ settings.alertDataSubType2 = $("#single-target2-dropdown").clySelectGetSelection(); } switch (dataType) { case 'metric': case 'crash': if(settings.alertDataSubType === 'New crash occurence'){ delete settings.compareType; delete settings.compareValue; } break; case 'event': break; } var selectedSingleAPP = $("#single-app-dropdown").clySelectGetSelection(); settings['selectedApps'] = selectedSingleAPP ? [selectedSingleAPP] : null; settings['compareDescribe'] = settings.alertDataSubType + (settings.alertDataSubType2 ? ' (' + settings.alertDataSubType2 + ')' : '') + ' ' + settings.compareType + ' ' + settings.compareValue + "%"; var dictObject = dict[settings.alertDataType] && dict[settings.alertDataType][settings.alertDataSubType]; if (dictObject) { for (var key in dictObject) { settings[key] = typeof dictObject[key] === 'string' ? 
$("#" + dictObject[key]).val() : dictObject[key](settings) } } var emailList = [countlyGlobal.member._id]; settings['alertValues'] = emailList && emailList.length > 0 ? emailList : null; var currentId = $("#current_alert_id").text(); currentId && (settings._id = currentId); return settings; } } }); app.alertsView = new AlertsView(); if (countlyGlobal["member"].global_admin || countlyGlobal["member"]["admin_of"].length) { app.route('/manage/alerts', 'alerts', function () { this.renderWhenReady(this.alertsView); }); } $(document).ready(function () { if (countlyGlobal["member"].global_admin || countlyGlobal["member"]["admin_of"].length) { var menu = '<a href="#/manage/alerts" class="item">' + '<div class="logo-icon fa fa-envelope"></div>' + '<div class="text" data-localize="alert.plugin-title"></div>' + '</a>'; if ($('#management-submenu .help-toggle').length) $('#management-submenu .help-toggle').before(menu); } });
1
13,133
We would need to localize them eventually too, just FYI
Countly-countly-server
js
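The reviewer notes the newly added crash targets will eventually need localization. A hedged sketch of what that could look like for the `crash.target` entries, assuming Countly's usual `jQuery.i18n.map` lookup; the localization keys below are hypothetical:

```js
// Sketch only: the keys ("alert.total-crashes", "alert.fatal-crash-per-session")
// are hypothetical; the hardcoded strings remain as fallbacks until real
// entries exist in the plugin's localization files.
crash: {
    target: [
        { value: 'Total crashes', name: jQuery.i18n.map["alert.total-crashes"] || 'Total crashes' },
        { value: 'Fatal crash per session', name: jQuery.i18n.map["alert.fatal-crash-per-session"] || 'Fatal crash per session' }
    ]
}
```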
@@ -70,7 +70,7 @@ void sequential_model::write_proto(lbann_data::Model* proto) { model::write_proto(proto); //Add layers if (m_comm->am_world_master()) { - proto->set_name(name()); + proto->set_name(type()); for(size_t l = 0; l < m_layers.size(); l++) { auto layer_proto = proto->add_layer(); m_layers[l]->write_proto(layer_proto);
1
//////////////////////////////////////////////////////////////////////////////// // Copyright (c) 2014-2016, Lawrence Livermore National Security, LLC. // Produced at the Lawrence Livermore National Laboratory. // Written by the LBANN Research Team (B. Van Essen, et al.) listed in // the CONTRIBUTORS file. <[email protected]> // // LLNL-CODE-697807. // All rights reserved. // // This file is part of LBANN: Livermore Big Artificial Neural Network // Toolkit. For details, see http://software.llnl.gov/LBANN or // https://github.com/LLNL/LBANN. // // Licensed under the Apache License, Version 2.0 (the "Licensee"); you // may not use this file except in compliance with the License. You may // obtain a copy of the License at: // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or // implied. See the License for the specific language governing // permissions and limitations under the license. // // sequential .hpp .cpp - Sequential neural network models //////////////////////////////////////////////////////////////////////////////// #include "lbann/models/sequential.hpp" #include <unordered_set> namespace lbann { sequential_model::sequential_model(lbann_comm *comm, int mini_batch_size, objective_function *obj_fn, optimizer* default_optimizer) : model(comm, mini_batch_size, obj_fn, default_optimizer) {} void sequential_model::setup_layer_topology() { // Set up parent/child relationships between adjacent layers for (size_t i = 1; i < m_layers.size(); ++i) { m_layers[i]->add_parent_layer(m_layers[i-1]); } for (size_t i = 0; i < m_layers.size() - 1; ++i) { m_layers[i]->add_child_layer(m_layers[i+1]); } // Setup layer graph model::setup_layer_topology(); // Make sure that execution order is valid std::set<int> nodes; std::map<int,std::set<int>> edges; construct_layer_graph(nodes, edges); if (!graph::is_topologically_sorted(nodes, edges)) { std::stringstream err; err << __FILE__ << " " << __LINE__ << " :: " << "layer execution order is not topologically sorted"; throw lbann_exception(err.str()); } freeze_layers_under_frozen_surface(); } void sequential_model::write_proto(lbann_data::Model* proto) { model::write_proto(proto); //Add layers if (m_comm->am_world_master()) { proto->set_name(name()); for(size_t l = 0; l < m_layers.size(); l++) { auto layer_proto = proto->add_layer(); m_layers[l]->write_proto(layer_proto); } } } } // namespace lbann
1
13,227
It looks like the proto code needs to change the function name from set_name to set_type to be consistent.
LLNL-lbann
cpp
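The comment says the proto definition should be updated so the setter name matches. A sketch of the call site once the Model message's string field is renamed from `name` to `type` (protoc then generates `set_type()` for it); the proto change itself is assumed and not shown in the source:

```cpp
// Sketch only: assumes lbann_data::Model's "name" field has been renamed to
// "type", so the generated setter is set_type(). Mirrors the patched
// sequential_model::write_proto().
if (m_comm->am_world_master()) {
  proto->set_type(type());
  for (size_t l = 0; l < m_layers.size(); l++) {
    auto layer_proto = proto->add_layer();
    m_layers[l]->write_proto(layer_proto);
  }
}
```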
@@ -0,0 +1,16 @@ +// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package factory + +//go:generate go run ../../../scripts/generate/mockgen.go github.com/aws/amazon-ecs-agent/agent/ssm/factory SSMClientCreator mocks/factory_mocks.go
1
1
21,018
I think we can avoid using this pattern for this use case.
aws-amazon-ecs-agent
go
@@ -25,7 +25,9 @@ #include <pthread.h> #include <pwd.h> #include <signal.h> +#ifndef __ANDROID__ #include <spawn.h> +#endif #include <stdint.h> #include <stdlib.h> #include <string.h>
1
/* * Copyright (c) 2014-2016 DeNA Co., Ltd., Kazuho Oku, Nick Desaulniers * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include <errno.h> #include <fcntl.h> #include <grp.h> #include <pthread.h> #include <pwd.h> #include <signal.h> #include <spawn.h> #include <stdint.h> #include <stdlib.h> #include <string.h> #include <sys/types.h> #include <sys/wait.h> #include <unistd.h> #if !defined(_SC_NPROCESSORS_ONLN) #include <sys/sysctl.h> #endif #include "cloexec.h" #include "h2o/memory.h" #include "h2o/serverutil.h" #include "h2o/socket.h" #include "h2o/string_.h" void h2o_set_signal_handler(int signo, void (*cb)(int signo)) { struct sigaction action; memset(&action, 0, sizeof(action)); sigemptyset(&action.sa_mask); action.sa_handler = cb; sigaction(signo, &action, NULL); } int h2o_setuidgid(const char *user) { struct passwd pwbuf, *pw; char buf[65536]; /* should be large enough */ errno = 0; if (getpwnam_r(user, &pwbuf, buf, sizeof(buf), &pw) != 0) { perror("getpwnam_r"); return -1; } if (pw == NULL) { fprintf(stderr, "unknown user:%s\n", user); return -1; } if (setgid(pw->pw_gid) != 0) { fprintf(stderr, "setgid(%d) failed:%s\n", (int)pw->pw_gid, strerror(errno)); return -1; } if (initgroups(pw->pw_name, pw->pw_gid) != 0) { fprintf(stderr, "initgroups(%s, %d) failed:%s\n", pw->pw_name, (int)pw->pw_gid, strerror(errno)); return -1; } if (setuid(pw->pw_uid) != 0) { fprintf(stderr, "setuid(%d) failed:%s\n", (int)pw->pw_uid, strerror(errno)); return -1; } return 0; } size_t h2o_server_starter_get_fds(int **_fds) { const char *ports_env, *start, *end, *eq; size_t t; H2O_VECTOR(int) fds = {NULL}; if ((ports_env = getenv("SERVER_STARTER_PORT")) == NULL) return 0; if (ports_env[0] == '\0') { fprintf(stderr, "$SERVER_STARTER_PORT is empty\n"); return SIZE_MAX; } /* ports_env example: 127.0.0.1:80=3;/tmp/sock=4 */ for (start = ports_env; *start != '\0'; start = *end == ';' ? 
end + 1 : end) { if ((end = strchr(start, ';')) == NULL) end = start + strlen(start); if ((eq = memchr(start, '=', end - start)) == NULL) { fprintf(stderr, "invalid $SERVER_STARTER_PORT, an element without `=` in: %s\n", ports_env); goto Error; } if ((t = h2o_strtosize(eq + 1, end - eq - 1)) == SIZE_MAX) { fprintf(stderr, "invalid file descriptor number in $SERVER_STARTER_PORT: %s\n", ports_env); goto Error; } h2o_vector_reserve(NULL, &fds, fds.size + 1); fds.entries[fds.size++] = (int)t; } *_fds = fds.entries; return fds.size; Error: free(fds.entries); return SIZE_MAX; } static char **build_spawn_env(void) { extern char **environ; size_t num; /* calculate number of envvars, as well as looking for H2O_ROOT= */ for (num = 0; environ[num] != NULL; ++num) if (strncmp(environ[num], "H2O_ROOT=", sizeof("H2O_ROOT=") - 1) == 0) return NULL; /* not found */ char **newenv = h2o_mem_alloc(sizeof(*newenv) * (num + 2) + sizeof("H2O_ROOT=" H2O_TO_STR(H2O_ROOT))); memcpy(newenv, environ, sizeof(*newenv) * num); newenv[num] = (char *)(newenv + num + 2); newenv[num + 1] = NULL; strcpy(newenv[num], "H2O_ROOT=" H2O_TO_STR(H2O_ROOT)); return newenv; } pid_t h2o_spawnp(const char *cmd, char *const *argv, const int *mapped_fds, int cloexec_mutex_is_locked) { #if defined(__linux__) /* posix_spawnp of Linux does not return error if the executable does not exist, see * https://gist.github.com/kazuho/0c233e6f86d27d6e4f09 */ extern char **environ; int pipefds[2] = {-1, -1}, errnum; pid_t pid; /* create pipe, used for sending error codes */ if (pipe2(pipefds, O_CLOEXEC) != 0) goto Error; /* fork */ if (!cloexec_mutex_is_locked) pthread_mutex_lock(&cloexec_mutex); if ((pid = fork()) == 0) { /* in child process, map the file descriptors and execute; return the errnum through pipe if exec failed */ if (mapped_fds != NULL) { for (; *mapped_fds != -1; mapped_fds += 2) { if (mapped_fds[0] != mapped_fds[1]) { if (mapped_fds[1] != -1) dup2(mapped_fds[0], mapped_fds[1]); close(mapped_fds[0]); } } } char **env = build_spawn_env(); if (env != NULL) environ = env; execvp(cmd, argv); errnum = errno; write(pipefds[1], &errnum, sizeof(errnum)); _exit(EX_SOFTWARE); } if (!cloexec_mutex_is_locked) pthread_mutex_unlock(&cloexec_mutex); if (pid == -1) goto Error; /* parent process */ close(pipefds[1]); pipefds[1] = -1; ssize_t rret; errnum = 0; while ((rret = read(pipefds[0], &errnum, sizeof(errnum))) == -1 && errno == EINTR) ; if (rret != 0) { /* spawn failed */ while (waitpid(pid, NULL, 0) != pid) ; pid = -1; errno = errnum; goto Error; } /* spawn succeeded */ close(pipefds[0]); return pid; Error: errnum = errno; if (pipefds[0] != -1) close(pipefds[0]); if (pipefds[1] != -1) close(pipefds[1]); errno = errnum; return -1; #else posix_spawn_file_actions_t file_actions; pid_t pid; extern char **environ; char **env = build_spawn_env(); posix_spawn_file_actions_init(&file_actions); if (mapped_fds != NULL) { for (; *mapped_fds != -1; mapped_fds += 2) { if (mapped_fds[1] != -1) posix_spawn_file_actions_adddup2(&file_actions, mapped_fds[0], mapped_fds[1]); posix_spawn_file_actions_addclose(&file_actions, mapped_fds[0]); } } if (!cloexec_mutex_is_locked) pthread_mutex_lock(&cloexec_mutex); errno = posix_spawnp(&pid, cmd, &file_actions, NULL, argv, env != NULL ? 
env : environ); if (!cloexec_mutex_is_locked) pthread_mutex_unlock(&cloexec_mutex); free(env); if (errno != 0) return -1; return pid; #endif } int h2o_read_command(const char *cmd, char **argv, h2o_buffer_t **resp, int *child_status) { int respfds[2] = {-1, -1}; pid_t pid = -1; int mutex_locked = 0, ret = -1; h2o_buffer_init(resp, &h2o_socket_buffer_prototype); pthread_mutex_lock(&cloexec_mutex); mutex_locked = 1; /* create pipe for reading the result */ if (pipe(respfds) != 0) goto Exit; if (fcntl(respfds[0], F_SETFD, O_CLOEXEC) < 0) goto Exit; /* spawn */ int mapped_fds[] = {respfds[1], 1, /* stdout of the child process is read from the pipe */ -1}; if ((pid = h2o_spawnp(cmd, argv, mapped_fds, 1)) == -1) goto Exit; close(respfds[1]); respfds[1] = -1; pthread_mutex_unlock(&cloexec_mutex); mutex_locked = 0; /* read the response from pipe */ while (1) { h2o_iovec_t buf = h2o_buffer_reserve(resp, 8192); ssize_t r; while ((r = read(respfds[0], buf.base, buf.len)) == -1 && errno == EINTR) ; if (r <= 0) break; (*resp)->size += r; } Exit: if (mutex_locked) pthread_mutex_unlock(&cloexec_mutex); if (pid != -1) { /* wait for the child to complete */ pid_t r; while ((r = waitpid(pid, child_status, 0)) == -1 && errno == EINTR) ; if (r == pid) { /* success */ ret = 0; } } if (respfds[0] != -1) close(respfds[0]); if (respfds[1] != -1) close(respfds[1]); if (ret != 0) h2o_buffer_dispose(resp); return ret; } size_t h2o_numproc(void) { #if defined(_SC_NPROCESSORS_ONLN) return (size_t)sysconf(_SC_NPROCESSORS_ONLN); #elif defined(CTL_HW) && defined(HW_AVAILCPU) int name[] = {CTL_HW, HW_AVAILCPU}; int ncpu; size_t ncpu_sz = sizeof(ncpu); if (sysctl(name, sizeof(name) / sizeof(name[0]), &ncpu, &ncpu_sz, NULL, 0) != 0 || sizeof(ncpu) != ncpu_sz) { fprintf(stderr, "[ERROR] failed to obtain number of CPU cores, assuming as one\n"); ncpu = 1; } return ncpu; #else return 1; #endif }
1
12,672
Could you please change this to `#ifndef __linux__`? That's when we use our own implementation instead of `posix_spawnp`.
h2o-h2o
c
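The reviewer asks for the guard to key off `__linux__` rather than `__ANDROID__`, since the `posix_spawnp()` branch (the only code that needs spawn.h) is compiled only on non-Linux platforms in this file. A sketch of the requested change:

```c
/* Sketch of the requested guard: spawn.h is only used by the posix_spawnp()
 * branch of h2o_spawnp(), which this file compiles only when __linux__ is
 * not defined. */
#ifndef __linux__
#include <spawn.h>
#endif
```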
@@ -139,9 +139,10 @@ public class NodeStatus { return slots.stream().anyMatch(slot -> slot.getSession() == null); } + // Check if the Node's max session limit is not exceeded and has a free slot that supports the capability. public boolean hasCapacity(Capabilities caps) { - return slots.stream() - .anyMatch(slot -> slot.getSession() == null && slot.isSupporting(caps)); + return slots.stream().filter(slot -> slot.getSession() != null).count() < maxSessionCount + && slots.stream().anyMatch(slot -> slot.getSession() == null && slot.isSupporting(caps)); } public int getMaxSessionCount() {
1
// Licensed to the Software Freedom Conservancy (SFC) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The SFC licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. package org.openqa.selenium.grid.data; import org.openqa.selenium.Capabilities; import org.openqa.selenium.internal.Require; import org.openqa.selenium.json.JsonInput; import org.openqa.selenium.json.TypeToken; import java.net.URI; import java.time.Duration; import java.time.Instant; import java.util.HashSet; import java.util.Map; import java.util.Objects; import java.util.Set; import java.util.TreeMap; import static java.util.Collections.unmodifiableMap; import static java.util.Collections.unmodifiableSet; public class NodeStatus { private final NodeId nodeId; private final URI externalUri; private final int maxSessionCount; private final Set<Slot> slots; private final Availability availability; private final Duration heartbeatPeriod; private final String version; private final Map<String, String> osInfo; public NodeStatus( NodeId nodeId, URI externalUri, int maxSessionCount, Set<Slot> slots, Availability availability, Duration heartbeatPeriod, String version, Map<String, String> osInfo) { this.nodeId = Require.nonNull("Node id", nodeId); this.externalUri = Require.nonNull("URI", externalUri); this.maxSessionCount = Require.positive("Max session count", maxSessionCount, "Make sure that a driver is available on $PATH"); this.slots = unmodifiableSet(new HashSet<>(Require.nonNull("Slots", slots))); this.availability = Require.nonNull("Availability", availability); this.heartbeatPeriod = heartbeatPeriod; this.version = Require.nonNull("Grid Node version", version); this.osInfo = Require.nonNull("Node host OS info", osInfo); } public static NodeStatus fromJson(JsonInput input) { NodeId nodeId = null; URI externalUri = null; int maxSessions = 0; Set<Slot> slots = null; Availability availability = null; Duration heartbeatPeriod = null; String version = null; Map<String, String> osInfo = null; input.beginObject(); while (input.hasNext()) { switch (input.nextName()) { case "availability": availability = input.read(Availability.class); break; case "heartbeatPeriod": heartbeatPeriod = Duration.ofMillis(input.read(Long.class)); break; case "nodeId": nodeId = input.read(NodeId.class); break; case "maxSessions": maxSessions = input.read(Integer.class); break; case "slots": slots = input.read(new TypeToken<Set<Slot>>() { }.getType()); break; case "externalUri": externalUri = input.read(URI.class); break; case "version": version = input.read(String.class); break; case "osInfo": osInfo = input.read(Map.class); break; default: input.skipValue(); break; } } input.endObject(); return new NodeStatus( nodeId, externalUri, maxSessions, slots, availability, heartbeatPeriod, version, osInfo); } public boolean hasCapability(Capabilities caps) { return slots.stream().anyMatch(slot -> slot.isSupporting(caps)); } public boolean 
hasCapacity() { return slots.stream().anyMatch(slot -> slot.getSession() == null); } public boolean hasCapacity(Capabilities caps) { return slots.stream() .anyMatch(slot -> slot.getSession() == null && slot.isSupporting(caps)); } public int getMaxSessionCount() { return maxSessionCount; } public NodeId getNodeId() { return nodeId; } public URI getExternalUri() { return externalUri; } public Set<Slot> getSlots() { return slots; } public Availability getAvailability() { return availability; } public Duration getHeartbeatPeriod() { return heartbeatPeriod; } public String getVersion() { return version; } public Map<String, String> getOsInfo() { return osInfo; } public float getLoad() { float inUse = slots.parallelStream() .filter(slot -> slot.getSession() != null) .count(); return (inUse / (float) maxSessionCount) * 100f; } public long getLastSessionCreated() { return slots.parallelStream() .map(Slot::getLastStarted) .mapToLong(Instant::toEpochMilli) .max() .orElse(0); } @Override public boolean equals(Object o) { if (!(o instanceof NodeStatus)) { return false; } NodeStatus that = (NodeStatus) o; return Objects.equals(this.nodeId, that.nodeId) && Objects.equals(this.externalUri, that.externalUri) && this.maxSessionCount == that.maxSessionCount && Objects.equals(this.slots, that.slots) && Objects.equals(this.availability, that.availability) && Objects.equals(this.version, that.version); } @Override public int hashCode() { return Objects.hash(nodeId, externalUri, maxSessionCount, slots, version); } private Map<String, Object> toJson() { Map<String, Object> toReturn = new TreeMap<>(); toReturn.put("nodeId", nodeId); toReturn.put("externalUri", externalUri); toReturn.put("maxSessions", maxSessionCount); toReturn.put("slots", slots); toReturn.put("availability", availability); toReturn.put("heartbeatPeriod", heartbeatPeriod.toMillis()); toReturn.put("version", version); toReturn.put("osInfo", osInfo); return unmodifiableMap(toReturn); } }
1
19,085
Nit: put the `&&` on the previous line so that a reader knows that there's more to the statement at a casual glance.
SeleniumHQ-selenium
java
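A sketch of the reviewer's nit applied to the patched `hasCapacity(Capabilities)`: keeping the `&&` at the end of the first line so a casual reader sees the statement continues.

```java
// Sketch only: same logic as the patch, with '&&' trailing the first line.
public boolean hasCapacity(Capabilities caps) {
  return slots.stream().filter(slot -> slot.getSession() != null).count() < maxSessionCount &&
      slots.stream().anyMatch(slot -> slot.getSession() == null && slot.isSupporting(caps));
}
```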
@@ -49,6 +49,19 @@ func (t RootType) String() string { } } +func tlfTypeToPath(t tlf.Type) string { + switch t { + case tlf.Private: + return string(libkbfs.PrivatePathType) + case tlf.Public: + return string(libkbfs.PublicPathType) + case tlf.SingleTeam: + return string(libkbfs.SingleTeamPathType) + default: + return "<unknown TLF type>" + } +} + // Debug tag ID for an individual FS in keybase pages. const ctxOpID = "KBP"
1
// Copyright 2017 Keybase Inc. All rights reserved. // Use of this source code is governed by a BSD // license that can be found in the LICENSE file. package libpages import ( "context" "fmt" "strings" "github.com/keybase/client/go/protocol/keybase1" "github.com/keybase/kbfs/libfs" "github.com/keybase/kbfs/libkbfs" "github.com/keybase/kbfs/tlf" "go.uber.org/zap" "go.uber.org/zap/zapcore" ) // ErrInvalidKeybasePagesRecord is returned when the kbp= DNS record for a // domain is invalid. type ErrInvalidKeybasePagesRecord struct{} // Error returns the error interface. func (ErrInvalidKeybasePagesRecord) Error() string { return "invalid TXT record" } // RootType specifies the type of a root. type RootType int const ( _ RootType = iota // KBFSRoot means the root is backed by a KBFS path. KBFSRoot // GitRoot means the root is backed by a git repo stored in KBFS. GitRoot ) // String implements the fmt.Stringer interface func (t RootType) String() string { switch t { case KBFSRoot: return "kbfs" case GitRoot: return "git" default: return "unknown" } } // Debug tag ID for an individual FS in keybase pages. const ctxOpID = "KBP" type ctxTagKey int const ( ctxIDKey ctxTagKey = iota ) // Root defines the root of a static site hosted by Keybase Pages. It is // normally constructed from DNS records directly and is cheap to make. type Root struct { Type RootType TlfType tlf.Type TlfNameUnparsed string PathUnparsed string } // MakeFS makes a *libfs.FS from *r, which can be adapted to a http.FileSystem // (through ToHTTPFileSystem) to be used by http package to serve through HTTP. func (r *Root) MakeFS( ctx context.Context, log *zap.Logger, kbfsConfig libkbfs.Config) ( fs *libfs.FS, tlfID tlf.ID, shutdown func(), err error) { fsCtx, cancel := context.WithCancel(context.Background()) defer func() { zapFields := []zapcore.Field{ zap.String("root_type", r.Type.String()), zap.String("tlf_type", r.TlfType.String()), zap.String("tlf", r.TlfNameUnparsed), zap.String("root_path", r.PathUnparsed), } if err == nil { log.Info("root.MakeFS", zapFields...) } else { cancel() log.Warn("root.MakeFS", append(zapFields, zap.Error(err))...) } }() fsCtx, err = libkbfs.NewContextWithCancellationDelayer( libkbfs.CtxWithRandomIDReplayable( fsCtx, ctxIDKey, ctxOpID, nil)) if err != nil { return nil, tlf.ID{}, nil, err } switch r.Type { case KBFSRoot: tlfHandle, err := libkbfs.GetHandleFromFolderNameAndType( ctx, kbfsConfig.KBPKI(), kbfsConfig.MDOps(), r.TlfNameUnparsed, r.TlfType) if err != nil { return nil, tlf.ID{}, nil, err } fs, err = libfs.NewFS(fsCtx, kbfsConfig, tlfHandle, r.PathUnparsed, "", keybase1.MDPriorityNormal) if err != nil { return nil, tlf.ID{}, nil, err } return fs, tlfHandle.TlfID(), cancel, nil case GitRoot: session, err := kbfsConfig.KeybaseService().CurrentSession(ctx, 0) if err != nil { return nil, tlf.ID{}, nil, err } tlfHandle, err := libkbfs.GetHandleFromFolderNameAndType( ctx, kbfsConfig.KBPKI(), kbfsConfig.MDOps(), // We'll just checkout to the bot's private TLF for now. Note that // this means git remote is only supported by kbp servers that have // logged into a bot account. 
string(session.Name), tlf.Private) if err != nil { return nil, tlf.ID{}, nil, err } fs, err = libfs.NewFS(fsCtx, kbfsConfig, tlfHandle, fmt.Sprintf(".kbfs_autogit/%s/%s/%s", r.TlfType, r.TlfNameUnparsed, r.PathUnparsed), "", keybase1.MDPriorityNormal) if err != nil { return nil, tlf.ID{}, nil, err } return fs, tlfHandle.TlfID(), cancel, nil default: return nil, tlf.ID{}, nil, ErrInvalidKeybasePagesRecord{} } } const gitPrefix = "git@keybase:" const kbfsPrefix = "/keybase/" const privatePrefix = "private/" const publicPrefix = "public/" const teamPrefix = "team/" func setRootTlfNameAndPath(root *Root, str string) { parts := strings.SplitN(str, "/", 2) root.TlfNameUnparsed = parts[0] if len(parts) > 1 { root.PathUnparsed = parts[1] } } // str is everything after either gitPrefix or kbfsPrefix. func setRoot(root *Root, str string) error { switch { case strings.HasPrefix(str, privatePrefix): root.TlfType = tlf.Private setRootTlfNameAndPath(root, str[len(privatePrefix):]) return nil case strings.HasPrefix(str, publicPrefix): root.TlfType = tlf.Public setRootTlfNameAndPath(root, str[len(publicPrefix):]) return nil case strings.HasPrefix(str, teamPrefix): root.TlfType = tlf.SingleTeam setRootTlfNameAndPath(root, str[len(teamPrefix):]) return nil default: return ErrInvalidKeybasePagesRecord{} } } // ParseRoot parses a kbp= TXT record from a domain into a Root object. func ParseRoot(str string) (Root, error) { str = strings.TrimSpace(str) switch { case strings.HasPrefix(str, gitPrefix): root := Root{Type: GitRoot} if err := setRoot(&root, str[len(gitPrefix):]); err != nil { return Root{}, err } return root, nil case strings.HasPrefix(str, kbfsPrefix): root := Root{Type: KBFSRoot} if err := setRoot(&root, str[len(kbfsPrefix):]); err != nil { return Root{}, err } return root, nil default: return Root{}, ErrInvalidKeybasePagesRecord{} } }
1
19,102
Technically the "right" variables to use would be the ones in `libgit/autogit_node_wrappers.go", which define how the tlf types map to subdirs for autogit specifically. (They would have to be exported.) But this is fine too I guess, just a little dirtier.
keybase-kbfs
go
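The reviewer would rather reuse the subdirectory names defined in `libgit/autogit_node_wrappers.go` (once exported) than the libkbfs path types. A hedged sketch of the helper rewritten that way; the `libgit` constant names below are hypothetical, since the actual identifiers in that file are not shown here:

```go
// Sketch only: PrivateAutogitSubdir, PublicAutogitSubdir and TeamAutogitSubdir
// are hypothetical exported names for the constants in
// libgit/autogit_node_wrappers.go mentioned in the review.
func tlfTypeToPath(t tlf.Type) string {
	switch t {
	case tlf.Private:
		return libgit.PrivateAutogitSubdir
	case tlf.Public:
		return libgit.PublicAutogitSubdir
	case tlf.SingleTeam:
		return libgit.TeamAutogitSubdir
	default:
		return "<unknown TLF type>"
	}
}
```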
@@ -27,6 +27,18 @@ class CommunicartMailer < ActionMailer::Base ) end + def notification_for_approver_removed(to_email, approval, alert_partial=nil) + @approval = approval + @alert_partial = alert_partial + proposal = approval.proposal + send_proposal_email( + from_email: user_email_with_name(proposal.requester), + to_email: to_email, + proposal: proposal, + template_name: 'proposal_notification_email' + ) + end + def proposal_observer_email(to_email, proposal) # TODO have the from_email be whomever triggered this notification send_proposal_email(
1
class CommunicartMailer < ActionMailer::Base include Roadie::Rails::Automatic layout 'communicart_mailer' add_template_helper CommunicartMailerHelper add_template_helper ValueHelper add_template_helper ClientHelper add_template_helper MarkdownHelper # Approver can approve/reject/take other action def actions_for_approver(to_email, approval, alert_partial=nil) @show_approval_actions = true self.notification_for_approver(to_email, approval, alert_partial) end def notification_for_approver(to_email, approval, alert_partial=nil) @approval = approval @alert_partial = alert_partial proposal = approval.proposal send_proposal_email( from_email: user_email_with_name(proposal.requester), to_email: to_email, proposal: proposal, template_name: 'proposal_notification_email' ) end def proposal_observer_email(to_email, proposal) # TODO have the from_email be whomever triggered this notification send_proposal_email( to_email: to_email, proposal: proposal ) end def proposal_created_confirmation(proposal) send_proposal_email( to_email: proposal.requester.email_address, proposal: proposal ) end def approval_reply_received_email(approval) proposal = approval.proposal @approval = approval @alert_partial = 'approvals_complete' if proposal.approved? send_proposal_email( from_email: user_email_with_name(approval.user), to_email: proposal.requester.email_address, proposal: proposal ) end def comment_added_email(comment, to_email) @comment = comment # Don't send if special comment if [email protected]_comment send_proposal_email( from_email: user_email_with_name(comment.user), to_email: to_email, proposal: comment.proposal ) end end def feedback(sending_user, form_values) form_strings = form_values.map { |pair| "#{pair[0]}: #{pair[1]}" } message = form_strings.join("\n") mail( to: CommunicartMailer.support_email, subject: 'Feedback submission', from: default_sender_email, body: message, cc: sending_user.try(:email_address) ) end def self.support_email ENV['SUPPORT_EMAIL'] || '[email protected]' # not sensitive, so hard coding end private def email_with_name(email, name) # http://stackoverflow.com/a/8106387/358804 address = Mail::Address.new(email) address.display_name = name address.format end def sender_email ENV['NOTIFICATION_FROM_EMAIL'] || '[email protected]' end def default_sender_email email_with_name(sender_email, "Communicart") end def user_email_with_name(user) email_with_name(sender_email, user.full_name) end # `proposal` and `to_email` are required def send_proposal_email(proposal: nil, to_email: nil, from_email: nil, template_name: nil) @proposal = proposal.decorate # http://www.jwz.org/doc/threading.html headers['In-Reply-To'] = @proposal.email_msg_id headers['References'] = @proposal.email_msg_id mail( to: to_email, subject: @proposal.email_subject, from: from_email || default_sender_email, template_name: template_name ) end end
1
13,459
Should there be a template or something associated with this email? Right now it's identical to `notification_for_approver`.
18F-C2
rb
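The reviewer asks whether the new mailer method should have its own template instead of reusing the one from `notification_for_approver`. A sketch of one way to do that; the template name below is hypothetical:

```ruby
# Sketch only: 'approver_removed_email' is a hypothetical template name; a
# matching view (e.g. app/views/communicart_mailer/approver_removed_email.html.erb)
# would have to be added alongside it.
def notification_for_approver_removed(to_email, approval, alert_partial=nil)
  @approval = approval
  @alert_partial = alert_partial
  proposal = approval.proposal
  send_proposal_email(
    from_email: user_email_with_name(proposal.requester),
    to_email: to_email,
    proposal: proposal,
    template_name: 'approver_removed_email'
  )
end
```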
@@ -67,8 +67,9 @@ class SliderController extends AdminBaseController */ public function listAction() { - $queryBuilder = $this->getDoctrine()->getManager()->createQueryBuilder(); - $queryBuilder + /** @var \Doctrine\ORM\EntityManager $em */ + $em = $this->getDoctrine()->getManager(); + $queryBuilder = $em->createQueryBuilder() ->select('s') ->from(SliderItem::class, 's') ->where('s.domainId = :selectedDomainId')
1
<?php namespace Shopsys\FrameworkBundle\Controller\Admin; use Sensio\Bundle\FrameworkExtraBundle\Configuration\Route; use Shopsys\FrameworkBundle\Component\Domain\AdminDomainTabsFacade; use Shopsys\FrameworkBundle\Component\Grid\GridFactory; use Shopsys\FrameworkBundle\Component\Grid\QueryBuilderDataSource; use Shopsys\FrameworkBundle\Component\Router\Security\Annotation\CsrfProtection; use Shopsys\FrameworkBundle\Form\Admin\Slider\SliderItemFormType; use Shopsys\FrameworkBundle\Model\AdminNavigation\BreadcrumbOverrider; use Shopsys\FrameworkBundle\Model\Slider\SliderItem; use Shopsys\FrameworkBundle\Model\Slider\SliderItemDataFactoryInterface; use Shopsys\FrameworkBundle\Model\Slider\SliderItemFacade; use Symfony\Component\HttpFoundation\Request; class SliderController extends AdminBaseController { /** * @var \Shopsys\FrameworkBundle\Model\AdminNavigation\BreadcrumbOverrider */ protected $breadcrumbOverrider; /** * @var \Shopsys\FrameworkBundle\Component\Domain\AdminDomainTabsFacade */ protected $adminDomainTabsFacade; /** * @var \Shopsys\FrameworkBundle\Component\Grid\GridFactory */ protected $gridFactory; /** * @var \Shopsys\FrameworkBundle\Model\Slider\SliderItemFacade */ protected $sliderItemFacade; /** * @var \Shopsys\FrameworkBundle\Model\Slider\SliderItemDataFactoryInterface */ protected $sliderItemDataFactory; /** * @param \Shopsys\FrameworkBundle\Model\Slider\SliderItemFacade $sliderItemFacade * @param \Shopsys\FrameworkBundle\Component\Grid\GridFactory $gridFactory * @param \Shopsys\FrameworkBundle\Component\Domain\AdminDomainTabsFacade $adminDomainTabsFacade * @param \Shopsys\FrameworkBundle\Model\AdminNavigation\BreadcrumbOverrider $breadcrumbOverrider * @param \Shopsys\FrameworkBundle\Model\Slider\SliderItemDataFactoryInterface $sliderItemDataFactory */ public function __construct( SliderItemFacade $sliderItemFacade, GridFactory $gridFactory, AdminDomainTabsFacade $adminDomainTabsFacade, BreadcrumbOverrider $breadcrumbOverrider, SliderItemDataFactoryInterface $sliderItemDataFactory ) { $this->sliderItemFacade = $sliderItemFacade; $this->gridFactory = $gridFactory; $this->adminDomainTabsFacade = $adminDomainTabsFacade; $this->breadcrumbOverrider = $breadcrumbOverrider; $this->sliderItemDataFactory = $sliderItemDataFactory; } /** * @Route("/slider/list/") */ public function listAction() { $queryBuilder = $this->getDoctrine()->getManager()->createQueryBuilder(); $queryBuilder ->select('s') ->from(SliderItem::class, 's') ->where('s.domainId = :selectedDomainId') ->setParameter('selectedDomainId', $this->adminDomainTabsFacade->getSelectedDomainId()); $dataSource = new QueryBuilderDataSource($queryBuilder, 's.id'); $grid = $this->gridFactory->create('sliderItemList', $dataSource); $grid->enableDragAndDrop(SliderItem::class); $grid->addColumn('name', 's.name', t('Name')); $grid->addColumn('link', 's.link', t('Link')); $grid->addEditActionColumn('admin_slider_edit', ['id' => 's.id']); $grid->addDeleteActionColumn('admin_slider_delete', ['id' => 's.id']) ->setConfirmMessage(t('Do you really want to remove this page?')); $grid->setTheme('@ShopsysFramework/Admin/Content/Slider/listGrid.html.twig'); return $this->render('@ShopsysFramework/Admin/Content/Slider/list.html.twig', [ 'gridView' => $grid->createView(), ]); } /** * @Route("/slider/item/new/") * @param \Symfony\Component\HttpFoundation\Request $request */ public function newAction(Request $request) { $sliderItemData = $this->sliderItemDataFactory->create(); $sliderItemData->domainId = 
$this->adminDomainTabsFacade->getSelectedDomainId(); $form = $this->createForm(SliderItemFormType::class, $sliderItemData, [ 'scenario' => SliderItemFormType::SCENARIO_CREATE, 'slider_item' => null, ]); $form->handleRequest($request); if ($form->isSubmitted() && $form->isValid()) { $sliderItem = $this->sliderItemFacade->create($form->getData()); $this->getFlashMessageSender()->addSuccessFlashTwig( t('Slider page <strong><a href="{{ url }}">{{ name }}</a></strong> created'), [ 'name' => $sliderItem->getName(), 'url' => $this->generateUrl('admin_slider_edit', ['id' => $sliderItem->getId()]), ] ); return $this->redirectToRoute('admin_slider_list'); } if ($form->isSubmitted() && !$form->isValid()) { $this->getFlashMessageSender()->addErrorFlashTwig(t('Please check the correctness of all data filled.')); } return $this->render('@ShopsysFramework/Admin/Content/Slider/new.html.twig', [ 'form' => $form->createView(), 'selectedDomainId' => $this->adminDomainTabsFacade->getSelectedDomainId(), ]); } /** * @Route("/slider/item/edit/{id}", requirements={"id"="\d+"}) * @param \Symfony\Component\HttpFoundation\Request $request * @param int $id */ public function editAction(Request $request, $id) { $sliderItem = $this->sliderItemFacade->getById($id); $sliderItemData = $this->sliderItemDataFactory->createFromSliderItem($sliderItem); $form = $this->createForm(SliderItemFormType::class, $sliderItemData, [ 'scenario' => SliderItemFormType::SCENARIO_EDIT, 'slider_item' => $sliderItem, ]); $form->handleRequest($request); if ($form->isSubmitted() && $form->isValid()) { $this->sliderItemFacade->edit($id, $sliderItemData); $this->getFlashMessageSender()->addSuccessFlashTwig( t('Slider page <strong><a href="{{ url }}">{{ name }}</a></strong> modified'), [ 'name' => $sliderItem->getName(), 'url' => $this->generateUrl('admin_slider_edit', ['id' => $sliderItem->getId()]), ] ); return $this->redirectToRoute('admin_slider_list'); } if ($form->isSubmitted() && !$form->isValid()) { $this->getFlashMessageSender()->addErrorFlash(t('Please check the correctness of all data filled.')); } $this->breadcrumbOverrider->overrideLastItem(t('Editing slider page - %name%', ['%name%' => $sliderItem->getName()])); return $this->render('@ShopsysFramework/Admin/Content/Slider/edit.html.twig', [ 'form' => $form->createView(), 'sliderItem' => $sliderItem, ]); } /** * @Route("/slider/item/delete/{id}", requirements={"id" = "\d+"}) * @CsrfProtection * @param int $id */ public function deleteAction($id) { try { $name = $this->sliderItemFacade->getById($id)->getName(); $this->sliderItemFacade->delete($id); $this->getFlashMessageSender()->addSuccessFlashTwig( t('Page <strong>{{ name }}</strong> deleted'), [ 'name' => $name, ] ); } catch (\Shopsys\FrameworkBundle\Model\Slider\Exception\SliderItemNotFoundException $ex) { $this->getFlashMessageSender()->addErrorFlash(t('Selected page doesn\'t exist.')); } return $this->redirectToRoute('admin_slider_list'); } }
1
16297
I hope there is some extension in PHPStan for this.
shopsys-shopsys
php
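The shopsys patch above replaces a chained `createQueryBuilder()` call with an explicitly annotated `$em` local, so static analysis (the PHPStan concern in the review message) can resolve the `EntityManager` methods behind the broadly typed `getManager()` return value. As a hedged illustration of the same idea in Java, using made-up stand-in types rather than the real Doctrine/Symfony API: narrowing the broad return type into a local of the concrete type is what makes the subtype-only method visible to the type checker (in PHP the `@var` docblock plays the role the cast plays here).

```java
// Illustrative only: ObjectManager, EntityManager and DoctrineRegistry are
// stand-ins for the Doctrine types, not the real API.
interface ObjectManager { void flush(); }

interface EntityManager extends ObjectManager {
  StringBuilder createQueryBuilder();   // subtype-only method
}

class DoctrineRegistry {
  private final EntityManager em;
  DoctrineRegistry(EntityManager em) { this.em = em; }

  // Declared with the broad interface, like Doctrine's getManager().
  ObjectManager getManager() { return em; }
}

class SliderListAction {
  StringBuilder listQuery(DoctrineRegistry registry) {
    // registry.getManager().createQueryBuilder();   // would not compile: broad type
    EntityManager em = (EntityManager) registry.getManager(); // explicit narrowing
    return em.createQueryBuilder().append("SELECT s FROM SliderItem s");
  }
}
```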
@@ -1025,6 +1025,7 @@ public class Dictionary { assert morphSep > 0; assert morphSep > flagSep; int sep = flagSep < 0 ? morphSep : flagSep; + if (sep == 0) return 0; CharSequence toWrite; String beforeSep = line.substring(0, sep);
1
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.lucene.analysis.hunspell; import static org.apache.lucene.analysis.hunspell.AffixKind.*; import java.io.BufferedInputStream; import java.io.BufferedReader; import java.io.ByteArrayInputStream; import java.io.IOException; import java.io.InputStream; import java.io.InputStreamReader; import java.io.LineNumberReader; import java.nio.charset.Charset; import java.nio.charset.CharsetDecoder; import java.nio.charset.CodingErrorAction; import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; import java.text.ParseException; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.Comparator; import java.util.HashMap; import java.util.HashSet; import java.util.LinkedHashMap; import java.util.LinkedHashSet; import java.util.List; import java.util.Map; import java.util.Set; import java.util.TreeMap; import java.util.stream.Collectors; import org.apache.lucene.codecs.CodecUtil; import org.apache.lucene.store.Directory; import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexOutput; import org.apache.lucene.util.ArrayUtil; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.CharsRef; import org.apache.lucene.util.FixedBitSet; import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.IntsRef; import org.apache.lucene.util.IntsRefBuilder; import org.apache.lucene.util.OfflineSorter; import org.apache.lucene.util.OfflineSorter.ByteSequencesReader; import org.apache.lucene.util.OfflineSorter.ByteSequencesWriter; import org.apache.lucene.util.fst.FST; import org.apache.lucene.util.fst.FSTCompiler; import org.apache.lucene.util.fst.IntSequenceOutputs; import org.apache.lucene.util.fst.Util; /** In-memory structure for the dictionary (.dic) and affix (.aff) data of a hunspell dictionary. */ public class Dictionary { // Derived from woorm/LibreOffice dictionaries. // See TestAllDictionaries.testMaxPrologueNeeded. static final int MAX_PROLOGUE_SCAN_WINDOW = 30 * 1024; static final char[] NOFLAGS = new char[0]; static final char FLAG_UNSET = (char) 0; private static final int DEFAULT_FLAGS = 65510; static final char HIDDEN_FLAG = (char) 65511; // called 'ONLYUPCASEFLAG' in Hunspell static final Charset DEFAULT_CHARSET = StandardCharsets.ISO_8859_1; CharsetDecoder decoder = replacingDecoder(DEFAULT_CHARSET); FST<IntsRef> prefixes; FST<IntsRef> suffixes; Breaks breaks = Breaks.DEFAULT; /** * All condition checks used by prefixes and suffixes. these are typically re-used across many * affix stripping rules. so these are deduplicated, to save RAM. 
*/ ArrayList<AffixCondition> patterns = new ArrayList<>(); /** * The entries in the .dic file, mapping to their set of flags. the fst output is the ordinal list * for flagLookup. */ FST<IntsRef> words; /** A Bloom filter over {@link #words} to avoid unnecessary expensive FST traversals */ FixedBitSet wordHashes; /** * The list of unique flagsets (wordforms). theoretically huge, but practically small (for Polish * this is 756), otherwise humans wouldn't be able to deal with it either. */ final FlagEnumerator.Lookup flagLookup; // the list of unique strip affixes. char[] stripData; int[] stripOffsets; String wordChars = ""; // 4 chars per affix, each char representing an unsigned 2-byte integer char[] affixData = new char[32]; private int currentAffix = 0; // offsets in affixData static final int AFFIX_FLAG = 0; static final int AFFIX_STRIP_ORD = 1; private static final int AFFIX_CONDITION = 2; static final int AFFIX_APPEND = 3; // Default flag parsing strategy FlagParsingStrategy flagParsingStrategy = new SimpleFlagParsingStrategy(); // AF entries private String[] aliases; private int aliasCount = 0; // AM entries private String[] morphAliases; private int morphAliasCount = 0; final List<String> morphData = new ArrayList<>(Collections.singletonList("")); // empty data at 0 /** * we set this during sorting, so we know to add an extra int (index in {@link #morphData}) to FST * output */ boolean hasCustomMorphData; boolean ignoreCase; boolean checkSharpS; boolean complexPrefixes; /** * All flags used in affix continuation classes. If an outer affix's flag isn't here, there's no * need to do 2-level affix stripping with it. */ private char[] secondStagePrefixFlags, secondStageSuffixFlags; char circumfix; char keepcase, forceUCase; char needaffix; char forbiddenword; char onlyincompound, compoundBegin, compoundMiddle, compoundEnd, compoundFlag; char compoundPermit, compoundForbid; boolean checkCompoundCase, checkCompoundDup, checkCompoundRep; boolean checkCompoundTriple, simplifiedTriple; int compoundMin = 3, compoundMax = Integer.MAX_VALUE; List<CompoundRule> compoundRules; // nullable List<CheckCompoundPattern> checkCompoundPatterns = new ArrayList<>(); // ignored characters (dictionary, affix, inputs) private char[] ignore; String tryChars = ""; String[] neighborKeyGroups = {"qwertyuiop", "asdfghjkl", "zxcvbnm"}; boolean enableSplitSuggestions = true; List<RepEntry> repTable = new ArrayList<>(); List<List<String>> mapTable = new ArrayList<>(); int maxDiff = 5; int maxNGramSuggestions = 4; boolean onlyMaxDiff; char noSuggest, subStandard; ConvTable iconv, oconv; // true if we can strip suffixes "down to nothing" boolean fullStrip; // language declaration of the dictionary String language; // true if case algorithms should use alternate (Turkish/Azeri) mapping private boolean alternateCasing; /** * Creates a new Dictionary containing the information read from the provided InputStreams to * hunspell affix and dictionary files. You have to close the provided InputStreams yourself. * * @param tempDir Directory to use for offline sorting * @param tempFileNamePrefix prefix to use to generate temp file names * @param affix InputStream for reading the hunspell affix file (won't be closed). * @param dictionary InputStream for reading the hunspell dictionary file (won't be closed). 
* @throws IOException Can be thrown while reading from the InputStreams * @throws ParseException Can be thrown if the content of the files does not meet expected formats */ public Dictionary( Directory tempDir, String tempFileNamePrefix, InputStream affix, InputStream dictionary) throws IOException, ParseException { this(tempDir, tempFileNamePrefix, affix, Collections.singletonList(dictionary), false); } /** * Creates a new Dictionary containing the information read from the provided InputStreams to * hunspell affix and dictionary files. You have to close the provided InputStreams yourself. * * @param tempDir Directory to use for offline sorting * @param tempFileNamePrefix prefix to use to generate temp file names * @param affix InputStream for reading the hunspell affix file (won't be closed). * @param dictionaries InputStream for reading the hunspell dictionary files (won't be closed). * @throws IOException Can be thrown while reading from the InputStreams * @throws ParseException Can be thrown if the content of the files does not meet expected formats */ public Dictionary( Directory tempDir, String tempFileNamePrefix, InputStream affix, List<InputStream> dictionaries, boolean ignoreCase) throws IOException, ParseException { this.ignoreCase = ignoreCase; try (BufferedInputStream affixStream = new BufferedInputStream(affix, MAX_PROLOGUE_SCAN_WINDOW) { @Override public void close() { // TODO: maybe we should consume and close it? Why does it need to stay open? // Don't close the affix stream as per javadoc. } }) { // I assume we don't support other BOMs (utf16, etc.)? We trivially could, // by adding maybeConsume() with a proper bom... but I don't see hunspell repo to have // any such exotic examples. Charset streamCharset; if (maybeConsume(affixStream, BOM_UTF8)) { streamCharset = StandardCharsets.UTF_8; } else { streamCharset = DEFAULT_CHARSET; } /* * pass 1: look for encoding & flag. This is simple but works. We just prefetch * a large enough chunk of the input and scan through it. The buffered data will * be subsequently reused anyway so nothing is wasted. */ affixStream.mark(MAX_PROLOGUE_SCAN_WINDOW); byte[] prologue = affixStream.readNBytes(MAX_PROLOGUE_SCAN_WINDOW - 1); affixStream.reset(); readConfig(new ByteArrayInputStream(prologue), streamCharset); // pass 2: parse affixes FlagEnumerator flagEnumerator = new FlagEnumerator(); readAffixFile(affixStream, decoder, flagEnumerator); // read dictionary entries IndexOutput unsorted = tempDir.createTempOutput(tempFileNamePrefix, "dat", IOContext.DEFAULT); int wordCount = mergeDictionaries(dictionaries, decoder, unsorted); wordHashes = new FixedBitSet(Integer.highestOneBit(wordCount * 10)); String sortedFile = sortWordsOffline(tempDir, tempFileNamePrefix, unsorted); words = readSortedDictionaries(tempDir, sortedFile, flagEnumerator); flagLookup = flagEnumerator.finish(); aliases = null; // no longer needed morphAliases = null; // no longer needed } } int formStep() { return hasCustomMorphData ? 
2 : 1; } /** Looks up Hunspell word forms from the dictionary */ IntsRef lookupWord(char[] word, int offset, int length) { int hash = CharsRef.stringHashCode(word, offset, length); if (!wordHashes.get(Math.abs(hash) % wordHashes.length())) { return null; } return lookup(words, word, offset, length); } // only for testing IntsRef lookupPrefix(char[] word) { return lookup(prefixes, word, 0, word.length); } // only for testing IntsRef lookupSuffix(char[] word) { return lookup(suffixes, word, 0, word.length); } IntsRef lookup(FST<IntsRef> fst, char[] word, int offset, int length) { if (fst == null) { return null; } final FST.BytesReader bytesReader = fst.getBytesReader(); final FST.Arc<IntsRef> arc = fst.getFirstArc(new FST.Arc<>()); // Accumulate output as we go IntsRef output = fst.outputs.getNoOutput(); int l = offset + length; for (int i = offset, cp; i < l; i += Character.charCount(cp)) { cp = Character.codePointAt(word, i, l); output = nextArc(fst, arc, bytesReader, output, cp); if (output == null) { return null; } } return nextArc(fst, arc, bytesReader, output, FST.END_LABEL); } static IntsRef nextArc( FST<IntsRef> fst, FST.Arc<IntsRef> arc, FST.BytesReader reader, IntsRef output, int ch) { try { if (fst.findTargetArc(ch, arc, arc, reader) == null) { return null; } } catch (IOException bogus) { throw new RuntimeException(bogus); } return fst.outputs.add(output, arc.output()); } /** * Reads the affix file through the provided InputStream, building up the prefix and suffix maps * * @param affixStream InputStream to read the content of the affix file from * @param decoder CharsetDecoder to decode the content of the file * @throws IOException Can be thrown while reading from the InputStream */ private void readAffixFile(InputStream affixStream, CharsetDecoder decoder, FlagEnumerator flags) throws IOException, ParseException { TreeMap<String, List<Integer>> prefixes = new TreeMap<>(); TreeMap<String, List<Integer>> suffixes = new TreeMap<>(); Set<Character> prefixContFlags = new HashSet<>(); Set<Character> suffixContFlags = new HashSet<>(); Map<String, Integer> seenPatterns = new HashMap<>(); // zero condition -> 0 ord seenPatterns.put(AffixCondition.ALWAYS_TRUE_KEY, 0); patterns.add(null); // zero strip -> 0 ord Map<String, Integer> seenStrips = new LinkedHashMap<>(); seenStrips.put("", 0); LineNumberReader reader = new LineNumberReader(new InputStreamReader(affixStream, decoder)); String line; while ((line = reader.readLine()) != null) { // ignore any BOM marker on first line if (reader.getLineNumber() == 1 && line.startsWith("\uFEFF")) { line = line.substring(1); } line = line.trim(); if (line.isEmpty()) continue; String firstWord = line.split("\\s")[0]; // TODO: convert to a switch? 
if ("AF".equals(firstWord)) { parseAlias(line); } else if ("AM".equals(firstWord)) { parseMorphAlias(line); } else if ("PFX".equals(firstWord)) { parseAffix( prefixes, prefixContFlags, line, reader, PREFIX, seenPatterns, seenStrips, flags); } else if ("SFX".equals(firstWord)) { parseAffix( suffixes, suffixContFlags, line, reader, SUFFIX, seenPatterns, seenStrips, flags); } else if (line.equals("COMPLEXPREFIXES")) { complexPrefixes = true; // 2-stage prefix+1-stage suffix instead of 2-stage suffix+1-stage prefix } else if ("CIRCUMFIX".equals(firstWord)) { circumfix = flagParsingStrategy.parseFlag(singleArgument(reader, line)); } else if ("KEEPCASE".equals(firstWord)) { keepcase = flagParsingStrategy.parseFlag(singleArgument(reader, line)); } else if ("FORCEUCASE".equals(firstWord)) { forceUCase = flagParsingStrategy.parseFlag(singleArgument(reader, line)); } else if ("NEEDAFFIX".equals(firstWord) || "PSEUDOROOT".equals(firstWord)) { needaffix = flagParsingStrategy.parseFlag(singleArgument(reader, line)); } else if ("ONLYINCOMPOUND".equals(firstWord)) { onlyincompound = flagParsingStrategy.parseFlag(singleArgument(reader, line)); } else if ("CHECKSHARPS".equals(firstWord)) { checkSharpS = true; } else if ("IGNORE".equals(firstWord)) { ignore = singleArgument(reader, line).toCharArray(); Arrays.sort(ignore); } else if ("ICONV".equals(firstWord) || "OCONV".equals(firstWord)) { int num = parseNum(reader, line); ConvTable res = parseConversions(reader, num); if (line.startsWith("I")) { iconv = res; } else { oconv = res; } } else if ("FULLSTRIP".equals(firstWord)) { fullStrip = true; } else if ("LANG".equals(firstWord)) { language = singleArgument(reader, line); this.alternateCasing = hasLanguage("tr", "az"); } else if ("BREAK".equals(firstWord)) { breaks = parseBreaks(reader, line); } else if ("WORDCHARS".equals(firstWord)) { wordChars = firstArgument(reader, line); } else if ("TRY".equals(firstWord)) { tryChars = firstArgument(reader, line); } else if ("REP".equals(firstWord)) { int count = parseNum(reader, line); for (int i = 0; i < count; i++) { String[] parts = splitBySpace(reader, reader.readLine(), 3, Integer.MAX_VALUE); repTable.add(new RepEntry(parts[1], parts[2])); } } else if ("MAP".equals(firstWord)) { int count = parseNum(reader, line); for (int i = 0; i < count; i++) { mapTable.add(parseMapEntry(reader, reader.readLine())); } } else if ("KEY".equals(firstWord)) { neighborKeyGroups = singleArgument(reader, line).split("\\|"); } else if ("NOSPLITSUGS".equals(firstWord)) { enableSplitSuggestions = false; } else if ("MAXNGRAMSUGS".equals(firstWord)) { maxNGramSuggestions = Integer.parseInt(singleArgument(reader, line)); } else if ("MAXDIFF".equals(firstWord)) { int i = Integer.parseInt(singleArgument(reader, line)); if (i < 0 || i > 10) { throw new ParseException("MAXDIFF should be between 0 and 10", reader.getLineNumber()); } maxDiff = i; } else if ("ONLYMAXDIFF".equals(firstWord)) { onlyMaxDiff = true; } else if ("FORBIDDENWORD".equals(firstWord)) { forbiddenword = flagParsingStrategy.parseFlag(singleArgument(reader, line)); } else if ("NOSUGGEST".equals(firstWord)) { noSuggest = flagParsingStrategy.parseFlag(singleArgument(reader, line)); } else if ("SUBSTANDARD".equals(firstWord)) { subStandard = flagParsingStrategy.parseFlag(singleArgument(reader, line)); } else if ("COMPOUNDMIN".equals(firstWord)) { compoundMin = Math.max(1, parseNum(reader, line)); } else if ("COMPOUNDWORDMAX".equals(firstWord)) { compoundMax = Math.max(1, parseNum(reader, line)); } else if 
("COMPOUNDRULE".equals(firstWord)) { compoundRules = parseCompoundRules(reader, parseNum(reader, line)); } else if ("COMPOUNDFLAG".equals(firstWord)) { compoundFlag = flagParsingStrategy.parseFlag(singleArgument(reader, line)); } else if ("COMPOUNDBEGIN".equals(firstWord)) { compoundBegin = flagParsingStrategy.parseFlag(singleArgument(reader, line)); } else if ("COMPOUNDMIDDLE".equals(firstWord)) { compoundMiddle = flagParsingStrategy.parseFlag(singleArgument(reader, line)); } else if ("COMPOUNDEND".equals(firstWord)) { compoundEnd = flagParsingStrategy.parseFlag(singleArgument(reader, line)); } else if ("COMPOUNDPERMITFLAG".equals(firstWord)) { compoundPermit = flagParsingStrategy.parseFlag(singleArgument(reader, line)); } else if ("COMPOUNDFORBIDFLAG".equals(firstWord)) { compoundForbid = flagParsingStrategy.parseFlag(singleArgument(reader, line)); } else if ("CHECKCOMPOUNDCASE".equals(firstWord)) { checkCompoundCase = true; } else if ("CHECKCOMPOUNDDUP".equals(firstWord)) { checkCompoundDup = true; } else if ("CHECKCOMPOUNDREP".equals(firstWord)) { checkCompoundRep = true; } else if ("CHECKCOMPOUNDTRIPLE".equals(firstWord)) { checkCompoundTriple = true; } else if ("SIMPLIFIEDTRIPLE".equals(firstWord)) { simplifiedTriple = true; } else if ("CHECKCOMPOUNDPATTERN".equals(firstWord)) { int count = parseNum(reader, line); for (int i = 0; i < count; i++) { checkCompoundPatterns.add( new CheckCompoundPattern(reader.readLine(), flagParsingStrategy, this)); } } else if ("SET".equals(firstWord)) { checkCriticalDirectiveSame( "SET", reader, decoder.charset(), getDecoder(singleArgument(reader, line)).charset()); } else if ("FLAG".equals(firstWord)) { FlagParsingStrategy strategy = getFlagParsingStrategy(line, decoder.charset()); checkCriticalDirectiveSame( "FLAG", reader, flagParsingStrategy.getClass(), strategy.getClass()); } } this.prefixes = affixFST(prefixes); this.suffixes = affixFST(suffixes); secondStagePrefixFlags = toSortedCharArray(prefixContFlags); secondStageSuffixFlags = toSortedCharArray(suffixContFlags); int totalChars = 0; for (String strip : seenStrips.keySet()) { totalChars += strip.length(); } stripData = new char[totalChars]; stripOffsets = new int[seenStrips.size() + 1]; int currentOffset = 0; int currentIndex = 0; for (String strip : seenStrips.keySet()) { stripOffsets[currentIndex++] = currentOffset; strip.getChars(0, strip.length(), stripData, currentOffset); currentOffset += strip.length(); } assert currentIndex == seenStrips.size(); stripOffsets[currentIndex] = currentOffset; } private void checkCriticalDirectiveSame( String directive, LineNumberReader reader, Object expected, Object actual) throws ParseException { if (!expected.equals(actual)) { throw new ParseException( directive + " directive should occur at most once, and in the first " + MAX_PROLOGUE_SCAN_WINDOW + " bytes of the *.aff file", reader.getLineNumber()); } } private List<String> parseMapEntry(LineNumberReader reader, String line) throws ParseException { String unparsed = firstArgument(reader, line); List<String> mapEntry = new ArrayList<>(); for (int j = 0; j < unparsed.length(); j++) { if (unparsed.charAt(j) == '(') { int closing = unparsed.indexOf(')', j); if (closing < 0) { throw new ParseException("Unclosed parenthesis: " + line, reader.getLineNumber()); } mapEntry.add(unparsed.substring(j + 1, closing)); j = closing; } else { mapEntry.add(String.valueOf(unparsed.charAt(j))); } } return mapEntry; } boolean hasLanguage(String... 
langCodes) { if (language == null) return false; String langCode = extractLanguageCode(language); for (String code : langCodes) { if (langCode.equals(code)) { return true; } } return false; } /** * @param root a string to look up in the dictionary. No case conversion or affix removal is * performed. To get the possible roots of any word, you may call {@link * Hunspell#getRoots(String)} * @return the dictionary entries for the given root, or {@code null} if there's none */ public DictEntries lookupEntries(String root) { IntsRef forms = lookupWord(root.toCharArray(), 0, root.length()); if (forms == null) return null; return new DictEntries() { @Override public int size() { return forms.length / (hasCustomMorphData ? 2 : 1); } @Override public String getMorphologicalData(int entryIndex) { if (!hasCustomMorphData) return ""; return morphData.get(forms.ints[forms.offset + entryIndex * 2 + 1]); } @Override public List<String> getMorphologicalValues(int entryIndex, String key) { assert key.length() == 3; assert key.charAt(2) == ':'; String fields = getMorphologicalData(entryIndex); if (fields.isEmpty() || !fields.contains(key)) return Collections.emptyList(); return Arrays.stream(fields.split(" ")) .filter(s -> s.startsWith(key)) .map(s -> s.substring(3)) .collect(Collectors.toList()); } }; } static String extractLanguageCode(String isoCode) { int underscore = isoCode.indexOf("_"); return underscore < 0 ? isoCode : isoCode.substring(0, underscore); } private int parseNum(LineNumberReader reader, String line) throws ParseException { return Integer.parseInt(splitBySpace(reader, line, 2, Integer.MAX_VALUE)[1]); } private String singleArgument(LineNumberReader reader, String line) throws ParseException { return splitBySpace(reader, line, 2)[1]; } private String firstArgument(LineNumberReader reader, String line) throws ParseException { return splitBySpace(reader, line, 2, Integer.MAX_VALUE)[1]; } private String[] splitBySpace(LineNumberReader reader, String line, int expectedParts) throws ParseException { return splitBySpace(reader, line, expectedParts, expectedParts); } private String[] splitBySpace(LineNumberReader reader, String line, int minParts, int maxParts) throws ParseException { String[] parts = line.split("\\s+"); if (parts.length < minParts || parts.length > maxParts && !parts[maxParts].startsWith("#")) { throw new ParseException("Invalid syntax: " + line, reader.getLineNumber()); } return parts; } private List<CompoundRule> parseCompoundRules(LineNumberReader reader, int num) throws IOException, ParseException { List<CompoundRule> compoundRules = new ArrayList<>(); for (int i = 0; i < num; i++) { compoundRules.add(new CompoundRule(singleArgument(reader, reader.readLine()), this)); } return compoundRules; } private Breaks parseBreaks(LineNumberReader reader, String line) throws IOException, ParseException { Set<String> starting = new LinkedHashSet<>(); Set<String> ending = new LinkedHashSet<>(); Set<String> middle = new LinkedHashSet<>(); int num = parseNum(reader, line); for (int i = 0; i < num; i++) { String breakStr = singleArgument(reader, reader.readLine()); if (breakStr.startsWith("^")) { starting.add(breakStr.substring(1)); } else if (breakStr.endsWith("$")) { ending.add(breakStr.substring(0, breakStr.length() - 1)); } else { middle.add(breakStr); } } return new Breaks(starting, ending, middle); } private FST<IntsRef> affixFST(TreeMap<String, List<Integer>> affixes) throws IOException { IntSequenceOutputs outputs = IntSequenceOutputs.getSingleton(); FSTCompiler<IntsRef> fstCompiler 
= new FSTCompiler<>(FST.INPUT_TYPE.BYTE4, outputs); IntsRefBuilder scratch = new IntsRefBuilder(); for (Map.Entry<String, List<Integer>> entry : affixes.entrySet()) { Util.toUTF32(entry.getKey(), scratch); List<Integer> entries = entry.getValue(); IntsRef output = new IntsRef(entries.size()); for (Integer c : entries) { output.ints[output.length++] = c; } fstCompiler.add(scratch.get(), output); } return fstCompiler.compile(); } /** * Parses a specific affix rule putting the result into the provided affix map * * @param affixes Map where the result of the parsing will be put * @param header Header line of the affix rule * @param reader BufferedReader to read the content of the rule from * @param seenPatterns map from condition -&gt; index of patterns, for deduplication. * @throws IOException Can be thrown while reading the rule */ private void parseAffix( TreeMap<String, List<Integer>> affixes, Set<Character> secondStageFlags, String header, LineNumberReader reader, AffixKind kind, Map<String, Integer> seenPatterns, Map<String, Integer> seenStrips, FlagEnumerator flags) throws IOException, ParseException { StringBuilder sb = new StringBuilder(); String[] args = header.split("\\s+"); boolean crossProduct = args[2].equals("Y"); int numLines; try { numLines = Integer.parseInt(args[3]); } catch (NumberFormatException e) { return; } affixData = ArrayUtil.grow(affixData, currentAffix * 4 + numLines * 4); for (int i = 0; i < numLines; i++) { String line = reader.readLine(); // from the manpage: PFX flag stripping prefix [condition [morphological_fields...]] String[] ruleArgs = splitBySpace(reader, line, 4, Integer.MAX_VALUE); char flag = flagParsingStrategy.parseFlag(ruleArgs[1]); String strip = ruleArgs[2].equals("0") ? "" : ruleArgs[2]; String affixArg = ruleArgs[3]; char[] appendFlags = null; // first: parse continuation classes out of affix int flagSep = affixArg.lastIndexOf('/'); if (flagSep != -1) { String flagPart = affixArg.substring(flagSep + 1); affixArg = affixArg.substring(0, flagSep); if (aliasCount > 0) { flagPart = getAliasValue(Integer.parseInt(flagPart)); } appendFlags = flagParsingStrategy.parseFlags(flagPart); for (char appendFlag : appendFlags) { secondStageFlags.add(appendFlag); } } // zero affix -> empty string if ("0".equals(affixArg)) { affixArg = ""; } String condition = ruleArgs.length > 4 ? 
ruleArgs[4] : "."; String key = AffixCondition.uniqueKey(kind, strip, condition); // deduplicate patterns Integer patternIndex = seenPatterns.get(key); if (patternIndex == null) { patternIndex = patterns.size(); if (patternIndex > Short.MAX_VALUE) { throw new UnsupportedOperationException( "Too many patterns, please report this to [email protected]"); } seenPatterns.put(key, patternIndex); patterns.add(AffixCondition.compile(kind, strip, condition, line)); } Integer stripOrd = seenStrips.get(strip); if (stripOrd == null) { stripOrd = seenStrips.size(); seenStrips.put(strip, stripOrd); if (stripOrd > Character.MAX_VALUE) { throw new UnsupportedOperationException( "Too many unique strips, please report this to [email protected]"); } } if (appendFlags == null) { appendFlags = NOFLAGS; } int appendFlagsOrd = flags.add(appendFlags); if (appendFlagsOrd < 0) { // already exists in our hash appendFlagsOrd = (-appendFlagsOrd) - 1; } else if (appendFlagsOrd > Short.MAX_VALUE) { // this limit is probably flexible, but it's a good sanity check too throw new UnsupportedOperationException( "Too many unique append flags, please report this to [email protected]"); } int dataStart = currentAffix * 4; affixData[dataStart + AFFIX_FLAG] = flag; affixData[dataStart + AFFIX_STRIP_ORD] = (char) stripOrd.intValue(); // encode crossProduct into patternIndex int patternOrd = patternIndex << 1 | (crossProduct ? 1 : 0); affixData[dataStart + AFFIX_CONDITION] = (char) patternOrd; affixData[dataStart + AFFIX_APPEND] = (char) appendFlagsOrd; if (needsInputCleaning(affixArg)) { affixArg = cleanInput(affixArg, sb).toString(); } if (kind == SUFFIX) { affixArg = new StringBuilder(affixArg).reverse().toString(); } affixes.computeIfAbsent(affixArg, __ -> new ArrayList<>()).add(currentAffix); currentAffix++; } } char affixData(int affixIndex, int offset) { return affixData[affixIndex * 4 + offset]; } boolean isCrossProduct(int affix) { return (affixData(affix, AFFIX_CONDITION) & 1) == 1; } int getAffixCondition(int affix) { return affixData(affix, AFFIX_CONDITION) >>> 1; } private ConvTable parseConversions(LineNumberReader reader, int num) throws IOException, ParseException { TreeMap<String, String> mappings = new TreeMap<>(); for (int i = 0; i < num; i++) { String[] parts = splitBySpace(reader, reader.readLine(), 3); if (mappings.put(parts[1], parts[2]) != null) { throw new IllegalStateException("duplicate mapping specified for: " + parts[1]); } } return new ConvTable(mappings); } private static final byte[] BOM_UTF8 = {(byte) 0xef, (byte) 0xbb, (byte) 0xbf}; /** Parses the encoding and flag format specified in the provided InputStream */ private void readConfig(InputStream stream, Charset streamCharset) throws IOException, ParseException { LineNumberReader reader = new LineNumberReader(new InputStreamReader(stream, streamCharset)); String line; String flagLine = null; boolean charsetFound = false; boolean flagFound = false; while ((line = reader.readLine()) != null) { if (line.isBlank()) continue; String firstWord = line.split("\\s")[0]; if ("SET".equals(firstWord)) { decoder = getDecoder(singleArgument(reader, line)); charsetFound = true; } else if ("FLAG".equals(firstWord)) { // Preserve the flag line for parsing later since we need the decoder's charset // and just in case they come out of order. 
flagLine = line; flagFound = true; } else { continue; } if (charsetFound && flagFound) { break; } } if (flagFound) { flagParsingStrategy = getFlagParsingStrategy(flagLine, decoder.charset()); } } /** * Consume the provided byte sequence in full, if present. Otherwise leave the input stream * intact. * * @return {@code true} if the sequence matched and has been consumed. */ @SuppressWarnings("SameParameterValue") private static boolean maybeConsume(BufferedInputStream stream, byte[] bytes) throws IOException { stream.mark(bytes.length); for (byte b : bytes) { int nextByte = stream.read(); if (nextByte != (b & 0xff)) { // covers EOF (-1) as well. stream.reset(); return false; } } return true; } static final Map<String, String> CHARSET_ALIASES = Map.of("microsoft-cp1251", "windows-1251", "TIS620-2533", "TIS-620"); /** * Retrieves the CharsetDecoder for the given encoding. Note, This isn't perfect as I think * ISCII-DEVANAGARI and MICROSOFT-CP1251 etc are allowed... * * @param encoding Encoding to retrieve the CharsetDecoder for * @return CharSetDecoder for the given encoding */ private CharsetDecoder getDecoder(String encoding) { if ("ISO8859-14".equals(encoding)) { return new ISO8859_14Decoder(); } String canon = CHARSET_ALIASES.get(encoding); if (canon != null) { encoding = canon; } return replacingDecoder(Charset.forName(encoding)); } private static CharsetDecoder replacingDecoder(Charset charset) { return charset.newDecoder().onMalformedInput(CodingErrorAction.REPLACE); } /** * Determines the appropriate {@link FlagParsingStrategy} based on the FLAG definition line taken * from the affix file * * @param flagLine Line containing the flag information * @return FlagParsingStrategy that handles parsing flags in the way specified in the FLAG * definition */ static FlagParsingStrategy getFlagParsingStrategy(String flagLine, Charset charset) { String[] parts = flagLine.split("\\s+"); if (parts.length != 2) { throw new IllegalArgumentException("Illegal FLAG specification: " + flagLine); } String flagType = parts[1]; if ("num".equals(flagType)) { return new NumFlagParsingStrategy(); } else if ("UTF-8".equals(flagType)) { if (DEFAULT_CHARSET.equals(charset)) { return new DefaultAsUtf8FlagParsingStrategy(); } return new SimpleFlagParsingStrategy(); } else if ("long".equals(flagType)) { return new DoubleASCIIFlagParsingStrategy(); } throw new IllegalArgumentException("Unknown flag type: " + flagType); } private static final char FLAG_SEPARATOR = 0x1f; // flag separator after escaping private static final char MORPH_SEPARATOR = 0x1e; // separator for boundary of entry (may be followed by morph data) private String unescapeEntry(String entry) { StringBuilder sb = new StringBuilder(); int end = morphBoundary(entry); for (int i = 0; i < end; i++) { char ch = entry.charAt(i); if (ch == '\\' && i + 1 < entry.length()) { sb.append(entry.charAt(i + 1)); i++; } else if (ch == '/' && i > 0) { sb.append(FLAG_SEPARATOR); } else if (!shouldSkipEscapedChar(ch)) { sb.append(ch); } } sb.append(MORPH_SEPARATOR); if (end < entry.length()) { for (int i = end; i < entry.length(); i++) { char c = entry.charAt(i); if (!shouldSkipEscapedChar(c)) { sb.append(c); } } } return sb.toString(); } private static boolean shouldSkipEscapedChar(char ch) { return ch == FLAG_SEPARATOR || ch == MORPH_SEPARATOR; // BINARY EXECUTABLES EMBEDDED IN ZULU DICTIONARIES!!!!!!! 
} private static int morphBoundary(String line) { int end = indexOfSpaceOrTab(line, 0); if (end == -1) { return line.length(); } while (end >= 0 && end < line.length()) { if (line.charAt(end) == '\t' || end > 0 && end + 3 < line.length() && Character.isLetter(line.charAt(end + 1)) && Character.isLetter(line.charAt(end + 2)) && line.charAt(end + 3) == ':') { break; } end = indexOfSpaceOrTab(line, end + 1); } if (end == -1) { return line.length(); } return end; } static int indexOfSpaceOrTab(String text, int start) { int pos1 = text.indexOf('\t', start); int pos2 = text.indexOf(' ', start); if (pos1 >= 0 && pos2 >= 0) { return Math.min(pos1, pos2); } else { return Math.max(pos1, pos2); } } private int mergeDictionaries( List<InputStream> dictionaries, CharsetDecoder decoder, IndexOutput output) throws IOException { StringBuilder sb = new StringBuilder(); int wordCount = 0; try (ByteSequencesWriter writer = new ByteSequencesWriter(output)) { for (InputStream dictionary : dictionaries) { BufferedReader lines = new BufferedReader(new InputStreamReader(dictionary, decoder)); lines.readLine(); // first line is number of entries (approximately, sometimes) String line; while ((line = lines.readLine()) != null) { // wild and unpredictable code comment rules if (line.isEmpty() || line.charAt(0) == '#' || line.charAt(0) == '\t') { continue; } line = unescapeEntry(line); // if we haven't seen any custom morphological data, try to parse one if (!hasCustomMorphData) { int morphStart = line.indexOf(MORPH_SEPARATOR); if (morphStart >= 0 && morphStart < line.length()) { String data = line.substring(morphStart + 1); hasCustomMorphData = splitMorphData(data).stream().anyMatch(s -> !s.startsWith("ph:")); } } wordCount += writeNormalizedWordEntry(sb, writer, line); } } CodecUtil.writeFooter(output); } return wordCount; } /** @return the number of word entries written */ private int writeNormalizedWordEntry(StringBuilder reuse, ByteSequencesWriter writer, String line) throws IOException { int flagSep = line.indexOf(FLAG_SEPARATOR); int morphSep = line.indexOf(MORPH_SEPARATOR); assert morphSep > 0; assert morphSep > flagSep; int sep = flagSep < 0 ? morphSep : flagSep; CharSequence toWrite; String beforeSep = line.substring(0, sep); if (needsInputCleaning(beforeSep)) { cleanInput(beforeSep, reuse); reuse.append(line, sep, line.length()); toWrite = reuse; } else { toWrite = line; } String written = toWrite.toString(); sep = written.length() - (line.length() - sep); writer.write(written.getBytes(StandardCharsets.UTF_8)); WordCase wordCase = WordCase.caseOf(written, sep); if (wordCase == WordCase.MIXED || wordCase == WordCase.UPPER && flagSep > 0) { addHiddenCapitalizedWord(reuse, writer, written.substring(0, sep), written.substring(sep)); return 2; } return 1; } private void addHiddenCapitalizedWord( StringBuilder reuse, ByteSequencesWriter writer, String word, String afterSep) throws IOException { reuse.setLength(0); reuse.append(Character.toUpperCase(word.charAt(0))); for (int i = 1; i < word.length(); i++) { reuse.append(caseFold(word.charAt(i))); } reuse.append(FLAG_SEPARATOR); reuse.append(HIDDEN_FLAG); reuse.append(afterSep, afterSep.charAt(0) == FLAG_SEPARATOR ? 
1 : 0, afterSep.length()); writer.write(reuse.toString().getBytes(StandardCharsets.UTF_8)); } String toLowerCase(String word) { char[] chars = new char[word.length()]; for (int i = 0; i < word.length(); i++) { chars[i] = caseFold(word.charAt(i)); } return new String(chars); } String toTitleCase(String word) { char[] chars = new char[word.length()]; chars[0] = Character.toUpperCase(word.charAt(0)); for (int i = 1; i < word.length(); i++) { chars[i] = caseFold(word.charAt(i)); } return new String(chars); } private String sortWordsOffline( Directory tempDir, String tempFileNamePrefix, IndexOutput unsorted) throws IOException { OfflineSorter sorter = new OfflineSorter( tempDir, tempFileNamePrefix, new Comparator<>() { final BytesRef scratch1 = new BytesRef(); final BytesRef scratch2 = new BytesRef(); private void initScratch(BytesRef o, BytesRef scratch) { scratch.bytes = o.bytes; scratch.offset = o.offset; scratch.length = o.length; for (int i = scratch.length - 1; i >= 0; i--) { if (scratch.bytes[scratch.offset + i] == FLAG_SEPARATOR || scratch.bytes[scratch.offset + i] == MORPH_SEPARATOR) { scratch.length = i; break; } } } @Override public int compare(BytesRef o1, BytesRef o2) { initScratch(o1, scratch1); initScratch(o2, scratch2); int cmp = scratch1.compareTo(scratch2); if (cmp == 0) { // tie break on whole row return o1.compareTo(o2); } else { return cmp; } } }); String sorted; boolean success = false; try { sorted = sorter.sort(unsorted.getName()); success = true; } finally { if (success) { tempDir.deleteFile(unsorted.getName()); } else { IOUtils.deleteFilesIgnoringExceptions(tempDir, unsorted.getName()); } } return sorted; } private FST<IntsRef> readSortedDictionaries( Directory tempDir, String sorted, FlagEnumerator flags) throws IOException { boolean success = false; Map<String, Integer> morphIndices = new HashMap<>(); EntryGrouper grouper = new EntryGrouper(flags); try (ByteSequencesReader reader = new ByteSequencesReader(tempDir.openChecksumInput(sorted, IOContext.READONCE), sorted)) { // TODO: the flags themselves can be double-chars (long) or also numeric // either way the trick is to encode them as char... but they must be parsed differently while (true) { BytesRef scratch = reader.next(); if (scratch == null) { break; } String line = scratch.utf8ToString(); String entry; char[] wordForm; int end; int flagSep = line.indexOf(FLAG_SEPARATOR); if (flagSep == -1) { wordForm = NOFLAGS; end = line.indexOf(MORPH_SEPARATOR); entry = line.substring(0, end); } else { end = line.indexOf(MORPH_SEPARATOR); boolean hidden = line.charAt(flagSep + 1) == HIDDEN_FLAG; String flagPart = line.substring(flagSep + (hidden ? 
2 : 1), end); if (aliasCount > 0 && !flagPart.isEmpty()) { flagPart = getAliasValue(Integer.parseInt(flagPart)); } wordForm = flagParsingStrategy.parseFlags(flagPart); if (hidden) { wordForm = ArrayUtil.growExact(wordForm, wordForm.length + 1); wordForm[wordForm.length - 1] = HIDDEN_FLAG; } entry = line.substring(0, flagSep); } int morphDataID = 0; if (end + 1 < line.length()) { List<String> morphFields = readMorphFields(entry, line.substring(end + 1)); if (!morphFields.isEmpty()) { morphFields.sort(Comparator.naturalOrder()); morphDataID = addMorphFields(morphIndices, String.join(" ", morphFields)); } } wordHashes.set(Math.abs(entry.hashCode()) % wordHashes.length()); grouper.add(entry, wordForm, morphDataID); } // finalize last entry grouper.flushGroup(); success = true; return grouper.words.compile(); } finally { if (success) { tempDir.deleteFile(sorted); } else { IOUtils.deleteFilesIgnoringExceptions(tempDir, sorted); } } } private List<String> readMorphFields(String word, String unparsed) { List<String> morphFields = null; for (String datum : splitMorphData(unparsed)) { if (datum.startsWith("ph:")) { addPhoneticRepEntries(word, datum.substring(3)); } else { if (morphFields == null) morphFields = new ArrayList<>(1); morphFields.add(datum); } } return morphFields == null ? Collections.emptyList() : morphFields; } private int addMorphFields(Map<String, Integer> indices, String morphFields) { Integer alreadyCached = indices.get(morphFields); if (alreadyCached != null) { return alreadyCached; } int index = morphData.size(); indices.put(morphFields, index); morphData.add(morphFields); return index; } private void addPhoneticRepEntries(String word, String ph) { // e.g. "pretty ph:prity ph:priti->pretti" to suggest both prity->pretty and pritier->prettiest int arrow = ph.indexOf("->"); String pattern; String replacement; if (arrow > 0) { pattern = ph.substring(0, arrow); replacement = ph.substring(arrow + 2); } else { pattern = ph; replacement = word; } // when the ph: field ends with *, strip last character of pattern and replacement // e.g., "pretty ph:prity*" results in "prit->prett" replacement instead of "prity->pretty", // to get both prity->pretty and pritiest->prettiest suggestions. if (pattern.endsWith("*") && pattern.length() > 2 && replacement.length() > 1) { pattern = pattern.substring(0, pattern.length() - 2); replacement = replacement.substring(0, replacement.length() - 1); } // capitalize lowercase pattern for capitalized words to support // good suggestions also for capitalized misspellings, // e.g. Wednesday ph:wendsay results in wendsay -> Wednesday and Wendsay -> Wednesday. if (WordCase.caseOf(word) == WordCase.TITLE && WordCase.caseOf(pattern) == WordCase.LOWER) { // add also lowercase word in the case of German or // Hungarian to support lowercase suggestions lowercased by // compound word generation or derivational suffixes // for example by adjectival suffix "-i" of geographical names in Hungarian: // Massachusetts ph:messzecsuzec // messzecsuzeci -> massachusettsi (adjective) // For lowercasing by conditional PFX rules, see e.g. 
germancompounding test if (hasLanguage("de", "hu")) { repTable.add(new RepEntry(pattern, toLowerCase(replacement))); } repTable.add(new RepEntry(toTitleCase(pattern), replacement)); } repTable.add(new RepEntry(pattern, replacement)); } boolean isDotICaseChangeDisallowed(char[] word) { return word[0] == 'İ' && !alternateCasing; } private class EntryGrouper { final FSTCompiler<IntsRef> words = new FSTCompiler<>(FST.INPUT_TYPE.BYTE4, IntSequenceOutputs.getSingleton()); private final List<char[]> group = new ArrayList<>(); private final List<Integer> morphDataIDs = new ArrayList<>(); private final IntsRefBuilder scratchInts = new IntsRefBuilder(); private String currentEntry = null; private final FlagEnumerator flagEnumerator; EntryGrouper(FlagEnumerator flagEnumerator) { this.flagEnumerator = flagEnumerator; } void add(String entry, char[] flags, int morphDataID) throws IOException { if (!entry.equals(currentEntry)) { if (currentEntry != null) { if (entry.compareTo(currentEntry) < 0) { throw new IllegalArgumentException("out of order: " + entry + " < " + currentEntry); } flushGroup(); } currentEntry = entry; } group.add(flags); if (hasCustomMorphData) { morphDataIDs.add(morphDataID); } } void flushGroup() throws IOException { IntsRefBuilder currentOrds = new IntsRefBuilder(); boolean hasNonHidden = false; for (char[] flags : group) { if (!hasHiddenFlag(flags)) { hasNonHidden = true; break; } } for (int i = 0; i < group.size(); i++) { char[] flags = group.get(i); if (hasNonHidden && hasHiddenFlag(flags)) { continue; } currentOrds.append(flagEnumerator.add(flags)); if (hasCustomMorphData) { currentOrds.append(morphDataIDs.get(i)); } } Util.toUTF32(currentEntry, scratchInts); words.add(scratchInts.get(), currentOrds.get()); group.clear(); morphDataIDs.clear(); } } private static boolean hasHiddenFlag(char[] flags) { for (char flag : flags) { if (flag == HIDDEN_FLAG) { return true; } } return false; } private void parseAlias(String line) { String[] ruleArgs = line.split("\\s+"); if (aliases == null) { // first line should be the aliases count final int count = Integer.parseInt(ruleArgs[1]); aliases = new String[count]; } else { // an alias can map to no flags String aliasValue = ruleArgs.length == 1 ? 
"" : ruleArgs[1]; aliases[aliasCount++] = aliasValue; } } private String getAliasValue(int id) { try { return aliases[id - 1]; } catch (IndexOutOfBoundsException ex) { throw new IllegalArgumentException("Bad flag alias number:" + id, ex); } } private void parseMorphAlias(String line) { if (morphAliases == null) { // first line should be the aliases count final int count = Integer.parseInt(line.substring(3)); morphAliases = new String[count]; } else { String arg = line.substring(2); // leave the space morphAliases[morphAliasCount++] = arg; } } private List<String> splitMorphData(String morphData) { // first see if it's an alias if (morphAliasCount > 0) { try { int alias = Integer.parseInt(morphData.trim()); morphData = morphAliases[alias - 1]; } catch (NumberFormatException ignored) { } } if (morphData.isBlank()) { return Collections.emptyList(); } return Arrays.stream(morphData.split("\\s+")) .filter( s -> s.length() > 3 && Character.isLetter(s.charAt(0)) && Character.isLetter(s.charAt(1)) && s.charAt(2) == ':') .collect(Collectors.toList()); } boolean hasFlag(IntsRef forms, char flag) { int formStep = formStep(); for (int i = 0; i < forms.length; i += formStep) { if (hasFlag(forms.ints[forms.offset + i], flag)) { return true; } } return false; } /** Abstraction of the process of parsing flags taken from the affix and dic files */ abstract static class FlagParsingStrategy { // we don't check the flag count, as Hunspell accepts longer sequences // https://github.com/hunspell/hunspell/issues/707 static final boolean checkFlags = false; /** * Parses the given String into a single flag * * @param rawFlag String to parse into a flag * @return Parsed flag */ char parseFlag(String rawFlag) { char[] flags = parseFlags(rawFlag); if (checkFlags && flags.length != 1) { throw new IllegalArgumentException("expected only one flag, got: " + rawFlag); } return flags[0]; } /** * Parses the given String into multiple flags * * @param rawFlags String to parse into flags * @return Parsed flags */ abstract char[] parseFlags(String rawFlags); } /** * Simple implementation of {@link FlagParsingStrategy} that treats the chars in each String as a * individual flags. Can be used with both the ASCII and UTF-8 flag types. */ private static class SimpleFlagParsingStrategy extends FlagParsingStrategy { @Override public char[] parseFlags(String rawFlags) { return rawFlags.toCharArray(); } } /** Used to read flags as UTF-8 even if the rest of the file is in the default (8-bit) encoding */ private static class DefaultAsUtf8FlagParsingStrategy extends FlagParsingStrategy { @Override public char[] parseFlags(String rawFlags) { return new String(rawFlags.getBytes(DEFAULT_CHARSET), StandardCharsets.UTF_8).toCharArray(); } } /** * Implementation of {@link FlagParsingStrategy} that assumes each flag is encoded in its * numerical form. In the case of multiple flags, each number is separated by a comma. 
*/ private static class NumFlagParsingStrategy extends FlagParsingStrategy { @Override public char[] parseFlags(String rawFlags) { StringBuilder result = new StringBuilder(); StringBuilder group = new StringBuilder(); for (int i = 0; i <= rawFlags.length(); i++) { if (i == rawFlags.length() || rawFlags.charAt(i) == ',') { if (group.length() > 0) { // ignoring empty flags (this happens in danish, for example) int flag = Integer.parseInt(group, 0, group.length(), 10); if (flag >= DEFAULT_FLAGS) { // accept 0 due to https://github.com/hunspell/hunspell/issues/708 throw new IllegalArgumentException( "Num flags should be between 0 and " + DEFAULT_FLAGS + ", found " + flag); } result.append((char) flag); group.setLength(0); } } else if (rawFlags.charAt(i) >= '0' && rawFlags.charAt(i) <= '9') { group.append(rawFlags.charAt(i)); } } return result.toString().toCharArray(); } } /** * Implementation of {@link FlagParsingStrategy} that assumes each flag is encoded as two ASCII * characters whose codes must be combined into a single character. */ private static class DoubleASCIIFlagParsingStrategy extends FlagParsingStrategy { @Override public char[] parseFlags(String rawFlags) { if (checkFlags && rawFlags.length() % 2 == 1) { throw new IllegalArgumentException( "Invalid flags (should be even number of characters): " + rawFlags); } char[] flags = new char[rawFlags.length() / 2]; for (int i = 0; i < flags.length; i++) { char f1 = rawFlags.charAt(i * 2); char f2 = rawFlags.charAt(i * 2 + 1); if (f1 >= 256 || f2 >= 256) { throw new IllegalArgumentException( "Invalid flags (LONG flags must be double ASCII): " + rawFlags); } flags[i] = (char) (f1 << 8 | f2); } return flags; } } boolean hasFlag(int entryId, char flag) { return flagLookup.hasFlag(entryId, flag); } boolean mayNeedInputCleaning() { return ignoreCase || ignore != null || iconv != null; } boolean needsInputCleaning(CharSequence input) { if (mayNeedInputCleaning()) { for (int i = 0; i < input.length(); i++) { char ch = input.charAt(i); if (ignore != null && Arrays.binarySearch(ignore, ch) >= 0 || ignoreCase && caseFold(ch) != ch || iconv != null && iconv.mightReplaceChar(ch)) { return true; } } } return false; } CharSequence cleanInput(CharSequence input, StringBuilder reuse) { reuse.setLength(0); for (int i = 0; i < input.length(); i++) { char ch = input.charAt(i); if (ignore != null && Arrays.binarySearch(ignore, ch) >= 0) { continue; } if (ignoreCase && iconv == null) { // if we have no input conversion mappings, do this on-the-fly ch = caseFold(ch); } reuse.append(ch); } if (iconv != null) { iconv.applyMappings(reuse); if (ignoreCase) { for (int i = 0; i < reuse.length(); i++) { reuse.setCharAt(i, caseFold(reuse.charAt(i))); } } } return reuse; } private static char[] toSortedCharArray(Set<Character> set) { char[] chars = new char[set.size()]; int i = 0; for (Character c : set) { chars[i++] = c; } Arrays.sort(chars); return chars; } boolean isSecondStagePrefix(char flag) { return Arrays.binarySearch(secondStagePrefixFlags, flag) >= 0; } boolean isSecondStageSuffix(char flag) { return Arrays.binarySearch(secondStageSuffixFlags, flag) >= 0; } /** folds single character (according to LANG if present) */ char caseFold(char c) { if (alternateCasing) { if (c == 'I') { return 'ı'; } else if (c == 'İ') { return 'i'; } else { return Character.toLowerCase(c); } } else { return Character.toLowerCase(c); } } /** Returns true if this dictionary was constructed with the {@code ignoreCase} option */ public boolean getIgnoreCase() { return ignoreCase; } /** * 
Returns the default temporary directory pointed to by {@code java.io.tmpdir}. If not accessible * or not available, an IOException is thrown. */ static Path getDefaultTempDir() throws IOException { String tmpDir = System.getProperty("java.io.tmpdir"); if (tmpDir == null) { throw new IOException("No temporary path (java.io.tmpdir)?"); } Path tmpPath = Paths.get(tmpDir); if (!Files.isWritable(tmpPath)) { throw new IOException( "Temporary path not present or writeable?: " + tmpPath.toAbsolutePath()); } return tmpPath; } /** Possible word breaks according to BREAK directives */ static class Breaks { private static final Set<String> MINUS = Collections.singleton("-"); static final Breaks DEFAULT = new Breaks(MINUS, MINUS, MINUS); final String[] starting, ending, middle; Breaks(Collection<String> starting, Collection<String> ending, Collection<String> middle) { this.starting = starting.toArray(new String[0]); this.ending = ending.toArray(new String[0]); this.middle = middle.toArray(new String[0]); } boolean isNotEmpty() { return middle.length > 0 || starting.length > 0 || ending.length > 0; } } }
1
40746
We don't store empty dictionary entries anymore: they bring no benefits, only trouble.
apache-lucene-solr
java
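The Lucene patch above adds an early return when the separator index is zero, i.e. when a dictionary line has no word text before its flag/morph separator, which is exactly the "empty dictionary entry" the review message says is no longer stored. A minimal sketch of that guard in isolation, keeping the real separator constants but simplifying the method body (the actual `writeNormalizedWordEntry` also cleans the word, writes it, and may emit a hidden capitalized variant):

```java
/** Minimal sketch of the guard added by the patch above; the method body is
 *  simplified for illustration and only reports how many entries would be written. */
class EmptyEntryGuardSketch {
  static final char FLAG_SEPARATOR = 0x1f;   // flag separator after escaping
  static final char MORPH_SEPARATOR = 0x1e;  // entry boundary before morph data

  /** @return the number of word entries that would be written */
  static int writeNormalizedWordEntry(String line) {
    int flagSep = line.indexOf(FLAG_SEPARATOR);
    int morphSep = line.indexOf(MORPH_SEPARATOR);
    int sep = flagSep < 0 ? morphSep : flagSep;
    if (sep == 0) {
      // The word before the separator is empty: an empty dictionary entry
      // brings no benefit, so skip it and report zero written entries.
      return 0;
    }
    // ... normalization and writing happen here in the real method.
    return 1;
  }

  public static void main(String[] args) {
    // A normal entry is written; an entry that is only "/flags" is skipped.
    System.out.println(writeNormalizedWordEntry("word" + MORPH_SEPARATOR));                     // 1
    System.out.println(writeNormalizedWordEntry("" + FLAG_SEPARATOR + 'A' + MORPH_SEPARATOR)); // 0
  }
}
```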
@@ -625,6 +625,15 @@ class CppGenerator : public BaseGenerator { return false; } + bool VectorElementUserFacing(const Type& type) const { + // Normally, in non-Object-API, we use the non-user-facing type when + // emitting the Vector element type, however in the case of enums + // we want to avoid that when using scoped-enums. + return opts_.g_cpp_std >= cpp::CPP_STD_17 && + opts_.scoped_enums && + IsEnum(type); + } + void GenComment(const std::vector<std::string> &dc, const char *prefix = "") { std::string text; ::flatbuffers::GenComment(dc, &text, nullptr, prefix);
1
/* * Copyright 2014 Google Inc. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // independent from idl_parser, since this code is not needed for most clients #include <unordered_set> #include "flatbuffers/code_generators.h" #include "flatbuffers/flatbuffers.h" #include "flatbuffers/flatc.h" #include "flatbuffers/idl.h" #include "flatbuffers/util.h" namespace flatbuffers { // Pedantic warning free version of toupper(). inline char ToUpper(char c) { return static_cast<char>(::toupper(static_cast<unsigned char>(c))); } // Make numerical literal with type-suffix. // This function is only needed for C++! Other languages do not need it. static inline std::string NumToStringCpp(std::string val, BaseType type) { // Avoid issues with -2147483648, -9223372036854775808. switch (type) { case BASE_TYPE_INT: return (val != "-2147483648") ? val : ("(-2147483647 - 1)"); case BASE_TYPE_ULONG: return (val == "0") ? val : (val + "ULL"); case BASE_TYPE_LONG: if (val == "-9223372036854775808") return "(-9223372036854775807LL - 1LL)"; else return (val == "0") ? val : (val + "LL"); default: return val; } } static std::string GeneratedFileName(const std::string &path, const std::string &file_name) { return path + file_name + "_generated.h"; } static std::string GenIncludeGuard(const std::string &file_name, const Namespace &name_space, const std::string &postfix= "") { // Generate include guard. std::string guard = file_name; // Remove any non-alpha-numeric characters that may appear in a filename. struct IsAlnum { bool operator()(char c) const { return !is_alnum(c); } }; guard.erase(std::remove_if(guard.begin(), guard.end(), IsAlnum()), guard.end()); guard = "FLATBUFFERS_GENERATED_" + guard; guard += "_"; // For further uniqueness, also add the namespace. for (auto it = name_space.components.begin(); it != name_space.components.end(); ++it) { guard += *it + "_"; } // Anything extra to add to the guard? if (!postfix.empty()) { guard += postfix + "_"; } guard += "H_"; std::transform(guard.begin(), guard.end(), guard.begin(), ToUpper); return guard; } namespace cpp { enum CppStandard { CPP_STD_X0 = 0, CPP_STD_11, CPP_STD_17 }; // Extension of IDLOptions for cpp-generator. struct IDLOptionsCpp : public IDLOptions { // All fields start with 'g_' prefix to distinguish from the base IDLOptions. CppStandard g_cpp_std; // Base version of C++ standard. bool g_only_fixed_enums; // Generate underlaying type for all enums. 
// clang-format off IDLOptionsCpp(const IDLOptions &opts) : IDLOptions(opts), g_cpp_std(CPP_STD_11), g_only_fixed_enums(true) {} // clang-format on }; class CppGenerator : public BaseGenerator { public: CppGenerator(const Parser &parser, const std::string &path, const std::string &file_name, IDLOptionsCpp opts) : BaseGenerator(parser, path, file_name, "", "::"), cur_name_space_(nullptr), opts_(opts), float_const_gen_("std::numeric_limits<double>::", "std::numeric_limits<float>::", "quiet_NaN()", "infinity()") { static const char *const keywords[] = { "alignas", "alignof", "and", "and_eq", "asm", "atomic_cancel", "atomic_commit", "atomic_noexcept", "auto", "bitand", "bitor", "bool", "break", "case", "catch", "char", "char16_t", "char32_t", "class", "compl", "concept", "const", "constexpr", "const_cast", "continue", "co_await", "co_return", "co_yield", "decltype", "default", "delete", "do", "double", "dynamic_cast", "else", "enum", "explicit", "export", "extern", "false", "float", "for", "friend", "goto", "if", "import", "inline", "int", "long", "module", "mutable", "namespace", "new", "noexcept", "not", "not_eq", "nullptr", "operator", "or", "or_eq", "private", "protected", "public", "register", "reinterpret_cast", "requires", "return", "short", "signed", "sizeof", "static", "static_assert", "static_cast", "struct", "switch", "synchronized", "template", "this", "thread_local", "throw", "true", "try", "typedef", "typeid", "typename", "union", "unsigned", "using", "virtual", "void", "volatile", "wchar_t", "while", "xor", "xor_eq", nullptr, }; for (auto kw = keywords; *kw; kw++) keywords_.insert(*kw); } void GenIncludeDependencies() { int num_includes = 0; for (auto it = parser_.native_included_files_.begin(); it != parser_.native_included_files_.end(); ++it) { code_ += "#include \"" + *it + "\""; num_includes++; } for (auto it = parser_.included_files_.begin(); it != parser_.included_files_.end(); ++it) { if (it->second.empty()) continue; auto noext = flatbuffers::StripExtension(it->second); auto basename = flatbuffers::StripPath(noext); code_ += "#include \"" + opts_.include_prefix + (opts_.keep_include_path ? noext : basename) + "_generated.h\""; num_includes++; } if (num_includes) code_ += ""; } void GenExtraIncludes() { for (std::size_t i = 0; i < opts_.cpp_includes.size(); ++i) { code_ += "#include \"" + opts_.cpp_includes[i] + "\""; } if (!opts_.cpp_includes.empty()) { code_ += ""; } } std::string EscapeKeyword(const std::string &name) const { return keywords_.find(name) == keywords_.end() ? name : name + "_"; } std::string Name(const Definition &def) const { return EscapeKeyword(def.name); } std::string Name(const EnumVal &ev) const { return EscapeKeyword(ev.name); } bool generate_bfbs_embed() { code_.Clear(); code_ += "// " + std::string(FlatBuffersGeneratedWarning()) + "\n\n"; // If we don't have a root struct definition, if (!parser_.root_struct_def_) { // put a comment in the output why there is no code generated. code_ += "// Binary schema not generated, no root struct found"; } else { auto &struct_def = *parser_.root_struct_def_; const auto include_guard = GenIncludeGuard(file_name_, *struct_def.defined_namespace, "bfbs"); code_ += "#ifndef " + include_guard; code_ += "#define " + include_guard; code_ += ""; if (parser_.opts.gen_nullable) { code_ += "#pragma clang system_header\n\n"; } SetNameSpace(struct_def.defined_namespace); auto name = Name(struct_def); code_.SetValue("STRUCT_NAME", name); // Create code to return the binary schema data. 
auto binary_schema_hex_text = BufferToHexText(parser_.builder_.GetBufferPointer(), parser_.builder_.GetSize(), 105, " ", ""); code_ += "struct {{STRUCT_NAME}}BinarySchema {"; code_ += " static const uint8_t *data() {"; code_ += " // Buffer containing the binary schema."; code_ += " static const uint8_t bfbsData[" + NumToString(parser_.builder_.GetSize()) + "] = {"; code_ += binary_schema_hex_text; code_ += " };"; code_ += " return bfbsData;"; code_ += " }"; code_ += " static size_t size() {"; code_ += " return " + NumToString(parser_.builder_.GetSize()) + ";"; code_ += " }"; code_ += " const uint8_t *begin() {"; code_ += " return data();"; code_ += " }"; code_ += " const uint8_t *end() {"; code_ += " return data() + size();"; code_ += " }"; code_ += "};"; code_ += ""; if (cur_name_space_) SetNameSpace(nullptr); // Close the include guard. code_ += "#endif // " + include_guard; } // We are just adding "_bfbs" to the generated filename. const auto file_path = GeneratedFileName(path_, file_name_ + "_bfbs"); const auto final_code = code_.ToString(); return SaveFile(file_path.c_str(), final_code, false); } // Iterate through all definitions we haven't generate code for (enums, // structs, and tables) and output them to a single file. bool generate() { code_.Clear(); code_ += "// " + std::string(FlatBuffersGeneratedWarning()) + "\n\n"; const auto include_guard = GenIncludeGuard(file_name_, *parser_.current_namespace_); code_ += "#ifndef " + include_guard; code_ += "#define " + include_guard; code_ += ""; if (opts_.gen_nullable) { code_ += "#pragma clang system_header\n\n"; } code_ += "#include \"flatbuffers/flatbuffers.h\""; if (parser_.uses_flexbuffers_) { code_ += "#include \"flatbuffers/flexbuffers.h\""; } code_ += ""; if (opts_.include_dependence_headers) { GenIncludeDependencies(); } GenExtraIncludes(); FLATBUFFERS_ASSERT(!cur_name_space_); // Generate forward declarations for all structs/tables, since they may // have circular references. for (auto it = parser_.structs_.vec.begin(); it != parser_.structs_.vec.end(); ++it) { const auto &struct_def = **it; if (!struct_def.generated) { SetNameSpace(struct_def.defined_namespace); code_ += "struct " + Name(struct_def) + ";"; if (!struct_def.fixed) { code_ += "struct " + Name(struct_def) + "Builder;"; } if (opts_.generate_object_based_api) { auto nativeName = NativeName(Name(struct_def), &struct_def, opts_); if (!struct_def.fixed) { code_ += "struct " + nativeName + ";"; } } code_ += ""; } } // Generate forward declarations for all equal operators if (opts_.generate_object_based_api && opts_.gen_compare) { for (auto it = parser_.structs_.vec.begin(); it != parser_.structs_.vec.end(); ++it) { const auto &struct_def = **it; if (!struct_def.generated) { SetNameSpace(struct_def.defined_namespace); auto nativeName = NativeName(Name(struct_def), &struct_def, opts_); code_ += "bool operator==(const " + nativeName + " &lhs, const " + nativeName + " &rhs);"; code_ += "bool operator!=(const " + nativeName + " &lhs, const " + nativeName + " &rhs);"; } } code_ += ""; } // Generate preablmle code for mini reflection. if (opts_.mini_reflect != IDLOptions::kNone) { // To break cyclic dependencies, first pre-declare all tables/structs. for (auto it = parser_.structs_.vec.begin(); it != parser_.structs_.vec.end(); ++it) { const auto &struct_def = **it; if (!struct_def.generated) { SetNameSpace(struct_def.defined_namespace); GenMiniReflectPre(&struct_def); } } } // Generate code for all the enum declarations. 
for (auto it = parser_.enums_.vec.begin(); it != parser_.enums_.vec.end(); ++it) { const auto &enum_def = **it; if (!enum_def.generated) { SetNameSpace(enum_def.defined_namespace); GenEnum(enum_def); } } // Generate code for all structs, then all tables. for (auto it = parser_.structs_.vec.begin(); it != parser_.structs_.vec.end(); ++it) { const auto &struct_def = **it; if (struct_def.fixed && !struct_def.generated) { SetNameSpace(struct_def.defined_namespace); GenStruct(struct_def); } } for (auto it = parser_.structs_.vec.begin(); it != parser_.structs_.vec.end(); ++it) { const auto &struct_def = **it; if (!struct_def.fixed && !struct_def.generated) { SetNameSpace(struct_def.defined_namespace); GenTable(struct_def); } } for (auto it = parser_.structs_.vec.begin(); it != parser_.structs_.vec.end(); ++it) { const auto &struct_def = **it; if (!struct_def.fixed && !struct_def.generated) { SetNameSpace(struct_def.defined_namespace); GenTablePost(struct_def); } } // Generate code for union verifiers. for (auto it = parser_.enums_.vec.begin(); it != parser_.enums_.vec.end(); ++it) { const auto &enum_def = **it; if (enum_def.is_union && !enum_def.generated) { SetNameSpace(enum_def.defined_namespace); GenUnionPost(enum_def); } } // Generate code for mini reflection. if (opts_.mini_reflect != IDLOptions::kNone) { // Then the unions/enums that may refer to them. for (auto it = parser_.enums_.vec.begin(); it != parser_.enums_.vec.end(); ++it) { const auto &enum_def = **it; if (!enum_def.generated) { SetNameSpace(enum_def.defined_namespace); GenMiniReflect(nullptr, &enum_def); } } // Then the full tables/structs. for (auto it = parser_.structs_.vec.begin(); it != parser_.structs_.vec.end(); ++it) { const auto &struct_def = **it; if (!struct_def.generated) { SetNameSpace(struct_def.defined_namespace); GenMiniReflect(&struct_def, nullptr); } } } // Generate convenient global helper functions: if (parser_.root_struct_def_) { auto &struct_def = *parser_.root_struct_def_; SetNameSpace(struct_def.defined_namespace); auto name = Name(struct_def); auto qualified_name = cur_name_space_->GetFullyQualifiedName(name); auto cpp_name = TranslateNameSpace(qualified_name); code_.SetValue("STRUCT_NAME", name); code_.SetValue("CPP_NAME", cpp_name); code_.SetValue("NULLABLE_EXT", NullableExtension()); // The root datatype accessor: code_ += "inline \\"; code_ += "const {{CPP_NAME}} *{{NULLABLE_EXT}}Get{{STRUCT_NAME}}(const void " "*buf) {"; code_ += " return flatbuffers::GetRoot<{{CPP_NAME}}>(buf);"; code_ += "}"; code_ += ""; code_ += "inline \\"; code_ += "const {{CPP_NAME}} " "*{{NULLABLE_EXT}}GetSizePrefixed{{STRUCT_NAME}}(const void " "*buf) {"; code_ += " return flatbuffers::GetSizePrefixedRoot<{{CPP_NAME}}>(buf);"; code_ += "}"; code_ += ""; if (opts_.mutable_buffer) { code_ += "inline \\"; code_ += "{{STRUCT_NAME}} *GetMutable{{STRUCT_NAME}}(void *buf) {"; code_ += " return flatbuffers::GetMutableRoot<{{STRUCT_NAME}}>(buf);"; code_ += "}"; code_ += ""; } if (parser_.file_identifier_.length()) { // Return the identifier code_ += "inline const char *{{STRUCT_NAME}}Identifier() {"; code_ += " return \"" + parser_.file_identifier_ + "\";"; code_ += "}"; code_ += ""; // Check if a buffer has the identifier. code_ += "inline \\"; code_ += "bool {{STRUCT_NAME}}BufferHasIdentifier(const void *buf) {"; code_ += " return flatbuffers::BufferHasIdentifier("; code_ += " buf, {{STRUCT_NAME}}Identifier());"; code_ += "}"; code_ += ""; } // The root verifier. 
if (parser_.file_identifier_.length()) { code_.SetValue("ID", name + "Identifier()"); } else { code_.SetValue("ID", "nullptr"); } code_ += "inline bool Verify{{STRUCT_NAME}}Buffer("; code_ += " flatbuffers::Verifier &verifier) {"; code_ += " return verifier.VerifyBuffer<{{CPP_NAME}}>({{ID}});"; code_ += "}"; code_ += ""; code_ += "inline bool VerifySizePrefixed{{STRUCT_NAME}}Buffer("; code_ += " flatbuffers::Verifier &verifier) {"; code_ += " return verifier.VerifySizePrefixedBuffer<{{CPP_NAME}}>({{ID}});"; code_ += "}"; code_ += ""; if (parser_.file_extension_.length()) { // Return the extension code_ += "inline const char *{{STRUCT_NAME}}Extension() {"; code_ += " return \"" + parser_.file_extension_ + "\";"; code_ += "}"; code_ += ""; } // Finish a buffer with a given root object: code_ += "inline void Finish{{STRUCT_NAME}}Buffer("; code_ += " flatbuffers::FlatBufferBuilder &fbb,"; code_ += " flatbuffers::Offset<{{CPP_NAME}}> root) {"; if (parser_.file_identifier_.length()) code_ += " fbb.Finish(root, {{STRUCT_NAME}}Identifier());"; else code_ += " fbb.Finish(root);"; code_ += "}"; code_ += ""; code_ += "inline void FinishSizePrefixed{{STRUCT_NAME}}Buffer("; code_ += " flatbuffers::FlatBufferBuilder &fbb,"; code_ += " flatbuffers::Offset<{{CPP_NAME}}> root) {"; if (parser_.file_identifier_.length()) code_ += " fbb.FinishSizePrefixed(root, {{STRUCT_NAME}}Identifier());"; else code_ += " fbb.FinishSizePrefixed(root);"; code_ += "}"; code_ += ""; if (opts_.generate_object_based_api) { // A convenient root unpack function. auto native_name = NativeName(WrapInNameSpace(struct_def), &struct_def, opts_); code_.SetValue("UNPACK_RETURN", GenTypeNativePtr(native_name, nullptr, false)); code_.SetValue("UNPACK_TYPE", GenTypeNativePtr(native_name, nullptr, true)); code_ += "inline {{UNPACK_RETURN}} UnPack{{STRUCT_NAME}}("; code_ += " const void *buf,"; code_ += " const flatbuffers::resolver_function_t *res = nullptr) {"; code_ += " return {{UNPACK_TYPE}}\\"; code_ += "(Get{{STRUCT_NAME}}(buf)->UnPack(res));"; code_ += "}"; code_ += ""; code_ += "inline {{UNPACK_RETURN}} UnPackSizePrefixed{{STRUCT_NAME}}("; code_ += " const void *buf,"; code_ += " const flatbuffers::resolver_function_t *res = nullptr) {"; code_ += " return {{UNPACK_TYPE}}\\"; code_ += "(GetSizePrefixed{{STRUCT_NAME}}(buf)->UnPack(res));"; code_ += "}"; code_ += ""; } } if (cur_name_space_) SetNameSpace(nullptr); // Close the include guard. code_ += "#endif // " + include_guard; const auto file_path = GeneratedFileName(path_, file_name_); const auto final_code = code_.ToString(); // Save the file and optionally generate the binary schema code. return SaveFile(file_path.c_str(), final_code, false) && (!parser_.opts.binary_schema_gen_embed || generate_bfbs_embed()); } private: CodeWriter code_; std::unordered_set<std::string> keywords_; // This tracks the current namespace so we can insert namespace declarations. const Namespace *cur_name_space_; const IDLOptionsCpp opts_; const TypedFloatConstantGenerator float_const_gen_; const Namespace *CurrentNameSpace() const { return cur_name_space_; } // Translates a qualified name in flatbuffer text format to the same name in // the equivalent C++ namespace. 
static std::string TranslateNameSpace(const std::string &qualified_name) { std::string cpp_qualified_name = qualified_name; size_t start_pos = 0; while ((start_pos = cpp_qualified_name.find('.', start_pos)) != std::string::npos) { cpp_qualified_name.replace(start_pos, 1, "::"); } return cpp_qualified_name; } bool TypeHasKey(const Type &type) { if (type.base_type != BASE_TYPE_STRUCT) { return false; } for (auto it = type.struct_def->fields.vec.begin(); it != type.struct_def->fields.vec.end(); ++it) { const auto &field = **it; if (field.key) { return true; } } return false; } void GenComment(const std::vector<std::string> &dc, const char *prefix = "") { std::string text; ::flatbuffers::GenComment(dc, &text, nullptr, prefix); code_ += text + "\\"; } // Return a C++ type from the table in idl.h std::string GenTypeBasic(const Type &type, bool user_facing_type) const { // clang-format off static const char *const ctypename[] = { #define FLATBUFFERS_TD(ENUM, IDLTYPE, CTYPE, ...) \ #CTYPE, FLATBUFFERS_GEN_TYPES(FLATBUFFERS_TD) #undef FLATBUFFERS_TD }; // clang-format on if (user_facing_type) { if (type.enum_def) return WrapInNameSpace(*type.enum_def); if (type.base_type == BASE_TYPE_BOOL) return "bool"; } return ctypename[type.base_type]; } // Return a C++ pointer type, specialized to the actual struct/table types, // and vector element types. std::string GenTypePointer(const Type &type) const { switch (type.base_type) { case BASE_TYPE_STRING: { return "flatbuffers::String"; } case BASE_TYPE_VECTOR: { const auto type_name = GenTypeWire(type.VectorType(), "", false); return "flatbuffers::Vector<" + type_name + ">"; } case BASE_TYPE_STRUCT: { return WrapInNameSpace(*type.struct_def); } case BASE_TYPE_UNION: // fall through default: { return "void"; } } } // Return a C++ type for any type (scalar/pointer) specifically for // building a flatbuffer. std::string GenTypeWire(const Type &type, const char *postfix, bool user_facing_type) const { if (IsScalar(type.base_type)) { return GenTypeBasic(type, user_facing_type) + postfix; } else if (IsStruct(type)) { return "const " + GenTypePointer(type) + " *"; } else { return "flatbuffers::Offset<" + GenTypePointer(type) + ">" + postfix; } } // Return a C++ type for any type (scalar/pointer) that reflects its // serialized size. std::string GenTypeSize(const Type &type) const { if (IsScalar(type.base_type)) { return GenTypeBasic(type, false); } else if (IsStruct(type)) { return GenTypePointer(type); } else { return "flatbuffers::uoffset_t"; } } std::string NullableExtension() { return opts_.gen_nullable ? " _Nullable " : ""; } static std::string NativeName(const std::string &name, const StructDef *sd, const IDLOptions &opts) { return sd && !sd->fixed ? opts.object_prefix + name + opts.object_suffix : name; } const std::string &PtrType(const FieldDef *field) { auto attr = field ? field->attributes.Lookup("cpp_ptr_type") : nullptr; return attr ? attr->constant : opts_.cpp_object_api_pointer_type; } const std::string NativeString(const FieldDef *field) { auto attr = field ? field->attributes.Lookup("cpp_str_type") : nullptr; auto &ret = attr ? attr->constant : opts_.cpp_object_api_string_type; if (ret.empty()) { return "std::string"; } return ret; } bool FlexibleStringConstructor(const FieldDef *field) { auto attr = field ? (field->attributes.Lookup("cpp_str_flex_ctor") != nullptr) : false; auto ret = attr ? attr : opts_.cpp_object_api_string_flexible_constructor; return ret && NativeString(field) != "std::string"; // Only for custom string types. 
} std::string GenTypeNativePtr(const std::string &type, const FieldDef *field, bool is_constructor) { auto &ptr_type = PtrType(field); if (ptr_type != "naked") { return (ptr_type != "default_ptr_type" ? ptr_type : opts_.cpp_object_api_pointer_type) + "<" + type + ">"; } else if (is_constructor) { return ""; } else { return type + " *"; } } std::string GenPtrGet(const FieldDef &field) { auto cpp_ptr_type_get = field.attributes.Lookup("cpp_ptr_type_get"); if (cpp_ptr_type_get) return cpp_ptr_type_get->constant; auto &ptr_type = PtrType(&field); return ptr_type == "naked" ? "" : ".get()"; } std::string GenTypeNative(const Type &type, bool invector, const FieldDef &field) { switch (type.base_type) { case BASE_TYPE_STRING: { return NativeString(&field); } case BASE_TYPE_VECTOR: { const auto type_name = GenTypeNative(type.VectorType(), true, field); if (type.struct_def && type.struct_def->attributes.Lookup("native_custom_alloc")) { auto native_custom_alloc = type.struct_def->attributes.Lookup("native_custom_alloc"); return "std::vector<" + type_name + "," + native_custom_alloc->constant + "<" + type_name + ">>"; } else return "std::vector<" + type_name + ">"; } case BASE_TYPE_STRUCT: { auto type_name = WrapInNameSpace(*type.struct_def); if (IsStruct(type)) { auto native_type = type.struct_def->attributes.Lookup("native_type"); if (native_type) { type_name = native_type->constant; } if (invector || field.native_inline) { return type_name; } else { return GenTypeNativePtr(type_name, &field, false); } } else { return GenTypeNativePtr(NativeName(type_name, type.struct_def, opts_), &field, false); } } case BASE_TYPE_UNION: { auto type_name = WrapInNameSpace(*type.enum_def); return type_name + "Union"; } default: { return GenTypeBasic(type, true); } } } // Return a C++ type for any type (scalar/pointer) specifically for // using a flatbuffer. std::string GenTypeGet(const Type &type, const char *afterbasic, const char *beforeptr, const char *afterptr, bool user_facing_type) { if (IsScalar(type.base_type)) { return GenTypeBasic(type, user_facing_type) + afterbasic; } else if (IsArray(type)) { auto element_type = type.VectorType(); // Check if enum arrays are used in C++ without specifying --scoped-enums if (IsEnum(element_type) && !opts_.g_only_fixed_enums) { LogCompilerError( "--scoped-enums must be enabled to use enum arrays in C++"); FLATBUFFERS_ASSERT(true); } return beforeptr + (IsScalar(element_type.base_type) ? GenTypeBasic(element_type, user_facing_type) : GenTypePointer(element_type)) + afterptr; } else { return beforeptr + GenTypePointer(type) + afterptr; } } std::string GenEnumValDecl(const EnumDef &enum_def, const std::string &enum_val) const { return opts_.prefixed_enums ? Name(enum_def) + "_" + enum_val : enum_val; } std::string GetEnumValUse(const EnumDef &enum_def, const EnumVal &enum_val) const { if (opts_.scoped_enums) { return Name(enum_def) + "::" + Name(enum_val); } else if (opts_.prefixed_enums) { return Name(enum_def) + "_" + Name(enum_val); } else { return Name(enum_val); } } std::string StripUnionType(const std::string &name) { return name.substr(0, name.size() - strlen(UnionTypeFieldSuffix())); } std::string GetUnionElement(const EnumVal &ev, bool wrap, bool actual_type, bool native_type = false) { if (ev.union_type.base_type == BASE_TYPE_STRUCT) { auto name = actual_type ? ev.union_type.struct_def->name : Name(ev); return wrap ? 
WrapInNameSpace(ev.union_type.struct_def->defined_namespace, name) : name; } else if (ev.union_type.base_type == BASE_TYPE_STRING) { return actual_type ? (native_type ? "std::string" : "flatbuffers::String") : Name(ev); } else { FLATBUFFERS_ASSERT(false); return Name(ev); } } std::string UnionVerifySignature(const EnumDef &enum_def) { return "bool Verify" + Name(enum_def) + "(flatbuffers::Verifier &verifier, const void *obj, " + Name(enum_def) + " type)"; } std::string UnionVectorVerifySignature(const EnumDef &enum_def) { return "bool Verify" + Name(enum_def) + "Vector" + "(flatbuffers::Verifier &verifier, " + "const flatbuffers::Vector<flatbuffers::Offset<void>> *values, " + "const flatbuffers::Vector<uint8_t> *types)"; } std::string UnionUnPackSignature(const EnumDef &enum_def, bool inclass) { return (inclass ? "static " : "") + std::string("void *") + (inclass ? "" : Name(enum_def) + "Union::") + "UnPack(const void *obj, " + Name(enum_def) + " type, const flatbuffers::resolver_function_t *resolver)"; } std::string UnionPackSignature(const EnumDef &enum_def, bool inclass) { return "flatbuffers::Offset<void> " + (inclass ? "" : Name(enum_def) + "Union::") + "Pack(flatbuffers::FlatBufferBuilder &_fbb, " + "const flatbuffers::rehasher_function_t *_rehasher" + (inclass ? " = nullptr" : "") + ") const"; } std::string TableCreateSignature(const StructDef &struct_def, bool predecl, const IDLOptions &opts) { return "flatbuffers::Offset<" + Name(struct_def) + "> Create" + Name(struct_def) + "(flatbuffers::FlatBufferBuilder &_fbb, const " + NativeName(Name(struct_def), &struct_def, opts) + " *_o, const flatbuffers::rehasher_function_t *_rehasher" + (predecl ? " = nullptr" : "") + ")"; } std::string TablePackSignature(const StructDef &struct_def, bool inclass, const IDLOptions &opts) { return std::string(inclass ? "static " : "") + "flatbuffers::Offset<" + Name(struct_def) + "> " + (inclass ? "" : Name(struct_def) + "::") + "Pack(flatbuffers::FlatBufferBuilder &_fbb, " + "const " + NativeName(Name(struct_def), &struct_def, opts) + "* _o, " + "const flatbuffers::rehasher_function_t *_rehasher" + (inclass ? " = nullptr" : "") + ")"; } std::string TableUnPackSignature(const StructDef &struct_def, bool inclass, const IDLOptions &opts) { return NativeName(Name(struct_def), &struct_def, opts) + " *" + (inclass ? "" : Name(struct_def) + "::") + "UnPack(const flatbuffers::resolver_function_t *_resolver" + (inclass ? " = nullptr" : "") + ") const"; } std::string TableUnPackToSignature(const StructDef &struct_def, bool inclass, const IDLOptions &opts) { return "void " + (inclass ? "" : Name(struct_def) + "::") + "UnPackTo(" + NativeName(Name(struct_def), &struct_def, opts) + " *" + "_o, const flatbuffers::resolver_function_t *_resolver" + (inclass ? " = nullptr" : "") + ") const"; } void GenMiniReflectPre(const StructDef *struct_def) { code_.SetValue("NAME", struct_def->name); code_ += "inline const flatbuffers::TypeTable *{{NAME}}TypeTable();"; code_ += ""; } void GenMiniReflect(const StructDef *struct_def, const EnumDef *enum_def) { code_.SetValue("NAME", struct_def ? struct_def->name : enum_def->name); code_.SetValue("SEQ_TYPE", struct_def ? (struct_def->fixed ? "ST_STRUCT" : "ST_TABLE") : (enum_def->is_union ? "ST_UNION" : "ST_ENUM")); auto num_fields = struct_def ? 
struct_def->fields.vec.size() : enum_def->size(); code_.SetValue("NUM_FIELDS", NumToString(num_fields)); std::vector<std::string> names; std::vector<Type> types; if (struct_def) { for (auto it = struct_def->fields.vec.begin(); it != struct_def->fields.vec.end(); ++it) { const auto &field = **it; names.push_back(Name(field)); types.push_back(field.value.type); } } else { for (auto it = enum_def->Vals().begin(); it != enum_def->Vals().end(); ++it) { const auto &ev = **it; names.push_back(Name(ev)); types.push_back(enum_def->is_union ? ev.union_type : Type(enum_def->underlying_type)); } } std::string ts; std::vector<std::string> type_refs; for (auto it = types.begin(); it != types.end(); ++it) { auto &type = *it; if (!ts.empty()) ts += ",\n "; auto is_vector = type.base_type == BASE_TYPE_VECTOR; auto bt = is_vector ? type.element : type.base_type; auto et = IsScalar(bt) || bt == BASE_TYPE_STRING ? bt - BASE_TYPE_UTYPE + ET_UTYPE : ET_SEQUENCE; int ref_idx = -1; std::string ref_name = type.struct_def ? WrapInNameSpace(*type.struct_def) : type.enum_def ? WrapInNameSpace(*type.enum_def) : ""; if (!ref_name.empty()) { auto rit = type_refs.begin(); for (; rit != type_refs.end(); ++rit) { if (*rit == ref_name) { ref_idx = static_cast<int>(rit - type_refs.begin()); break; } } if (rit == type_refs.end()) { ref_idx = static_cast<int>(type_refs.size()); type_refs.push_back(ref_name); } } ts += "{ flatbuffers::" + std::string(ElementaryTypeNames()[et]) + ", " + NumToString(is_vector) + ", " + NumToString(ref_idx) + " }"; } std::string rs; for (auto it = type_refs.begin(); it != type_refs.end(); ++it) { if (!rs.empty()) rs += ",\n "; rs += *it + "TypeTable"; } std::string ns; for (auto it = names.begin(); it != names.end(); ++it) { if (!ns.empty()) ns += ",\n "; ns += "\"" + *it + "\""; } std::string vs; const auto consecutive_enum_from_zero = enum_def && enum_def->MinValue()->IsZero() && ((enum_def->size() - 1) == enum_def->Distance()); if (enum_def && !consecutive_enum_from_zero) { for (auto it = enum_def->Vals().begin(); it != enum_def->Vals().end(); ++it) { const auto &ev = **it; if (!vs.empty()) vs += ", "; vs += NumToStringCpp(enum_def->ToString(ev), enum_def->underlying_type.base_type); } } else if (struct_def && struct_def->fixed) { for (auto it = struct_def->fields.vec.begin(); it != struct_def->fields.vec.end(); ++it) { const auto &field = **it; vs += NumToString(field.value.offset); vs += ", "; } vs += NumToString(struct_def->bytesize); } code_.SetValue("TYPES", ts); code_.SetValue("REFS", rs); code_.SetValue("NAMES", ns); code_.SetValue("VALUES", vs); code_ += "inline const flatbuffers::TypeTable *{{NAME}}TypeTable() {"; if (num_fields) { code_ += " static const flatbuffers::TypeCode type_codes[] = {"; code_ += " {{TYPES}}"; code_ += " };"; } if (!type_refs.empty()) { code_ += " static const flatbuffers::TypeFunction type_refs[] = {"; code_ += " {{REFS}}"; code_ += " };"; } if (!vs.empty()) { // Problem with uint64_t values greater than 9223372036854775807ULL. code_ += " static const int64_t values[] = { {{VALUES}} };"; } auto has_names = num_fields && opts_.mini_reflect == IDLOptions::kTypesAndNames; if (has_names) { code_ += " static const char * const names[] = {"; code_ += " {{NAMES}}"; code_ += " };"; } code_ += " static const flatbuffers::TypeTable tt = {"; code_ += std::string(" flatbuffers::{{SEQ_TYPE}}, {{NUM_FIELDS}}, ") + (num_fields ? "type_codes, " : "nullptr, ") + (!type_refs.empty() ? "type_refs, " : "nullptr, ") + (!vs.empty() ? "values, " : "nullptr, ") + (has_names ? 
"names" : "nullptr"); code_ += " };"; code_ += " return &tt;"; code_ += "}"; code_ += ""; } // Generate an enum declaration, // an enum string lookup table, // and an enum array of values void GenEnum(const EnumDef &enum_def) { code_.SetValue("ENUM_NAME", Name(enum_def)); code_.SetValue("BASE_TYPE", GenTypeBasic(enum_def.underlying_type, false)); GenComment(enum_def.doc_comment); code_ += (opts_.scoped_enums ? "enum class " : "enum ") + Name(enum_def) + "\\"; if (opts_.g_only_fixed_enums) { code_ += " : {{BASE_TYPE}}\\"; } code_ += " {"; code_.SetValue("SEP", ","); auto add_sep = false; for (auto it = enum_def.Vals().begin(); it != enum_def.Vals().end(); ++it) { const auto &ev = **it; if (add_sep) code_ += "{{SEP}}"; GenComment(ev.doc_comment, " "); code_.SetValue("KEY", GenEnumValDecl(enum_def, Name(ev))); code_.SetValue("VALUE", NumToStringCpp(enum_def.ToString(ev), enum_def.underlying_type.base_type)); code_ += " {{KEY}} = {{VALUE}}\\"; add_sep = true; } const EnumVal *minv = enum_def.MinValue(); const EnumVal *maxv = enum_def.MaxValue(); if (opts_.scoped_enums || opts_.prefixed_enums) { FLATBUFFERS_ASSERT(minv && maxv); code_.SetValue("SEP", ",\n"); if (enum_def.attributes.Lookup("bit_flags")) { code_.SetValue("KEY", GenEnumValDecl(enum_def, "NONE")); code_.SetValue("VALUE", "0"); code_ += "{{SEP}} {{KEY}} = {{VALUE}}\\"; code_.SetValue("KEY", GenEnumValDecl(enum_def, "ANY")); code_.SetValue("VALUE", NumToStringCpp(enum_def.AllFlags(), enum_def.underlying_type.base_type)); code_ += "{{SEP}} {{KEY}} = {{VALUE}}\\"; } else { // MIN & MAX are useless for bit_flags code_.SetValue("KEY", GenEnumValDecl(enum_def, "MIN")); code_.SetValue("VALUE", GenEnumValDecl(enum_def, Name(*minv))); code_ += "{{SEP}} {{KEY}} = {{VALUE}}\\"; code_.SetValue("KEY", GenEnumValDecl(enum_def, "MAX")); code_.SetValue("VALUE", GenEnumValDecl(enum_def, Name(*maxv))); code_ += "{{SEP}} {{KEY}} = {{VALUE}}\\"; } } code_ += ""; code_ += "};"; if (opts_.scoped_enums && enum_def.attributes.Lookup("bit_flags")) { code_ += "FLATBUFFERS_DEFINE_BITMASK_OPERATORS({{ENUM_NAME}}, {{BASE_TYPE}})"; } code_ += ""; // Generate an array of all enumeration values auto num_fields = NumToString(enum_def.size()); code_ += "inline const {{ENUM_NAME}} (&EnumValues{{ENUM_NAME}}())[" + num_fields + "] {"; code_ += " static const {{ENUM_NAME}} values[] = {"; for (auto it = enum_def.Vals().begin(); it != enum_def.Vals().end(); ++it) { const auto &ev = **it; auto value = GetEnumValUse(enum_def, ev); auto suffix = *it != enum_def.Vals().back() ? "," : ""; code_ += " " + value + suffix; } code_ += " };"; code_ += " return values;"; code_ += "}"; code_ += ""; // Generate a generate string table for enum values. // Problem is, if values are very sparse that could generate really big // tables. Ideally in that case we generate a map lookup instead, but for // the moment we simply don't output a table at all. auto range = enum_def.Distance(); // Average distance between values above which we consider a table // "too sparse". Change at will. 
static const uint64_t kMaxSparseness = 5; if (range / static_cast<uint64_t>(enum_def.size()) < kMaxSparseness) { code_ += "inline const char * const *EnumNames{{ENUM_NAME}}() {"; code_ += " static const char * const names[" + NumToString(range + 1 + 1) + "] = {"; auto val = enum_def.Vals().front(); for (auto it = enum_def.Vals().begin(); it != enum_def.Vals().end(); ++it) { auto ev = *it; for (auto k = enum_def.Distance(val, ev); k > 1; --k) { code_ += " \"\","; } val = ev; code_ += " \"" + Name(*ev) + "\","; } code_ += " nullptr"; code_ += " };"; code_ += " return names;"; code_ += "}"; code_ += ""; code_ += "inline const char *EnumName{{ENUM_NAME}}({{ENUM_NAME}} e) {"; code_ += " if (flatbuffers::IsOutRange(e, " + GetEnumValUse(enum_def, *enum_def.MinValue()) + ", " + GetEnumValUse(enum_def, *enum_def.MaxValue()) + ")) return \"\";"; code_ += " const size_t index = static_cast<size_t>(e)\\"; if (enum_def.MinValue()->IsNonZero()) { auto vals = GetEnumValUse(enum_def, *enum_def.MinValue()); code_ += " - static_cast<size_t>(" + vals + ")\\"; } code_ += ";"; code_ += " return EnumNames{{ENUM_NAME}}()[index];"; code_ += "}"; code_ += ""; } else { code_ += "inline const char *EnumName{{ENUM_NAME}}({{ENUM_NAME}} e) {"; code_ += " switch (e) {"; for (auto it = enum_def.Vals().begin(); it != enum_def.Vals().end(); ++it) { const auto &ev = **it; code_ += " case " + GetEnumValUse(enum_def, ev) + ": return \"" + Name(ev) + "\";"; } code_ += " default: return \"\";"; code_ += " }"; code_ += "}"; code_ += ""; } // Generate type traits for unions to map from a type to union enum value. if (enum_def.is_union && !enum_def.uses_multiple_type_instances) { for (auto it = enum_def.Vals().begin(); it != enum_def.Vals().end(); ++it) { const auto &ev = **it; if (it == enum_def.Vals().begin()) { code_ += "template<typename T> struct {{ENUM_NAME}}Traits {"; } else { auto name = GetUnionElement(ev, true, true); code_ += "template<> struct {{ENUM_NAME}}Traits<" + name + "> {"; } auto value = GetEnumValUse(enum_def, ev); code_ += " static const {{ENUM_NAME}} enum_value = " + value + ";"; code_ += "};"; code_ += ""; } } if (opts_.generate_object_based_api && enum_def.is_union) { // Generate a union type code_.SetValue("NAME", Name(enum_def)); FLATBUFFERS_ASSERT(enum_def.Lookup("NONE")); code_.SetValue("NONE", GetEnumValUse(enum_def, *enum_def.Lookup("NONE"))); code_ += "struct {{NAME}}Union {"; code_ += " {{NAME}} type;"; code_ += " void *value;"; code_ += ""; code_ += " {{NAME}}Union() : type({{NONE}}), value(nullptr) {}"; code_ += " {{NAME}}Union({{NAME}}Union&& u) FLATBUFFERS_NOEXCEPT :"; code_ += " type({{NONE}}), value(nullptr)"; code_ += " { std::swap(type, u.type); std::swap(value, u.value); }"; code_ += " {{NAME}}Union(const {{NAME}}Union &) FLATBUFFERS_NOEXCEPT;"; code_ += " {{NAME}}Union &operator=(const {{NAME}}Union &u) " "FLATBUFFERS_NOEXCEPT"; code_ += " { {{NAME}}Union t(u); std::swap(type, t.type); std::swap(value, " "t.value); return *this; }"; code_ += " {{NAME}}Union &operator=({{NAME}}Union &&u) FLATBUFFERS_NOEXCEPT"; code_ += " { std::swap(type, u.type); std::swap(value, u.value); return " "*this; }"; code_ += " ~{{NAME}}Union() { Reset(); }"; code_ += ""; code_ += " void Reset();"; code_ += ""; if (!enum_def.uses_multiple_type_instances) { code_ += "#ifndef FLATBUFFERS_CPP98_STL"; code_ += " template <typename T>"; code_ += " void Set(T&& val) {"; code_ += " using RT = typename std::remove_reference<T>::type;"; code_ += " Reset();"; code_ += " type = {{NAME}}Traits<typename 
RT::TableType>::enum_value;"; code_ += " if (type != {{NONE}}) {"; code_ += " value = new RT(std::forward<T>(val));"; code_ += " }"; code_ += " }"; code_ += "#endif // FLATBUFFERS_CPP98_STL"; code_ += ""; } code_ += " " + UnionUnPackSignature(enum_def, true) + ";"; code_ += " " + UnionPackSignature(enum_def, true) + ";"; code_ += ""; for (auto it = enum_def.Vals().begin(); it != enum_def.Vals().end(); ++it) { const auto &ev = **it; if (ev.IsZero()) { continue; } const auto native_type = NativeName(GetUnionElement(ev, true, true, true), ev.union_type.struct_def, opts_); code_.SetValue("NATIVE_TYPE", native_type); code_.SetValue("NATIVE_NAME", Name(ev)); code_.SetValue("NATIVE_ID", GetEnumValUse(enum_def, ev)); code_ += " {{NATIVE_TYPE}} *As{{NATIVE_NAME}}() {"; code_ += " return type == {{NATIVE_ID}} ?"; code_ += " reinterpret_cast<{{NATIVE_TYPE}} *>(value) : nullptr;"; code_ += " }"; code_ += " const {{NATIVE_TYPE}} *As{{NATIVE_NAME}}() const {"; code_ += " return type == {{NATIVE_ID}} ?"; code_ += " reinterpret_cast<const {{NATIVE_TYPE}} *>(value) : nullptr;"; code_ += " }"; } code_ += "};"; code_ += ""; if (opts_.gen_compare) { code_ += ""; code_ += "inline bool operator==(const {{NAME}}Union &lhs, const " "{{NAME}}Union &rhs) {"; code_ += " if (lhs.type != rhs.type) return false;"; code_ += " switch (lhs.type) {"; for (auto it = enum_def.Vals().begin(); it != enum_def.Vals().end(); ++it) { const auto &ev = **it; code_.SetValue("NATIVE_ID", GetEnumValUse(enum_def, ev)); if (ev.IsNonZero()) { const auto native_type = NativeName(GetUnionElement(ev, true, true, true), ev.union_type.struct_def, opts_); code_.SetValue("NATIVE_TYPE", native_type); code_ += " case {{NATIVE_ID}}: {"; code_ += " return *(reinterpret_cast<const {{NATIVE_TYPE}} " "*>(lhs.value)) =="; code_ += " *(reinterpret_cast<const {{NATIVE_TYPE}} " "*>(rhs.value));"; code_ += " }"; } else { code_ += " case {{NATIVE_ID}}: {"; code_ += " return true;"; // "NONE" enum value. code_ += " }"; } } code_ += " default: {"; code_ += " return false;"; code_ += " }"; code_ += " }"; code_ += "}"; code_ += ""; code_ += "inline bool operator!=(const {{NAME}}Union &lhs, const " "{{NAME}}Union &rhs) {"; code_ += " return !(lhs == rhs);"; code_ += "}"; code_ += ""; } } if (enum_def.is_union) { code_ += UnionVerifySignature(enum_def) + ";"; code_ += UnionVectorVerifySignature(enum_def) + ";"; code_ += ""; } } void GenUnionPost(const EnumDef &enum_def) { // Generate a verifier function for this union that can be called by the // table verifier functions. It uses a switch case to select a specific // verifier function to call, this should be safe even if the union type // has been corrupted, since the verifiers will simply fail when called // on the wrong type. 
code_.SetValue("ENUM_NAME", Name(enum_def)); code_ += "inline " + UnionVerifySignature(enum_def) + " {"; code_ += " switch (type) {"; for (auto it = enum_def.Vals().begin(); it != enum_def.Vals().end(); ++it) { const auto &ev = **it; code_.SetValue("LABEL", GetEnumValUse(enum_def, ev)); if (ev.IsNonZero()) { code_.SetValue("TYPE", GetUnionElement(ev, true, true)); code_ += " case {{LABEL}}: {"; auto getptr = " auto ptr = reinterpret_cast<const {{TYPE}} *>(obj);"; if (ev.union_type.base_type == BASE_TYPE_STRUCT) { if (ev.union_type.struct_def->fixed) { code_ += " return verifier.Verify<{{TYPE}}>(static_cast<const " "uint8_t *>(obj), 0);"; } else { code_ += getptr; code_ += " return verifier.VerifyTable(ptr);"; } } else if (ev.union_type.base_type == BASE_TYPE_STRING) { code_ += getptr; code_ += " return verifier.VerifyString(ptr);"; } else { FLATBUFFERS_ASSERT(false); } code_ += " }"; } else { code_ += " case {{LABEL}}: {"; code_ += " return true;"; // "NONE" enum value. code_ += " }"; } } code_ += " default: return true;"; // unknown values are OK. code_ += " }"; code_ += "}"; code_ += ""; code_ += "inline " + UnionVectorVerifySignature(enum_def) + " {"; code_ += " if (!values || !types) return !values && !types;"; code_ += " if (values->size() != types->size()) return false;"; code_ += " for (flatbuffers::uoffset_t i = 0; i < values->size(); ++i) {"; code_ += " if (!Verify" + Name(enum_def) + "("; code_ += " verifier, values->Get(i), types->GetEnum<" + Name(enum_def) + ">(i))) {"; code_ += " return false;"; code_ += " }"; code_ += " }"; code_ += " return true;"; code_ += "}"; code_ += ""; if (opts_.generate_object_based_api) { // Generate union Unpack() and Pack() functions. code_ += "inline " + UnionUnPackSignature(enum_def, false) + " {"; code_ += " switch (type) {"; for (auto it = enum_def.Vals().begin(); it != enum_def.Vals().end(); ++it) { const auto &ev = **it; if (ev.IsZero()) { continue; } code_.SetValue("LABEL", GetEnumValUse(enum_def, ev)); code_.SetValue("TYPE", GetUnionElement(ev, true, true)); code_ += " case {{LABEL}}: {"; code_ += " auto ptr = reinterpret_cast<const {{TYPE}} *>(obj);"; if (ev.union_type.base_type == BASE_TYPE_STRUCT) { if (ev.union_type.struct_def->fixed) { code_ += " return new " + WrapInNameSpace(*ev.union_type.struct_def) + "(*ptr);"; } else { code_ += " return ptr->UnPack(resolver);"; } } else if (ev.union_type.base_type == BASE_TYPE_STRING) { code_ += " return new std::string(ptr->c_str(), ptr->size());"; } else { FLATBUFFERS_ASSERT(false); } code_ += " }"; } code_ += " default: return nullptr;"; code_ += " }"; code_ += "}"; code_ += ""; code_ += "inline " + UnionPackSignature(enum_def, false) + " {"; code_ += " switch (type) {"; for (auto it = enum_def.Vals().begin(); it != enum_def.Vals().end(); ++it) { auto &ev = **it; if (ev.IsZero()) { continue; } code_.SetValue("LABEL", GetEnumValUse(enum_def, ev)); code_.SetValue("TYPE", NativeName(GetUnionElement(ev, true, true, true), ev.union_type.struct_def, opts_)); code_.SetValue("NAME", GetUnionElement(ev, false, true)); code_ += " case {{LABEL}}: {"; code_ += " auto ptr = reinterpret_cast<const {{TYPE}} *>(value);"; if (ev.union_type.base_type == BASE_TYPE_STRUCT) { if (ev.union_type.struct_def->fixed) { code_ += " return _fbb.CreateStruct(*ptr).Union();"; } else { code_ += " return Create{{NAME}}(_fbb, ptr, _rehasher).Union();"; } } else if (ev.union_type.base_type == BASE_TYPE_STRING) { code_ += " return _fbb.CreateString(*ptr).Union();"; } else { FLATBUFFERS_ASSERT(false); } code_ += " }"; } code_ += 
" default: return 0;"; code_ += " }"; code_ += "}"; code_ += ""; // Union copy constructor code_ += "inline {{ENUM_NAME}}Union::{{ENUM_NAME}}Union(const " "{{ENUM_NAME}}Union &u) FLATBUFFERS_NOEXCEPT : type(u.type), " "value(nullptr) {"; code_ += " switch (type) {"; for (auto it = enum_def.Vals().begin(); it != enum_def.Vals().end(); ++it) { const auto &ev = **it; if (ev.IsZero()) { continue; } code_.SetValue("LABEL", GetEnumValUse(enum_def, ev)); code_.SetValue("TYPE", NativeName(GetUnionElement(ev, true, true, true), ev.union_type.struct_def, opts_)); code_ += " case {{LABEL}}: {"; bool copyable = true; if (ev.union_type.base_type == BASE_TYPE_STRUCT) { // Don't generate code to copy if table is not copyable. // TODO(wvo): make tables copyable instead. for (auto fit = ev.union_type.struct_def->fields.vec.begin(); fit != ev.union_type.struct_def->fields.vec.end(); ++fit) { const auto &field = **fit; if (!field.deprecated && field.value.type.struct_def && !field.native_inline) { copyable = false; break; } } } if (copyable) { code_ += " value = new {{TYPE}}(*reinterpret_cast<{{TYPE}} *>" "(u.value));"; } else { code_ += " FLATBUFFERS_ASSERT(false); // {{TYPE}} not copyable."; } code_ += " break;"; code_ += " }"; } code_ += " default:"; code_ += " break;"; code_ += " }"; code_ += "}"; code_ += ""; // Union Reset() function. FLATBUFFERS_ASSERT(enum_def.Lookup("NONE")); code_.SetValue("NONE", GetEnumValUse(enum_def, *enum_def.Lookup("NONE"))); code_ += "inline void {{ENUM_NAME}}Union::Reset() {"; code_ += " switch (type) {"; for (auto it = enum_def.Vals().begin(); it != enum_def.Vals().end(); ++it) { const auto &ev = **it; if (ev.IsZero()) { continue; } code_.SetValue("LABEL", GetEnumValUse(enum_def, ev)); code_.SetValue("TYPE", NativeName(GetUnionElement(ev, true, true, true), ev.union_type.struct_def, opts_)); code_ += " case {{LABEL}}: {"; code_ += " auto ptr = reinterpret_cast<{{TYPE}} *>(value);"; code_ += " delete ptr;"; code_ += " break;"; code_ += " }"; } code_ += " default: break;"; code_ += " }"; code_ += " value = nullptr;"; code_ += " type = {{NONE}};"; code_ += "}"; code_ += ""; } } // Generates a value with optionally a cast applied if the field has a // different underlying type from its interface type (currently only the // case for enums. "from" specify the direction, true meaning from the // underlying type to the interface type. 
std::string GenUnderlyingCast(const FieldDef &field, bool from, const std::string &val) { if (from && field.value.type.base_type == BASE_TYPE_BOOL) { return val + " != 0"; } else if ((field.value.type.enum_def && IsScalar(field.value.type.base_type)) || field.value.type.base_type == BASE_TYPE_BOOL) { return "static_cast<" + GenTypeBasic(field.value.type, from) + ">(" + val + ")"; } else { return val; } } std::string GenFieldOffsetName(const FieldDef &field) { std::string uname = Name(field); std::transform(uname.begin(), uname.end(), uname.begin(), ToUpper); return "VT_" + uname; } void GenFullyQualifiedNameGetter(const StructDef &struct_def, const std::string &name) { if (!opts_.generate_name_strings) { return; } auto fullname = struct_def.defined_namespace->GetFullyQualifiedName(name); code_.SetValue("NAME", fullname); code_.SetValue("CONSTEXPR", "FLATBUFFERS_CONSTEXPR"); code_ += " static {{CONSTEXPR}} const char *GetFullyQualifiedName() {"; code_ += " return \"{{NAME}}\";"; code_ += " }"; } std::string GenDefaultConstant(const FieldDef &field) { if (IsFloat(field.value.type.base_type)) return float_const_gen_.GenFloatConstant(field); else return NumToStringCpp(field.value.constant, field.value.type.base_type); } std::string GetDefaultScalarValue(const FieldDef &field, bool is_ctor) { if (field.value.type.enum_def && IsScalar(field.value.type.base_type)) { auto ev = field.value.type.enum_def->FindByValue(field.value.constant); if (ev) { return WrapInNameSpace(field.value.type.enum_def->defined_namespace, GetEnumValUse(*field.value.type.enum_def, *ev)); } else { return GenUnderlyingCast( field, true, NumToStringCpp(field.value.constant, field.value.type.base_type)); } } else if (field.value.type.base_type == BASE_TYPE_BOOL) { return field.value.constant == "0" ? "false" : "true"; } else if (field.attributes.Lookup("cpp_type")) { if (is_ctor) { if (PtrType(&field) == "naked") { return "nullptr"; } else { return ""; } } else { return "0"; } } else { return GenDefaultConstant(field); } } void GenParam(const FieldDef &field, bool direct, const char *prefix) { code_.SetValue("PRE", prefix); code_.SetValue("PARAM_NAME", Name(field)); if (direct && field.value.type.base_type == BASE_TYPE_STRING) { code_.SetValue("PARAM_TYPE", "const char *"); code_.SetValue("PARAM_VALUE", "nullptr"); } else if (direct && field.value.type.base_type == BASE_TYPE_VECTOR) { const auto vtype = field.value.type.VectorType(); std::string type; if (IsStruct(vtype)) { type = WrapInNameSpace(*vtype.struct_def); } else { type = GenTypeWire(vtype, "", false); } if (TypeHasKey(vtype)) { code_.SetValue("PARAM_TYPE", "std::vector<" + type + "> *"); } else { code_.SetValue("PARAM_TYPE", "const std::vector<" + type + "> *"); } code_.SetValue("PARAM_VALUE", "nullptr"); } else { code_.SetValue("PARAM_TYPE", GenTypeWire(field.value.type, " ", true)); code_.SetValue("PARAM_VALUE", GetDefaultScalarValue(field, false)); } code_ += "{{PRE}}{{PARAM_TYPE}}{{PARAM_NAME}} = {{PARAM_VALUE}}\\"; } // Generate a member, including a default value for scalars and raw pointers. void GenMember(const FieldDef &field) { if (!field.deprecated && // Deprecated fields won't be accessible. field.value.type.base_type != BASE_TYPE_UTYPE && (field.value.type.base_type != BASE_TYPE_VECTOR || field.value.type.element != BASE_TYPE_UTYPE)) { auto type = GenTypeNative(field.value.type, false, field); auto cpp_type = field.attributes.Lookup("cpp_type"); auto full_type = (cpp_type ? (field.value.type.base_type == BASE_TYPE_VECTOR ? 
"std::vector<" + GenTypeNativePtr(cpp_type->constant, &field, false) + "> " : GenTypeNativePtr(cpp_type->constant, &field, false)) : type + " "); code_.SetValue("FIELD_TYPE", full_type); code_.SetValue("FIELD_NAME", Name(field)); code_ += " {{FIELD_TYPE}}{{FIELD_NAME}};"; } } // Generate the default constructor for this struct. Properly initialize all // scalar members with default values. void GenDefaultConstructor(const StructDef &struct_def) { std::string initializer_list; for (auto it = struct_def.fields.vec.begin(); it != struct_def.fields.vec.end(); ++it) { const auto &field = **it; if (!field.deprecated && // Deprecated fields won't be accessible. field.value.type.base_type != BASE_TYPE_UTYPE) { auto cpp_type = field.attributes.Lookup("cpp_type"); auto native_default = field.attributes.Lookup("native_default"); // Scalar types get parsed defaults, raw pointers get nullptrs. if (IsScalar(field.value.type.base_type)) { if (!initializer_list.empty()) { initializer_list += ",\n "; } initializer_list += Name(field); initializer_list += "(" + (native_default ? std::string(native_default->constant) : GetDefaultScalarValue(field, true)) + ")"; } else if (field.value.type.base_type == BASE_TYPE_STRUCT) { if (IsStruct(field.value.type)) { if (native_default) { if (!initializer_list.empty()) { initializer_list += ",\n "; } initializer_list += Name(field) + "(" + native_default->constant + ")"; } } } else if (cpp_type && field.value.type.base_type != BASE_TYPE_VECTOR) { if (!initializer_list.empty()) { initializer_list += ",\n "; } initializer_list += Name(field) + "(0)"; } } } if (!initializer_list.empty()) { initializer_list = "\n : " + initializer_list; } code_.SetValue("NATIVE_NAME", NativeName(Name(struct_def), &struct_def, opts_)); code_.SetValue("INIT_LIST", initializer_list); code_ += " {{NATIVE_NAME}}(){{INIT_LIST}} {"; code_ += " }"; } void GenCompareOperator(const StructDef &struct_def, std::string accessSuffix = "") { std::string compare_op; for (auto it = struct_def.fields.vec.begin(); it != struct_def.fields.vec.end(); ++it) { const auto &field = **it; if (!field.deprecated && // Deprecated fields won't be accessible. field.value.type.base_type != BASE_TYPE_UTYPE && (field.value.type.base_type != BASE_TYPE_VECTOR || field.value.type.element != BASE_TYPE_UTYPE)) { if (!compare_op.empty()) { compare_op += " &&\n "; } auto accessor = Name(field) + accessSuffix; compare_op += "(lhs." + accessor + " == rhs." 
+ accessor + ")"; } } std::string cmp_lhs; std::string cmp_rhs; if (compare_op.empty()) { cmp_lhs = ""; cmp_rhs = ""; compare_op = " return true;"; } else { cmp_lhs = "lhs"; cmp_rhs = "rhs"; compare_op = " return\n " + compare_op + ";"; } code_.SetValue("CMP_OP", compare_op); code_.SetValue("CMP_LHS", cmp_lhs); code_.SetValue("CMP_RHS", cmp_rhs); code_ += ""; code_ += "inline bool operator==(const {{NATIVE_NAME}} &{{CMP_LHS}}, const " "{{NATIVE_NAME}} &{{CMP_RHS}}) {"; code_ += "{{CMP_OP}}"; code_ += "}"; code_ += ""; code_ += "inline bool operator!=(const {{NATIVE_NAME}} &lhs, const " "{{NATIVE_NAME}} &rhs) {"; code_ += " return !(lhs == rhs);"; code_ += "}"; code_ += ""; } void GenOperatorNewDelete(const StructDef &struct_def) { if (auto native_custom_alloc = struct_def.attributes.Lookup("native_custom_alloc")) { code_ += " inline void *operator new (std::size_t count) {"; code_ += " return " + native_custom_alloc->constant + "<{{NATIVE_NAME}}>().allocate(count / sizeof({{NATIVE_NAME}}));"; code_ += " }"; code_ += " inline void operator delete (void *ptr) {"; code_ += " return " + native_custom_alloc->constant + "<{{NATIVE_NAME}}>().deallocate(static_cast<{{NATIVE_NAME}}*>(" "ptr),1);"; code_ += " }"; } } void GenNativeTable(const StructDef &struct_def) { const auto native_name = NativeName(Name(struct_def), &struct_def, opts_); code_.SetValue("STRUCT_NAME", Name(struct_def)); code_.SetValue("NATIVE_NAME", native_name); // Generate a C++ object that can hold an unpacked version of this table. code_ += "struct {{NATIVE_NAME}} : public flatbuffers::NativeTable {"; code_ += " typedef {{STRUCT_NAME}} TableType;"; GenFullyQualifiedNameGetter(struct_def, native_name); for (auto it = struct_def.fields.vec.begin(); it != struct_def.fields.vec.end(); ++it) { GenMember(**it); } GenOperatorNewDelete(struct_def); GenDefaultConstructor(struct_def); code_ += "};"; if (opts_.gen_compare) GenCompareOperator(struct_def); code_ += ""; } // Generate the code to call the appropriate Verify function(s) for a field. void GenVerifyCall(const FieldDef &field, const char *prefix) { code_.SetValue("PRE", prefix); code_.SetValue("NAME", Name(field)); code_.SetValue("REQUIRED", field.required ? 
"Required" : ""); code_.SetValue("SIZE", GenTypeSize(field.value.type)); code_.SetValue("OFFSET", GenFieldOffsetName(field)); if (IsScalar(field.value.type.base_type) || IsStruct(field.value.type)) { code_ += "{{PRE}}VerifyField{{REQUIRED}}<{{SIZE}}>(verifier, {{OFFSET}})\\"; } else { code_ += "{{PRE}}VerifyOffset{{REQUIRED}}(verifier, {{OFFSET}})\\"; } switch (field.value.type.base_type) { case BASE_TYPE_UNION: { code_.SetValue("ENUM_NAME", field.value.type.enum_def->name); code_.SetValue("SUFFIX", UnionTypeFieldSuffix()); code_ += "{{PRE}}Verify{{ENUM_NAME}}(verifier, {{NAME}}(), " "{{NAME}}{{SUFFIX}}())\\"; break; } case BASE_TYPE_STRUCT: { if (!field.value.type.struct_def->fixed) { code_ += "{{PRE}}verifier.VerifyTable({{NAME}}())\\"; } break; } case BASE_TYPE_STRING: { code_ += "{{PRE}}verifier.VerifyString({{NAME}}())\\"; break; } case BASE_TYPE_VECTOR: { code_ += "{{PRE}}verifier.VerifyVector({{NAME}}())\\"; switch (field.value.type.element) { case BASE_TYPE_STRING: { code_ += "{{PRE}}verifier.VerifyVectorOfStrings({{NAME}}())\\"; break; } case BASE_TYPE_STRUCT: { if (!field.value.type.struct_def->fixed) { code_ += "{{PRE}}verifier.VerifyVectorOfTables({{NAME}}())\\"; } break; } case BASE_TYPE_UNION: { code_.SetValue("ENUM_NAME", field.value.type.enum_def->name); code_ += "{{PRE}}Verify{{ENUM_NAME}}Vector(verifier, {{NAME}}(), " "{{NAME}}_type())\\"; break; } default: break; } break; } default: { break; } } } // Generate CompareWithValue method for a key field. void GenKeyFieldMethods(const FieldDef &field) { FLATBUFFERS_ASSERT(field.key); const bool is_string = (field.value.type.base_type == BASE_TYPE_STRING); code_ += " bool KeyCompareLessThan(const {{STRUCT_NAME}} *o) const {"; if (is_string) { // use operator< of flatbuffers::String code_ += " return *{{FIELD_NAME}}() < *o->{{FIELD_NAME}}();"; } else { code_ += " return {{FIELD_NAME}}() < o->{{FIELD_NAME}}();"; } code_ += " }"; if (is_string) { code_ += " int KeyCompareWithValue(const char *val) const {"; code_ += " return strcmp({{FIELD_NAME}}()->c_str(), val);"; code_ += " }"; } else { FLATBUFFERS_ASSERT(IsScalar(field.value.type.base_type)); auto type = GenTypeBasic(field.value.type, false); if (opts_.scoped_enums && field.value.type.enum_def && IsScalar(field.value.type.base_type)) { type = GenTypeGet(field.value.type, " ", "const ", " *", true); } // Returns {field<val: -1, field==val: 0, field>val: +1}. code_.SetValue("KEY_TYPE", type); code_ += " int KeyCompareWithValue({{KEY_TYPE}} val) const {"; code_ += " return static_cast<int>({{FIELD_NAME}}() > val) - " "static_cast<int>({{FIELD_NAME}}() < val);"; code_ += " }"; } } // Generate an accessor struct, builder structs & function for a table. 
void GenTable(const StructDef &struct_def) { if (opts_.generate_object_based_api) { GenNativeTable(struct_def); } // Generate an accessor struct, with methods of the form: // type name() const { return GetField<type>(offset, defaultval); } GenComment(struct_def.doc_comment); code_.SetValue("STRUCT_NAME", Name(struct_def)); code_ += "struct {{STRUCT_NAME}} FLATBUFFERS_FINAL_CLASS" " : private flatbuffers::Table {"; if (opts_.generate_object_based_api) { code_ += " typedef {{NATIVE_NAME}} NativeTableType;"; } code_ += " typedef {{STRUCT_NAME}}Builder Builder;"; if (opts_.g_cpp_std >= cpp::CPP_STD_17) { code_ += " struct Traits;"; } if (opts_.mini_reflect != IDLOptions::kNone) { code_ += " static const flatbuffers::TypeTable *MiniReflectTypeTable() {"; code_ += " return {{STRUCT_NAME}}TypeTable();"; code_ += " }"; } GenFullyQualifiedNameGetter(struct_def, Name(struct_def)); // Generate field id constants. if (struct_def.fields.vec.size() > 0) { // We need to add a trailing comma to all elements except the last one as // older versions of gcc complain about this. code_.SetValue("SEP", ""); code_ += " enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {"; for (auto it = struct_def.fields.vec.begin(); it != struct_def.fields.vec.end(); ++it) { const auto &field = **it; if (field.deprecated) { // Deprecated fields won't be accessible. continue; } code_.SetValue("OFFSET_NAME", GenFieldOffsetName(field)); code_.SetValue("OFFSET_VALUE", NumToString(field.value.offset)); code_ += "{{SEP}} {{OFFSET_NAME}} = {{OFFSET_VALUE}}\\"; code_.SetValue("SEP", ",\n"); } code_ += ""; code_ += " };"; } // Generate the accessors. for (auto it = struct_def.fields.vec.begin(); it != struct_def.fields.vec.end(); ++it) { const auto &field = **it; if (field.deprecated) { // Deprecated fields won't be accessible. continue; } const bool is_struct = IsStruct(field.value.type); const bool is_scalar = IsScalar(field.value.type.base_type); code_.SetValue("FIELD_NAME", Name(field)); // Call a different accessor for pointers, that indirects. std::string accessor = ""; if (is_scalar) { accessor = "GetField<"; } else if (is_struct) { accessor = "GetStruct<"; } else { accessor = "GetPointer<"; } auto offset_str = GenFieldOffsetName(field); auto offset_type = GenTypeGet(field.value.type, "", "const ", " *", false); auto call = accessor + offset_type + ">(" + offset_str; // Default value as second arg for non-pointer types. if (is_scalar) { call += ", " + GenDefaultConstant(field); } call += ")"; std::string afterptr = " *" + NullableExtension(); GenComment(field.doc_comment, " "); code_.SetValue("FIELD_TYPE", GenTypeGet(field.value.type, " ", "const ", afterptr.c_str(), true)); code_.SetValue("FIELD_VALUE", GenUnderlyingCast(field, true, call)); code_.SetValue("NULLABLE_EXT", NullableExtension()); code_ += " {{FIELD_TYPE}}{{FIELD_NAME}}() const {"; code_ += " return {{FIELD_VALUE}};"; code_ += " }"; if (field.value.type.base_type == BASE_TYPE_UNION) { auto u = field.value.type.enum_def; if (!field.value.type.enum_def->uses_multiple_type_instances) code_ += " template<typename T> " "const T *{{NULLABLE_EXT}}{{FIELD_NAME}}_as() const;"; for (auto u_it = u->Vals().begin(); u_it != u->Vals().end(); ++u_it) { auto &ev = **u_it; if (ev.union_type.base_type == BASE_TYPE_NONE) { continue; } auto full_struct_name = GetUnionElement(ev, true, true); // @TODO: Mby make this decisions more universal? How? 
code_.SetValue("U_GET_TYPE", EscapeKeyword(field.name + UnionTypeFieldSuffix())); code_.SetValue( "U_ELEMENT_TYPE", WrapInNameSpace(u->defined_namespace, GetEnumValUse(*u, ev))); code_.SetValue("U_FIELD_TYPE", "const " + full_struct_name + " *"); code_.SetValue("U_FIELD_NAME", Name(field) + "_as_" + Name(ev)); code_.SetValue("U_NULLABLE", NullableExtension()); // `const Type *union_name_asType() const` accessor. code_ += " {{U_FIELD_TYPE}}{{U_NULLABLE}}{{U_FIELD_NAME}}() const {"; code_ += " return {{U_GET_TYPE}}() == {{U_ELEMENT_TYPE}} ? " "static_cast<{{U_FIELD_TYPE}}>({{FIELD_NAME}}()) " ": nullptr;"; code_ += " }"; } } if (opts_.mutable_buffer && !(is_scalar && IsUnion(field.value.type))) { if (is_scalar) { const auto type = GenTypeWire(field.value.type, "", false); code_.SetValue("SET_FN", "SetField<" + type + ">"); code_.SetValue("OFFSET_NAME", offset_str); code_.SetValue("FIELD_TYPE", GenTypeBasic(field.value.type, true)); code_.SetValue("FIELD_VALUE", GenUnderlyingCast(field, false, "_" + Name(field))); code_.SetValue("DEFAULT_VALUE", GenDefaultConstant(field)); code_ += " bool mutate_{{FIELD_NAME}}({{FIELD_TYPE}} " "_{{FIELD_NAME}}) {"; code_ += " return {{SET_FN}}({{OFFSET_NAME}}, {{FIELD_VALUE}}, " "{{DEFAULT_VALUE}});"; code_ += " }"; } else { auto postptr = " *" + NullableExtension(); auto type = GenTypeGet(field.value.type, " ", "", postptr.c_str(), true); auto underlying = accessor + type + ">(" + offset_str + ")"; code_.SetValue("FIELD_TYPE", type); code_.SetValue("FIELD_VALUE", GenUnderlyingCast(field, true, underlying)); code_ += " {{FIELD_TYPE}}mutable_{{FIELD_NAME}}() {"; code_ += " return {{FIELD_VALUE}};"; code_ += " }"; } } auto nested = field.attributes.Lookup("nested_flatbuffer"); if (nested) { std::string qualified_name = nested->constant; auto nested_root = parser_.LookupStruct(nested->constant); if (nested_root == nullptr) { qualified_name = parser_.current_namespace_->GetFullyQualifiedName( nested->constant); nested_root = parser_.LookupStruct(qualified_name); } FLATBUFFERS_ASSERT(nested_root); // Guaranteed to exist by parser. (void)nested_root; code_.SetValue("CPP_NAME", TranslateNameSpace(qualified_name)); code_ += " const {{CPP_NAME}} *{{FIELD_NAME}}_nested_root() const {"; code_ += " return " "flatbuffers::GetRoot<{{CPP_NAME}}>({{FIELD_NAME}}()->Data());"; code_ += " }"; } if (field.flexbuffer) { code_ += " flexbuffers::Reference {{FIELD_NAME}}_flexbuffer_root()" " const {"; // Both Data() and size() are const-methods, therefore call order // doesn't matter. code_ += " return flexbuffers::GetRoot({{FIELD_NAME}}()->Data(), " "{{FIELD_NAME}}()->size());"; code_ += " }"; } // Generate a comparison function for this field if it is a key. if (field.key) { GenKeyFieldMethods(field); } } // Generate a verifier function that can check a buffer from an untrusted // source will never cause reads outside the buffer. code_ += " bool Verify(flatbuffers::Verifier &verifier) const {"; code_ += " return VerifyTableStart(verifier)\\"; for (auto it = struct_def.fields.vec.begin(); it != struct_def.fields.vec.end(); ++it) { const auto &field = **it; if (field.deprecated) { continue; } GenVerifyCall(field, " &&\n "); } code_ += " &&\n verifier.EndTable();"; code_ += " }"; if (opts_.generate_object_based_api) { // Generate the UnPack() pre declaration. 
code_ += " " + TableUnPackSignature(struct_def, true, opts_) + ";"; code_ += " " + TableUnPackToSignature(struct_def, true, opts_) + ";"; code_ += " " + TablePackSignature(struct_def, true, opts_) + ";"; } code_ += "};"; // End of table. code_ += ""; // Explicit specializations for union accessors for (auto it = struct_def.fields.vec.begin(); it != struct_def.fields.vec.end(); ++it) { const auto &field = **it; if (field.deprecated || field.value.type.base_type != BASE_TYPE_UNION) { continue; } auto u = field.value.type.enum_def; if (u->uses_multiple_type_instances) continue; code_.SetValue("FIELD_NAME", Name(field)); for (auto u_it = u->Vals().begin(); u_it != u->Vals().end(); ++u_it) { auto &ev = **u_it; if (ev.union_type.base_type == BASE_TYPE_NONE) { continue; } auto full_struct_name = GetUnionElement(ev, true, true); code_.SetValue( "U_ELEMENT_TYPE", WrapInNameSpace(u->defined_namespace, GetEnumValUse(*u, ev))); code_.SetValue("U_FIELD_TYPE", "const " + full_struct_name + " *"); code_.SetValue("U_ELEMENT_NAME", full_struct_name); code_.SetValue("U_FIELD_NAME", Name(field) + "_as_" + Name(ev)); // `template<> const T *union_name_as<T>() const` accessor. code_ += "template<> " "inline {{U_FIELD_TYPE}}{{STRUCT_NAME}}::{{FIELD_NAME}}_as" "<{{U_ELEMENT_NAME}}>() const {"; code_ += " return {{U_FIELD_NAME}}();"; code_ += "}"; code_ += ""; } } GenBuilders(struct_def); if (opts_.generate_object_based_api) { // Generate a pre-declaration for a CreateX method that works with an // unpacked C++ object. code_ += TableCreateSignature(struct_def, true, opts_) + ";"; code_ += ""; } } void GenBuilders(const StructDef &struct_def) { code_.SetValue("STRUCT_NAME", Name(struct_def)); // Generate a builder struct: code_ += "struct {{STRUCT_NAME}}Builder {"; code_ += " typedef {{STRUCT_NAME}} Table;"; code_ += " flatbuffers::FlatBufferBuilder &fbb_;"; code_ += " flatbuffers::uoffset_t start_;"; bool has_string_or_vector_fields = false; for (auto it = struct_def.fields.vec.begin(); it != struct_def.fields.vec.end(); ++it) { const auto &field = **it; if (!field.deprecated) { const bool is_scalar = IsScalar(field.value.type.base_type); const bool is_string = field.value.type.base_type == BASE_TYPE_STRING; const bool is_vector = field.value.type.base_type == BASE_TYPE_VECTOR; if (is_string || is_vector) { has_string_or_vector_fields = true; } std::string offset = GenFieldOffsetName(field); std::string name = GenUnderlyingCast(field, false, Name(field)); std::string value = is_scalar ? 
GenDefaultConstant(field) : ""; // Generate accessor functions of the form: // void add_name(type name) { // fbb_.AddElement<type>(offset, name, default); // } code_.SetValue("FIELD_NAME", Name(field)); code_.SetValue("FIELD_TYPE", GenTypeWire(field.value.type, " ", true)); code_.SetValue("ADD_OFFSET", Name(struct_def) + "::" + offset); code_.SetValue("ADD_NAME", name); code_.SetValue("ADD_VALUE", value); if (is_scalar) { const auto type = GenTypeWire(field.value.type, "", false); code_.SetValue("ADD_FN", "AddElement<" + type + ">"); } else if (IsStruct(field.value.type)) { code_.SetValue("ADD_FN", "AddStruct"); } else { code_.SetValue("ADD_FN", "AddOffset"); } code_ += " void add_{{FIELD_NAME}}({{FIELD_TYPE}}{{FIELD_NAME}}) {"; code_ += " fbb_.{{ADD_FN}}(\\"; if (is_scalar) { code_ += "{{ADD_OFFSET}}, {{ADD_NAME}}, {{ADD_VALUE}});"; } else { code_ += "{{ADD_OFFSET}}, {{ADD_NAME}});"; } code_ += " }"; } } // Builder constructor code_ += " explicit {{STRUCT_NAME}}Builder(flatbuffers::FlatBufferBuilder " "&_fbb)"; code_ += " : fbb_(_fbb) {"; code_ += " start_ = fbb_.StartTable();"; code_ += " }"; // Assignment operator; code_ += " {{STRUCT_NAME}}Builder &operator=" "(const {{STRUCT_NAME}}Builder &);"; // Finish() function. code_ += " flatbuffers::Offset<{{STRUCT_NAME}}> Finish() {"; code_ += " const auto end = fbb_.EndTable(start_);"; code_ += " auto o = flatbuffers::Offset<{{STRUCT_NAME}}>(end);"; for (auto it = struct_def.fields.vec.begin(); it != struct_def.fields.vec.end(); ++it) { const auto &field = **it; if (!field.deprecated && field.required) { code_.SetValue("FIELD_NAME", Name(field)); code_.SetValue("OFFSET_NAME", GenFieldOffsetName(field)); code_ += " fbb_.Required(o, {{STRUCT_NAME}}::{{OFFSET_NAME}});"; } } code_ += " return o;"; code_ += " }"; code_ += "};"; code_ += ""; // Generate a convenient CreateX function that uses the above builder // to create a table in one go. code_ += "inline flatbuffers::Offset<{{STRUCT_NAME}}> " "Create{{STRUCT_NAME}}("; code_ += " flatbuffers::FlatBufferBuilder &_fbb\\"; for (auto it = struct_def.fields.vec.begin(); it != struct_def.fields.vec.end(); ++it) { const auto &field = **it; if (!field.deprecated) { GenParam(field, false, ",\n "); } } code_ += ") {"; code_ += " {{STRUCT_NAME}}Builder builder_(_fbb);"; for (size_t size = struct_def.sortbysize ? sizeof(largest_scalar_t) : 1; size; size /= 2) { for (auto it = struct_def.fields.vec.rbegin(); it != struct_def.fields.vec.rend(); ++it) { const auto &field = **it; if (!field.deprecated && (!struct_def.sortbysize || size == SizeOf(field.value.type.base_type))) { code_.SetValue("FIELD_NAME", Name(field)); code_ += " builder_.add_{{FIELD_NAME}}({{FIELD_NAME}});"; } } } code_ += " return builder_.Finish();"; code_ += "}"; code_ += ""; // Definition for type traits for this table type. This allows querying var- // ious compile-time traits of the table. 
if (opts_.g_cpp_std >= cpp::CPP_STD_17) { code_ += "struct {{STRUCT_NAME}}::Traits {"; code_ += " using type = {{STRUCT_NAME}};"; code_ += " static auto constexpr Create = Create{{STRUCT_NAME}};"; code_ += "};"; code_ += ""; } // Generate a CreateXDirect function with vector types as parameters if (has_string_or_vector_fields) { code_ += "inline flatbuffers::Offset<{{STRUCT_NAME}}> " "Create{{STRUCT_NAME}}Direct("; code_ += " flatbuffers::FlatBufferBuilder &_fbb\\"; for (auto it = struct_def.fields.vec.begin(); it != struct_def.fields.vec.end(); ++it) { const auto &field = **it; if (!field.deprecated) { GenParam(field, true, ",\n "); } } // Need to call "Create" with the struct namespace. const auto qualified_create_name = struct_def.defined_namespace->GetFullyQualifiedName("Create"); code_.SetValue("CREATE_NAME", TranslateNameSpace(qualified_create_name)); code_ += ") {"; for (auto it = struct_def.fields.vec.begin(); it != struct_def.fields.vec.end(); ++it) { const auto &field = **it; if (!field.deprecated) { code_.SetValue("FIELD_NAME", Name(field)); if (field.value.type.base_type == BASE_TYPE_STRING) { if (!field.shared) { code_.SetValue("CREATE_STRING", "CreateString"); } else { code_.SetValue("CREATE_STRING", "CreateSharedString"); } code_ += " auto {{FIELD_NAME}}__ = {{FIELD_NAME}} ? " "_fbb.{{CREATE_STRING}}({{FIELD_NAME}}) : 0;"; } else if (field.value.type.base_type == BASE_TYPE_VECTOR) { code_ += " auto {{FIELD_NAME}}__ = {{FIELD_NAME}} ? \\"; const auto vtype = field.value.type.VectorType(); const auto has_key = TypeHasKey(vtype); if (IsStruct(vtype)) { const auto type = WrapInNameSpace(*vtype.struct_def); code_ += (has_key ? "_fbb.CreateVectorOfSortedStructs<" : "_fbb.CreateVectorOfStructs<") + type + ">\\"; } else if (has_key) { const auto type = WrapInNameSpace(*vtype.struct_def); code_ += "_fbb.CreateVectorOfSortedTables<" + type + ">\\"; } else { const auto type = GenTypeWire(vtype, "", false); code_ += "_fbb.CreateVector<" + type + ">\\"; } code_ += has_key ? 
"({{FIELD_NAME}}) : 0;" : "(*{{FIELD_NAME}}) : 0;"; } } } code_ += " return {{CREATE_NAME}}{{STRUCT_NAME}}("; code_ += " _fbb\\"; for (auto it = struct_def.fields.vec.begin(); it != struct_def.fields.vec.end(); ++it) { const auto &field = **it; if (!field.deprecated) { code_.SetValue("FIELD_NAME", Name(field)); code_ += ",\n {{FIELD_NAME}}\\"; if (field.value.type.base_type == BASE_TYPE_STRING || field.value.type.base_type == BASE_TYPE_VECTOR) { code_ += "__\\"; } } } code_ += ");"; code_ += "}"; code_ += ""; } } std::string GenUnionUnpackVal(const FieldDef &afield, const char *vec_elem_access, const char *vec_type_access) { auto type_name = WrapInNameSpace(*afield.value.type.enum_def); return type_name + "Union::UnPack(" + "_e" + vec_elem_access + ", " + EscapeKeyword(afield.name + UnionTypeFieldSuffix()) + "()" + vec_type_access + ", _resolver)"; } std::string GenUnpackVal(const Type &type, const std::string &val, bool invector, const FieldDef &afield) { switch (type.base_type) { case BASE_TYPE_STRING: { if (FlexibleStringConstructor(&afield)) { return NativeString(&afield) + "(" + val + "->c_str(), " + val + "->size())"; } else { return val + "->str()"; } } case BASE_TYPE_STRUCT: { const auto name = WrapInNameSpace(*type.struct_def); if (IsStruct(type)) { auto native_type = type.struct_def->attributes.Lookup("native_type"); if (native_type) { return "flatbuffers::UnPack(*" + val + ")"; } else if (invector || afield.native_inline) { return "*" + val; } else { const auto ptype = GenTypeNativePtr(name, &afield, true); return ptype + "(new " + name + "(*" + val + "))"; } } else { const auto ptype = GenTypeNativePtr( NativeName(name, type.struct_def, opts_), &afield, true); return ptype + "(" + val + "->UnPack(_resolver))"; } } case BASE_TYPE_UNION: { return GenUnionUnpackVal( afield, invector ? "->Get(_i)" : "", invector ? ("->GetEnum<" + type.enum_def->name + ">(_i)").c_str() : ""); } default: { return val; break; } } } std::string GenUnpackFieldStatement(const FieldDef &field, const FieldDef *union_field) { std::string code; switch (field.value.type.base_type) { case BASE_TYPE_VECTOR: { auto cpp_type = field.attributes.Lookup("cpp_type"); std::string indexing; if (field.value.type.enum_def) { indexing += "static_cast<" + WrapInNameSpace(*field.value.type.enum_def) + ">("; } indexing += "_e->Get(_i)"; if (field.value.type.enum_def) { indexing += ")"; } if (field.value.type.element == BASE_TYPE_BOOL) { indexing += " != 0"; } // Generate code that pushes data from _e to _o in the form: // for (uoffset_t i = 0; i < _e->size(); ++i) { // _o->field.push_back(_e->Get(_i)); // } auto name = Name(field); if (field.value.type.element == BASE_TYPE_UTYPE) { name = StripUnionType(Name(field)); } auto access = field.value.type.element == BASE_TYPE_UTYPE ? ".type" : (field.value.type.element == BASE_TYPE_UNION ? 
".value" : ""); code += "{ _o->" + name + ".resize(_e->size()); "; code += "for (flatbuffers::uoffset_t _i = 0;"; code += " _i < _e->size(); _i++) { "; if (cpp_type) { // Generate code that resolves the cpp pointer type, of the form: // if (resolver) // (*resolver)(&_o->field, (hash_value_t)(_e)); // else // _o->field = nullptr; code += "//vector resolver, " + PtrType(&field) + "\n"; code += "if (_resolver) "; code += "(*_resolver)"; code += "(reinterpret_cast<void **>(&_o->" + name + "[_i]" + access + "), "; code += "static_cast<flatbuffers::hash_value_t>(" + indexing + "));"; if (PtrType(&field) == "naked") { code += " else "; code += "_o->" + name + "[_i]" + access + " = nullptr"; } else { // code += " else "; // code += "_o->" + name + "[_i]" + access + " = " + // GenTypeNativePtr(cpp_type->constant, &field, true) + "();"; code += "/* else do nothing */"; } } else { code += "_o->" + name + "[_i]" + access + " = "; code += GenUnpackVal(field.value.type.VectorType(), indexing, true, field); } code += "; } }"; break; } case BASE_TYPE_UTYPE: { FLATBUFFERS_ASSERT(union_field->value.type.base_type == BASE_TYPE_UNION); // Generate code that sets the union type, of the form: // _o->field.type = _e; code += "_o->" + union_field->name + ".type = _e;"; break; } case BASE_TYPE_UNION: { // Generate code that sets the union value, of the form: // _o->field.value = Union::Unpack(_e, field_type(), resolver); code += "_o->" + Name(field) + ".value = "; code += GenUnionUnpackVal(field, "", ""); code += ";"; break; } default: { auto cpp_type = field.attributes.Lookup("cpp_type"); if (cpp_type) { // Generate code that resolves the cpp pointer type, of the form: // if (resolver) // (*resolver)(&_o->field, (hash_value_t)(_e)); // else // _o->field = nullptr; code += "//scalar resolver, " + PtrType(&field) + " \n"; code += "if (_resolver) "; code += "(*_resolver)"; code += "(reinterpret_cast<void **>(&_o->" + Name(field) + "), "; code += "static_cast<flatbuffers::hash_value_t>(_e));"; if (PtrType(&field) == "naked") { code += " else "; code += "_o->" + Name(field) + " = nullptr;"; } else { // code += " else "; // code += "_o->" + Name(field) + " = " + // GenTypeNativePtr(cpp_type->constant, &field, true) + "();"; code += "/* else do nothing */;"; } } else { // Generate code for assigning the value, of the form: // _o->field = value; code += "_o->" + Name(field) + " = "; code += GenUnpackVal(field.value.type, "_e", false, field) + ";"; } break; } } return code; } std::string GenCreateParam(const FieldDef &field) { std::string value = "_o->"; if (field.value.type.base_type == BASE_TYPE_UTYPE) { value += StripUnionType(Name(field)); value += ".type"; } else { value += Name(field); } if (field.value.type.base_type != BASE_TYPE_VECTOR && field.attributes.Lookup("cpp_type")) { auto type = GenTypeBasic(field.value.type, false); value = "_rehasher ? " "static_cast<" + type + ">((*_rehasher)(" + value + GenPtrGet(field) + ")) : 0"; } std::string code; switch (field.value.type.base_type) { // String fields are of the form: // _fbb.CreateString(_o->field) // or // _fbb.CreateSharedString(_o->field) case BASE_TYPE_STRING: { if (!field.shared) { code += "_fbb.CreateString("; } else { code += "_fbb.CreateSharedString("; } code += value; code.push_back(')'); // For optional fields, check to see if there actually is any data // in _o->field before attempting to access it. If there isn't, // depending on set_empty_strings_to_null either set it to 0 or an empty string. 
if (!field.required) { auto empty_value = opts_.set_empty_strings_to_null ? "0" : "_fbb.CreateSharedString(\"\")"; code = value + ".empty() ? " + empty_value + " : " + code; } break; } // Vector fields come in several flavours, of the forms: // _fbb.CreateVector(_o->field); // _fbb.CreateVector((const utype*)_o->field.data(), _o->field.size()); // _fbb.CreateVectorOfStrings(_o->field) // _fbb.CreateVectorOfStructs(_o->field) // _fbb.CreateVector<Offset<T>>(_o->field.size() [&](size_t i) { // return CreateT(_fbb, _o->Get(i), rehasher); // }); case BASE_TYPE_VECTOR: { auto vector_type = field.value.type.VectorType(); switch (vector_type.base_type) { case BASE_TYPE_STRING: { if (NativeString(&field) == "std::string") { code += "_fbb.CreateVectorOfStrings(" + value + ")"; } else { // Use by-function serialization to emulate // CreateVectorOfStrings(); this works also with non-std strings. code += "_fbb.CreateVector<flatbuffers::Offset<flatbuffers::String>>" " "; code += "(" + value + ".size(), "; code += "[](size_t i, _VectorArgs *__va) { "; code += "return __va->__fbb->CreateString(__va->_" + value + "[i]);"; code += " }, &_va )"; } break; } case BASE_TYPE_STRUCT: { if (IsStruct(vector_type)) { auto native_type = field.value.type.struct_def->attributes.Lookup("native_type"); if (native_type) { code += "_fbb.CreateVectorOfNativeStructs<"; code += WrapInNameSpace(*vector_type.struct_def) + ">"; } else { code += "_fbb.CreateVectorOfStructs"; } code += "(" + value + ")"; } else { code += "_fbb.CreateVector<flatbuffers::Offset<"; code += WrapInNameSpace(*vector_type.struct_def) + ">> "; code += "(" + value + ".size(), "; code += "[](size_t i, _VectorArgs *__va) { "; code += "return Create" + vector_type.struct_def->name; code += "(*__va->__fbb, __va->_" + value + "[i]" + GenPtrGet(field) + ", "; code += "__va->__rehasher); }, &_va )"; } break; } case BASE_TYPE_BOOL: { code += "_fbb.CreateVector(" + value + ")"; break; } case BASE_TYPE_UNION: { code += "_fbb.CreateVector<flatbuffers::" "Offset<void>>(" + value + ".size(), [](size_t i, _VectorArgs *__va) { " "return __va->_" + value + "[i].Pack(*__va->__fbb, __va->__rehasher); }, &_va)"; break; } case BASE_TYPE_UTYPE: { value = StripUnionType(value); code += "_fbb.CreateVector<uint8_t>(" + value + ".size(), [](size_t i, _VectorArgs *__va) { " "return static_cast<uint8_t>(__va->_" + value + "[i].type); }, &_va)"; break; } default: { if (field.value.type.enum_def) { // For enumerations, we need to get access to the array data for // the underlying storage type (eg. uint8_t). const auto basetype = GenTypeBasic( field.value.type.enum_def->underlying_type, false); code += "_fbb.CreateVectorScalarCast<" + basetype + ">(flatbuffers::data(" + value + "), " + value + ".size())"; } else if (field.attributes.Lookup("cpp_type")) { auto type = GenTypeBasic(vector_type, false); code += "_fbb.CreateVector<" + type + ">(" + value + ".size(), "; code += "[](size_t i, _VectorArgs *__va) { "; code += "return __va->__rehasher ? "; code += "static_cast<" + type + ">((*__va->__rehasher)"; code += "(__va->_" + value + "[i]" + GenPtrGet(field) + ")) : 0"; code += "; }, &_va )"; } else { code += "_fbb.CreateVector(" + value + ")"; } break; } } // If set_empty_vectors_to_null option is enabled, for optional fields, check to // see if there actually is any data in _o->field before attempting to // access it. if (opts_.set_empty_vectors_to_null && !field.required) { code = value + ".size() ? 
" + code + " : 0"; } break; } case BASE_TYPE_UNION: { // _o->field.Pack(_fbb); code += value + ".Pack(_fbb)"; break; } case BASE_TYPE_STRUCT: { if (IsStruct(field.value.type)) { auto native_type = field.value.type.struct_def->attributes.Lookup("native_type"); if (native_type) { code += "flatbuffers::Pack(" + value + ")"; } else if (field.native_inline) { code += "&" + value; } else { code += value + " ? " + value + GenPtrGet(field) + " : 0"; } } else { // _o->field ? CreateT(_fbb, _o->field.get(), _rehasher); const auto type = field.value.type.struct_def->name; code += value + " ? Create" + type; code += "(_fbb, " + value + GenPtrGet(field) + ", _rehasher)"; code += " : 0"; } break; } default: { code += value; break; } } return code; } // Generate code for tables that needs to come after the regular definition. void GenTablePost(const StructDef &struct_def) { code_.SetValue("STRUCT_NAME", Name(struct_def)); code_.SetValue("NATIVE_NAME", NativeName(Name(struct_def), &struct_def, opts_)); if (opts_.generate_object_based_api) { // Generate the X::UnPack() method. code_ += "inline " + TableUnPackSignature(struct_def, false, opts_) + " {"; code_ += " auto _o = new {{NATIVE_NAME}}();"; code_ += " UnPackTo(_o, _resolver);"; code_ += " return _o;"; code_ += "}"; code_ += ""; code_ += "inline " + TableUnPackToSignature(struct_def, false, opts_) + " {"; code_ += " (void)_o;"; code_ += " (void)_resolver;"; for (auto it = struct_def.fields.vec.begin(); it != struct_def.fields.vec.end(); ++it) { const auto &field = **it; if (field.deprecated) { continue; } // Assign a value from |this| to |_o|. Values from |this| are stored // in a variable |_e| by calling this->field_type(). The value is then // assigned to |_o| using the GenUnpackFieldStatement. const bool is_union = field.value.type.base_type == BASE_TYPE_UTYPE; const auto statement = GenUnpackFieldStatement(field, is_union ? *(it + 1) : nullptr); code_.SetValue("FIELD_NAME", Name(field)); auto prefix = " { auto _e = {{FIELD_NAME}}(); "; auto check = IsScalar(field.value.type.base_type) ? "" : "if (_e) "; auto postfix = " }"; code_ += std::string(prefix) + check + statement + postfix; } code_ += "}"; code_ += ""; // Generate the X::Pack member function that simply calls the global // CreateX function. code_ += "inline " + TablePackSignature(struct_def, false, opts_) + " {"; code_ += " return Create{{STRUCT_NAME}}(_fbb, _o, _rehasher);"; code_ += "}"; code_ += ""; // Generate a CreateX method that works with an unpacked C++ object. code_ += "inline " + TableCreateSignature(struct_def, false, opts_) + " {"; code_ += " (void)_rehasher;"; code_ += " (void)_o;"; code_ += " struct _VectorArgs " "{ flatbuffers::FlatBufferBuilder *__fbb; " "const " + NativeName(Name(struct_def), &struct_def, opts_) + "* __o; " "const flatbuffers::rehasher_function_t *__rehasher; } _va = { " "&_fbb, _o, _rehasher}; (void)_va;"; for (auto it = struct_def.fields.vec.begin(); it != struct_def.fields.vec.end(); ++it) { auto &field = **it; if (field.deprecated) { continue; } code_ += " auto _" + Name(field) + " = " + GenCreateParam(field) + ";"; } // Need to call "Create" with the struct namespace. 
const auto qualified_create_name = struct_def.defined_namespace->GetFullyQualifiedName("Create"); code_.SetValue("CREATE_NAME", TranslateNameSpace(qualified_create_name)); code_ += " return {{CREATE_NAME}}{{STRUCT_NAME}}("; code_ += " _fbb\\"; for (auto it = struct_def.fields.vec.begin(); it != struct_def.fields.vec.end(); ++it) { auto &field = **it; if (field.deprecated) { continue; } bool pass_by_address = false; if (field.value.type.base_type == BASE_TYPE_STRUCT) { if (IsStruct(field.value.type)) { auto native_type = field.value.type.struct_def->attributes.Lookup("native_type"); if (native_type) { pass_by_address = true; } } } // Call the CreateX function using values from |_o|. if (pass_by_address) { code_ += ",\n &_" + Name(field) + "\\"; } else { code_ += ",\n _" + Name(field) + "\\"; } } code_ += ");"; code_ += "}"; code_ += ""; } } static void GenPadding( const FieldDef &field, std::string *code_ptr, int *id, const std::function<void(int bits, std::string *code_ptr, int *id)> &f) { if (field.padding) { for (int i = 0; i < 4; i++) { if (static_cast<int>(field.padding) & (1 << i)) { f((1 << i) * 8, code_ptr, id); } } FLATBUFFERS_ASSERT(!(field.padding & ~0xF)); } } static void PaddingDefinition(int bits, std::string *code_ptr, int *id) { *code_ptr += " int" + NumToString(bits) + "_t padding" + NumToString((*id)++) + "__;"; } static void PaddingInitializer(int bits, std::string *code_ptr, int *id) { (void)bits; if (*code_ptr != "") *code_ptr += ",\n "; *code_ptr += "padding" + NumToString((*id)++) + "__(0)"; } static void PaddingNoop(int bits, std::string *code_ptr, int *id) { (void)bits; *code_ptr += " (void)padding" + NumToString((*id)++) + "__;"; } // Generate an accessor struct with constructor for a flatbuffers struct. void GenStruct(const StructDef &struct_def) { // Generate an accessor struct, with private variables of the form: // type name_; // Generates manual padding and alignment. // Variables are private because they contain little endian data on all // platforms. GenComment(struct_def.doc_comment); code_.SetValue("ALIGN", NumToString(struct_def.minalign)); code_.SetValue("STRUCT_NAME", Name(struct_def)); code_ += "FLATBUFFERS_MANUALLY_ALIGNED_STRUCT({{ALIGN}}) " "{{STRUCT_NAME}} FLATBUFFERS_FINAL_CLASS {"; code_ += " private:"; int padding_id = 0; for (auto it = struct_def.fields.vec.begin(); it != struct_def.fields.vec.end(); ++it) { const auto &field = **it; const auto &field_type = field.value.type; code_.SetValue("FIELD_TYPE", GenTypeGet(field_type, " ", "", " ", false)); code_.SetValue("FIELD_NAME", Name(field)); code_.SetValue("ARRAY", IsArray(field_type) ? "[" + NumToString(field_type.fixed_length) + "]" : ""); code_ += (" {{FIELD_TYPE}}{{FIELD_NAME}}_{{ARRAY}};"); if (field.padding) { std::string padding; GenPadding(field, &padding, &padding_id, PaddingDefinition); code_ += padding; } } // Generate GetFullyQualifiedName code_ += ""; code_ += " public:"; // Make TypeTable accessible via the generated struct. if (opts_.mini_reflect != IDLOptions::kNone) { code_ += " static const flatbuffers::TypeTable *MiniReflectTypeTable() {"; code_ += " return {{STRUCT_NAME}}TypeTable();"; code_ += " }"; } GenFullyQualifiedNameGetter(struct_def, Name(struct_def)); // Generate a default constructor. 
code_ += " {{STRUCT_NAME}}() {"; code_ += " memset(static_cast<void *>(this), 0, sizeof({{STRUCT_NAME}}));"; code_ += " }"; // Generate a constructor that takes all fields as arguments, // excluding arrays std::string arg_list; std::string init_list; padding_id = 0; auto first = struct_def.fields.vec.begin(); for (auto it = struct_def.fields.vec.begin(); it != struct_def.fields.vec.end(); ++it) { const auto &field = **it; if (IsArray(field.value.type)) { first++; continue; } const auto member_name = Name(field) + "_"; const auto arg_name = "_" + Name(field); const auto arg_type = GenTypeGet(field.value.type, " ", "const ", " &", true); if (it != first) { arg_list += ", "; } arg_list += arg_type; arg_list += arg_name; if (!IsArray(field.value.type)) { if (it != first && init_list != "") { init_list += ",\n "; } init_list += member_name; if (IsScalar(field.value.type.base_type)) { auto type = GenUnderlyingCast(field, false, arg_name); init_list += "(flatbuffers::EndianScalar(" + type + "))"; } else { init_list += "(" + arg_name + ")"; } } if (field.padding) { GenPadding(field, &init_list, &padding_id, PaddingInitializer); } } if (!arg_list.empty()) { code_.SetValue("ARG_LIST", arg_list); code_.SetValue("INIT_LIST", init_list); if (!init_list.empty()) { code_ += " {{STRUCT_NAME}}({{ARG_LIST}})"; code_ += " : {{INIT_LIST}} {"; } else { code_ += " {{STRUCT_NAME}}({{ARG_LIST}}) {"; } padding_id = 0; for (auto it = struct_def.fields.vec.begin(); it != struct_def.fields.vec.end(); ++it) { const auto &field = **it; if (IsArray(field.value.type)) { const auto &member = Name(field) + "_"; code_ += " std::memset(" + member + ", 0, sizeof(" + member + "));"; } if (field.padding) { std::string padding; GenPadding(field, &padding, &padding_id, PaddingNoop); code_ += padding; } } code_ += " }"; } // Generate accessor methods of the form: // type name() const { return flatbuffers::EndianScalar(name_); } for (auto it = struct_def.fields.vec.begin(); it != struct_def.fields.vec.end(); ++it) { const auto &field = **it; auto field_type = GenTypeGet(field.value.type, " ", IsArray(field.value.type) ? "" : "const ", IsArray(field.value.type) ? "" : " &", true); auto is_scalar = IsScalar(field.value.type.base_type); auto member = Name(field) + "_"; auto value = is_scalar ? "flatbuffers::EndianScalar(" + member + ")" : member; code_.SetValue("FIELD_NAME", Name(field)); code_.SetValue("FIELD_TYPE", field_type); code_.SetValue("FIELD_VALUE", GenUnderlyingCast(field, true, value)); GenComment(field.doc_comment, " "); // Generate a const accessor function. if (IsArray(field.value.type)) { auto underlying = GenTypeGet(field.value.type, "", "", "", false); code_ += " const flatbuffers::Array<" + field_type + ", " + NumToString(field.value.type.fixed_length) + "> *" + "{{FIELD_NAME}}() const {"; code_ += " return reinterpret_cast<const flatbuffers::Array<" + field_type + ", " + NumToString(field.value.type.fixed_length) + "> *>({{FIELD_VALUE}});"; code_ += " }"; } else { code_ += " {{FIELD_TYPE}}{{FIELD_NAME}}() const {"; code_ += " return {{FIELD_VALUE}};"; code_ += " }"; } // Generate a mutable accessor function. if (opts_.mutable_buffer) { auto mut_field_type = GenTypeGet(field.value.type, " ", "", IsArray(field.value.type) ? 
"" : " &", true); code_.SetValue("FIELD_TYPE", mut_field_type); if (is_scalar) { code_.SetValue("ARG", GenTypeBasic(field.value.type, true)); code_.SetValue("FIELD_VALUE", GenUnderlyingCast(field, false, "_" + Name(field))); code_ += " void mutate_{{FIELD_NAME}}({{ARG}} _{{FIELD_NAME}}) {"; code_ += " flatbuffers::WriteScalar(&{{FIELD_NAME}}_, " "{{FIELD_VALUE}});"; code_ += " }"; } else if (IsArray(field.value.type)) { auto underlying = GenTypeGet(field.value.type, "", "", "", false); code_ += " flatbuffers::Array<" + mut_field_type + ", " + NumToString(field.value.type.fixed_length) + "> *" + "mutable_{{FIELD_NAME}}() {"; code_ += " return reinterpret_cast<flatbuffers::Array<" + mut_field_type + ", " + NumToString(field.value.type.fixed_length) + "> *>({{FIELD_VALUE}});"; code_ += " }"; } else { code_ += " {{FIELD_TYPE}}mutable_{{FIELD_NAME}}() {"; code_ += " return {{FIELD_VALUE}};"; code_ += " }"; } } // Generate a comparison function for this field if it is a key. if (field.key) { GenKeyFieldMethods(field); } } code_.SetValue("NATIVE_NAME", Name(struct_def)); GenOperatorNewDelete(struct_def); code_ += "};"; code_.SetValue("STRUCT_BYTE_SIZE", NumToString(struct_def.bytesize)); code_ += "FLATBUFFERS_STRUCT_END({{STRUCT_NAME}}, {{STRUCT_BYTE_SIZE}});"; if (opts_.gen_compare) GenCompareOperator(struct_def, "()"); code_ += ""; } // Set up the correct namespace. Only open a namespace if the existing one is // different (closing/opening only what is necessary). // // The file must start and end with an empty (or null) namespace so that // namespaces are properly opened and closed. void SetNameSpace(const Namespace *ns) { if (cur_name_space_ == ns) { return; } // Compute the size of the longest common namespace prefix. // If cur_name_space is A::B::C::D and ns is A::B::E::F::G, // the common prefix is A::B:: and we have old_size = 4, new_size = 5 // and common_prefix_size = 2 size_t old_size = cur_name_space_ ? cur_name_space_->components.size() : 0; size_t new_size = ns ? ns->components.size() : 0; size_t common_prefix_size = 0; while (common_prefix_size < old_size && common_prefix_size < new_size && ns->components[common_prefix_size] == cur_name_space_->components[common_prefix_size]) { common_prefix_size++; } // Close cur_name_space in reverse order to reach the common prefix. // In the previous example, D then C are closed. for (size_t j = old_size; j > common_prefix_size; --j) { code_ += "} // namespace " + cur_name_space_->components[j - 1]; } if (old_size != common_prefix_size) { code_ += ""; } // open namespace parts to reach the ns namespace // in the previous example, E, then F, then G are opened for (auto j = common_prefix_size; j != new_size; ++j) { code_ += "namespace " + ns->components[j] + " {"; } if (new_size != common_prefix_size) { code_ += ""; } cur_name_space_ = ns; } }; } // namespace cpp bool GenerateCPP(const Parser &parser, const std::string &path, const std::string &file_name) { cpp::IDLOptionsCpp opts(parser.opts); // The '--cpp_std' argument could be extended (like ASAN): // Example: "flatc --cpp_std c++17:option1:option2". auto cpp_std = !opts.cpp_std.empty() ? opts.cpp_std : "C++0X"; std::transform(cpp_std.begin(), cpp_std.end(), cpp_std.begin(), ToUpper); if (cpp_std == "C++0X") { opts.g_cpp_std = cpp::CPP_STD_X0; opts.g_only_fixed_enums = false; } else if (cpp_std == "C++11") { // Use the standard C++11 code generator. 
opts.g_cpp_std = cpp::CPP_STD_11; opts.g_only_fixed_enums = true; } else if (cpp_std == "C++17") { opts.g_cpp_std = cpp::CPP_STD_17; // With c++17 generate strong enums only. opts.scoped_enums = true; // By default, prefixed_enums==true, reset it. opts.prefixed_enums = false; } else { LogCompilerError("Unknown value of the '--cpp-std' switch: " + opts.cpp_std); return false; } // The opts.scoped_enums has priority. opts.g_only_fixed_enums |= opts.scoped_enums; cpp::CppGenerator generator(parser, path, file_name, opts); return generator.generate(); } std::string CPPMakeRule(const Parser &parser, const std::string &path, const std::string &file_name) { const auto filebase = flatbuffers::StripPath(flatbuffers::StripExtension(file_name)); const auto included_files = parser.GetIncludedFilesRecursive(file_name); std::string make_rule = GeneratedFileName(path, filebase) + ": "; for (auto it = included_files.begin(); it != included_files.end(); ++it) { make_rule += " " + *it; } return make_rule; } } // namespace flatbuffers
1
17,693
Optional: This function is C++ specific and could be declared as `static`.
google-flatbuffers
java
@@ -831,13 +831,15 @@ public class ZMSClient implements Closeable {
      *
      * @param domainName name of the domain
      * @param members include all members for group roles as well
+     * @param tagKey query all roles with given tag name
+     * @param tagValue query all roles with given tag key and value
      * @return list of roles
      * @throws ZMSClientException in case of failure
      */
-    public Roles getRoles(String domainName, Boolean members) {
+    public Roles getRoles(String domainName, Boolean members, String tagKey, String tagValue) {
         updatePrincipal();
         try {
-            return client.getRoles(domainName, members);
+            return client.getRoles(domainName, members, tagKey, tagValue);
         } catch (ResourceException ex) {
             throw new ZMSClientException(ex.getCode(), ex.getData());
         } catch (Exception ex) {
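To make the new overload in the patch above concrete, here is a minimal, hedged usage sketch. The ZMS endpoint URL, the "sports" domain, and the env=prod tag key/value are illustrative placeholders (they are not part of the patch), and the sketch assumes the RDL-generated model accessors Roles.getList() and Role.getName() that Athenz normally provides; credential setup via addCredentials or an SSLContext constructor is omitted for brevity.

// Hedged usage sketch for the extended getRoles(domainName, members, tagKey, tagValue) overload.
// Endpoint, domain, and tag values are hypothetical; Roles.getList()/Role.getName() are assumed
// to be the usual RDL-generated accessors and are not defined in the patch itself.
import com.yahoo.athenz.zms.Role;
import com.yahoo.athenz.zms.Roles;
import com.yahoo.athenz.zms.ZMSClient;
import com.yahoo.athenz.zms.ZMSClientException;

public class RoleTagQueryExample {
    public static void main(String[] args) {
        // try-with-resources works because ZMSClient implements Closeable;
        // a real client would also call addCredentials() or pass an SSLContext.
        try (ZMSClient zmsClient = new ZMSClient("https://zms.athenz.example.com:4443/zms/v1")) {
            // Fetch roles in the "sports" domain tagged env=prod, including role members.
            Roles roles = zmsClient.getRoles("sports", Boolean.TRUE, "env", "prod");
            for (Role role : roles.getList()) {
                System.out.println(role.getName());
            }
        } catch (ZMSClientException ex) {
            System.err.println("getRoles failed: " + ex.getCode() + ": " + ex.getMessage());
        }
    }
}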
1
/* * Copyright 2016 Yahoo Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.yahoo.athenz.zms; import java.io.Closeable; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; import java.text.DateFormat; import java.text.SimpleDateFormat; import java.util.Date; import java.util.List; import java.util.Map; import java.util.concurrent.TimeUnit; import javax.net.ssl.SSLContext; import javax.ws.rs.client.Client; import javax.ws.rs.client.ClientBuilder; import com.fasterxml.jackson.databind.DeserializationFeature; import org.glassfish.jersey.client.ClientConfig; import com.fasterxml.jackson.jaxrs.json.JacksonJaxbJsonProvider; import com.fasterxml.jackson.jaxrs.json.JacksonJsonProvider; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.glassfish.jersey.apache.connector.ApacheConnectorProvider; import com.yahoo.athenz.auth.Authority; import com.yahoo.athenz.auth.AuthorityConsts; import com.yahoo.athenz.auth.Principal; import com.yahoo.athenz.auth.PrivateKeyStore; import com.yahoo.athenz.auth.impl.PrincipalAuthority; import com.yahoo.athenz.auth.impl.SimplePrincipal; import com.yahoo.athenz.auth.token.PrincipalToken; import com.yahoo.athenz.common.config.AthenzConfig; import com.yahoo.athenz.common.utils.SSLUtils; import com.yahoo.athenz.common.utils.SSLUtils.ClientSSLContextBuilder; import com.yahoo.rdl.JSON; import com.yahoo.rdl.Timestamp; public class ZMSClient implements Closeable { private String zmsUrl = null; private Principal principal = null; private boolean principalCheckDone = false; protected ZMSRDLGeneratedClient client = null; private static final String STR_ENV_ROOT = "ROOT"; private static final String STR_DEF_ROOT = "/home/athenz"; private static final String HTTP_RFC1123_DATE_FORMAT = "EEE, d MMM yyyy HH:mm:ss zzz"; public static final String ZMS_CLIENT_PROP_ATHENZ_CONF = "athenz.athenz_conf"; public static final String ZMS_CLIENT_PROP_READ_TIMEOUT = "athenz.zms.client.read_timeout"; public static final String ZMS_CLIENT_PROP_CONNECT_TIMEOUT = "athenz.zms.client.connect_timeout"; public static final String ZMS_CLIENT_PROP_CERT_ALIAS = "athenz.zms.client.cert_alias"; public static final String ZMS_CLIENT_PROP_KEYSTORE_PATH = "athenz.zms.client.keystore_path"; public static final String ZMS_CLIENT_PROP_KEYSTORE_TYPE = "athenz.zms.client.keystore_type"; public static final String ZMS_CLIENT_PROP_KEYSTORE_PASSWORD = "athenz.zms.client.keystore_password"; public static final String ZMS_CLIENT_PROP_KEYSTORE_PWD_APP_NAME = "athenz.zms.client.keystore_pwd_app_name"; public static final String ZMS_CLIENT_PROP_KEY_MANAGER_PASSWORD = "athenz.zms.client.keymanager_password"; public static final String ZMS_CLIENT_PROP_KEY_MANAGER_PWD_APP_NAME = "athenz.zms.client.keymanager_pwd_app_name"; public static final String ZMS_CLIENT_PROP_TRUSTSTORE_PATH = "athenz.zms.client.truststore_path"; public static final String ZMS_CLIENT_PROP_TRUSTSTORE_TYPE = "athenz.zms.client.truststore_type"; public static final String ZMS_CLIENT_PROP_TRUSTSTORE_PASSWORD = 
"athenz.zms.client.truststore_password"; public static final String ZMS_CLIENT_PROP_TRUSTSTORE_PWD_APP_NAME = "athenz.zms.client.truststore_pwd_app_name"; public static final String ZMS_CLIENT_PROP_PRIVATE_KEY_STORE_FACTORY_CLASS = "athenz.zms.client.private_keystore_factory_class"; public static final String ZMS_CLIENT_PROP_CLIENT_PROTOCOL = "athenz.zms.client.client_ssl_protocol"; public static final String ZMS_CLIENT_PKEY_STORE_FACTORY_CLASS = "com.yahoo.athenz.auth.impl.FilePrivateKeyStoreFactory"; public static final String ZMS_CLIENT_DEFAULT_CLIENT_SSL_PROTOCOL = "TLSv1.2"; private static final Logger LOGGER = LoggerFactory.getLogger(ZMSClient.class); private static final Authority PRINCIPAL_AUTHORITY = new PrincipalAuthority(); private static final PrivateKeyStore PRIVATE_KEY_STORE = loadServicePrivateKey(); static PrivateKeyStore loadServicePrivateKey() { String pkeyFactoryClass = System.getProperty(ZMS_CLIENT_PROP_PRIVATE_KEY_STORE_FACTORY_CLASS, ZMS_CLIENT_PKEY_STORE_FACTORY_CLASS); return SSLUtils.loadServicePrivateKey(pkeyFactoryClass); } /** * Constructs a new ZMSClient object with default settings. * The url for ZMS Server is automatically retrieved from the athenz * configuration file (zmsUrl field). The client can only be used * to retrieve objects from ZMS that do not require any authentication * otherwise addCredentials method must be used to set the principal identity. * Default read and connect timeout values are 30000ms (30sec). The application can * change these values by using the athenz.zms.client.read_timeout and * athenz.zms.client.connect_timeout system properties. The values specified * for timeouts must be in milliseconds. */ public ZMSClient() { initClient(null, null); } /** * Constructs a new ZMSClient object with the given ZMS Server url. The client * can only be used to retrieve objects from ZMS that do not require any authentication * otherwise addCredentials method must be used to set the principal identity. * Default read and connect timeout values are 30000ms (30sec). The application can * change these values by using the athenz.zms.client.read_timeout and * athenz.zms.client.connect_timeout system properties. The values specified * for timeouts must be in milliseconds. * * @param url ZMS Server url (e.g. https://server1.athenzcompany.com:4443/zms/v1) */ public ZMSClient(String url) { initClient(url, null); } /** * Constructs a new ZMSClient object with the given ZMS Server url and * given principal. The credentials from the principal object will be used * to set call the addCredentials method for the zms client object. * Default read and connect timeout values are 30000ms (30sec). The application can * change these values by using the athenz.zms.client.read_timeout and * athenz.zms.client.connect_timeout system properties. The values specified * for timeouts must be in milliseconds. * * @param url ZMS Server url (e.g. https://server1.athenzcompany.com:4443/zms/v1) * @param identity Principal object that includes credentials */ public ZMSClient(String url, Principal identity) { initClient(url, null); addCredentials(identity); } /** * Constructs a new ZMSClient object with default settings and given * principal object for credentials. The url for ZMS Server is * automatically retrieved from the athenz configuration file * (zmsUrl field). * Default read and connect timeout values are 30000ms (30sec). The application can * change these values by using the athenz.zms.client.read_timeout and * athenz.zms.client.connect_timeout system properties. 
The values specified * for timeouts must be in milliseconds. * * @param identity Principal object that includes credentials */ public ZMSClient(Principal identity) { initClient(null, null); addCredentials(identity); } /** * Constructs a new ZMSClient object with the given SSLContext object * and ZMS Server Url. Default read and connect timeout values are 30000ms (30sec). * The application can change these values by using the athenz.zms.client.read_timeout * and athenz.zms.client.connect_timeout system properties. The values specified * for timeouts must be in milliseconds. * * @param url ZMS Server url (e.g. https://server1.athenzcompany.com:4443/zms/v1) * @param sslContext SSLContext that includes service's private key and x.509 certificate * for authenticating requests */ public ZMSClient(String url, SSLContext sslContext) { // verify we have a valid ssl context specified if (sslContext == null) { throw new IllegalArgumentException("SSLContext object must be specified"); } initClient(url, sslContext); } /** * Close the ZMSClient object and release any allocated resources. */ public void close() { client.close(); } /** * Set new ZMS Client configuration property. This method calls * internal javax.ws.rs.client.Client client's property method. * If already set, the existing value of the property will be updated. * Setting a null value into a property effectively removes the property * from the property bag. * * @param name property name. * @param value property value. null value removes the property with the given name. */ public void setProperty(String name, Object value) { if (client != null) { client.setProperty(name, value); } } public void setZMSRDLGeneratedClient(ZMSRDLGeneratedClient client) { this.client = client; } /** * Set the client credentials using the specified header and token. * * @param credHeader authentication header name * @param credToken authentication credentials */ public void addCredentials(String credHeader, String credToken) { client.addCredentials(credHeader, credToken); } /** * Sets or overrides the current principal identity set in the client. * * @param identity Principal identity for authenticating requests * @return self ZMSClient object */ public ZMSClient addCredentials(Principal identity) { // make sure the principal has proper authority assigned if (identity == null || identity.getAuthority() == null) { throw new IllegalArgumentException("Principal must be valid object with authority field"); } // if we already have a principal set, we're going to // clear our credentials first if (principal != null) { client.addCredentials(principal.getAuthority().getHeader(), null); } // now we're going to update our principal and set credentials principal = identity; principalCheckDone = false; // we've already verified that our authority in the passed // identity object is valid final Authority authority = principal.getAuthority(); client.addCredentials(authority.getHeader(), principal.getCredentials()); // final check if the authority does not support authorization // by the zms server then it's most likely a user authority and // we need to get a principal token principalCheckDone = authority.allowAuthorization(); return this; } /** * Clear the principal identity set for the client. Unless a new principal is set * using the addCredentials method, the client can only be used to requests data * from the ZMS Server that doesn't require any authentication. 
* * @return self ZMSClient object */ public ZMSClient clearCredentials() { if (principal != null) { client.addCredentials(principal.getAuthority().getHeader(), null); principal = null; principalCheckDone = true; } return this; } /** * If the current principal is the user principal then request * a UserToken from ZMS and set the UserToken as the principal * identity for authentication. */ private void updatePrincipal() { /* if the check has already been done then we have nothing to do */ if (principalCheckDone) { return; } /* make sure we have a principal specified */ if (principal == null) { principalCheckDone = true; return; } /* so at this point we have some credentials specified * but it's not the principal authority so we're going * to ask ZMS to return a UserToken for us. */ String userName = principal.getName(); UserToken userToken = getUserToken(userName, null, true); clearCredentials(); client.addCredentials(userToken.getHeader(), userToken.getToken()); principalCheckDone = true; } String lookupZMSUrl() { String rootDir = System.getenv(STR_ENV_ROOT); if (rootDir == null) { rootDir = STR_DEF_ROOT; } String confFileName = System.getProperty(ZMS_CLIENT_PROP_ATHENZ_CONF, rootDir + "/conf/athenz/athenz.conf"); String url = null; try { Path path = Paths.get(confFileName); AthenzConfig conf = JSON.fromBytes(Files.readAllBytes(path), AthenzConfig.class); url = conf.getZmsUrl(); } catch (Exception ex) { LOGGER.error("Unable to extract ZMS Url from {} exc: {}", confFileName, ex.getMessage()); } return url; } ClientBuilder getClientBuilder() { return ClientBuilder.newBuilder(); } /** * Initialize the client for class constructors * * @param url ZMS Server url * @param sslContext SSLContext for service authentication */ private void initClient(String url, SSLContext sslContext) { /* if we have no url specified then we're going to retrieve * the value from our configuration package */ if (url == null) { zmsUrl = lookupZMSUrl(); } else { zmsUrl = url; } /* verify if the url is ending with /zms/v1 and if it's * not we'll automatically append it */ if (zmsUrl != null && !zmsUrl.isEmpty()) { if (!zmsUrl.endsWith("/zms/v1")) { if (zmsUrl.charAt(zmsUrl.length() - 1) != '/') { zmsUrl += '/'; } zmsUrl += "zms/v1"; } } /* determine our read and connect timeouts */ int readTimeout = Integer.parseInt(System.getProperty(ZMS_CLIENT_PROP_READ_TIMEOUT, "30000")); int connectTimeout = Integer.parseInt(System.getProperty(ZMS_CLIENT_PROP_CONNECT_TIMEOUT, "30000")); /* if we are not given a url then use the default value */ if (sslContext == null) { sslContext = createSSLContext(); } ClientBuilder builder = getClientBuilder(); if (sslContext != null) { builder = builder.sslContext(sslContext); } final JacksonJsonProvider jacksonJsonProvider = new JacksonJaxbJsonProvider() .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); ClientConfig clientConfig = new ClientConfig(jacksonJsonProvider); clientConfig.connectorProvider(new ApacheConnectorProvider()); // JerseyClientBuilder::withConfig() replaces the existing config with the new client // config. Hence the client config should be added to the builder before the timeouts. // Otherwise the timeout settings would be overridden. Client rsClient = builder .withConfig(clientConfig) .connectTimeout(connectTimeout, TimeUnit.MILLISECONDS) .readTimeout(readTimeout, TimeUnit.MILLISECONDS) .build(); client = new ZMSRDLGeneratedClient(zmsUrl, rsClient); } SSLContext createSSLContext() { // to create the SSL context we must have the keystore path // specified. 
If it's not specified, then we are not going // to create our ssl context String keyStorePath = System.getProperty(ZMS_CLIENT_PROP_KEYSTORE_PATH); if (keyStorePath == null || keyStorePath.isEmpty()) { return null; } String keyStoreType = System.getProperty(ZMS_CLIENT_PROP_KEYSTORE_TYPE); String keyStorePwd = System.getProperty(ZMS_CLIENT_PROP_KEYSTORE_PASSWORD); char[] keyStorePassword = null; if (null != keyStorePwd && !keyStorePwd.isEmpty()) { keyStorePassword = keyStorePwd.toCharArray(); } String keyStorePasswordAppName = System.getProperty(ZMS_CLIENT_PROP_KEYSTORE_PWD_APP_NAME); char[] keyManagerPassword = null; String keyManagerPwd = System.getProperty(ZMS_CLIENT_PROP_KEY_MANAGER_PASSWORD); if (null != keyManagerPwd && !keyManagerPwd.isEmpty()) { keyManagerPassword = keyManagerPwd.toCharArray(); } String keyManagerPasswordAppName = System.getProperty(ZMS_CLIENT_PROP_KEY_MANAGER_PWD_APP_NAME); // truststore String trustStorePath = System.getProperty(ZMS_CLIENT_PROP_TRUSTSTORE_PATH); String trustStoreType = System.getProperty(ZMS_CLIENT_PROP_TRUSTSTORE_TYPE); String trustStorePwd = System.getProperty(ZMS_CLIENT_PROP_TRUSTSTORE_PASSWORD); char[] trustStorePassword = null; if (null != trustStorePwd && !trustStorePwd.isEmpty()) { trustStorePassword = trustStorePwd.toCharArray(); } String trustStorePasswordAppName = System.getProperty(ZMS_CLIENT_PROP_TRUSTSTORE_PWD_APP_NAME); // alias and protocol details String certAlias = System.getProperty(ZMS_CLIENT_PROP_CERT_ALIAS); String clientProtocol = System.getProperty(ZMS_CLIENT_PROP_CLIENT_PROTOCOL, ZMS_CLIENT_DEFAULT_CLIENT_SSL_PROTOCOL); ClientSSLContextBuilder builder = new SSLUtils.ClientSSLContextBuilder(clientProtocol) .privateKeyStore(PRIVATE_KEY_STORE).keyStorePath(keyStorePath); builder.certAlias(certAlias); if (null != keyStoreType && !keyStoreType.isEmpty()) { builder.keyStoreType(keyStoreType); } builder.keyStorePassword(keyStorePassword); builder.keyStorePasswordAppName(keyStorePasswordAppName); builder.keyManagerPassword(keyManagerPassword); builder.keyManagerPasswordAppName(keyManagerPasswordAppName); builder.trustStorePath(trustStorePath); if (null != trustStoreType && !trustStoreType.isEmpty()) { builder.trustStoreType(trustStoreType); } builder.trustStorePassword(trustStorePassword); builder.trustStorePasswordAppName(trustStorePasswordAppName); return builder.build(); } public String getZmsUrl() { return zmsUrl; } /** * Generate a role name as expected by ZMS Server can be used to * set the role object's name field (e.g. role.setName(name)) * * @param domain name of the domain * @param role name of the role * @return full role name */ public String generateRoleName(String domain, String role) { return domain + AuthorityConsts.ROLE_SEP + role; } /** * Generate a policy name as expected by ZMS Server can be used to * set the policy object's name field (e.g. policy.setName(name)) * * @param domain name of the domain * @param policy name of the policy * @return full policy name */ public String generatePolicyName(String domain, String policy) { return domain + ":policy." + policy; } /** * Generate a service name as expected by ZMS Server can be used to * set the service identity object's name field * (e.g. serviceIdentity.setName(name)) * * @param domain name of the domain * @param service name of the service * @return full service identity name */ public String generateServiceIdentityName(String domain, String service) { return domain + "." 
+ service; } /** * Retrieve the specified domain object * * @param domain name of the domain to be retrieved * @return Domain object * @throws ZMSClientException in case of failure */ public Domain getDomain(String domain) { updatePrincipal(); try { return client.getDomain(domain); } catch (ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } /** * Retrieve the specified singed domain object. The domain * object includes all roles, policies, services and * domain attributes. The domain data is base64url encoded * in the payload field based on JWS RFC 7515 * https://tools.ietf.org/html/rfc7515#section-7.2.2 * * @param domain name of the domain to be retrieved * @return JWSDomain object * @throws ZMSClientException in case of failure */ public JWSDomain getJWSDomain(String domain) { updatePrincipal(); try { return client.getJWSDomain(domain); } catch (ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } /** * Retrieve the list of domains provisioned on the ZMS Server * * @return list of Domains * @throws ZMSClientException in case of failure */ public DomainList getDomainList() { return getDomainList(null, null, null, null, null, null, null, null); } /** * Retrieve the list of domains provisioned on the ZMS Server * filters based on the specified arguments * * @param limit number of domain objects to return * @param skip exclude all the domains including the specified one from the return set * @param prefix return domains starting with this value * @param depth maximum depth of the domain (0 - top level domains only) * @param awsAccount return domain that has the specified aws account name. If account name * is specified all other optional attributes are ignored since there must be * only one domain matching the specified account name. * @param productId return domain that has the specified product id. If product id * is specified all other optional attributes are ignored since there must be * only one domain matching the specified product id. * @param modifiedSince return domains only modified since this date * @return list of domain names * @throws ZMSClientException in case of failure */ public DomainList getDomainList(Integer limit, String skip, String prefix, Integer depth, String awsAccount, Integer productId, Date modifiedSince) { return getDomainList(limit, skip, prefix, depth, awsAccount, productId, null, modifiedSince); } /** * Retrieve the list of domains provisioned on the ZMS Server * filters based on the specified arguments * * @param limit number of domain objects to return * @param skip exclude all the domains including the specified one from the return set * @param prefix return domains starting with this value * @param depth maximum depth of the domain (0 - top level domains only) * @param awsAccount return domain that has the specified aws account name. If account name * is specified all other optional attributes are ignored since there must be * only one domain matching the specified account name. * @param productId return domain that has the specified product id. If product id * is specified all other optional attributes are ignored since there must be * only one domain matching the specified product id. * @param azureSubscription return domain that has the specified azure subscription id. 
If subscription * id is specified all other optional attributes are ignored since there must be * only one domain matching the specified subscription id. * @param modifiedSince return domains only modified since this date * @return list of domain names * @throws ZMSClientException in case of failure */ public DomainList getDomainList(Integer limit, String skip, String prefix, Integer depth, String awsAccount, Integer productId, String azureSubscription, Date modifiedSince) { updatePrincipal(); String modSinceStr = null; if (modifiedSince != null) { DateFormat df = new SimpleDateFormat(HTTP_RFC1123_DATE_FORMAT); modSinceStr = df.format(modifiedSince); } try { return client.getDomainList(limit, skip, prefix, depth, awsAccount, productId, null, null, azureSubscription, modSinceStr); } catch (ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } /** * Retrieve the list of domains provisioned on the ZMS Server * filters based on the specified arguments * * @param roleMember name of the principal * @param roleName name of the role where the principal is a member of * @return list of domain names * @throws ZMSClientException in case of failure */ public DomainList getDomainList(String roleMember, String roleName) { updatePrincipal(); try { return client.getDomainList(null, null, null, null, null, null, roleMember, roleName, null, null); } catch (ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } /** * Create/Update Top level domain. If updating a domain the provided * object must contain all attributes as it will replace the full domain * object configured on the server (not just some of the attributes). * * @param auditRef string containing audit specification or ticket number * @param detail TopLevelDomain object to be created in ZMS * @return created Domain object * @throws ZMSClientException in case of failure */ public Domain postTopLevelDomain(String auditRef, TopLevelDomain detail) { updatePrincipal(); try { return client.postTopLevelDomain(auditRef, detail); } catch (ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } /** * Create/Update a sub-domain in the specified domain. If updating a * subdomain the provided object must contain all attributes as it will * replace the full domain object configured on the server (not just some * of the attributes). * * @param parent name of the parent domain * @param auditRef string containing audit specification or ticket number * @param detail SubDomain object to be created in ZMS * @return created Domain object * @throws ZMSClientException in case of failure */ public Domain postSubDomain(String parent, String auditRef, SubDomain detail) { updatePrincipal(); try { return client.postSubDomain(parent, auditRef, detail); } catch (ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } /** * Create a top-level user-domain - this is user.&lt;userid&gt; domain. 
* * @param name domain to be created, this is the &lt;userid&gt; * @param auditRef string containing audit specification or ticket number * @param detail UserDomain object to be created in ZMS * @return created Domain object * @throws ZMSClientException in case of failure */ public Domain postUserDomain(String name, String auditRef, UserDomain detail) { updatePrincipal(); try { return client.postUserDomain(name, auditRef, detail); } catch (ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } /** * Delete a top level domain * * @param name domain name to be deleted from ZMS * @param auditRef string containing audit specification or ticket number * @throws ZMSClientException in case of failure */ public void deleteTopLevelDomain(String name, String auditRef) { updatePrincipal(); try { client.deleteTopLevelDomain(name, auditRef); } catch (ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } /** * Delete a sub-domain * * @param parent name of the parent domain * @param name sub-domain to be deleted * @param auditRef string containing audit specification or ticket number * @throws ZMSClientException in case of failure */ public void deleteSubDomain(String parent, String name, String auditRef) { updatePrincipal(); try { client.deleteSubDomain(parent, name, auditRef); } catch (ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } /** * Delete a top-level user-domain (user.&lt;userid&gt;) * * @param name domain to be deleted, this is the &lt;userid&gt; * @param auditRef string containing audit specification or ticket number */ public void deleteUserDomain(String name, String auditRef) { updatePrincipal(); try { client.deleteUserDomain(name, auditRef); } catch (ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } /** * Set the domain meta parameters * * @param name domain name to be modified * @param auditRef string containing audit specification or ticket number * @param detail meta parameters to be set on the domain */ public void putDomainMeta(String name, String auditRef, DomainMeta detail) { updatePrincipal(); try { client.putDomainMeta(name, auditRef, detail); } catch (ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } /** * Set the domain system meta parameters * * @param name domain name to be modified * @param attribute system attribute being modified in this request * @param auditRef string containing audit specification or ticket number * @param detail meta parameters to be set on the domain */ public void putDomainSystemMeta(String name, String attribute, String auditRef, DomainMeta detail) { updatePrincipal(); try { client.putDomainSystemMeta(name, attribute, auditRef, detail); } catch (ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } /** * Retrieve the list of roles defined 
for the specified domain * * @param domainName name of the domain * @return list of role names * @throws ZMSClientException in case of failure */ public RoleList getRoleList(String domainName) { updatePrincipal(); try { return client.getRoleList(domainName, null, null); } catch (ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } /** * Retrieve the list of roles defined for the specified domain * filtered based on the parameters specified * * @param domainName name of the domain * @param limit number of roles to return * @param skip exclude all the roles including the specified one from the return set * @return list of role names * @throws ZMSClientException in case of failure */ public RoleList getRoleList(String domainName, Integer limit, String skip) { updatePrincipal(); try { return client.getRoleList(domainName, limit, skip); } catch (ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } /** * Retrieve the list of roles defined for the specified domain. The roles * will contain their attributes and, if specified, the list of members. * * @param domainName name of the domain * @param members include all members for group roles as well * @return list of roles * @throws ZMSClientException in case of failure */ public Roles getRoles(String domainName, Boolean members) { updatePrincipal(); try { return client.getRoles(domainName, members); } catch (ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } /** * Retrieve the specified role * * @param domainName name of the domain * @param roleName name of the role * @return role object * @throws ZMSClientException in case of failure */ public Role getRole(String domainName, String roleName) { return getRole(domainName, roleName, false, false, false); } /** * Retrieve the specified role * * @param domainName name of the domain * @param roleName name of the role * @param auditLog include audit log for the role changes in the response * @return role object * @throws ZMSClientException in case of failure */ public Role getRole(String domainName, String roleName, boolean auditLog) { return getRole(domainName, roleName, auditLog, false, false); } /** * Retrieve the specified role * * @param domainName name of the domain * @param roleName name of the role * @param auditLog include audit log for the role changes in the response * @param expand if the requested role is a delegated/trust role, this flag * will instruct the ZMS server to automatically retrieve the members of the * role from the delegated domain and return as part of the role object * @return role object * @throws ZMSClientException in case of failure */ public Role getRole(String domainName, String roleName, boolean auditLog, boolean expand) { return getRole(domainName, roleName, auditLog, expand, false); } /** * Retrieve the specified role * * @param domainName name of the domain * @param roleName name of the role * @param auditLog include audit log for the role changes in the response * @param expand if the requested role is a delegated/trust role, this flag * will instruct the ZMS server to automatically retrieve the members of the * role from the delegated domain and return as part 
of the role object * @param pending if this flag is set, then all members for that role will be retrieved * including pending members * @return role object * @throws ZMSClientException in case of failure */ public Role getRole(String domainName, String roleName, boolean auditLog, boolean expand, boolean pending) { updatePrincipal(); try { return client.getRole(domainName, roleName, auditLog, expand, pending); } catch (ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } /** * Create/Update a new role in the specified domain. If updating a role * the provided object must contain all attributes as it will replace * the full role object configured on the server (not just some of the attributes). * * @param domainName name of the domain * @param roleName name of the role * @param auditRef string containing audit specification or ticket number * @param role role object to be added to the domain * @throws ZMSClientException in case of failure */ public void putRole(String domainName, String roleName, String auditRef, Role role) { updatePrincipal(); try { client.putRole(domainName, roleName, auditRef, role); } catch (ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } /** * Delete the specified role from domain * * @param domainName name of the domain * @param roleName name of the role * @param auditRef string containing audit specification or ticket number * @throws ZMSClientException in case of failure */ public void deleteRole(String domainName, String roleName, String auditRef) { updatePrincipal(); try { client.deleteRole(domainName, roleName, auditRef); } catch (ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } /** * Get membership details for the specified member in the given role * in a specified domain * * @param domainName name of the domain * @param roleName name of the role * @param memberName name of the member * @return Membership object * @throws ZMSClientException in case of failure */ public Membership getMembership(String domainName, String roleName, String memberName) { return getMembership(domainName, roleName, memberName, null); } /** * Get membership details for the specified member in the given role * in a specified domain with an optional expiration * * @param domainName name of the domain * @param roleName name of the role * @param memberName name of the member * @param expiration member expiration * @return Membership object * @throws ZMSClientException in case of failure */ public Membership getMembership(String domainName, String roleName, String memberName, String expiration) { updatePrincipal(); try { return client.getMembership(domainName, roleName, memberName, expiration); } catch (ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } /** * Get all domain members with overdue review dates * * @param domainName name of the domain * @return Domain members with overdue review dates */ public DomainRoleMembers getOverdueReview(String domainName) { updatePrincipal(); try { return client.getOverdueReview(domainName); } catch 
(ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } /** * Add a new member in the specified role. * * @param domainName name of the domain * @param roleName name of the role * @param memberName name of the member to be added * @param auditRef string containing audit specification or ticket number * @throws ZMSClientException in case of failure */ public void putMembership(String domainName, String roleName, String memberName, String auditRef) { putMembershipWithReview(domainName, roleName, memberName, null, null, auditRef); } /** * Add a temporary member in the specified role with expiration * * @param domainName name of the domain * @param roleName name of the role * @param memberName name of the member to be added * @param expiration timestamp when this membership will expire (optional) * @param auditRef string containing audit specification or ticket number * @throws ZMSClientException in case of failure */ public void putMembership(String domainName, String roleName, String memberName, Timestamp expiration, String auditRef) { putMembershipWithReview(domainName, roleName, memberName, expiration, null, auditRef); } /** * Add a member in the specified role with optional expiration and optional review * * @param domainName name of the domain * @param roleName name of the role * @param memberName name of the member to be added * @param expiration timestamp when this membership will expire (optional) * @param review timestamp when this membership will require review (optional) * @param auditRef string containing audit specification or ticket number * @throws ZMSClientException in case of failure */ public void putMembershipWithReview(String domainName, String roleName, String memberName, Timestamp expiration, Timestamp review, String auditRef) { Membership mbr = new Membership().setRoleName(roleName) .setMemberName(memberName).setExpiration(expiration).setReviewReminder(review) .setIsMember(true); updatePrincipal(); try { client.putMembership(domainName, roleName, memberName, auditRef, mbr); } catch (ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } /** * Remove the specified member from the role * * @param domainName name of the domain * @param roleName name of the role * @param memberName name of the member to be removed * @param auditRef string containing audit specification or ticket number * @throws ZMSClientException in case of failure */ public void deleteMembership(String domainName, String roleName, String memberName, String auditRef) { updatePrincipal(); try { client.deleteMembership(domainName, roleName, memberName, auditRef); } catch (ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } /** * Remove the specified pending member from the role * * @param domainName name of the domain * @param roleName name of the role * @param memberName name of the pending member to be removed * @param auditRef string containing audit specification or ticket number * @throws ZMSClientException in case of failure */ public void deletePendingMembership(String domainName, String roleName, String memberName, String auditRef) { updatePrincipal(); try { client.deletePendingMembership(domainName, 
roleName, memberName, auditRef); } catch (ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } /** * Get list of users defined in the system * * @return list of user names * @throws ZMSClientException in case of failure */ public UserList getUserList() { updatePrincipal(); try { return client.getUserList(); } catch (ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } /** * Remove the specified user from the Athenz system. This will delete any * user.{name} domain plus all of its subdomains (if they exist) and remove * the user from any role in the system. This command requires authorization * from the Athenz sys.auth domain (delete action on resource user). * * @param name name of the user * @param auditRef string containing audit specification or ticket number * @throws ZMSClientException in case of failure */ public void deleteUser(String name, String auditRef) { updatePrincipal(); try { client.deleteUser(name, auditRef); } catch (ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } /** * Retrieve the list of policies defined for the specified domain. The policies * will contain their attributes and, if specified, the list of assertions. * * @param domainName name of the domain * @param assertions include all assertions for policies as well * @return list of policies * @throws ZMSClientException in case of failure */ public Policies getPolicies(String domainName, Boolean assertions) { updatePrincipal(); try { return client.getPolicies(domainName, assertions); } catch (ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } /** * Get list of policies defined in the specified domain * * @param domainName name of the domain * @return list of policy names * @throws ZMSClientException in case of failure */ public PolicyList getPolicyList(String domainName) { updatePrincipal(); try { return client.getPolicyList(domainName, null, null); } catch (ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } /** * Get list of policies defined in the specified domain filtered * based on the specified arguments * * @param domainName name of the domain * @param limit number of policies to return * @param skip exclude all the policies including the specified one from the return set * @return list of policy names * @throws ZMSClientException in case of failure */ public PolicyList getPolicyList(String domainName, Integer limit, String skip) { updatePrincipal(); try { return client.getPolicyList(domainName, limit, skip); } catch (ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } /** * Return the specified policy object assertion * * @param domainName name of the domain * @param policyName name of the policy * @param assertionId the id of the assertion to be retrieved * @return Assertion object * @throws
ZMSClientException in case of failure */ public Assertion getAssertion(String domainName, String policyName, Long assertionId) { updatePrincipal(); try { return client.getAssertion(domainName, policyName, assertionId); } catch (ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } /** * Add the specified assertion to the specified policy * * @param domainName name of the domain * @param policyName name of the policy * @param auditRef string containing audit specification or ticket number * @param assertion Assertion object to be added to the policy * @return updated assertion object that includes the server assigned id * @throws ZMSClientException in case of failure */ public Assertion putAssertion(String domainName, String policyName, String auditRef, Assertion assertion) { updatePrincipal(); try { return client.putAssertion(domainName, policyName, auditRef, assertion); } catch (ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } /** * Delete specified assertion from the given policy * * @param domainName name of the domain * @param policyName name of the policy * @param assertionId the id of the assertion to be deleted * @param auditRef string containing audit specification or ticket number * @throws ZMSClientException in case of failure */ public void deleteAssertion(String domainName, String policyName, Long assertionId, String auditRef) { updatePrincipal(); try { client.deleteAssertion(domainName, policyName, assertionId, auditRef); } catch (ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } /** * Return the specified policy object * * @param domainName name of the domain * @param policyName name of the policy to be retrieved * @return Policy object * @throws ZMSClientException in case of failure */ public Policy getPolicy(String domainName, String policyName) { updatePrincipal(); try { return client.getPolicy(domainName, policyName); } catch (ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } /** * Create/Update a new policy in the specified domain. If updating a policy * the provided object must contain all attributes as it will replace the * full policy object configured on the server (not just some of the attributes). 
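 * <p>
 * Illustrative sketch only (domain, policy and role names are made up, and the
 * {@code Policy}/{@code Assertion} setters are assumed standard generated accessors):
 * <pre>{@code
 * Assertion assertion = new Assertion()
 *         .setRole("sports:role.readers")
 *         .setResource("sports:articles")
 *         .setAction("read")
 *         .setEffect(AssertionEffect.ALLOW);
 * Policy policy = new Policy()
 *         .setName("sports:policy.readers")
 *         .setAssertions(Collections.singletonList(assertion));
 * zmsClient.putPolicy("sports", "readers", "ticket-1002", policy);
 * }</pre>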
* * @param domainName name of the domain * @param policyName name of the policy * @param auditRef string containing audit specification or ticket number * @param policy Policy object with details * @throws ZMSClientException in case of failure */ public void putPolicy(String domainName, String policyName, String auditRef, Policy policy) { updatePrincipal(); try { client.putPolicy(domainName, policyName, auditRef, policy); } catch (ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } /** * Delete specified policy from a domain * * @param domainName name of the domain * @param policyName name of the policy to be deleted * @param auditRef string containing audit specification or ticket number * @throws ZMSClientException in case of failure */ public void deletePolicy(String domainName, String policyName, String auditRef) { updatePrincipal(); try { client.deletePolicy(domainName, policyName, auditRef); } catch (ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } /** * Create/Update a new service in the specified domain. If updating a service * the provided object must contain all attributes as it will replace the * full service object configured on the server (not just some of the attributes). * * @param domainName name of the domain * @param serviceName name of the service * @param auditRef string containing audit specification or ticket number * @param service ServiceIdentity object with all service details * @throws ZMSClientException in case of failure */ public void putServiceIdentity(String domainName, String serviceName, String auditRef, ServiceIdentity service) { updatePrincipal(); try { client.putServiceIdentity(domainName, serviceName, auditRef, service); } catch (ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } /** * Set the service system meta parameters * * @param domainName name of the domain * @param serviceName name of the service * @param attribute service meta attribute being modified in this request * @param auditRef string containing audit specification or ticket number * @param meta meta parameters to be set on the service * @throws ZMSClientException in case of failure */ public void putServiceIdentitySystemMeta(String domainName, String serviceName, String attribute, String auditRef, ServiceIdentitySystemMeta meta) { updatePrincipal(); try { client.putServiceIdentitySystemMeta(domainName, serviceName, attribute, auditRef, meta); } catch (ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } /** * Retrieve the specified service object from a domain * * @param domainName name of the domain * @param serviceName name of the service to be retrieved * @return ServiceIdentity object * @throws ZMSClientException in case of failure */ public ServiceIdentity getServiceIdentity(String domainName, String serviceName) { updatePrincipal(); try { return client.getServiceIdentity(domainName, serviceName); } catch (ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new 
ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } /** * Delete the specified service from a domain * * @param domainName name of the domain * @param serviceName name of the service to be deleted * @param auditRef string containing audit specification or ticket number * @throws ZMSClientException in case of failure */ public void deleteServiceIdentity(String domainName, String serviceName, String auditRef) { updatePrincipal(); try { client.deleteServiceIdentity(domainName, serviceName, auditRef); } catch (ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } /** * Retrieve the list of services defined for the specified domain. The services * will contain their attributes and, if specified, the list of publickeys and hosts. * * @param domainName name of the domain * @param publicKeys include all public keys for services as well * @param hosts include all configured hosts for services as well * @return list of services * @throws ZMSClientException in case of failure */ public ServiceIdentities getServiceIdentities(String domainName, Boolean publicKeys, Boolean hosts) { updatePrincipal(); try { return client.getServiceIdentities(domainName, publicKeys, hosts); } catch (ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } /** * Retrieve the full list of services defined in a domain * * @param domainName name of the domain * @return list of all service names * @throws ZMSClientException in case of failure */ public ServiceIdentityList getServiceIdentityList(String domainName) { return getServiceIdentityList(domainName, null, null); } /** * Retrieve the list of services defined in a domain filtered * based on the specified arguments * * @param domainName name of the domain * @param limit number of services to return * @param skip exclude all the services including the specified one from the return set * @return list of service names * @throws ZMSClientException in case of failure */ public ServiceIdentityList getServiceIdentityList(String domainName, Integer limit, String skip) { updatePrincipal(); try { return client.getServiceIdentityList(domainName, limit, skip); } catch (ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } /** * Retrieve the specified public key from the given service object * * @param domainName name of the domain * @param serviceName name of the service * @param keyId the identifier of the public key to be retrieved * @return PublicKeyEntry object * @throws ZMSClientException in case of failure */ public PublicKeyEntry getPublicKeyEntry(String domainName, String serviceName, String keyId) { updatePrincipal(); try { return client.getPublicKeyEntry(domainName, serviceName, keyId); } catch (ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } /** * Update or add (if doesn't already exist) the specified public key in the service object * * @param domainName name of the domain * @param serviceName name of the service * @param keyId the identifier of the public key to be updated * @param auditRef string containing audit 
specification or ticket number * @param publicKeyEntry that contains the public key details * @throws ZMSClientException in case of failure */ public void putPublicKeyEntry(String domainName, String serviceName, String keyId, String auditRef, PublicKeyEntry publicKeyEntry) { updatePrincipal(); try { client.putPublicKeyEntry(domainName, serviceName, keyId, auditRef, publicKeyEntry); } catch (ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } /** * Delete the specified public key from the service object. If the key doesn't exist then * it is treated as a successful operation and no exception will be thrown. * * @param domainName name of the domain * @param serviceName name of the service * @param keyId the identifier of the public key to be deleted * @param auditRef string containing audit specification or ticket number * @throws ZMSClientException in case of failure */ public void deletePublicKeyEntry(String domainName, String serviceName, String keyId, String auditRef) { updatePrincipal(); try { client.deletePublicKeyEntry(domainName, serviceName, keyId, auditRef); } catch (ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } /** * Create/update an entity object in ZMS * * @param domainName name of the domain * @param entityName name of the entity * @param auditRef string containing audit specification or ticket number * @param entity entity object with details * @throws ZMSClientException in case of failure */ public void putEntity(String domainName, String entityName, String auditRef, Entity entity) { updatePrincipal(); try { client.putEntity(domainName, entityName, auditRef, entity); } catch (ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } /** * Retrieve the specified entity from the ZMS Server * * @param domainName name of the domain * @param entityName name of the entity * @return Entity object with details * @throws ZMSClientException in case of failure */ public Entity getEntity(String domainName, String entityName) { updatePrincipal(); try { return client.getEntity(domainName, entityName); } catch (ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } /** * Delete the specified entity from the ZMS Server * * @param domainName name of the domain * @param entityName name of the entity * @param auditRef string containing audit specification or ticket number * @throws ZMSClientException in case of failure */ public void deleteEntity(String domainName, String entityName, String auditRef) { updatePrincipal(); try { client.deleteEntity(domainName, entityName, auditRef); } catch (ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } /** * Retrieve the list of entities defined for the specified domain * * @param domainName name of the domain * @return list of entity names * @throws ZMSClientException in case of failure */ public EntityList getEntityList(String domainName) { updatePrincipal(); try { return 
client.getEntityList(domainName); } catch (ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } /** * Register a new provider service for a given tenant domain * * @param tenantDomain name of the tenant domain * @param providerService name of the provider service * format: provider-domain-name.provider-service-name, ex: "sports.storage" * @param auditRef string containing audit specification or ticket number * @param tenant Tenancy object with tenant details * @throws ZMSClientException in case of failure */ public void putTenancy(String tenantDomain, String providerService, String auditRef, Tenancy tenant) { updatePrincipal(); try { client.putTenancy(tenantDomain, providerService, auditRef, tenant); } catch (ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } /** * Delete the specified provider service from a tenant domain * * @param tenantDomain name of the tenant domain * @param providerService name of the provider service, * format: provider-domain-name.provider-service-name, ex: "sports.storage" * @param auditRef string containing audit specification or ticket number * @throws ZMSClientException in case of failure */ public void deleteTenancy(String tenantDomain, String providerService, String auditRef) { updatePrincipal(); try { client.deleteTenancy(tenantDomain, providerService, auditRef); } catch (ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } /** * Register a new tenant domain for the provider service * * @param providerDomain provider domain name * @param providerService provider service name * @param tenantDomain name of the tenant domain * @param auditRef string containing audit specification or ticket number * @param tenant Tenancy object with tenant details * @throws ZMSClientException in case of failure */ public void putTenant(String providerDomain, String providerService, String tenantDomain, String auditRef, Tenancy tenant) { updatePrincipal(); try { client.putTenant(providerDomain, providerService, tenantDomain, auditRef, tenant); } catch (ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } /** * Delete the specified tenant from provider service * * @param providerDomain provider domain name * @param providerService provider service name * @param tenantDomain name of the tenant domain * @param auditRef string containing audit specification or ticket number * @throws ZMSClientException in case of failure */ public void deleteTenant(String providerDomain, String providerService, String tenantDomain, String auditRef) { updatePrincipal(); try { client.deleteTenant(providerDomain, providerService, tenantDomain, auditRef); } catch (ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } /** * Create tenant roles for the specified tenant resource group. 
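 * <p>
 * Rough usage sketch (all names below are hypothetical; the {@code TenantResourceGroupRoles}
 * and {@code TenantRoleAction} setters are assumed standard generated accessors):
 * <pre>{@code
 * TenantResourceGroupRoles tenantRoles = new TenantResourceGroupRoles()
 *         .setDomain("sports")          // provider domain
 *         .setService("storage")        // provider service
 *         .setTenant("weather")         // tenant domain
 *         .setResourceGroup("backups")
 *         .setRoles(Collections.singletonList(
 *                 new TenantRoleAction().setRole("writer").setAction("write")));
 * zmsClient.putTenantResourceGroupRoles("sports", "storage", "weather",
 *         "backups", "ticket-1003", tenantRoles);
 * }</pre>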
* * @param providerDomain name of the provider domain * @param providerServiceName name of the provider service * @param tenantDomain name of the tenant's domain * @param resourceGroup name of the resource group * @param auditRef string containing audit specification or ticket number * @param tenantRoles Tenant roles * @throws ZMSClientException in case of failure */ public void putTenantResourceGroupRoles(String providerDomain, String providerServiceName, String tenantDomain, String resourceGroup, String auditRef, TenantResourceGroupRoles tenantRoles) { updatePrincipal(); try { client.putTenantResourceGroupRoles(providerDomain, providerServiceName, tenantDomain, resourceGroup, auditRef, tenantRoles); } catch (ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } /** * Retrieve the list of tenant roles defined for a tenant resource group in a domain * * @param providerDomain name of the provider domain * @param providerServiceName name of the provider service * @param tenantDomain name of the tenant's domain * @param resourceGroup name of the resource group * @return list of tenant roles * @throws ZMSClientException in case of failure */ public TenantResourceGroupRoles getTenantResourceGroupRoles(String providerDomain, String providerServiceName, String tenantDomain, String resourceGroup) { updatePrincipal(); try { return client.getTenantResourceGroupRoles(providerDomain, providerServiceName, tenantDomain, resourceGroup); } catch (ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } /** * Delete tenant roles for the specified tenant resource group in a domain * * @param providerDomain name of the provider domain * @param providerServiceName name of the provider service * @param tenantDomain name of tenant's domain * @param resourceGroup name of the resource group * @param auditRef string containing audit specification or ticket number * @throws ZMSClientException in case of failure */ public void deleteTenantResourceGroupRoles(String providerDomain, String providerServiceName, String tenantDomain, String resourceGroup, String auditRef) { updatePrincipal(); try { client.deleteTenantResourceGroupRoles(providerDomain, providerServiceName, tenantDomain, resourceGroup, auditRef); } catch (ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } /** * Requests the ZMS to indicate whether or not the specific request for the * specified resource with authentication details will be granted or not. * * @param action value of the action to be carried out (e.g. "UPDATE", "DELETE") * @param resource resource name. 
Resource is defined as {DomainName}:{Entity}" * @param trustDomain (optional) if the access checks involves cross domain check only * check the specified trusted domain and ignore all others * @return Access object indicating whether or not the request will be granted or not * @throws ZMSClientException in case of failure */ public Access getAccess(String action, String resource, String trustDomain) { return getAccess(action, resource, trustDomain, null); } /** * Requests the ZMS to indicate whether or not the specific request for the * specified resource with authentication details will be granted or not. * * @param action value of the action to be carried out (e.g. "UPDATE", "DELETE") * @param resource resource name. Resource is defined as {DomainName}:{Entity}" * @param trustDomain (optional) if the access checks involves cross domain check only * check the specified trusted domain and ignore all others * @param principal (optional) carry out the access check for specified principal * @return Access object indicating whether or not the request will be granted or not * @throws ZMSClientException in case of failure */ public Access getAccess(String action, String resource, String trustDomain, String principal) { try { return client.getAccess(action, resource, trustDomain, principal); } catch (ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } /** * Requests the ZMS to indicate whether or not the specific request for the * specified resource with authentication details will be granted or not. * * @param action value of the action to be carried out (e.g. "UPDATE", "DELETE") * @param resource resource string. * @param trustDomain (optional) if the access checks involves cross domain check only * check the specified trusted domain and ignore all others * @param principal (optional) carry out the access check for specified principal * @return Access object indicating whether or not the request will be granted or not * @throws ZMSClientException in case of failure */ public Access getAccessExt(String action, String resource, String trustDomain, String principal) { try { return client.getAccessExt(action, resource, trustDomain, principal); } catch (ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } /** * Retrieve the list of all domain data from the ZMS Server that * is signed with ZMS's private key. It will pass an optional matchingTag * so that ZMS can skip returning domains if no changes have taken * place since that tag was issued. * * @param domainName name of the domain. if specified, the server will * only return this domain in the result set * @param metaOnly (can be null) must have value of true or false (default). * if set to true, zms server will only return meta information * about each domain (description, last modified timestamp, etc) and * no role/policy/service details will be returned. * @param matchingTag (can be null) contains modified timestamp received * with last request. If null, then return all domains. 
* @param responseHeaders contains the "tag" returned for modification * time of the domains, map key = "tag", List should * contain a single value timestamp String to be used * with subsequent call as matchingTag to this API * @return list of domains signed by ZMS Server * @throws ZMSClientException in case of failure */ public SignedDomains getSignedDomains(String domainName, String metaOnly, String matchingTag, Map<String, List<String>> responseHeaders) { return getSignedDomains(domainName, metaOnly, null, true, matchingTag, responseHeaders); } /** * Retrieve the list of all domain data from the ZMS Server that * is signed with ZMS's private key. It will pass an optional matchingTag * so that ZMS can skip returning domains if no changes have taken * place since that tag was issued. * * @param domainName name of the domain. if specified, the server will * only return this domain in the result set * @param metaOnly (can be null) must have value of true or false (default). * if set to true, zms server will only return meta information * about each domain (description, last modified timestamp, etc) and * no role/policy/service details will be returned. * @param metaAttr (can be null) if metaOnly option is set to true, this * parameter can filter the results based on the presence of the * requested attribute. Allowed values are: account, ypmid, and all. * account - only return domains that have the account value set * ypmid - only return domains that have the ypmid value set * all - return all domains (no filtering). * @param matchingTag (can be null) contains modified timestamp received * with last request. If null, then return all domains. * @param responseHeaders contains the "tag" returned for modification * time of the domains, map key = "tag", List should * contain a single value timestamp String to be used * with subsequent call as matchingTag to this API * @return list of domains signed by ZMS Server * @throws ZMSClientException in case of failure */ public SignedDomains getSignedDomains(String domainName, String metaOnly, String metaAttr, String matchingTag, Map<String, List<String>> responseHeaders) { return getSignedDomains(domainName, metaOnly, metaAttr, true, matchingTag, responseHeaders); } /** * Retrieve the list of all domain data from the ZMS Server that * is signed with ZMS's private key. It will pass an optional matchingTag * so that ZMS can skip returning domains if no changes have taken * place since that tag was issued. * * @param domainName name of the domain. if specified, the server will * only return this domain in the result set * @param metaOnly (can be null) must have value of true or false (default). * if set to true, zms server will only return meta information * about each domain (description, last modified timestamp, etc) and * no role/policy/service details will be returned. * @param metaAttr (can be null) if metaOnly option is set to true, this * parameter can filter the results based on the presence of the * requested attribute. Allowed values are: account, ypmid, and all. * account - only return domains that have the account value set * ypmid - only return domains that have the ypmid value set * all - return all domains (no filtering). * @param masterCopy system principals can request the request to be processed * from the master data source instead of read replicas in case * there are any configured * @param matchingTag (can be null) contains modified timestamp received * with last request. If null, then return all domains. 
* @param responseHeaders contains the "tag" returned for modification * time of the domains, map key = "tag", List should * contain a single value timestamp String to be used * with subsequent call as matchingTag to this API * @return list of domains signed by ZMS Server * @throws ZMSClientException in case of failure */ public SignedDomains getSignedDomains(String domainName, String metaOnly, String metaAttr, boolean masterCopy, String matchingTag, Map<String, List<String>> responseHeaders) { updatePrincipal(); try { return client.getSignedDomains(domainName, metaOnly, metaAttr, masterCopy, matchingTag, responseHeaders); } catch (ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } /** * For the specified user credentials return the corresponding User Token that * can be used for authenticating other ZMS operations. The client internally * automatically calls this method and uses the UserToken if the ZMSClient * object was initialized with a user principal. * * @param userName name of the user. This is only used to verify that it matches * the user name from the credentials and is optional. The caller can just pass * the string "_self_" as the userName to bypass this optional check. * @return ZMS generated User Token * @throws ZMSClientException in case of failure */ public UserToken getUserToken(String userName) { return getUserToken(userName, null, null); } /** * For the specified user credentials return the corresponding User Token that * can be used for authenticating other ZMS operations by any of the specified * authorized services. * * @param userName name of the user * @param serviceNames comma separated list of authorized service names * @param header boolean flag whether or not return authority header name * @return ZMS generated User Token * @throws ZMSClientException in case of failure */ public UserToken getUserToken(String userName, String serviceNames, Boolean header) { try { return client.getUserToken(userName, serviceNames, header); } catch (ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } /** * For the specified user credentials return the corresponding User Token that * can be used for authenticating other ZMS operations by any of the specified * authorized services. 
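 * <p>
 * Brief usage sketch (the client must have been constructed with user credentials;
 * the service list below is illustrative only):
 * <pre>{@code
 * // token restricted to the listed authorized services; "_self_" skips the
 * // optional user name check described above
 * UserToken token = zmsClient.getUserToken("_self_", "sports.api,sports.backend");
 * }</pre>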
* * @param userName name of the user * @param serviceNames comma separated list of authorized service names * @return ZMS generated User Token * @throws ZMSClientException in case of failure */ public UserToken getUserToken(String userName, String serviceNames) { return getUserToken(userName, serviceNames, null); } /** * For the specified domain in domainName, a list of default administrators * can be passed to this method and will be added to the domain's admin role * In addition this method will ensure that the admin role and policy exist and * are properly set up * * @param domainName - name of the domain to add default administrators to * @param auditRef - string containing audit specification or ticket number * @param defaultAdmins - list of names to be added as default administrators * @throws ZMSClientException in case of failure */ public void putDefaultAdmins(String domainName, String auditRef, DefaultAdmins defaultAdmins) { updatePrincipal(); try { client.putDefaultAdmins(domainName, auditRef, defaultAdmins); } catch (ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } /** * The client will validate the given serviceToken against the ZMS Server * and if the token is valid, it will return a Principal object. * * @param serviceToken token to be validated. * @return Principal object if the token is successfully validated or * @throws ZMSClientException in case of failure */ public Principal getPrincipal(String serviceToken) { return getPrincipal(serviceToken, PRINCIPAL_AUTHORITY.getHeader()); } /** * The client will validate the given serviceToken against the ZMS Server * and if the token is valid, it will return a Principal object. * * @param serviceToken token to be validated. 
* @param tokenHeader name of the authorization header for the token * @return Principal object if the token is successfully validated or * @throws ZMSClientException in case of failure */ public Principal getPrincipal(String serviceToken, String tokenHeader) { if (serviceToken == null) { throw new ZMSClientException(401, "Null service token provided"); } if (tokenHeader == null) { tokenHeader = PRINCIPAL_AUTHORITY.getHeader(); } // verify that service token is valid before sending the data to // the ZMS server PrincipalToken token; try { token = new PrincipalToken(serviceToken); } catch (IllegalArgumentException ex) { throw new ZMSClientException(ZMSClientException.UNAUTHORIZED, "Invalid service token provided: " + ex.getMessage()); } Principal servicePrincipal = SimplePrincipal.create(token.getDomain(), token.getName(), serviceToken, 0, PRINCIPAL_AUTHORITY); client.addCredentials(tokenHeader, serviceToken); principalCheckDone = true; ServicePrincipal validatedPrincipal; try { validatedPrincipal = client.getServicePrincipal(); } catch (ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } if (validatedPrincipal == null) { throw new ZMSClientException(ZMSClientException.UNAUTHORIZED, "Invalid service token provided"); } // before returning let's validate that domain, name and // credentials match to what was passed to if (!servicePrincipal.getDomain().equalsIgnoreCase(validatedPrincipal.getDomain())) { throw new ZMSClientException(ZMSClientException.UNAUTHORIZED, "Validated principal domain name mismatch"); } if (!servicePrincipal.getName().equalsIgnoreCase(validatedPrincipal.getService())) { throw new ZMSClientException(ZMSClientException.UNAUTHORIZED, "Validated principal service name mismatch"); } return servicePrincipal; } /** * Create provider roles for the specified tenant resource group in the tenant domain. * If the principal requesting this operation has been authorized by the provider * service itself, then the corresponding tenant roles will be created in the provider * domain as well thus completing the tenancy on-boarding process in one call. * * @param tenantDomain name of the tenant's domain * @param providerDomain name of the provider domain * @param providerServiceName name of the provider service * @param resourceGroup name of the resource group * @param auditRef string containing audit specification or ticket number * @param providerRoles Provider roles * @throws ZMSClientException in case of failure */ public void putProviderResourceGroupRoles(String tenantDomain, String providerDomain, String providerServiceName, String resourceGroup, String auditRef, ProviderResourceGroupRoles providerRoles) { updatePrincipal(); try { client.putProviderResourceGroupRoles(tenantDomain, providerDomain, providerServiceName, resourceGroup, auditRef, providerRoles); } catch (ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } /** * Delete the provider roles for the specified tenant resource group from the tenant domain. * If the principal requesting this operation has been authorized by the provider * service itself, then the corresponding tenant roles will be deleted from the provider * domain as well thus completing the process in one call. 
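 * <p>
 * Illustrative call (all names below are hypothetical):
 * <pre>{@code
 * // tenant domain "weather" drops the provider roles created for resource group
 * // "backups" of the provider service sports.storage
 * zmsClient.deleteProviderResourceGroupRoles("weather", "sports", "storage",
 *         "backups", "ticket-1004");
 * }</pre>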
* * @param tenantDomain name of tenant's domain * @param providerDomain name of the provider domain * @param providerServiceName name of the provider service * @param resourceGroup name of the resource group * @param auditRef string containing audit specification or ticket number * @throws ZMSClientException in case of failure */ public void deleteProviderResourceGroupRoles(String tenantDomain, String providerDomain, String providerServiceName, String resourceGroup, String auditRef) { updatePrincipal(); try { client.deleteProviderResourceGroupRoles(tenantDomain, providerDomain, providerServiceName, resourceGroup, auditRef); } catch (ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } /** * Retrieve the list of provider roles defined for a tenant resource group in a domain * * @param tenantDomain name of the tenant's domain * @param providerDomain name of the provider domain * @param providerServiceName name of the provider service * @param resourceGroup name of the resource group * @return list of provider roles * @throws ZMSClientException in case of failure */ public ProviderResourceGroupRoles getProviderResourceGroupRoles(String tenantDomain, String providerDomain, String providerServiceName, String resourceGroup) { updatePrincipal(); try { return client.getProviderResourceGroupRoles(tenantDomain, providerDomain, providerServiceName, resourceGroup); } catch (ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } /** * Check the data for the specified domain object * * @param domain name of the domain to be checked * @return DomainDataCheck object * @throws ZMSClientException in case of failure */ public DomainDataCheck getDomainDataCheck(String domain) { updatePrincipal(); try { return client.getDomainDataCheck(domain); } catch (ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } /** * Retrieve the specified solution template provisioned on the ZMS Server. * The template object will include the list of roles and policies that will * be provisioned in the domain when the template is applied.
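 * <p>
 * Simple usage sketch (the template name is illustrative):
 * <pre>{@code
 * Template template = zmsClient.getTemplate("vipng");
 * // inspect the roles and policies the template would provision in a domain
 * }</pre>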
* * @param template name of the solution template to be retrieved * @return template object * @throws ZMSClientException in case of failure */ public Template getTemplate(String template) { updatePrincipal(); try { return client.getTemplate(template); } catch (ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } /** * Retrieve the list of solution templates provisioned on the ZMS Server * * @return list of template names * @throws ZMSClientException in case of failure */ public ServerTemplateList getServerTemplateList() { updatePrincipal(); try { return client.getServerTemplateList(); } catch (ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } /** * Provision the specified solution template roles and policies in the domain * * @param domain name of the domain to be updated * @param auditRef string containing audit specification or ticket number * @param templates contains list of template names to be provisioned in the domain * @throws ZMSClientException in case of failure */ public void putDomainTemplate(String domain, String auditRef, DomainTemplate templates) { updatePrincipal(); try { client.putDomainTemplate(domain, auditRef, templates); } catch (ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } /** * Provision the specified solution template roles and policies in the domain * * @param domain name of the domain to be updated * @param template name of the template to be applied * @param auditRef string containing audit specification or ticket number * @param templates containing the single template (must match the template parameter) to be provisioned in the domain * @throws ZMSClientException in case of failure */ public void putDomainTemplateExt(String domain, String template, String auditRef, DomainTemplate templates) { updatePrincipal(); try { client.putDomainTemplateExt(domain, template, auditRef, templates); } catch (ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } /** * Delete the specified solution template roles and policies from the domain * * @param domain name of the domain to be updated * @param template is the name of the provisioned template to be deleted * @param auditRef string containing audit specification or ticket number * @throws ZMSClientException in case of failure */ public void deleteDomainTemplate(String domain, String template, String auditRef) { updatePrincipal(); try { client.deleteDomainTemplate(domain, template, auditRef); } catch (ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } /** * Retrieve the list of solution template provisioned for a domain * * @param domain name of the domain * @return TemplateList object that includes the list of provisioned solution template names * @throws ZMSClientException in case of failure */ public DomainTemplateList getDomainTemplateList(String domain) { updatePrincipal(); try { return client.getDomainTemplateList(domain); } catch 
(ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } /** * Retrieve the list of resources as defined in their respective assertions * that the given principal has access to through their role membership * * @param principal the principal name (e.g. user.joe). Must have special * privileges to execute this query without specifying the principal. * Check with Athenz Service Administrators if you have a use case to * request all principals from Athenz Service * @param action optional field specifying what action to filter assertions on * @return ResourceAccessList object that lists the set of assertions per principal * @throws ZMSClientException in case of failure */ public ResourceAccessList getResourceAccessList(String principal, String action) { updatePrincipal(); try { return client.getResourceAccessList(principal, action); } catch (ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } /** * Retrieve the quota details for the specified domain * * @param domainName name of the domain * @return quota object * @throws ZMSClientException in case of failure */ public Quota getQuota(String domainName) { updatePrincipal(); try { return client.getQuota(domainName); } catch (ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } /** * Create/Update the quota details for the specified domain * * @param domainName name of the domain * @param auditRef string containing audit specification or ticket number * @param quota object to be set for the domain * @throws ZMSClientException in case of failure */ public void putQuota(String domainName, String auditRef, Quota quota) { updatePrincipal(); try { client.putQuota(domainName, auditRef, quota); } catch (ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } /** * Delete the specified quota details for the specified domain * * @param domainName name of the domain * @param auditRef string containing audit specification or ticket number * @throws ZMSClientException in case of failure */ public void deleteQuota(String domainName, String auditRef) { updatePrincipal(); try { client.deleteQuota(domainName, auditRef); } catch (ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } /** * Delete the specified user from all roles in the given domain * * @param domainName name of the domain * @param memberName name of the member to be removed from all roles * @param auditRef string containing audit specification or ticket number * @throws ZMSClientException in case of failure */ public void deleteDomainRoleMember(String domainName, String memberName, String auditRef) { updatePrincipal(); try { client.deleteDomainRoleMember(domainName, memberName, auditRef); } catch (ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } /** * Retrieve the list of all members 
provisioned for a domain * in regular roles * * @param domainName name of the domain * @return DomainRoleMembers object that includes the list of members with their roles * @throws ZMSClientException in case of failure */ public DomainRoleMembers getDomainRoleMembers(String domainName) { updatePrincipal(); try { return client.getDomainRoleMembers(domainName); } catch (ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } /** * Fetch all the roles across domains by either calling or specified principal * @param principal - Requested principal. If null will return roles for the user making the call * @param domainName - Requested domain. If null will return roles from all domains * @return Member with roles in all requested domains */ public DomainRoleMember getPrincipalRoles(String principal, String domainName) { updatePrincipal(); try { return client.getPrincipalRoles(principal, domainName); } catch (ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } /** * Set the role system meta parameters * * @param domainName domain name containing the role to be modified * @param roleName role name to be modified * @param attribute role meta attribute being modified in this request * @param auditRef string containing audit specification or ticket number * @param meta meta parameters to be set on the role */ public void putRoleSystemMeta(String domainName, String roleName, String attribute, String auditRef, RoleSystemMeta meta) { updatePrincipal(); try { client.putRoleSystemMeta(domainName, roleName, attribute, auditRef, meta); } catch (ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } /** * Set the role meta parameters * * @param domainName domain name containing the role to be modified * @param roleName role name to be modified * @param auditRef string containing audit specification or ticket number * @param meta meta parameters to be set on the role */ public void putRoleMeta(String domainName, String roleName, String auditRef, RoleMeta meta) { updatePrincipal(); try { client.putRoleMeta(domainName, roleName, auditRef, meta); } catch (ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } /** * Approve or reject addition of a member in the specified role optionally with expiration * * @param domainName name of the domain * @param roleName name of the role * @param memberName name of the member to be added * @param expiration timestamp when this membership will expire (optional) * @param approval flag indicating whether this membership is approved or rejected * @param auditRef string containing audit specification or ticket number * @throws ZMSClientException in case of failure */ public void putMembershipDecision(String domainName, String roleName, String memberName, Timestamp expiration, boolean approval, String auditRef) { Membership mbr = new Membership().setRoleName(roleName) .setMemberName(memberName).setExpiration(expiration).setApproved(approval); updatePrincipal(); try { client.putMembershipDecision(domainName, roleName, memberName, auditRef, mbr); } 
catch (ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } /** * Return all the list of pending requests for the given principal. If the principal * is null, the server will return the list for the authenticated principal * making the call * @param principal name of the approver principal (optional) * @return DomainRoleMembership object listing all pending users * @throws ZMSClientException in case of failure */ public DomainRoleMembership getPendingDomainRoleMembersList(String principal) { updatePrincipal(); try { return client.getPendingDomainRoleMembersList(principal); } catch (ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } /** * Review role membership to extend and/or delete role members * * @param domainName name of the domain * @param roleName name of the role * @param auditRef string containing audit specification or ticket number * @param role Role object containing updated and/or deleted members * @throws ZMSClientException in case of failure */ public void putRoleReview(String domainName, String roleName, String auditRef, Role role) { updatePrincipal(); try { client.putRoleReview(domainName, roleName, auditRef, role); } catch (ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } /** * Delete the specified group from domain * * @param domainName name of the domain * @param groupName name of the group * @param auditRef string containing audit specification or ticket number * @throws ZMSClientException in case of failure */ public void deleteGroup(String domainName, String groupName, String auditRef) { updatePrincipal(); try { client.deleteGroup(domainName, groupName, auditRef); } catch (ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } /** * Remove the specified member from the group * * @param domainName name of the domain * @param groupName name of the group * @param memberName name of the member to be removed * @param auditRef string containing audit specification or ticket number * @throws ZMSClientException in case of failure */ public void deleteGroupMembership(String domainName, String groupName, String memberName, String auditRef) { updatePrincipal(); try { client.deleteGroupMembership(domainName, groupName, memberName, auditRef); } catch (ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } /** * Remove the specified pending member from the group * * @param domainName name of the domain * @param groupName name of the group * @param memberName name of the pending member to be removed * @param auditRef string containing audit specification or ticket number * @throws ZMSClientException in case of failure */ public void deletePendingGroupMembership(String domainName, String groupName, String memberName, String auditRef) { updatePrincipal(); try { client.deletePendingGroupMembership(domainName, groupName, memberName, auditRef); } catch (ResourceException ex) { throw new 
ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } /** * Generate a group name as expected by ZMS Server can be used to * set the group object's name field (e.g. group.setName(name)) * * @param domain name of the domain * @param group name of the group * @return full group name */ public String generateGroupName(String domain, String group) { return domain + ":group." + group; } /** * Get membership details for the specified member in the given group * in a specified domain with an optional expiration * * @param domainName name of the domain * @param groupName name of the group * @param memberName name of the member * @param expiration member expiration * @return GroupMembership object * @throws ZMSClientException in case of failure */ public GroupMembership getGroupMembership(String domainName, String groupName, String memberName, String expiration) { updatePrincipal(); try { return client.getGroupMembership(domainName, groupName, memberName, expiration); } catch (ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } /** * Fetch all the groups across domains by either calling or specified principal * @param principal - Requested principal. If null will return groups for the user making the call * @param domainName - Requested domain. If null will return groups from all domains * @return Member with groups in all requested domains */ public DomainGroupMember getPrincipalGroups(String principal, String domainName) { updatePrincipal(); try { return client.getPrincipalGroups(principal, domainName); } catch (ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } /** * Set the group system meta parameters * * @param domainName domain name containing the group to be modified * @param groupName group name to be modified * @param attribute group meta attribute being modified in this request * @param auditRef string containing audit specification or ticket number * @param meta meta parameters to be set on the group */ public void putGroupSystemMeta(String domainName, String groupName, String attribute, String auditRef, GroupSystemMeta meta) { updatePrincipal(); try { client.putGroupSystemMeta(domainName, groupName, attribute, auditRef, meta); } catch (ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } /** * Set the group meta parameters * * @param domainName domain name containing the group to be modified * @param groupName group name to be modified * @param auditRef string containing audit specification or ticket number * @param meta meta parameters to be set on the group */ public void putGroupMeta(String domainName, String groupName, String auditRef, GroupMeta meta) { updatePrincipal(); try { client.putGroupMeta(domainName, groupName, auditRef, meta); } catch (ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } /** * Approve or reject addition of a member in the specified group * * @param domainName name of the domain * @param groupName name of the group 
* @param memberName name of the member to be added * @param approval flag indicating whether this membership is approved or rejected * @param auditRef string containing audit specification or ticket number * @throws ZMSClientException in case of failure */ public void putGroupMembershipDecision(String domainName, String groupName, String memberName, boolean approval, String auditRef) { GroupMembership mbr = new GroupMembership().setGroupName(groupName) .setMemberName(memberName).setApproved(approval); updatePrincipal(); try { client.putGroupMembershipDecision(domainName, groupName, memberName, auditRef, mbr); } catch (ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } /** * Return all the list of pending requests for the given principal. If the principal * is null, the server will return the list for the authenticated principal * making the call * @param principal name of the approver principal (optional) * @return DomainGroupMembership object listing all pending users * @throws ZMSClientException in case of failure */ public DomainGroupMembership getPendingDomainGroupMembersList(String principal) { updatePrincipal(); try { return client.getPendingDomainGroupMembersList(principal); } catch (ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } /** * Review group membership to extend and/or delete group members * * @param domainName name of the domain * @param groupName name of the group * @param auditRef string containing audit specification or ticket number * @param group Group object containing updated and/or deleted members * @throws ZMSClientException in case of failure */ public void putGroupReview(String domainName, String groupName, String auditRef, Group group) { updatePrincipal(); try { client.putGroupReview(domainName, groupName, auditRef, group); } catch (ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } /** * Retrieve the specified group * * @param domainName name of the domain * @param groupName name of the group * @param auditLog include audit log for the group changes in the response * @param pending if this flag is set, then all members for that group will be retrieved * including pending members * @return group object * @throws ZMSClientException in case of failure */ public Group getGroup(String domainName, String groupName, boolean auditLog, boolean pending) { updatePrincipal(); try { return client.getGroup(domainName, groupName, auditLog, pending); } catch (ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } /** * Create/Update a new group in the specified domain. If updating a group * the provided object must contain all attributes as it will replace * the full group object configured on the server (not just some of the attributes). 
* * @param domainName name of the domain * @param groupName name of the group * @param auditRef string containing audit specification or ticket number * @param group group object to be added to the domain * @throws ZMSClientException in case of failure */ public void putGroup(String domainName, String groupName, String auditRef, Group group) { updatePrincipal(); try { client.putGroup(domainName, groupName, auditRef, group); } catch (ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } /** * Retrieve the list of groups defined for the specified domain. The groups * will contain their attributes and, if specified, the list of members. * * @param domainName name of the domain * @param members include all members for the groups as well * @return list of groups * @throws ZMSClientException in case of failure */ public Groups getGroups(String domainName, Boolean members) { updatePrincipal(); try { return client.getGroups(domainName, members); } catch (ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } /** * Add a member in the specified group * * @param domainName name of the domain * @param groupName name of the group * @param memberName name of the member to be added * @param auditRef string containing audit specification or ticket number * @throws ZMSClientException in case of failure */ public void putGroupMembership(String domainName, String groupName, String memberName, String auditRef) { GroupMembership mbr = new GroupMembership().setGroupName(groupName) .setMemberName(memberName).setIsMember(true); updatePrincipal(); try { client.putGroupMembership(domainName, groupName, memberName, auditRef, mbr); } catch (ResourceException ex) { throw new ZMSClientException(ex.getCode(), ex.getData()); } catch (Exception ex) { throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage()); } } }
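For readers skimming the client methods above, a minimal usage sketch of the group APIs. The ZMS endpoint URL, domain/group names and audit reference are placeholder values, authentication setup is omitted, and the package imports are assumed to follow the usual Athenz client layout:

```java
import com.yahoo.athenz.zms.Group;
import com.yahoo.athenz.zms.ZMSClient;
import com.yahoo.athenz.zms.ZMSClientException;

public class GroupApiSketch {
    public static void main(String[] args) {
        // Placeholder endpoint; credential/principal setup is omitted for brevity.
        ZMSClient zmsClient = new ZMSClient("https://zms.athenz.example.com:4443/zms/v1");
        try {
            // Create (or replace) a group and add a member to it.
            Group readers = new Group().setName(zmsClient.generateGroupName("sports", "readers"));
            zmsClient.putGroup("sports", "readers", "ticket-1234", readers);
            zmsClient.putGroupMembership("sports", "readers", "user.jane", "ticket-1234");

            // Fetch the group back, including any pending members.
            Group group = zmsClient.getGroup("sports", "readers", false, true);
            System.out.println("group: " + group.getName());
        } catch (ZMSClientException ex) {
            // All wrapper methods surface failures as ZMSClientException.
            System.err.println("ZMS request failed: " + ex.getMessage());
        }
    }
}
```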
1
5,480
We can't remove functions, as that would introduce backward compatibility issues in our Java client. So we should also keep a function with the original arguments: ` public Roles getRoles(String domainName, Boolean members) { return getRoles(domainName, members, null, null); } ` For the Go client we don't have a wrapper, so there is no option but to update your client code as necessary; but for Java we have the wrapper, so we always maintain backward compatibility.
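To make that concrete, a sketch of the wrapper pattern being asked for, written in the same style as the rest of the client; the extra `tagKey`/`tagValue` parameters only stand in for whatever new arguments the extended method actually takes:

```java
// Original signature kept as a thin overload so existing callers keep compiling.
public Roles getRoles(String domainName, Boolean members) {
    return getRoles(domainName, members, null, null);
}

// Extended signature; tagKey/tagValue are illustrative placeholders for the new arguments.
public Roles getRoles(String domainName, Boolean members, String tagKey, String tagValue) {
    updatePrincipal();
    try {
        return client.getRoles(domainName, members, tagKey, tagValue);
    } catch (ResourceException ex) {
        throw new ZMSClientException(ex.getCode(), ex.getData());
    } catch (Exception ex) {
        throw new ZMSClientException(ZMSClientException.BAD_REQUEST, ex.getMessage());
    }
}
```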
AthenZ-athenz
java
@@ -32,5 +32,12 @@ ADDITIONAL def warn_deprecation(message) warn message end + + # @private + # + # Used internally to send deprecation warnings to io + def warn(message) + RSpec.configuration.deprecation_io.puts(message) + end end end
1
module RSpec class << self # @private # # Used internally to print deprecation warnings def deprecate(method, alternate_method=nil, version=nil) version_string = version ? "rspec-#{version}" : "a future version of RSpec" message = <<-NOTICE ***************************************************************** DEPRECATION WARNING: you are using deprecated behaviour that will be removed from #{version_string}. #{caller(0)[2]} * #{method} is deprecated. NOTICE if alternate_method message << <<-ADDITIONAL * please use #{alternate_method} instead. ADDITIONAL end message << "*****************************************************************" warn_deprecation(message) end # @private # # Used internally to print deprecation warnings def warn_deprecation(message) warn message end end end
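For context, a minimal sketch of how the helper above is meant to be called; the method names and version string are placeholders:

```ruby
# Hypothetical caller: prints the deprecation banner and points users at the replacement.
RSpec.deprecate("Foo#old_api", "Foo#new_api", "3.0")
```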
1
8,827
I'd recommend keeping this in `warn_deprecation` and not adding an override of `warn`.
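A sketch of what that would look like, with the IO redirection kept inside `warn_deprecation` itself rather than added as a `warn` override (assuming `RSpec.configuration.deprecation_io` is the intended sink, as in the patch):

```ruby
# @private
#
# Used internally to print deprecation warnings
def warn_deprecation(message)
  RSpec.configuration.deprecation_io.puts(message)
end
```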
rspec-rspec-core
rb
@@ -0,0 +1,10 @@ +// Startup script for Phusion Passenger that uses next.js cli +// Run `blitz build` before starting +const path = require('path') + +const blitzPath = path.join(__dirname, 'node_modules', 'next', 'dist', 'bin', 'next'); + +process.argv.length = 1; +process.argv.push(blitzPath, 'start'); + +require(blitzPath);
1
1
12,573
I think we should use a `blitz` bin instead
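Roughly what that alternative could look like; the exact location of the `blitz` bin inside `node_modules` is an assumption and may differ between blitz versions:

```js
// Startup script for Phusion Passenger that uses the blitz cli instead of next
// Run `blitz build` before starting
const path = require('path')

// Assumed bin location; adjust if the blitz package lays out its bin differently.
const blitzPath = path.join(__dirname, 'node_modules', '.bin', 'blitz')

process.argv.length = 1
process.argv.push(blitzPath, 'start')

require(blitzPath)
```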
blitz-js-blitz
js
@@ -101,7 +101,10 @@ class DictInterface(Interface): @classmethod def validate(cls, dataset): - dimensions = dataset.dimensions(label='name') + if dataset._virtual_vdims: + dimensions = dataset.dimensions('key', label='name') + else: + dimensions = dataset.dimensions(label='name') not_found = [d for d in dimensions if d not in dataset.data] if not_found: raise DataError('Following columns specified as dimensions '
1
from collections import OrderedDict from itertools import compress try: import itertools.izip as zip except ImportError: pass import numpy as np from .interface import Interface, DataError from ..dimension import Dimension from ..element import Element from ..dimension import OrderedDict as cyODict from ..ndmapping import NdMapping, item_check from .. import util class DictInterface(Interface): """ Interface for simple dictionary-based dataset format. The dictionary keys correspond to the column (i.e dimension) names and the values are collections representing the values in that column. """ types = (dict, OrderedDict, cyODict) datatype = 'dictionary' @classmethod def dimension_type(cls, dataset, dim): name = dataset.get_dimension(dim, strict=True).name values = dataset.data[name] return type(values) if np.isscalar(values) else values.dtype.type @classmethod def init(cls, eltype, data, kdims, vdims): odict_types = (OrderedDict, cyODict) if kdims is None: kdims = eltype.kdims if vdims is None: vdims = eltype.vdims dimensions = [d.name if isinstance(d, Dimension) else d for d in kdims + vdims] if isinstance(data, tuple): data = {d: v for d, v in zip(dimensions, data)} elif util.is_dataframe(data) and all(d in data for d in dimensions): data = {d: data[d] for d in dimensions} elif isinstance(data, np.ndarray): if data.ndim == 1: if eltype._auto_indexable_1d and len(kdims)+len(vdims)>1: data = np.column_stack([np.arange(len(data)), data]) else: data = np.atleast_2d(data).T data = {k: data[:,i] for i,k in enumerate(dimensions)} elif isinstance(data, list) and data == []: data = OrderedDict([(d, []) for d in dimensions]) elif isinstance(data, list) and np.isscalar(data[0]): data = {dimensions[0]: np.arange(len(data)), dimensions[1]: data} elif (isinstance(data, list) and isinstance(data[0], tuple) and len(data[0]) == 2 and any(isinstance(v, tuple) for v in data[0])): dict_data = zip(*((util.wrap_tuple(k)+util.wrap_tuple(v)) for k, v in data)) data = {k: np.array(v) for k, v in zip(dimensions, dict_data)} # Ensure that interface does not consume data of other types # with an iterator interface elif not any(isinstance(data, tuple(t for t in interface.types if t is not None)) for interface in cls.interfaces.values()): data = {k: v for k, v in zip(dimensions, zip(*data))} elif (isinstance(data, dict) and not any(d in data or any(d in k for k in data if isinstance(k, tuple)) for d in dimensions)): dict_data = sorted(data.items()) dict_data = zip(*((util.wrap_tuple(k)+util.wrap_tuple(v)) for k, v in dict_data)) data = {k: np.array(v) for k, v in zip(dimensions, dict_data)} if not isinstance(data, cls.types): raise ValueError("DictInterface interface couldn't convert data.""") elif isinstance(data, dict): unpacked = [] for d, vals in data.items(): if isinstance(d, tuple): vals = np.asarray(vals) if not vals.ndim == 2 and vals.shape[1] == len(d): raise ValueError("Values for %s dimensions did not have " "the expected shape.") for i, sd in enumerate(d): unpacked.append((sd, vals[:, i])) else: unpacked.append((d, vals if np.isscalar(vals) else np.asarray(vals))) if not cls.expanded([d[1] for d in unpacked if not np.isscalar(d[1])]): raise ValueError('DictInterface expects data to be of uniform shape.') if isinstance(data, odict_types): data.update(unpacked) else: data = OrderedDict(unpacked) return data, {'kdims':kdims, 'vdims':vdims}, {} @classmethod def validate(cls, dataset): dimensions = dataset.dimensions(label='name') not_found = [d for d in dimensions if d not in dataset.data] if not_found: raise 
DataError('Following columns specified as dimensions ' 'but not found in data: %s' % not_found, cls) lengths = [(dim, 1 if np.isscalar(dataset.data[dim]) else len(dataset.data[dim])) for dim in dimensions] if len({l for d, l in lengths if l > 1}) > 1: lengths = ', '.join(['%s: %d' % l for l in sorted(lengths)]) raise DataError('Length of columns must be equal or scalar, ' 'columns have lengths: %s' % lengths, cls) @classmethod def unpack_scalar(cls, dataset, data): """ Given a dataset object and data in the appropriate format for the interface, return a simple scalar. """ if len(data) != 1: return data key = list(data.keys())[0] if len(data[key]) == 1 and key in dataset.vdims: return data[key][0] @classmethod def isscalar(cls, dataset, dim): name = dataset.get_dimension(dim, strict=True).name values = dataset.data[name] return np.isscalar(values) or len(np.unique(values)) == 1 @classmethod def shape(cls, dataset): return cls.length(dataset), len(dataset.data), @classmethod def length(cls, dataset): lengths = [len(vals) for vals in dataset.data.values() if not np.isscalar(vals)] return max(lengths) if lengths else 1 @classmethod def array(cls, dataset, dimensions): if not dimensions: dimensions = dataset.dimensions(label='name') else: dimensions = [dataset.get_dimensions(d).name for d in dimensions] arrays = [dataset.data[dim.name] for dim in dimensions] return np.column_stack([np.full(len(dataset), arr) if np.isscalar(arr) else arr for arr in arrays]) @classmethod def add_dimension(cls, dataset, dimension, dim_pos, values, vdim): dim = dimension.name if isinstance(dimension, Dimension) else dimension data = list(dataset.data.items()) data.insert(dim_pos, (dim, values)) return OrderedDict(data) @classmethod def redim(cls, dataset, dimensions): all_dims = dataset.dimensions() renamed = [] for k, v in dataset.data.items(): if k in dimensions: k = dimensions[k].name elif k in all_dims: k = dataset.get_dimension(k).name renamed.append((k, v)) return OrderedDict(renamed) @classmethod def concat(cls, dataset_objs): cast_objs = cls.cast(dataset_objs) cols = set(tuple(c.data.keys()) for c in cast_objs) if len(cols) != 1: raise Exception("In order to concatenate, all Dataset objects " "should have matching set of columns.") concatenated = OrderedDict() for column in cols.pop(): concatenated[column] = np.concatenate([obj[column] for obj in cast_objs]) return concatenated @classmethod def sort(cls, dataset, by=[], reverse=False): by = [dataset.get_dimension(d).name for d in by] if len(by) == 1: sorting = cls.values(dataset, by[0]).argsort() else: arrays = [dataset.dimension_values(d) for d in by] sorting = util.arglexsort(arrays) return OrderedDict([(d, v if np.isscalar(v) else (v[sorting][::-1] if reverse else v[sorting])) for d, v in dataset.data.items()]) @classmethod def values(cls, dataset, dim, expanded=True, flat=True): dim = dataset.get_dimension(dim).name values = dataset.data.get(dim) if np.isscalar(values): if not expanded: return np.array([values]) values = np.full(len(dataset), values) else: if not expanded: return util.unique_array(values) values = np.array(values) return values @classmethod def reindex(cls, dataset, kdims, vdims): dimensions = [dataset.get_dimension(d).name for d in kdims+vdims] return OrderedDict([(d, dataset.dimension_values(d)) for d in dimensions]) @classmethod def groupby(cls, dataset, dimensions, container_type, group_type, **kwargs): # Get dimensions information dimensions = [dataset.get_dimension(d) for d in dimensions] kdims = [kdim for kdim in dataset.kdims if 
kdim not in dimensions] vdims = dataset.vdims # Update the kwargs appropriately for Element group types group_kwargs = {} group_type = dict if group_type == 'raw' else group_type if issubclass(group_type, Element): group_kwargs.update(util.get_param_values(dataset)) group_kwargs['kdims'] = kdims group_kwargs.update(kwargs) # Find all the keys along supplied dimensions keys = (tuple(dataset.data[d.name] if np.isscalar(dataset.data[d.name]) else dataset.data[d.name][i] for d in dimensions) for i in range(len(dataset))) # Iterate over the unique entries applying selection masks grouped_data = [] for unique_key in util.unique_iterator(keys): mask = cls.select_mask(dataset, dict(zip(dimensions, unique_key))) group_data = OrderedDict(((d.name, dataset.data[d.name] if np.isscalar(dataset.data[d.name]) else dataset.data[d.name][mask]) for d in kdims+vdims)) group_data = group_type(group_data, **group_kwargs) grouped_data.append((unique_key, group_data)) if issubclass(container_type, NdMapping): with item_check(False): return container_type(grouped_data, kdims=dimensions) else: return container_type(grouped_data) @classmethod def select(cls, dataset, selection_mask=None, **selection): if selection_mask is None: selection_mask = cls.select_mask(dataset, selection) indexed = cls.indexed(dataset, selection) data = OrderedDict((k, v if np.isscalar(v) else v[selection_mask]) for k, v in dataset.data.items()) if indexed and len(list(data.values())[0]) == 1 and len(dataset.vdims) == 1: value = data[dataset.vdims[0].name] return value if np.isscalar(value) else value[0] return data @classmethod def sample(cls, dataset, samples=[]): mask = False for sample in samples: sample_mask = True if np.isscalar(sample): sample = [sample] for i, v in enumerate(sample): name = dataset.get_dimension(i).name sample_mask &= (dataset.data[name]==v) mask |= sample_mask return {k: col if np.isscalar(col) else np.array(col)[mask] for k, col in dataset.data.items()} @classmethod def aggregate(cls, dataset, kdims, function, **kwargs): kdims = [dataset.get_dimension(d, strict=True).name for d in kdims] vdims = dataset.dimensions('value', label='name') groups = cls.groupby(dataset, kdims, list, OrderedDict) aggregated = OrderedDict([(k, []) for k in kdims+vdims]) for key, group in groups: key = key if isinstance(key, tuple) else (key,) for kdim, val in zip(kdims, key): aggregated[kdim].append(val) for vdim, arr in group.items(): if vdim in dataset.vdims: if np.isscalar(arr): reduced = arr elif isinstance(function, np.ufunc): reduced = function.reduce(arr, **kwargs) else: reduced = function(arr, **kwargs) aggregated[vdim].append(reduced) return aggregated @classmethod def iloc(cls, dataset, index): rows, cols = index scalar = False if np.isscalar(cols): scalar = np.isscalar(rows) cols = [dataset.get_dimension(cols, strict=True)] elif isinstance(cols, slice): cols = dataset.dimensions()[cols] else: cols = [dataset.get_dimension(d, strict=True) for d in cols] if np.isscalar(rows): rows = [rows] new_data = OrderedDict() for d, values in dataset.data.items(): if d in cols: if np.isscalar(values): new_data[d] = values else: new_data[d] = values[rows] if scalar: arr = new_data[cols[0].name] return arr if np.isscalar(arr) else arr[0] return new_data Interface.register(DictInterface)
1
18,936
Why not make the ``derived_vdims`` flag (or similar, ``validate_vdims`` maybe?) an explicit argument to ``validate``?
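A sketch of that suggestion, with the flag passed explicitly instead of being read off the dataset; the `vdims=True` default and the exact call sites are assumptions:

```python
@classmethod
def validate(cls, dataset, vdims=True):
    # When vdims is False (e.g. virtual/derived value dimensions), only the
    # key dimensions are required to be present in the data.
    dim_types = 'all' if vdims else 'key'
    dimensions = dataset.dimensions(dim_types, label='name')
    not_found = [d for d in dimensions if d not in dataset.data]
    if not_found:
        raise DataError('Following columns specified as dimensions '
                        'but not found in data: %s' % not_found, cls)
```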
holoviz-holoviews
py
@@ -155,6 +155,16 @@ func (s *VolumeServer) create( err.Error()) } + if spec.IsPureVolume() { + id, err = s.driver(ctx).Create(locator, source, spec) + if err != nil { + return "", status.Errorf( + codes.Internal, + "Failed to create snapshot for Pure FA volume: %v", + err.Error()) + } + return id, nil + } // Check ownership // Snapshots just need read access if !parent.IsPermitted(ctx, api.Ownership_Read) {
1
/* Package sdk is the gRPC implementation of the SDK gRPC server Copyright 2018 Portworx Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package sdk import ( "context" "fmt" "time" "github.com/sirupsen/logrus" "github.com/libopenstorage/openstorage/api" "github.com/libopenstorage/openstorage/pkg/auth" policy "github.com/libopenstorage/openstorage/pkg/storagepolicy" "github.com/libopenstorage/openstorage/pkg/util" "github.com/libopenstorage/openstorage/volume" "github.com/portworx/kvdb" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" ) // When create is called for an existing volume, this function is called to make sure // the SDK only returns that the volume is ready when the status is UP func (s *VolumeServer) waitForVolumeReady(ctx context.Context, id string) (*api.Volume, error) { var v *api.Volume minTimeout := 1 * time.Second maxTimeout := 60 * time.Minute defaultTimeout := 5 * time.Minute logrus.Infof("Waiting for volume %s to become available", id) e := util.WaitForWithContext( ctx, minTimeout, maxTimeout, defaultTimeout, // timeouts 5*time.Second, // period func() (bool, error) { var err error // Get the latest status from the volume v, err = util.VolumeFromName(s.driver(ctx), id) if err != nil { return false, status.Errorf(codes.Internal, err.Error()) } // Check if the volume is ready if v.GetStatus() == api.VolumeStatus_VOLUME_STATUS_UP && v.GetState() != api.VolumeState_VOLUME_STATE_ATTACHED { return false, nil } // Continue waiting return true, nil }) return v, e } func (s *VolumeServer) waitForVolumeRemoved(ctx context.Context, id string) error { minTimeout := 1 * time.Second maxTimeout := 10 * time.Minute defaultTimeout := 5 * time.Minute logrus.Infof("Waiting for volume %s to be removed", id) return util.WaitForWithContext( ctx, minTimeout, maxTimeout, defaultTimeout, // timeouts 250*time.Millisecond, // period func() (bool, error) { // Get the latest status from the volume if _, err := util.VolumeFromName(s.driver(ctx), id); err != nil { // Removed return false, nil } // Continue waiting return true, nil }) } func (s *VolumeServer) create( ctx context.Context, locator *api.VolumeLocator, source *api.Source, spec *api.VolumeSpec, ) (string, error) { // Check if the volume has already been created or is in process of creation volName := locator.GetName() v, err := util.VolumeFromName(s.driver(ctx), volName) // If the volume is still there but it is being delete, then wait until it is removed if err == nil && v.GetState() == api.VolumeState_VOLUME_STATE_DELETED { if err = s.waitForVolumeRemoved(ctx, volName); err != nil { return "", status.Errorf(codes.Internal, "Volume with same name %s is in the process of being deleted. 
Timed out waiting for deletion to complete: %v", volName, err) } // If the volume is there but it is not being deleted then just return the current id } else if err == nil { // Check ownership if !v.IsPermitted(ctx, api.Ownership_Admin) { return "", status.Errorf(codes.PermissionDenied, "Volume %s already exists and is owned by another user", volName) } // Wait until ready v, err = s.waitForVolumeReady(ctx, volName) if err != nil { return "", status.Errorf(codes.Internal, "Timed out waiting for volume %s to be in ready state: %v", volName, err) } // Check the requested arguments match that of the existing volume if v.GetSpec().GetSize() != spec.GetSize() { return "", status.Errorf( codes.AlreadyExists, "Existing volume has a size of %v which differs from requested size of %v", v.GetSpec().GetSize(), spec.Size) } if v.GetSpec().GetShared() != spec.GetShared() { return "", status.Errorf( codes.AlreadyExists, "Existing volume has shared=%v while request is asking for shared=%v", v.GetSpec().GetShared(), spec.GetShared()) } if v.GetSource().GetParent() != source.GetParent() { return "", status.Error(codes.AlreadyExists, "Existing volume has conflicting parent value") } // Return information on existing volume return v.GetId(), nil } // Check if the caller is asking to create a snapshot or for a new volume var id string if len(source.GetParent()) != 0 { // Get parent volume information parent, err := util.VolumeFromName(s.driver(ctx), source.Parent) if err != nil { return "", status.Errorf( codes.NotFound, "unable to get parent volume information: %s", err.Error()) } // Check ownership // Snapshots just need read access if !parent.IsPermitted(ctx, api.Ownership_Read) { return "", status.Errorf(codes.PermissionDenied, "Access denied to volume %s", parent.GetId()) } // Create a snapshot from the parent id, err = s.driver(ctx).Snapshot(parent.GetId(), false, &api.VolumeLocator{ Name: volName, }, false) if err != nil { return "", status.Errorf( codes.Internal, "unable to create snapshot: %s", err.Error()) } // If this is a different owner, make adjust the clone to this owner clone, err := s.Inspect(ctx, &api.SdkVolumeInspectRequest{ VolumeId: id, }) if err != nil { return "", err } newOwnership, updateNeeded := clone.Volume.Spec.GetCloneCreatorOwnership(ctx) if updateNeeded { // Set no authentication so that we can override the ownership ctxNoAuth := context.Background() // New owner for the snapshot, let's make the change _, err := s.Update(ctxNoAuth, &api.SdkVolumeUpdateRequest{ VolumeId: id, Spec: &api.VolumeSpecUpdate{ Ownership: newOwnership, }, }) if err != nil { return "", err } } } else { // New volume, set ownership spec.Ownership = api.OwnershipSetUsernameFromContext(ctx, spec.Ownership) // Create the volume id, err = s.driver(ctx).Create(locator, source, spec) if err != nil { return "", status.Errorf( codes.Internal, "Failed to create volume: %v", err.Error()) } } return id, nil } // Create creates a new volume func (s *VolumeServer) Create( ctx context.Context, req *api.SdkVolumeCreateRequest, ) (*api.SdkVolumeCreateResponse, error) { if s.cluster() == nil || s.driver(ctx) == nil { return nil, status.Error(codes.Unavailable, "Resource has not been initialized") } if len(req.GetName()) == 0 { return nil, status.Error( codes.InvalidArgument, "Must supply a unique name") } else if req.GetSpec() == nil { return nil, status.Error( codes.InvalidArgument, "Must supply spec object") } locator := &api.VolumeLocator{ Name: req.GetName(), VolumeLabels: req.GetLabels(), } source := &api.Source{} // 
Validate/Update given spec according to default storage policy set // In case policy is not set, should fall back to default way // of creating volume spec, err := GetDefaultVolSpecs(ctx, req.GetSpec(), false) if err != nil { return nil, err } // Copy any labels from the spec to the locator locator = locator.MergeVolumeSpecLabels(spec) // Convert node IP to ID if necessary for API calls if err := s.updateReplicaSpecNodeIPstoIds(spec.GetReplicaSet()); err != nil { return nil, status.Errorf(codes.Internal, "Failed to get replicat set information: %v", err) } // Create volume id, err := s.create(ctx, locator, source, spec) if err != nil { return nil, err } s.auditLog(ctx, "volume.create", "Volume %s created", id) return &api.SdkVolumeCreateResponse{ VolumeId: id, }, nil } // Clone creates a new volume from an existing volume func (s *VolumeServer) Clone( ctx context.Context, req *api.SdkVolumeCloneRequest, ) (*api.SdkVolumeCloneResponse, error) { if s.cluster() == nil || s.driver(ctx) == nil { return nil, status.Error(codes.Unavailable, "Resource has not been initialized") } if len(req.GetName()) == 0 { return nil, status.Error( codes.InvalidArgument, "Must supply a uniqe name") } else if len(req.GetParentId()) == 0 { return nil, status.Error( codes.InvalidArgument, "Must parent volume id") } locator := &api.VolumeLocator{ Name: req.GetName(), } source := &api.Source{ Parent: req.GetParentId(), } // Get spec. This also checks if the parend id exists. // This will also check for Ownership_Read access. parentVol, err := s.Inspect(ctx, &api.SdkVolumeInspectRequest{ VolumeId: req.GetParentId(), }) if err != nil { return nil, err } // Create the clone id, err := s.create(ctx, locator, source, parentVol.GetVolume().GetSpec()) if err != nil { return nil, err } s.auditLog(ctx, "volume.clone", "Volume %s created from %s", id, req.GetParentId()) return &api.SdkVolumeCloneResponse{ VolumeId: id, }, nil } // Delete deletes a volume func (s *VolumeServer) Delete( ctx context.Context, req *api.SdkVolumeDeleteRequest, ) (*api.SdkVolumeDeleteResponse, error) { if s.cluster() == nil || s.driver(ctx) == nil { return nil, status.Error(codes.Unavailable, "Resource has not been initialized") } if len(req.GetVolumeId()) == 0 { return nil, status.Error(codes.InvalidArgument, "Must supply volume id") } // If the volume is not found, return OK to be idempotent // This checks access rights also resp, err := s.Inspect(ctx, &api.SdkVolumeInspectRequest{ VolumeId: req.GetVolumeId(), }) if err != nil { if IsErrorNotFound(err) { return &api.SdkVolumeDeleteResponse{}, nil } return nil, err } vol := resp.GetVolume() // Only the owner or the admin can delete if !vol.IsPermitted(ctx, api.Ownership_Admin) { return nil, status.Errorf(codes.PermissionDenied, "Cannot delete volume %v", vol.GetId()) } // Delete the volume err = s.driver(ctx).Delete(req.GetVolumeId()) if err != nil { return nil, status.Errorf( codes.Internal, "Failed to delete volume %s: %v", req.GetVolumeId(), err.Error()) } s.auditLog(ctx, "volume.delete", "Volume %s deleted", req.GetVolumeId()) return &api.SdkVolumeDeleteResponse{}, nil } // InspectWithFilters is a helper function returning information about volumes which match a filter func (s *VolumeServer) InspectWithFilters( ctx context.Context, req *api.SdkVolumeInspectWithFiltersRequest, ) (*api.SdkVolumeInspectWithFiltersResponse, error) { if s.cluster() == nil || s.driver(ctx) == nil { return nil, status.Error(codes.Unavailable, "Resource has not been initialized") } var locator *api.VolumeLocator if 
len(req.GetName()) != 0 || len(req.GetLabels()) != 0 || req.GetOwnership() != nil { locator = &api.VolumeLocator{ Name: req.GetName(), VolumeLabels: req.GetLabels(), Ownership: req.GetOwnership(), } } enumVols, err := s.driver(ctx).Enumerate(locator, nil) if err != nil { return nil, status.Errorf( codes.Internal, "Failed to enumerate volumes: %v", err.Error()) } vols := make([]*api.SdkVolumeInspectResponse, 0, len(enumVols)) for _, vol := range enumVols { // Check access if vol.IsPermitted(ctx, api.Ownership_Read) { // Check if the caller wants more information if req.GetOptions().GetDeep() { resp, err := s.Inspect(ctx, &api.SdkVolumeInspectRequest{ VolumeId: vol.GetId(), Options: req.GetOptions(), }) if IsErrorNotFound(err) { continue } else if err != nil { return nil, err } vols = append(vols, resp) } else { // Caller does not require a deep inspect // Add the object now vols = append(vols, &api.SdkVolumeInspectResponse{ Volume: vol, Name: vol.GetLocator().GetName(), Labels: vol.GetLocator().GetVolumeLabels(), }) } } } return &api.SdkVolumeInspectWithFiltersResponse{ Volumes: vols, }, nil } // Inspect returns information about a volume func (s *VolumeServer) Inspect( ctx context.Context, req *api.SdkVolumeInspectRequest, ) (*api.SdkVolumeInspectResponse, error) { if s.cluster() == nil || s.driver(ctx) == nil { return nil, status.Error(codes.Unavailable, "Resource has not been initialized") } if len(req.GetVolumeId()) == 0 { return nil, status.Error(codes.InvalidArgument, "Must supply volume id") } var v *api.Volume if !req.GetOptions().GetDeep() { vols, err := s.driver(ctx).Enumerate(&api.VolumeLocator{ VolumeIds: []string{req.GetVolumeId()}, }, nil) if err != nil { return nil, status.Errorf( codes.Internal, "Failed to inspect volume %s: %v", req.GetVolumeId(), err) } if len(vols) == 0 { return nil, status.Errorf( codes.NotFound, "Volume id %s not found", req.GetVolumeId()) } v = vols[0] } else { vols, err := s.driver(ctx).Inspect([]string{req.GetVolumeId()}) if err == kvdb.ErrNotFound || (err == nil && len(vols) == 0) { return nil, status.Errorf( codes.NotFound, "Volume id %s not found", req.GetVolumeId()) } else if err != nil { return nil, status.Errorf( codes.Internal, "Failed to inspect volume %s: %v", req.GetVolumeId(), err) } v = vols[0] } // Check ownership if !v.IsPermitted(ctx, api.Ownership_Read) { return nil, status.Errorf(codes.PermissionDenied, "Access denied to volume %s", v.GetId()) } return &api.SdkVolumeInspectResponse{ Volume: v, Name: v.GetLocator().GetName(), Labels: v.GetLocator().GetVolumeLabels(), }, nil } // Enumerate returns a list of volumes func (s *VolumeServer) Enumerate( ctx context.Context, req *api.SdkVolumeEnumerateRequest, ) (*api.SdkVolumeEnumerateResponse, error) { if s.cluster() == nil || s.driver(ctx) == nil { return nil, status.Error(codes.Unavailable, "Resource has not been initialized") } resp, err := s.EnumerateWithFilters( ctx, &api.SdkVolumeEnumerateWithFiltersRequest{}, ) if err != nil { return nil, err } return &api.SdkVolumeEnumerateResponse{ VolumeIds: resp.GetVolumeIds(), }, nil } // EnumerateWithFilters returns a list of volumes for the provided filters func (s *VolumeServer) EnumerateWithFilters( ctx context.Context, req *api.SdkVolumeEnumerateWithFiltersRequest, ) (*api.SdkVolumeEnumerateWithFiltersResponse, error) { if s.cluster() == nil || s.driver(ctx) == nil { return nil, status.Error(codes.Unavailable, "Resource has not been initialized") } var locator *api.VolumeLocator if len(req.GetName()) != 0 || len(req.GetLabels()) != 0 || 
req.GetOwnership() != nil { locator = &api.VolumeLocator{ Name: req.GetName(), VolumeLabels: req.GetLabels(), Ownership: req.GetOwnership(), } } vols, err := s.driver(ctx).Enumerate(locator, nil) if err != nil { return nil, status.Errorf( codes.Internal, "Failed to enumerate volumes: %v", err.Error()) } ids := make([]string, 0) for _, vol := range vols { // Check access if vol.IsPermitted(ctx, api.Ownership_Read) { ids = append(ids, vol.GetId()) } } return &api.SdkVolumeEnumerateWithFiltersResponse{ VolumeIds: ids, }, nil } // Update allows the caller to change values in the volume specification func (s *VolumeServer) Update( ctx context.Context, req *api.SdkVolumeUpdateRequest, ) (*api.SdkVolumeUpdateResponse, error) { if s.cluster() == nil || s.driver(ctx) == nil { return nil, status.Error(codes.Unavailable, "Resource has not been initialized") } if len(req.GetVolumeId()) == 0 { return nil, status.Error(codes.InvalidArgument, "Must supply volume id") } // Get current state // This checks for Read access in ownership resp, err := s.Inspect(ctx, &api.SdkVolumeInspectRequest{ VolumeId: req.GetVolumeId(), }) if err != nil { return nil, err } // Check if the caller can update the volume if !resp.GetVolume().IsPermitted(ctx, api.Ownership_Write) { return nil, status.Errorf(codes.PermissionDenied, "Cannot update volume") } // Merge specs spec := s.mergeVolumeSpecs(resp.GetVolume().GetSpec(), req.GetSpec()) // Update Ownership... carefully // First point to the original ownership spec.Ownership = resp.GetVolume().GetSpec().GetOwnership() // Check if we have been provided an update to the ownership if req.GetSpec().GetOwnership() != nil { if spec.Ownership == nil { spec.Ownership = &api.Ownership{} } user, _ := auth.NewUserInfoFromContext(ctx) if err := spec.Ownership.Update(req.GetSpec().GetOwnership(), user); err != nil { return nil, err } } // Check if labels have been updated var locator *api.VolumeLocator if len(req.GetLabels()) != 0 { locator = &api.VolumeLocator{VolumeLabels: req.GetLabels()} } // Validate/Update given spec according to default storage policy set // to make sure if update does not violates default policy updatedSpec, err := GetDefaultVolSpecs(ctx, spec, true) if err != nil { return nil, err } // Send to driver if err := s.driver(ctx).Set(req.GetVolumeId(), locator, updatedSpec); err != nil { return nil, status.Errorf(codes.Internal, "Failed to update volume: %v", err) } s.auditLog(ctx, "volume.update", "Volume %s updated", req.GetVolumeId()) return &api.SdkVolumeUpdateResponse{}, nil } // Stats returns volume statistics func (s *VolumeServer) Stats( ctx context.Context, req *api.SdkVolumeStatsRequest, ) (*api.SdkVolumeStatsResponse, error) { if s.cluster() == nil || s.driver(ctx) == nil { return nil, status.Error(codes.Unavailable, "Resource has not been initialized") } if len(req.GetVolumeId()) == 0 { return nil, status.Error(codes.InvalidArgument, "Must supply volume id") } // Get access rights if err := s.checkAccessForVolumeId(ctx, req.GetVolumeId(), api.Ownership_Read); err != nil { return nil, err } stats, err := s.driver(ctx).Stats(req.GetVolumeId(), !req.GetNotCumulative()) if err != nil { return nil, status.Errorf( codes.Internal, "Failed to obtain stats for volume %s: %v", req.GetVolumeId(), err.Error()) } return &api.SdkVolumeStatsResponse{ Stats: stats, }, nil } func (s *VolumeServer) CapacityUsage( ctx context.Context, req *api.SdkVolumeCapacityUsageRequest, ) (*api.SdkVolumeCapacityUsageResponse, error) { if len(req.GetVolumeId()) == 0 { return nil, 
status.Error(codes.InvalidArgument, "Must supply volume id") } // Get access rights if err := s.checkAccessForVolumeId(ctx, req.GetVolumeId(), api.Ownership_Read); err != nil { return nil, err } dResp, err := s.driver(ctx).CapacityUsage(req.GetVolumeId()) if err != nil { return nil, status.Errorf( codes.Internal, "Failed to obtain stats for volume %s: %v", req.GetVolumeId(), err.Error()) } resp := &api.SdkVolumeCapacityUsageResponse{} resp.CapacityUsageInfo = &api.CapacityUsageInfo{} resp.CapacityUsageInfo.ExclusiveBytes = dResp.CapacityUsageInfo.ExclusiveBytes resp.CapacityUsageInfo.SharedBytes = dResp.CapacityUsageInfo.SharedBytes resp.CapacityUsageInfo.TotalBytes = dResp.CapacityUsageInfo.TotalBytes if dResp.Error != nil { if dResp.Error == volume.ErrAborted { return resp, status.Errorf( codes.Aborted, "Failed to obtain stats for volume %s: %v", req.GetVolumeId(), volume.ErrAborted.Error()) } else if dResp.Error == volume.ErrNotSupported { return resp, status.Errorf( codes.Unimplemented, "Failed to obtain stats for volume %s: %v", req.GetVolumeId(), volume.ErrNotSupported.Error()) } } return resp, nil } func (s *VolumeServer) mergeVolumeSpecs(vol *api.VolumeSpec, req *api.VolumeSpecUpdate) *api.VolumeSpec { spec := &api.VolumeSpec{} spec.Shared = setSpecBool(vol.GetShared(), req.GetShared(), req.GetSharedOpt()) spec.Sharedv4 = setSpecBool(vol.GetSharedv4(), req.GetSharedv4(), req.GetSharedv4Opt()) spec.Sticky = setSpecBool(vol.GetSticky(), req.GetSticky(), req.GetStickyOpt()) spec.Journal = setSpecBool(vol.GetJournal(), req.GetJournal(), req.GetJournalOpt()) spec.Nodiscard = setSpecBool(vol.GetNodiscard(), req.GetNodiscard(), req.GetNodiscardOpt()) // fastpath extensions if req.GetFastpathOpt() != nil { spec.FpPreference = req.GetFastpath() } else { spec.FpPreference = vol.GetFpPreference() } if req.GetIoStrategy() != nil { spec.IoStrategy = req.GetIoStrategy() } else { spec.IoStrategy = vol.GetIoStrategy() } // Cos if req.GetCosOpt() != nil { spec.Cos = req.GetCos() } else { spec.Cos = vol.GetCos() } // Passphrase if req.GetPassphraseOpt() != nil { spec.Passphrase = req.GetPassphrase() } else { spec.Passphrase = vol.GetPassphrase() } // Snapshot schedule as a string if req.GetSnapshotScheduleOpt() != nil { spec.SnapshotSchedule = req.GetSnapshotSchedule() } else { spec.SnapshotSchedule = vol.GetSnapshotSchedule() } // Scale if req.GetScaleOpt() != nil { spec.Scale = req.GetScale() } else { spec.Scale = vol.GetScale() } // Snapshot Interval if req.GetSnapshotIntervalOpt() != nil { spec.SnapshotInterval = req.GetSnapshotInterval() } else { spec.SnapshotInterval = vol.GetSnapshotInterval() } // Io Profile if req.GetIoProfileOpt() != nil { spec.IoProfile = req.GetIoProfile() } else { spec.IoProfile = vol.GetIoProfile() } // GroupID if req.GetGroupOpt() != nil { spec.Group = req.GetGroup() } else { spec.Group = vol.GetGroup() } // Size if req.GetSizeOpt() != nil { spec.Size = req.GetSize() } else { spec.Size = vol.GetSize() } // ReplicaSet if req.GetReplicaSet() != nil { spec.ReplicaSet = req.GetReplicaSet() } else { spec.ReplicaSet = vol.GetReplicaSet() } // HA Level if req.GetHaLevelOpt() != nil { spec.HaLevel = req.GetHaLevel() } else { spec.HaLevel = vol.GetHaLevel() } // Queue depth if req.GetQueueDepthOpt() != nil { spec.QueueDepth = req.GetQueueDepth() } else { spec.QueueDepth = vol.GetQueueDepth() } // ExportSpec if req.GetExportSpec() != nil { spec.ExportSpec = req.GetExportSpec() } else { spec.ExportSpec = vol.GetExportSpec() } // Xattr if req.GetXattrOpt() != nil { spec.Xattr = 
req.GetXattr() } else { spec.Xattr = vol.GetXattr() } // ScanPolicy if req.GetScanPolicy() != nil { spec.ScanPolicy = req.GetScanPolicy() } else { spec.ScanPolicy = vol.GetScanPolicy() } // MountOptions if req.GetMountOptSpec() != nil { spec.MountOptions = req.GetMountOptSpec() } else { spec.MountOptions = vol.GetMountOptions() } // Sharedv4MountOptions if req.GetSharedv4MountOptSpec() != nil { spec.Sharedv4MountOptions = req.GetSharedv4MountOptSpec() } else { spec.Sharedv4MountOptions = vol.GetSharedv4MountOptions() } // ProxyWrite spec.ProxyWrite = setSpecBool(vol.GetProxyWrite(), req.GetProxyWrite(), req.GetProxyWriteOpt()) // ProxySpec if req.GetProxySpec() != nil { spec.ProxySpec = req.GetProxySpec() } else { spec.ProxySpec = vol.GetProxySpec() } // Sharedv4ServiceSpec if req.GetSharedv4ServiceSpec() != nil { spec.Sharedv4ServiceSpec = req.GetSharedv4ServiceSpec() } else { spec.Sharedv4ServiceSpec = vol.GetSharedv4ServiceSpec() } // Sharedv4Spec if req.GetSharedv4Spec() != nil { spec.Sharedv4Spec = req.GetSharedv4Spec() } else { spec.Sharedv4Spec = vol.GetSharedv4Spec() } // AutoFstrim spec.AutoFstrim = setSpecBool(vol.GetAutoFstrim(), req.GetAutoFstrim(), req.GetAutoFstrimOpt()) // ProxySpec if req.GetIoThrottle() != nil { spec.IoThrottle = req.GetIoThrottle() } else { spec.IoThrottle = vol.GetIoThrottle() } return spec } func (s *VolumeServer) nodeIPtoIds(nodes []string) ([]string, error) { nodeIds := make([]string, 0) for _, idIp := range nodes { if idIp != "" { id, err := s.cluster().GetNodeIdFromIp(idIp) if err != nil { return nodeIds, err } nodeIds = append(nodeIds, id) } } return nodeIds, nil } // Convert any replica set node values which are IPs to the corresponding Node ID. // Update the replica set node list. func (s *VolumeServer) updateReplicaSpecNodeIPstoIds(rspecRef *api.ReplicaSet) error { if rspecRef != nil && len(rspecRef.Nodes) > 0 { nodeIds, err := s.nodeIPtoIds(rspecRef.Nodes) if err != nil { return err } if len(nodeIds) > 0 { rspecRef.Nodes = nodeIds } } return nil } func setSpecBool(current, req bool, reqSet interface{}) bool { if reqSet != nil { return req } return current } // GetDefaultVolSpecs returns volume spec merged with default storage policy applied if any func GetDefaultVolSpecs( ctx context.Context, spec *api.VolumeSpec, isUpdate bool, ) (*api.VolumeSpec, error) { storPolicy, err := policy.Inst() if err != nil { return nil, status.Errorf(codes.Internal, "Unable to get storage policy instance %v", err) } var policy *api.SdkStoragePolicy // check if custom policy passed with volume if spec.GetStoragePolicy() != "" { inspReq := &api.SdkOpenStoragePolicyInspectRequest{ // name of storage policy specified in volSpecs Name: spec.GetStoragePolicy(), } // inspect will make sure user will atleast have read access customPolicy, customErr := storPolicy.Inspect(ctx, inspReq) if customErr != nil { return nil, customErr } policy = customPolicy.GetStoragePolicy() } else { // check if default storage policy is set defPolicy, err := storPolicy.DefaultInspect(context.Background(), &api.SdkOpenStoragePolicyDefaultInspectRequest{}) if err != nil { // err means there is policy stored, but we are not able to retrive it // hence we are not allowing volume create operation return nil, status.Errorf(codes.Internal, "Unable to get default policy details %v", err) } else if defPolicy.GetStoragePolicy() == nil { // no default storage policy found return spec, nil } policy = defPolicy.GetStoragePolicy() } // track volume created using storage policy spec.StoragePolicy = 
policy.GetName() // check if volume update request, if allowupdate is set // return spec received as it is if isUpdate && policy.GetAllowUpdate() { if !policy.IsPermitted(ctx, api.Ownership_Write) { return nil, status.Errorf(codes.PermissionDenied, "Cannot use storage policy %v", policy.GetName()) } return spec, nil } return mergeVolumeSpecsPolicy(spec, policy.GetPolicy(), policy.GetForce()) } func mergeVolumeSpecsPolicy(vol *api.VolumeSpec, req *api.VolumeSpecPolicy, isValidate bool) (*api.VolumeSpec, error) { errMsg := fmt.Errorf("Storage Policy Violation, valid specs are : %v", req.String()) spec := vol // Shared if req.GetSharedOpt() != nil { if isValidate && vol.GetShared() != req.GetShared() { return nil, errMsg } spec.Shared = req.GetShared() } //sharedv4 if req.GetSharedv4Opt() != nil { if isValidate && vol.GetSharedv4() != req.GetSharedv4() { return vol, errMsg } spec.Sharedv4 = req.GetSharedv4() } //sticky if req.GetStickyOpt() != nil { if isValidate && vol.GetSticky() != req.GetSticky() { return vol, errMsg } spec.Sticky = req.GetSticky() } //journal if req.GetJournalOpt() != nil { if isValidate && vol.GetJournal() != req.GetJournal() { return vol, errMsg } spec.Journal = req.GetJournal() } // encrypt if req.GetEncryptedOpt() != nil { if isValidate && vol.GetEncrypted() != req.GetEncrypted() { return vol, errMsg } spec.Encrypted = req.GetEncrypted() } // cos level if req.GetCosOpt() != nil { if isValidate && vol.GetCos() != req.GetCos() { return vol, errMsg } spec.Cos = req.GetCos() } // passphrase if req.GetPassphraseOpt() != nil { if isValidate && vol.GetPassphrase() != req.GetPassphrase() { return vol, errMsg } spec.Passphrase = req.GetPassphrase() } // IO profile if req.GetIoProfileOpt() != nil { if isValidate && req.GetIoProfile() != vol.GetIoProfile() { return vol, errMsg } spec.IoProfile = req.GetIoProfile() } // Group if req.GetGroupOpt() != nil { if isValidate && req.GetGroup() != vol.GetGroup() { return vol, errMsg } spec.Group = req.GetGroup() } // Replicaset if req.GetReplicaSet() != nil { if isValidate && req.GetReplicaSet() != vol.GetReplicaSet() { return vol, errMsg } spec.ReplicaSet = req.GetReplicaSet() } // QueueDepth if req.GetQueueDepthOpt() != nil { if isValidate && req.GetQueueDepth() != vol.GetQueueDepth() { return vol, errMsg } spec.QueueDepth = req.GetQueueDepth() } // SnapshotSchedule if req.GetSnapshotScheduleOpt() != nil { if isValidate && req.GetSnapshotSchedule() != vol.GetSnapshotSchedule() { return vol, errMsg } spec.SnapshotSchedule = req.GetSnapshotSchedule() } // aggr level if req.GetAggregationLevelOpt() != nil { if isValidate && req.GetAggregationLevel() != vol.GetAggregationLevel() { return vol, errMsg } spec.AggregationLevel = req.GetAggregationLevel() } // Size if req.GetSizeOpt() != nil { isCorrect := validateMinMaxParams(uint64(req.GetSize()), uint64(vol.Size), req.GetSizeOperator()) if !isCorrect { if isValidate { return vol, errMsg } spec.Size = req.GetSize() } } // HA Level if req.GetHaLevelOpt() != nil { isCorrect := validateMinMaxParams(uint64(req.GetHaLevel()), uint64(vol.HaLevel), req.GetHaLevelOperator()) if !isCorrect { if isValidate { return vol, errMsg } spec.HaLevel = req.GetHaLevel() } } // Scale if req.GetScaleOpt() != nil { isCorrect := validateMinMaxParams(uint64(req.GetScale()), uint64(vol.Scale), req.GetScaleOperator()) if !isCorrect { if isValidate { return vol, errMsg } spec.Scale = req.GetScale() } } // Snapshot Interval if req.GetSnapshotIntervalOpt() != nil { isCorrect := 
validateMinMaxParams(uint64(req.GetSnapshotInterval()), uint64(vol.SnapshotInterval), req.GetSnapshotIntervalOperator()) if !isCorrect { if isValidate { return vol, errMsg } spec.SnapshotInterval = req.GetSnapshotInterval() } } // Nodiscard if req.GetNodiscardOpt() != nil { if isValidate && vol.GetNodiscard() != req.GetNodiscard() { return vol, errMsg } spec.Nodiscard = req.GetNodiscard() } // IoStrategy if req.GetIoStrategy() != nil { if isValidate && vol.GetIoStrategy() != req.GetIoStrategy() { return vol, errMsg } spec.IoStrategy = req.GetIoStrategy() } // ExportSpec if req.GetExportSpec() != nil { if isValidate && vol.GetExportSpec() != req.GetExportSpec() { return vol, errMsg } if exportPolicy := vol.GetExportSpec(); exportPolicy == nil { spec.ExportSpec = req.GetExportSpec() } else { // If the spec has an ExportSpec then only modify the fields that came in // the request. reqExportSpec := req.GetExportSpec() if reqExportSpec.ExportProtocol != api.ExportProtocol_INVALID { spec.ExportSpec.ExportProtocol = reqExportSpec.ExportProtocol } if len(reqExportSpec.ExportOptions) != 0 { if reqExportSpec.ExportOptions == api.SpecExportOptionsEmpty { spec.ExportSpec.ExportOptions = "" } else { spec.ExportSpec.ExportOptions = reqExportSpec.ExportOptions } } } } if req.GetProxySpecOpt() != nil { if isValidate && vol.GetProxySpec() != req.GetProxySpec() { return vol, errMsg } spec.ProxySpec = req.GetProxySpec() } // ScanPolicy if req.GetScanPolicy() != nil { if isValidate && vol.GetScanPolicy() != req.GetScanPolicy() { return vol, errMsg } spec.ScanPolicy = req.GetScanPolicy() } // ProxyWrite if req.GetProxyWriteOpt() != nil { if isValidate && vol.GetProxyWrite() != req.GetProxyWrite() { return vol, errMsg } spec.ProxyWrite = req.GetProxyWrite() } // Sharedv4ServiceSpec if req.GetSharedv4ServiceSpecOpt() != nil { if isValidate && vol.GetSharedv4ServiceSpec() != req.GetSharedv4ServiceSpec() { return vol, errMsg } spec.Sharedv4ServiceSpec = req.GetSharedv4ServiceSpec() } // FpPreference if req.GetFastpathOpt() != nil { if isValidate && vol.GetFpPreference() != req.GetFastpath() { return vol, errMsg } spec.FpPreference = req.GetFastpath() } // IoThrottle if req.GetIoThrottleOpt() != nil { if isValidate && vol.GetIoThrottle() != req.GetIoThrottle() { return vol, errMsg } spec.IoThrottle = req.GetIoThrottle() } // AutoFstrim if req.GetAutoFstrimOpt() != nil { if isValidate && vol.GetAutoFstrim() != req.GetAutoFstrim() { return vol, errMsg } spec.AutoFstrim = req.GetAutoFstrim() } // Sharedv4Spec if req.GetSharedv4SpecOpt() != nil { if isValidate && vol.GetSharedv4Spec() != req.GetSharedv4Spec() { return vol, errMsg } spec.Sharedv4Spec = req.GetSharedv4Spec() } logrus.Debugf("Updated VolumeSpecs %v", spec) return spec, nil } // VolumeCatalog returns a list of volumes for the provided filters func (s *VolumeServer) VolumeCatalog( ctx context.Context, req *api.SdkVolumeCatalogRequest, ) (*api.SdkVolumeCatalogResponse, error) { if s.cluster() == nil || s.driver(ctx) == nil { return nil, status.Error(codes.Unavailable, "Resource has not been initialized") } if len(req.GetVolumeId()) == 0 { return nil, status.Error(codes.Unavailable, "VolumeId not provided.") } catalog, err := s.driver(ctx).Catalog(req.GetVolumeId(), req.GetPath(), req.GetDepth()) if err != nil { return nil, status.Errorf( codes.Internal, "Failed to get the catalog: %v", err.Error()) } return &api.SdkVolumeCatalogResponse{ Catalog: &catalog, }, nil } func validateMinMaxParams(policy uint64, specified uint64, op api.VolumeSpecPolicy_PolicyOp) 
bool { switch op { case api.VolumeSpecPolicy_Maximum: if specified > policy { return false } case api.VolumeSpecPolicy_Minimum: if specified < policy { return false } default: if specified != policy { return false } } return true }
1
8,991
Was it not possible to do this at the filter or porx driver layer? We typically try to avoid driver-specific things in the SDK layer.
libopenstorage-openstorage
go
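For readers skimming this record, the bound check that the Go file above implements in validateMinMaxParams can be summarized by the following standalone sketch. It is written in Python purely for illustration and is not part of the record; the operator names mirror the Go constants, and everything else (function name, example values) is hypothetical.

# Illustrative sketch of the validateMinMaxParams bound check from the record above.
# "Maximum" means the specified value must not exceed the policy value,
# "Minimum" means it must not fall below it, and any other operator requires equality.
def validate_min_max(policy: int, specified: int, op: str) -> bool:
    if op == "Maximum":
        return specified <= policy
    if op == "Minimum":
        return specified >= policy
    return specified == policy

# Example: a policy that caps the HA level at 3 rejects a requested level of 5.
print(validate_min_max(3, 5, "Maximum"))   # False
print(validate_min_max(3, 2, "Maximum"))   # True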
@@ -1,3 +1,19 @@ +/* + * Copyright 2012 LinkedIn Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + package azkaban.user; /**
1
package azkaban.user; /** * Lambda interface for parsing user config file. */ public interface ParseConfigFile { void parseConfigFile(); }
1
17,973
Please update the year. It can be set up in an IntelliJ template.
azkaban-azkaban
java
@@ -144,6 +144,16 @@ public class PreventTokenLoggingTests { passSlf4j("log.trace(message);"); } + @Test + public void testSlf4jTraceNullMessageNoArgs() { + passSlf4j("log.trace(null);"); + } + + @Test + public void testSlf4jTraceNullArg() { + passSlf4j("log.trace(message, arg1, null);"); + } + @Test public void testSlf4jDebug() { passSlf4j("log.debug(message, arg1);");
1
/* * (c) Copyright 2019 Palantir Technologies Inc. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.palantir.baseline.errorprone; import com.google.errorprone.CompilationTestHelper; import org.junit.Before; import org.junit.Test; public class PreventTokenLoggingTests { private CompilationTestHelper compilationHelper; @Before public void before() { compilationHelper = CompilationTestHelper.newInstance(PreventTokenLogging.class, getClass()); } @Test public void testSlf4jAuthHeaderTrace() { failSlf4j("log.trace(message, authHeader);"); } @Test public void testSlf4jAuthHeaderTraceMultipleArgs() { failSlf4j("log.trace(message, arg1, authHeader);"); } @Test public void testSlf4jAuthHeaderDebug() { failSlf4j("log.debug(message, authHeader);"); } @Test public void testSlf4jAuthHeaderDebugMultipleArgs() { failSlf4j("log.debug(message, arg1, authHeader);"); } @Test public void testSlf4jAuthHeaderInfo() { failSlf4j("log.info(message, authHeader);"); } @Test public void testSlf4jAuthHeaderInfoMultipleArgs() { failSlf4j("log.info(message, arg1, authHeader);"); } @Test public void testSlf4jAuthHeaderWarn() { failSlf4j("log.warn(message, authHeader);"); } @Test public void testSlf4jAuthHeaderWarnMultipleArgs() { failSlf4j("log.warn(message, arg1, authHeader);"); } @Test public void testSlf4jAuthHeaderError() { failSlf4j("log.error(message, authHeader);"); } @Test public void testSlf4jAuthHeaderErrorMultipleArgs() { failSlf4j("log.error(message, arg1, authHeader);"); } @Test public void testSlf4jBearerTokenTrace() { failSlf4j("log.trace(message, bearerToken);"); } @Test public void testSlf4jBearerTokenTraceMultipleArgs() { failSlf4j("log.trace(message, arg1, bearerToken);"); } @Test public void testSlf4jBearerTokenDebug() { failSlf4j("log.debug(message, bearerToken);"); } @Test public void testSlf4jBearerTokenDebugMultipleArgs() { failSlf4j("log.debug(message, arg1, bearerToken);"); } @Test public void testSlf4jBearerTokenInfo() { failSlf4j("log.info(message, bearerToken);"); } @Test public void testSlf4jBearerTokenInfoMultipleArgs() { failSlf4j("log.info(message, arg1, bearerToken);"); } @Test public void testSlf4jBearerTokenWarn() { failSlf4j("log.warn(message, bearerToken);"); } @Test public void testSlf4jBearerTokenWarnMultipleArgs() { failSlf4j("log.warn(message, arg1, bearerToken);"); } @Test public void testSlf4jBearerTokenError() { failSlf4j("log.error(message, bearerToken);"); } @Test public void testSlf4jBearerTokenErrorMultipleArgs() { failSlf4j("log.error(message, arg1, bearerToken);"); } @Test public void testSlf4jTrace() { passSlf4j("log.trace(message, arg1);"); } @Test public void testSlf4jTraceMultipleArgs() { passSlf4j("log.trace(message, arg1, arg2);"); } @Test public void testSlf4jTraceNoArgs() { passSlf4j("log.trace(message);"); } @Test public void testSlf4jDebug() { passSlf4j("log.debug(message, arg1);"); } @Test public void testSlf4jDebugMultipleArgs() { passSlf4j("log.debug(message, arg1, arg2);"); } @Test public void testSlf4jDebugNoArgs() { 
passSlf4j("log.debug(message);"); } @Test public void testSlf4jInfo() { passSlf4j("log.info(message, arg1);"); } @Test public void testSlf4jInfoMultipleArgs() { passSlf4j("log.info(message, arg1, arg2);"); } @Test public void testSlf4jInfoNoArgs() { passSlf4j("log.info(message);"); } @Test public void testSlf4jWarn() { passSlf4j("log.warn(message, arg1);"); } @Test public void testSlf4jWarnMultipleArgs() { passSlf4j("log.warn(message, arg1, arg2);"); } @Test public void testSlf4jWarnNoArgs() { passSlf4j("log.warn(message);"); } @Test public void testSlf4jError() { passSlf4j("log.error(message, arg1);"); } @Test public void testSlf4jErrorMultipleArgs() { passSlf4j("log.error(message, arg1, arg2);"); } @Test public void testSlf4jErrorNoArgs() { passSlf4j("log.error(message);"); } @Test public void testSafeArgAuthHeader() { failLogSafe("SafeArg.of(name, authHeader);"); } @Test public void testUnsafeArgAuthHeader() { failLogSafe("UnsafeArg.of(name, bearerToken);"); } @Test public void testSafeArgBearerToken() { failLogSafe("SafeArg.of(name, authHeader);"); } @Test public void testUnsafeArgBearerToken() { failLogSafe("UnsafeArg.of(name, bearerToken);"); } @Test public void testSafeArg() { passLogSafe("SafeArg.of(name, value);"); } @Test public void testUnsafeArg() { passLogSafe("UnsafeArg.of(name, value);"); } private void passSlf4j(String statement) { compilationHelper.addSourceLines( "Test.java", "import org.slf4j.Logger;", "import org.slf4j.LoggerFactory;", "import com.palantir.tokens.auth.AuthHeader;", "import com.palantir.tokens.auth.BearerToken;", "class Test {", " private static final Logger log = LoggerFactory.getLogger(Test.class);", " void f(AuthHeader authHeader, BearerToken bearerToken, String message, Object arg1, Object arg2) {", " " + statement, " }", "}") .doTest(); } private void failSlf4j(String statement) { compilationHelper.addSourceLines( "Test.java", "import org.slf4j.Logger;", "import org.slf4j.LoggerFactory;", "import com.palantir.tokens.auth.AuthHeader;", "import com.palantir.tokens.auth.BearerToken;", "class Test {", " private static final Logger log = LoggerFactory.getLogger(Test.class);", " void f(AuthHeader authHeader, BearerToken bearerToken, String message, Object arg1, Object arg2) {", " // BUG: Diagnostic contains: not allowed to be logged", " " + statement, " }", "}") .doTest(); } private void passLogSafe(String statement) { compilationHelper.addSourceLines( "Test.java", "import com.palantir.logsafe.SafeArg;", "import com.palantir.logsafe.UnsafeArg;", "import com.palantir.tokens.auth.AuthHeader;", "import com.palantir.tokens.auth.BearerToken;", "class Test {", " void f(AuthHeader authHeader, BearerToken bearerToken, String name, Object value) {", " " + statement, " }", "}") .doTest(); } private void failLogSafe(String statement) { compilationHelper.addSourceLines( "Test.java", "import com.palantir.logsafe.SafeArg;", "import com.palantir.logsafe.UnsafeArg;", "import com.palantir.tokens.auth.AuthHeader;", "import com.palantir.tokens.auth.BearerToken;", "class Test {", " void f(AuthHeader authHeader, BearerToken bearerToken, String name, Object value) {", " // BUG: Diagnostic contains: not allowed to be logged", " " + statement, " }", "}") .doTest(); } }
1
7,060
What's the use case for this?
palantir-gradle-baseline
java
@@ -1281,6 +1281,9 @@ class _Frame(object): raise ValueError("Grouper for '{}' not 1-dimensional".format(type(by))) if not len(by): raise ValueError('No group keys passed!') + if not isinstance(as_index, bool): + raise TypeError('as_index must be an boolean; however, ' + 'got [%s]' % type(as_index)) if isinstance(df_or_s, DataFrame): df = df_or_s # type: DataFrame col_by = [_resolve_col(df, col_or_s) for col_or_s in by]
1
# # Copyright (C) 2019 Databricks, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ A base class to be monkey-patched to DataFrame/Column to behave similar to pandas DataFrame/Series. """ import warnings from collections import Counter from collections.abc import Iterable from distutils.version import LooseVersion from functools import reduce import numpy as np import pandas as pd from pyspark import sql as spark from pyspark.sql import functions as F from pyspark.sql.readwriter import OptionUtils from pyspark.sql.types import DataType, DoubleType, FloatType from databricks import koalas as ks # For running doctests and reference resolution in PyCharm. from databricks.koalas.indexing import AtIndexer, iAtIndexer, iLocIndexer, LocIndexer from databricks.koalas.internal import _InternalFrame, NATURAL_ORDER_COLUMN_NAME from databricks.koalas.utils import validate_arguments_and_invoke_function, scol_for from databricks.koalas.window import Rolling, Expanding class _Frame(object): """ The base class for both DataFrame and Series. """ def __init__(self, internal: _InternalFrame): self._internal = internal # type: _InternalFrame # TODO: add 'axis' parameter def cummin(self, skipna: bool = True): """ Return cumulative minimum over a DataFrame or Series axis. Returns a DataFrame or Series of the same size containing the cumulative minimum. .. note:: the current implementation of cummin uses Spark's Window without specifying partition specification. This leads to move all data into single partition in single machine and could cause serious performance degradation. Avoid this method against very large dataset. Parameters ---------- skipna : boolean, default True Exclude NA/null values. If an entire row/column is NA, the result will be NA. Returns ------- DataFrame or Series See Also -------- DataFrame.min : Return the minimum over DataFrame axis. DataFrame.cummax : Return cumulative maximum over DataFrame axis. DataFrame.cummin : Return cumulative minimum over DataFrame axis. DataFrame.cumsum : Return cumulative sum over DataFrame axis. Series.min : Return the minimum over Series axis. Series.cummax : Return cumulative maximum over Series axis. Series.cummin : Return cumulative minimum over Series axis. Series.cumsum : Return cumulative sum over Series axis. Series.cumprod : Return cumulative product over Series axis. Examples -------- >>> df = ks.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB')) >>> df A B 0 2.0 1.0 1 3.0 NaN 2 1.0 0.0 By default, iterates over rows and finds the minimum in each column. >>> df.cummin() A B 0 2.0 1.0 1 2.0 NaN 2 1.0 0.0 It works identically in Series. >>> df.A.cummin() 0 2.0 1 2.0 2 1.0 Name: A, dtype: float64 """ return self._apply_series_op(lambda kser: kser._cum(F.min, skipna)) # type: ignore # TODO: add 'axis' parameter def cummax(self, skipna: bool = True): """ Return cumulative maximum over a DataFrame or Series axis. Returns a DataFrame or Series of the same size containing the cumulative maximum. .. 
note:: the current implementation of cummax uses Spark's Window without specifying partition specification. This leads to move all data into single partition in single machine and could cause serious performance degradation. Avoid this method against very large dataset. Parameters ---------- skipna : boolean, default True Exclude NA/null values. If an entire row/column is NA, the result will be NA. Returns ------- DataFrame or Series See Also -------- DataFrame.max : Return the maximum over DataFrame axis. DataFrame.cummax : Return cumulative maximum over DataFrame axis. DataFrame.cummin : Return cumulative minimum over DataFrame axis. DataFrame.cumsum : Return cumulative sum over DataFrame axis. DataFrame.cumprod : Return cumulative product over DataFrame axis. Series.max : Return the maximum over Series axis. Series.cummax : Return cumulative maximum over Series axis. Series.cummin : Return cumulative minimum over Series axis. Series.cumsum : Return cumulative sum over Series axis. Series.cumprod : Return cumulative product over Series axis. Examples -------- >>> df = ks.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB')) >>> df A B 0 2.0 1.0 1 3.0 NaN 2 1.0 0.0 By default, iterates over rows and finds the maximum in each column. >>> df.cummax() A B 0 2.0 1.0 1 3.0 NaN 2 3.0 1.0 It works identically in Series. >>> df.B.cummax() 0 1.0 1 NaN 2 1.0 Name: B, dtype: float64 """ return self._apply_series_op(lambda kser: kser._cum(F.max, skipna)) # type: ignore # TODO: add 'axis' parameter def cumsum(self, skipna: bool = True): """ Return cumulative sum over a DataFrame or Series axis. Returns a DataFrame or Series of the same size containing the cumulative sum. .. note:: the current implementation of cumsum uses Spark's Window without specifying partition specification. This leads to move all data into single partition in single machine and could cause serious performance degradation. Avoid this method against very large dataset. Parameters ---------- skipna : boolean, default True Exclude NA/null values. If an entire row/column is NA, the result will be NA. Returns ------- DataFrame or Series See Also -------- DataFrame.sum : Return the sum over DataFrame axis. DataFrame.cummax : Return cumulative maximum over DataFrame axis. DataFrame.cummin : Return cumulative minimum over DataFrame axis. DataFrame.cumsum : Return cumulative sum over DataFrame axis. DataFrame.cumprod : Return cumulative product over DataFrame axis. Series.sum : Return the sum over Series axis. Series.cummax : Return cumulative maximum over Series axis. Series.cummin : Return cumulative minimum over Series axis. Series.cumsum : Return cumulative sum over Series axis. Series.cumprod : Return cumulative product over Series axis. Examples -------- >>> df = ks.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB')) >>> df A B 0 2.0 1.0 1 3.0 NaN 2 1.0 0.0 By default, iterates over rows and finds the sum in each column. >>> df.cumsum() A B 0 2.0 1.0 1 5.0 NaN 2 6.0 1.0 It works identically in Series. >>> df.A.cumsum() 0 2.0 1 5.0 2 6.0 Name: A, dtype: float64 """ return self._apply_series_op(lambda kser: kser._cum(F.sum, skipna)) # type: ignore # TODO: add 'axis' parameter # TODO: use pandas_udf to support negative values and other options later # other window except unbounded ones is supported as of Spark 3.0. def cumprod(self, skipna: bool = True): """ Return cumulative product over a DataFrame or Series axis. Returns a DataFrame or Series of the same size containing the cumulative product. .. 
note:: the current implementation of cumprod uses Spark's Window without specifying partition specification. This leads to move all data into single partition in single machine and could cause serious performance degradation. Avoid this method against very large dataset. .. note:: unlike pandas', Koalas' emulates cumulative product by ``exp(sum(log(...)))`` trick. Therefore, it only works for positive numbers. Parameters ---------- skipna : boolean, default True Exclude NA/null values. If an entire row/column is NA, the result will be NA. Returns ------- DataFrame or Series See Also -------- DataFrame.cummax : Return cumulative maximum over DataFrame axis. DataFrame.cummin : Return cumulative minimum over DataFrame axis. DataFrame.cumsum : Return cumulative sum over DataFrame axis. DataFrame.cumprod : Return cumulative product over DataFrame axis. Series.cummax : Return cumulative maximum over Series axis. Series.cummin : Return cumulative minimum over Series axis. Series.cumsum : Return cumulative sum over Series axis. Series.cumprod : Return cumulative product over Series axis. Raises ------ Exception : If the values is equal to or lower than 0. Examples -------- >>> df = ks.DataFrame([[2.0, 1.0], [3.0, None], [4.0, 10.0]], columns=list('AB')) >>> df A B 0 2.0 1.0 1 3.0 NaN 2 4.0 10.0 By default, iterates over rows and finds the sum in each column. >>> df.cumprod() A B 0 2.0 1.0 1 6.0 NaN 2 24.0 10.0 It works identically in Series. >>> df.A.cumprod() 0 2.0 1 6.0 2 24.0 Name: A, dtype: float64 """ return self._apply_series_op(lambda kser: kser._cumprod(skipna)) # type: ignore def get_dtype_counts(self): """ Return counts of unique dtypes in this object. .. deprecated:: 0.14.0 Returns ------- dtype : pd.Series Series with the count of columns with each dtype. See Also -------- dtypes : Return the dtypes in this object. Examples -------- >>> a = [['a', 1, 1], ['b', 2, 2], ['c', 3, 3]] >>> df = ks.DataFrame(a, columns=['str', 'int1', 'int2']) >>> df str int1 int2 0 a 1 1 1 b 2 2 2 c 3 3 >>> df.get_dtype_counts().sort_values() object 1 int64 2 dtype: int64 >>> df.str.get_dtype_counts().sort_values() object 1 dtype: int64 """ warnings.warn( "`get_dtype_counts` has been deprecated and will be " "removed in a future version. For DataFrames use " "`.dtypes.value_counts()", FutureWarning) if not isinstance(self.dtypes, Iterable): dtypes = [self.dtypes] else: dtypes = self.dtypes return pd.Series(dict(Counter([d.name for d in list(dtypes)]))) def pipe(self, func, *args, **kwargs): r""" Apply func(self, \*args, \*\*kwargs). Parameters ---------- func : function function to apply to the DataFrame. ``args``, and ``kwargs`` are passed into ``func``. Alternatively a ``(callable, data_keyword)`` tuple where ``data_keyword`` is a string indicating the keyword of ``callable`` that expects the DataFrames. args : iterable, optional positional arguments passed into ``func``. kwargs : mapping, optional a dictionary of keyword arguments passed into ``func``. Returns ------- object : the return type of ``func``. Notes ----- Use ``.pipe`` when chaining together functions that expect Series, DataFrames or GroupBy objects. For example, given >>> df = ks.DataFrame({'category': ['A', 'A', 'B'], ... 'col1': [1, 2, 3], ... 'col2': [4, 5, 6]}, ... columns=['category', 'col1', 'col2']) >>> def keep_category_a(df): ... return df[df['category'] == 'A'] >>> def add_one(df, column): ... return df.assign(col3=df[column] + 1) >>> def multiply(df, column1, column2): ... 
return df.assign(col4=df[column1] * df[column2]) instead of writing >>> multiply(add_one(keep_category_a(df), column="col1"), column1="col2", column2="col3") category col1 col2 col3 col4 0 A 1 4 2 8 1 A 2 5 3 15 You can write >>> (df.pipe(keep_category_a) ... .pipe(add_one, column="col1") ... .pipe(multiply, column1="col2", column2="col3") ... ) category col1 col2 col3 col4 0 A 1 4 2 8 1 A 2 5 3 15 If you have a function that takes the data as (say) the second argument, pass a tuple indicating which keyword expects the data. For example, suppose ``f`` takes its data as ``df``: >>> def multiply_2(column1, df, column2): ... return df.assign(col4=df[column1] * df[column2]) Then you can write >>> (df.pipe(keep_category_a) ... .pipe(add_one, column="col1") ... .pipe((multiply_2, 'df'), column1="col2", column2="col3") ... ) category col1 col2 col3 col4 0 A 1 4 2 8 1 A 2 5 3 15 You can use lambda as wel >>> ks.Series([1, 2, 3]).pipe(lambda x: (x + 1).rename("value")) 0 2 1 3 2 4 Name: value, dtype: int64 """ if isinstance(func, tuple): func, target = func if target in kwargs: raise ValueError('%s is both the pipe target and a keyword ' 'argument' % target) kwargs[target] = self return func(*args, **kwargs) else: return func(self, *args, **kwargs) def to_numpy(self): """ A NumPy ndarray representing the values in this DataFrame or Series. .. note:: This method should only be used if the resulting NumPy ndarray is expected to be small, as all the data is loaded into the driver's memory. Returns ------- numpy.ndarray """ return self.to_pandas().values def to_csv(self, path=None, sep=',', na_rep='', columns=None, header=True, quotechar='"', date_format=None, escapechar=None, num_files=None, **options): r""" Write object to a comma-separated values (csv) file. .. note:: Koalas `to_csv` writes files to a path or URI. Unlike pandas', Koalas respects HDFS's property such as 'fs.default.name'. .. note:: Koalas writes CSV files into the directory, `path`, and writes multiple `part-...` files in the directory when `path` is specified. This behaviour was inherited from Apache Spark. The number of files can be controlled by `num_files`. Parameters ---------- path : str, default None File path. If None is provided the result is returned as a string. sep : str, default ',' String of length 1. Field delimiter for the output file. na_rep : str, default '' Missing data representation. columns : sequence, optional Columns to write. header : bool or list of str, default True Write out the column names. If a list of strings is given it is assumed to be aliases for the column names. quotechar : str, default '\"' String of length 1. Character used to quote fields. date_format : str, default None Format string for datetime objects. escapechar : str, default None String of length 1. Character used to escape `sep` and `quotechar` when appropriate. num_files : the number of files to be written in `path` directory when this is a path. options: keyword arguments for additional options specific to PySpark. This kwargs are specific to PySpark's CSV options to pass. Check the options in PySpark's API documentation for spark.write.csv(...). It has higher priority and overwrites all other options. This parameter only works when `path` is specified. See Also -------- read_csv DataFrame.to_delta DataFrame.to_table DataFrame.to_parquet DataFrame.to_spark_io Examples -------- >>> df = ks.DataFrame(dict( ... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')), ... country=['KR', 'US', 'JP'], ... 
code=[1, 2 ,3]), columns=['date', 'country', 'code']) >>> df.sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE date country code ... 2012-01-31 12:00:00 KR 1 ... 2012-02-29 12:00:00 US 2 ... 2012-03-31 12:00:00 JP 3 >>> print(df.to_csv()) # doctest: +NORMALIZE_WHITESPACE date,country,code 2012-01-31 12:00:00,KR,1 2012-02-29 12:00:00,US,2 2012-03-31 12:00:00,JP,3 >>> df.to_csv(path=r'%s/to_csv/foo.csv' % path, num_files=1) >>> ks.read_csv( ... path=r'%s/to_csv/foo.csv' % path ... ).sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE date country code ... 2012-01-31 12:00:00 KR 1 ... 2012-02-29 12:00:00 US 2 ... 2012-03-31 12:00:00 JP 3 In case of Series, >>> print(df.date.to_csv()) # doctest: +NORMALIZE_WHITESPACE date 2012-01-31 12:00:00 2012-02-29 12:00:00 2012-03-31 12:00:00 >>> df.date.to_csv(path=r'%s/to_csv/foo.csv' % path, num_files=1) >>> ks.read_csv( ... path=r'%s/to_csv/foo.csv' % path ... ).sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE date ... 2012-01-31 12:00:00 ... 2012-02-29 12:00:00 ... 2012-03-31 12:00:00 """ if path is None: # If path is none, just collect and use pandas's to_csv. kdf_or_ser = self if (LooseVersion("0.24") > LooseVersion(pd.__version__)) and \ isinstance(self, ks.Series): # 0.23 seems not having 'columns' parameter in Series' to_csv. return kdf_or_ser.to_pandas().to_csv( None, sep=sep, na_rep=na_rep, header=header, date_format=date_format, index=False) else: return kdf_or_ser.to_pandas().to_csv( None, sep=sep, na_rep=na_rep, columns=columns, header=header, quotechar=quotechar, date_format=date_format, escapechar=escapechar, index=False) kdf = self if isinstance(self, ks.Series): kdf = self.to_frame() if columns is None: column_index = kdf._internal.column_index elif isinstance(columns, str): column_index = [(columns,)] elif isinstance(columns, tuple): column_index = [columns] else: column_index = [idx if isinstance(idx, tuple) else (idx,) for idx in columns] if header is True and kdf._internal.column_index_level > 1: raise ValueError('to_csv only support one-level index column now') elif isinstance(header, list): sdf = kdf._sdf.select( [self._internal.scol_for(idx).alias(new_name) for (idx, new_name) in zip(column_index, header)]) header = True else: sdf = kdf._sdf.select([kdf._internal.scol_for(idx) for idx in column_index]) if num_files is not None: sdf = sdf.repartition(num_files) builder = sdf.write.mode("overwrite") OptionUtils._set_opts( builder, path=path, sep=sep, nullValue=na_rep, header=header, quote=quotechar, dateFormat=date_format, charToEscapeQuoteEscaping=escapechar) builder.options(**options).format("csv").save(path) def to_json(self, path=None, compression='uncompressed', num_files=None, **options): """ Convert the object to a JSON string. .. note:: Koalas `to_json` writes files to a path or URI. Unlike pandas', Koalas respects HDFS's property such as 'fs.default.name'. .. note:: Koalas writes JSON files into the directory, `path`, and writes multiple `part-...` files in the directory when `path` is specified. This behaviour was inherited from Apache Spark. The number of files can be controlled by `num_files`. .. note:: output JSON format is different from pandas'. It always use `orient='records'` for its output. This behaviour might have to change in the near future. Note NaN's and None will be converted to null and datetime objects will be converted to UNIX timestamps. Parameters ---------- path : string, optional File path. If not specified, the result is returned as a string. 
compression : {'gzip', 'bz2', 'xz', None} A string representing the compression to use in the output file, only used when the first argument is a filename. By default, the compression is inferred from the filename. num_files : the number of files to be written in `path` directory when this is a path. options: keyword arguments for additional options specific to PySpark. It is specific to PySpark's JSON options to pass. Check the options in PySpark's API documentation for `spark.write.json(...)`. It has a higher priority and overwrites all other options. This parameter only works when `path` is specified. Examples -------- >>> df = ks.DataFrame([['a', 'b'], ['c', 'd']], ... columns=['col 1', 'col 2']) >>> df.to_json() '[{"col 1":"a","col 2":"b"},{"col 1":"c","col 2":"d"}]' >>> df['col 1'].to_json() '[{"col 1":"a"},{"col 1":"c"}]' >>> df.to_json(path=r'%s/to_json/foo.json' % path, num_files=1) >>> ks.read_json( ... path=r'%s/to_json/foo.json' % path ... ).sort_values(by="col 1") col 1 col 2 0 a b 1 c d >>> df['col 1'].to_json(path=r'%s/to_json/foo.json' % path, num_files=1) >>> ks.read_json( ... path=r'%s/to_json/foo.json' % path ... ).sort_values(by="col 1") col 1 0 a 1 c """ if path is None: # If path is none, just collect and use pandas's to_json. kdf_or_ser = self pdf = kdf_or_ser.to_pandas() if isinstance(self, ks.Series): pdf = pdf.to_frame() # To make the format consistent and readable by `read_json`, convert it to pandas' and # use 'records' orient for now. return pdf.to_json(orient='records') kdf = self if isinstance(self, ks.Series): kdf = self.to_frame() sdf = kdf.to_spark() if num_files is not None: sdf = sdf.repartition(num_files) builder = sdf.write.mode("overwrite") OptionUtils._set_opts(builder, compression=compression) builder.options(**options).format("json").save(path) def to_excel(self, excel_writer, sheet_name="Sheet1", na_rep="", float_format=None, columns=None, header=True, index=True, index_label=None, startrow=0, startcol=0, engine=None, merge_cells=True, encoding=None, inf_rep="inf", verbose=True, freeze_panes=None): """ Write object to an Excel sheet. .. note:: This method should only be used if the resulting DataFrame is expected to be small, as all the data is loaded into the driver's memory. To write a single object to an Excel .xlsx file it is only necessary to specify a target file name. To write to multiple sheets it is necessary to create an `ExcelWriter` object with a target file name, and specify a sheet in the file to write to. Multiple sheets may be written to by specifying unique `sheet_name`. With all data written to the file it is necessary to save the changes. Note that creating an `ExcelWriter` object with a file name that already exists will result in the contents of the existing file being erased. Parameters ---------- excel_writer : str or ExcelWriter object File path or existing ExcelWriter. sheet_name : str, default 'Sheet1' Name of sheet which will contain DataFrame. na_rep : str, default '' Missing data representation. float_format : str, optional Format string for floating point numbers. For example ``float_format="%%.2f"`` will format 0.1234 to 0.12. columns : sequence or list of str, optional Columns to write. header : bool or list of str, default True Write out the column names. If a list of string is given it is assumed to be aliases for the column names. index : bool, default True Write row names (index). index_label : str or sequence, optional Column label for index column(s) if desired. 
If not specified, and `header` and `index` are True, then the index names are used. A sequence should be given if the DataFrame uses MultiIndex. startrow : int, default 0 Upper left cell row to dump data frame. startcol : int, default 0 Upper left cell column to dump data frame. engine : str, optional Write engine to use, 'openpyxl' or 'xlsxwriter'. You can also set this via the options ``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and ``io.excel.xlsm.writer``. merge_cells : bool, default True Write MultiIndex and Hierarchical Rows as merged cells. encoding : str, optional Encoding of the resulting excel file. Only necessary for xlwt, other writers support unicode natively. inf_rep : str, default 'inf' Representation for infinity (there is no native representation for infinity in Excel). verbose : bool, default True Display more information in the error logs. freeze_panes : tuple of int (length 2), optional Specifies the one-based bottommost row and rightmost column that is to be frozen. Notes ----- Once a workbook has been saved it is not possible write further data without rewriting the whole workbook. See Also -------- read_excel : Read Excel file. Examples -------- Create, write to and save a workbook: >>> df1 = ks.DataFrame([['a', 'b'], ['c', 'd']], ... index=['row 1', 'row 2'], ... columns=['col 1', 'col 2']) >>> df1.to_excel("output.xlsx") # doctest: +SKIP To specify the sheet name: >>> df1.to_excel("output.xlsx") # doctest: +SKIP >>> df1.to_excel("output.xlsx", ... sheet_name='Sheet_name_1') # doctest: +SKIP If you wish to write to more than one sheet in the workbook, it is necessary to specify an ExcelWriter object: >>> with pd.ExcelWriter('output.xlsx') as writer: # doctest: +SKIP ... df1.to_excel(writer, sheet_name='Sheet_name_1') ... df2.to_excel(writer, sheet_name='Sheet_name_2') To set the library that is used to write the Excel file, you can pass the `engine` keyword (the default engine is automatically chosen depending on the file extension): >>> df1.to_excel('output1.xlsx', engine='xlsxwriter') # doctest: +SKIP """ # Make sure locals() call is at the top of the function so we don't capture local variables. args = locals() kdf = self if isinstance(self, ks.DataFrame): f = pd.DataFrame.to_excel elif isinstance(self, ks.Series): f = pd.Series.to_excel else: raise TypeError('Constructor expects DataFrame or Series; however, ' 'got [%s]' % (self,)) return validate_arguments_and_invoke_function( kdf._to_internal_pandas(), self.to_excel, f, args) def mean(self, axis=None, numeric_only=True): """ Return the mean of the values. Parameters ---------- axis : {index (0), columns (1)} Axis for the function to be applied on. numeric_only : bool, default None Include only float, int, boolean columns. If None, will attempt to use everything, then use only numeric data. Not implemented for Series. Returns ------- mean : scalar for a Series, and a Series for a DataFrame. Examples -------- >>> df = ks.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]}, ... columns=['a', 'b']) On a DataFrame: >>> df.mean() a 2.0 b 0.2 dtype: float64 >>> df.mean(axis=1) 0 0.55 1 1.10 2 1.65 3 NaN Name: 0, dtype: float64 On a Series: >>> df['a'].mean() 2.0 """ return self._reduce_for_stat_function( F.mean, name="mean", numeric_only=numeric_only, axis=axis) def sum(self, axis=None, numeric_only=True): """ Return the sum of the values. Parameters ---------- axis : {index (0), columns (1)} Axis for the function to be applied on. 
numeric_only : bool, default None Include only float, int, boolean columns. If None, will attempt to use everything, then use only numeric data. Not implemented for Series. Returns ------- sum : scalar for a Series, and a Series for a DataFrame. Examples -------- >>> df = ks.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]}, ... columns=['a', 'b']) On a DataFrame: >>> df.sum() a 6.0 b 0.6 dtype: float64 >>> df.sum(axis=1) 0 1.1 1 2.2 2 3.3 3 0.0 Name: 0, dtype: float64 On a Series: >>> df['a'].sum() 6.0 """ return self._reduce_for_stat_function( F.sum, name="sum", numeric_only=numeric_only, axis=axis) def skew(self, axis=None, numeric_only=True): """ Return unbiased skew normalized by N-1. Parameters ---------- axis : {index (0), columns (1)} Axis for the function to be applied on. numeric_only : bool, default None Include only float, int, boolean columns. If None, will attempt to use everything, then use only numeric data. Not implemented for Series. Returns ------- skew : scalar for a Series, and a Series for a DataFrame. Examples -------- >>> df = ks.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]}, ... columns=['a', 'b']) On a DataFrame: >>> df.skew() # doctest: +SKIP a 0.000000e+00 b -3.319678e-16 dtype: float64 On a Series: >>> df['a'].skew() 0.0 """ return self._reduce_for_stat_function( F.skewness, name="skew", numeric_only=numeric_only, axis=axis) def kurtosis(self, axis=None, numeric_only=True): """ Return unbiased kurtosis using Fisher’s definition of kurtosis (kurtosis of normal == 0.0). Normalized by N-1. Parameters ---------- axis : {index (0), columns (1)} Axis for the function to be applied on. numeric_only : bool, default None Include only float, int, boolean columns. If None, will attempt to use everything, then use only numeric data. Not implemented for Series. Returns ------- kurt : scalar for a Series, and a Series for a DataFrame. Examples -------- >>> df = ks.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]}, ... columns=['a', 'b']) On a DataFrame: >>> df.kurtosis() a -1.5 b -1.5 dtype: float64 On a Series: >>> df['a'].kurtosis() -1.5 """ return self._reduce_for_stat_function( F.kurtosis, name="kurtosis", numeric_only=numeric_only, axis=axis) kurt = kurtosis def min(self, axis=None, numeric_only=False): """ Return the minimum of the values. Parameters ---------- axis : {index (0), columns (1)} Axis for the function to be applied on. numeric_only : bool, default None Include only float, int, boolean columns. If None, will attempt to use everything, then use only numeric data. Not implemented for Series. Returns ------- min : scalar for a Series, and a Series for a DataFrame. Examples -------- >>> df = ks.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]}, ... columns=['a', 'b']) On a DataFrame: >>> df.min() a 1.0 b 0.1 dtype: float64 >>> df.min(axis=1) 0 0.1 1 0.2 2 0.3 3 NaN Name: 0, dtype: float64 On a Series: >>> df['a'].min() 1.0 """ return self._reduce_for_stat_function( F.min, name="min", numeric_only=numeric_only, axis=axis) def max(self, axis=None, numeric_only=False): """ Return the maximum of the values. Parameters ---------- axis : {index (0), columns (1)} Axis for the function to be applied on. numeric_only : bool, default None Include only float, int, boolean columns. If None, will attempt to use everything, then use only numeric data. Not implemented for Series. Returns ------- max : scalar for a Series, and a Series for a DataFrame. 
Examples -------- >>> df = ks.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]}, ... columns=['a', 'b']) On a DataFrame: >>> df.max() a 3.0 b 0.3 dtype: float64 >>> df.max(axis=1) 0 1.0 1 2.0 2 3.0 3 NaN Name: 0, dtype: float64 On a Series: >>> df['a'].max() 3.0 """ return self._reduce_for_stat_function( F.max, name="max", numeric_only=numeric_only, axis=axis) def std(self, axis=None, numeric_only=True): """ Return sample standard deviation. Parameters ---------- axis : {index (0), columns (1)} Axis for the function to be applied on. numeric_only : bool, default None Include only float, int, boolean columns. If None, will attempt to use everything, then use only numeric data. Not implemented for Series. Returns ------- std : scalar for a Series, and a Series for a DataFrame. Examples -------- >>> df = ks.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]}, ... columns=['a', 'b']) On a DataFrame: >>> df.std() a 1.0 b 0.1 dtype: float64 >>> df.std(axis=1) 0 0.636396 1 1.272792 2 1.909188 3 NaN Name: 0, dtype: float64 On a Series: >>> df['a'].std() 1.0 """ return self._reduce_for_stat_function( F.stddev, name="std", numeric_only=numeric_only, axis=axis) def var(self, axis=None, numeric_only=True): """ Return unbiased variance. Parameters ---------- axis : {index (0), columns (1)} Axis for the function to be applied on. numeric_only : bool, default None Include only float, int, boolean columns. If None, will attempt to use everything, then use only numeric data. Not implemented for Series. Returns ------- var : scalar for a Series, and a Series for a DataFrame. Examples -------- >>> df = ks.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]}, ... columns=['a', 'b']) On a DataFrame: >>> df.var() a 1.00 b 0.01 dtype: float64 >>> df.var(axis=1) 0 0.405 1 1.620 2 3.645 3 NaN Name: 0, dtype: float64 On a Series: >>> df['a'].var() 1.0 """ return self._reduce_for_stat_function( F.variance, name="var", numeric_only=numeric_only, axis=axis) @property def size(self) -> int: """ Return an int representing the number of elements in this object. Return the number of rows if Series. Otherwise return the number of rows times number of columns if DataFrame. Examples -------- >>> s = ks.Series({'a': 1, 'b': 2, 'c': None}) >>> s.size 3 >>> df = ks.DataFrame({'col1': [1, 2, None], 'col2': [3, 4, None]}) >>> df.size 3 """ return len(self) # type: ignore def abs(self): """ Return a Series/DataFrame with absolute numeric value of each element. Returns ------- abs : Series/DataFrame containing the absolute value of each element. Examples -------- Absolute numeric values in a Series. >>> s = ks.Series([-1.10, 2, -3.33, 4]) >>> s.abs() 0 1.10 1 2.00 2 3.33 3 4.00 Name: 0, dtype: float64 Absolute numeric values in a DataFrame. >>> df = ks.DataFrame({ ... 'a': [4, 5, 6, 7], ... 'b': [10, 20, 30, 40], ... 'c': [100, 50, -30, -50] ... }, ... columns=['a', 'b', 'c']) >>> df.abs() a b c 0 4 10 100 1 5 20 50 2 6 30 30 3 7 40 50 """ # TODO: The first example above should not have "Name: 0". return self._apply_series_op( lambda kser: kser._with_new_scol(F.abs(kser._scol)).rename(kser.name)) # TODO: by argument only support the grouping name and as_index only for now. Documentation # should be updated when it's supported. def groupby(self, by, as_index: bool = True): """ Group DataFrame or Series using a Series of columns. A groupby operation involves some combination of splitting the object, applying a function, and combining the results. 
This can be used to group large amounts of data and compute operations on these groups. Parameters ---------- by : Series, label, or list of labels Used to determine the groups for the groupby. If Series is passed, the Series or dict VALUES will be used to determine the groups. A label or list of labels may be passed to group by the columns in ``self``. as_index : bool, default True For aggregated output, return object with group labels as the index. Only relevant for DataFrame input. as_index=False is effectively "SQL-style" grouped output. Returns ------- DataFrameGroupBy or SeriesGroupBy Depends on the calling object and returns groupby object that contains information about the groups. See Also -------- koalas.groupby.GroupBy Examples -------- >>> df = ks.DataFrame({'Animal': ['Falcon', 'Falcon', ... 'Parrot', 'Parrot'], ... 'Max Speed': [380., 370., 24., 26.]}, ... columns=['Animal', 'Max Speed']) >>> df Animal Max Speed 0 Falcon 380.0 1 Falcon 370.0 2 Parrot 24.0 3 Parrot 26.0 >>> df.groupby(['Animal']).mean().sort_index() # doctest: +NORMALIZE_WHITESPACE Max Speed Animal Falcon 375.0 Parrot 25.0 >>> df.groupby(['Animal'], as_index=False).mean().sort_values('Animal') ... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE Animal Max Speed ...Falcon 375.0 ...Parrot 25.0 """ from databricks.koalas.frame import DataFrame from databricks.koalas.series import Series from databricks.koalas.groupby import DataFrameGroupBy, SeriesGroupBy df_or_s = self if isinstance(by, DataFrame): raise ValueError("Grouper for '{}' not 1-dimensional".format(type(by))) elif isinstance(by, str): if isinstance(df_or_s, Series): raise KeyError(by) by = [(by,)] elif isinstance(by, tuple): if isinstance(df_or_s, Series): for key in by: if isinstance(key, str): raise KeyError(key) for key in by: if isinstance(key, DataFrame): raise ValueError("Grouper for '{}' not 1-dimensional".format(type(key))) by = [by] elif isinstance(by, Series): by = [by] elif isinstance(by, Iterable): if isinstance(df_or_s, Series): for key in by: if isinstance(key, str): raise KeyError(key) by = [key if isinstance(key, (tuple, Series)) else (key,) for key in by] else: raise ValueError("Grouper for '{}' not 1-dimensional".format(type(by))) if not len(by): raise ValueError('No group keys passed!') if isinstance(df_or_s, DataFrame): df = df_or_s # type: DataFrame col_by = [_resolve_col(df, col_or_s) for col_or_s in by] return DataFrameGroupBy(df_or_s, col_by, as_index=as_index) if isinstance(df_or_s, Series): col = df_or_s # type: Series anchor = df_or_s._kdf col_by = [_resolve_col(anchor, col_or_s) for col_or_s in by] return SeriesGroupBy(col, col_by, as_index=as_index) raise TypeError('Constructor expects DataFrame or Series; however, ' 'got [%s]' % (df_or_s,)) def bool(self): """ Return the bool of a single element in the current object. This must be a boolean scalar value, either True or False. Raise a ValueError if the object does not have exactly 1 element, or that element is not boolean Examples -------- >>> ks.DataFrame({'a': [True]}).bool() True >>> ks.Series([False]).bool() False If there are non-boolean or multiple values exist, it raises an exception in all cases as below. >>> ks.DataFrame({'a': ['a']}).bool() Traceback (most recent call last): ... ValueError: bool cannot act on a non-boolean single element DataFrame >>> ks.DataFrame({'a': [True], 'b': [False]}).bool() # doctest: +NORMALIZE_WHITESPACE Traceback (most recent call last): ... ValueError: The truth value of a DataFrame is ambiguous. 
Use a.empty, a.bool(), a.item(), a.any() or a.all(). >>> ks.Series([1]).bool() Traceback (most recent call last): ... ValueError: bool cannot act on a non-boolean single element DataFrame """ if isinstance(self, ks.DataFrame): df = self elif isinstance(self, ks.Series): df = self.to_dataframe() else: raise TypeError('bool() expects DataFrame or Series; however, ' 'got [%s]' % (self,)) return df.head(2)._to_internal_pandas().bool() def first_valid_index(self): """ Retrieves the index of the first valid value. Returns ------- idx_first_valid : type of index Examples -------- Support for DataFrame >>> kdf = ks.DataFrame({'a': [None, 2, 3, 2], ... 'b': [None, 2.0, 3.0, 1.0], ... 'c': [None, 200, 400, 200]}, ... index=['Q', 'W', 'E', 'R']) >>> kdf a b c Q NaN NaN NaN W 2.0 2.0 200.0 E 3.0 3.0 400.0 R 2.0 1.0 200.0 >>> kdf.first_valid_index() 'W' Support for MultiIndex columns >>> kdf.columns = pd.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')]) >>> kdf a b c x y z Q NaN NaN NaN W 2.0 2.0 200.0 E 3.0 3.0 400.0 R 2.0 1.0 200.0 >>> kdf.first_valid_index() 'W' Support for Series. >>> s = ks.Series([None, None, 3, 4, 5], index=[100, 200, 300, 400, 500]) >>> s 100 NaN 200 NaN 300 3.0 400 4.0 500 5.0 Name: 0, dtype: float64 >>> s.first_valid_index() 300 Support for MultiIndex >>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'], ... ['speed', 'weight', 'length']], ... [[0, 0, 0, 1, 1, 1, 2, 2, 2], ... [0, 1, 2, 0, 1, 2, 0, 1, 2]]) >>> s = ks.Series([None, None, None, None, 250, 1.5, 320, 1, 0.3], index=midx) >>> s lama speed NaN weight NaN length NaN cow speed NaN weight 250.0 length 1.5 falcon speed 320.0 weight 1.0 length 0.3 Name: 0, dtype: float64 >>> s.first_valid_index() ('cow', 'weight') """ sdf = self._internal.sdf column_scols = self._internal.column_scols cond = reduce(lambda x, y: x & y, map(lambda x: x.isNotNull(), column_scols)) first_valid_row = sdf.drop(NATURAL_ORDER_COLUMN_NAME).filter(cond).first() first_valid_idx = tuple(first_valid_row[idx_col] for idx_col in self._internal.index_columns) if len(first_valid_idx) == 1: first_valid_idx = first_valid_idx[0] return first_valid_idx def median(self, accuracy=10000): """ Return the median of the values for the requested axis. .. note:: Unlike pandas', the median in Koalas is an approximated median based upon approximate percentile computation because computing median across a large dataset is extremely expensive. Parameters ---------- accuracy : int, optional Default accuracy of approximation. Larger value means better accuracy. The relative error can be deduced by 1.0 / accuracy. Returns ------- median : scalar or Series Examples -------- >>> df = ks.DataFrame({ ... 
'a': [24., 21., 25., 33., 26.], 'b': [1, 2, 3, 4, 5]}, columns=['a', 'b']) >>> df a b 0 24.0 1 1 21.0 2 2 25.0 3 3 33.0 4 4 26.0 5 On a DataFrame: >>> df.median() a 25.0 b 3.0 Name: 0, dtype: float64 On a Series: >>> df['a'].median() 25.0 >>> (df['a'] + 100).median() 125.0 For multi-index columns, >>> df.columns = pd.MultiIndex.from_tuples([('x', 'a'), ('y', 'b')]) >>> df x y a b 0 24.0 1 1 21.0 2 2 25.0 3 3 33.0 4 4 26.0 5 On a DataFrame: >>> df.median() x a 25.0 y b 3.0 Name: 0, dtype: float64 On a Series: >>> df[('x', 'a')].median() 25.0 >>> (df[('x', 'a')] + 100).median() 125.0 """ if not isinstance(accuracy, int): raise ValueError("accuracy must be an integer; however, got [%s]" % type(accuracy)) from databricks.koalas.frame import DataFrame from databricks.koalas.series import Series, _col kdf_or_kser = self if isinstance(kdf_or_kser, Series): kser = _col(kdf_or_kser.to_frame()) return kser._reduce_for_stat_function( lambda _: F.expr("approx_percentile(`%s`, 0.5, %s)" % (kser._internal.data_columns[0], accuracy)), name="median") assert isinstance(kdf_or_kser, DataFrame) # This code path cannot reuse `_reduce_for_stat_function` since there looks no proper way # to get a column name from Spark column but we need it to pass it through `expr`. kdf = kdf_or_kser sdf = kdf._sdf.select(kdf._internal.scols) median = lambda name: F.expr("approx_percentile(`%s`, 0.5, %s)" % (name, accuracy)) sdf = sdf.select([median(col).alias(col) for col in kdf._internal.data_columns]) # Attach a dummy column for index to avoid default index. sdf = sdf.withColumn('__DUMMY__', F.monotonically_increasing_id()) # This is expected to be small so it's fine to transpose. return DataFrame(kdf._internal.copy( sdf=sdf, index_map=[('__DUMMY__', None)], column_scols=[scol_for(sdf, col) for col in kdf._internal.data_columns])) \ ._to_internal_pandas().transpose().iloc[:, 0] # TODO: 'center', 'win_type', 'on', 'axis' parameter should be implemented. def rolling(self, window, min_periods=None): """ Provide rolling transformations. .. note:: 'min_periods' in Koalas works as a fixed window size unlike pandas. Unlike pandas, NA is also counted as the period. This might be changed in the near future. Parameters ---------- window : int, or offset Size of the moving window. This is the number of observations used for calculating the statistic. Each window will be a fixed size. min_periods : int, default None Minimum number of observations in window required to have a value (otherwise result is NA). For a window that is specified by an offset, min_periods will default to 1. Otherwise, min_periods will default to the size of the window. Returns ------- a Window sub-classed for the particular operation """ return Rolling(self, window=window, min_periods=min_periods) # TODO: 'center' and 'axis' parameter should be implemented. # 'axis' implementation, refer https://github.com/databricks/koalas/pull/607 def expanding(self, min_periods=1): """ Provide expanding transformations. .. note:: 'min_periods' in Koalas works as a fixed window size unlike pandas. Unlike pandas, NA is also counted as the period. This might be changed in the near future. Parameters ---------- min_periods : int, default 1 Minimum number of observations in window required to have a value (otherwise result is NA). Returns ------- a Window sub-classed for the particular operation """ return Expanding(self, min_periods=min_periods) def get(self, key, default=None): """ Get item from object for given key (DataFrame column, Panel slice, etc.). 
Returns default value if not found. Parameters ---------- key : object Returns ------- value : same type as items contained in object Examples -------- >>> df = ks.DataFrame({'x':range(3), 'y':['a','b','b'], 'z':['a','b','b']}, ... columns=['x', 'y', 'z'], index=[10, 20, 20]) >>> df x y z 10 0 a a 20 1 b b 20 2 b b >>> df.get('x') 10 0 20 1 20 2 Name: x, dtype: int64 >>> df.get(['x', 'y']) x y 10 0 a 20 1 b 20 2 b >>> df.x.get(10) 0 >>> df.x.get(20) 20 1 20 2 Name: x, dtype: int64 >>> df.x.get(15, -1) -1 """ try: return self[key] except (KeyError, ValueError, IndexError): return default @property def at(self): return AtIndexer(self) at.__doc__ = AtIndexer.__doc__ @property def iat(self): return iAtIndexer(self) iat.__doc__ = iAtIndexer.__doc__ @property def iloc(self): return iLocIndexer(self) iloc.__doc__ = iLocIndexer.__doc__ @property def loc(self): return LocIndexer(self) loc.__doc__ = LocIndexer.__doc__ def compute(self): """Alias of `to_pandas()` to mimic dask for easily porting tests.""" return self.toPandas() @staticmethod def _count_expr(col: spark.Column, spark_type: DataType) -> spark.Column: # Special handle floating point types because Spark's count treats nan as a valid value, # whereas Pandas count doesn't include nan. if isinstance(spark_type, (FloatType, DoubleType)): return F.count(F.nanvl(col, F.lit(None))) else: return F.count(col) def _resolve_col(kdf, col_like): if isinstance(col_like, ks.Series): if kdf is not col_like._kdf: raise ValueError( "Cannot combine the series because it comes from a different dataframe. " "In order to allow this operation, enable 'compute.ops_on_diff_frames' option.") return col_like elif isinstance(col_like, tuple): return kdf[col_like] else: raise ValueError(col_like)
1
14,069
nit: `as_index must be an boolean` -> `as_index must be a boolean`
databricks-koalas
py
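The `median` docstring in the row above explains that Koalas computes an approximate median through Spark's `approx_percentile` expression rather than an exact median. Purely as an illustrative sketch (this is not the Koalas code itself; the sample data and accuracy value are assumptions for the example), the same idea can be tried directly in PySpark:

from pyspark.sql import SparkSession, functions as F

spark = SparkSession.builder.getOrCreate()
df = spark.createDataFrame(
    [(24.0, 1), (21.0, 2), (25.0, 3), (33.0, 4), (26.0, 5)], ["a", "b"]
)

# Relative error is roughly 1.0 / accuracy, mirroring the docstring above.
accuracy = 10000
medians = df.select(
    *[F.expr("approx_percentile(`%s`, 0.5, %s)" % (c, accuracy)).alias(c) for c in df.columns]
)
medians.show()  # expected: a = 25.0, b = 3 for this small sample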
@@ -26,5 +26,7 @@ interface BaseModuleInterface public function postDeactivation(ConnectionInterface $con = null); + public function update($currentVersion, $newVersion, ConnectionInterface $con = null); + public function destroy(ConnectionInterface $con = null, $deleteModuleData = false); }
1
<?php /*************************************************************************************/ /* This file is part of the Thelia package. */ /* */ /* Copyright (c) OpenStudio */ /* email : [email protected] */ /* web : http://www.thelia.net */ /* */ /* For the full copyright and license information, please view the LICENSE.txt */ /* file that was distributed with this source code. */ /*************************************************************************************/ namespace Thelia\Module; use Propel\Runtime\Connection\ConnectionInterface; interface BaseModuleInterface { public function install(ConnectionInterface $con = null); public function preActivation(ConnectionInterface $con = null); public function postActivation(ConnectionInterface $con = null); public function preDeactivation(ConnectionInterface $con = null); public function postDeactivation(ConnectionInterface $con = null); public function destroy(ConnectionInterface $con = null, $deleteModuleData = false); }
1
11,562
What is the goal of that method? And what are `$currentVersion` and `$newVersion`? Are they Thelia or Module versions? :)
thelia-thelia
php
@@ -34,6 +34,8 @@ function assertServerError (res) { } module.exports = class AwsS3Multipart extends Plugin { + static VERSION = require('../package.json').version + constructor (uppy, opts) { super(uppy, opts) this.type = 'uploader'
1
const { Plugin } = require('@uppy/core') const { Socket, Provider, RequestClient } = require('@uppy/companion-client') const emitSocketProgress = require('@uppy/utils/lib/emitSocketProgress') const getSocketHost = require('@uppy/utils/lib/getSocketHost') const limitPromises = require('@uppy/utils/lib/limitPromises') const Uploader = require('./MultipartUploader') /** * Create a wrapper around an event emitter with a `remove` method to remove * all events that were added using the wrapped emitter. */ function createEventTracker (emitter) { const events = [] return { on (event, fn) { events.push([ event, fn ]) return emitter.on(event, fn) }, remove () { events.forEach(([ event, fn ]) => { emitter.off(event, fn) }) } } } function assertServerError (res) { if (res && res.error) { const error = new Error(res.message) Object.assign(error, res.error) throw error } return res } module.exports = class AwsS3Multipart extends Plugin { constructor (uppy, opts) { super(uppy, opts) this.type = 'uploader' this.id = 'AwsS3Multipart' this.title = 'AWS S3 Multipart' this.client = new RequestClient(uppy, opts) const defaultOptions = { timeout: 30 * 1000, limit: 0, createMultipartUpload: this.createMultipartUpload.bind(this), listParts: this.listParts.bind(this), prepareUploadPart: this.prepareUploadPart.bind(this), abortMultipartUpload: this.abortMultipartUpload.bind(this), completeMultipartUpload: this.completeMultipartUpload.bind(this) } this.opts = Object.assign({}, defaultOptions, opts) this.upload = this.upload.bind(this) if (typeof this.opts.limit === 'number' && this.opts.limit !== 0) { this.limitRequests = limitPromises(this.opts.limit) } else { this.limitRequests = (fn) => fn } this.uploaders = Object.create(null) this.uploaderEvents = Object.create(null) this.uploaderSockets = Object.create(null) } /** * Clean up all references for a file's upload: the MultipartUploader instance, * any events related to the file, and the Companion WebSocket connection. 
*/ resetUploaderReferences (fileID, opts = {}) { if (this.uploaders[fileID]) { this.uploaders[fileID].abort({ really: opts.abort || false }) this.uploaders[fileID] = null } if (this.uploaderEvents[fileID]) { this.uploaderEvents[fileID].remove() this.uploaderEvents[fileID] = null } if (this.uploaderSockets[fileID]) { this.uploaderSockets[fileID].close() this.uploaderSockets[fileID] = null } } assertHost () { if (!this.opts.companionUrl) { throw new Error('Expected a `companionUrl` option containing a Companion address.') } } createMultipartUpload (file) { this.assertHost() return this.client.post('s3/multipart', { filename: file.name, type: file.type }).then(assertServerError) } listParts (file, { key, uploadId }) { this.assertHost() const filename = encodeURIComponent(key) return this.client.get(`s3/multipart/${uploadId}?key=${filename}`) .then(assertServerError) } prepareUploadPart (file, { key, uploadId, number }) { this.assertHost() const filename = encodeURIComponent(key) return this.client.get(`s3/multipart/${uploadId}/${number}?key=${filename}`) .then(assertServerError) } completeMultipartUpload (file, { key, uploadId, parts }) { this.assertHost() const filename = encodeURIComponent(key) const uploadIdEnc = encodeURIComponent(uploadId) return this.client.post(`s3/multipart/${uploadIdEnc}/complete?key=${filename}`, { parts }) .then(assertServerError) } abortMultipartUpload (file, { key, uploadId }) { this.assertHost() const filename = encodeURIComponent(key) const uploadIdEnc = encodeURIComponent(uploadId) return this.client.delete(`s3/multipart/${uploadIdEnc}?key=${filename}`) .then(assertServerError) } uploadFile (file) { return new Promise((resolve, reject) => { const upload = new Uploader(file.data, Object.assign({ // .bind to pass the file object to each handler. createMultipartUpload: this.limitRequests(this.opts.createMultipartUpload.bind(this, file)), listParts: this.limitRequests(this.opts.listParts.bind(this, file)), prepareUploadPart: this.opts.prepareUploadPart.bind(this, file), completeMultipartUpload: this.limitRequests(this.opts.completeMultipartUpload.bind(this, file)), abortMultipartUpload: this.limitRequests(this.opts.abortMultipartUpload.bind(this, file)), limit: this.opts.limit || 5, onStart: (data) => { const cFile = this.uppy.getFile(file.id) this.uppy.setFileState(file.id, { s3Multipart: Object.assign({}, cFile.s3Multipart, { key: data.key, uploadId: data.uploadId, parts: [] }) }) }, onProgress: (bytesUploaded, bytesTotal) => { this.uppy.emit('upload-progress', file, { uploader: this, bytesUploaded: bytesUploaded, bytesTotal: bytesTotal }) }, onError: (err) => { this.uppy.log(err) this.uppy.emit('upload-error', file, err) err.message = `Failed because: ${err.message}` this.resetUploaderReferences(file.id) reject(err) }, onSuccess: (result) => { const uploadResp = { uploadURL: result.location } this.uppy.emit('upload-success', file, uploadResp) if (result.location) { this.uppy.log('Download ' + upload.file.name + ' from ' + result.location) } this.resetUploaderReferences(file.id) resolve(upload) }, onPartComplete: (part) => { // Store completed parts in state. 
const cFile = this.uppy.getFile(file.id) if (!cFile) { return } this.uppy.setFileState(file.id, { s3Multipart: Object.assign({}, cFile.s3Multipart, { parts: [ ...cFile.s3Multipart.parts, part ] }) }) this.uppy.emit('s3-multipart:part-uploaded', cFile, part) } }, file.s3Multipart)) this.uploaders[file.id] = upload this.uploaderEvents[file.id] = createEventTracker(this.uppy) this.onFileRemove(file.id, (removed) => { this.resetUploaderReferences(file.id, { abort: true }) resolve(`upload ${removed.id} was removed`) }) this.onFilePause(file.id, (isPaused) => { if (isPaused) { upload.pause() } else { upload.start() } }) this.onPauseAll(file.id, () => { upload.pause() }) this.onResumeAll(file.id, () => { upload.start() }) if (!file.isPaused) { upload.start() } if (!file.isRestored) { this.uppy.emit('upload-started', file, upload) } }) } uploadRemote (file) { this.resetUploaderReferences(file.id) return new Promise((resolve, reject) => { if (file.serverToken) { return this.connectToServerSocket(file) .then(() => resolve()) .catch(reject) } this.uppy.emit('upload-started', file) const Client = file.remote.providerOptions.provider ? Provider : RequestClient const client = new Client(this.uppy, file.remote.providerOptions) client.post( file.remote.url, Object.assign({}, file.remote.body, { protocol: 's3-multipart', size: file.data.size, metadata: file.meta }) ).then((res) => { this.uppy.setFileState(file.id, { serverToken: res.token }) file = this.uppy.getFile(file.id) return file }).then((file) => { return this.connectToServerSocket(file) }).then(() => { resolve() }).catch((err) => { reject(new Error(err)) }) }) } connectToServerSocket (file) { return new Promise((resolve, reject) => { const token = file.serverToken const host = getSocketHost(file.remote.companionUrl) const socket = new Socket({ target: `${host}/api/${token}` }) this.uploaderSockets[socket] = socket this.uploaderEvents[file.id] = createEventTracker(this.uppy) this.onFileRemove(file.id, (removed) => { this.resetUploaderReferences(file.id, { abort: true }) resolve(`upload ${file.id} was removed`) }) this.onFilePause(file.id, (isPaused) => { socket.send(isPaused ? 
'pause' : 'resume', {}) }) this.onPauseAll(file.id, () => socket.send('pause', {})) this.onResumeAll(file.id, () => { if (file.error) { socket.send('pause', {}) } socket.send('resume', {}) }) this.onRetry(file.id, () => { socket.send('pause', {}) socket.send('resume', {}) }) this.onRetryAll(file.id, () => { socket.send('pause', {}) socket.send('resume', {}) }) if (file.isPaused) { socket.send('pause', {}) } socket.on('progress', (progressData) => emitSocketProgress(this, progressData, file)) socket.on('error', (errData) => { this.uppy.emit('upload-error', file, new Error(errData.error)) reject(new Error(errData.error)) }) socket.on('success', (data) => { const uploadResp = { uploadURL: data.url } this.uppy.emit('upload-success', file, uploadResp) resolve() }) }) } upload (fileIDs) { if (fileIDs.length === 0) return Promise.resolve() const promises = fileIDs.map((id) => { const file = this.uppy.getFile(id) if (file.isRemote) { return this.uploadRemote(file) } return this.uploadFile(file) }) return Promise.all(promises) } onFileRemove (fileID, cb) { this.uploaderEvents[fileID].on('file-removed', (file) => { if (fileID === file.id) cb(file.id) }) } onFilePause (fileID, cb) { this.uploaderEvents[fileID].on('upload-pause', (targetFileID, isPaused) => { if (fileID === targetFileID) { // const isPaused = this.uppy.pauseResume(fileID) cb(isPaused) } }) } onRetry (fileID, cb) { this.uploaderEvents[fileID].on('upload-retry', (targetFileID) => { if (fileID === targetFileID) { cb() } }) } onRetryAll (fileID, cb) { this.uploaderEvents[fileID].on('retry-all', (filesToRetry) => { if (!this.uppy.getFile(fileID)) return cb() }) } onPauseAll (fileID, cb) { this.uploaderEvents[fileID].on('pause-all', () => { if (!this.uppy.getFile(fileID)) return cb() }) } onResumeAll (fileID, cb) { this.uploaderEvents[fileID].on('resume-all', () => { if (!this.uppy.getFile(fileID)) return cb() }) } install () { const { capabilities } = this.uppy.getState() this.uppy.setState({ capabilities: { ...capabilities, resumableUploads: true } }) this.uppy.addUploader(this.upload) this.uppy.on('cancel-all', () => { this.uppy.getFiles().forEach((file) => { this.resetUploaderReferences(file.id, { abort: true }) }) }) } uninstall () { this.uppy.setState({ capabilities: Object.assign({}, this.uppy.getState().capabilities, { resumableUploads: false }) }) this.uppy.removeUploader(this.upload) } }
1
11,947
Is there an advantage to this vs setting `this.version` in the constructor? Cleaner this way, at the top?
transloadit-uppy
js
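The review question in this row asks what a class-level `static VERSION` field buys over assigning `this.version` in the constructor. The row itself is JavaScript, but as a rough Python analogy (the class names below are invented for illustration and are not from the Uppy codebase), the trade-off is essentially class attribute versus instance attribute:

class PluginWithClassVersion:
    # Class attribute: readable without constructing an instance and
    # evaluated once when the class body runs.
    VERSION = "1.0.0"

class PluginWithInstanceVersion:
    def __init__(self):
        # Instance attribute: exists only after construction and is
        # re-assigned for every new instance.
        self.version = "1.0.0"

print(PluginWithClassVersion.VERSION)       # no instance needed
print(PluginWithInstanceVersion().version)  # requires an instance

With the static field, tooling can read a plugin's version without instantiating it, which is one plausible reason to prefer declaring it at the top of the class.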
@@ -270,7 +270,7 @@ public class OAuthWebviewHelper { * @return login url */ protected String getLoginUrl() { - return SalesforceSDKManager.getInstance().getLoginServerManager().getSelectedLoginServer().url; + return SalesforceSDKManager.getInstance().getLoginServerManager().getSelectedLoginServer().url.trim(); } /**
1
/* * Copyright (c) 2011-2012, salesforce.com, inc. * All rights reserved. * Redistribution and use of this software in source and binary forms, with or * without modification, are permitted provided that the following conditions * are met: * - Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * - Neither the name of salesforce.com, inc. nor the names of its contributors * may be used to endorse or promote products derived from this software without * specific prior written permission of salesforce.com, inc. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ package com.salesforce.androidsdk.ui; import java.net.URI; import java.net.URISyntaxException; import java.util.Map; import android.app.Activity; import android.content.Context; import android.net.Uri; import android.net.http.SslError; import android.os.AsyncTask; import android.os.Bundle; import android.util.Log; import android.webkit.CookieManager; import android.webkit.SslErrorHandler; import android.webkit.WebChromeClient; import android.webkit.WebView; import android.webkit.WebViewClient; import android.widget.Toast; import com.salesforce.androidsdk.R; import com.salesforce.androidsdk.app.SalesforceSDKManager; import com.salesforce.androidsdk.auth.HttpAccess; import com.salesforce.androidsdk.auth.OAuth2; import com.salesforce.androidsdk.auth.OAuth2.IdServiceResponse; import com.salesforce.androidsdk.auth.OAuth2.TokenEndpointResponse; import com.salesforce.androidsdk.push.PushMessaging; import com.salesforce.androidsdk.rest.AdminPrefsManager; import com.salesforce.androidsdk.rest.BootConfig; import com.salesforce.androidsdk.rest.ClientManager; import com.salesforce.androidsdk.rest.ClientManager.LoginOptions; import com.salesforce.androidsdk.security.PasscodeManager; import com.salesforce.androidsdk.util.EventsObservable; import com.salesforce.androidsdk.util.EventsObservable.EventType; import com.salesforce.androidsdk.util.UriFragmentParser; /** * Helper class to manage a WebView instance that is going through the OAuth login process. * Basic flow is * a) load and show the login page to the user * b) user logins in and authorizes app * c) we see the navigation to the auth complete Url, and grab the tokens * d) we call the Id service to obtain additional info about the user * e) we create a local account, and return an authentication result bundle. * f) done! 
* */ public class OAuthWebviewHelper { private static final String ACCOUNT_OPTIONS = "accountOptions"; /** * the host activity/fragment should pass in an implementation of this * interface so that it can notify it of things it needs to do as part of * the oauth process. */ public interface OAuthWebviewHelperEvents { /** we're starting to load this login page into the webview */ void loadingLoginPage(String loginUrl); /** * progress update of loading the webview, totalProgress will go from * 0..10000 (you can pass this directly to the activity progressbar) */ void onLoadingProgress(int totalProgress); /** We're doing something that takes some unknown amount of time */ void onIndeterminateProgress(boolean show); /** We've completed the auth process and here's the resulting Authentication Result bundle to return to the Authenticator */ void onAccountAuthenticatorResult(Bundle authResult); /** we're in some end state and requesting that the host activity be finished/closed. */ void finish(); } /** * Construct a new OAuthWebviewHelper and perform the initial configuration of the Webview. */ public OAuthWebviewHelper(OAuthWebviewHelperEvents callback, LoginOptions options, WebView webview, Bundle savedInstanceState) { assert options != null && callback != null && webview != null; this.callback = callback; this.loginOptions = options; this.webview = webview; webview.getSettings().setJavaScriptEnabled(true); webview.getSettings().setSavePassword(false); webview.setWebViewClient(makeWebViewClient()); webview.setWebChromeClient(makeWebChromeClient()); // Restore webview's state if available. // This ensures the user is not forced to type in credentials again // once the auth process has been kicked off. if (savedInstanceState != null) { webview.restoreState(savedInstanceState); accountOptions = AccountOptions.fromBundle(savedInstanceState.getBundle(ACCOUNT_OPTIONS)); } else { clearCookies(); } } private final OAuthWebviewHelperEvents callback; protected final LoginOptions loginOptions; private final WebView webview; private AccountOptions accountOptions; public void saveState(Bundle outState) { webview.saveState(outState); if (accountOptions != null) { // we have completed the auth flow but not created the account, because we need to create a pin outState.putBundle(ACCOUNT_OPTIONS, accountOptions.asBundle()); } } public WebView getWebView() { return webview; } public void clearCookies() { CookieManager cm = CookieManager.getInstance(); cm.removeAllCookie(); } public void clearView() { webview.clearView(); } /** * Method called by login activity when it resumes after the passcode activity * * When the server has a mobile policy requiring a passcode, we start the passcode activity after completing the * auth flow (see onAuthFlowComplete). * When the passcode activity completes, the login activity's onActivityResult gets invoked, and it calls this method * to finalize the account creation. 
*/ public void onNewPasscode() { if (accountOptions != null) { loginOptions.passcodeHash = SalesforceSDKManager.getInstance().getPasscodeHash(); addAccount(); callback.finish(); } } /** Factory method for the WebViewClient, you can replace this with something else if you need to */ protected WebViewClient makeWebViewClient() { return new AuthWebViewClient(); } /** Factory method for the WebChromeClient, you can replace this with something else if you need to */ protected WebChromeClient makeWebChromeClient() { return new AuthWebChromeClient(); } protected Context getContext() { return webview.getContext(); } /** * Called when the user facing part of the auth flow completed with an error. * We show the user an error and end the activity. */ protected void onAuthFlowError(String error, String errorDesc) { Log.w("LoginActivity:onAuthFlowError", error + ":" + errorDesc); // look for deny. kick them back to login, so clear cookies and repoint browser if ("access_denied".equals(error) && "end-user denied authorization".equals(errorDesc)) { webview.post(new Runnable() { @Override public void run() { clearCookies(); loadLoginPage(); } }); } else { Toast t = Toast.makeText(webview.getContext(), error + " : " + errorDesc, Toast.LENGTH_LONG); webview.postDelayed(new Runnable() { @Override public void run() { callback.finish(); } }, t.getDuration()); t.show(); } } protected void showError(Exception exception) { Toast.makeText(getContext(), getContext().getString(SalesforceSDKManager.getInstance().getSalesforceR().stringGenericError(), exception.toString()), Toast.LENGTH_LONG).show(); } /** * Tells the webview to load the authorization page. * We also update the window title, so its easier to * see which system you're logging in to */ public void loadLoginPage() { // Filling in loginUrl loginOptions.loginUrl = getLoginUrl(); try { URI uri = getAuthorizationUrl(); callback.loadingLoginPage(loginOptions.loginUrl); webview.loadUrl(uri.toString()); } catch (URISyntaxException ex) { showError(ex); } } protected String getOAuthClientId() { return loginOptions.oauthClientId; } protected URI getAuthorizationUrl() throws URISyntaxException { return OAuth2.getAuthorizationUrl( new URI(loginOptions.loginUrl), getOAuthClientId(), loginOptions.oauthCallbackUrl, loginOptions.oauthScopes, null, getAuthorizationDisplayType()); } /** * If you're only supporting recent versions of Android (e.g. 3.x and up), you can override this to be touch and get a better looking login UI * You can override this by either subclass this class, or adding <string name="sf__oauth_display_type">touch</string> to your app's value * resource so that it overrides the default value in the SDK library. * * @return the OAuth login display type, e.g. mobile, touch, see the OAuth docs for the complete list of valid values. */ protected String getAuthorizationDisplayType() { return this.getContext().getString(R.string.oauth_display_type); } /** * Override this method to customize the login url. * @return login url */ protected String getLoginUrl() { return SalesforceSDKManager.getInstance().getLoginServerManager().getSelectedLoginServer().url; } /** * WebViewClient which intercepts the redirect to the oauth callback url. * That redirect marks the end of the user facing portion of the authentication flow. 
* */ protected class AuthWebViewClient extends WebViewClient { @Override public void onPageFinished(WebView view, String url) { EventsObservable.get().notifyEvent(EventType.AuthWebViewPageFinished, url); super.onPageFinished(view, url); } @Override public boolean shouldOverrideUrlLoading(WebView view, String url) { boolean isDone = url.replace("///", "/").startsWith(loginOptions.oauthCallbackUrl.replace("///", "/")); if (isDone) { Uri callbackUri = Uri.parse(url); Map<String, String> params = UriFragmentParser.parse(callbackUri); String error = params.get("error"); // Did we fail? if (error != null) { String errorDesc = params.get("error_description"); onAuthFlowError(error, errorDesc); } // Or succeed? else { TokenEndpointResponse tr = new TokenEndpointResponse(params); onAuthFlowComplete(tr); } } return isDone; } @Override public void onReceivedSslError(WebView view, SslErrorHandler handler, SslError error) { int primError = error.getPrimaryError(); // Figuring out string resource id SalesforceR r = SalesforceSDKManager.getInstance().getSalesforceR(); int primErrorStringId = r.stringSSLUnknownError(); switch (primError) { case SslError.SSL_EXPIRED: primErrorStringId = r.stringSSLExpired(); break; case SslError.SSL_IDMISMATCH: primErrorStringId = r.stringSSLIdMismatch(); break; case SslError.SSL_NOTYETVALID: primErrorStringId = r.stringSSLNotYetValid(); break; case SslError.SSL_UNTRUSTED: primErrorStringId = r.stringSSLUntrusted(); break; } // Building text message to show String text = getContext().getString(r.stringSSLError(), getContext().getString(primErrorStringId)); // Bringing up toast Toast.makeText(getContext(), text, Toast.LENGTH_LONG).show(); handler.cancel(); } } /** * Called when the user facing part of the auth flow completed successfully. * The last step is to call the identity service to get the username. */ protected void onAuthFlowComplete(TokenEndpointResponse tr) { FinishAuthTask t = new FinishAuthTask(); t.execute(tr); } // base class with common code for the background task that finishes off the auth process protected abstract class BaseFinishAuthFlowTask<RequestType> extends AsyncTask<RequestType, Boolean, TokenEndpointResponse> { protected volatile Exception backgroundException; protected volatile IdServiceResponse id = null; public BaseFinishAuthFlowTask() { } @Override protected final TokenEndpointResponse doInBackground(RequestType ... params) { try { publishProgress(true); return performRequest(params[0]); } catch (Exception ex) { handleException(ex); } return null; } protected abstract TokenEndpointResponse performRequest(RequestType param) throws Exception; @Override protected void onPostExecute(OAuth2.TokenEndpointResponse tr) { if (backgroundException != null) { Log.w("LoginActiviy.onAuthFlowComplete", backgroundException); // Error onAuthFlowError(getContext().getString(SalesforceSDKManager.getInstance().getSalesforceR().stringGenericAuthenticationErrorTitle()), getContext().getString(SalesforceSDKManager.getInstance().getSalesforceR().stringGenericAuthenticationErrorBody())); callback.finish(); } else { // Register for push notifications, if push notification client ID is present. final Context appContext = SalesforceSDKManager.getInstance().getAppContext(); final String pushNotificationId = BootConfig.getBootConfig(appContext).getPushNotificationClientId(); if (pushNotificationId != null && !pushNotificationId.trim().isEmpty()) { PushMessaging.register(appContext); } // Putting together all the information needed to create the new account. 
accountOptions = new AccountOptions(id.username, tr.refreshToken, tr.authToken, tr.idUrl, tr.instanceUrl, tr.orgId, tr.userId); // Sets additional admin prefs, if they exist. if (id.adminPrefs != null) { final AdminPrefsManager prefManager = SalesforceSDKManager.getInstance().getAdminPrefsManager(); prefManager.setPrefs(id.adminPrefs); } // Screen lock required by mobile policy if (id.screenLockTimeout > 0) { PasscodeManager passcodeManager = SalesforceSDKManager.getInstance().getPasscodeManager(); passcodeManager.reset(getContext()); // get rid of existing passcode if any passcodeManager.setTimeoutMs(id.screenLockTimeout * 1000 * 60 /* converting minutes to milliseconds*/); passcodeManager.setMinPasscodeLength(id.pinLength); // This will bring up the create passcode screen - we will create the account in onResume SalesforceSDKManager.getInstance().getPasscodeManager().setEnabled(true); SalesforceSDKManager.getInstance().getPasscodeManager().lockIfNeeded((Activity) getContext(), true); } // No screen lock required or no mobile policy specified else { addAccount(); callback.finish(); } } } protected void handleException(Exception ex) { if (ex.getMessage() != null) Log.w("BaseFinishAuthFlowTask", "handleException", ex); backgroundException = ex; } @Override protected void onProgressUpdate(Boolean... values) { callback.onIndeterminateProgress(values[0]); } } /** * This is a background process that will call the identity service to get the info we need from * the Identity service, and finally wrap up and create account. */ private class FinishAuthTask extends BaseFinishAuthFlowTask<TokenEndpointResponse> { @Override protected TokenEndpointResponse performRequest(TokenEndpointResponse tr) throws Exception { try { id = OAuth2.callIdentityService( HttpAccess.DEFAULT, tr.idUrlWithInstance, tr.authToken); } catch(Exception e) { backgroundException = e; } return tr; } } protected void addAccount() { ClientManager clientManager = new ClientManager(getContext(), SalesforceSDKManager.getInstance().getAccountType(), loginOptions, SalesforceSDKManager.getInstance().shouldLogoutWhenTokenRevoked()); // Create account name (shown in Settings -> Accounts & sync) String accountName = buildAccountName(accountOptions.username); // New account Bundle extras = clientManager.createNewAccount(accountName, accountOptions.username, accountOptions.refreshToken, accountOptions.authToken, accountOptions.instanceUrl, loginOptions.loginUrl, accountOptions.identityUrl, getOAuthClientId(), accountOptions.orgId, accountOptions.userId, loginOptions.passcodeHash, loginOptions.clientSecret); callback.onAccountAuthenticatorResult(extras); } /** * @return name to be shown for account in Settings -> Accounts & Sync */ protected String buildAccountName(String username) { return String.format("%s (%s)", username, SalesforceSDKManager.getInstance().getApplicationName()); } /** * WebChromeClient used to report back loading progress. 
*/ protected class AuthWebChromeClient extends WebChromeClient { @Override public void onProgressChanged(WebView view, int newProgress) { callback.onLoadingProgress(newProgress * 100); } } /** * Class encapsulating the parameters required to create a new account */ public static class AccountOptions { private static final String USER_ID = "userId"; private static final String ORG_ID = "orgId"; private static final String IDENTITY_URL = "identityUrl"; private static final String INSTANCE_URL = "instanceUrl"; private static final String AUTH_TOKEN = "authToken"; private static final String REFRESH_TOKEN = "refreshToken"; private static final String USERNAME = "username"; public final String username; public final String refreshToken; public final String authToken; public final String identityUrl; public final String instanceUrl; public final String orgId; public final String userId; private final Bundle bundle; public AccountOptions(String username, String refreshToken, String authToken, String identityUrl, String instanceUrl, String orgId, String userId) { super(); this.username = username; this.refreshToken = refreshToken; this.authToken = authToken; this.identityUrl = identityUrl; this.instanceUrl = instanceUrl; this.orgId = orgId; this.userId = userId; bundle = new Bundle(); bundle.putString(USERNAME, username); bundle.putString(REFRESH_TOKEN, refreshToken); bundle.putString(AUTH_TOKEN, authToken); bundle.putString(INSTANCE_URL, instanceUrl); bundle.putString(ORG_ID, orgId); bundle.putString(USER_ID, userId); } public Bundle asBundle() { return bundle; } public static AccountOptions fromBundle(Bundle options) { if (options == null) return null; return new AccountOptions( options.getString(USERNAME), options.getString(REFRESH_TOKEN), options.getString(AUTH_TOKEN), options.getString(IDENTITY_URL), options.getString(INSTANCE_URL), options.getString(ORG_ID), options.getString(USER_ID) ); } } }
1
13,802
Fix for URISyntaxException.
forcedotcom-SalesforceMobileSDK-Android
java
@@ -121,6 +121,10 @@ mainLoop: logrus.WithField("route", routeUpd).Debug("Ignoring route with no link index.") continue } + if routeUpd.Dst == nil { + logrus.WithField("route", routeUpd).Debug("Ignoring route with no destination") + continue + } idx := routeUpd.LinkIndex oldUpds := updatesByIfaceIdx[idx]
1
// Copyright (c) 2020-2021 Tigera, Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package ifacemonitor import ( "context" "net" "syscall" "time" "github.com/sirupsen/logrus" "github.com/vishvananda/netlink" "golang.org/x/sys/unix" "github.com/projectcalico/felix/timeshim" ) const FlapDampingDelay = 100 * time.Millisecond type updateFilter struct { Time timeshim.Interface } type UpdateFilterOp func(filter *updateFilter) func WithTimeShim(t timeshim.Interface) UpdateFilterOp { return func(filter *updateFilter) { filter.Time = t } } // FilterUpdates filters out updates that occur when IPs are quickly removed and re-added. // Some DHCP clients flap the IP during an IP renewal, for example. // // Algorithm: // * Maintain a queue of link and address updates per interface. // * When we see a potential flap (i.e. an IP deletion), defer processing the queue for a while. // * If the flap resolves itself (i.e. the IP is added back), suppress the IP deletion. func FilterUpdates(ctx context.Context, routeOutC chan<- netlink.RouteUpdate, routeInC <-chan netlink.RouteUpdate, linkOutC chan<- netlink.LinkUpdate, linkInC <-chan netlink.LinkUpdate, options ...UpdateFilterOp, ) { // Propagate failures to the downstream channels. defer close(routeOutC) defer close(linkOutC) u := &updateFilter{ Time: timeshim.RealTime(), } for _, op := range options { op(u) } logrus.Debug("FilterUpdates: starting") var timerC <-chan time.Time type timestampedUpd struct { ReadyAt time.Time Update interface{} // RouteUpdate or LinkUpdate } updatesByIfaceIdx := map[int][]timestampedUpd{} mainLoop: for { select { case <-ctx.Done(): logrus.Info("FilterUpdates: Context expired, stopping") return case linkUpd, ok := <-linkInC: if !ok { logrus.Error("FilterUpdates: link input channel closed.") return } idx := int(linkUpd.Index) linkIsUp := linkUpd.Header.Type == syscall.RTM_NEWLINK && linkIsOperUp(linkUpd.Link) var delay time.Duration if linkIsUp { if len(updatesByIfaceIdx[idx]) == 0 { // Empty queue (so no flap in progress) and the link is up, no need to delay the message. linkOutC <- linkUpd continue mainLoop } // Link is up but potential flap in progress, queue the update behind the other messages. delay = 0 } else { // We delay link down updates because a flap can involve both a link down and an IP removal. // Since we receive those two messages over separate channels, the two messages can race. 
delay = FlapDampingDelay } updatesByIfaceIdx[idx] = append(updatesByIfaceIdx[idx], timestampedUpd{ ReadyAt: u.Time.Now().Add(delay), Update: linkUpd, }) case routeUpd, ok := <-routeInC: if !ok { logrus.Error("FilterUpdates: route input channel closed.") return } logrus.WithField("route", routeUpd).Debug("Route update") if !routeIsLocalUnicast(routeUpd.Route) { logrus.WithField("route", routeUpd).Debug("Ignoring non-local route.") continue } if routeUpd.LinkIndex == 0 { logrus.WithField("route", routeUpd).Debug("Ignoring route with no link index.") continue } idx := routeUpd.LinkIndex oldUpds := updatesByIfaceIdx[idx] var readyToSendTime time.Time if routeUpd.Type == unix.RTM_NEWROUTE { logrus.WithField("addr", routeUpd.Dst).Debug("FilterUpdates: got address ADD") if len(oldUpds) == 0 { // This is an add for a new IP and there's nothing else in the queue for this interface. // Short circuit. We care about flaps where IPs are temporarily removed so no need to // delay an add. logrus.Debug("FilterUpdates: add with empty queue, short circuit.") routeOutC <- routeUpd continue } // Else, there's something else in the queue, need to process the queue... logrus.Debug("FilterUpdates: add with non-empty queue.") // We don't actually need to delay the add itself so we don't set any delay here. It will // still be queued up behind other updates. readyToSendTime = u.Time.Now() } else { // Got a delete, it might be a flap so queue the update. logrus.WithField("addr", routeUpd.Dst).Debug("FilterUpdates: got address DEL") readyToSendTime = u.Time.Now().Add(FlapDampingDelay) } // Coalesce updates for the same IP by squashing any previous updates for the same CIDR before // we append this update to the queue. We need to scan the whole queue because there may be // updates for different IPs in flight. upds := oldUpds[:0] for _, upd := range oldUpds { logrus.WithField("previous", upd).Debug("FilterUpdates: examining previous update.") if oldAddrUpd, ok := upd.Update.(netlink.RouteUpdate); ok { if ipNetsEqual(oldAddrUpd.Dst, routeUpd.Dst) { // New update for the same IP, suppress the old update logrus.WithField("address", oldAddrUpd.Dst.String()).Debug( "Received update for same IP within a short time, squashed the old update.") continue } } upds = append(upds, upd) } upds = append(upds, timestampedUpd{ReadyAt: readyToSendTime, Update: routeUpd}) updatesByIfaceIdx[idx] = upds case <-timerC: logrus.Debug("FilterUpdates: timer popped.") timerC = nil } if timerC != nil { // Optimisation: we much have just queued an update but there's already a timer set and we know // that timer must pop before the one for the new update. Skip recalculating the timer. logrus.Debug("FilterUpdates: timer already set.") continue mainLoop } var nextUpdTime time.Time for idx, upds := range updatesByIfaceIdx { logrus.WithField("ifaceIdx", idx).Debug("FilterUpdates: examining updates for interface.") for len(upds) > 0 { firstUpd := upds[0] if u.Time.Since(firstUpd.ReadyAt) >= 0 { // Either update is old enough to prevent flapping or it's an address being added. // Ready to send... logrus.WithField("update", firstUpd).Debug("FilterUpdates: update ready to send.") switch u := firstUpd.Update.(type) { case netlink.RouteUpdate: routeOutC <- u case netlink.LinkUpdate: linkOutC <- u } upds = upds[1:] } else { // Update is too new, figure out when it'll be safe to send it. 
logrus.WithField("update", firstUpd).Debug("FilterUpdates: update not ready.") if nextUpdTime.IsZero() || firstUpd.ReadyAt.Before(nextUpdTime) { nextUpdTime = firstUpd.ReadyAt } break } } if len(upds) == 0 { logrus.WithField("ifaceIdx", idx).Debug("FilterUpdates: no more updates for interface.") delete(updatesByIfaceIdx, idx) } else { logrus.WithField("ifaceIdx", idx).WithField("num", len(upds)).Debug( "FilterUpdates: still updates for interface.") updatesByIfaceIdx[idx] = upds } } if nextUpdTime.IsZero() { // Queue is empty so no need to schedule a timer. continue mainLoop } // Schedule timer to process the rest of the queue. delay := u.Time.Until(nextUpdTime) if delay <= 0 { delay = 1 } logrus.WithField("delay", delay).Debug("FilterUpdates: calculated delay.") timerC = u.Time.After(delay) } } func ipNetsEqual(a *net.IPNet, b *net.IPNet) bool { if a == b { return true } if a == nil || b == nil { return false } aSize, aBits := a.Mask.Size() bSize, bBits := b.Mask.Size() return a.IP.Equal(b.IP) && aSize == bSize && aBits == bBits } func routeIsLocalUnicast(route netlink.Route) bool { return route.Type == unix.RTN_LOCAL }
1
19,028
It would be good to add a unit test for this case.
projectcalico-felix
c
@@ -44,6 +44,8 @@ module.exports = { 'arrow-body-style': ['warn', 'as-needed'], 'no-param-reassign': ['error', { props: false }], 'import/prefer-default-export': 'off', + 'import/no-extraneous-dependencies': 'off', + 'import/no-unresolved': 'off', 'no-console': 'off', 'eol-last': ['error', 'always'], 'no-debugger': 'error',
1
module.exports = { env: { browser: true, es6: true, 'jest/globals': true, }, extends: [ 'airbnb', 'plugin:@typescript-eslint/recommended', 'prettier', 'prettier/@typescript-eslint', 'plugin:prettier/recommended', ], globals: { Atomics: 'readonly', SharedArrayBuffer: 'readonly', }, parser: '@typescript-eslint/parser', parserOptions: { project: './tsconfig.json', tsconfigRootDir: './', // TODO: we need this because of an issue with @typescript-eslint/parser: https://github.com/typescript-eslint/typescript-eslint/issues/864 createDefaultProgram: true, }, settings: { 'import/resolver': { node: { extensions: ['.js', '.jsx', '.ts', '.tsx'], }, }, }, plugins: ['react', '@typescript-eslint', 'prettier', 'jest'], rules: { '@typescript-eslint/member-delimiter-style': 'off', '@typescript-eslint/explicit-function-return-type': 'off', '@typescript-eslint/no-explicit-any': 'off', '@typescript-eslint/no-unused-vars': 'off', '@typescript-eslint/unified-signatures': 'error', '@typescript-eslint/no-inferrable-types': ['error', { ignoreParameters: true }], 'react/jsx-filename-extension': ['error', { extensions: ['.tsx'] }], 'react/jsx-one-expression-per-line': 'off', 'react/jsx-wrap-multilines': 'off', 'react/jsx-props-no-spreading': 'off', 'arrow-body-style': ['warn', 'as-needed'], 'no-param-reassign': ['error', { props: false }], 'import/prefer-default-export': 'off', 'no-console': 'off', 'eol-last': ['error', 'always'], 'no-debugger': 'error', 'no-nested-ternary': 'off', curly: ['error', 'all'], }, }
1
14,558
Why do we need to add this?
HospitalRun-hospitalrun-frontend
js
@@ -1,6 +1,6 @@ -require 'spec_helper' +require 'rails_helper' -describe Api::V1::CompletionsController, '#show' do +describe Api::V1::CompletionsController, '#show', type: :controller do it 'returns a 401 when users are not authenticated' do get :index expect(response.code).to eq "401"
1
require 'spec_helper' describe Api::V1::CompletionsController, '#show' do it 'returns a 401 when users are not authenticated' do get :index expect(response.code).to eq "401" end end
1
10,598
Prefer double-quoted strings unless you need single quotes to avoid extra backslashes for escaping.
thoughtbot-upcase
rb
@@ -2259,11 +2259,12 @@ type JSInfo struct { Disabled bool `json:"disabled,omitempty"` Config JetStreamConfig `json:"config,omitempty"` JetStreamStats - StreamCnt int `json:"total_streams,omitempty"` - ConsumerCnt int `json:"total_consumers,omitempty"` - MessageCnt uint64 `json:"total_messages,omitempty"` - MessageBytes uint64 `json:"total_message_bytes,omitempty"` - Meta *ClusterInfo `json:"meta_cluster,omitempty"` + APICalls int64 `json:"current_api_calls"` + Streams int `json:"total_streams,omitempty"` + Consumers int `json:"total_consumers,omitempty"` + Messages uint64 `json:"total_messages,omitempty"` + Bytes uint64 `json:"total_message_bytes,omitempty"` + Meta *ClusterInfo `json:"meta_cluster,omitempty"` // aggregate raft info AccountDetails []*AccountDetail `json:"account_details,omitempty"` }
1
// Copyright 2013-2019 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package server import ( "crypto/tls" "encoding/json" "fmt" "net" "net/http" "net/url" "runtime" "sort" "strconv" "strings" "sync/atomic" "time" "github.com/nats-io/jwt/v2" "github.com/nats-io/nats-server/v2/server/pse" ) // Snapshot this var numCores int var maxProcs int func init() { numCores = runtime.NumCPU() maxProcs = runtime.GOMAXPROCS(0) } // Connz represents detailed information on current client connections. type Connz struct { ID string `json:"server_id"` Now time.Time `json:"now"` NumConns int `json:"num_connections"` Total int `json:"total"` Offset int `json:"offset"` Limit int `json:"limit"` Conns []*ConnInfo `json:"connections"` } // ConnzOptions are the options passed to Connz() type ConnzOptions struct { // Sort indicates how the results will be sorted. Check SortOpt for possible values. // Only the sort by connection ID (ByCid) is ascending, all others are descending. Sort SortOpt `json:"sort"` // Username indicates if user names should be included in the results. Username bool `json:"auth"` // Subscriptions indicates if subscriptions should be included in the results. Subscriptions bool `json:"subscriptions"` // SubscriptionsDetail indicates if subscription details should be included in the results SubscriptionsDetail bool `json:"subscriptions_detail"` // Offset is used for pagination. Connz() only returns connections starting at this // offset from the global results. Offset int `json:"offset"` // Limit is the maximum number of connections that should be returned by Connz(). Limit int `json:"limit"` // Filter for this explicit client connection. CID uint64 `json:"cid"` // Filter by connection state. State ConnState `json:"state"` // The below options only apply if auth is true. // Filter by username. User string `json:"user"` // Filter by account. Account string `json:"acc"` } // ConnState is for filtering states of connections. We will only have two, open and closed. type ConnState int const ( // ConnOpen filters on open clients. ConnOpen = ConnState(iota) // ConnClosed filters on closed clients. ConnClosed // ConnAll returns all clients. ConnAll ) // ConnInfo has detailed information on a per connection basis. 
type ConnInfo struct { Cid uint64 `json:"cid"` IP string `json:"ip"` Port int `json:"port"` Start time.Time `json:"start"` LastActivity time.Time `json:"last_activity"` Stop *time.Time `json:"stop,omitempty"` Reason string `json:"reason,omitempty"` RTT string `json:"rtt,omitempty"` Uptime string `json:"uptime"` Idle string `json:"idle"` Pending int `json:"pending_bytes"` InMsgs int64 `json:"in_msgs"` OutMsgs int64 `json:"out_msgs"` InBytes int64 `json:"in_bytes"` OutBytes int64 `json:"out_bytes"` NumSubs uint32 `json:"subscriptions"` Name string `json:"name,omitempty"` Lang string `json:"lang,omitempty"` Version string `json:"version,omitempty"` TLSVersion string `json:"tls_version,omitempty"` TLSCipher string `json:"tls_cipher_suite,omitempty"` AuthorizedUser string `json:"authorized_user,omitempty"` Account string `json:"account,omitempty"` Subs []string `json:"subscriptions_list,omitempty"` SubsDetail []SubDetail `json:"subscriptions_list_detail,omitempty"` JWT string `json:"jwt,omitempty"` IssuerKey string `json:"issuer_key,omitempty"` NameTag string `json:"name_tag,omitempty"` Tags jwt.TagList `json:"tags,omitempty"` } // DefaultConnListSize is the default size of the connection list. const DefaultConnListSize = 1024 // DefaultSubListSize is the default size of the subscriptions list. const DefaultSubListSize = 1024 const defaultStackBufSize = 10000 func newSubsDetailList(client *client) []SubDetail { subsDetail := make([]SubDetail, 0, len(client.subs)) for _, sub := range client.subs { subsDetail = append(subsDetail, newClientSubDetail(sub)) } return subsDetail } func newSubsList(client *client) []string { subs := make([]string, 0, len(client.subs)) for _, sub := range client.subs { subs = append(subs, string(sub.subject)) } return subs } // Connz returns a Connz struct containing information about connections. func (s *Server) Connz(opts *ConnzOptions) (*Connz, error) { var ( sortOpt = ByCid auth bool subs bool subsDet bool offset int limit = DefaultConnListSize cid = uint64(0) state = ConnOpen user string acc string ) if opts != nil { // If no sort option given or sort is by uptime, then sort by cid if opts.Sort == "" { sortOpt = ByCid } else { sortOpt = opts.Sort if !sortOpt.IsValid() { return nil, fmt.Errorf("invalid sorting option: %s", sortOpt) } } // Auth specifics. auth = opts.Username if !auth && (user != "" || acc != "") { return nil, fmt.Errorf("filter by user or account only allowed with auth option") } user = opts.User acc = opts.Account subs = opts.Subscriptions subsDet = opts.SubscriptionsDetail offset = opts.Offset if offset < 0 { offset = 0 } limit = opts.Limit if limit <= 0 { limit = DefaultConnListSize } // state state = opts.State // ByStop only makes sense on closed connections if sortOpt == ByStop && state != ConnClosed { return nil, fmt.Errorf("sort by stop only valid on closed connections") } // ByReason is the same. if sortOpt == ByReason && state != ConnClosed { return nil, fmt.Errorf("sort by reason only valid on closed connections") } // If searching by CID if opts.CID > 0 { cid = opts.CID limit = 1 } } c := &Connz{ Offset: offset, Limit: limit, Now: time.Now(), } // Open clients var openClients []*client // Hold for closed clients if requested. var closedClients []*closedClient // Walk the open client list with server lock held. s.mu.Lock() // copy the server id for monitoring c.ID = s.info.ID // Number of total clients. The resulting ConnInfo array // may be smaller if pagination is used. 
switch state { case ConnOpen: c.Total = len(s.clients) case ConnClosed: c.Total = s.closed.len() closedClients = s.closed.closedClients() c.Total = len(closedClients) case ConnAll: closedClients = s.closed.closedClients() c.Total = len(s.clients) + len(closedClients) } totalClients := c.Total if cid > 0 { // Meaning we only want 1. totalClients = 1 } if state == ConnOpen || state == ConnAll { openClients = make([]*client, 0, totalClients) } // Data structures for results. var conns []ConnInfo // Limits allocs for actual ConnInfos. var pconns ConnInfos switch state { case ConnOpen: conns = make([]ConnInfo, totalClients) pconns = make(ConnInfos, totalClients) case ConnClosed: pconns = make(ConnInfos, totalClients) case ConnAll: conns = make([]ConnInfo, cap(openClients)) pconns = make(ConnInfos, totalClients) } // Search by individual CID. if cid > 0 { if state == ConnClosed || state == ConnAll { copyClosed := closedClients closedClients = nil for _, cc := range copyClosed { if cc.Cid == cid { closedClients = []*closedClient{cc} break } } } else if state == ConnOpen || state == ConnAll { client := s.clients[cid] if client != nil { openClients = append(openClients, client) } } } else { // Gather all open clients. if state == ConnOpen || state == ConnAll { for _, client := range s.clients { // If we have an account specified we need to filter. if acc != "" && (client.acc == nil || client.acc.Name != acc) { continue } // Do user filtering second if user != "" && client.opts.Username != user { continue } openClients = append(openClients, client) } } } s.mu.Unlock() // Just return with empty array if nothing here. if len(openClients) == 0 && len(closedClients) == 0 { c.Conns = ConnInfos{} return c, nil } // Now whip through and generate ConnInfo entries // Open Clients i := 0 for _, client := range openClients { client.mu.Lock() ci := &conns[i] ci.fill(client, client.nc, c.Now) // Fill in subscription data if requested. if len(client.subs) > 0 { if subsDet { ci.SubsDetail = newSubsDetailList(client) } else if subs { ci.Subs = newSubsList(client) } } // Fill in user if auth requested. if auth { ci.AuthorizedUser = client.getRawAuthUser() // Add in account iff not the global account. if client.acc != nil && (client.acc.Name != globalAccountName) { ci.Account = client.acc.Name } ci.JWT = client.opts.JWT ci.IssuerKey = issuerForClient(client) ci.Tags = client.tags ci.NameTag = client.nameTag } client.mu.Unlock() pconns[i] = ci i++ } // Closed Clients var needCopy bool if subs || auth { needCopy = true } for _, cc := range closedClients { // If we have an account specified we need to filter. if acc != "" && cc.acc != acc { continue } // Do user filtering second if user != "" && cc.user != user { continue } // Copy if needed for any changes to the ConnInfo if needCopy { cx := *cc cc = &cx } // Fill in subscription data if requested. if len(cc.subs) > 0 { if subsDet { cc.SubsDetail = cc.subs } else if subs { cc.Subs = make([]string, 0, len(cc.subs)) for _, sub := range cc.subs { cc.Subs = append(cc.Subs, sub.Subject) } } } // Fill in user if auth requested. if auth { cc.AuthorizedUser = cc.user // Add in account iff not the global account. if cc.acc != "" && (cc.acc != globalAccountName) { cc.Account = cc.acc } } pconns[i] = &cc.ConnInfo i++ } // This will trip if we have filtered out client connections. 
if len(pconns) != i { pconns = pconns[:i] totalClients = i } switch sortOpt { case ByCid, ByStart: sort.Sort(byCid{pconns}) case BySubs: sort.Sort(sort.Reverse(bySubs{pconns})) case ByPending: sort.Sort(sort.Reverse(byPending{pconns})) case ByOutMsgs: sort.Sort(sort.Reverse(byOutMsgs{pconns})) case ByInMsgs: sort.Sort(sort.Reverse(byInMsgs{pconns})) case ByOutBytes: sort.Sort(sort.Reverse(byOutBytes{pconns})) case ByInBytes: sort.Sort(sort.Reverse(byInBytes{pconns})) case ByLast: sort.Sort(sort.Reverse(byLast{pconns})) case ByIdle: sort.Sort(sort.Reverse(byIdle{pconns})) case ByUptime: sort.Sort(byUptime{pconns, time.Now()}) case ByStop: sort.Sort(sort.Reverse(byStop{pconns})) case ByReason: sort.Sort(byReason{pconns}) } minoff := c.Offset maxoff := c.Offset + c.Limit maxIndex := totalClients // Make sure these are sane. if minoff > maxIndex { minoff = maxIndex } if maxoff > maxIndex { maxoff = maxIndex } // Now pare down to the requested size. // TODO(dlc) - for very large number of connections we // could save the whole list in a hash, send hash on first // request and allow users to use has for subsequent pages. // Low TTL, say < 1sec. c.Conns = pconns[minoff:maxoff] c.NumConns = len(c.Conns) return c, nil } // Fills in the ConnInfo from the client. // client should be locked. func (ci *ConnInfo) fill(client *client, nc net.Conn, now time.Time) { ci.Cid = client.cid ci.Start = client.start ci.LastActivity = client.last ci.Uptime = myUptime(now.Sub(client.start)) ci.Idle = myUptime(now.Sub(client.last)) ci.RTT = client.getRTT().String() ci.OutMsgs = client.outMsgs ci.OutBytes = client.outBytes ci.NumSubs = uint32(len(client.subs)) ci.Pending = int(client.out.pb) ci.Name = client.opts.Name ci.Lang = client.opts.Lang ci.Version = client.opts.Version // inMsgs and inBytes are updated outside of the client's lock, so // we need to use atomic here. ci.InMsgs = atomic.LoadInt64(&client.inMsgs) ci.InBytes = atomic.LoadInt64(&client.inBytes) // If the connection is gone, too bad, we won't set TLSVersion and TLSCipher. // Exclude clients that are still doing handshake so we don't block in // ConnectionState(). if client.flags.isSet(handshakeComplete) && nc != nil { conn := nc.(*tls.Conn) cs := conn.ConnectionState() ci.TLSVersion = tlsVersion(cs.Version) ci.TLSCipher = tlsCipher(cs.CipherSuite) } if client.port != 0 { ci.Port = int(client.port) ci.IP = client.host } } // Assume lock is held func (c *client) getRTT() time.Duration { if c.rtt == 0 { // If a real client, go ahead and send ping now to get a value // for RTT. For tests and telnet, or if client is closing, etc skip. 
if c.opts.Lang != "" { c.sendRTTPingLocked() } return 0 } var rtt time.Duration if c.rtt > time.Microsecond && c.rtt < time.Millisecond { rtt = c.rtt.Truncate(time.Microsecond) } else { rtt = c.rtt.Truncate(time.Nanosecond) } return rtt } func decodeBool(w http.ResponseWriter, r *http.Request, param string) (bool, error) { str := r.URL.Query().Get(param) if str == "" { return false, nil } val, err := strconv.ParseBool(str) if err != nil { w.WriteHeader(http.StatusBadRequest) w.Write([]byte(fmt.Sprintf("Error decoding boolean for '%s': %v", param, err))) return false, err } return val, nil } func decodeUint64(w http.ResponseWriter, r *http.Request, param string) (uint64, error) { str := r.URL.Query().Get(param) if str == "" { return 0, nil } val, err := strconv.ParseUint(str, 10, 64) if err != nil { w.WriteHeader(http.StatusBadRequest) w.Write([]byte(fmt.Sprintf("Error decoding uint64 for '%s': %v", param, err))) return 0, err } return val, nil } func decodeInt(w http.ResponseWriter, r *http.Request, param string) (int, error) { str := r.URL.Query().Get(param) if str == "" { return 0, nil } val, err := strconv.Atoi(str) if err != nil { w.WriteHeader(http.StatusBadRequest) w.Write([]byte(fmt.Sprintf("Error decoding int for '%s': %v", param, err))) return 0, err } return val, nil } func decodeState(w http.ResponseWriter, r *http.Request) (ConnState, error) { str := r.URL.Query().Get("state") if str == "" { return ConnOpen, nil } switch strings.ToLower(str) { case "open": return ConnOpen, nil case "closed": return ConnClosed, nil case "any", "all": return ConnAll, nil } // We do not understand intended state here. w.WriteHeader(http.StatusBadRequest) err := fmt.Errorf("Error decoding state for %s", str) w.Write([]byte(err.Error())) return 0, err } func decodeSubs(w http.ResponseWriter, r *http.Request) (subs bool, subsDet bool, err error) { subsDet = strings.ToLower(r.URL.Query().Get("subs")) == "detail" if !subsDet { subs, err = decodeBool(w, r, "subs") } return } // HandleConnz process HTTP requests for connection information. func (s *Server) HandleConnz(w http.ResponseWriter, r *http.Request) { sortOpt := SortOpt(r.URL.Query().Get("sort")) auth, err := decodeBool(w, r, "auth") if err != nil { return } subs, subsDet, err := decodeSubs(w, r) if err != nil { return } offset, err := decodeInt(w, r, "offset") if err != nil { return } limit, err := decodeInt(w, r, "limit") if err != nil { return } cid, err := decodeUint64(w, r, "cid") if err != nil { return } state, err := decodeState(w, r) if err != nil { return } user := r.URL.Query().Get("user") acc := r.URL.Query().Get("acc") connzOpts := &ConnzOptions{ Sort: sortOpt, Username: auth, Subscriptions: subs, SubscriptionsDetail: subsDet, Offset: offset, Limit: limit, CID: cid, State: state, User: user, Account: acc, } s.mu.Lock() s.httpReqStats[ConnzPath]++ s.mu.Unlock() c, err := s.Connz(connzOpts) if err != nil { w.WriteHeader(http.StatusBadRequest) w.Write([]byte(err.Error())) return } b, err := json.MarshalIndent(c, "", " ") if err != nil { s.Errorf("Error marshaling response to /connz request: %v", err) } // Handle response ResponseHandler(w, r, b) } // Routez represents detailed information on current client connections. 
type Routez struct { ID string `json:"server_id"` Now time.Time `json:"now"` Import *SubjectPermission `json:"import,omitempty"` Export *SubjectPermission `json:"export,omitempty"` NumRoutes int `json:"num_routes"` Routes []*RouteInfo `json:"routes"` } // RoutezOptions are options passed to Routez type RoutezOptions struct { // Subscriptions indicates that Routez will return a route's subscriptions Subscriptions bool `json:"subscriptions"` // SubscriptionsDetail indicates if subscription details should be included in the results SubscriptionsDetail bool `json:"subscriptions_detail"` } // RouteInfo has detailed information on a per connection basis. type RouteInfo struct { Rid uint64 `json:"rid"` RemoteID string `json:"remote_id"` DidSolicit bool `json:"did_solicit"` IsConfigured bool `json:"is_configured"` IP string `json:"ip"` Port int `json:"port"` Import *SubjectPermission `json:"import,omitempty"` Export *SubjectPermission `json:"export,omitempty"` Pending int `json:"pending_size"` RTT string `json:"rtt,omitempty"` InMsgs int64 `json:"in_msgs"` OutMsgs int64 `json:"out_msgs"` InBytes int64 `json:"in_bytes"` OutBytes int64 `json:"out_bytes"` NumSubs uint32 `json:"subscriptions"` Subs []string `json:"subscriptions_list,omitempty"` SubsDetail []SubDetail `json:"subscriptions_list_detail,omitempty"` } // Routez returns a Routez struct containing information about routes. func (s *Server) Routez(routezOpts *RoutezOptions) (*Routez, error) { rs := &Routez{Routes: []*RouteInfo{}} rs.Now = time.Now() if routezOpts == nil { routezOpts = &RoutezOptions{} } s.mu.Lock() rs.NumRoutes = len(s.routes) // copy the server id for monitoring rs.ID = s.info.ID // Check for defined permissions for all connected routes. if perms := s.getOpts().Cluster.Permissions; perms != nil { rs.Import = perms.Import rs.Export = perms.Export } // Walk the list for _, r := range s.routes { r.mu.Lock() ri := &RouteInfo{ Rid: r.cid, RemoteID: r.route.remoteID, DidSolicit: r.route.didSolicit, IsConfigured: r.route.routeType == Explicit, InMsgs: atomic.LoadInt64(&r.inMsgs), OutMsgs: r.outMsgs, InBytes: atomic.LoadInt64(&r.inBytes), OutBytes: r.outBytes, NumSubs: uint32(len(r.subs)), Import: r.opts.Import, Export: r.opts.Export, RTT: r.getRTT().String(), } if len(r.subs) > 0 { if routezOpts.SubscriptionsDetail { ri.SubsDetail = newSubsDetailList(r) } else if routezOpts.Subscriptions { ri.Subs = newSubsList(r) } } switch conn := r.nc.(type) { case *net.TCPConn, *tls.Conn: addr := conn.RemoteAddr().(*net.TCPAddr) ri.Port = addr.Port ri.IP = addr.IP.String() } r.mu.Unlock() rs.Routes = append(rs.Routes, ri) } s.mu.Unlock() return rs, nil } // HandleRoutez process HTTP requests for route information. func (s *Server) HandleRoutez(w http.ResponseWriter, r *http.Request) { subs, subsDetail, err := decodeSubs(w, r) if err != nil { return } opts := RoutezOptions{Subscriptions: subs, SubscriptionsDetail: subsDetail} s.mu.Lock() s.httpReqStats[RoutezPath]++ s.mu.Unlock() // As of now, no error is ever returned. rs, _ := s.Routez(&opts) b, err := json.MarshalIndent(rs, "", " ") if err != nil { s.Errorf("Error marshaling response to /routez request: %v", err) } // Handle response ResponseHandler(w, r, b) } // Subsz represents detail information on current connections. type Subsz struct { ID string `json:"server_id"` Now time.Time `json:"now"` *SublistStats Total int `json:"total"` Offset int `json:"offset"` Limit int `json:"limit"` Subs []SubDetail `json:"subscriptions_list,omitempty"` } // SubszOptions are the options passed to Subsz. 
// As of now, there are no options defined. type SubszOptions struct { // Offset is used for pagination. Subsz() only returns connections starting at this // offset from the global results. Offset int `json:"offset"` // Limit is the maximum number of subscriptions that should be returned by Subsz(). Limit int `json:"limit"` // Subscriptions indicates if subscription details should be included in the results. Subscriptions bool `json:"subscriptions"` // Filter based on this account name. Account string `json:"account,omitempty"` // Test the list against this subject. Needs to be literal since it signifies a publish subject. // We will only return subscriptions that would match if a message was sent to this subject. Test string `json:"test,omitempty"` } // SubDetail is for verbose information for subscriptions. type SubDetail struct { Account string `json:"account,omitempty"` Subject string `json:"subject"` Queue string `json:"qgroup,omitempty"` Sid string `json:"sid"` Msgs int64 `json:"msgs"` Max int64 `json:"max,omitempty"` Cid uint64 `json:"cid"` } // Subscription client should be locked and guaranteed to be present. func newSubDetail(sub *subscription) SubDetail { sd := newClientSubDetail(sub) if sub.client.acc != nil { sd.Account = sub.client.acc.GetName() } return sd } // For subs details under clients. func newClientSubDetail(sub *subscription) SubDetail { return SubDetail{ Subject: string(sub.subject), Queue: string(sub.queue), Sid: string(sub.sid), Msgs: sub.nm, Max: sub.max, Cid: sub.client.cid, } } // Subsz returns a Subsz struct containing subjects statistics func (s *Server) Subsz(opts *SubszOptions) (*Subsz, error) { var ( subdetail bool test bool offset int limit = DefaultSubListSize testSub = "" filterAcc = "" ) if opts != nil { subdetail = opts.Subscriptions offset = opts.Offset if offset < 0 { offset = 0 } limit = opts.Limit if limit <= 0 { limit = DefaultSubListSize } if opts.Test != "" { testSub = opts.Test test = true if !IsValidLiteralSubject(testSub) { return nil, fmt.Errorf("invalid test subject, must be valid publish subject: %s", testSub) } } if opts.Account != "" { filterAcc = opts.Account } } slStats := &SublistStats{} // FIXME(dlc) - Make account aware. sz := &Subsz{s.info.ID, time.Now(), slStats, 0, offset, limit, nil} if subdetail { var raw [4096]*subscription subs := raw[:0] s.accounts.Range(func(k, v interface{}) bool { acc := v.(*Account) if filterAcc != "" && acc.GetName() != filterAcc { return true } slStats.add(acc.sl.Stats()) acc.sl.localSubs(&subs) return true }) details := make([]SubDetail, len(subs)) i := 0 // TODO(dlc) - may be inefficient and could just do normal match when total subs is large and filtering. for _, sub := range subs { // Check for filter if test && !matchLiteral(testSub, string(sub.subject)) { continue } if sub.client == nil { continue } sub.client.mu.Lock() details[i] = newSubDetail(sub) sub.client.mu.Unlock() i++ } minoff := sz.Offset maxoff := sz.Offset + sz.Limit maxIndex := i // Make sure these are sane. if minoff > maxIndex { minoff = maxIndex } if maxoff > maxIndex { maxoff = maxIndex } sz.Subs = details[minoff:maxoff] sz.Total = len(sz.Subs) } else { s.accounts.Range(func(k, v interface{}) bool { acc := v.(*Account) if filterAcc != "" && acc.GetName() != filterAcc { return true } slStats.add(acc.sl.Stats()) return true }) } return sz, nil } // HandleSubsz processes HTTP requests for subjects stats. 
func (s *Server) HandleSubsz(w http.ResponseWriter, r *http.Request) { s.mu.Lock() s.httpReqStats[SubszPath]++ s.mu.Unlock() subs, err := decodeBool(w, r, "subs") if err != nil { return } offset, err := decodeInt(w, r, "offset") if err != nil { return } limit, err := decodeInt(w, r, "limit") if err != nil { return } testSub := r.URL.Query().Get("test") // Filtered account. filterAcc := r.URL.Query().Get("acc") subszOpts := &SubszOptions{ Subscriptions: subs, Offset: offset, Limit: limit, Account: filterAcc, Test: testSub, } st, err := s.Subsz(subszOpts) if err != nil { w.WriteHeader(http.StatusBadRequest) w.Write([]byte(err.Error())) return } var b []byte if len(st.Subs) == 0 { b, err = json.MarshalIndent(st.SublistStats, "", " ") } else { b, err = json.MarshalIndent(st, "", " ") } if err != nil { s.Errorf("Error marshaling response to /subscriptionsz request: %v", err) } // Handle response ResponseHandler(w, r, b) } // HandleStacksz processes HTTP requests for getting stacks func (s *Server) HandleStacksz(w http.ResponseWriter, r *http.Request) { // Do not get any lock here that would prevent getting the stacks // if we were to have a deadlock somewhere. var defaultBuf [defaultStackBufSize]byte size := defaultStackBufSize buf := defaultBuf[:size] n := 0 for { n = runtime.Stack(buf, true) if n < size { break } size *= 2 buf = make([]byte, size) } // Handle response ResponseHandler(w, r, buf[:n]) } // Varz will output server information on the monitoring port at /varz. type Varz struct { ID string `json:"server_id"` Name string `json:"server_name"` Version string `json:"version"` Proto int `json:"proto"` GitCommit string `json:"git_commit,omitempty"` GoVersion string `json:"go"` Host string `json:"host"` Port int `json:"port"` AuthRequired bool `json:"auth_required,omitempty"` TLSRequired bool `json:"tls_required,omitempty"` TLSVerify bool `json:"tls_verify,omitempty"` IP string `json:"ip,omitempty"` ClientConnectURLs []string `json:"connect_urls,omitempty"` WSConnectURLs []string `json:"ws_connect_urls,omitempty"` MaxConn int `json:"max_connections"` MaxSubs int `json:"max_subscriptions,omitempty"` PingInterval time.Duration `json:"ping_interval"` MaxPingsOut int `json:"ping_max"` HTTPHost string `json:"http_host"` HTTPPort int `json:"http_port"` HTTPBasePath string `json:"http_base_path"` HTTPSPort int `json:"https_port"` AuthTimeout float64 `json:"auth_timeout"` MaxControlLine int32 `json:"max_control_line"` MaxPayload int `json:"max_payload"` MaxPending int64 `json:"max_pending"` Cluster ClusterOptsVarz `json:"cluster,omitempty"` Gateway GatewayOptsVarz `json:"gateway,omitempty"` LeafNode LeafNodeOptsVarz `json:"leaf,omitempty"` JetStream JetStreamVarz `json:"jetstream,omitempty"` TLSTimeout float64 `json:"tls_timeout"` WriteDeadline time.Duration `json:"write_deadline"` Start time.Time `json:"start"` Now time.Time `json:"now"` Uptime string `json:"uptime"` Mem int64 `json:"mem"` Cores int `json:"cores"` MaxProcs int `json:"gomaxprocs"` CPU float64 `json:"cpu"` Connections int `json:"connections"` TotalConnections uint64 `json:"total_connections"` Routes int `json:"routes"` Remotes int `json:"remotes"` Leafs int `json:"leafnodes"` InMsgs int64 `json:"in_msgs"` OutMsgs int64 `json:"out_msgs"` InBytes int64 `json:"in_bytes"` OutBytes int64 `json:"out_bytes"` SlowConsumers int64 `json:"slow_consumers"` Subscriptions uint32 `json:"subscriptions"` HTTPReqStats map[string]uint64 `json:"http_req_stats"` ConfigLoadTime time.Time `json:"config_load_time"` Tags jwt.TagList `json:"tags,omitempty"` 
TrustedOperatorsJwt []string `json:"trusted_operators_jwt,omitempty"` TrustedOperatorsClaim []*jwt.OperatorClaims `json:"trusted_operators_claim,omitempty"` SystemAccount string `json:"system_account,omitempty"` } // JetStreamVarz contains basic runtime information about jetstream type JetStreamVarz struct { Config JetStreamConfig `json:"config"` Stats *JetStreamStats `json:"stats"` } // ClusterOptsVarz contains monitoring cluster information type ClusterOptsVarz struct { Name string `json:"name,omitempty"` Host string `json:"addr,omitempty"` Port int `json:"cluster_port,omitempty"` AuthTimeout float64 `json:"auth_timeout,omitempty"` URLs []string `json:"urls,omitempty"` TLSTimeout float64 `json:"tls_timeout,omitempty"` TLSRequired bool `json:"tls_required,omitempty"` TLSVerify bool `json:"tls_verify,omitempty"` } // GatewayOptsVarz contains monitoring gateway information type GatewayOptsVarz struct { Name string `json:"name,omitempty"` Host string `json:"host,omitempty"` Port int `json:"port,omitempty"` AuthTimeout float64 `json:"auth_timeout,omitempty"` TLSTimeout float64 `json:"tls_timeout,omitempty"` TLSRequired bool `json:"tls_required,omitempty"` TLSVerify bool `json:"tls_verify,omitempty"` Advertise string `json:"advertise,omitempty"` ConnectRetries int `json:"connect_retries,omitempty"` Gateways []RemoteGatewayOptsVarz `json:"gateways,omitempty"` RejectUnknown bool `json:"reject_unknown,omitempty"` // config got renamed to reject_unknown_cluster } // RemoteGatewayOptsVarz contains monitoring remote gateway information type RemoteGatewayOptsVarz struct { Name string `json:"name"` TLSTimeout float64 `json:"tls_timeout,omitempty"` URLs []string `json:"urls,omitempty"` } // LeafNodeOptsVarz contains monitoring leaf node information type LeafNodeOptsVarz struct { Host string `json:"host,omitempty"` Port int `json:"port,omitempty"` AuthTimeout float64 `json:"auth_timeout,omitempty"` TLSTimeout float64 `json:"tls_timeout,omitempty"` TLSRequired bool `json:"tls_required,omitempty"` TLSVerify bool `json:"tls_verify,omitempty"` Remotes []RemoteLeafOptsVarz `json:"remotes,omitempty"` } // RemoteLeafOptsVarz contains monitoring remote leaf node information type RemoteLeafOptsVarz struct { LocalAccount string `json:"local_account,omitempty"` TLSTimeout float64 `json:"tls_timeout,omitempty"` URLs []string `json:"urls,omitempty"` } // VarzOptions are the options passed to Varz(). // Currently, there are no options defined. type VarzOptions struct{} func myUptime(d time.Duration) string { // Just use total seconds for uptime, and display days / years tsecs := d / time.Second tmins := tsecs / 60 thrs := tmins / 60 tdays := thrs / 24 tyrs := tdays / 365 if tyrs > 0 { return fmt.Sprintf("%dy%dd%dh%dm%ds", tyrs, tdays%365, thrs%24, tmins%60, tsecs%60) } if tdays > 0 { return fmt.Sprintf("%dd%dh%dm%ds", tdays, thrs%24, tmins%60, tsecs%60) } if thrs > 0 { return fmt.Sprintf("%dh%dm%ds", thrs, tmins%60, tsecs%60) } if tmins > 0 { return fmt.Sprintf("%dm%ds", tmins, tsecs%60) } return fmt.Sprintf("%ds", tsecs) } // HandleRoot will show basic info and links to others handlers. 
func (s *Server) HandleRoot(w http.ResponseWriter, r *http.Request) { // This feels dumb to me, but is required: https://code.google.com/p/go/issues/detail?id=4799 if r.URL.Path != s.httpBasePath { http.NotFound(w, r) return } s.mu.Lock() s.httpReqStats[RootPath]++ s.mu.Unlock() fmt.Fprintf(w, `<html lang="en"> <head> <link rel="shortcut icon" href="https://nats.io/img/favicon.ico"> <style type="text/css"> body { font-family: "Century Gothic", CenturyGothic, AppleGothic, sans-serif; font-size: 22; } a { margin-left: 32px; } </style> </head> <body> <img src="https://nats.io/img/logo.png" alt="NATS"> <br/> <a href=.%s>varz</a><br/> <a href=.%s>connz</a><br/> <a href=.%s>routez</a><br/> <a href=.%s>gatewayz</a><br/> <a href=.%s>leafz</a><br/> <a href=.%s>subsz</a><br/> <a href=.%s>accountz</a><br/> <a href=.%s>jsz</a><br/> <br/> <a href=https://docs.nats.io/nats-server/configuration/monitoring.html>help</a> </body> </html>`, s.basePath(VarzPath), s.basePath(ConnzPath), s.basePath(RoutezPath), s.basePath(GatewayzPath), s.basePath(LeafzPath), s.basePath(SubszPath), s.basePath(AccountzPath), s.basePath(JszPath), ) } // Varz returns a Varz struct containing the server information. func (s *Server) Varz(varzOpts *VarzOptions) (*Varz, error) { var rss, vss int64 var pcpu float64 // We want to do that outside of the lock. pse.ProcUsage(&pcpu, &rss, &vss) s.mu.Lock() // We need to create a new instance of Varz (with no reference // whatsoever to anything stored in the server) since the user // has access to the returned value. v := s.createVarz(pcpu, rss) s.mu.Unlock() return v, nil } // Returns a Varz instance. // Server lock is held on entry. func (s *Server) createVarz(pcpu float64, rss int64) *Varz { info := s.info opts := s.getOpts() c := &opts.Cluster gw := &opts.Gateway ln := &opts.LeafNode clustTlsReq := c.TLSConfig != nil gatewayTlsReq := gw.TLSConfig != nil leafTlsReq := ln.TLSConfig != nil leafTlsVerify := leafTlsReq && ln.TLSConfig.ClientAuth == tls.RequireAndVerifyClientCert varz := &Varz{ ID: info.ID, Version: info.Version, Proto: info.Proto, GitCommit: info.GitCommit, GoVersion: info.GoVersion, Name: info.Name, Host: info.Host, Port: info.Port, IP: info.IP, HTTPHost: opts.HTTPHost, HTTPPort: opts.HTTPPort, HTTPBasePath: opts.HTTPBasePath, HTTPSPort: opts.HTTPSPort, Cluster: ClusterOptsVarz{ Name: info.Cluster, Host: c.Host, Port: c.Port, AuthTimeout: c.AuthTimeout, TLSTimeout: c.TLSTimeout, TLSRequired: clustTlsReq, TLSVerify: clustTlsReq, }, Gateway: GatewayOptsVarz{ Name: gw.Name, Host: gw.Host, Port: gw.Port, AuthTimeout: gw.AuthTimeout, TLSTimeout: gw.TLSTimeout, TLSRequired: gatewayTlsReq, TLSVerify: gatewayTlsReq, Advertise: gw.Advertise, ConnectRetries: gw.ConnectRetries, Gateways: []RemoteGatewayOptsVarz{}, RejectUnknown: gw.RejectUnknown, }, LeafNode: LeafNodeOptsVarz{ Host: ln.Host, Port: ln.Port, AuthTimeout: ln.AuthTimeout, TLSTimeout: ln.TLSTimeout, TLSRequired: leafTlsReq, TLSVerify: leafTlsVerify, Remotes: []RemoteLeafOptsVarz{}, }, Start: s.start, MaxSubs: opts.MaxSubs, Cores: numCores, MaxProcs: maxProcs, Tags: opts.Tags, TrustedOperatorsJwt: opts.operatorJWT, TrustedOperatorsClaim: opts.TrustedOperators, } if len(opts.Routes) > 0 { varz.Cluster.URLs = urlsToStrings(opts.Routes) } if l := len(gw.Gateways); l > 0 { rgwa := make([]RemoteGatewayOptsVarz, l) for i, r := range gw.Gateways { rgwa[i] = RemoteGatewayOptsVarz{ Name: r.Name, TLSTimeout: r.TLSTimeout, } } varz.Gateway.Gateways = rgwa } if l := len(ln.Remotes); l > 0 { rlna := make([]RemoteLeafOptsVarz, l) for i, 
r := range ln.Remotes { rlna[i] = RemoteLeafOptsVarz{ LocalAccount: r.LocalAccount, URLs: urlsToStrings(r.URLs), TLSTimeout: r.TLSTimeout, } } varz.LeafNode.Remotes = rlna } if s.js != nil { s.js.mu.RLock() varz.JetStream = JetStreamVarz{ Config: s.js.config, } s.js.mu.RUnlock() } // Finish setting it up with fields that can be updated during // configuration reload and runtime. s.updateVarzConfigReloadableFields(varz) s.updateVarzRuntimeFields(varz, true, pcpu, rss) return varz } func urlsToStrings(urls []*url.URL) []string { sURLs := make([]string, len(urls)) for i, u := range urls { sURLs[i] = u.Host } return sURLs } // Invoked during configuration reload once options have possibly be changed // and config load time has been set. If s.varz has not been initialized yet // (because no pooling of /varz has been made), this function does nothing. // Server lock is held on entry. func (s *Server) updateVarzConfigReloadableFields(v *Varz) { if v == nil { return } opts := s.getOpts() info := &s.info v.AuthRequired = info.AuthRequired v.TLSRequired = info.TLSRequired v.TLSVerify = info.TLSVerify v.MaxConn = opts.MaxConn v.PingInterval = opts.PingInterval v.MaxPingsOut = opts.MaxPingsOut v.AuthTimeout = opts.AuthTimeout v.MaxControlLine = opts.MaxControlLine v.MaxPayload = int(opts.MaxPayload) v.MaxPending = opts.MaxPending v.TLSTimeout = opts.TLSTimeout v.WriteDeadline = opts.WriteDeadline v.ConfigLoadTime = s.configTime // Update route URLs if applicable if s.varzUpdateRouteURLs { v.Cluster.URLs = urlsToStrings(opts.Routes) s.varzUpdateRouteURLs = false } if s.sys != nil && s.sys.account != nil { v.SystemAccount = s.sys.account.GetName() } } // Updates the runtime Varz fields, that is, fields that change during // runtime and that should be updated any time Varz() or polling of /varz // is done. // Server lock is held on entry. func (s *Server) updateVarzRuntimeFields(v *Varz, forceUpdate bool, pcpu float64, rss int64) { v.Now = time.Now() v.Uptime = myUptime(time.Since(s.start)) v.Mem = rss v.CPU = pcpu if l := len(s.info.ClientConnectURLs); l > 0 { v.ClientConnectURLs = append([]string(nil), s.info.ClientConnectURLs...) } if l := len(s.info.WSConnectURLs); l > 0 { v.WSConnectURLs = append([]string(nil), s.info.WSConnectURLs...) } v.Connections = len(s.clients) v.TotalConnections = s.totalClients v.Routes = len(s.routes) v.Remotes = len(s.remotes) v.Leafs = len(s.leafs) v.InMsgs = atomic.LoadInt64(&s.inMsgs) v.InBytes = atomic.LoadInt64(&s.inBytes) v.OutMsgs = atomic.LoadInt64(&s.outMsgs) v.OutBytes = atomic.LoadInt64(&s.outBytes) v.SlowConsumers = atomic.LoadInt64(&s.slowConsumers) // FIXME(dlc) - make this multi-account aware. v.Subscriptions = s.gacc.sl.Count() v.HTTPReqStats = make(map[string]uint64, len(s.httpReqStats)) for key, val := range s.httpReqStats { v.HTTPReqStats[key] = val } // Update Gateway remote urls if applicable gw := s.gateway gw.RLock() if gw.enabled { for i := 0; i < len(v.Gateway.Gateways); i++ { g := &v.Gateway.Gateways[i] rgw := gw.remotes[g.Name] if rgw != nil { rgw.RLock() // forceUpdate is needed if user calls Varz() programmatically, // since we need to create a new instance every time and the // gateway's varzUpdateURLs may have been set to false after // a web /varz inspection. if forceUpdate || rgw.varzUpdateURLs { // Make reuse of backend array g.URLs = g.URLs[:0] // rgw.urls is a map[string]*url.URL where the key is // already in the right format (host:port, without any // user info present). 
for u := range rgw.urls { g.URLs = append(g.URLs, u) } rgw.varzUpdateURLs = false } rgw.RUnlock() } } } gw.RUnlock() if s.js != nil { v.JetStream.Stats = s.js.usageStats() } } // HandleVarz will process HTTP requests for server information. func (s *Server) HandleVarz(w http.ResponseWriter, r *http.Request) { var rss, vss int64 var pcpu float64 // We want to do that outside of the lock. pse.ProcUsage(&pcpu, &rss, &vss) // In response to http requests, we want to minimize mem copies // so we use an object stored in the server. Creating/collecting // server metrics is done under server lock, but we don't want // to marshal under that lock. Still, we need to prevent concurrent // http requests to /varz to update s.varz while marshal is // happening, so we need a new lock that serialize those http // requests and include marshaling. s.varzMu.Lock() // Use server lock to create/update the server's varz object. s.mu.Lock() s.httpReqStats[VarzPath]++ if s.varz == nil { s.varz = s.createVarz(pcpu, rss) } else { s.updateVarzRuntimeFields(s.varz, false, pcpu, rss) } s.mu.Unlock() // Do the marshaling outside of server lock, but under varzMu lock. b, err := json.MarshalIndent(s.varz, "", " ") s.varzMu.Unlock() if err != nil { s.Errorf("Error marshaling response to /varz request: %v", err) } // Handle response ResponseHandler(w, r, b) } // GatewayzOptions are the options passed to Gatewayz() type GatewayzOptions struct { // Name will output only remote gateways with this name Name string `json:"name"` // Accounts indicates if accounts with its interest should be included in the results. Accounts bool `json:"accounts"` // AccountName will limit the list of accounts to that account name (makes Accounts implicit) AccountName string `json:"account_name"` } // Gatewayz represents detailed information on Gateways type Gatewayz struct { ID string `json:"server_id"` Now time.Time `json:"now"` Name string `json:"name,omitempty"` Host string `json:"host,omitempty"` Port int `json:"port,omitempty"` OutboundGateways map[string]*RemoteGatewayz `json:"outbound_gateways"` InboundGateways map[string][]*RemoteGatewayz `json:"inbound_gateways"` } // RemoteGatewayz represents information about an outbound connection to a gateway type RemoteGatewayz struct { IsConfigured bool `json:"configured"` Connection *ConnInfo `json:"connection,omitempty"` Accounts []*AccountGatewayz `json:"accounts,omitempty"` } // AccountGatewayz represents interest mode for this account type AccountGatewayz struct { Name string `json:"name"` InterestMode string `json:"interest_mode"` NoInterestCount int `json:"no_interest_count,omitempty"` InterestOnlyThreshold int `json:"interest_only_threshold,omitempty"` TotalSubscriptions int `json:"num_subs,omitempty"` NumQueueSubscriptions int `json:"num_queue_subs,omitempty"` } // Gatewayz returns a Gatewayz struct containing information about gateways. func (s *Server) Gatewayz(opts *GatewayzOptions) (*Gatewayz, error) { srvID := s.ID() now := time.Now() gw := s.gateway gw.RLock() if !gw.enabled { gw.RUnlock() gwz := &Gatewayz{ ID: srvID, Now: now, OutboundGateways: map[string]*RemoteGatewayz{}, InboundGateways: map[string][]*RemoteGatewayz{}, } return gwz, nil } // Here gateways are enabled, so fill up more. 
gwz := &Gatewayz{ ID: srvID, Now: now, Name: gw.name, Host: gw.info.Host, Port: gw.info.Port, } gw.RUnlock() gwz.OutboundGateways = s.createOutboundsRemoteGatewayz(opts, now) gwz.InboundGateways = s.createInboundsRemoteGatewayz(opts, now) return gwz, nil } // Based on give options struct, returns if there is a filtered // Gateway Name and if we should do report Accounts. // Note that if Accounts is false but AccountName is not empty, // then Accounts is implicitly set to true. func getMonitorGWOptions(opts *GatewayzOptions) (string, bool) { var name string var accs bool if opts != nil { if opts.Name != _EMPTY_ { name = opts.Name } accs = opts.Accounts if !accs && opts.AccountName != _EMPTY_ { accs = true } } return name, accs } // Returns a map of gateways outbound connections. // Based on options, will include a single or all gateways, // with no/single/or all accounts interest information. func (s *Server) createOutboundsRemoteGatewayz(opts *GatewayzOptions, now time.Time) map[string]*RemoteGatewayz { targetGWName, doAccs := getMonitorGWOptions(opts) if targetGWName != _EMPTY_ { c := s.getOutboundGatewayConnection(targetGWName) if c == nil { return nil } outbounds := make(map[string]*RemoteGatewayz, 1) _, rgw := createOutboundRemoteGatewayz(c, opts, now, doAccs) outbounds[targetGWName] = rgw return outbounds } var connsa [16]*client var conns = connsa[:0] s.getOutboundGatewayConnections(&conns) outbounds := make(map[string]*RemoteGatewayz, len(conns)) for _, c := range conns { name, rgw := createOutboundRemoteGatewayz(c, opts, now, doAccs) if rgw != nil { outbounds[name] = rgw } } return outbounds } // Returns a RemoteGatewayz for a given outbound gw connection func createOutboundRemoteGatewayz(c *client, opts *GatewayzOptions, now time.Time, doAccs bool) (string, *RemoteGatewayz) { var name string var rgw *RemoteGatewayz c.mu.Lock() if c.gw != nil { rgw = &RemoteGatewayz{} if doAccs { rgw.Accounts = createOutboundAccountsGatewayz(opts, c.gw) } if c.gw.cfg != nil { rgw.IsConfigured = !c.gw.cfg.isImplicit() } rgw.Connection = &ConnInfo{} rgw.Connection.fill(c, c.nc, now) name = c.gw.name } c.mu.Unlock() return name, rgw } // Returns the list of accounts for this outbound gateway connection. // Based on the options, it will be a single or all accounts for // this outbound. func createOutboundAccountsGatewayz(opts *GatewayzOptions, gw *gateway) []*AccountGatewayz { if gw.outsim == nil { return nil } var accName string if opts != nil { accName = opts.AccountName } if accName != _EMPTY_ { ei, ok := gw.outsim.Load(accName) if !ok { return nil } a := createAccountOutboundGatewayz(accName, ei) return []*AccountGatewayz{a} } accs := make([]*AccountGatewayz, 0, 4) gw.outsim.Range(func(k, v interface{}) bool { name := k.(string) a := createAccountOutboundGatewayz(name, v) accs = append(accs, a) return true }) return accs } // Returns an AccountGatewayz for this gateway outbound connection func createAccountOutboundGatewayz(name string, ei interface{}) *AccountGatewayz { a := &AccountGatewayz{ Name: name, InterestOnlyThreshold: gatewayMaxRUnsubBeforeSwitch, } if ei != nil { e := ei.(*outsie) e.RLock() a.InterestMode = e.mode.String() a.NoInterestCount = len(e.ni) a.NumQueueSubscriptions = e.qsubs a.TotalSubscriptions = int(e.sl.Count()) e.RUnlock() } else { a.InterestMode = Optimistic.String() } return a } // Returns a map of gateways inbound connections. // Each entry is an array of RemoteGatewayz since a given server // may have more than one inbound from the same remote gateway. 
// Based on options, will include a single or all gateways, // with no/single/or all accounts interest information. func (s *Server) createInboundsRemoteGatewayz(opts *GatewayzOptions, now time.Time) map[string][]*RemoteGatewayz { targetGWName, doAccs := getMonitorGWOptions(opts) var connsa [16]*client var conns = connsa[:0] s.getInboundGatewayConnections(&conns) m := make(map[string][]*RemoteGatewayz) for _, c := range conns { c.mu.Lock() if c.gw != nil && (targetGWName == _EMPTY_ || targetGWName == c.gw.name) { igws := m[c.gw.name] if igws == nil { igws = make([]*RemoteGatewayz, 0, 2) } rgw := &RemoteGatewayz{} if doAccs { rgw.Accounts = createInboundAccountsGatewayz(opts, c.gw) } rgw.Connection = &ConnInfo{} rgw.Connection.fill(c, c.nc, now) igws = append(igws, rgw) m[c.gw.name] = igws } c.mu.Unlock() } return m } // Returns the list of accounts for this inbound gateway connection. // Based on the options, it will be a single or all accounts for // this inbound. func createInboundAccountsGatewayz(opts *GatewayzOptions, gw *gateway) []*AccountGatewayz { if gw.insim == nil { return nil } var accName string if opts != nil { accName = opts.AccountName } if accName != _EMPTY_ { e, ok := gw.insim[accName] if !ok { return nil } a := createInboundAccountGatewayz(accName, e) return []*AccountGatewayz{a} } accs := make([]*AccountGatewayz, 0, 4) for name, e := range gw.insim { a := createInboundAccountGatewayz(name, e) accs = append(accs, a) } return accs } // Returns an AccountGatewayz for this gateway inbound connection func createInboundAccountGatewayz(name string, e *insie) *AccountGatewayz { a := &AccountGatewayz{ Name: name, InterestOnlyThreshold: gatewayMaxRUnsubBeforeSwitch, } if e != nil { a.InterestMode = e.mode.String() a.NoInterestCount = len(e.ni) } else { a.InterestMode = Optimistic.String() } return a } // HandleGatewayz process HTTP requests for route information. func (s *Server) HandleGatewayz(w http.ResponseWriter, r *http.Request) { s.mu.Lock() s.httpReqStats[GatewayzPath]++ s.mu.Unlock() accs, err := decodeBool(w, r, "accs") if err != nil { return } gwName := r.URL.Query().Get("gw_name") accName := r.URL.Query().Get("acc_name") if accName != _EMPTY_ { accs = true } opts := &GatewayzOptions{ Name: gwName, Accounts: accs, AccountName: accName, } gw, err := s.Gatewayz(opts) if err != nil { w.WriteHeader(http.StatusBadRequest) w.Write([]byte(err.Error())) return } b, err := json.MarshalIndent(gw, "", " ") if err != nil { s.Errorf("Error marshaling response to /gatewayz request: %v", err) } // Handle response ResponseHandler(w, r, b) } // Leafz represents detailed information on Leafnodes. type Leafz struct { ID string `json:"server_id"` Now time.Time `json:"now"` NumLeafs int `json:"leafnodes"` Leafs []*LeafInfo `json:"leafs"` } // LeafzOptions are options passed to Leafz type LeafzOptions struct { // Subscriptions indicates that Leafz will return a leafnode's subscriptions Subscriptions bool `json:"subscriptions"` Account string `json:"account"` } // LeafInfo has detailed information on each remote leafnode connection. type LeafInfo struct { Account string `json:"account"` IP string `json:"ip"` Port int `json:"port"` RTT string `json:"rtt,omitempty"` InMsgs int64 `json:"in_msgs"` OutMsgs int64 `json:"out_msgs"` InBytes int64 `json:"in_bytes"` OutBytes int64 `json:"out_bytes"` NumSubs uint32 `json:"subscriptions"` Subs []string `json:"subscriptions_list,omitempty"` } // Leafz returns a Leafz structure containing information about leafnodes. 
func (s *Server) Leafz(opts *LeafzOptions) (*Leafz, error) { // Grab leafnodes var lconns []*client s.mu.Lock() if len(s.leafs) > 0 { lconns = make([]*client, 0, len(s.leafs)) for _, ln := range s.leafs { if opts != nil && opts.Account != "" { ln.mu.Lock() ok := ln.acc.Name == opts.Account ln.mu.Unlock() if !ok { continue } } lconns = append(lconns, ln) } } s.mu.Unlock() var leafnodes []*LeafInfo if len(lconns) > 0 { leafnodes = make([]*LeafInfo, 0, len(lconns)) for _, ln := range lconns { ln.mu.Lock() lni := &LeafInfo{ Account: ln.acc.Name, IP: ln.host, Port: int(ln.port), RTT: ln.getRTT().String(), InMsgs: atomic.LoadInt64(&ln.inMsgs), OutMsgs: ln.outMsgs, InBytes: atomic.LoadInt64(&ln.inBytes), OutBytes: ln.outBytes, NumSubs: uint32(len(ln.subs)), } if opts != nil && opts.Subscriptions { lni.Subs = make([]string, 0, len(ln.subs)) for _, sub := range ln.subs { lni.Subs = append(lni.Subs, string(sub.subject)) } } ln.mu.Unlock() leafnodes = append(leafnodes, lni) } } return &Leafz{ ID: s.ID(), Now: time.Now(), NumLeafs: len(leafnodes), Leafs: leafnodes, }, nil } // HandleLeafz process HTTP requests for leafnode information. func (s *Server) HandleLeafz(w http.ResponseWriter, r *http.Request) { s.mu.Lock() s.httpReqStats[LeafzPath]++ s.mu.Unlock() subs, err := decodeBool(w, r, "subs") if err != nil { return } l, err := s.Leafz(&LeafzOptions{subs, r.URL.Query().Get("acc")}) if err != nil { w.WriteHeader(http.StatusBadRequest) w.Write([]byte(err.Error())) return } b, err := json.MarshalIndent(l, "", " ") if err != nil { s.Errorf("Error marshaling response to /leafz request: %v", err) } // Handle response ResponseHandler(w, r, b) } // ResponseHandler handles responses for monitoring routes func ResponseHandler(w http.ResponseWriter, r *http.Request, data []byte) { // Get callback from request callback := r.URL.Query().Get("callback") // If callback is not empty then if callback != "" { // Response for JSONP w.Header().Set("Content-Type", "application/javascript") fmt.Fprintf(w, "%s(%s)", callback, data) } else { // Otherwise JSON w.Header().Set("Content-Type", "application/json") w.Write(data) } } func (reason ClosedState) String() string { switch reason { case ClientClosed: return "Client Closed" case AuthenticationTimeout: return "Authentication Timeout" case AuthenticationViolation: return "Authentication Failure" case TLSHandshakeError: return "TLS Handshake Failure" case SlowConsumerPendingBytes: return "Slow Consumer (Pending Bytes)" case SlowConsumerWriteDeadline: return "Slow Consumer (Write Deadline)" case WriteError: return "Write Error" case ReadError: return "Read Error" case ParseError: return "Parse Error" case StaleConnection: return "Stale Connection" case ProtocolViolation: return "Protocol Violation" case BadClientProtocolVersion: return "Bad Client Protocol Version" case WrongPort: return "Incorrect Port" case MaxConnectionsExceeded: return "Maximum Connections Exceeded" case MaxAccountConnectionsExceeded: return "Maximum Account Connections Exceeded" case MaxPayloadExceeded: return "Maximum Message Payload Exceeded" case MaxControlLineExceeded: return "Maximum Control Line Exceeded" case MaxSubscriptionsExceeded: return "Maximum Subscriptions Exceeded" case DuplicateRoute: return "Duplicate Route" case RouteRemoved: return "Route Removed" case ServerShutdown: return "Server Shutdown" case AuthenticationExpired: return "Authentication Expired" case WrongGateway: return "Wrong Gateway" case MissingAccount: return "Missing Account" case Revocation: return "Credentials Revoked" 
case InternalClient: return "Internal Client" case MsgHeaderViolation: return "Message Header Violation" case NoRespondersRequiresHeaders: return "No Responders Requires Headers" case ClusterNameConflict: return "Cluster Name Conflict" case DuplicateRemoteLeafnodeConnection: return "Duplicate Remote LeafNode Connection" case DuplicateClientID: return "Duplicate Client ID" } return "Unknown State" } // AccountzOptions are options passed to Accountz type AccountzOptions struct { // Account indicates that Accountz will return details for the account Account string `json:"account"` } func newExtServiceLatency(l *serviceLatency) *jwt.ServiceLatency { if l == nil { return nil } return &jwt.ServiceLatency{ Sampling: jwt.SamplingRate(l.sampling), Results: jwt.Subject(l.subject), } } type ExtImport struct { jwt.Import Invalid bool `json:"invalid"` Share bool `json:"share"` Tracking bool `json:"tracking"` TrackingHdr http.Header `json:"tracking_header,omitempty"` Latency *jwt.ServiceLatency `json:"latency,omitempty"` M1 *ServiceLatency `json:"m1,omitempty"` } type ExtExport struct { jwt.Export ApprovedAccounts []string `json:"approved_accounts,omitempty"` } type ExtVrIssues struct { Description string `json:"description"` Blocking bool `json:"blocking"` Time bool `json:"time_check"` } type ExtMap map[string][]*MapDest type AccountInfo struct { AccountName string `json:"account_name"` LastUpdate time.Time `json:"update_time,omitempty"` IsSystem bool `json:"is_system,omitempty"` Expired bool `json:"expired"` Complete bool `json:"complete"` JetStream bool `json:"jetstream_enabled"` LeafCnt int `json:"leafnode_connections"` ClientCnt int `json:"client_connections"` SubCnt uint32 `json:"subscriptions"` Mappings ExtMap `json:"mappings,omitempty"` Exports []ExtExport `json:"exports,omitempty"` Imports []ExtImport `json:"imports,omitempty"` Jwt string `json:"jwt,omitempty"` IssuerKey string `json:"issuer_key,omitempty"` NameTag string `json:"name_tag,omitempty"` Tags jwt.TagList `json:"tags,omitempty"` Claim *jwt.AccountClaims `json:"decoded_jwt,omitempty"` Vr []ExtVrIssues `json:"validation_result_jwt,omitempty"` RevokedUser map[string]time.Time `json:"revoked_user,omitempty"` RevokedAct map[string]time.Time `json:"revoked_activations,omitempty"` Sublist *SublistStats `json:"sublist_stats,omitempty"` Responses map[string]ExtImport `json:"responses,omitempty"` } type Accountz struct { ID string `json:"server_id"` Now time.Time `json:"now"` SystemAccount string `json:"system_account,omitempty"` Accounts []string `json:"accounts,omitempty"` Account *AccountInfo `json:"account_detail,omitempty"` } // HandleAccountz process HTTP requests for account information. 
func (s *Server) HandleAccountz(w http.ResponseWriter, r *http.Request) { s.mu.Lock() s.httpReqStats[AccountzPath]++ s.mu.Unlock() if l, err := s.Accountz(&AccountzOptions{r.URL.Query().Get("acc")}); err != nil { w.WriteHeader(http.StatusBadRequest) w.Write([]byte(err.Error())) } else if b, err := json.MarshalIndent(l, "", " "); err != nil { s.Errorf("Error marshaling response to %s request: %v", AccountzPath, err) w.WriteHeader(http.StatusBadRequest) w.Write([]byte(err.Error())) } else { ResponseHandler(w, r, b) // Handle response } } func (s *Server) Accountz(optz *AccountzOptions) (*Accountz, error) { a := &Accountz{ ID: s.ID(), Now: time.Now(), } if sacc := s.SystemAccount(); sacc != nil { a.SystemAccount = sacc.GetName() } if optz.Account == "" { a.Accounts = []string{} s.accounts.Range(func(key, value interface{}) bool { a.Accounts = append(a.Accounts, key.(string)) return true }) return a, nil } else if aInfo, err := s.accountInfo(optz.Account); err != nil { return nil, err } else { a.Account = aInfo return a, nil } } func newExtImport(v *serviceImport) ExtImport { imp := ExtImport{ Invalid: true, Import: jwt.Import{Type: jwt.Service}, } if v != nil { imp.Share = v.share imp.Tracking = v.tracking imp.Invalid = v.invalid imp.Import = jwt.Import{ Subject: jwt.Subject(v.from), Account: v.acc.Name, Type: jwt.Service, To: jwt.Subject(v.to), } imp.TrackingHdr = v.trackingHdr imp.Latency = newExtServiceLatency(v.latency) if v.m1 != nil { m1 := *v.m1 imp.M1 = &m1 } } return imp } func (s *Server) accountInfo(accName string) (*AccountInfo, error) { var a *Account if v, ok := s.accounts.Load(accName); !ok { return nil, fmt.Errorf("Account %s does not exist", accName) } else { a = v.(*Account) } isSys := a == s.SystemAccount() a.mu.RLock() defer a.mu.RUnlock() var vrIssues []ExtVrIssues claim, _ := jwt.DecodeAccountClaims(a.claimJWT) //ignore error if claim != nil { vr := jwt.ValidationResults{} claim.Validate(&vr) vrIssues = make([]ExtVrIssues, len(vr.Issues)) for i, v := range vr.Issues { vrIssues[i] = ExtVrIssues{v.Description, v.Blocking, v.TimeCheck} } } exports := []ExtExport{} for k, v := range a.exports.services { e := ExtExport{ Export: jwt.Export{ Subject: jwt.Subject(k), Type: jwt.Service, }, ApprovedAccounts: []string{}, } if v != nil { e.Latency = newExtServiceLatency(v.latency) e.TokenReq = v.tokenReq e.ResponseType = jwt.ResponseType(v.respType.String()) for name := range v.approved { e.ApprovedAccounts = append(e.ApprovedAccounts, name) } } exports = append(exports, e) } for k, v := range a.exports.streams { e := ExtExport{ Export: jwt.Export{ Subject: jwt.Subject(k), Type: jwt.Stream, }, ApprovedAccounts: []string{}, } if v != nil { e.TokenReq = v.tokenReq for name := range v.approved { e.ApprovedAccounts = append(e.ApprovedAccounts, name) } } exports = append(exports, e) } imports := []ExtImport{} for _, v := range a.imports.streams { imp := ExtImport{ Invalid: true, Import: jwt.Import{Type: jwt.Stream}, } if v != nil { imp.Invalid = v.invalid imp.Import = jwt.Import{ Subject: jwt.Subject(v.from), Account: v.acc.Name, Type: jwt.Stream, To: jwt.Subject(v.to), } } imports = append(imports, imp) } for _, v := range a.imports.services { imports = append(imports, newExtImport(v)) } responses := map[string]ExtImport{} for k, v := range a.exports.responses { responses[k] = newExtImport(v) } mappings := ExtMap{} for _, m := range a.mappings { var dests []*MapDest src := "" if m == nil { src = "nil" if _, ok := mappings[src]; ok { // only set if not present (keep orig in case nil is 
used) continue } dests = append(dests, &MapDest{}) } else { src = m.src for _, d := range m.dests { dests = append(dests, &MapDest{d.tr.dest, d.weight, ""}) } for c, cd := range m.cdests { for _, d := range cd { dests = append(dests, &MapDest{d.tr.dest, d.weight, c}) } } } mappings[src] = dests } collectRevocations := func(revocations map[string]int64) map[string]time.Time { rev := map[string]time.Time{} for k, v := range a.usersRevoked { rev[k] = time.Unix(v, 0) } return rev } return &AccountInfo{ accName, a.updated, isSys, a.expired, !a.incomplete, a.js != nil, a.numLocalLeafNodes(), a.numLocalConnections(), a.sl.Count(), mappings, exports, imports, a.claimJWT, a.Issuer, a.nameTag, a.tags, claim, vrIssues, collectRevocations(a.usersRevoked), collectRevocations(a.actsRevoked), a.sl.Stats(), responses, }, nil } // JSzOptions are options passed to Jsz type JSzOptions struct { Account string `json:"account,omitempty"` Accounts bool `json:"accounts,omitempty"` Streams bool `json:"streams,omitempty"` Consumer bool `json:"consumer,omitempty"` Config bool `json:"config,omitempty"` LeaderOnly bool `json:"leader_only,omitempty"` Offset int `json:"offset,omitempty"` Limit int `json:"limit,omitempty"` } type StreamDetail struct { Name string `json:"name"` Cluster *ClusterInfo `json:"cluster,omitempty"` Config *StreamConfig `json:"config,omitempty"` State StreamState `json:"state,omitempty"` Consumer []*ConsumerInfo `json:"consumer_detail,omitempty"` } type AccountDetail struct { Name string `json:"name"` Id string `json:"id"` JetStreamStats Streams []StreamDetail `json:"stream_detail,omitempty"` } // LeafInfo has detailed information on each remote leafnode connection. type JSInfo struct { ID string `json:"server_id"` Now time.Time `json:"now"` Disabled bool `json:"disabled,omitempty"` Config JetStreamConfig `json:"config,omitempty"` JetStreamStats StreamCnt int `json:"total_streams,omitempty"` ConsumerCnt int `json:"total_consumers,omitempty"` MessageCnt uint64 `json:"total_messages,omitempty"` MessageBytes uint64 `json:"total_message_bytes,omitempty"` Meta *ClusterInfo `json:"meta_cluster,omitempty"` // aggregate raft info AccountDetails []*AccountDetail `json:"account_details,omitempty"` } func (s *Server) accountDetail(jsa *jsAccount, optStreams, optConsumers, optCfg bool) *AccountDetail { jsa.mu.RLock() defer jsa.mu.RUnlock() acc := jsa.account name := acc.GetName() id := name if acc.nameTag != "" { name = acc.nameTag } detail := AccountDetail{ Name: name, Id: id, JetStreamStats: JetStreamStats{ Memory: uint64(jsa.memTotal), Store: uint64(jsa.storeTotal), API: JetStreamAPIStats{ Total: jsa.apiTotal, Errors: jsa.apiErrors, }, }, Streams: make([]StreamDetail, 0, len(jsa.streams)), } if optStreams { for _, stream := range jsa.streams { ci := s.js.clusterInfo(stream.raftGroup()) var cfg *StreamConfig if optCfg { c := stream.config() cfg = &c } sdet := StreamDetail{ Name: stream.name(), State: stream.state(), Cluster: ci, Config: cfg} if optConsumers { for _, consumer := range stream.consumers { cInfo := consumer.info() if !optCfg { cInfo.Config = nil } sdet.Consumer = append(sdet.Consumer, cInfo) } } detail.Streams = append(detail.Streams, sdet) } } return &detail } func (s *Server) JszAccount(opts *JSzOptions) (*AccountDetail, error) { if s.js == nil { return nil, fmt.Errorf("jetstream not enabled") } acc := opts.Account account, ok := s.accounts.Load(acc) if !ok { return nil, fmt.Errorf("account %q not found", acc) } s.js.mu.RLock() jsa, ok := s.js.accounts[account.(*Account)] s.js.mu.RUnlock() 
if !ok { return nil, fmt.Errorf("account %q not jetstream enabled", acc) } return s.accountDetail(jsa, opts.Streams, opts.Consumer, opts.Config), nil } // Leafz returns a Leafz structure containing information about leafnodes. func (s *Server) Jsz(opts *JSzOptions) (*JSInfo, error) { // set option defaults if opts == nil { opts = &JSzOptions{} } if opts.Limit == 0 { opts.Limit = 1024 } if opts.Consumer { opts.Streams = true } if opts.Streams { opts.Accounts = true } // Check if we want a response from the leader only. if opts.LeaderOnly { js, cc := s.getJetStreamCluster() if js == nil { // Ignore return nil, fmt.Errorf("%w: no cluster", errSkipZreq) } // So if we have JS but no clustering, we are the leader so allow. if cc != nil { js.mu.RLock() isLeader := cc.isLeader() js.mu.RUnlock() if !isLeader { return nil, fmt.Errorf("%w: not leader", errSkipZreq) } } } // helper to get cluster info from node via dummy group toClusterInfo := func(node RaftNode) *ClusterInfo { if node == nil { return nil } peers := node.Peers() peerList := make([]string, len(peers)) for i, p := range node.Peers() { peerList[i] = p.ID } group := &raftGroup{ Name: "", Peers: peerList, node: node, } return s.js.clusterInfo(group) } jsi := &JSInfo{ ID: s.ID(), Now: time.Now(), } if !s.JetStreamEnabled() { jsi.Disabled = true return jsi, nil } accounts := []*jsAccount{} s.js.mu.RLock() jsi.Config = s.js.config for _, info := range s.js.accounts { accounts = append(accounts, info) } s.js.mu.RUnlock() jsi.Meta = toClusterInfo(s.js.getMetaGroup()) filterIdx := -1 for i, jsa := range accounts { jsa.mu.RLock() if jsa.acc().GetName() == opts.Account { filterIdx = i } jsi.StreamCnt += len(jsa.streams) jsi.Memory += uint64(jsa.usage.mem) jsi.Store += uint64(jsa.usage.store) jsi.API.Total += jsa.usage.api jsi.API.Errors += jsa.usage.err for _, stream := range jsa.streams { streamState := stream.state() jsi.MessageCnt += streamState.Msgs jsi.MessageBytes += streamState.Bytes jsi.ConsumerCnt += streamState.Consumers } jsa.mu.RUnlock() } // filter logic if filterIdx != -1 { accounts = []*jsAccount{accounts[filterIdx]} } else if opts.Accounts { if opts.Offset != 0 { sort.Slice(accounts, func(i, j int) bool { return strings.Compare(accounts[i].acc().Name, accounts[j].acc().Name) < 0 }) if opts.Offset > len(accounts) { accounts = []*jsAccount{} } else { accounts = accounts[opts.Offset:] } } if opts.Limit != 0 { if opts.Limit < len(accounts) { accounts = accounts[:opts.Limit] } } } else { accounts = []*jsAccount{} } if len(accounts) > 0 { jsi.AccountDetails = make([]*AccountDetail, 0, len(accounts)) } // if wanted, obtain accounts/streams/consumer for _, jsa := range accounts { detail := s.accountDetail(jsa, opts.Streams, opts.Consumer, opts.Config) jsi.AccountDetails = append(jsi.AccountDetails, detail) } return jsi, nil } // HandleJSz process HTTP requests for jetstream information. 
func (s *Server) HandleJsz(w http.ResponseWriter, r *http.Request) { s.mu.Lock() s.httpReqStats[JszPath]++ s.mu.Unlock() accounts, err := decodeBool(w, r, "accounts") if err != nil { return } streams, err := decodeBool(w, r, "streams") if err != nil { return } consumers, err := decodeBool(w, r, "consumers") if err != nil { return } config, err := decodeBool(w, r, "config") if err != nil { return } offset, err := decodeInt(w, r, "offset") if err != nil { return } limit, err := decodeInt(w, r, "limit") if err != nil { return } leader, err := decodeBool(w, r, "leader-only") if err != nil { return } l, err := s.Jsz(&JSzOptions{ r.URL.Query().Get("acc"), accounts, streams, consumers, config, leader, offset, limit}) if err != nil { w.WriteHeader(http.StatusBadRequest) w.Write([]byte(err.Error())) return } b, err := json.MarshalIndent(l, "", " ") if err != nil { s.Errorf("Error marshaling response to /leafz request: %v", err) } // Handle response ResponseHandler(w, r, b) }
1
12,690
This is ok to change, since I believe these were added just on main and not in a public release.
nats-io-nats-server
go
@@ -26,7 +26,7 @@ import ( ) func MakeAuditLogsOrDie(client *Client, - auditlogsName, methodName, project, resourceName, serviceName, targetName string, + auditlogsName, methodName, project, resourceName, serviceName, targetName, pubsubServiceAccount string, so ...kngcptesting.CloudAuditLogsSourceOption, ) { so = append(so, kngcptesting.WithCloudAuditLogsSourceServiceName(serviceName))
1
/* Copyright 2020 Google LLC Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package lib import ( "fmt" "github.com/google/knative-gcp/pkg/apis/events/v1alpha1" kngcptesting "github.com/google/knative-gcp/pkg/reconciler/testing" "github.com/google/knative-gcp/test/e2e/lib/resources" v1 "k8s.io/api/core/v1" ) func MakeAuditLogsOrDie(client *Client, auditlogsName, methodName, project, resourceName, serviceName, targetName string, so ...kngcptesting.CloudAuditLogsSourceOption, ) { so = append(so, kngcptesting.WithCloudAuditLogsSourceServiceName(serviceName)) so = append(so, kngcptesting.WithCloudAuditLogsSourceMethodName(methodName)) so = append(so, kngcptesting.WithCloudAuditLogsSourceProject(project)) so = append(so, kngcptesting.WithCloudAuditLogsSourceResourceName(resourceName)) so = append(so, kngcptesting.WithCloudAuditLogsSourceSink(ServiceGVK, targetName)) eventsAuditLogs := kngcptesting.NewCloudAuditLogsSource(auditlogsName, client.Namespace, so...) client.CreateAuditLogsOrFail(eventsAuditLogs) client.Core.WaitForResourceReadyOrFail(auditlogsName, CloudAuditLogsSourceTypeMeta) } func MakeAuditLogsJobOrDie(client *Client, methodName, project, resourceName, serviceName, targetName string) { job := resources.AuditLogsTargetJob(targetName, []v1.EnvVar{{ Name: "SERVICENAME", Value: serviceName, }, { Name: "METHODNAME", Value: methodName, }, { Name: "RESOURCENAME", Value: resourceName, }, { Name: "TYPE", Value: v1alpha1.CloudAuditLogsSourceEvent, }, { Name: "SOURCE", Value: v1alpha1.CloudAuditLogsSourceEventSource(serviceName, fmt.Sprintf("projects/%s", project)), }, { Name: "SUBJECT", Value: resourceName, }, { Name: "TIME", Value: "360", }}) client.CreateJobOrFail(job, WithServiceForJob(targetName)) }
1
11,456
Not needed in this PR, but I want to make sure people think about this (maybe file an issue?). This is a lot of strings in a row; it will be hard, if not impossible, for someone reading the code to see that every argument is in the correct position. I recommend creating a struct instead of passing seven strings in a row. A similar problem exists on most of the `MakeFoo(...)`-style methods in this change. A hedged sketch of one possible struct-based signature follows this record.
google-knative-gcp
go
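The review comment above recommends replacing the run of positional string parameters with a struct. Below is a minimal, hedged Go sketch of that idea; the AuditLogsArgs type, its field names, and the describeAuditLogsArgs helper are invented for illustration and are not part of the knative-gcp change.

package lib

import "fmt"

// AuditLogsArgs groups the values that MakeAuditLogsOrDie currently takes as
// seven positional strings, so each call site names every argument explicitly.
// This type is hypothetical and exists only for this sketch.
type AuditLogsArgs struct {
	AuditlogsName        string
	MethodName           string
	Project              string
	ResourceName         string
	ServiceName          string
	TargetName           string
	PubsubServiceAccount string
}

// describeAuditLogsArgs is a stand-in for the real helper: it only formats the
// fields so the sketch stays self-contained. In the actual code these values
// would feed the kngcptesting.CloudAuditLogsSource* option constructors.
func describeAuditLogsArgs(a AuditLogsArgs) string {
	return fmt.Sprintf("name=%s service=%s method=%s project=%s resource=%s target=%s sa=%s",
		a.AuditlogsName, a.ServiceName, a.MethodName, a.Project,
		a.ResourceName, a.TargetName, a.PubsubServiceAccount)
}

With a signature like this, a call site would read MakeAuditLogsOrDie(client, AuditLogsArgs{ServiceName: ..., MethodName: ..., Project: ..., ResourceName: ..., TargetName: ...}, so...), so a swapped pair of arguments is visible at a glance instead of compiling silently.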
@@ -197,6 +197,16 @@ namespace Microsoft.CodeAnalysis.Sarif.Driver.Sdk { _issueLogJsonWriter.CloseResults(); + if (_run.ConfigurationNotifications != null) + { + _issueLogJsonWriter.WriteConfigurationNotifications(_run.ConfigurationNotifications); + } + + if (_run.ToolNotifications != null) + { + _issueLogJsonWriter.WriteToolNotifications(_run.ToolNotifications); + } + if (_run?.Invocation?.StartTime != new DateTime()) { _run.Invocation.EndTime = DateTime.UtcNow;
1
// Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. using System; using System.Collections.Generic; using System.IO; using System.Linq; using Microsoft.CodeAnalysis.Sarif.Readers; using Microsoft.CodeAnalysis.Sarif.Writers; using Newtonsoft.Json; namespace Microsoft.CodeAnalysis.Sarif.Driver.Sdk { public class SarifLogger : IDisposable, IAnalysisLogger { private Run _run; private TextWriter _textWriter; private JsonTextWriter _jsonTextWriter; private ResultLogJsonWriter _issueLogJsonWriter; private Dictionary<string, IRule> _rules; private static Run CreateRun( IEnumerable<string> analysisTargets, bool computeTargetsHash, bool logEnvironment, IEnumerable<string> invocationTokensToRedact) { var run = new Run(); if (analysisTargets != null) { run.Files = new Dictionary<string, IList<FileData>>(); foreach (string target in analysisTargets) { var fileReference = new FileData(); if (computeTargetsHash) { string md5, sha1, sha256; HashUtilities.ComputeHashes(target, out md5, out sha1, out sha256); fileReference.Hashes = new List<Hash> { new Hash() { Value = md5, Algorithm = AlgorithmKind.MD5, }, new Hash() { Value = sha1, Algorithm = AlgorithmKind.Sha1, }, new Hash() { Value = sha256, Algorithm = AlgorithmKind.Sha256, }, }; } run.Files.Add(new Uri(target).ToString(), new List<FileData> { fileReference }); } } run.Invocation = Invocation.Create(logEnvironment); // TODO we should actually redact across the complete log file context // by a dedicated rewriting visitor or some other approach. if (invocationTokensToRedact != null) { run.Invocation.CommandLine = Redact(run.Invocation.CommandLine, invocationTokensToRedact); run.Invocation.Machine = Redact(run.Invocation.Machine, invocationTokensToRedact); run.Invocation.Account = Redact(run.Invocation.Account, invocationTokensToRedact); run.Invocation.CommandLine = Redact(run.Invocation.CommandLine, invocationTokensToRedact); run.Invocation.WorkingDirectory = Redact(run.Invocation.WorkingDirectory, invocationTokensToRedact); if (run.Invocation.EnvironmentVariables != null) { string[] keys = run.Invocation.EnvironmentVariables.Keys.ToArray(); foreach (string key in keys) { string value = run.Invocation.EnvironmentVariables[key]; run.Invocation.EnvironmentVariables[key] = Redact(value, invocationTokensToRedact); } } } return run; } private static string Redact(string text, IEnumerable<string> tokensToRedact) { if (text == null ) { return text; } foreach (string tokenToRedact in tokensToRedact) { text = text.Replace(tokenToRedact, SarifConstants.RemovedMarker); } return text; } public SarifLogger( string outputFilePath, bool verbose, Tool tool, Run run) : this(new StreamWriter(new FileStream(outputFilePath, FileMode.Create, FileAccess.Write, FileShare.None)), verbose, tool, run) { } public SarifLogger( TextWriter textWriter, bool verbose, Tool tool, Run run) : this(textWriter, verbose) { _run = run; _issueLogJsonWriter.WriteTool(tool); } public SarifLogger( string outputFilePath, IEnumerable<string> analysisTargets, bool verbose, bool logEnvironment, bool computeTargetsHash, string prereleaseInfo, IEnumerable<string> invocationTokensToRedact) : this(new StreamWriter(new FileStream(outputFilePath, FileMode.Create, FileAccess.Write, FileShare.None)), analysisTargets, verbose, logEnvironment, computeTargetsHash, prereleaseInfo, invocationTokensToRedact) { } public SarifLogger( TextWriter textWriter, IEnumerable<string> analysisTargets, bool verbose, bool 
logEnvironment, bool computeTargetsHash, string prereleaseInfo, IEnumerable<string> invocationTokensToRedact) : this(textWriter, verbose) { Tool tool = Tool.CreateFromAssemblyData(prereleaseInfo); _issueLogJsonWriter.WriteTool(tool); _run = CreateRun( analysisTargets, computeTargetsHash, logEnvironment, invocationTokensToRedact); } public SarifLogger(TextWriter textWriter, bool verbose) { Verbose = verbose; _textWriter = textWriter; _jsonTextWriter = new JsonTextWriter(_textWriter); // for debugging it is nice to have the following line added. _jsonTextWriter.Formatting = Newtonsoft.Json.Formatting.Indented; _jsonTextWriter.DateFormatString = DateTimeConverter.DateTimeFormat; _issueLogJsonWriter = new ResultLogJsonWriter(_jsonTextWriter); } public Dictionary<string, IRule> Rules { get { _rules = _rules ?? new Dictionary<string, IRule>(); return _rules; } } public bool Verbose { get; set; } public void Dispose() { // Disposing the json writer closes the stream but the textwriter // still needs to be disposed or closed to write the results if (_issueLogJsonWriter != null) { _issueLogJsonWriter.CloseResults(); if (_run?.Invocation?.StartTime != new DateTime()) { _run.Invocation.EndTime = DateTime.UtcNow; } // Note: we write out the backing rules // to prevent the property accessor from populating // this data with an empty collection. if (_rules != null) { _issueLogJsonWriter.WriteRules(_rules); } if (_run.Files != null) { _issueLogJsonWriter.WriteFiles(_run.Files); } if (_run.Invocation != null) { _issueLogJsonWriter.WriteInvocation(invocation: _run.Invocation); } _issueLogJsonWriter.Dispose(); } if (_textWriter != null) { _textWriter.Dispose(); } } public void LogMessage(bool verbose, string message) { // We do not persist these to log file } public void AnalysisStarted() { _issueLogJsonWriter.OpenResults(); _run.Invocation = Invocation.Create(); } public void AnalysisStopped(RuntimeConditions runtimeConditions) { _run.Invocation.EndTime = DateTime.UtcNow; } public void Log(IRule rule, Result result) { if (!ShouldLog(result.Level)) { return; } if (rule != null) { Rules[rule.Id] = rule; } _issueLogJsonWriter.WriteResult(result); } public void AnalyzingTarget(IAnalysisContext context) { // This code doesn't call through a common helper, such as // provided by the SDK Notes class, becuase we are in a specifier // logger. If we called through a helper, we'd re-enter // through all aggregated loggers. 
context.Rule = Notes.AnalyzingTarget; Log(context.Rule, RuleUtilities.BuildResult(ResultLevel.Note, context, null, nameof(SdkResources.MSG1001_AnalyzingTarget))); } public void Log(ResultLevel messageKind, IAnalysisContext context, Region region, string formatId, params string[] arguments) { if (context.Rule != null) { Rules[context.Rule.Id] = context.Rule; } formatId = RuleUtilities.NormalizeFormatId(context.Rule.Id, formatId); LogJsonIssue(messageKind, context.TargetUri?.LocalPath, region, context.Rule.Id, formatId, arguments); } private void LogJsonIssue(ResultLevel level, string targetPath, Region region, string ruleId, string formatId, params string[] arguments) { if (!ShouldLog(level)) { return; } Result result = new Result(); result.RuleId = ruleId; result.FormattedRuleMessage = new FormattedRuleMessage() { FormatId = formatId, Arguments = arguments }; result.Level = level; if (targetPath != null) { result.Locations = new List<Location> { new Sarif.Location { AnalysisTarget = new PhysicalLocation { Uri = new Uri(targetPath), Region = region } }}; } _issueLogJsonWriter.WriteResult(result); } public bool ShouldLog(ResultLevel level) { switch (level) { case ResultLevel.Note: case ResultLevel.Pass: case ResultLevel.NotApplicable: { if (!Verbose) { return false; } break; } case ResultLevel.Error: case ResultLevel.Warning: { break; } default: { throw new InvalidOperationException(); } } return true; } public void LogToolNotification(Notification notification) { _run.ToolNotifications = _run.ToolNotifications ?? new List<Notification>(); _run.ToolNotifications.Add(notification); } public void LogConfigurationNotification(Notification notification) { _run.ConfigurationNotifications = _run.ConfigurationNotifications ?? new List<Notification>(); _run.ConfigurationNotifications.Add(notification); } } }
1
10,727
_jaw drops_ I would have sworn I wrote those lines. Good catch.
microsoft-sarif-sdk
.cs
@@ -3,6 +3,7 @@ // Clean up after resolve / reject function cleanup() { + axe._memoizedFns.forEach(fn => fn.clear()); axe._cache.clear(); axe._tree = undefined; axe._selectorData = undefined;
1
/*global Context */ /*exported runRules */ // Clean up after resolve / reject function cleanup() { axe._cache.clear(); axe._tree = undefined; axe._selectorData = undefined; } /** * Starts analysis on the current document and its subframes * @private * @param {Object} context The `Context` specification object @see Context * @param {Array} options Optional RuleOptions * @param {Function} resolve Called when done running rules, receives ([results : Object], cleanup : Function) * @param {Function} reject Called when execution failed, receives (err : Error) */ function runRules(context, options, resolve, reject) { 'use strict'; try { context = new Context(context); axe._tree = context.flatTree; axe._selectorData = axe.utils.getSelectorData(context.flatTree); } catch (e) { cleanup(); return reject(e); } var q = axe.utils.queue(); var audit = axe._audit; if (options.performanceTimer) { axe.utils.performanceTimer.auditStart(); } if (context.frames.length && options.iframes !== false) { q.defer(function(res, rej) { axe.utils.collectResultsFromFrames( context, options, 'rules', null, res, rej ); }); } let scrollState; q.defer(function(res, rej) { if (options.restoreScroll) { scrollState = axe.utils.getScrollState(); } audit.run(context, options, res, rej); }); q.then(function(data) { try { if (scrollState) { axe.utils.setScrollState(scrollState); } if (options.performanceTimer) { axe.utils.performanceTimer.auditEnd(); } // Add wrapper object so that we may use the same "merge" function for results from inside and outside frames var results = axe.utils.mergeResults( data.map(function(results) { return { results }; }) ); // after should only run once, so ensure we are in the top level window if (context.initiator) { results = audit.after(results, options); results.forEach(axe.utils.publishMetaData); results = results.map(axe.utils.finalizeRuleResult); } try { resolve(results, cleanup); } catch (e) { cleanup(); axe.log(e); } } catch (e) { cleanup(); reject(e); } }).catch(e => { cleanup(); reject(e); }); } axe._runRules = runRules;
1
15,098
This needs to be tested.
dequelabs-axe-core
js
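The dequelabs-axe-core patch above registers memoized helpers on axe._memoizedFns and clears each of them in cleanup(), and the review asks for that behaviour to be tested. As a minimal, language-neutral sketch of the same pattern (written here in Go with hypothetical names; this is not axe-core's implementation), the pieces are: a memoizing wrapper with a Clear method, a package-level registry standing in for axe._memoizedFns, and a cleanup function that resets every registered entry. A test would then call a memoized function, invoke cleanup, and assert that the next call recomputes instead of reusing the stale cache.

package axememo

import "sync"

// memoizedFn caches results of an expensive single-argument function and can
// be reset, mirroring the fn.clear() calls the patch adds to cleanup().
type memoizedFn struct {
	mu    sync.Mutex
	fn    func(string) string
	cache map[string]string
}

// newMemoizedFn wraps fn and registers the wrapper, like axe._memoizedFns.
func newMemoizedFn(fn func(string) string) *memoizedFn {
	m := &memoizedFn{fn: fn, cache: make(map[string]string)}
	memoizedFns = append(memoizedFns, m)
	return m
}

// Call returns the cached result for key, computing and storing it on a miss.
func (m *memoizedFn) Call(key string) string {
	m.mu.Lock()
	defer m.mu.Unlock()
	if v, ok := m.cache[key]; ok {
		return v
	}
	v := m.fn(key)
	m.cache[key] = v
	return v
}

// Clear drops every cached result so the next Call recomputes from scratch.
func (m *memoizedFn) Clear() {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.cache = make(map[string]string)
}

// memoizedFns plays the role of axe._memoizedFns.
var memoizedFns []*memoizedFn

// cleanup resets all registered memoized functions, the behaviour the
// reviewer asks to see covered by a test.
func cleanup() {
	for _, fn := range memoizedFns {
		fn.Clear()
	}
}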
@@ -1019,4 +1019,18 @@ describe Mongoid::Relations::Embedded::One do end end end + + context "when parent validation of child is set to false" do + + let(:building) do + building = Building.create + building.building_address = BuildingAddress.new + building.save + building.reload + end + + it "parent successfully embeds an invalid child" do + expect(building.building_address).to be_a(BuildingAddress) + end + end end
1
require "spec_helper" describe Mongoid::Relations::Embedded::One do describe "#===" do let(:base) do Person.new end let(:target) do Name.new end let(:metadata) do Person.relations["name"] end let(:relation) do described_class.new(base, target, metadata) end context "when the proxied document is same class" do it "returns true" do expect((relation === Name.new)).to be true end end end describe "#=" do context "when the relation is not cyclic" do context "when the parent is a new record" do let(:person) do Person.new end let(:name) do Name.new end before do person.name = name end it "sets the target of the relation" do expect(person.name).to eq(name) end it "sets the base on the inverse relation" do expect(name.namable).to eq(person) end it "sets the same instance on the inverse relation" do expect(name.namable).to eql(person) end it "does not save the target" do expect(name).to_not be_persisted end context "with overwritten getter" do before do person.name = nil def person.name_with_default name_without_default or (self.name = Name.new) end class << person alias_method_chain :name, :default end end it "sets the target without an invinite recursion" do person.name = name expect(person.name).to be_present end end end context "when the parent is not a new record" do let(:person) do Person.create end let(:name) do Name.new end context "when setting with a hash" do before do person.name = {} end let!(:child_name) do person.name end it "sets the target of the relation" do expect(person.name).to eq(child_name) end it "sets the base on the inverse relation" do expect(child_name.namable).to eq(person) end it "sets the same instance on the inverse relation" do expect(child_name.namable).to eql(person) end it "saves the target" do expect(child_name).to be_persisted end context "when replacing a relation with a hash" do before do person.name = {} end it "sets the relation with the proper object" do expect(person.name).to be_a(Name) end end end context "when setting to the same document" do before do person.name = name person.name = person.name end it "does not change the relation" do expect(person.name).to eq(name) end it "does not persist any change" do expect(person.reload.name).to eq(name) end end context "when setting directly" do before do person.name = name end it "sets the target of the relation" do expect(person.name).to eq(name) end it "sets the base on the inverse relation" do expect(name.namable).to eq(person) end it "sets the same instance on the inverse relation" do expect(name.namable).to eql(person) end it "saves the target" do expect(name).to be_persisted end context "when replacing an exising document" do let(:pet_owner) do PetOwner.create end let(:pet_one) do Pet.new(name: 'kika') end let(:pet_two) do Pet.new(name: 'tiksy') end before do pet_owner.pet = pet_one pet_owner.pet = pet_two end it "runs the destroy callbacks on the old document" do expect(pet_one.destroy_flag).to be true end it "keeps the name of the destroyed" do expect(pet_one.name).to eq("kika") end it "saves the new name" do expect(pet_owner.pet.name).to eq("tiksy") end end end context "when setting via the parent attributes" do before do person.attributes = { name: name } end it "sets the target of the relation" do expect(person.name).to eq(name) end it "does not save the target" do expect(name).to_not be_persisted end end end end context "when the relation is cyclic" do context "when the parent is a new record" do let(:parent_shelf) do Shelf.new end let(:child_shelf) do Shelf.new end before do parent_shelf.child_shelf 
= child_shelf end it "sets the target of the relation" do expect(parent_shelf.child_shelf).to eq(child_shelf) end it "sets the base on the inverse relation" do expect(child_shelf.parent_shelf).to eq(parent_shelf) end it "sets the same instance on the inverse relation" do expect(child_shelf.parent_shelf).to eql(parent_shelf) end it "does not save the target" do expect(child_shelf).to_not be_persisted end end context "when the parent is not a new record" do let(:parent_shelf) do Shelf.create end let(:child_shelf) do Shelf.new end before do parent_shelf.child_shelf = child_shelf end it "sets the target of the relation" do expect(parent_shelf.child_shelf).to eq(child_shelf) end it "sets the base on the inverse relation" do expect(child_shelf.parent_shelf).to eq(parent_shelf) end it "sets the same instance on the inverse relation" do expect(child_shelf.parent_shelf).to eql(parent_shelf) end it "saves the target" do expect(child_shelf).to be_persisted end end end context "when setting a new document multiple times in a row" do let(:parent) do Parent.create end before do parent.first_child = Child.new parent.first_child = Child.new parent.first_child = Child.new end it "saves the child document" do expect(parent.first_child).to be_a(Child) end end end describe "#= nil" do context "when the relation is not cyclic" do context "when the parent is a new record" do let(:person) do Person.new end let(:name) do Name.new end before do person.name = name person.name = nil end it "sets the relation to nil" do expect(person.name).to be_nil end it "removes the inverse relation" do expect(name.namable).to be_nil end end context "when the inverse is already nil" do let(:person) do Person.new end before do person.name = nil end it "sets the relation to nil" do expect(person.name).to be_nil end end context "when the parent is persisted" do let(:person) do Person.create end let(:name) do Name.new end context "when setting directly" do before do person.name = name person.name = nil end it "sets the relation to nil" do expect(person.name).to be_nil end it "removed the inverse relation" do expect(name.namable).to be_nil end it "deletes the child document" do expect(name).to be_destroyed end end context "when setting via parent attributes" do before do person.name = name person.attributes = { name: nil } end it "sets the relation to nil" do expect(person.name).to be_nil end it "does not delete the child document" do expect(name).to_not be_destroyed end end end end context "when the relation is cyclic" do context "when the parent is a new record" do let(:parent_shelf) do Shelf.new end let(:child_shelf) do Shelf.new end before do parent_shelf.child_shelf = child_shelf parent_shelf.child_shelf = nil end it "sets the relation to nil" do expect(parent_shelf.child_shelf).to be_nil end it "removes the inverse relation" do expect(child_shelf.parent_shelf).to be_nil end end context "when the inverse is already nil" do let(:parent_shelf) do Shelf.new end before do parent_shelf.child_shelf = nil end it "sets the relation to nil" do expect(parent_shelf.child_shelf).to be_nil end end context "when the documents are not new records" do let(:parent_shelf) do Shelf.create end let(:child_shelf) do Shelf.new end before do parent_shelf.child_shelf = child_shelf parent_shelf.child_shelf = nil end it "sets the relation to nil" do expect(parent_shelf.child_shelf).to be_nil end it "removed the inverse relation" do expect(child_shelf.parent_shelf).to be_nil end it "deletes the child document" do expect(child_shelf).to be_destroyed end end 
end end describe "#build_#\{name}" do context "when the relation is not cyclic" do context "when the parent is a new record" do context "when not providing any attributes" do context "when building once" do let(:person) do Person.new end let!(:name) do person.build_name end it "sets the target of the relation" do expect(person.name).to eq(name) end it "sets the base on the inverse relation" do expect(name.namable).to eq(person) end it "sets no attributes" do expect(name.first_name).to be_nil end it "does not save the target" do expect(name).to_not be_persisted end end context "when building twice" do let(:person) do Person.new end let!(:name) do person.build_name person.build_name end it "sets the target of the relation" do expect(person.name).to eq(name) end it "sets the base on the inverse relation" do expect(name.namable).to eq(person) end it "sets no attributes" do expect(name.first_name).to be_nil end it "does not save the target" do expect(name).to_not be_persisted end end end context "when passing nil as the attributes" do let(:person) do Person.new end let!(:name) do person.build_name(nil) end it "sets the target of the relation" do expect(person.name).to eq(name) end it "sets the base on the inverse relation" do expect(name.namable).to eq(person) end it "sets no attributes" do expect(name.first_name).to be_nil end it "does not save the target" do expect(name).to_not be_persisted end end context "when providing attributes" do let(:person) do Person.new end let!(:name) do person.build_name(first_name: "James") end it "sets the target of the relation" do expect(person.name).to eq(name) end it "sets the base on the inverse relation" do expect(name.namable).to eq(person) end it "sets the attributes" do expect(name.first_name).to eq("James") end it "does not save the target" do expect(name).to_not be_persisted end end end context "when the parent is not a new record" do let(:person) do Person.create end let!(:name) do person.build_name(first_name: "James") end it "does not save the target" do expect(name).to_not be_persisted end end end context "when the relation is cyclic" do context "when the parent is a new record" do let(:parent_shelf) do Shelf.new end let!(:child_shelf) do parent_shelf.build_child_shelf(level: 1) end it "sets the target of the relation" do expect(parent_shelf.child_shelf).to eq(child_shelf) end it "sets the base on the inverse relation" do expect(child_shelf.parent_shelf).to eq(parent_shelf) end it "sets the attributes" do expect(child_shelf.level).to eq(1) end it "does not save the target" do expect(child_shelf).to_not be_persisted end end context "when the parent is not a new record" do let(:parent_shelf) do Shelf.create end let!(:child_shelf) do parent_shelf.build_child_shelf(level: 2) end it "does not save the target" do expect(child_shelf).to_not be_persisted end end end end describe ".builder" do let(:base) do Person.new end let(:target) do Name.new end let(:metadata) do Person.relations["name"] end let(:builder_klass) do Mongoid::Relations::Builders::Embedded::One end it "returns the embedded one builder" do expect(described_class.builder(base, metadata, target)).to be_a(builder_klass) end end describe "#create_#\{name}" do context "when the parent is a new record" do context "when not providing any attributes" do let(:person) do Person.new end let!(:name) do person.create_name end it "sets the target of the relation" do expect(person.name).to eq(name) end it "sets the base on the inverse relation" do expect(name.namable).to eq(person) end it "sets no 
attributes" do expect(name.first_name).to be_nil end it "saves the target" do expect(name).to be_persisted end end context "when passing nil as the attributes" do let(:person) do Person.new end let!(:name) do person.create_name(nil) end it "sets the target of the relation" do expect(person.name).to eq(name) end it "sets the base on the inverse relation" do expect(name.namable).to eq(person) end it "sets no attributes" do expect(name.first_name).to be_nil end it "saves the target" do expect(name).to be_persisted end end context "when providing attributes" do let(:person) do Person.new end let!(:name) do person.create_name(first_name: "James") end it "sets the target of the relation" do expect(person.name).to eq(name) end it "sets the base on the inverse relation" do expect(name.namable).to eq(person) end it "sets the attributes" do expect(name.first_name).to eq("James") end it "saves the target" do expect(name).to be_persisted end end context "when the parent is not a new record" do let(:person) do Person.create end let!(:name) do person.create_name(first_name: "James") end it "does not save the target" do expect(name).to be_persisted end end end end describe ".embedded?" do it "returns true" do expect(described_class).to be_embedded end end describe ".foreign_key_suffix" do it "returns nil" do expect(described_class.foreign_key_suffix).to be_nil end end describe ".macro" do it "returns embeds_one" do expect(described_class.macro).to eq(:embeds_one) end end describe ".nested_builder" do let(:nested_builder_klass) do Mongoid::Relations::Builders::NestedAttributes::One end let(:metadata) do Person.relations["name"] end let(:attributes) do {} end it "returns the single nested builder" do expect( described_class.nested_builder(metadata, attributes, {}) ).to be_a(nested_builder_klass) end end describe "#respond_to?" do let(:person) do Person.new end let!(:name) do person.build_name(first_name: "Tony") end let(:document) do person.name end Mongoid::Document.public_instance_methods(true).each do |method| context "when checking #{method}" do it "returns true" do expect(document.respond_to?(method)).to be true end end end it "responds to persisted?" do expect(document).to respond_to(:persisted?) 
end end describe ".valid_options" do it "returns the valid options" do expect(described_class.valid_options).to eq( [ :autobuild, :as, :cascade_callbacks, :cyclic, :store_as ] ) end end describe ".validation_default" do it "returns true" do expect(described_class.validation_default).to be true end end context "when the embedded document has an array field" do let!(:person) do Person.create end let!(:name) do person.create_name( first_name: "Syd", last_name: "Vicious", aliases: nil ) end context "when saving the array on a persisted document" do let(:from_db) do Person.find(person.id).name end before do from_db.aliases = [ "Syd", "Sydney" ] from_db.save end it "sets the values of the array" do expect(from_db.aliases).to eq([ "Syd", "Sydney" ]) end it "persists the array" do expect(Person.find(person.id).name.aliases).to eq([ "Syd", "Sydney" ]) end end end context "when embedding a many under a one" do let!(:person) do Person.create end before do person.create_name end context "when the documents are reloaded from the database" do let(:from_db) do Person.first end context "when adding a new many" do let(:name) do from_db.name end let!(:translation) do name.translations.new end context "when saving the root" do before do from_db.save end it "persists the new document on the first save" do expect(from_db.reload.name.translations).to_not be_empty end end end end end context "when embedding a one under a many" do let!(:person) do Person.create end let!(:address_one) do person.addresses.create(street: "hobrecht") end let!(:address_two) do person.addresses.create(street: "kreuzberg") end context "when a parent was removed outside of mongoid" do before do person.collection.find(_id: person.id).update_one( "$pull" => { "addresses" => { _id: address_one.id }} ) end it "reloads the correct number" do expect(person.reload.addresses.count).to eq(1) end context "when adding a child" do let(:code) do Code.new end before do address_two.code = code end it "reloads the correct number" do expect(person.reload.addresses.count).to eq(1) end end end end context "when embedded documents are stored without ids" do let!(:band) do Band.create(name: "Moderat") end before do band.collection. find(_id: band.id). update_one("$set" => { label: { name: "Mute" }}) end context "when loading the documents" do before do band.reload end let(:label) do band.label end it "creates proper documents from the db" do expect(label.name).to eq("Mute") end it "assigns ids to the documents" do expect(label.id).to_not be_nil end context "when subsequently updating the documents" do before do label.update_attribute(:name, "Interscope") end it "updates the document" do expect(label.name).to eq("Interscope") end it "persists the change" do expect(label.reload.name).to eq("Interscope") end end end end end
1
11,252
I believe you're missing the comparison after 'be'
mongodb-mongoid
rb
@@ -94,12 +94,15 @@ func (s *KVStoreForTrie) Put(key []byte, value []byte) error { func (s *KVStoreForTrie) Get(key []byte) ([]byte, error) { trieKeystoreMtc.WithLabelValues("get").Inc() v, err := s.cb.Get(s.bucket, key) - if err != nil { - if v, err = s.dao.Get(s.bucket, key); err != nil { - return nil, errors.Wrapf(err, "failed to get key %x", key) + if errors.Cause(err) == ErrNotExist { + if v, err = s.dao.Get(s.bucket, key); errors.Cause(err) == ErrNotExist { + return nil, errors.Wrapf(ErrNotExist, "failed to get key %x", key) } // TODO: put it back to cache } + if errors.Cause(err) == ErrAlreadyDeleted { + return nil, errors.Wrapf(ErrNotExist, "failed to get key %x", key) + } return v, err }
1
// Copyright (c) 2018 IoTeX // This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no // warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent // permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache // License 2.0 that can be found in the LICENSE file. package db import ( "context" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "github.com/iotexproject/iotex-core/pkg/lifecycle" ) var ( trieKeystoreMtc = prometheus.NewCounterVec( prometheus.CounterOpts{ Name: "iotex_trie_keystore", Help: "IoTeX Trie Keystore", }, []string{"type"}, ) ) func init() { prometheus.MustRegister(trieKeystoreMtc) } // KVStoreForTrie defines a kvstore with fixed bucket and cache layer for trie. // It may be used in other cases as well type KVStoreForTrie struct { lc lifecycle.Lifecycle bucket string dao KVStore cb CachedBatch } // Option defines an interface to initialize the kv store type Option func(*KVStoreForTrie) error // CachedBatchOption defines a way to set the cache layer for db func CachedBatchOption(cb CachedBatch) Option { return func(kvStore *KVStoreForTrie) error { kvStore.cb = cb return nil } } // NewKVStoreForTrie creates a new KVStoreForTrie func NewKVStoreForTrie(bucket string, dao KVStore, options ...Option) (*KVStoreForTrie, error) { s := &KVStoreForTrie{bucket: bucket, dao: dao} for _, opt := range options { if err := opt(s); err != nil { return nil, err } } if s.cb == nil { // always have a cache layer s.cb = NewCachedBatch() } s.lc.Add(s.dao) return s, nil } // Start starts the kv store func (s *KVStoreForTrie) Start(ctx context.Context) error { return s.lc.OnStart(ctx) } // Stop stops the kv store func (s *KVStoreForTrie) Stop(ctx context.Context) error { return s.lc.OnStop(ctx) } // Delete deletes key func (s *KVStoreForTrie) Delete(key []byte) error { trieKeystoreMtc.WithLabelValues("delete").Inc() s.cb.Delete(s.bucket, key, "failed to delete key %x", key) // TODO: bug, need to mark key as deleted return nil } // Put puts value for key func (s *KVStoreForTrie) Put(key []byte, value []byte) error { trieKeystoreMtc.WithLabelValues("put").Inc() s.cb.Put(s.bucket, key, value, "failed to put key %x value %x", key, value) return nil } // Get gets value of key func (s *KVStoreForTrie) Get(key []byte) ([]byte, error) { trieKeystoreMtc.WithLabelValues("get").Inc() v, err := s.cb.Get(s.bucket, key) if err != nil { if v, err = s.dao.Get(s.bucket, key); err != nil { return nil, errors.Wrapf(err, "failed to get key %x", key) } // TODO: put it back to cache } return v, err } // Flush flushs the data in cache layer to db func (s *KVStoreForTrie) Flush() error { return s.dao.Commit(s.cb) }
1
16,760
move lines 103~105 in front of line 97?
iotexproject-iotex-core
go
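The review comment on the iotexproject-iotex-core patch refers to line numbers in the patched file: it suggests hoisting the new ErrAlreadyDeleted branch (roughly lines 103~105) above the ErrNotExist fallback to the DB (around line 97). Below is a sketch of how KVStoreForTrie.Get might read after that reordering, reusing only identifiers already present in the file above; it is an assumption about the reviewer's intent, not the committed fix.

// Get gets value of key, reporting a key deleted in the cache layer before
// attempting the fallback lookup in the underlying DB.
func (s *KVStoreForTrie) Get(key []byte) ([]byte, error) {
	trieKeystoreMtc.WithLabelValues("get").Inc()
	v, err := s.cb.Get(s.bucket, key)
	if errors.Cause(err) == ErrAlreadyDeleted {
		// deleted in the cache layer: surface it as not existing
		return nil, errors.Wrapf(ErrNotExist, "failed to get key %x", key)
	}
	if errors.Cause(err) == ErrNotExist {
		if v, err = s.dao.Get(s.bucket, key); errors.Cause(err) == ErrNotExist {
			return nil, errors.Wrapf(ErrNotExist, "failed to get key %x", key)
		}
		// TODO: put it back to cache
	}
	return v, err
}

Behaviour is unchanged either way, since ErrAlreadyDeleted never matched the ErrNotExist branch in the patched version; putting the deleted-key case first simply makes explicit that a cache-layer deletion is reported as ErrNotExist without consulting the underlying DB.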
@@ -2,6 +2,9 @@ package com.fsck.k9.helper; +import java.io.File; +import java.io.FileOutputStream; +import java.io.IOException; import java.util.ArrayList; import java.util.List; import java.util.regex.Matcher;
1
package com.fsck.k9.helper; import java.util.ArrayList; import java.util.List; import java.util.regex.Matcher; import java.util.regex.Pattern; import android.content.Context; import android.database.Cursor; import android.net.ConnectivityManager; import android.net.NetworkInfo; import android.os.Handler; import android.os.Looper; import android.text.Editable; import android.text.TextUtils; import android.widget.EditText; import android.widget.TextView; import org.apache.james.mime4j.util.MimeUtil; import timber.log.Timber; public class Utility { // \u00A0 (non-breaking space) happens to be used by French MUA // Note: no longer using the ^ beginning character combined with (...)+ // repetition matching as we might want to strip ML tags. Ex: // Re: [foo] Re: RE : [foo] blah blah blah private static final Pattern RESPONSE_PATTERN = Pattern.compile( "((Re|Fw|Fwd|Aw|R\\u00E9f\\.)(\\[\\d+\\])?[\\u00A0 ]?: *)+", Pattern.CASE_INSENSITIVE); /** * Mailing-list tag pattern to match strings like "[foobar] " */ private static final Pattern TAG_PATTERN = Pattern.compile("\\[[-_a-z0-9]+\\] ", Pattern.CASE_INSENSITIVE); private static Handler sMainThreadHandler; public static boolean arrayContains(Object[] a, Object o) { for (Object element : a) { if (element.equals(o)) { return true; } } return false; } public static boolean isAnyMimeType(String o, String... a) { for (String element : a) { if (MimeUtil.isSameMimeType(element, o)) { return true; } } return false; } public static boolean arrayContainsAny(Object[] a, Object... o) { for (Object element : a) { if (arrayContains(o, element)) { return true; } } return false; } /** * Combines the given array of Objects into a single String using * each Object's toString() method and the separator character * between each part. * * @param parts * @param separator * @return new String */ public static String combine(Object[] parts, char separator) { if (parts == null) { return null; } return TextUtils.join(String.valueOf(separator), parts); } /** * Combines the given Objects into a single String using * each Object's toString() method and the separator character * between each part. * * @param parts * @param separator * @return new String */ public static String combine(Iterable<?> parts, char separator) { if (parts == null) { return null; } return TextUtils.join(String.valueOf(separator), parts); } public static boolean requiredFieldValid(TextView view) { return view.getText() != null && view.getText().length() > 0; } public static boolean requiredFieldValid(Editable s) { return s != null && s.length() > 0; } public static boolean domainFieldValid(EditText view) { if (view.getText() != null) { String s = view.getText().toString(); if (s.matches("^([a-zA-Z0-9]([a-zA-Z0-9\\-]{0,61}[a-zA-Z0-9])?\\.)*[a-zA-Z0-9]([a-zA-Z0-9\\-]{0,61}[a-zA-Z0-9])?$") && s.length() <= 253) { return true; } if (s.matches("^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$")) { return true; } } return false; } /* * TODO disabled this method globally. It is used in all the settings screens but I just * noticed that an unrelated icon was dimmed. Android must share drawables internally. 
*/ public static void setCompoundDrawablesAlpha(TextView view, int alpha) { // Drawable[] drawables = view.getCompoundDrawables(); // for (Drawable drawable : drawables) { // if (drawable != null) { // drawable.setAlpha(alpha); // } // } } /** * Extract the 'original' subject value, by ignoring leading * response/forward marker and '[XX]' formatted tags (as many mailing-list * softwares do). * * <p> * Result is also trimmed. * </p> * * @param subject * Never <code>null</code>. * @return Never <code>null</code>. */ public static String stripSubject(final String subject) { int lastPrefix = 0; final Matcher tagMatcher = TAG_PATTERN.matcher(subject); String tag = null; // whether tag stripping logic should be active boolean tagPresent = false; // whether the last action stripped a tag boolean tagStripped = false; if (tagMatcher.find(0)) { tagPresent = true; if (tagMatcher.start() == 0) { // found at beginning of subject, considering it an actual tag tag = tagMatcher.group(); // now need to find response marker after that tag lastPrefix = tagMatcher.end(); tagStripped = true; } } final Matcher matcher = RESPONSE_PATTERN.matcher(subject); // while: // - lastPrefix is within the bounds // - response marker found at lastPrefix position // (to make sure we don't catch response markers that are part of // the actual subject) while (lastPrefix < subject.length() - 1 && matcher.find(lastPrefix) && matcher.start() == lastPrefix && (!tagPresent || tag == null || subject.regionMatches(matcher.end(), tag, 0, tag.length()))) { lastPrefix = matcher.end(); if (tagPresent) { tagStripped = false; if (tag == null) { // attempt to find tag if (tagMatcher.start() == lastPrefix) { tag = tagMatcher.group(); lastPrefix += tag.length(); tagStripped = true; } } else if (lastPrefix < subject.length() - 1 && subject.startsWith(tag, lastPrefix)) { // Re: [foo] Re: [foo] blah blah blah // ^ ^ // ^ ^ // ^ new position // ^ // initial position lastPrefix += tag.length(); tagStripped = true; } } } // Null pointer check is to make the static analysis component of Eclipse happy. if (tagStripped && (tag != null)) { // restore the last tag lastPrefix -= tag.length(); } if (lastPrefix > -1 && lastPrefix < subject.length() - 1) { return subject.substring(lastPrefix).trim(); } else { return subject.trim(); } } public static String stripNewLines(String multiLineString) { return multiLineString.replaceAll("[\\r\\n]", ""); } private static final String IMG_SRC_REGEX = "(?is:<img[^>]+src\\s*=\\s*['\"]?([a-z]+)\\:)"; private static final Pattern IMG_PATTERN = Pattern.compile(IMG_SRC_REGEX); /** * Figure out if this part has images. * TODO: should only return true if we're an html part * @param message Content to evaluate * @return True if it has external images; false otherwise. */ public static boolean hasExternalImages(final String message) { Matcher imgMatches = IMG_PATTERN.matcher(message); while (imgMatches.find()) { String uriScheme = imgMatches.group(1); if (uriScheme.equals("http") || uriScheme.equals("https")) { Timber.d("External images found"); return true; } } Timber.d("No external images."); return false; } /** * Unconditionally close a Cursor. Equivalent to {@link Cursor#close()}, * if cursor is non-null. This is typically used in finally blocks. * * @param cursor cursor to close */ public static void closeQuietly(final Cursor cursor) { if (cursor != null) { cursor.close(); } } /** * Check to see if we have network connectivity. 
*/ public static boolean hasConnectivity(final Context context) { final ConnectivityManager connectivityManager = (ConnectivityManager) context.getSystemService(Context.CONNECTIVITY_SERVICE); if (connectivityManager == null) { return false; } final NetworkInfo netInfo = connectivityManager.getActiveNetworkInfo(); if (netInfo != null && netInfo.getState() == NetworkInfo.State.CONNECTED) { return true; } else { return false; } } private static final Pattern MESSAGE_ID = Pattern.compile("<" + "(?:" + "[a-zA-Z0-9!#$%&'*+\\-/=?^_`{|}~]+" + "(?:\\.[a-zA-Z0-9!#$%&'*+\\-/=?^_`{|}~]+)*" + "|" + "\"(?:[^\\\\\"]|\\\\.)*\"" + ")" + "@" + "(?:" + "[a-zA-Z0-9!#$%&'*+\\-/=?^_`{|}~]+" + "(?:\\.[a-zA-Z0-9!#$%&'*+\\-/=?^_`{|}~]+)*" + "|" + "\\[(?:[^\\\\\\]]|\\\\.)*\\]" + ")" + ">"); public static List<String> extractMessageIds(final String text) { List<String> messageIds = new ArrayList<>(); Matcher matcher = MESSAGE_ID.matcher(text); int start = 0; while (matcher.find(start)) { String messageId = text.substring(matcher.start(), matcher.end()); messageIds.add(messageId); start = matcher.end(); } return messageIds; } public static String extractMessageId(final String text) { Matcher matcher = MESSAGE_ID.matcher(text); if (matcher.find()) { return text.substring(matcher.start(), matcher.end()); } return null; } /** * @return a {@link Handler} tied to the main thread. */ public static Handler getMainThreadHandler() { if (sMainThreadHandler == null) { // No need to synchronize -- it's okay to create an extra Handler, which will be used // only once and then thrown away. sMainThreadHandler = new Handler(Looper.getMainLooper()); } return sMainThreadHandler; } }
1
17,363
Lots of unnecessary imports left in this file.
k9mail-k-9
java
@@ -805,8 +805,9 @@ bool CoreChecks::ValidateDescriptorSetBindingData(const CMD_BUFFER_STATE *cb_nod for (uint32_t i = index_range.start; i < index_range.end; ++i, ++array_idx) { uint32_t index = i - index_range.start; const auto *descriptor = descriptor_set->GetDescriptorFromGlobalIndex(i); + const auto descriptor_class = descriptor->GetClass(); - if (descriptor->GetClass() == DescriptorClass::InlineUniform) { + if (descriptor_class == DescriptorClass::InlineUniform) { // Can't validate the descriptor because it may not have been updated. continue; } else if (!descriptor->updated) {
1
/* Copyright (c) 2015-2021 The Khronos Group Inc. * Copyright (c) 2015-2021 Valve Corporation * Copyright (c) 2015-2021 LunarG, Inc. * Copyright (C) 2015-2021 Google Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * Author: Tobin Ehlis <[email protected]> * John Zulauf <[email protected]> * Jeremy Kniager <[email protected]> */ #include "chassis.h" #include "core_validation_error_enums.h" #include "core_validation.h" #include "descriptor_sets.h" #include "hash_vk_types.h" #include "vk_enum_string_helper.h" #include "vk_safe_struct.h" #include "vk_typemap_helper.h" #include "buffer_validation.h" #include <sstream> #include <algorithm> #include <array> #include <memory> // ExtendedBinding collects a VkDescriptorSetLayoutBinding and any extended // state that comes from a different array/structure so they can stay together // while being sorted by binding number. struct ExtendedBinding { ExtendedBinding(const VkDescriptorSetLayoutBinding *l, VkDescriptorBindingFlags f) : layout_binding(l), binding_flags(f) {} const VkDescriptorSetLayoutBinding *layout_binding; VkDescriptorBindingFlags binding_flags; }; struct BindingNumCmp { bool operator()(const ExtendedBinding &a, const ExtendedBinding &b) const { return a.layout_binding->binding < b.layout_binding->binding; } }; using DescriptorSet = cvdescriptorset::DescriptorSet; using DescriptorSetLayout = cvdescriptorset::DescriptorSetLayout; using DescriptorSetLayoutDef = cvdescriptorset::DescriptorSetLayoutDef; using DescriptorSetLayoutId = cvdescriptorset::DescriptorSetLayoutId; // Canonical dictionary of DescriptorSetLayoutDef (without any handle/device specific information) cvdescriptorset::DescriptorSetLayoutDict descriptor_set_layout_dict; DescriptorSetLayoutId GetCanonicalId(const VkDescriptorSetLayoutCreateInfo *p_create_info) { return descriptor_set_layout_dict.look_up(DescriptorSetLayoutDef(p_create_info)); } // Construct DescriptorSetLayout instance from given create info // Proactively reserve and resize as possible, as the reallocation was visible in profiling cvdescriptorset::DescriptorSetLayoutDef::DescriptorSetLayoutDef(const VkDescriptorSetLayoutCreateInfo *p_create_info) : flags_(p_create_info->flags), binding_count_(0), descriptor_count_(0), dynamic_descriptor_count_(0) { const auto *flags_create_info = LvlFindInChain<VkDescriptorSetLayoutBindingFlagsCreateInfo>(p_create_info->pNext); binding_type_stats_ = {0, 0}; std::set<ExtendedBinding, BindingNumCmp> sorted_bindings; const uint32_t input_bindings_count = p_create_info->bindingCount; // Sort the input bindings in binding number order, eliminating duplicates for (uint32_t i = 0; i < input_bindings_count; i++) { VkDescriptorBindingFlags flags = 0; if (flags_create_info && flags_create_info->bindingCount == p_create_info->bindingCount) { flags = flags_create_info->pBindingFlags[i]; } sorted_bindings.insert(ExtendedBinding(p_create_info->pBindings + i, flags)); } // Store the create info in the sorted order from above uint32_t index = 0; binding_count_ = 
static_cast<uint32_t>(sorted_bindings.size()); bindings_.reserve(binding_count_); binding_flags_.reserve(binding_count_); binding_to_index_map_.reserve(binding_count_); for (const auto &input_binding : sorted_bindings) { // Add to binding and map, s.t. it is robust to invalid duplication of binding_num const auto binding_num = input_binding.layout_binding->binding; binding_to_index_map_[binding_num] = index++; bindings_.emplace_back(input_binding.layout_binding); auto &binding_info = bindings_.back(); binding_flags_.emplace_back(input_binding.binding_flags); descriptor_count_ += binding_info.descriptorCount; if (binding_info.descriptorCount > 0) { non_empty_bindings_.insert(binding_num); } if (IsDynamicDescriptor(binding_info.descriptorType)) { dynamic_descriptor_count_ += binding_info.descriptorCount; } // Get stats depending on descriptor type for caching later if (IsBufferDescriptor(binding_info.descriptorType)) { if (IsDynamicDescriptor(binding_info.descriptorType)) { binding_type_stats_.dynamic_buffer_count++; } else { binding_type_stats_.non_dynamic_buffer_count++; } } } assert(bindings_.size() == binding_count_); assert(binding_flags_.size() == binding_count_); uint32_t global_index = 0; global_index_range_.reserve(binding_count_); // Vector order is finalized so build vectors of descriptors and dynamic offsets by binding index for (uint32_t i = 0; i < binding_count_; ++i) { auto final_index = global_index + bindings_[i].descriptorCount; global_index_range_.emplace_back(global_index, final_index); global_index = final_index; } } size_t cvdescriptorset::DescriptorSetLayoutDef::hash() const { hash_util::HashCombiner hc; hc << flags_; hc.Combine(bindings_); hc.Combine(binding_flags_); return hc.Value(); } // // Return valid index or "end" i.e. binding_count_; // The asserts in "Get" are reduced to the set where no valid answer(like null or 0) could be given // Common code for all binding lookups. 
uint32_t cvdescriptorset::DescriptorSetLayoutDef::GetIndexFromBinding(uint32_t binding) const { const auto &bi_itr = binding_to_index_map_.find(binding); if (bi_itr != binding_to_index_map_.cend()) return bi_itr->second; return GetBindingCount(); } VkDescriptorSetLayoutBinding const *cvdescriptorset::DescriptorSetLayoutDef::GetDescriptorSetLayoutBindingPtrFromIndex( const uint32_t index) const { if (index >= bindings_.size()) return nullptr; return bindings_[index].ptr(); } // Return descriptorCount for given index, 0 if index is unavailable uint32_t cvdescriptorset::DescriptorSetLayoutDef::GetDescriptorCountFromIndex(const uint32_t index) const { if (index >= bindings_.size()) return 0; return bindings_[index].descriptorCount; } // For the given index, return descriptorType VkDescriptorType cvdescriptorset::DescriptorSetLayoutDef::GetTypeFromIndex(const uint32_t index) const { assert(index < bindings_.size()); if (index < bindings_.size()) return bindings_[index].descriptorType; return VK_DESCRIPTOR_TYPE_MAX_ENUM; } // For the given index, return stageFlags VkShaderStageFlags cvdescriptorset::DescriptorSetLayoutDef::GetStageFlagsFromIndex(const uint32_t index) const { assert(index < bindings_.size()); if (index < bindings_.size()) return bindings_[index].stageFlags; return VkShaderStageFlags(0); } // Return binding flags for given index, 0 if index is unavailable VkDescriptorBindingFlags cvdescriptorset::DescriptorSetLayoutDef::GetDescriptorBindingFlagsFromIndex(const uint32_t index) const { if (index >= binding_flags_.size()) return 0; return binding_flags_[index]; } const cvdescriptorset::IndexRange &cvdescriptorset::DescriptorSetLayoutDef::GetGlobalIndexRangeFromIndex(uint32_t index) const { const static IndexRange k_invalid_range = {0xFFFFFFFF, 0xFFFFFFFF}; if (index >= binding_flags_.size()) return k_invalid_range; return global_index_range_[index]; } // For the given binding, return the global index range (half open) // As start and end are often needed in pairs, get both with a single lookup. const cvdescriptorset::IndexRange &cvdescriptorset::DescriptorSetLayoutDef::GetGlobalIndexRangeFromBinding( const uint32_t binding) const { uint32_t index = GetIndexFromBinding(binding); return GetGlobalIndexRangeFromIndex(index); } // For given binding, return ptr to ImmutableSampler array VkSampler const *cvdescriptorset::DescriptorSetLayoutDef::GetImmutableSamplerPtrFromBinding(const uint32_t binding) const { const auto &bi_itr = binding_to_index_map_.find(binding); if (bi_itr != binding_to_index_map_.end()) { return bindings_[bi_itr->second].pImmutableSamplers; } return nullptr; } // Move to next valid binding having a non-zero binding count uint32_t cvdescriptorset::DescriptorSetLayoutDef::GetNextValidBinding(const uint32_t binding) const { auto it = non_empty_bindings_.upper_bound(binding); assert(it != non_empty_bindings_.cend()); if (it != non_empty_bindings_.cend()) return *it; return GetMaxBinding() + 1; } // For given index, return ptr to ImmutableSampler array VkSampler const *cvdescriptorset::DescriptorSetLayoutDef::GetImmutableSamplerPtrFromIndex(const uint32_t index) const { if (index < bindings_.size()) { return bindings_[index].pImmutableSamplers; } return nullptr; } // If our layout is compatible with rh_ds_layout, return true. 
bool cvdescriptorset::DescriptorSetLayout::IsCompatible(DescriptorSetLayout const *rh_ds_layout) const { bool compatible = (this == rh_ds_layout) || (GetLayoutDef() == rh_ds_layout->GetLayoutDef()); return compatible; } // TODO: Find a way to add smarts to the autogenerated version of this static std::string smart_string_VkShaderStageFlags(VkShaderStageFlags stage_flags) { if (stage_flags == VK_SHADER_STAGE_ALL) { return string_VkShaderStageFlagBits(VK_SHADER_STAGE_ALL); } return string_VkShaderStageFlags(stage_flags); } // If our layout is compatible with bound_dsl, return true, // else return false and fill in error_msg will description of what causes incompatibility bool cvdescriptorset::VerifySetLayoutCompatibility(const debug_report_data *report_data, DescriptorSetLayout const *layout_dsl, DescriptorSetLayout const *bound_dsl, std::string *error_msg) { // Short circuit the detailed check. if (layout_dsl->IsCompatible(bound_dsl)) return true; // Do a detailed compatibility check of this lhs def (referenced by layout_dsl), vs. the rhs (layout and def) // Should only be run if trivial accept has failed, and in that context should return false. VkDescriptorSetLayout layout_dsl_handle = layout_dsl->GetDescriptorSetLayout(); VkDescriptorSetLayout bound_dsl_handle = bound_dsl->GetDescriptorSetLayout(); DescriptorSetLayoutDef const *layout_ds_layout_def = layout_dsl->GetLayoutDef(); DescriptorSetLayoutDef const *bound_ds_layout_def = bound_dsl->GetLayoutDef(); // Check descriptor counts const auto bound_total_count = bound_ds_layout_def->GetTotalDescriptorCount(); if (layout_ds_layout_def->GetTotalDescriptorCount() != bound_ds_layout_def->GetTotalDescriptorCount()) { std::stringstream error_str; error_str << report_data->FormatHandle(layout_dsl_handle) << " from pipeline layout has " << layout_ds_layout_def->GetTotalDescriptorCount() << " total descriptors, but " << report_data->FormatHandle(bound_dsl_handle) << ", which is bound, has " << bound_total_count << " total descriptors."; *error_msg = error_str.str(); return false; // trivial fail case } // Descriptor counts match so need to go through bindings one-by-one // and verify that type and stageFlags match for (const auto &layout_binding : layout_ds_layout_def->GetBindings()) { // TODO : Do we also need to check immutable samplers? 
const auto bound_binding = bound_ds_layout_def->GetBindingInfoFromBinding(layout_binding.binding); if (layout_binding.descriptorCount != bound_binding->descriptorCount) { std::stringstream error_str; error_str << "Binding " << layout_binding.binding << " for " << report_data->FormatHandle(layout_dsl_handle) << " from pipeline layout has a descriptorCount of " << layout_binding.descriptorCount << " but binding " << layout_binding.binding << " for " << report_data->FormatHandle(bound_dsl_handle) << ", which is bound, has a descriptorCount of " << bound_binding->descriptorCount; *error_msg = error_str.str(); return false; } else if (layout_binding.descriptorType != bound_binding->descriptorType) { std::stringstream error_str; error_str << "Binding " << layout_binding.binding << " for " << report_data->FormatHandle(layout_dsl_handle) << " from pipeline layout is type '" << string_VkDescriptorType(layout_binding.descriptorType) << "' but binding " << layout_binding.binding << " for " << report_data->FormatHandle(bound_dsl_handle) << ", which is bound, is type '" << string_VkDescriptorType(bound_binding->descriptorType) << "'"; *error_msg = error_str.str(); return false; } else if (layout_binding.stageFlags != bound_binding->stageFlags) { std::stringstream error_str; error_str << "Binding " << layout_binding.binding << " for " << report_data->FormatHandle(layout_dsl_handle) << " from pipeline layout has stageFlags " << smart_string_VkShaderStageFlags(layout_binding.stageFlags) << " but binding " << layout_binding.binding << " for " << report_data->FormatHandle(bound_dsl_handle) << ", which is bound, has stageFlags " << smart_string_VkShaderStageFlags(bound_binding->stageFlags); *error_msg = error_str.str(); return false; } } const auto &ds_layout_flags = layout_ds_layout_def->GetBindingFlags(); const auto &bound_layout_flags = bound_ds_layout_def->GetBindingFlags(); if (bound_layout_flags != ds_layout_flags) { std::stringstream error_str; assert(ds_layout_flags.size() == bound_layout_flags.size()); size_t i; for (i = 0; i < ds_layout_flags.size(); i++) { if (ds_layout_flags[i] != bound_layout_flags[i]) break; } error_str << report_data->FormatHandle(layout_dsl_handle) << " from pipeline layout does not have the same binding flags at binding " << i << " ( " << string_VkDescriptorBindingFlagsEXT(ds_layout_flags[i]) << " ) as " << report_data->FormatHandle(bound_dsl_handle) << " ( " << string_VkDescriptorBindingFlagsEXT(bound_layout_flags[i]) << " ), which is bound"; *error_msg = error_str.str(); return false; } // No detailed check should succeed if the trivial check failed -- or the dictionary has failed somehow. bool compatible = true; assert(!compatible); return compatible; } bool cvdescriptorset::DescriptorSetLayoutDef::IsNextBindingConsistent(const uint32_t binding) const { if (!binding_to_index_map_.count(binding + 1)) return false; auto const &bi_itr = binding_to_index_map_.find(binding); if (bi_itr != binding_to_index_map_.end()) { const auto &next_bi_itr = binding_to_index_map_.find(binding + 1); if (next_bi_itr != binding_to_index_map_.end()) { auto type = bindings_[bi_itr->second].descriptorType; auto stage_flags = bindings_[bi_itr->second].stageFlags; auto immut_samp = bindings_[bi_itr->second].pImmutableSamplers ? true : false; auto flags = binding_flags_[bi_itr->second]; if ((type != bindings_[next_bi_itr->second].descriptorType) || (stage_flags != bindings_[next_bi_itr->second].stageFlags) || (immut_samp != (bindings_[next_bi_itr->second].pImmutableSamplers ? 
true : false)) || (flags != binding_flags_[next_bi_itr->second])) { return false; } return true; } } return false; } // The DescriptorSetLayout stores the per handle data for a descriptor set layout, and references the common defintion for the // handle invariant portion cvdescriptorset::DescriptorSetLayout::DescriptorSetLayout(const VkDescriptorSetLayoutCreateInfo *p_create_info, const VkDescriptorSetLayout layout) : layout_(layout), layout_id_(GetCanonicalId(p_create_info)) {} // Validate descriptor set layout create info bool cvdescriptorset::ValidateDescriptorSetLayoutCreateInfo( const ValidationObject *val_obj, const VkDescriptorSetLayoutCreateInfo *create_info, const bool push_descriptor_ext, const uint32_t max_push_descriptors, const bool descriptor_indexing_ext, const VkPhysicalDeviceVulkan12Features *core12_features, const VkPhysicalDeviceInlineUniformBlockFeaturesEXT *inline_uniform_block_features, const VkPhysicalDeviceInlineUniformBlockPropertiesEXT *inline_uniform_block_props, const DeviceExtensions *device_extensions) { bool skip = false; std::unordered_set<uint32_t> bindings; uint64_t total_descriptors = 0; const auto *flags_create_info = LvlFindInChain<VkDescriptorSetLayoutBindingFlagsCreateInfo>(create_info->pNext); const bool push_descriptor_set = !!(create_info->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR); if (push_descriptor_set && !push_descriptor_ext) { skip |= val_obj->LogError( val_obj->device, kVUID_Core_DrawState_ExtensionNotEnabled, "vkCreateDescriptorSetLayout(): Attempted to use %s in %s but its required extension %s has not been enabled.\n", "VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR", "VkDescriptorSetLayoutCreateInfo::flags", VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME); } const bool update_after_bind_set = !!(create_info->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT); if (update_after_bind_set && !descriptor_indexing_ext) { skip |= val_obj->LogError( val_obj->device, kVUID_Core_DrawState_ExtensionNotEnabled, "vkCreateDescriptorSetLayout(): Attemped to use %s in %s but its required extension %s has not been enabled.\n", "VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT", "VkDescriptorSetLayoutCreateInfo::flags", VK_EXT_DESCRIPTOR_INDEXING_EXTENSION_NAME); } auto valid_type = [push_descriptor_set](const VkDescriptorType type) { return !push_descriptor_set || ((type != VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) && (type != VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) && (type != VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT)); }; uint32_t max_binding = 0; for (uint32_t i = 0; i < create_info->bindingCount; ++i) { const auto &binding_info = create_info->pBindings[i]; max_binding = std::max(max_binding, binding_info.binding); if (!bindings.insert(binding_info.binding).second) { skip |= val_obj->LogError(val_obj->device, "VUID-VkDescriptorSetLayoutCreateInfo-binding-00279", "vkCreateDescriptorSetLayout(): pBindings[%u] has duplicated binding number (%u).", i, binding_info.binding); } if (!valid_type(binding_info.descriptorType)) { skip |= val_obj->LogError(val_obj->device, (binding_info.descriptorType == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT) ? 
"VUID-VkDescriptorSetLayoutCreateInfo-flags-02208" : "VUID-VkDescriptorSetLayoutCreateInfo-flags-00280", "vkCreateDescriptorSetLayout(): pBindings[%u] has invalid type %s , for push descriptors.", i, string_VkDescriptorType(binding_info.descriptorType)); } if (binding_info.descriptorType == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT) { if (!inline_uniform_block_features->inlineUniformBlock) { skip |= val_obj->LogError(val_obj->device, "VUID-VkDescriptorSetLayoutBinding-descriptorType-04604", "vkCreateDescriptorSetLayout(): pBindings[%u] is creating VkDescriptorSetLayout with " "descriptor type VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT " "but inlineUniformBlock is not enabled", i, VK_EXT_INLINE_UNIFORM_BLOCK_EXTENSION_NAME); } else { if ((binding_info.descriptorCount % 4) != 0) { skip |= val_obj->LogError(val_obj->device, "VUID-VkDescriptorSetLayoutBinding-descriptorType-02209", "vkCreateDescriptorSetLayout(): pBindings[%u] has descriptorCount =(%" PRIu32 ") but must be a multiple of 4", i, binding_info.descriptorCount); } if (binding_info.descriptorCount > inline_uniform_block_props->maxInlineUniformBlockSize) { skip |= val_obj->LogError(val_obj->device, "VUID-VkDescriptorSetLayoutBinding-descriptorType-02210", "vkCreateDescriptorSetLayout(): pBindings[%u] has descriptorCount =(%" PRIu32 ") but must be less than or equal to maxInlineUniformBlockSize (%u)", i, binding_info.descriptorCount, inline_uniform_block_props->maxInlineUniformBlockSize); } } } if ((binding_info.descriptorType == VK_DESCRIPTOR_TYPE_SAMPLER || binding_info.descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER) && binding_info.pImmutableSamplers && device_extensions->vk_ext_custom_border_color) { const CoreChecks *core_checks = reinterpret_cast<const CoreChecks *>(val_obj); for (uint32_t j = 0; j < binding_info.descriptorCount; j++) { const SAMPLER_STATE *sampler_state = core_checks->GetSamplerState(binding_info.pImmutableSamplers[j]); if (sampler_state && (sampler_state->createInfo.borderColor == VK_BORDER_COLOR_INT_CUSTOM_EXT || sampler_state->createInfo.borderColor == VK_BORDER_COLOR_FLOAT_CUSTOM_EXT)) { skip |= val_obj->LogError( val_obj->device, "VUID-VkDescriptorSetLayoutBinding-pImmutableSamplers-04009", "vkCreateDescriptorSetLayout(): pBindings[%u].pImmutableSamplers[%u] has VkSampler %" PRIu64 " presented as immutable has a custom border color", i, j, binding_info.pImmutableSamplers[j]); } } } total_descriptors += binding_info.descriptorCount; } if (flags_create_info) { if (flags_create_info->bindingCount != 0 && flags_create_info->bindingCount != create_info->bindingCount) { skip |= val_obj->LogError(val_obj->device, "VUID-VkDescriptorSetLayoutBindingFlagsCreateInfo-bindingCount-03002", "vkCreateDescriptorSetLayout(): VkDescriptorSetLayoutCreateInfo::bindingCount (%d) != " "VkDescriptorSetLayoutBindingFlagsCreateInfo::bindingCount (%d)", create_info->bindingCount, flags_create_info->bindingCount); } if (flags_create_info->bindingCount == create_info->bindingCount) { for (uint32_t i = 0; i < create_info->bindingCount; ++i) { const auto &binding_info = create_info->pBindings[i]; if (flags_create_info->pBindingFlags[i] & VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT) { if (!update_after_bind_set) { skip |= val_obj->LogError(val_obj->device, "VUID-VkDescriptorSetLayoutCreateInfo-flags-03000", "vkCreateDescriptorSetLayout(): pBindings[%u] does not have " "VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT.", i); } if (binding_info.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER && 
!core12_features->descriptorBindingUniformBufferUpdateAfterBind) { skip |= val_obj->LogError( val_obj->device, "VUID-VkDescriptorSetLayoutBindingFlagsCreateInfo-" "descriptorBindingUniformBufferUpdateAfterBind-03005", "vkCreateDescriptorSetLayout(): pBindings[%u] can't have VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT " "for %s since descriptorBindingUniformBufferUpdateAfterBind is not enabled.", i, string_VkDescriptorType(binding_info.descriptorType)); } if ((binding_info.descriptorType == VK_DESCRIPTOR_TYPE_SAMPLER || binding_info.descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER || binding_info.descriptorType == VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE) && !core12_features->descriptorBindingSampledImageUpdateAfterBind) { skip |= val_obj->LogError( val_obj->device, "VUID-VkDescriptorSetLayoutBindingFlagsCreateInfo-" "descriptorBindingSampledImageUpdateAfterBind-03006", "vkCreateDescriptorSetLayout(): pBindings[%u] can't have VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT " "for %s since descriptorBindingSampledImageUpdateAfterBind is not enabled.", i, string_VkDescriptorType(binding_info.descriptorType)); } if (binding_info.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE && !core12_features->descriptorBindingStorageImageUpdateAfterBind) { skip |= val_obj->LogError( val_obj->device, "VUID-VkDescriptorSetLayoutBindingFlagsCreateInfo-" "descriptorBindingStorageImageUpdateAfterBind-03007", "vkCreateDescriptorSetLayout(): pBindings[%u] can't have VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT " "for %s since descriptorBindingStorageImageUpdateAfterBind is not enabled.", i, string_VkDescriptorType(binding_info.descriptorType)); } if (binding_info.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER && !core12_features->descriptorBindingStorageBufferUpdateAfterBind) { skip |= val_obj->LogError( val_obj->device, "VUID-VkDescriptorSetLayoutBindingFlagsCreateInfo-" "descriptorBindingStorageBufferUpdateAfterBind-03008", "vkCreateDescriptorSetLayout(): pBindings[%u] can't have VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT " "for %s since descriptorBindingStorageBufferUpdateAfterBind is not enabled.", i, string_VkDescriptorType(binding_info.descriptorType)); } if (binding_info.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER && !core12_features->descriptorBindingUniformTexelBufferUpdateAfterBind) { skip |= val_obj->LogError( val_obj->device, "VUID-VkDescriptorSetLayoutBindingFlagsCreateInfo-" "descriptorBindingUniformTexelBufferUpdateAfterBind-03009", "vkCreateDescriptorSetLayout(): pBindings[%u] can't have VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT " "for %s since descriptorBindingUniformTexelBufferUpdateAfterBind is not enabled.", i, string_VkDescriptorType(binding_info.descriptorType)); } if (binding_info.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER && !core12_features->descriptorBindingStorageTexelBufferUpdateAfterBind) { skip |= val_obj->LogError( val_obj->device, "VUID-VkDescriptorSetLayoutBindingFlagsCreateInfo-" "descriptorBindingStorageTexelBufferUpdateAfterBind-03010", "vkCreateDescriptorSetLayout(): pBindings[%u] can't have VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT " "for %s since descriptorBindingStorageTexelBufferUpdateAfterBind is not enabled.", i, string_VkDescriptorType(binding_info.descriptorType)); } if ((binding_info.descriptorType == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT || binding_info.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC || binding_info.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)) { skip |= 
val_obj->LogError(val_obj->device, "VUID-VkDescriptorSetLayoutBindingFlagsCreateInfo-None-03011", "vkCreateDescriptorSetLayout(): pBindings[%u] can't have " "VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT for %s.", i, string_VkDescriptorType(binding_info.descriptorType)); } if (binding_info.descriptorType == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT && !inline_uniform_block_features->descriptorBindingInlineUniformBlockUpdateAfterBind) { skip |= val_obj->LogError( val_obj->device, "VUID-VkDescriptorSetLayoutBindingFlagsCreateInfo-" "descriptorBindingInlineUniformBlockUpdateAfterBind-02211", "vkCreateDescriptorSetLayout(): pBindings[%u] can't have VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT " "for %s since descriptorBindingInlineUniformBlockUpdateAfterBind is not enabled.", i, string_VkDescriptorType(binding_info.descriptorType)); } } if (flags_create_info->pBindingFlags[i] & VK_DESCRIPTOR_BINDING_UPDATE_UNUSED_WHILE_PENDING_BIT) { if (!core12_features->descriptorBindingUpdateUnusedWhilePending) { skip |= val_obj->LogError( val_obj->device, "VUID-VkDescriptorSetLayoutBindingFlagsCreateInfo-descriptorBindingUpdateUnusedWhilePending-03012", "vkCreateDescriptorSetLayout(): pBindings[%u] can't have " "VK_DESCRIPTOR_BINDING_UPDATE_UNUSED_WHILE_PENDING_BIT for %s since " "descriptorBindingUpdateUnusedWhilePending is not enabled.", i, string_VkDescriptorType(binding_info.descriptorType)); } } if (flags_create_info->pBindingFlags[i] & VK_DESCRIPTOR_BINDING_PARTIALLY_BOUND_BIT) { if (!core12_features->descriptorBindingPartiallyBound) { skip |= val_obj->LogError( val_obj->device, "VUID-VkDescriptorSetLayoutBindingFlagsCreateInfo-descriptorBindingPartiallyBound-03013", "vkCreateDescriptorSetLayout(): pBindings[%u] can't have VK_DESCRIPTOR_BINDING_PARTIALLY_BOUND_BIT for " "%s since descriptorBindingPartiallyBound is not enabled.", i, string_VkDescriptorType(binding_info.descriptorType)); } } if (flags_create_info->pBindingFlags[i] & VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT) { if (binding_info.binding != max_binding) { skip |= val_obj->LogError( val_obj->device, "VUID-VkDescriptorSetLayoutBindingFlagsCreateInfo-pBindingFlags-03004", "vkCreateDescriptorSetLayout(): pBindings[%u] has VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT " "but its binding (%u) is not the largest binding number in the layout.", i, binding_info.binding); } if (!core12_features->descriptorBindingVariableDescriptorCount) { skip |= val_obj->LogError( val_obj->device, "VUID-VkDescriptorSetLayoutBindingFlagsCreateInfo-descriptorBindingVariableDescriptorCount-03014", "vkCreateDescriptorSetLayout(): pBindings[%u] can't have " "VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT for %s since " "descriptorBindingVariableDescriptorCount is not enabled.", i, string_VkDescriptorType(binding_info.descriptorType)); } if ((binding_info.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) || (binding_info.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)) { skip |= val_obj->LogError(val_obj->device, "VUID-VkDescriptorSetLayoutBindingFlagsCreateInfo-pBindingFlags-03015", "vkCreateDescriptorSetLayout(): pBindings[%u] can't have " "VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT for %s.", i, string_VkDescriptorType(binding_info.descriptorType)); } } if (push_descriptor_set && (flags_create_info->pBindingFlags[i] & (VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT | VK_DESCRIPTOR_BINDING_UPDATE_UNUSED_WHILE_PENDING_BIT | VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT))) { skip |= val_obj->LogError( val_obj->device, 
"VUID-VkDescriptorSetLayoutBindingFlagsCreateInfo-flags-03003", "vkCreateDescriptorSetLayout(): pBindings[%u] can't have VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT, " "VK_DESCRIPTOR_BINDING_UPDATE_UNUSED_WHILE_PENDING_BIT, or " "VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT for with " "VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR.", i); } } } } if ((push_descriptor_set) && (total_descriptors > max_push_descriptors)) { const char *undefined = push_descriptor_ext ? "" : " -- undefined"; skip |= val_obj->LogError( val_obj->device, "VUID-VkDescriptorSetLayoutCreateInfo-flags-00281", "vkCreateDescriptorSetLayout(): for push descriptor, total descriptor count in layout (%" PRIu64 ") must not be greater than VkPhysicalDevicePushDescriptorPropertiesKHR::maxPushDescriptors (%" PRIu32 "%s).", total_descriptors, max_push_descriptors, undefined); } return skip; } void cvdescriptorset::AllocateDescriptorSetsData::Init(uint32_t count) { layout_nodes.resize(count); } cvdescriptorset::DescriptorSet::DescriptorSet(const VkDescriptorSet set, DESCRIPTOR_POOL_STATE *pool_state, const std::shared_ptr<DescriptorSetLayout const> &layout, uint32_t variable_count, const cvdescriptorset::DescriptorSet::StateTracker *state_data) : some_update_(false), set_(set), pool_state_(pool_state), layout_(layout), state_data_(state_data), variable_count_(variable_count), change_count_(0) { // Foreach binding, create default descriptors of given type descriptors_.reserve(layout_->GetTotalDescriptorCount()); descriptor_store_.resize(layout_->GetTotalDescriptorCount()); auto free_descriptor = descriptor_store_.data(); for (uint32_t i = 0; i < layout_->GetBindingCount(); ++i) { auto type = layout_->GetTypeFromIndex(i); switch (type) { case VK_DESCRIPTOR_TYPE_SAMPLER: { auto immut_sampler = layout_->GetImmutableSamplerPtrFromIndex(i); for (uint32_t di = 0; di < layout_->GetDescriptorCountFromIndex(i); ++di) { if (immut_sampler) { descriptors_.emplace_back(new ((free_descriptor++)->Sampler()) SamplerDescriptor(state_data, immut_sampler + di)); some_update_ = true; // Immutable samplers are updated at creation } else { descriptors_.emplace_back(new ((free_descriptor++)->Sampler()) SamplerDescriptor(state_data, nullptr)); } } break; } case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER: { auto immut = layout_->GetImmutableSamplerPtrFromIndex(i); for (uint32_t di = 0; di < layout_->GetDescriptorCountFromIndex(i); ++di) { if (immut) { descriptors_.emplace_back(new ((free_descriptor++)->ImageSampler()) ImageSamplerDescriptor(state_data, immut + di)); some_update_ = true; // Immutable samplers are updated at creation } else { descriptors_.emplace_back(new ((free_descriptor++)->ImageSampler()) ImageSamplerDescriptor(state_data, nullptr)); } } break; } // ImageDescriptors case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE: case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT: case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: for (uint32_t di = 0; di < layout_->GetDescriptorCountFromIndex(i); ++di) { descriptors_.emplace_back(new ((free_descriptor++)->Image()) ImageDescriptor(type)); } break; case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER: case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: for (uint32_t di = 0; di < layout_->GetDescriptorCountFromIndex(i); ++di) { descriptors_.emplace_back(new ((free_descriptor++)->Texel()) TexelDescriptor(type)); } break; case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER: case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER: for (uint32_t di = 0; di < layout_->GetDescriptorCountFromIndex(i); ++di) { descriptors_.emplace_back(new ((free_descriptor++)->Buffer()) 
BufferDescriptor(type)); } break; case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT: for (uint32_t di = 0; di < layout_->GetDescriptorCountFromIndex(i); ++di) { descriptors_.emplace_back(new ((free_descriptor++)->InlineUniform()) InlineUniformDescriptor(type)); } break; case VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_NV: case VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR: for (uint32_t di = 0; di < layout_->GetDescriptorCountFromIndex(i); ++di) { descriptors_.emplace_back(new ((free_descriptor++)->AccelerationStructure()) AccelerationStructureDescriptor(type)); } break; default: if (IsDynamicDescriptor(type) && IsBufferDescriptor(type)) { for (uint32_t di = 0; di < layout_->GetDescriptorCountFromIndex(i); ++di) { dynamic_offset_idx_to_descriptor_list_.push_back(descriptors_.size()); descriptors_.emplace_back(new ((free_descriptor++)->Buffer()) BufferDescriptor(type)); } } else { assert(0); // Bad descriptor type specified } break; } } } cvdescriptorset::DescriptorSet::~DescriptorSet() {} static std::string StringDescriptorReqViewType(descriptor_req req) { std::string result(""); for (unsigned i = 0; i <= VK_IMAGE_VIEW_TYPE_CUBE_ARRAY; i++) { if (req & (1 << i)) { if (result.size()) result += ", "; result += string_VkImageViewType(VkImageViewType(i)); } } if (!result.size()) result = "(none)"; return result; } static char const *StringDescriptorReqComponentType(descriptor_req req) { if (req & DESCRIPTOR_REQ_COMPONENT_TYPE_SINT) return "SINT"; if (req & DESCRIPTOR_REQ_COMPONENT_TYPE_UINT) return "UINT"; if (req & DESCRIPTOR_REQ_COMPONENT_TYPE_FLOAT) return "FLOAT"; return "(none)"; } unsigned DescriptorRequirementsBitsFromFormat(VkFormat fmt) { if (FormatIsSInt(fmt)) return DESCRIPTOR_REQ_COMPONENT_TYPE_SINT; if (FormatIsUInt(fmt)) return DESCRIPTOR_REQ_COMPONENT_TYPE_UINT; if (FormatIsDepthAndStencil(fmt)) return DESCRIPTOR_REQ_COMPONENT_TYPE_FLOAT | DESCRIPTOR_REQ_COMPONENT_TYPE_UINT; if (fmt == VK_FORMAT_UNDEFINED) return 0; // everything else -- UNORM/SNORM/FLOAT/USCALED/SSCALED is all float in the shader. return DESCRIPTOR_REQ_COMPONENT_TYPE_FLOAT; } // Validate that the state of this set is appropriate for the given bindings and dynamic_offsets at Draw time // This includes validating that all descriptors in the given bindings are updated, // that any update buffers are valid, and that any dynamic offsets are within the bounds of their buffers. // Return true if state is acceptable, or false and write an error message into error string bool CoreChecks::ValidateDrawState(const DescriptorSet *descriptor_set, const BindingReqMap &bindings, const std::vector<uint32_t> &dynamic_offsets, const CMD_BUFFER_STATE *cb_node, const std::vector<IMAGE_VIEW_STATE *> *attachments, const std::vector<SUBPASS_INFO> &subpasses, const char *caller, const DrawDispatchVuid &vuids) const { bool result = false; VkFramebuffer framebuffer = cb_node->activeFramebuffer ? cb_node->activeFramebuffer->framebuffer : VK_NULL_HANDLE; for (const auto &binding_pair : bindings) { const auto binding = binding_pair.first; DescriptorSetLayout::ConstBindingIterator binding_it(descriptor_set->GetLayout().get(), binding); if (binding_it.AtEnd()) { // End at construction is the condition for an invalid binding. 
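// Report the problem once and return immediately: the requested binding does not exist in this set's layout.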
auto set = descriptor_set->GetSet(); result |= LogError(set, vuids.descriptor_valid, "%s encountered the following validation error at %s time: Attempting to " "validate DrawState for binding #%u which is an invalid binding for this descriptor set.", report_data->FormatHandle(set).c_str(), caller, binding); return result; } if (binding_it.GetDescriptorBindingFlags() & (VK_DESCRIPTOR_BINDING_PARTIALLY_BOUND_BIT | VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT)) { // Can't validate the descriptor because it may not have been updated, // or the view could have been destroyed continue; } // // This is a record time only path const bool record_time_validate = true; result |= ValidateDescriptorSetBindingData(cb_node, descriptor_set, dynamic_offsets, binding_pair, framebuffer, attachments, subpasses, record_time_validate, caller, vuids); } return result; } bool CoreChecks::ValidateDescriptorSetBindingData(const CMD_BUFFER_STATE *cb_node, const DescriptorSet *descriptor_set, const std::vector<uint32_t> &dynamic_offsets, const std::pair<const uint32_t, DescriptorRequirement> &binding_info, VkFramebuffer framebuffer, const std::vector<IMAGE_VIEW_STATE *> *attachments, const std::vector<SUBPASS_INFO> &subpasses, bool record_time_validate, const char *caller, const DrawDispatchVuid &vuids) const { using DescriptorClass = cvdescriptorset::DescriptorClass; using BufferDescriptor = cvdescriptorset::BufferDescriptor; using ImageDescriptor = cvdescriptorset::ImageDescriptor; using ImageSamplerDescriptor = cvdescriptorset::ImageSamplerDescriptor; using SamplerDescriptor = cvdescriptorset::SamplerDescriptor; using TexelDescriptor = cvdescriptorset::TexelDescriptor; using AccelerationStructureDescriptor = cvdescriptorset::AccelerationStructureDescriptor; const auto reqs = binding_info.second.reqs; const auto binding = binding_info.first; DescriptorSetLayout::ConstBindingIterator binding_it(descriptor_set->GetLayout().get(), binding); { // Copy the range, the end range is subject to update based on variable length descriptor arrays. cvdescriptorset::IndexRange index_range = binding_it.GetGlobalIndexRange(); auto array_idx = 0; // Track array idx if we're dealing with array descriptors if (binding_it.IsVariableDescriptorCount()) { // Only validate the first N descriptors if it uses variable_count index_range.end = index_range.start + descriptor_set->GetVariableDescriptorCount(); } for (uint32_t i = index_range.start; i < index_range.end; ++i, ++array_idx) { uint32_t index = i - index_range.start; const auto *descriptor = descriptor_set->GetDescriptorFromGlobalIndex(i); if (descriptor->GetClass() == DescriptorClass::InlineUniform) { // Can't validate the descriptor because it may not have been updated. 
continue; } else if (!descriptor->updated) { auto set = descriptor_set->GetSet(); return LogError( set, vuids.descriptor_valid, "Descriptor set %s encountered the following validation error at %s time: Descriptor in binding #%" PRIu32 " index %" PRIu32 " is being used in draw but has never been updated via vkUpdateDescriptorSets() or a similar call.", report_data->FormatHandle(set).c_str(), caller, binding, index); } else { auto descriptor_class = descriptor->GetClass(); if (descriptor_class == DescriptorClass::GeneralBuffer) { // Verify that buffers are valid auto buffer = static_cast<const BufferDescriptor *>(descriptor)->GetBuffer(); auto buffer_node = static_cast<const BufferDescriptor *>(descriptor)->GetBufferState(); if ((!buffer_node && !enabled_features.robustness2_features.nullDescriptor) || (buffer_node && buffer_node->destroyed)) { auto set = descriptor_set->GetSet(); return LogError(set, vuids.descriptor_valid, "Descriptor set %s encountered the following validation error at %s time: Descriptor in " "binding #%" PRIu32 " index %" PRIu32 " is using buffer %s that is invalid or has been destroyed.", report_data->FormatHandle(set).c_str(), caller, binding, index, report_data->FormatHandle(buffer).c_str()); } if (buffer) { if (!buffer_node->sparse) { for (const auto *mem_binding : buffer_node->GetBoundMemory()) { if (mem_binding->destroyed) { auto set = descriptor_set->GetSet(); return LogError( set, vuids.descriptor_valid, "Descriptor set %s encountered the following validation error at %s time: Descriptor in " "binding #%" PRIu32 " index %" PRIu32 " uses buffer %s that references invalid memory %s.", report_data->FormatHandle(set).c_str(), caller, binding, index, report_data->FormatHandle(buffer).c_str(), report_data->FormatHandle(mem_binding->mem).c_str()); } } } if (enabled_features.core11.protectedMemory == VK_TRUE) { if (ValidateProtectedBuffer(cb_node, buffer_node, caller, vuids.unprotected_command_buffer, "Buffer is in a descriptorSet")) { return true; } if (binding_info.second.is_writable && ValidateUnprotectedBuffer(cb_node, buffer_node, caller, vuids.protected_command_buffer, "Buffer is in a descriptorSet")) { return true; } } } } else if (descriptor_class == DescriptorClass::ImageSampler || descriptor_class == DescriptorClass::Image) { VkImageView image_view; VkImageLayout image_layout; const IMAGE_VIEW_STATE *image_view_state; std::vector<const SAMPLER_STATE *> sampler_states; if (descriptor_class == DescriptorClass::ImageSampler) { const ImageSamplerDescriptor *image_descriptor = static_cast<const ImageSamplerDescriptor *>(descriptor); image_view = image_descriptor->GetImageView(); image_view_state = image_descriptor->GetImageViewState(); image_layout = image_descriptor->GetImageLayout(); sampler_states.emplace_back(image_descriptor->GetSamplerState()); } else { const ImageDescriptor *image_descriptor = static_cast<const ImageDescriptor *>(descriptor); image_view = image_descriptor->GetImageView(); image_view_state = image_descriptor->GetImageViewState(); image_layout = image_descriptor->GetImageLayout(); if (binding_info.second.samplers_used_by_image.size() > index) { for (auto &sampler : binding_info.second.samplers_used_by_image[index]) { // NOTE: This check _shouldn't_ be necessary due to the checks made in IsSpecificDescriptorType in // shader_validation.cpp. However, without this check some traces still crash. 
if (sampler.second && (sampler.second->GetClass() == cvdescriptorset::DescriptorClass::PlainSampler)) { const auto *sampler_state = static_cast<const cvdescriptorset::SamplerDescriptor *>(sampler.second)->GetSamplerState(); if (sampler_state) sampler_states.emplace_back(sampler_state); } } } } if ((!image_view_state && !enabled_features.robustness2_features.nullDescriptor) || (image_view_state && image_view_state->destroyed)) { // Image view must have been destroyed since initial update. Could potentially flag the descriptor // as "invalid" (updated = false) at DestroyImageView() time and detect this error at bind time auto set = descriptor_set->GetSet(); return LogError(set, vuids.descriptor_valid, "Descriptor set %s encountered the following validation error at %s time: Descriptor in " "binding #%" PRIu32 " index %" PRIu32 " is using imageView %s that is invalid or has been destroyed.", report_data->FormatHandle(set).c_str(), caller, binding, index, report_data->FormatHandle(image_view).c_str()); } if (image_view) { const auto &image_view_ci = image_view_state->create_info; const auto *image_state = image_view_state->image_state.get(); if (reqs & DESCRIPTOR_REQ_ALL_VIEW_TYPE_BITS) { if (~reqs & (1 << image_view_ci.viewType)) { auto set = descriptor_set->GetSet(); return LogError( set, vuids.descriptor_valid, "Descriptor set %s encountered the following validation error at %s time: Descriptor " "in binding #%" PRIu32 " index %" PRIu32 " requires an image view of type %s but got %s.", report_data->FormatHandle(set).c_str(), caller, binding, index, StringDescriptorReqViewType(reqs).c_str(), string_VkImageViewType(image_view_ci.viewType)); } if (!(reqs & image_view_state->descriptor_format_bits)) { // bad component type auto set = descriptor_set->GetSet(); return LogError(set, vuids.descriptor_valid, "Descriptor set %s encountered the following validation error at %s time: " "Descriptor in binding " "#%" PRIu32 " index %" PRIu32 " requires %s component type, but bound descriptor format is %s.", report_data->FormatHandle(set).c_str(), caller, binding, index, StringDescriptorReqComponentType(reqs), string_VkFormat(image_view_ci.format)); } } // NOTE: Submit time validation of UPDATE_AFTER_BIND image layout is not possible with the // image layout tracking as currently implemented, so only record_time_validation is done if (!disabled[image_layout_validation] && record_time_validate) { auto image_node = image_view_state->image_state.get(); assert(image_node); // Verify Image Layout // No "invalid layout" VUID required for this call, since the optimal_layout parameter is UNDEFINED. bool hit_error = false; VerifyImageLayout(cb_node, image_node, image_view_state->normalized_subresource_range, image_view_ci.subresourceRange.aspectMask, image_layout, VK_IMAGE_LAYOUT_UNDEFINED, caller, kVUIDUndefined, "VUID-VkDescriptorImageInfo-imageLayout-00344", &hit_error); if (hit_error) { auto set = descriptor_set->GetSet(); return LogError( set, vuids.descriptor_valid, "Descriptor set %s encountered the following validation error at %s time: Image layout " "specified " "at vkUpdateDescriptorSet* or vkCmdPushDescriptorSet* time " "doesn't match actual image layout at time descriptor is used. 
See previous error callback for " "specific details.", report_data->FormatHandle(set).c_str(), caller); } } // Verify Sample counts if ((reqs & DESCRIPTOR_REQ_SINGLE_SAMPLE) && image_view_state->samples != VK_SAMPLE_COUNT_1_BIT) { auto set = descriptor_set->GetSet(); return LogError( set, vuids.descriptor_valid, "Descriptor set %s encountered the following validation error at %s time: Descriptor in " "binding #%" PRIu32 " index %" PRIu32 " requires bound image to have VK_SAMPLE_COUNT_1_BIT but got %s.", report_data->FormatHandle(set).c_str(), caller, binding, index, string_VkSampleCountFlagBits(image_view_state->samples)); } if ((reqs & DESCRIPTOR_REQ_MULTI_SAMPLE) && image_view_state->samples == VK_SAMPLE_COUNT_1_BIT) { auto set = descriptor_set->GetSet(); return LogError(set, vuids.descriptor_valid, "Descriptor set %s encountered the following validation error at %s time: Descriptor " "in binding #%" PRIu32 " index %" PRIu32 " requires bound image to have multiple samples, but got VK_SAMPLE_COUNT_1_BIT.", report_data->FormatHandle(set).c_str(), caller, binding, index); } const VkDescriptorType descriptor_type = binding_it.GetType(); // Verify VK_FORMAT_FEATURE_STORAGE_IMAGE_ATOMIC_BIT if ((reqs & DESCRIPTOR_REQ_VIEW_ATOMIC_OPERATION) && (descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE) && !(image_view_state->format_features & VK_FORMAT_FEATURE_STORAGE_IMAGE_ATOMIC_BIT)) { auto set = descriptor_set->GetSet(); LogObjectList objlist(set); objlist.add(image_view); return LogError(objlist, vuids.imageview_atomic, "Descriptor set %s encountered the following validation error at %s time: Descriptor " "in binding #%" PRIu32 " index %" PRIu32 ", %s, format %s, doesn't " "contain VK_FORMAT_FEATURE_STORAGE_IMAGE_ATOMIC_BIT.", report_data->FormatHandle(set).c_str(), caller, binding, index, report_data->FormatHandle(image_view).c_str(), string_VkFormat(image_view_ci.format)); } // Verify if attachments are used in DescriptorSet if (attachments && attachments->size() > 0 && (descriptor_type != VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT)) { bool ds_aspect = (image_view_state->create_info.subresourceRange.aspectMask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) ? 
true : false; uint32_t att_index = 0; for (const auto &view_state : *attachments) { if (!subpasses[att_index].used || !view_state || view_state->destroyed) { ++att_index; continue; } if (ds_aspect && subpasses[att_index].usage == VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) { if ((image_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL || image_layout == VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL || image_layout == VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL) && (subpasses[att_index].layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL || subpasses[att_index].layout == VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL || subpasses[att_index].layout == VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL)) { ++att_index; continue; } if ((image_layout == VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL && subpasses[att_index].layout == VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL) || (subpasses[att_index].layout == VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL && image_layout == VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL)) { ++att_index; continue; } } if (view_state->image_view == image_view) { auto set = descriptor_set->GetSet(); LogObjectList objlist(set); objlist.add(image_view); objlist.add(framebuffer); return LogError( objlist, vuids.image_subresources, "Descriptor set %s encountered the following validation error at %s time: %s is used in " "Descriptor in binding #%" PRIu32 " index %" PRIu32 " and %s attachment # %" PRIu32 ".", report_data->FormatHandle(set).c_str(), caller, report_data->FormatHandle(image_view).c_str(), binding, index, report_data->FormatHandle(framebuffer).c_str(), att_index); } else { if (image_view_state->OverlapSubresource(*view_state)) { auto set = descriptor_set->GetSet(); LogObjectList objlist(set); objlist.add(image_view); objlist.add(framebuffer); objlist.add(view_state->image_view); return LogError(objlist, vuids.image_subresources, "Descriptor set %s encountered the following validation error at %s time: " "Image subresources of %s in " "Descriptor in binding #%" PRIu32 " index %" PRIu32 " and %s in %s attachment # %" PRIu32 " overlap.", report_data->FormatHandle(set).c_str(), caller, report_data->FormatHandle(image_view).c_str(), binding, index, report_data->FormatHandle(view_state->image_view).c_str(), report_data->FormatHandle(framebuffer).c_str(), att_index); } } ++att_index; } if (enabled_features.core11.protectedMemory == VK_TRUE) { if (ValidateProtectedImage(cb_node, image_view_state->image_state.get(), caller, vuids.unprotected_command_buffer, "Image is in a descriptorSet")) { return true; } if (binding_info.second.is_writable && ValidateUnprotectedImage(cb_node, image_view_state->image_state.get(), caller, vuids.protected_command_buffer, "Image is in a descriptorSet")) { return true; } } } for (const auto *sampler_state : sampler_states) { if (!sampler_state || sampler_state->destroyed) { continue; } // TODO: Validate 04015 for DescriptorClass::PlainSampler if ((sampler_state->createInfo.borderColor == VK_BORDER_COLOR_INT_CUSTOM_EXT || sampler_state->createInfo.borderColor == VK_BORDER_COLOR_FLOAT_CUSTOM_EXT) && (sampler_state->customCreateInfo.format == VK_FORMAT_UNDEFINED)) { if (image_view_state->create_info.format == VK_FORMAT_B4G4R4A4_UNORM_PACK16 || image_view_state->create_info.format == VK_FORMAT_B5G6R5_UNORM_PACK16 || image_view_state->create_info.format == VK_FORMAT_B5G5R5A1_UNORM_PACK16) { auto set = descriptor_set->GetSet(); LogObjectList objlist(set); objlist.add(sampler_state->sampler); objlist.add(image_view_state->image_view); 
return LogError( objlist, "VUID-VkSamplerCustomBorderColorCreateInfoEXT-format-04015", "Descriptor set %s encountered the following validation error at %s time: Sampler %s in " "binding #%" PRIu32 " index %" PRIu32 " has a custom border color with format = VK_FORMAT_UNDEFINED and is used to " "sample an image view %s with format %s", report_data->FormatHandle(set).c_str(), caller, report_data->FormatHandle(sampler_state->sampler).c_str(), binding, index, report_data->FormatHandle(image_view_state->image_view).c_str(), string_VkFormat(image_view_state->create_info.format)); } } VkFilter sampler_mag_filter = sampler_state->createInfo.magFilter; VkFilter sampler_min_filter = sampler_state->createInfo.minFilter; VkBool32 sampler_compare_enable = sampler_state->createInfo.compareEnable; if ((sampler_mag_filter == VK_FILTER_LINEAR || sampler_min_filter == VK_FILTER_LINEAR) && (sampler_compare_enable == VK_FALSE) && !(image_view_state->format_features & VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT)) { auto set = descriptor_set->GetSet(); LogObjectList objlist(set); objlist.add(sampler_state->sampler); objlist.add(image_view_state->image_view); return LogError(objlist, vuids.linear_sampler, "Descriptor set %s encountered the following validation error at %s time: Sampler " "(%s) is set to use VK_FILTER_LINEAR with " "compareEnable set to VK_FALSE, but image view's (%s) format (%s) does not " "contain VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT in its format features.", report_data->FormatHandle(set).c_str(), caller, report_data->FormatHandle(sampler_state->sampler).c_str(), report_data->FormatHandle(image_view_state->image_view).c_str(), string_VkFormat(image_view_state->create_info.format)); } if (sampler_mag_filter == VK_FILTER_CUBIC_EXT || sampler_min_filter == VK_FILTER_CUBIC_EXT) { if (!(image_view_state->format_features & VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_CUBIC_BIT_EXT)) { auto set = descriptor_set->GetSet(); LogObjectList objlist(set); objlist.add(sampler_state->sampler); objlist.add(image_view_state->image_view); return LogError(objlist, vuids.cubic_sampler, "Descriptor set %s encountered the following validation error at %s time: " "Sampler (%s) is set to use VK_FILTER_CUBIC_EXT, but " "image view's (%s) format (%s) does not contain " "VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_CUBIC_BIT_EXT in its format features.", report_data->FormatHandle(set).c_str(), caller, report_data->FormatHandle(sampler_state->sampler).c_str(), report_data->FormatHandle(image_view_state->image_view).c_str(), string_VkFormat(image_view_state->create_info.format)); } if (IsExtEnabled(device_extensions.vk_ext_filter_cubic)) { const auto reduction_mode_info = LvlFindInChain<VkSamplerReductionModeCreateInfo>(sampler_state->createInfo.pNext); if (reduction_mode_info && (reduction_mode_info->reductionMode == VK_SAMPLER_REDUCTION_MODE_MIN || reduction_mode_info->reductionMode == VK_SAMPLER_REDUCTION_MODE_MAX) && !image_view_state->filter_cubic_props.filterCubicMinmax) { auto set = descriptor_set->GetSet(); LogObjectList objlist(set); objlist.add(sampler_state->sampler); objlist.add(image_view_state->image_view); return LogError(objlist, vuids.filter_cubic_min_max, "Descriptor set %s encountered the following validation error at %s time: " "Sampler (%s) is set to use VK_FILTER_CUBIC_EXT & %s, " "but image view (%s) doesn't support filterCubicMinmax.", report_data->FormatHandle(set).c_str(), caller, report_data->FormatHandle(sampler_state->sampler).c_str(), 
string_VkSamplerReductionMode(reduction_mode_info->reductionMode), report_data->FormatHandle(image_view_state->image_view).c_str()); } if (!image_view_state->filter_cubic_props.filterCubic) { auto set = descriptor_set->GetSet(); LogObjectList objlist(set); objlist.add(sampler_state->sampler); objlist.add(image_view_state->image_view); return LogError(objlist, vuids.filter_cubic, "Descriptor set %s encountered the following validation error at %s time: " "Sampler (%s) is set to use VK_FILTER_CUBIC_EXT, " "but image view (%s) doesn't support filterCubic.", report_data->FormatHandle(set).c_str(), caller, report_data->FormatHandle(sampler_state->sampler).c_str(), report_data->FormatHandle(image_view_state->image_view).c_str()); } } if (IsExtEnabled(device_extensions.vk_img_filter_cubic)) { const VkImageViewType view_type = image_view_state->create_info.viewType; /* VkImageViewType is an enum, not a bitmask, so compare each value explicitly */ if (view_type == VK_IMAGE_VIEW_TYPE_3D || view_type == VK_IMAGE_VIEW_TYPE_CUBE || view_type == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY) { auto set = descriptor_set->GetSet(); LogObjectList objlist(set); objlist.add(sampler_state->sampler); objlist.add(image_view_state->image_view); return LogError( objlist, vuids.img_filter_cubic, "Descriptor set %s encountered the following validation error at %s time: Sampler " "(%s) is set to use VK_FILTER_CUBIC_EXT while the VK_IMG_filter_cubic extension " "is enabled, but image view (%s) has an invalid imageViewType (%s).", report_data->FormatHandle(set).c_str(), caller, report_data->FormatHandle(sampler_state->sampler).c_str(), report_data->FormatHandle(image_view_state->image_view).c_str(), string_VkImageViewType(image_view_state->create_info.viewType)); } } } if ((image_state->createInfo.flags & VK_IMAGE_CREATE_CORNER_SAMPLED_BIT_NV) && (sampler_state->createInfo.addressModeU != VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE || sampler_state->createInfo.addressModeV != VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE || sampler_state->createInfo.addressModeW != VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE)) { std::string address_mode_letter = (sampler_state->createInfo.addressModeU != VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE) ? "U" : (sampler_state->createInfo.addressModeV != VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE) ? "V" : "W"; VkSamplerAddressMode address_mode = (sampler_state->createInfo.addressModeU != VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE) ? sampler_state->createInfo.addressModeU : (sampler_state->createInfo.addressModeV != VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE) ? 
sampler_state->createInfo.addressModeV : sampler_state->createInfo.addressModeW; auto set = descriptor_set->GetSet(); LogObjectList objlist(set); objlist.add(sampler_state->sampler); objlist.add(image_state->image); objlist.add(image_view_state->image_view); return LogError(objlist, vuids.corner_sampled_address_mode, "Descriptor set %s encountered the following validation error at %s time: Image " "(%s) in image view (%s) is created with flag " "VK_IMAGE_CREATE_CORNER_SAMPLED_BIT_NV and can only be sampled using " "VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE, but sampler (%s) has " "createInfo.addressMode%s set to %s.", report_data->FormatHandle(set).c_str(), caller, report_data->FormatHandle(image_state->image).c_str(), report_data->FormatHandle(image_view_state->image_view).c_str(), report_data->FormatHandle(sampler_state->sampler).c_str(), address_mode_letter.c_str(), string_VkSamplerAddressMode(address_mode)); } // UnnormalizedCoordinates sampler validations if (sampler_state->createInfo.unnormalizedCoordinates) { // If ImageView is used by an unnormalizedCoordinates sampler, it needs to check ImageView type if (image_view_ci.viewType == VK_IMAGE_VIEW_TYPE_3D || image_view_ci.viewType == VK_IMAGE_VIEW_TYPE_CUBE || image_view_ci.viewType == VK_IMAGE_VIEW_TYPE_1D_ARRAY || image_view_ci.viewType == VK_IMAGE_VIEW_TYPE_2D_ARRAY || image_view_ci.viewType == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY) { auto set = descriptor_set->GetSet(); LogObjectList objlist(set); objlist.add(image_view); objlist.add(sampler_state->sampler); return LogError( objlist, vuids.sampler_imageview_type, "Descriptor set %s encountered the following validation error at %s time: %s, type: %s in " "Descriptor in binding #%" PRIu32 " index %" PRIu32 " is used by %s.", report_data->FormatHandle(set).c_str(), caller, report_data->FormatHandle(image_view).c_str(), string_VkImageViewType(image_view_ci.viewType), binding, index, report_data->FormatHandle(sampler_state->sampler).c_str()); } // sampler must not be used with any of the SPIR-V OpImageSample* or OpImageSparseSample* // instructions with ImplicitLod, Dref or Proj in their name if (reqs & DESCRIPTOR_REQ_SAMPLER_IMPLICITLOD_DREF_PROJ) { auto set = descriptor_set->GetSet(); LogObjectList objlist(set); objlist.add(image_view); objlist.add(sampler_state->sampler); return LogError( objlist, vuids.sampler_implicitLod_dref_proj, "Descriptor set %s encountered the following validation error at %s time: %s in " "Descriptor in binding #%" PRIu32 " index %" PRIu32 " is used by %s that uses an invalid operator.", report_data->FormatHandle(set).c_str(), caller, report_data->FormatHandle(image_view).c_str(), binding, index, report_data->FormatHandle(sampler_state->sampler).c_str()); } // sampler must not be used with any of the SPIR-V OpImageSample* or OpImageSparseSample* // instructions that includes a LOD bias or any offset values if (reqs & DESCRIPTOR_REQ_SAMPLER_BIAS_OFFSET) { auto set = descriptor_set->GetSet(); LogObjectList objlist(set); objlist.add(image_view); objlist.add(sampler_state->sampler); return LogError( objlist, vuids.sampler_bias_offset, "Descriptor set %s encountered the following validation error at %s time: %s in " "Descriptor in binding #%" PRIu32 " index %" PRIu32 " is used by %s that uses an invalid bias or offset operator.", report_data->FormatHandle(set).c_str(), caller, report_data->FormatHandle(image_view).c_str(), binding, index, report_data->FormatHandle(sampler_state->sampler).c_str()); } } } } } else if (descriptor_class == DescriptorClass::TexelBuffer) { auto 
texel_buffer = static_cast<const TexelDescriptor *>(descriptor); auto buffer_view = texel_buffer->GetBufferView(); auto buffer_view_state = texel_buffer->GetBufferViewState(); if ((!buffer_view_state && !enabled_features.robustness2_features.nullDescriptor) || (buffer_view_state && buffer_view_state->destroyed)) { auto set = descriptor_set->GetSet(); return LogError(set, vuids.descriptor_valid, "Descriptor set %s encountered the following validation error at %s time: Descriptor in " "binding #%" PRIu32 " index %" PRIu32 " is using bufferView %s that is invalid or has been destroyed.", report_data->FormatHandle(set).c_str(), caller, binding, index, report_data->FormatHandle(buffer_view).c_str()); } if (buffer_view) { auto buffer = buffer_view_state->create_info.buffer; auto buffer_state = buffer_view_state->buffer_state.get(); if (buffer_state->destroyed) { auto set = descriptor_set->GetSet(); return LogError( set, vuids.descriptor_valid, "Descriptor set %s encountered the following validation error at %s time: Descriptor in " "binding #%" PRIu32 " index %" PRIu32 " is using buffer %s that has been destroyed.", report_data->FormatHandle(set).c_str(), caller, binding, index, report_data->FormatHandle(buffer).c_str()); } auto format_bits = DescriptorRequirementsBitsFromFormat(buffer_view_state->create_info.format); if (!(reqs & format_bits)) { // bad component type auto set = descriptor_set->GetSet(); return LogError( set, vuids.descriptor_valid, "Descriptor set %s encountered the following validation error at %s time: Descriptor in " "binding #%" PRIu32 " index %" PRIu32 " requires %s component type, but bound descriptor format is %s.", report_data->FormatHandle(set).c_str(), caller, binding, index, StringDescriptorReqComponentType(reqs), string_VkFormat(buffer_view_state->create_info.format)); } // Verify VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_ATOMIC_BIT if ((reqs & DESCRIPTOR_REQ_VIEW_ATOMIC_OPERATION) && (descriptor_set->GetTypeFromBinding(binding) == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER) && !(buffer_view_state->format_features & VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_ATOMIC_BIT)) { auto set = descriptor_set->GetSet(); LogObjectList objlist(set); objlist.add(buffer_view); return LogError(objlist, "UNASSIGNED-None-MismatchAtomicBufferFeature", "Descriptor set %s encountered the following validation error at %s time: Descriptor " "in binding #%" PRIu32 " index %" PRIu32 ", %s, format %s, doesn't " "contain VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_ATOMIC_BIT.", report_data->FormatHandle(set).c_str(), caller, binding, index, report_data->FormatHandle(buffer_view).c_str(), string_VkFormat(buffer_view_state->create_info.format)); } if (enabled_features.core11.protectedMemory == VK_TRUE) { if (ValidateProtectedBuffer(cb_node, buffer_view_state->buffer_state.get(), caller, vuids.unprotected_command_buffer, "Buffer is in a descriptorSet")) { return true; } if (binding_info.second.is_writable && ValidateUnprotectedBuffer(cb_node, buffer_view_state->buffer_state.get(), caller, vuids.protected_command_buffer, "Buffer is in a descriptorSet")) { return true; } } } } else if (descriptor_class == DescriptorClass::AccelerationStructure) { // Verify that acceleration structures are valid bool is_khr = (*((cvdescriptorset::AccelerationStructureDescriptor *)(descriptor))).is_khr(); if (is_khr) { auto acc = static_cast<const AccelerationStructureDescriptor *>(descriptor)->GetAccelerationStructure(); auto acc_node = static_cast<const AccelerationStructureDescriptor *>(descriptor)->GetAccelerationStructureStateKHR(); 
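// A missing or destroyed acceleration structure is only acceptable when the handle is VK_NULL_HANDLE and the robustness2 nullDescriptor feature is enabled.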
if (!acc_node || acc_node->destroyed) { if (acc != VK_NULL_HANDLE || !enabled_features.robustness2_features.nullDescriptor) { auto set = descriptor_set->GetSet(); return LogError(set, vuids.descriptor_valid, "Descriptor set %s encountered the following validation error at %s time: " "Descriptor in binding #%" PRIu32 " index %" PRIu32 " is using acceleration structure %s that is invalid or has been destroyed.", report_data->FormatHandle(set).c_str(), caller, binding, index, report_data->FormatHandle(acc).c_str()); } } else { for (const auto *mem_binding : acc_node->GetBoundMemory()) { if (mem_binding->destroyed) { auto set = descriptor_set->GetSet(); return LogError( set, vuids.descriptor_valid, "Descriptor set %s encountered the following validation error at %s time: Descriptor in " "binding #%" PRIu32 " index %" PRIu32 " is using acceleration structure %s that references invalid memory %s.", report_data->FormatHandle(set).c_str(), caller, binding, index, report_data->FormatHandle(acc).c_str(), report_data->FormatHandle(mem_binding->mem).c_str()); } } } } else { auto acc = static_cast<const AccelerationStructureDescriptor *>(descriptor)->GetAccelerationStructureNV(); auto acc_node = static_cast<const AccelerationStructureDescriptor *>(descriptor)->GetAccelerationStructureStateNV(); if (!acc_node || acc_node->destroyed) { if (acc != VK_NULL_HANDLE || !enabled_features.robustness2_features.nullDescriptor) { auto set = descriptor_set->GetSet(); return LogError(set, vuids.descriptor_valid, "Descriptor set %s encountered the following validation error at %s time: " "Descriptor in binding #%" PRIu32 " index %" PRIu32 " is using acceleration structure %s that is invalid or has been destroyed.", report_data->FormatHandle(set).c_str(), caller, binding, index, report_data->FormatHandle(acc).c_str()); } } else { for (const auto *mem_binding : acc_node->GetBoundMemory()) { if (mem_binding->destroyed) { auto set = descriptor_set->GetSet(); return LogError( set, vuids.descriptor_valid, "Descriptor set %s encountered the following validation error at %s time: Descriptor in " "binding #%" PRIu32 " index %" PRIu32 " is using acceleration structure %s that references invalid memory %s.", report_data->FormatHandle(set).c_str(), caller, binding, index, report_data->FormatHandle(acc).c_str(), report_data->FormatHandle(mem_binding->mem).c_str()); } } } } } // If the validation is related to both of image and sampler, // please leave it in (descriptor_class == DescriptorClass::ImageSampler || descriptor_class == // DescriptorClass::Image) Here is to validate for only sampler. 
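// The checks below apply to the sampler itself (plain sampler or combined image sampler), independent of any bound image view.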
if (descriptor_class == DescriptorClass::ImageSampler || descriptor_class == DescriptorClass::PlainSampler) { // Verify Sampler still valid VkSampler sampler; const SAMPLER_STATE *sampler_state; if (descriptor_class == DescriptorClass::ImageSampler) { sampler = static_cast<const ImageSamplerDescriptor *>(descriptor)->GetSampler(); sampler_state = static_cast<const ImageSamplerDescriptor *>(descriptor)->GetSamplerState(); } else { sampler = static_cast<const SamplerDescriptor *>(descriptor)->GetSampler(); sampler_state = static_cast<const SamplerDescriptor *>(descriptor)->GetSamplerState(); } if (!sampler_state || sampler_state->destroyed) { auto set = descriptor_set->GetSet(); return LogError(set, vuids.descriptor_valid, "Descriptor set %s encountered the following validation error at %s time: Descriptor in " "binding #%" PRIu32 " index %" PRIu32 " is using sampler %s that is invalid or has been destroyed.", report_data->FormatHandle(set).c_str(), caller, binding, index, report_data->FormatHandle(sampler).c_str()); } else { if (sampler_state->samplerConversion && !descriptor->IsImmutableSampler()) { auto set = descriptor_set->GetSet(); return LogError(set, vuids.descriptor_valid, "Descriptor set %s encountered the following validation error at %s time: sampler (%s) " "in the descriptor set (%s) contains a YCbCr conversion (%s), so the sampler must " "also exist as an immutable sampler.", report_data->FormatHandle(set).c_str(), caller, report_data->FormatHandle(sampler).c_str(), report_data->FormatHandle(descriptor_set->GetSet()).c_str(), report_data->FormatHandle(sampler_state->samplerConversion).c_str()); } } } } } } return false; } // Set is being deleted or updated, so invalidate all bound cmd buffers void cvdescriptorset::DescriptorSet::InvalidateBoundCmdBuffers(ValidationStateTracker *state_data) { state_data->InvalidateCommandBuffers(cb_bindings, VulkanTypedHandle(set_, kVulkanObjectTypeDescriptorSet), /*unlink*/ false); } // Loop through the write updates to do for a push descriptor set, ignoring dstSet void cvdescriptorset::DescriptorSet::PerformPushDescriptorsUpdate(ValidationStateTracker *dev_data, uint32_t write_count, const VkWriteDescriptorSet *p_wds) { assert(IsPushDescriptor()); for (uint32_t i = 0; i < write_count; i++) { PerformWriteUpdate(dev_data, &p_wds[i]); } push_descriptor_set_writes.clear(); push_descriptor_set_writes.reserve(static_cast<std::size_t>(write_count)); for (uint32_t i = 0; i < write_count; i++) { push_descriptor_set_writes.push_back(safe_VkWriteDescriptorSet(&p_wds[i])); } } // Perform write update in given update struct void cvdescriptorset::DescriptorSet::PerformWriteUpdate(ValidationStateTracker *dev_data, const VkWriteDescriptorSet *update) { // Perform update on a per-binding basis as consecutive updates roll over to next binding auto descriptors_remaining = update->descriptorCount; auto offset = update->dstArrayElement; auto orig_binding = DescriptorSetLayout::ConstBindingIterator(layout_.get(), update->dstBinding); auto current_binding = orig_binding; uint32_t update_index = 0; // Verify next consecutive binding matches type, stage flags & immutable sampler use and if AtEnd while (descriptors_remaining && orig_binding.IsConsistent(current_binding)) { const auto &index_range = current_binding.GetGlobalIndexRange(); auto global_idx = index_range.start + offset; // global_idx is which descriptor is needed to update. If global_idx > index_range.end, it means the descriptor isn't in // this binding, maybe in next binding. 
if (global_idx >= index_range.end) { offset -= current_binding.GetDescriptorCount(); ++current_binding; continue; } // Loop over the updates for a single binding at a time uint32_t update_count = std::min(descriptors_remaining, current_binding.GetDescriptorCount() - offset); for (uint32_t di = 0; di < update_count; ++di, ++update_index) { descriptors_[global_idx + di]->WriteUpdate(state_data_, update, update_index); } // Roll over to next binding in case of consecutive update descriptors_remaining -= update_count; if (descriptors_remaining) { // Starting offset is beyond the current binding. Check consistency, update counters and advance to the next binding, // looking for the start point. All bindings (even those skipped) must be consistent with the update and with the // original binding. offset = 0; ++current_binding; } } if (update->descriptorCount) { some_update_ = true; change_count_++; } if (!(layout_->GetDescriptorBindingFlagsFromBinding(update->dstBinding) & (VK_DESCRIPTOR_BINDING_UPDATE_UNUSED_WHILE_PENDING_BIT | VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT))) { InvalidateBoundCmdBuffers(dev_data); } } // Validate Copy update bool CoreChecks::ValidateCopyUpdate(const VkCopyDescriptorSet *update, const DescriptorSet *dst_set, const DescriptorSet *src_set, const char *func_name, std::string *error_code, std::string *error_msg) const { auto dst_layout = dst_set->GetLayout().get(); auto src_layout = src_set->GetLayout().get(); // Verify dst layout still valid if (dst_layout->destroyed) { *error_code = "VUID-VkCopyDescriptorSet-dstSet-parameter"; std::ostringstream str; str << "Cannot call " << func_name << " to perform copy update on dstSet " << report_data->FormatHandle(dst_set->GetSet()) << " created with destroyed " << report_data->FormatHandle(dst_layout->GetDescriptorSetLayout()) << "."; *error_msg = str.str(); return false; } // Verify src layout still valid if (src_layout->destroyed) { *error_code = "VUID-VkCopyDescriptorSet-srcSet-parameter"; std::ostringstream str; str << "Cannot call " << func_name << " to perform copy update on dstSet " << report_data->FormatHandle(dst_set->GetSet()) << " from srcSet " << report_data->FormatHandle(src_set->GetSet()) << " created with destroyed " << report_data->FormatHandle(src_layout->GetDescriptorSetLayout()) << "."; *error_msg = str.str(); return false; } if (!dst_layout->HasBinding(update->dstBinding)) { *error_code = "VUID-VkCopyDescriptorSet-dstBinding-00347"; std::stringstream error_str; error_str << "DescriptorSet " << report_data->FormatHandle(dst_set->GetSet()) << " does not have copy update dest binding of " << update->dstBinding; *error_msg = error_str.str(); return false; } if (!src_set->HasBinding(update->srcBinding)) { *error_code = "VUID-VkCopyDescriptorSet-srcBinding-00345"; std::stringstream error_str; error_str << "DescriptorSet " << report_data->FormatHandle(src_set->GetSet()) << " does not have copy update src binding of " << update->srcBinding; *error_msg = error_str.str(); return false; } // Verify idle ds if (dst_set->in_use.load() && !(dst_layout->GetDescriptorBindingFlagsFromBinding(update->dstBinding) & (VK_DESCRIPTOR_BINDING_UPDATE_UNUSED_WHILE_PENDING_BIT | VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT))) { // TODO : Re-using Free Idle error code, need copy update idle error code *error_code = "VUID-vkFreeDescriptorSets-pDescriptorSets-00309"; std::stringstream error_str; error_str << "Cannot call " << func_name << " to perform copy update on descriptor set " << report_data->FormatHandle(dst_set->GetSet()) << " 
that is in use by a command buffer"; *error_msg = error_str.str(); return false; } // src & dst set bindings are valid // Check bounds of src & dst auto src_start_idx = src_set->GetGlobalIndexRangeFromBinding(update->srcBinding).start + update->srcArrayElement; if ((src_start_idx + update->descriptorCount) > src_set->GetTotalDescriptorCount()) { // SRC update out of bounds *error_code = "VUID-VkCopyDescriptorSet-srcArrayElement-00346"; std::stringstream error_str; error_str << "Attempting copy update from descriptorSet " << report_data->FormatHandle(update->srcSet) << " binding#" << update->srcBinding << " with offset index of " << src_set->GetGlobalIndexRangeFromBinding(update->srcBinding).start << " plus update array offset of " << update->srcArrayElement << " and update of " << update->descriptorCount << " descriptors oversteps total number of descriptors in set: " << src_set->GetTotalDescriptorCount(); *error_msg = error_str.str(); return false; } auto dst_start_idx = dst_layout->GetGlobalIndexRangeFromBinding(update->dstBinding).start + update->dstArrayElement; if ((dst_start_idx + update->descriptorCount) > dst_layout->GetTotalDescriptorCount()) { // DST update out of bounds *error_code = "VUID-VkCopyDescriptorSet-dstArrayElement-00348"; std::stringstream error_str; error_str << "Attempting copy update to descriptorSet " << report_data->FormatHandle(dst_set->GetSet()) << " binding#" << update->dstBinding << " with offset index of " << dst_layout->GetGlobalIndexRangeFromBinding(update->dstBinding).start << " plus update array offset of " << update->dstArrayElement << " and update of " << update->descriptorCount << " descriptors oversteps total number of descriptors in set: " << dst_layout->GetTotalDescriptorCount(); *error_msg = error_str.str(); return false; } // Check that types match // TODO : Base default error case going from here is "VUID-VkAcquireNextImageInfoKHR-semaphore-parameter" 2ba which covers all // consistency issues, need more fine-grained error codes *error_code = "VUID-VkCopyDescriptorSet-srcSet-00349"; auto src_type = src_set->GetTypeFromBinding(update->srcBinding); auto dst_type = dst_layout->GetTypeFromBinding(update->dstBinding); if (src_type != dst_type) { *error_code = "VUID-VkCopyDescriptorSet-dstBinding-02632"; std::stringstream error_str; error_str << "Attempting copy update to descriptorSet " << report_data->FormatHandle(dst_set->GetSet()) << " binding #" << update->dstBinding << " with type " << string_VkDescriptorType(dst_type) << " from descriptorSet " << report_data->FormatHandle(src_set->GetSet()) << " binding #" << update->srcBinding << " with type " << string_VkDescriptorType(src_type) << ". 
Types do not match"; *error_msg = error_str.str(); return false; } // Verify consistency of src & dst bindings if update crosses binding boundaries if ((!VerifyUpdateConsistency(report_data, DescriptorSetLayout::ConstBindingIterator(src_layout, update->srcBinding), update->srcArrayElement, update->descriptorCount, "copy update from", src_set->GetSet(), error_msg)) || (!VerifyUpdateConsistency(report_data, DescriptorSetLayout::ConstBindingIterator(dst_layout, update->dstBinding), update->dstArrayElement, update->descriptorCount, "copy update to", dst_set->GetSet(), error_msg))) { return false; } if ((src_layout->GetCreateFlags() & VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT) && !(dst_layout->GetCreateFlags() & VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT)) { *error_code = "VUID-VkCopyDescriptorSet-srcSet-01918"; std::stringstream error_str; error_str << "If pname:srcSet's (" << report_data->FormatHandle(update->srcSet) << ") layout was created with the " "ename:VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT flag " "set, then pname:dstSet's (" << report_data->FormatHandle(update->dstSet) << ") layout must: also have been created with the " "ename:VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT flag set"; *error_msg = error_str.str(); return false; } if (!(src_layout->GetCreateFlags() & VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT) && (dst_layout->GetCreateFlags() & VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT)) { *error_code = "VUID-VkCopyDescriptorSet-srcSet-01919"; std::stringstream error_str; error_str << "If pname:srcSet's (" << report_data->FormatHandle(update->srcSet) << ") layout was created without the " "ename:VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT flag " "set, then pname:dstSet's (" << report_data->FormatHandle(update->dstSet) << ") layout must: also have been created without the " "ename:VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT flag set"; *error_msg = error_str.str(); return false; } if ((src_set->GetPoolState()->createInfo.flags & VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT) && !(dst_set->GetPoolState()->createInfo.flags & VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT)) { *error_code = "VUID-VkCopyDescriptorSet-srcSet-01920"; std::stringstream error_str; error_str << "If the descriptor pool from which pname:srcSet (" << report_data->FormatHandle(update->srcSet) << ") was allocated was created " "with the ename:VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT flag " "set, then the descriptor pool from which pname:dstSet (" << report_data->FormatHandle(update->dstSet) << ") was allocated must: " "also have been created with the ename:VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT flag set"; *error_msg = error_str.str(); return false; } if (!(src_set->GetPoolState()->createInfo.flags & VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT) && (dst_set->GetPoolState()->createInfo.flags & VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT)) { *error_code = "VUID-VkCopyDescriptorSet-srcSet-01921"; std::stringstream error_str; error_str << "If the descriptor pool from which pname:srcSet (" << report_data->FormatHandle(update->srcSet) << ") was allocated was created " "without the ename:VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT flag " "set, then the descriptor pool from which pname:dstSet (" << report_data->FormatHandle(update->dstSet) << ") was allocated must: " "also have been created without the ename:VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT flag set"; 
*error_msg = error_str.str(); return false; } if (src_type == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT) { if ((update->srcArrayElement % 4) != 0) { *error_code = "VUID-VkCopyDescriptorSet-srcBinding-02223"; std::stringstream error_str; error_str << "Attempting copy update to VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT binding with " << "srcArrayElement " << update->srcArrayElement << " not a multiple of 4"; *error_msg = error_str.str(); return false; } if ((update->dstArrayElement % 4) != 0) { *error_code = "VUID-VkCopyDescriptorSet-dstBinding-02224"; std::stringstream error_str; error_str << "Attempting copy update to VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT binding with " << "dstArrayElement " << update->dstArrayElement << " not a multiple of 4"; *error_msg = error_str.str(); return false; } if ((update->descriptorCount % 4) != 0) { *error_code = "VUID-VkCopyDescriptorSet-srcBinding-02225"; std::stringstream error_str; error_str << "Attempting copy update to VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT binding with " << "descriptorCount " << update->descriptorCount << " not a multiple of 4"; *error_msg = error_str.str(); return false; } } // Update parameters all look good and descriptor updated so verify update contents if (!VerifyCopyUpdateContents(update, src_set, src_type, src_start_idx, dst_set, dst_type, dst_start_idx, func_name, error_code, error_msg)) { return false; } // All checks passed so update is good return true; } // Perform Copy update void cvdescriptorset::DescriptorSet::PerformCopyUpdate(ValidationStateTracker *dev_data, const VkCopyDescriptorSet *update, const DescriptorSet *src_set) { auto src_start_idx = src_set->GetGlobalIndexRangeFromBinding(update->srcBinding).start + update->srcArrayElement; auto dst_start_idx = layout_->GetGlobalIndexRangeFromBinding(update->dstBinding).start + update->dstArrayElement; // Update parameters all look good so perform update for (uint32_t di = 0; di < update->descriptorCount; ++di) { auto src = src_set->descriptors_[src_start_idx + di].get(); auto dst = descriptors_[dst_start_idx + di].get(); if (src->updated) { dst->CopyUpdate(state_data_, src); some_update_ = true; change_count_++; } else { dst->updated = false; } } if (!(layout_->GetDescriptorBindingFlagsFromBinding(update->dstBinding) & (VK_DESCRIPTOR_BINDING_UPDATE_UNUSED_WHILE_PENDING_BIT | VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT))) { InvalidateBoundCmdBuffers(dev_data); } } // Update the drawing state for the affected descriptors. // Set cb_node to this set and this set to cb_node. 
// Add the bindings of the descriptor // Set the layout based on the current descriptor layout (will mask subsequent layer mismatch errors) // TODO: Modify the UpdateDrawState virtual functions to *only* set initial layout and not change layouts // Prereq: This should be called for a set that has been confirmed to be active for the given cb_node, meaning it's going // to be used in a draw by the given cb_node void cvdescriptorset::DescriptorSet::UpdateDrawState(ValidationStateTracker *device_data, CMD_BUFFER_STATE *cb_node, CMD_TYPE cmd_type, const PIPELINE_STATE *pipe, const BindingReqMap &binding_req_map, const char *function) { if (!device_data->disabled[command_buffer_state] && !IsPushDescriptor()) { // bind cb to this descriptor set // Add bindings for descriptor set, the set's pool, and individual objects in the set if (device_data->AddCommandBufferBinding(cb_bindings, VulkanTypedHandle(set_, kVulkanObjectTypeDescriptorSet, this), cb_node)) { device_data->AddCommandBufferBinding(pool_state_->cb_bindings, VulkanTypedHandle(pool_state_->pool, kVulkanObjectTypeDescriptorPool, pool_state_), cb_node); } } // Descriptor UpdateDrawState functions do two things - associate resources to the command buffer, // and call image layout validation callbacks. If both are disabled, skip the entire loop. if (device_data->disabled[command_buffer_state] && device_data->disabled[image_layout_validation]) { return; } // For the active slots, use set# to look up descriptorSet from boundDescriptorSets, and bind all of that descriptor set's // resources CMD_BUFFER_STATE::CmdDrawDispatchInfo cmd_info = {}; for (const auto &binding_req_pair : binding_req_map) { auto index = layout_->GetIndexFromBinding(binding_req_pair.first); // We aren't validating descriptors created with PARTIALLY_BOUND or UPDATE_AFTER_BIND, so don't record state auto flags = layout_->GetDescriptorBindingFlagsFromIndex(index); if (flags & (VK_DESCRIPTOR_BINDING_PARTIALLY_BOUND_BIT | VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT)) { if (!(flags & VK_DESCRIPTOR_BINDING_PARTIALLY_BOUND_BIT)) { cmd_info.binding_infos.emplace_back(binding_req_pair); } continue; } auto range = layout_->GetGlobalIndexRangeFromIndex(index); for (uint32_t i = range.start; i < range.end; ++i) { descriptors_[i]->UpdateDrawState(device_data, cb_node); } } if (cmd_info.binding_infos.size() > 0) { cmd_info.cmd_type = cmd_type; cmd_info.function = function; if (cb_node->activeFramebuffer) { cmd_info.framebuffer = cb_node->activeFramebuffer->framebuffer; cmd_info.attachments = cb_node->active_attachments; cmd_info.subpasses = cb_node->active_subpasses; } cb_node->validate_descriptorsets_in_queuesubmit[set_].emplace_back(cmd_info); } } void cvdescriptorset::DescriptorSet::FilterOneBindingReq(const BindingReqMap::value_type &binding_req_pair, BindingReqMap *out_req, const TrackedBindings &bindings, uint32_t limit) { if (bindings.size() < limit) { const auto it = bindings.find(binding_req_pair.first); if (it == bindings.cend()) out_req->emplace(binding_req_pair); } } void cvdescriptorset::DescriptorSet::FilterBindingReqs(const CMD_BUFFER_STATE &cb_state, const PIPELINE_STATE &pipeline, const BindingReqMap &in_req, BindingReqMap *out_req) const { // For const cleanliness we have to find in the maps...
const auto validated_it = cached_validation_.find(&cb_state); if (validated_it == cached_validation_.cend()) { // We have nothing validated, copy in to out for (const auto &binding_req_pair : in_req) { out_req->emplace(binding_req_pair); } return; } const auto &validated = validated_it->second; const auto image_sample_version_it = validated.image_samplers.find(&pipeline); const VersionedBindings *image_sample_version = nullptr; if (image_sample_version_it != validated.image_samplers.cend()) { image_sample_version = &(image_sample_version_it->second); } const auto &dynamic_buffers = validated.dynamic_buffers; const auto &non_dynamic_buffers = validated.non_dynamic_buffers; const auto &stats = layout_->GetBindingTypeStats(); for (const auto &binding_req_pair : in_req) { auto binding = binding_req_pair.first; VkDescriptorSetLayoutBinding const *layout_binding = layout_->GetDescriptorSetLayoutBindingPtrFromBinding(binding); if (!layout_binding) { continue; } // Caching criteria differ per type. // If image_layout has changed, the image descriptors need to be validated against it. if (IsBufferDescriptor(layout_binding->descriptorType)) { if (IsDynamicDescriptor(layout_binding->descriptorType)) { FilterOneBindingReq(binding_req_pair, out_req, dynamic_buffers, stats.dynamic_buffer_count); } else { FilterOneBindingReq(binding_req_pair, out_req, non_dynamic_buffers, stats.non_dynamic_buffer_count); } } else { // This is rather crude, as the changed layouts may not impact the bound descriptors, // but the simple "versioning" serves as a cheap "dirty" test. bool stale = true; if (image_sample_version) { const auto version_it = image_sample_version->find(binding); if (version_it != image_sample_version->cend() && (version_it->second == cb_state.image_layout_change_count)) { stale = false; } } if (stale) { out_req->emplace(binding_req_pair); } } } } void cvdescriptorset::DescriptorSet::UpdateValidationCache(const CMD_BUFFER_STATE &cb_state, const PIPELINE_STATE &pipeline, const BindingReqMap &updated_bindings) { // For const cleanliness we have to find in the maps... auto &validated = cached_validation_[&cb_state]; auto &image_sample_version = validated.image_samplers[&pipeline]; auto &dynamic_buffers = validated.dynamic_buffers; auto &non_dynamic_buffers = validated.non_dynamic_buffers; for (const auto &binding_req_pair : updated_bindings) { auto binding = binding_req_pair.first; VkDescriptorSetLayoutBinding const *layout_binding = layout_->GetDescriptorSetLayoutBindingPtrFromBinding(binding); if (!layout_binding) { continue; } // Caching criteria differ per type. if (IsBufferDescriptor(layout_binding->descriptorType)) { if (IsDynamicDescriptor(layout_binding->descriptorType)) { dynamic_buffers.emplace(binding); } else { non_dynamic_buffers.emplace(binding); } } else { // Save the layout change version... image_sample_version[binding] = cb_state.image_layout_change_count; } } } cvdescriptorset::SamplerDescriptor::SamplerDescriptor(const ValidationStateTracker *dev_data, const VkSampler *immut) : immutable_(false) { updated = false; descriptor_class = PlainSampler; if (immut) { sampler_state_ = dev_data->GetConstCastShared<SAMPLER_STATE>(*immut); immutable_ = true; updated = true; } } // Validate given sampler.
Currently this only checks to make sure it exists in the samplerMap bool CoreChecks::ValidateSampler(const VkSampler sampler) const { return (GetSamplerState(sampler) != nullptr); } bool CoreChecks::ValidateImageUpdate(VkImageView image_view, VkImageLayout image_layout, VkDescriptorType type, const char *func_name, std::string *error_code, std::string *error_msg) const { auto iv_state = GetImageViewState(image_view); assert(iv_state); // Note that when an imageview is created, we validated that memory is bound so no need to re-check here // Validate that imageLayout is compatible with aspect_mask and image format // and validate that image usage bits are correct for given usage VkImageAspectFlags aspect_mask = iv_state->create_info.subresourceRange.aspectMask; VkImage image = iv_state->create_info.image; VkFormat format = VK_FORMAT_MAX_ENUM; VkImageUsageFlags usage = 0; auto image_node = GetImageState(image); assert(image_node); format = image_node->createInfo.format; usage = image_node->createInfo.usage; const auto stencil_usage_info = LvlFindInChain<VkImageStencilUsageCreateInfo>(image_node->createInfo.pNext); if (stencil_usage_info) { usage |= stencil_usage_info->stencilUsage; } // Validate that memory is bound to image if (ValidateMemoryIsBoundToImage(image_node, func_name, "UNASSIGNED-CoreValidation-BoundResourceFreedMemoryAccess")) { *error_code = "UNASSIGNED-CoreValidation-BoundResourceFreedMemoryAccess"; *error_msg = "No memory bound to image."; return false; } // KHR_maintenance1 allows rendering into 2D or 2DArray views which slice a 3D image, // but not binding them to descriptor sets. if (image_node->createInfo.imageType == VK_IMAGE_TYPE_3D && (iv_state->create_info.viewType == VK_IMAGE_VIEW_TYPE_2D || iv_state->create_info.viewType == VK_IMAGE_VIEW_TYPE_2D_ARRAY)) { *error_code = "VUID-VkDescriptorImageInfo-imageView-00343"; *error_msg = "ImageView must not be a 2D or 2DArray view of a 3D image"; return false; } // TODO : The various image aspect and format checks here are based on general spec language in 11.5 Image Views section under // vkCreateImageView(). What's the best way to create unique id for these cases? 
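// As a concrete illustration of the rules encoded in the switch that follows: a view whose aspectMask is
// VK_IMAGE_ASPECT_DEPTH_BIT over a VK_FORMAT_D32_SFLOAT image passes for VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL,
// while the same view supplied with VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL fails both the aspect check (the COLOR bit is
// required) and the format check (a depth/stencil format is not a color format).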
*error_code = "UNASSIGNED-CoreValidation-DrawState-InvalidImageView"; bool ds = FormatIsDepthOrStencil(format); switch (image_layout) { case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL: // Only Color bit must be set if ((aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT) != VK_IMAGE_ASPECT_COLOR_BIT) { std::stringstream error_str; error_str << "ImageView (" << report_data->FormatHandle(image_view) << ") uses layout VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL but does not have VK_IMAGE_ASPECT_COLOR_BIT set."; *error_msg = error_str.str(); return false; } // format must NOT be DS if (ds) { std::stringstream error_str; error_str << "ImageView (" << report_data->FormatHandle(image_view) << ") uses layout VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL but the image format is " << string_VkFormat(format) << " which is not a color format."; *error_msg = error_str.str(); return false; } break; case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL: case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL: // Depth or stencil bit must be set, but both must NOT be set if (aspect_mask & VK_IMAGE_ASPECT_DEPTH_BIT) { if (aspect_mask & VK_IMAGE_ASPECT_STENCIL_BIT) { // both must NOT be set std::stringstream error_str; error_str << "ImageView (" << report_data->FormatHandle(image_view) << ") has both STENCIL and DEPTH aspects set"; *error_msg = error_str.str(); return false; } } else if (!(aspect_mask & VK_IMAGE_ASPECT_STENCIL_BIT)) { // Neither were set std::stringstream error_str; error_str << "ImageView (" << report_data->FormatHandle(image_view) << ") has layout " << string_VkImageLayout(image_layout) << " but does not have STENCIL or DEPTH aspects set"; *error_msg = error_str.str(); return false; } // format must be DS if (!ds) { std::stringstream error_str; error_str << "ImageView (" << report_data->FormatHandle(image_view) << ") has layout " << string_VkImageLayout(image_layout) << " but the image format is " << string_VkFormat(format) << " which is not a depth/stencil format."; *error_msg = error_str.str(); return false; } break; default: // For other layouts if the source is depth/stencil image, both aspect bits must not be set if (ds) { if (aspect_mask & VK_IMAGE_ASPECT_DEPTH_BIT) { if (aspect_mask & VK_IMAGE_ASPECT_STENCIL_BIT) { // both must NOT be set std::stringstream error_str; error_str << "ImageView (" << report_data->FormatHandle(image_view) << ") has layout " << string_VkImageLayout(image_layout) << " and is using depth/stencil image of format " << string_VkFormat(format) << " but it has both STENCIL and DEPTH aspects set, which is illegal. 
When using a depth/stencil " "image in a descriptor set, please only set either VK_IMAGE_ASPECT_DEPTH_BIT or " "VK_IMAGE_ASPECT_STENCIL_BIT depending on whether it will be used for depth reads or stencil " "reads respectively."; *error_code = "VUID-VkDescriptorImageInfo-imageView-01976"; *error_msg = error_str.str(); return false; } } } break; } // Now validate that usage flags are correctly set for given type of update // As we're switching per-type, if any type has specific layout requirements, check those here as well // TODO : The various image usage bit requirements are in general spec language for VkImageUsageFlags bit block in 11.3 Images // under vkCreateImage() const char *error_usage_bit = nullptr; switch (type) { case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE: if (iv_state->samplerConversion != VK_NULL_HANDLE) { *error_code = "VUID-VkWriteDescriptorSet-descriptorType-01946"; std::stringstream error_str; error_str << "ImageView (" << report_data->FormatHandle(image_view) << ")" << " used as a VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE can't be created with VkSamplerYcbcrConversion"; *error_msg = error_str.str(); return false; } // drop through case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER: { if (!(usage & VK_IMAGE_USAGE_SAMPLED_BIT)) { error_usage_bit = "VK_IMAGE_USAGE_SAMPLED_BIT"; *error_code = "VUID-VkWriteDescriptorSet-descriptorType-00337"; } break; } case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: { if (!(usage & VK_IMAGE_USAGE_STORAGE_BIT)) { error_usage_bit = "VK_IMAGE_USAGE_STORAGE_BIT"; *error_code = "VUID-VkWriteDescriptorSet-descriptorType-00339"; } else if ((VK_IMAGE_LAYOUT_GENERAL != image_layout) && (!device_extensions.vk_khr_shared_presentable_image || (VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR != image_layout))) { *error_code = "VUID-VkWriteDescriptorSet-descriptorType-04152"; std::stringstream error_str; error_str << "Descriptor update with descriptorType VK_DESCRIPTOR_TYPE_STORAGE_IMAGE" << " is being updated with invalid imageLayout " << string_VkImageLayout(image_layout) << " for image " << report_data->FormatHandle(image) << " in imageView " << report_data->FormatHandle(image_view) << ". 
Allowed layouts are: VK_IMAGE_LAYOUT_GENERAL"; if (device_extensions.vk_khr_shared_presentable_image) { error_str << " or VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR"; } *error_msg = error_str.str(); return false; } break; } case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT: { if (!(usage & VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT)) { error_usage_bit = "VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT"; *error_code = "VUID-VkWriteDescriptorSet-descriptorType-00338"; } break; } default: break; } if (error_usage_bit) { std::stringstream error_str; error_str << "ImageView (" << report_data->FormatHandle(image_view) << ") with usage mask " << std::hex << std::showbase << usage << " being used for a descriptor update of type " << string_VkDescriptorType(type) << " does not have " << error_usage_bit << " set."; *error_msg = error_str.str(); return false; } // All the following types share the same image layouts // check for Storage Images above if ((type == VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE) || (type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER) || (type == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT)) { // Test that the layout is compatible with the descriptorType for the two sampled image types const static std::array<VkImageLayout, 3> valid_layouts = { {VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, VK_IMAGE_LAYOUT_GENERAL}}; struct ExtensionLayout { VkImageLayout layout; ExtEnabled DeviceExtensions::*extension; }; const static std::array<ExtensionLayout, 5> extended_layouts{{ // Note double brace req'd for aggregate initialization {VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR, &DeviceExtensions::vk_khr_shared_presentable_image}, {VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL, &DeviceExtensions::vk_khr_maintenance2}, {VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL, &DeviceExtensions::vk_khr_maintenance2}, {VK_IMAGE_LAYOUT_READ_ONLY_OPTIMAL_KHR, &DeviceExtensions::vk_khr_synchronization_2}, {VK_IMAGE_LAYOUT_ATTACHMENT_OPTIMAL_KHR, &DeviceExtensions::vk_khr_synchronization_2}, }}; auto is_layout = [image_layout, this](const ExtensionLayout &ext_layout) { return device_extensions.*(ext_layout.extension) && (ext_layout.layout == image_layout); }; bool valid_layout = (std::find(valid_layouts.cbegin(), valid_layouts.cend(), image_layout) != valid_layouts.cend()) || std::any_of(extended_layouts.cbegin(), extended_layouts.cend(), is_layout); if (!valid_layout) { // The following works as currently all 3 descriptor types share the same set of valid layouts switch (type) { case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE: *error_code = "VUID-VkWriteDescriptorSet-descriptorType-04149"; break; case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER: *error_code = "VUID-VkWriteDescriptorSet-descriptorType-04150"; break; case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT: *error_code = "VUID-VkWriteDescriptorSet-descriptorType-04151"; break; default: break; } std::stringstream error_str; error_str << "Descriptor update with descriptorType " << string_VkDescriptorType(type) << " is being updated with invalid imageLayout " << string_VkImageLayout(image_layout) << " for image " << report_data->FormatHandle(image) << " in imageView " << report_data->FormatHandle(image_view) << ". 
Allowed layouts are: VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL, " << "VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, VK_IMAGE_LAYOUT_GENERAL"; for (auto &ext_layout : extended_layouts) { if (device_extensions.*(ext_layout.extension)) { error_str << ", " << string_VkImageLayout(ext_layout.layout); } } *error_msg = error_str.str(); return false; } } if ((type == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE) || (type == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT)) { const VkComponentMapping components = iv_state->create_info.components; if (IsIdentitySwizzle(components) == false) { *error_code = "VUID-VkWriteDescriptorSet-descriptorType-00336"; std::stringstream error_str; error_str << "ImageView (" << report_data->FormatHandle(image_view) << ") has a non-identity swizzle component, " << " r swizzle = " << string_VkComponentSwizzle(components.r) << "," << " g swizzle = " << string_VkComponentSwizzle(components.g) << "," << " b swizzle = " << string_VkComponentSwizzle(components.b) << "," << " a swizzle = " << string_VkComponentSwizzle(components.a) << "."; *error_msg = error_str.str(); return false; } } return true; } void cvdescriptorset::SamplerDescriptor::WriteUpdate(const ValidationStateTracker *dev_data, const VkWriteDescriptorSet *update, const uint32_t index) { if (!immutable_) { sampler_state_ = dev_data->GetConstCastShared<SAMPLER_STATE>(update->pImageInfo[index].sampler); } updated = true; } void cvdescriptorset::SamplerDescriptor::CopyUpdate(const ValidationStateTracker *dev_data, const Descriptor *src) { if (!immutable_) { sampler_state_ = static_cast<const SamplerDescriptor *>(src)->sampler_state_; } updated = true; } void cvdescriptorset::SamplerDescriptor::UpdateDrawState(ValidationStateTracker *dev_data, CMD_BUFFER_STATE *cb_node) { if (!immutable_) { auto sampler_state = GetSamplerState(); if (sampler_state) dev_data->AddCommandBufferBindingSampler(cb_node, sampler_state); } } cvdescriptorset::ImageSamplerDescriptor::ImageSamplerDescriptor(const ValidationStateTracker *dev_data, const VkSampler *immut) : immutable_(false), image_layout_(VK_IMAGE_LAYOUT_UNDEFINED) { updated = false; descriptor_class = ImageSampler; if (immut) { sampler_state_ = dev_data->GetConstCastShared<SAMPLER_STATE>(*immut); immutable_ = true; } } void cvdescriptorset::ImageSamplerDescriptor::WriteUpdate(const ValidationStateTracker *dev_data, const VkWriteDescriptorSet *update, const uint32_t index) { updated = true; const auto &image_info = update->pImageInfo[index]; if (!immutable_) { sampler_state_ = dev_data->GetConstCastShared<SAMPLER_STATE>(image_info.sampler); } image_layout_ = image_info.imageLayout; image_view_state_ = dev_data->GetConstCastShared<IMAGE_VIEW_STATE>(image_info.imageView); } void cvdescriptorset::ImageSamplerDescriptor::CopyUpdate(const ValidationStateTracker *dev_data, const Descriptor *src) { if (!immutable_) { sampler_state_ = static_cast<const ImageSamplerDescriptor *>(src)->sampler_state_; } updated = true; image_layout_ = static_cast<const ImageSamplerDescriptor *>(src)->image_layout_; image_view_state_ = static_cast<const ImageSamplerDescriptor *>(src)->image_view_state_; } void cvdescriptorset::ImageSamplerDescriptor::UpdateDrawState(ValidationStateTracker *dev_data, CMD_BUFFER_STATE *cb_node) { // First add binding for any non-immutable sampler if (!immutable_) { auto sampler_state = GetSamplerState(); if (sampler_state) dev_data->AddCommandBufferBindingSampler(cb_node, sampler_state); } // Add binding for image auto iv_state = GetImageViewState(); if (iv_state) {
dev_data->AddCommandBufferBindingImageView(cb_node, iv_state); dev_data->CallSetImageViewInitialLayoutCallback(cb_node, *iv_state, image_layout_); } } cvdescriptorset::ImageDescriptor::ImageDescriptor(const VkDescriptorType type) : image_layout_(VK_IMAGE_LAYOUT_UNDEFINED) { updated = false; descriptor_class = Image; } void cvdescriptorset::ImageDescriptor::WriteUpdate(const ValidationStateTracker *dev_data, const VkWriteDescriptorSet *update, const uint32_t index) { updated = true; const auto &image_info = update->pImageInfo[index]; image_layout_ = image_info.imageLayout; image_view_state_ = dev_data->GetConstCastShared<IMAGE_VIEW_STATE>(image_info.imageView); } void cvdescriptorset::ImageDescriptor::CopyUpdate(const ValidationStateTracker *dev_data, const Descriptor *src) { updated = true; image_layout_ = static_cast<const ImageDescriptor *>(src)->image_layout_; image_view_state_ = static_cast<const ImageDescriptor *>(src)->image_view_state_; } void cvdescriptorset::ImageDescriptor::UpdateDrawState(ValidationStateTracker *dev_data, CMD_BUFFER_STATE *cb_node) { // Add binding for image auto iv_state = GetImageViewState(); if (iv_state) { dev_data->AddCommandBufferBindingImageView(cb_node, iv_state); dev_data->CallSetImageViewInitialLayoutCallback(cb_node, *iv_state, image_layout_); } } cvdescriptorset::BufferDescriptor::BufferDescriptor(const VkDescriptorType type) : offset_(0), range_(0) { updated = false; descriptor_class = GeneralBuffer; } void cvdescriptorset::BufferDescriptor::WriteUpdate(const ValidationStateTracker *dev_data, const VkWriteDescriptorSet *update, const uint32_t index) { updated = true; const auto &buffer_info = update->pBufferInfo[index]; offset_ = buffer_info.offset; range_ = buffer_info.range; buffer_state_ = dev_data->GetConstCastShared<BUFFER_STATE>(buffer_info.buffer); } void cvdescriptorset::BufferDescriptor::CopyUpdate(const ValidationStateTracker *dev_data, const Descriptor *src) { updated = true; const auto buff_desc = static_cast<const BufferDescriptor *>(src); offset_ = buff_desc->offset_; range_ = buff_desc->range_; buffer_state_ = buff_desc->buffer_state_; } void cvdescriptorset::BufferDescriptor::UpdateDrawState(ValidationStateTracker *dev_data, CMD_BUFFER_STATE *cb_node) { auto buffer_node = GetBufferState(); if (buffer_node) dev_data->AddCommandBufferBindingBuffer(cb_node, buffer_node); } cvdescriptorset::TexelDescriptor::TexelDescriptor(const VkDescriptorType type) { updated = false; descriptor_class = TexelBuffer; } void cvdescriptorset::TexelDescriptor::WriteUpdate(const ValidationStateTracker *dev_data, const VkWriteDescriptorSet *update, const uint32_t index) { updated = true; buffer_view_state_ = dev_data->GetConstCastShared<BUFFER_VIEW_STATE>(update->pTexelBufferView[index]); } void cvdescriptorset::TexelDescriptor::CopyUpdate(const ValidationStateTracker *dev_data, const Descriptor *src) { updated = true; buffer_view_state_ = static_cast<const TexelDescriptor *>(src)->buffer_view_state_; } void cvdescriptorset::TexelDescriptor::UpdateDrawState(ValidationStateTracker *dev_data, CMD_BUFFER_STATE *cb_node) { auto bv_state = GetBufferViewState(); if (bv_state) { dev_data->AddCommandBufferBindingBufferView(cb_node, bv_state); } } cvdescriptorset::AccelerationStructureDescriptor::AccelerationStructureDescriptor(const VkDescriptorType type) : acc_(VK_NULL_HANDLE), acc_nv_(VK_NULL_HANDLE) { updated = false; is_khr_ = false; descriptor_class = AccelerationStructure; } void cvdescriptorset::AccelerationStructureDescriptor::WriteUpdate(const 
ValidationStateTracker *dev_data, const VkWriteDescriptorSet *update, const uint32_t index) { const auto *acc_info = LvlFindInChain<VkWriteDescriptorSetAccelerationStructureKHR>(update->pNext); const auto *acc_info_nv = LvlFindInChain<VkWriteDescriptorSetAccelerationStructureNV>(update->pNext); assert(acc_info || acc_info_nv); is_khr_ = (acc_info != NULL); updated = true; if (is_khr_) { acc_ = acc_info->pAccelerationStructures[index]; acc_state_ = dev_data->GetConstCastShared<ACCELERATION_STRUCTURE_STATE_KHR>(acc_); } else { acc_nv_ = acc_info_nv->pAccelerationStructures[index]; acc_state_nv_ = dev_data->GetConstCastShared<ACCELERATION_STRUCTURE_STATE>(acc_nv_); } } void cvdescriptorset::AccelerationStructureDescriptor::CopyUpdate(const ValidationStateTracker *dev_data, const Descriptor *src) { auto acc_desc = static_cast<const AccelerationStructureDescriptor *>(src); updated = true; if (is_khr_) { acc_ = acc_desc->acc_; acc_state_ = dev_data->GetConstCastShared<ACCELERATION_STRUCTURE_STATE_KHR>(acc_); } else { acc_nv_ = acc_desc->acc_nv_; acc_state_nv_ = dev_data->GetConstCastShared<ACCELERATION_STRUCTURE_STATE>(acc_nv_); } } void cvdescriptorset::AccelerationStructureDescriptor::UpdateDrawState(ValidationStateTracker *dev_data, CMD_BUFFER_STATE *cb_node) { if (is_khr_) { auto acc_node = GetAccelerationStructureStateKHR(); if (acc_node) dev_data->AddCommandBufferBindingAccelerationStructure(cb_node, acc_node); } else { auto acc_node = GetAccelerationStructureStateNV(); if (acc_node) dev_data->AddCommandBufferBindingAccelerationStructure(cb_node, acc_node); } } // This is a helper function that iterates over a set of Write and Copy updates, pulls the DescriptorSet* for updated // sets, and then calls their respective Validate[Write|Copy]Update functions. // If the update hits an issue for which the callback returns "true", meaning that the call down the chain should // be skipped, then true is returned. // If there is no issue with the update, then false is returned. 
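// Usage sketch (assuming the usual validate-then-record layer flow, see PerformUpdateDescriptorSets below): the
// PreCallValidate* entry point returns this function's result as 'skip'; only when it is false does the layer call down
// the chain and then record the state change via cvdescriptorset::PerformUpdateDescriptorSets().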
bool CoreChecks::ValidateUpdateDescriptorSets(uint32_t write_count, const VkWriteDescriptorSet *p_wds, uint32_t copy_count, const VkCopyDescriptorSet *p_cds, const char *func_name) const { bool skip = false; // Validate Write updates for (uint32_t i = 0; i < write_count; i++) { auto dest_set = p_wds[i].dstSet; auto set_node = GetSetNode(dest_set); if (!set_node) { skip |= LogError(dest_set, kVUID_Core_DrawState_InvalidDescriptorSet, "Cannot call %s on %s that has not been allocated in pDescriptorWrites[%u].", func_name, report_data->FormatHandle(dest_set).c_str(), i); } else { std::string error_code; std::string error_str; if (!ValidateWriteUpdate(set_node, &p_wds[i], func_name, &error_code, &error_str)) { skip |= LogError(dest_set, error_code, "%s pDescriptorWrites[%u] failed write update validation for %s with error: %s.", func_name, i, report_data->FormatHandle(dest_set).c_str(), error_str.c_str()); } } if (p_wds[i].pNext) { const auto *pnext_struct = LvlFindInChain<VkWriteDescriptorSetAccelerationStructureKHR>(p_wds[i].pNext); if (pnext_struct) { for (uint32_t j = 0; j < pnext_struct->accelerationStructureCount; ++j) { const ACCELERATION_STRUCTURE_STATE_KHR *as_state = GetAccelerationStructureStateKHR(pnext_struct->pAccelerationStructures[j]); if (as_state && (as_state->create_infoKHR.sType == VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CREATE_INFO_KHR && (as_state->create_infoKHR.type != VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR && as_state->create_infoKHR.type != VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR))) { skip |= LogError(dest_set, "VUID-VkWriteDescriptorSetAccelerationStructureKHR-pAccelerationStructures-03579", "%s: For pDescriptorWrites[%u] acceleration structure in pAccelerationStructures[%u] must " "have been created with " "VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR or VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR.", func_name, i, j); } } } const auto *pnext_struct_nv = LvlFindInChain<VkWriteDescriptorSetAccelerationStructureNV>(p_wds[i].pNext); if (pnext_struct_nv) { for (uint32_t j = 0; j < pnext_struct_nv->accelerationStructureCount; ++j) { const ACCELERATION_STRUCTURE_STATE *as_state = GetAccelerationStructureStateNV(pnext_struct_nv->pAccelerationStructures[j]); if (as_state && (as_state->create_infoNV.sType == VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CREATE_INFO_NV && as_state->create_infoNV.info.type != VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_NV)) { skip |= LogError(dest_set, "VUID-VkWriteDescriptorSetAccelerationStructureNV-pAccelerationStructures-03748", "%s: For pDescriptorWrites[%u] acceleration structure in pAccelerationStructures[%u] must " "have been created with" " VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_NV.", func_name, i, j); } } } } } // Now validate copy updates for (uint32_t i = 0; i < copy_count; ++i) { auto dst_set = p_cds[i].dstSet; auto src_set = p_cds[i].srcSet; auto src_node = GetSetNode(src_set); auto dst_node = GetSetNode(dst_set); // Object_tracker verifies that src & dest descriptor set are valid assert(src_node); assert(dst_node); std::string error_code; std::string error_str; if (!ValidateCopyUpdate(&p_cds[i], dst_node, src_node, func_name, &error_code, &error_str)) { LogObjectList objlist(dst_set); objlist.add(src_set); skip |= LogError(objlist, error_code, "%s pDescriptorCopies[%u] failed copy update from %s to %s with error: %s.", func_name, i, report_data->FormatHandle(src_set).c_str(), report_data->FormatHandle(dst_set).c_str(), error_str.c_str()); } } return skip; } // This is a helper function that iterates over a set of Write and 
Copy updates, pulls the DescriptorSet* for updated // sets, and then calls their respective Perform[Write|Copy]Update functions. // Prerequisite : ValidateUpdateDescriptorSets() should be called and return "false" prior to calling PerformUpdateDescriptorSets() // with the same set of updates. // This is split from the validate code to allow validation prior to calling down the chain, and then update after // calling down the chain. void cvdescriptorset::PerformUpdateDescriptorSets(ValidationStateTracker *dev_data, uint32_t write_count, const VkWriteDescriptorSet *p_wds, uint32_t copy_count, const VkCopyDescriptorSet *p_cds) { // Write updates first uint32_t i = 0; for (i = 0; i < write_count; ++i) { auto dest_set = p_wds[i].dstSet; auto set_node = dev_data->GetSetNode(dest_set); if (set_node) { set_node->PerformWriteUpdate(dev_data, &p_wds[i]); } } // Now copy updates for (i = 0; i < copy_count; ++i) { auto dst_set = p_cds[i].dstSet; auto src_set = p_cds[i].srcSet; auto src_node = dev_data->GetSetNode(src_set); auto dst_node = dev_data->GetSetNode(dst_set); if (src_node && dst_node) { dst_node->PerformCopyUpdate(dev_data, &p_cds[i], src_node); } } } cvdescriptorset::DecodedTemplateUpdate::DecodedTemplateUpdate(const ValidationStateTracker *device_data, VkDescriptorSet descriptorSet, const TEMPLATE_STATE *template_state, const void *pData, VkDescriptorSetLayout push_layout) { auto const &create_info = template_state->create_info; inline_infos.resize(create_info.descriptorUpdateEntryCount); // Make sure we have one if we need it inline_infos_khr.resize(create_info.descriptorUpdateEntryCount); inline_infos_nv.resize(create_info.descriptorUpdateEntryCount); desc_writes.reserve(create_info.descriptorUpdateEntryCount); // emplaced, so reserved without initialization VkDescriptorSetLayout effective_dsl = create_info.templateType == VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET ? 
create_info.descriptorSetLayout : push_layout; auto layout_obj = device_data->GetDescriptorSetLayoutShared(effective_dsl); // Create a WriteDescriptorSet struct for each template update entry for (uint32_t i = 0; i < create_info.descriptorUpdateEntryCount; i++) { auto binding_count = layout_obj->GetDescriptorCountFromBinding(create_info.pDescriptorUpdateEntries[i].dstBinding); auto binding_being_updated = create_info.pDescriptorUpdateEntries[i].dstBinding; auto dst_array_element = create_info.pDescriptorUpdateEntries[i].dstArrayElement; desc_writes.reserve(desc_writes.size() + create_info.pDescriptorUpdateEntries[i].descriptorCount); for (uint32_t j = 0; j < create_info.pDescriptorUpdateEntries[i].descriptorCount; j++) { desc_writes.emplace_back(); auto &write_entry = desc_writes.back(); size_t offset = create_info.pDescriptorUpdateEntries[i].offset + j * create_info.pDescriptorUpdateEntries[i].stride; char *update_entry = (char *)(pData) + offset; if (dst_array_element >= binding_count) { dst_array_element = 0; binding_being_updated = layout_obj->GetNextValidBinding(binding_being_updated); } write_entry.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; write_entry.pNext = NULL; write_entry.dstSet = descriptorSet; write_entry.dstBinding = binding_being_updated; write_entry.dstArrayElement = dst_array_element; write_entry.descriptorCount = 1; write_entry.descriptorType = create_info.pDescriptorUpdateEntries[i].descriptorType; switch (create_info.pDescriptorUpdateEntries[i].descriptorType) { case VK_DESCRIPTOR_TYPE_SAMPLER: case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER: case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE: case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT: write_entry.pImageInfo = reinterpret_cast<VkDescriptorImageInfo *>(update_entry); break; case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER: case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER: case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC: case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: write_entry.pBufferInfo = reinterpret_cast<VkDescriptorBufferInfo *>(update_entry); break; case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER: case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: write_entry.pTexelBufferView = reinterpret_cast<VkBufferView *>(update_entry); break; case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT: { VkWriteDescriptorSetInlineUniformBlockEXT *inline_info = &inline_infos[i]; inline_info->sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_INLINE_UNIFORM_BLOCK_EXT; inline_info->pNext = nullptr; inline_info->dataSize = create_info.pDescriptorUpdateEntries[i].descriptorCount; inline_info->pData = update_entry; write_entry.pNext = inline_info; // descriptorCount must match the dataSize member of the VkWriteDescriptorSetInlineUniformBlockEXT structure write_entry.descriptorCount = inline_info->dataSize; // skip the rest of the array, they just represent bytes in the update j = create_info.pDescriptorUpdateEntries[i].descriptorCount; break; } case VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR: { VkWriteDescriptorSetAccelerationStructureKHR *inline_info_khr = &inline_infos_khr[i]; inline_info_khr->sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_KHR; inline_info_khr->pNext = nullptr; inline_info_khr->accelerationStructureCount = create_info.pDescriptorUpdateEntries[i].descriptorCount; inline_info_khr->pAccelerationStructures = reinterpret_cast<VkAccelerationStructureKHR *>(update_entry); write_entry.pNext = inline_info_khr; break; } case VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_NV: { 
VkWriteDescriptorSetAccelerationStructureNV *inline_info_nv = &inline_infos_nv[i]; inline_info_nv->sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_NV; inline_info_nv->pNext = nullptr; inline_info_nv->accelerationStructureCount = create_info.pDescriptorUpdateEntries[i].descriptorCount; inline_info_nv->pAccelerationStructures = reinterpret_cast<VkAccelerationStructureNV *>(update_entry); write_entry.pNext = inline_info_nv; break; } default: assert(0); break; } dst_array_element++; } } } // These helper functions carry out the validate and record steps for descriptor updates performed via update templates. They decode // the templatized data and leverage the non-template UpdateDescriptor helper functions. bool CoreChecks::ValidateUpdateDescriptorSetsWithTemplateKHR(VkDescriptorSet descriptorSet, const TEMPLATE_STATE *template_state, const void *pData) const { // Translate the templated update into a normal update for validation... cvdescriptorset::DecodedTemplateUpdate decoded_update(this, descriptorSet, template_state, pData); return ValidateUpdateDescriptorSets(static_cast<uint32_t>(decoded_update.desc_writes.size()), decoded_update.desc_writes.data(), 0, NULL, "vkUpdateDescriptorSetWithTemplate()"); } std::string cvdescriptorset::DescriptorSet::StringifySetAndLayout() const { std::string out; auto layout_handle = layout_->GetDescriptorSetLayout(); if (IsPushDescriptor()) { std::ostringstream str; str << "Push Descriptors defined with " << state_data_->report_data->FormatHandle(layout_handle); out = str.str(); } else { std::ostringstream str; str << state_data_->report_data->FormatHandle(set_) << " allocated with " << state_data_->report_data->FormatHandle(layout_handle); out = str.str(); } return out; }; // Loop through the write updates to validate for a push descriptor set, ignoring dstSet bool CoreChecks::ValidatePushDescriptorsUpdate(const DescriptorSet *push_set, uint32_t write_count, const VkWriteDescriptorSet *p_wds, const char *func_name) const { assert(push_set->IsPushDescriptor()); bool skip = false; for (uint32_t i = 0; i < write_count; i++) { std::string error_code; std::string error_str; if (!ValidateWriteUpdate(push_set, &p_wds[i], func_name, &error_code, &error_str)) { skip |= LogError(push_set->GetDescriptorSetLayout(), error_code, "%s VkWriteDescriptorSet[%u] failed update validation: %s.", func_name, i, error_str.c_str()); } } return skip; } // For the given buffer, verify that its creation parameters are appropriate for the given type // If there's an error, update the error_msg string with details and return false, else return true bool cvdescriptorset::ValidateBufferUsage(debug_report_data *report_data, BUFFER_STATE const *buffer_node, VkDescriptorType type, std::string *error_code, std::string *error_msg) { // Verify that usage bits are set correctly for given type auto usage = buffer_node->createInfo.usage; const char *error_usage_bit = nullptr; switch (type) { case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER: if (!(usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT)) { *error_code = "VUID-VkWriteDescriptorSet-descriptorType-00334"; error_usage_bit = "VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT"; } break; case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: if (!(usage & VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT)) { *error_code = "VUID-VkWriteDescriptorSet-descriptorType-00335"; error_usage_bit = "VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT"; } break; case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER: case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC: if (!(usage &
VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT)) { *error_code = "VUID-VkWriteDescriptorSet-descriptorType-00330"; error_usage_bit = "VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT"; } break; case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER: case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: if (!(usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT)) { *error_code = "VUID-VkWriteDescriptorSet-descriptorType-00331"; error_usage_bit = "VK_BUFFER_USAGE_STORAGE_BUFFER_BIT"; } break; default: break; } if (error_usage_bit) { std::stringstream error_str; error_str << "Buffer (" << report_data->FormatHandle(buffer_node->buffer) << ") with usage mask " << std::hex << std::showbase << usage << " being used for a descriptor update of type " << string_VkDescriptorType(type) << " does not have " << error_usage_bit << " set."; *error_msg = error_str.str(); return false; } return true; } // For buffer descriptor updates, verify the buffer usage and VkDescriptorBufferInfo struct which includes: // 1. buffer is valid // 2. buffer was created with correct usage flags // 3. offset is less than buffer size // 4. range is either VK_WHOLE_SIZE or falls in (0, (buffer size - offset)] // 5. range and offset are within the device's limits // If there's an error, update the error_msg string with details and return false, else return true bool CoreChecks::ValidateBufferUpdate(VkDescriptorBufferInfo const *buffer_info, VkDescriptorType type, const char *func_name, std::string *error_code, std::string *error_msg) const { // First make sure that buffer is valid auto buffer_node = GetBufferState(buffer_info->buffer); // Any invalid buffer should already be caught by object_tracker assert(buffer_node); if (ValidateMemoryIsBoundToBuffer(buffer_node, func_name, "VUID-VkWriteDescriptorSet-descriptorType-00329")) { *error_code = "VUID-VkWriteDescriptorSet-descriptorType-00329"; *error_msg = "No memory bound to buffer."; return false; } // Verify usage bits if (!cvdescriptorset::ValidateBufferUsage(report_data, buffer_node, type, error_code, error_msg)) { // error_msg will have been updated by ValidateBufferUsage() return false; } // offset must be less than buffer size if (buffer_info->offset >= buffer_node->createInfo.size) { *error_code = "VUID-VkDescriptorBufferInfo-offset-00340"; std::stringstream error_str; error_str << "VkDescriptorBufferInfo offset of " << buffer_info->offset << " is greater than or equal to buffer " << report_data->FormatHandle(buffer_node->buffer) << " size of " << buffer_node->createInfo.size; *error_msg = error_str.str(); return false; } if (buffer_info->range != VK_WHOLE_SIZE) { // Range must be VK_WHOLE_SIZE or > 0 if (!buffer_info->range) { *error_code = "VUID-VkDescriptorBufferInfo-range-00341"; std::stringstream error_str; error_str << "For buffer " << report_data->FormatHandle(buffer_node->buffer) << " VkDescriptorBufferInfo range is not VK_WHOLE_SIZE and is zero, which is not allowed."; *error_msg = error_str.str(); return false; } // Range must be VK_WHOLE_SIZE or <= (buffer size - offset) if (buffer_info->range > (buffer_node->createInfo.size - buffer_info->offset)) { *error_code = "VUID-VkDescriptorBufferInfo-range-00342"; std::stringstream error_str; error_str << "For buffer " << report_data->FormatHandle(buffer_node->buffer) << " VkDescriptorBufferInfo range is " << buffer_info->range << " which is greater than buffer size (" << buffer_node->createInfo.size << ") minus requested offset of " << buffer_info->offset; *error_msg = error_str.str(); return false; } } // Check buffer update sizes against device limits const auto &limits 
= phys_dev_props.limits; if (VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER == type || VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC == type) { auto max_ub_range = limits.maxUniformBufferRange; if (buffer_info->range != VK_WHOLE_SIZE && buffer_info->range > max_ub_range) { *error_code = "VUID-VkWriteDescriptorSet-descriptorType-00332"; std::stringstream error_str; error_str << "For buffer " << report_data->FormatHandle(buffer_node->buffer) << " VkDescriptorBufferInfo range is " << buffer_info->range << " which is greater than this device's maxUniformBufferRange (" << max_ub_range << ")"; *error_msg = error_str.str(); return false; } else if (buffer_info->range == VK_WHOLE_SIZE && (buffer_node->createInfo.size - buffer_info->offset) > max_ub_range) { *error_code = "VUID-VkWriteDescriptorSet-descriptorType-00332"; std::stringstream error_str; error_str << "For buffer " << report_data->FormatHandle(buffer_node->buffer) << " VkDescriptorBufferInfo range is VK_WHOLE_SIZE but effective range " << "(" << (buffer_node->createInfo.size - buffer_info->offset) << ") is greater than this device's " << "maxUniformBufferRange (" << max_ub_range << ")"; *error_msg = error_str.str(); return false; } } else if (VK_DESCRIPTOR_TYPE_STORAGE_BUFFER == type || VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC == type) { auto max_sb_range = limits.maxStorageBufferRange; if (buffer_info->range != VK_WHOLE_SIZE && buffer_info->range > max_sb_range) { *error_code = "VUID-VkWriteDescriptorSet-descriptorType-00333"; std::stringstream error_str; error_str << "For buffer " << report_data->FormatHandle(buffer_node->buffer) << " VkDescriptorBufferInfo range is " << buffer_info->range << " which is greater than this device's maxStorageBufferRange (" << max_sb_range << ")"; *error_msg = error_str.str(); return false; } else if (buffer_info->range == VK_WHOLE_SIZE && (buffer_node->createInfo.size - buffer_info->offset) > max_sb_range) { *error_code = "VUID-VkWriteDescriptorSet-descriptorType-00333"; std::stringstream error_str; error_str << "For buffer " << report_data->FormatHandle(buffer_node->buffer) << " VkDescriptorBufferInfo range is VK_WHOLE_SIZE but effective range " << "(" << (buffer_node->createInfo.size - buffer_info->offset) << ") is greater than this device's " << "maxStorageBufferRange (" << max_sb_range << ")"; *error_msg = error_str.str(); return false; } } return true; } template <typename T> bool CoreChecks::ValidateAccelerationStructureUpdate(T acc_node, const char *func_name, std::string *error_code, std::string *error_msg) const { // Any invalid acc struct should already be caught by object_tracker assert(acc_node); if (ValidateMemoryIsBoundToAccelerationStructure(acc_node, func_name, kVUIDUndefined)) { *error_code = kVUIDUndefined; *error_msg = "No memory bound to acceleration structure."; return false; } return true; } // Verify that the contents of the update are ok, but don't perform actual update bool CoreChecks::VerifyCopyUpdateContents(const VkCopyDescriptorSet *update, const DescriptorSet *src_set, VkDescriptorType src_type, uint32_t src_index, const DescriptorSet *dst_set, VkDescriptorType dst_type, uint32_t dst_index, const char *func_name, std::string *error_code, std::string *error_msg) const { // Note : Repurposing some Write update error codes here as specific details aren't called out for copy updates like they are // for write updates using DescriptorClass = cvdescriptorset::DescriptorClass; using BufferDescriptor = cvdescriptorset::BufferDescriptor; using ImageDescriptor = cvdescriptorset::ImageDescriptor; 
using ImageSamplerDescriptor = cvdescriptorset::ImageSamplerDescriptor; using SamplerDescriptor = cvdescriptorset::SamplerDescriptor; using TexelDescriptor = cvdescriptorset::TexelDescriptor; auto device_data = this; if (dst_type == VK_DESCRIPTOR_TYPE_SAMPLER) { for (uint32_t di = 0; di < update->descriptorCount; ++di) { const auto dst_desc = dst_set->GetDescriptorFromGlobalIndex(dst_index + di); if (!dst_desc->updated) continue; if (dst_desc->IsImmutableSampler()) { *error_code = "VUID-VkCopyDescriptorSet-dstBinding-02753"; std::stringstream error_str; error_str << "Attempted copy update to an immutable sampler descriptor."; *error_msg = error_str.str(); return false; } } } switch (src_set->GetDescriptorFromGlobalIndex(src_index)->descriptor_class) { case DescriptorClass::PlainSampler: { for (uint32_t di = 0; di < update->descriptorCount; ++di) { const auto src_desc = src_set->GetDescriptorFromGlobalIndex(src_index + di); if (!src_desc->updated) continue; if (!src_desc->IsImmutableSampler()) { auto update_sampler = static_cast<const SamplerDescriptor *>(src_desc)->GetSampler(); if (!ValidateSampler(update_sampler)) { *error_code = "VUID-VkWriteDescriptorSet-descriptorType-00325"; std::stringstream error_str; error_str << "Attempted copy update to sampler descriptor with invalid sampler: " << report_data->FormatHandle(update_sampler) << "."; *error_msg = error_str.str(); return false; } } else { // TODO : Warn here } } break; } case DescriptorClass::ImageSampler: { for (uint32_t di = 0; di < update->descriptorCount; ++di) { const auto src_desc = src_set->GetDescriptorFromGlobalIndex(src_index + di); if (!src_desc->updated) continue; auto img_samp_desc = static_cast<const ImageSamplerDescriptor *>(src_desc); // First validate sampler if (!img_samp_desc->IsImmutableSampler()) { auto update_sampler = img_samp_desc->GetSampler(); if (!ValidateSampler(update_sampler)) { *error_code = "VUID-VkWriteDescriptorSet-descriptorType-00325"; std::stringstream error_str; error_str << "Attempted copy update to sampler descriptor with invalid sampler: " << report_data->FormatHandle(update_sampler) << "."; *error_msg = error_str.str(); return false; } } else { // TODO : Warn here } // Validate image auto image_view = img_samp_desc->GetImageView(); auto image_layout = img_samp_desc->GetImageLayout(); if (image_view) { if (!ValidateImageUpdate(image_view, image_layout, src_type, func_name, error_code, error_msg)) { std::stringstream error_str; error_str << "Attempted copy update to combined image sampler descriptor failed due to: " << error_msg->c_str(); *error_msg = error_str.str(); return false; } } } break; } case DescriptorClass::Image: { for (uint32_t di = 0; di < update->descriptorCount; ++di) { const auto src_desc = src_set->GetDescriptorFromGlobalIndex(src_index + di); if (!src_desc->updated) continue; auto img_desc = static_cast<const ImageDescriptor *>(src_desc); auto image_view = img_desc->GetImageView(); auto image_layout = img_desc->GetImageLayout(); if (image_view) { if (!ValidateImageUpdate(image_view, image_layout, src_type, func_name, error_code, error_msg)) { std::stringstream error_str; error_str << "Attempted copy update to image descriptor failed due to: " << error_msg->c_str(); *error_msg = error_str.str(); return false; } } } break; } case DescriptorClass::TexelBuffer: { for (uint32_t di = 0; di < update->descriptorCount; ++di) { const auto src_desc = src_set->GetDescriptorFromGlobalIndex(src_index + di); if (!src_desc->updated) continue; auto buffer_view = static_cast<const 
TexelDescriptor *>(src_desc)->GetBufferView(); if (buffer_view) { auto bv_state = device_data->GetBufferViewState(buffer_view); if (!bv_state) { *error_code = "VUID-VkWriteDescriptorSet-descriptorType-02994"; std::stringstream error_str; error_str << "Attempted copy update to texel buffer descriptor with invalid buffer view: " << report_data->FormatHandle(buffer_view); *error_msg = error_str.str(); return false; } auto buffer = bv_state->create_info.buffer; if (!cvdescriptorset::ValidateBufferUsage(report_data, GetBufferState(buffer), src_type, error_code, error_msg)) { std::stringstream error_str; error_str << "Attempted copy update to texel buffer descriptor failed due to: " << error_msg->c_str(); *error_msg = error_str.str(); return false; } } } break; } case DescriptorClass::GeneralBuffer: { for (uint32_t di = 0; di < update->descriptorCount; ++di) { const auto src_desc = src_set->GetDescriptorFromGlobalIndex(src_index + di); if (!src_desc->updated) continue; auto buffer = static_cast<const BufferDescriptor *>(src_desc)->GetBuffer(); if (buffer) { if (!cvdescriptorset::ValidateBufferUsage(report_data, GetBufferState(buffer), src_type, error_code, error_msg)) { std::stringstream error_str; error_str << "Attempted copy update to buffer descriptor failed due to: " << error_msg->c_str(); *error_msg = error_str.str(); return false; } } } break; } case DescriptorClass::InlineUniform: case DescriptorClass::AccelerationStructure: break; default: assert(0); // We've already verified update type so should never get here break; } // All checks passed so update contents are good return true; } // Verify that the state at allocate time is correct, but don't actually allocate the sets yet bool CoreChecks::ValidateAllocateDescriptorSets(const VkDescriptorSetAllocateInfo *p_alloc_info, const cvdescriptorset::AllocateDescriptorSetsData *ds_data) const { bool skip = false; auto pool_state = GetDescriptorPoolState(p_alloc_info->descriptorPool); for (uint32_t i = 0; i < p_alloc_info->descriptorSetCount; i++) { auto layout = GetDescriptorSetLayoutShared(p_alloc_info->pSetLayouts[i]); if (layout) { // nullptr layout indicates no valid layout handle for this device, validated/logged in object_tracker if (layout->IsPushDescriptor()) { skip |= LogError(p_alloc_info->pSetLayouts[i], "VUID-VkDescriptorSetAllocateInfo-pSetLayouts-00308", "%s specified at pSetLayouts[%" PRIu32 "] in vkAllocateDescriptorSets() was created with invalid flag %s set.", report_data->FormatHandle(p_alloc_info->pSetLayouts[i]).c_str(), i, "VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR"); } if (layout->GetCreateFlags() & VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT && !(pool_state->createInfo.flags & VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT)) { skip |= LogError( device, "VUID-VkDescriptorSetAllocateInfo-pSetLayouts-03044", "vkAllocateDescriptorSets(): Descriptor set layout create flags and pool create flags mismatch for index (%d)", i); } } } if (!device_extensions.vk_khr_maintenance1) { // Track number of descriptorSets allowable in this pool if (pool_state->availableSets < p_alloc_info->descriptorSetCount) { skip |= LogError(pool_state->pool, "VUID-VkDescriptorSetAllocateInfo-descriptorSetCount-00306", "vkAllocateDescriptorSets(): Unable to allocate %u descriptorSets from %s" ". 
This pool only has %d descriptorSets remaining.", p_alloc_info->descriptorSetCount, report_data->FormatHandle(pool_state->pool).c_str(), pool_state->availableSets); } // Determine whether descriptor counts are satisfiable for (auto it = ds_data->required_descriptors_by_type.begin(); it != ds_data->required_descriptors_by_type.end(); ++it) { auto count_iter = pool_state->availableDescriptorTypeCount.find(it->first); uint32_t available_count = (count_iter != pool_state->availableDescriptorTypeCount.end()) ? count_iter->second : 0; if (ds_data->required_descriptors_by_type.at(it->first) > available_count) { skip |= LogError(pool_state->pool, "VUID-VkDescriptorSetAllocateInfo-descriptorPool-00307", "vkAllocateDescriptorSets(): Unable to allocate %u descriptors of type %s from %s" ". This pool only has %d descriptors of this type remaining.", ds_data->required_descriptors_by_type.at(it->first), string_VkDescriptorType(VkDescriptorType(it->first)), report_data->FormatHandle(pool_state->pool).c_str(), available_count); } } } const auto *count_allocate_info = LvlFindInChain<VkDescriptorSetVariableDescriptorCountAllocateInfo>(p_alloc_info->pNext); if (count_allocate_info) { if (count_allocate_info->descriptorSetCount != 0 && count_allocate_info->descriptorSetCount != p_alloc_info->descriptorSetCount) { skip |= LogError(device, "VUID-VkDescriptorSetVariableDescriptorCountAllocateInfo-descriptorSetCount-03045", "vkAllocateDescriptorSets(): VkDescriptorSetAllocateInfo::descriptorSetCount (%d) != " "VkDescriptorSetVariableDescriptorCountAllocateInfo::descriptorSetCount (%d)", p_alloc_info->descriptorSetCount, count_allocate_info->descriptorSetCount); } if (count_allocate_info->descriptorSetCount == p_alloc_info->descriptorSetCount) { for (uint32_t i = 0; i < p_alloc_info->descriptorSetCount; i++) { auto layout = GetDescriptorSetLayoutShared(p_alloc_info->pSetLayouts[i]); if (count_allocate_info->pDescriptorCounts[i] > layout->GetDescriptorCountFromBinding(layout->GetMaxBinding())) { skip |= LogError(device, "VUID-VkDescriptorSetVariableDescriptorCountAllocateInfo-pSetLayouts-03046", "vkAllocateDescriptorSets(): pDescriptorCounts[%d] = (%d), binding's descriptorCount = (%d)", i, count_allocate_info->pDescriptorCounts[i], layout->GetDescriptorCountFromBinding(layout->GetMaxBinding())); } } } } return skip; } const BindingReqMap &cvdescriptorset::PrefilterBindRequestMap::FilteredMap(const CMD_BUFFER_STATE &cb_state, const PIPELINE_STATE &pipeline) { if (IsManyDescriptors()) { filtered_map_.reset(new BindingReqMap); descriptor_set_.FilterBindingReqs(cb_state, pipeline, orig_map_, filtered_map_.get()); return *filtered_map_; } return orig_map_; } // Starting at offset descriptor of given binding, parse over update_count // descriptor updates and verify that for any binding boundaries that are crossed, the next binding(s) are all consistent // Consistency means that their type, stage flags, and whether or not they use immutable samplers matches // If so, return true. 
If not, fill in error_msg and return false bool cvdescriptorset::VerifyUpdateConsistency(debug_report_data *report_data, DescriptorSetLayout::ConstBindingIterator current_binding, uint32_t offset, uint32_t update_count, const char *type, const VkDescriptorSet set, std::string *error_msg) { bool pass = true; // Verify consecutive bindings match (if needed) auto orig_binding = current_binding; while (pass && update_count) { // First, it's legal to offset beyond your own binding so handle that case if (offset > 0) { const auto &index_range = current_binding.GetGlobalIndexRange(); // index_range.start + offset is which descriptor is needed to update. If it > index_range.end, it means the descriptor // isn't in this binding, maybe in next binding. if ((index_range.start + offset) >= index_range.end) { // Advance to next binding, decrement offset by binding size offset -= current_binding.GetDescriptorCount(); ++current_binding; // Verify next consecutive binding matches type, stage flags & immutable sampler use and if AtEnd if (!orig_binding.IsConsistent(current_binding)) { pass = false; } continue; } } update_count -= std::min(update_count, current_binding.GetDescriptorCount() - offset); if (update_count) { // Starting offset is beyond the current binding. Check consistency, update counters and advance to the next binding, // looking for the start point. All bindings (even those skipped) must be consistent with the update and with the // original binding. offset = 0; ++current_binding; // Verify next consecutive binding matches type, stage flags & immutable sampler use and if AtEnd if (!orig_binding.IsConsistent(current_binding)) { pass = false; } } } if (!pass) { std::stringstream error_str; error_str << "Attempting " << type; if (current_binding.Layout()->IsPushDescriptor()) { error_str << " push descriptors"; } else { error_str << " descriptor set " << report_data->FormatHandle(set); } error_str << " binding #" << orig_binding.Binding() << " with #" << update_count << " descriptors being updated but this update oversteps the bounds of this binding and the next binding is " "not consistent with current binding so this update is invalid."; *error_msg = error_str.str(); } return pass; } // Validate the state for a given write update but don't actually perform the update // If an error would occur for this update, return false and fill in details in error_msg string bool CoreChecks::ValidateWriteUpdate(const DescriptorSet *dest_set, const VkWriteDescriptorSet *update, const char *func_name, std::string *error_code, std::string *error_msg) const { const auto dest_layout = dest_set->GetLayout().get(); // Verify dst layout still valid if (dest_layout->destroyed) { *error_code = "VUID-VkWriteDescriptorSet-dstSet-00320"; std::ostringstream str; str << "Cannot call " << func_name << " to perform write update on " << dest_set->StringifySetAndLayout() << " which has been destroyed"; *error_msg = str.str(); return false; } // Verify dst binding exists if (!dest_layout->HasBinding(update->dstBinding)) { *error_code = "VUID-VkWriteDescriptorSet-dstBinding-00315"; std::stringstream error_str; error_str << dest_set->StringifySetAndLayout() << " does not have binding " << update->dstBinding; *error_msg = error_str.str(); return false; } DescriptorSetLayout::ConstBindingIterator dest(dest_layout, update->dstBinding); // Make sure binding isn't empty if (0 == dest.GetDescriptorCount()) { *error_code = "VUID-VkWriteDescriptorSet-dstBinding-00316"; std::stringstream error_str; error_str << 
dest_set->StringifySetAndLayout() << " cannot updated binding " << update->dstBinding << " that has 0 descriptors"; *error_msg = error_str.str(); return false; } // Verify idle ds if (dest_set->in_use.load() && !(dest.GetDescriptorBindingFlags() & (VK_DESCRIPTOR_BINDING_UPDATE_UNUSED_WHILE_PENDING_BIT | VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT))) { // TODO : Re-using Free Idle error code, need write update idle error code *error_code = "VUID-vkFreeDescriptorSets-pDescriptorSets-00309"; std::stringstream error_str; error_str << "Cannot call " << func_name << " to perform write update on " << dest_set->StringifySetAndLayout() << " that is in use by a command buffer"; *error_msg = error_str.str(); return false; } // We know that binding is valid, verify update and do update on each descriptor auto start_idx = dest.GetGlobalIndexRange().start + update->dstArrayElement; auto type = dest.GetType(); if (type != update->descriptorType) { *error_code = "VUID-VkWriteDescriptorSet-descriptorType-00319"; std::stringstream error_str; error_str << "Attempting write update to " << dest_set->StringifySetAndLayout() << " binding #" << update->dstBinding << " with type " << string_VkDescriptorType(type) << " but update type is " << string_VkDescriptorType(update->descriptorType); *error_msg = error_str.str(); return false; } if (type == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT) { if ((update->dstArrayElement % 4) != 0) { *error_code = "VUID-VkWriteDescriptorSet-descriptorType-02219"; std::stringstream error_str; error_str << "Attempting write update to " << dest_set->StringifySetAndLayout() << " binding #" << update->dstBinding << " with " << "dstArrayElement " << update->dstArrayElement << " not a multiple of 4"; *error_msg = error_str.str(); return false; } if ((update->descriptorCount % 4) != 0) { *error_code = "VUID-VkWriteDescriptorSet-descriptorType-02220"; std::stringstream error_str; error_str << "Attempting write update to " << dest_set->StringifySetAndLayout() << " binding #" << update->dstBinding << " with " << "descriptorCount " << update->descriptorCount << " not a multiple of 4"; *error_msg = error_str.str(); return false; } const auto *write_inline_info = LvlFindInChain<VkWriteDescriptorSetInlineUniformBlockEXT>(update->pNext); if (!write_inline_info || write_inline_info->dataSize != update->descriptorCount) { *error_code = "VUID-VkWriteDescriptorSet-descriptorType-02221"; std::stringstream error_str; if (!write_inline_info) { error_str << "Attempting write update to " << dest_set->StringifySetAndLayout() << " binding #" << update->dstBinding << " with " << "VkWriteDescriptorSetInlineUniformBlockEXT missing"; } else { error_str << "Attempting write update to " << dest_set->StringifySetAndLayout() << " binding #" << update->dstBinding << " with " << "VkWriteDescriptorSetInlineUniformBlockEXT dataSize " << write_inline_info->dataSize << " not equal to " << "VkWriteDescriptorSet descriptorCount " << update->descriptorCount; } *error_msg = error_str.str(); return false; } // This error is probably unreachable due to the previous two errors if (write_inline_info && (write_inline_info->dataSize % 4) != 0) { *error_code = "VUID-VkWriteDescriptorSetInlineUniformBlockEXT-dataSize-02222"; std::stringstream error_str; error_str << "Attempting write update to " << dest_set->StringifySetAndLayout() << " binding #" << update->dstBinding << " with " << "VkWriteDescriptorSetInlineUniformBlockEXT dataSize " << write_inline_info->dataSize << " not a multiple of 4"; *error_msg = error_str.str(); return 
false; } } // Verify all bindings update share identical properties across all items if (update->descriptorCount > 0) { // Save first binding information and error if something different is found DescriptorSetLayout::ConstBindingIterator current_binding(dest_layout, update->dstBinding); VkShaderStageFlags stage_flags = current_binding.GetStageFlags(); VkDescriptorType descriptor_type = current_binding.GetType(); bool immutable_samplers = (current_binding.GetImmutableSamplerPtr() == nullptr); uint32_t dst_array_element = update->dstArrayElement; for (uint32_t i = 0; i < update->descriptorCount;) { if (current_binding.AtEnd() == true) { break; // prevents setting error here if bindings don't exist } // Check for consistent stageFlags and descriptorType if ((current_binding.GetStageFlags() != stage_flags) || (current_binding.GetType() != descriptor_type)) { *error_code = "VUID-VkWriteDescriptorSet-descriptorCount-00317"; std::stringstream error_str; error_str << "Attempting write update to " << dest_set->StringifySetAndLayout() << " binding index #" << current_binding.GetIndex() << " (" << i << " from dstBinding offset)" << " with a different stageFlag and/or descriptorType from previous bindings." << " All bindings must have consecutive stageFlag and/or descriptorType across a VkWriteDescriptorSet"; *error_msg = error_str.str(); return false; } // Check if all immutableSamplers or not if ((current_binding.GetImmutableSamplerPtr() == nullptr) != immutable_samplers) { *error_code = "VUID-VkWriteDescriptorSet-descriptorCount-00318"; std::stringstream error_str; error_str << "Attempting write update to " << dest_set->StringifySetAndLayout() << " binding index #" << current_binding.GetIndex() << " (" << i << " from dstBinding offset)" << " with a different usage of immutable samplers from previous bindings." 
<< " All bindings must have all or none usage of immutable samplers across a VkWriteDescriptorSet"; *error_msg = error_str.str(); return false; } // Skip the remaining descriptors for this binding, and move to the next binding i += (current_binding.GetDescriptorCount() - dst_array_element); dst_array_element = 0; ++current_binding; } } // Verify consecutive bindings match (if needed) if (!VerifyUpdateConsistency(report_data, DescriptorSetLayout::ConstBindingIterator(dest_layout, update->dstBinding), update->dstArrayElement, update->descriptorCount, "write update to", dest_set->GetSet(), error_msg)) { // TODO : Should break out "consecutive binding updates" language into valid usage statements *error_code = "VUID-VkWriteDescriptorSet-dstArrayElement-00321"; return false; } // Verify write to variable descriptor if (dest_set->IsVariableDescriptorCount(update->dstBinding)) { if ((update->dstArrayElement + update->descriptorCount) > dest_set->GetVariableDescriptorCount()) { std::stringstream error_str; *error_code = "VUID-VkWriteDescriptorSet-dstArrayElement-00321"; error_str << "Attempting write update to " << dest_set->StringifySetAndLayout() << " binding index #" << update->dstBinding << " array element " << update->dstArrayElement << " with " << update->descriptorCount << " writes but variable descriptor size is " << dest_set->GetVariableDescriptorCount(); *error_msg = error_str.str(); return false; } } // Update is within bounds and consistent so last step is to validate update contents if (!VerifyWriteUpdateContents(dest_set, update, start_idx, func_name, error_code, error_msg)) { std::stringstream error_str; error_str << "Write update to " << dest_set->StringifySetAndLayout() << " binding #" << update->dstBinding << " failed with error message: " << error_msg->c_str(); *error_msg = error_str.str(); return false; } // All checks passed, update is clean return true; } // Verify that the contents of the update are ok, but don't perform actual update bool CoreChecks::VerifyWriteUpdateContents(const DescriptorSet *dest_set, const VkWriteDescriptorSet *update, const uint32_t index, const char *func_name, std::string *error_code, std::string *error_msg) const { using ImageSamplerDescriptor = cvdescriptorset::ImageSamplerDescriptor; using Descriptor = cvdescriptorset::Descriptor; switch (update->descriptorType) { case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER: { for (uint32_t di = 0; di < update->descriptorCount; ++di) { // Validate image auto image_view = update->pImageInfo[di].imageView; auto image_layout = update->pImageInfo[di].imageLayout; auto sampler = update->pImageInfo[di].sampler; auto iv_state = GetImageViewState(image_view); const ImageSamplerDescriptor *desc = (const ImageSamplerDescriptor *)dest_set->GetDescriptorFromGlobalIndex(index + di); if (image_view) { auto image_state = iv_state->image_state.get(); if (!ValidateImageUpdate(image_view, image_layout, update->descriptorType, func_name, error_code, error_msg)) { std::stringstream error_str; error_str << "Attempted write update to combined image sampler descriptor failed due to: " << error_msg->c_str(); *error_msg = error_str.str(); return false; } if (device_extensions.vk_khr_sampler_ycbcr_conversion) { if (desc->IsImmutableSampler()) { auto sampler_state = GetSamplerState(desc->GetSampler()); if (iv_state && sampler_state) { if (iv_state->samplerConversion != sampler_state->samplerConversion) { *error_code = "VUID-VkWriteDescriptorSet-descriptorType-01948"; std::stringstream error_str; error_str << "Attempted write update to 
combined image sampler and image view and sampler ycbcr " "conversions are not identical, sampler: " << report_data->FormatHandle(desc->GetSampler()) << " image view: " << report_data->FormatHandle(iv_state->image_view) << "."; *error_msg = error_str.str(); return false; } } } else { if (iv_state && (iv_state->samplerConversion != VK_NULL_HANDLE)) { *error_code = "VUID-VkWriteDescriptorSet-descriptorType-02738"; std::stringstream error_str; error_str << "Because dstSet (" << report_data->FormatHandle(update->dstSet) << ") is bound to image view (" << report_data->FormatHandle(iv_state->image_view) << ") that includes a YCBCR conversion, it must have been allocated with a layout that " "includes an immutable sampler."; *error_msg = error_str.str(); return false; } } } // If there is an immutable sampler then |sampler| isn't used, so the following VU does not apply. if (sampler && !desc->IsImmutableSampler() && FormatIsMultiplane(image_state->createInfo.format)) { // multiplane formats must be created with mutable format bit if (0 == (image_state->createInfo.flags & VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT)) { *error_code = "VUID-VkDescriptorImageInfo-sampler-01564"; std::stringstream error_str; error_str << "image " << report_data->FormatHandle(image_state->image) << " combined image sampler is a multi-planar " << "format and was not was not created with the VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT"; *error_msg = error_str.str(); return false; } // image view need aspect mask for only the planes supported of format VkImageAspectFlags legal_aspect_flags = (VK_IMAGE_ASPECT_PLANE_0_BIT | VK_IMAGE_ASPECT_PLANE_1_BIT); legal_aspect_flags |= (FormatPlaneCount(image_state->createInfo.format) == 3) ? VK_IMAGE_ASPECT_PLANE_2_BIT : 0; if (0 != (iv_state->create_info.subresourceRange.aspectMask & (~legal_aspect_flags))) { *error_code = "VUID-VkDescriptorImageInfo-sampler-01564"; std::stringstream error_str; error_str << "image " << report_data->FormatHandle(image_state->image) << " combined image sampler is a multi-planar " << "format and " << report_data->FormatHandle(iv_state->image_view) << " aspectMask must only include " << string_VkImageAspectFlags(legal_aspect_flags); *error_msg = error_str.str(); return false; } } // Verify portability auto sampler_state = GetSamplerState(sampler); if (sampler_state) { if (ExtEnabled::kNotEnabled != device_extensions.vk_khr_portability_subset) { if ((VK_FALSE == enabled_features.portability_subset_features.mutableComparisonSamplers) && (VK_FALSE != sampler_state->createInfo.compareEnable)) { LogError(device, "VUID-VkDescriptorImageInfo-mutableComparisonSamplers-04450", "%s (portability error): sampler comparison not available.", func_name); } } } } } } // Fall through case VK_DESCRIPTOR_TYPE_SAMPLER: { for (uint32_t di = 0; di < update->descriptorCount; ++di) { const auto *desc = static_cast<const Descriptor *>(dest_set->GetDescriptorFromGlobalIndex(index + di)); if (!desc->IsImmutableSampler()) { if (!ValidateSampler(update->pImageInfo[di].sampler)) { *error_code = "VUID-VkWriteDescriptorSet-descriptorType-00325"; std::stringstream error_str; error_str << "Attempted write update to sampler descriptor with invalid sampler: " << report_data->FormatHandle(update->pImageInfo[di].sampler) << "."; *error_msg = error_str.str(); return false; } } else if (update->descriptorType == VK_DESCRIPTOR_TYPE_SAMPLER) { *error_code = "VUID-VkWriteDescriptorSet-descriptorType-02752"; std::stringstream error_str; error_str << "Attempted write update to an immutable sampler descriptor."; 
*error_msg = error_str.str(); return false; } } break; } case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE: case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT: case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: { for (uint32_t di = 0; di < update->descriptorCount; ++di) { auto image_view = update->pImageInfo[di].imageView; auto image_layout = update->pImageInfo[di].imageLayout; if (image_view) { if (!ValidateImageUpdate(image_view, image_layout, update->descriptorType, func_name, error_code, error_msg)) { std::stringstream error_str; error_str << "Attempted write update to image descriptor failed due to: " << error_msg->c_str(); *error_msg = error_str.str(); return false; } } } break; } case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER: case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: { for (uint32_t di = 0; di < update->descriptorCount; ++di) { auto buffer_view = update->pTexelBufferView[di]; if (buffer_view) { auto bv_state = GetBufferViewState(buffer_view); if (!bv_state) { *error_code = "VUID-VkWriteDescriptorSet-descriptorType-02994"; std::stringstream error_str; error_str << "Attempted write update to texel buffer descriptor with invalid buffer view: " << report_data->FormatHandle(buffer_view); *error_msg = error_str.str(); return false; } auto buffer = bv_state->create_info.buffer; auto buffer_state = GetBufferState(buffer); // Verify that buffer underlying the view hasn't been destroyed prematurely if (!buffer_state) { *error_code = "VUID-VkWriteDescriptorSet-descriptorType-02994"; std::stringstream error_str; error_str << "Attempted write update to texel buffer descriptor failed because underlying buffer (" << report_data->FormatHandle(buffer) << ") has been destroyed: " << error_msg->c_str(); *error_msg = error_str.str(); return false; } else if (!cvdescriptorset::ValidateBufferUsage(report_data, buffer_state, update->descriptorType, error_code, error_msg)) { std::stringstream error_str; error_str << "Attempted write update to texel buffer descriptor failed due to: " << error_msg->c_str(); *error_msg = error_str.str(); return false; } } } break; } case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER: case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC: case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER: case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: { for (uint32_t di = 0; di < update->descriptorCount; ++di) { if (update->pBufferInfo[di].buffer) { if (!ValidateBufferUpdate(update->pBufferInfo + di, update->descriptorType, func_name, error_code, error_msg)) { std::stringstream error_str; error_str << "Attempted write update to buffer descriptor failed due to: " << error_msg->c_str(); *error_msg = error_str.str(); return false; } } } break; } case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT: break; case VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_NV: { const auto *acc_info = LvlFindInChain<VkWriteDescriptorSetAccelerationStructureNV>(update->pNext); for (uint32_t di = 0; di < update->descriptorCount; ++di) { if (!ValidateAccelerationStructureUpdate(GetAccelerationStructureStateNV(acc_info->pAccelerationStructures[di]), func_name, error_code, error_msg)) { std::stringstream error_str; error_str << "Attempted write update to acceleration structure descriptor failed due to: " << error_msg->c_str(); *error_msg = error_str.str(); return false; } } } break; // KHR acceleration structures don't require memory to be bound manually to them. case VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR: break; default: assert(0); // We've already verified update type so should never get here break; } // All checks passed so update contents are good return true; }
1
15,470
Does this actually produce different code?
KhronosGroup-Vulkan-ValidationLayers
cpp
@@ -0,0 +1,7 @@ +if (node.getAttribute('onFocus') === 'this.blur()') { + return false; +} +if (node.getAttribute('onFocus').indexOf('this.blur()') > -1) { + return undefined; +} +return true;
1
1
13,307
This should account for whitespace. Simply putting `.trim()` on the attribute value should do.
dequelabs-axe-core
js
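A minimal sketch of the `.trim()` suggestion from the review comment above, assuming `node.getAttribute('onFocus')` has already been checked for null (that surrounding code is not shown in the patch):

// Hypothetical reworking of the patched check with the attribute value trimmed first.
var onFocus = node.getAttribute('onFocus').trim();
if (onFocus === 'this.blur()') {
    // the handler is exactly a blur call once surrounding whitespace is ignored
    return false;
}
if (onFocus.indexOf('this.blur()') > -1) {
    // a blur call appears alongside other code
    return undefined;
}
return true;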
@@ -105,6 +105,9 @@ func newVXLANManager( blackHoleProto = dpConfig.DeviceRouteProtocol } + noencTarget := routetable.Target{Type: routetable.TargetTypeNoEncap} + bhTarget := routetable.Target{Type: routetable.TargetTypeBlackhole} + brt := routetable.New( []string{routetable.InterfaceNone}, 4,
1
// Copyright (c) 2016-2021 Tigera, Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package intdataplane import ( "errors" "fmt" "net" "reflect" "strings" "sync" "syscall" "time" "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus" "github.com/vishvananda/netlink" "github.com/projectcalico/felix/dataplane/common" "github.com/projectcalico/felix/ethtool" "github.com/projectcalico/felix/ip" "github.com/projectcalico/felix/ipsets" "github.com/projectcalico/felix/logutils" "github.com/projectcalico/felix/proto" "github.com/projectcalico/felix/routetable" "github.com/projectcalico/felix/rules" ) // added so that we can shim netlink for tests type netlinkHandle interface { LinkByName(name string) (netlink.Link, error) LinkSetMTU(link netlink.Link, mtu int) error LinkSetUp(link netlink.Link) error AddrList(link netlink.Link, family int) ([]netlink.Addr, error) AddrAdd(link netlink.Link, addr *netlink.Addr) error AddrDel(link netlink.Link, addr *netlink.Addr) error LinkList() ([]netlink.Link, error) LinkAdd(netlink.Link) error LinkDel(netlink.Link) error } type vxlanManager struct { sync.Mutex // Our dependencies. hostname string routeTable routeTable blackholeRouteTable routeTable noEncapRouteTable routeTable // Hold pending updates. routesByDest map[string]*proto.RouteUpdate localIPAMBlocks map[string]*proto.RouteUpdate vtepsByNode map[string]*proto.VXLANTunnelEndpointUpdate // Holds this node's VTEP information. myVTEP *proto.VXLANTunnelEndpointUpdate // VXLAN configuration. vxlanDevice string vxlanID int vxlanPort int // Indicates if configuration has changed since the last apply. 
routesDirty bool ipsetsDataplane common.IPSetsDataplane ipSetMetadata ipsets.IPSetMetadata externalNodeCIDRs []string vtepsDirty bool nlHandle netlinkHandle dpConfig Config noEncapProtocol netlink.RouteProtocol // Used so that we can shim the no encap route table for the tests noEncapRTConstruct func(interfacePrefixes []string, ipVersion uint8, vxlan bool, netlinkTimeout time.Duration, deviceRouteSourceAddress net.IP, deviceRouteProtocol netlink.RouteProtocol, removeExternalRoutes bool) routeTable } const ( defaultVXLANProto netlink.RouteProtocol = 80 ) func newVXLANManager( ipsetsDataplane common.IPSetsDataplane, rt routeTable, deviceName string, dpConfig Config, opRecorder logutils.OpRecorder, ) *vxlanManager { nlHandle, _ := netlink.NewHandle() blackHoleProto := defaultVXLANProto if dpConfig.DeviceRouteProtocol != syscall.RTPROT_BOOT { blackHoleProto = dpConfig.DeviceRouteProtocol } brt := routetable.New( []string{routetable.InterfaceNone}, 4, false, dpConfig.NetlinkTimeout, dpConfig.DeviceRouteSourceAddress, blackHoleProto, false, 0, opRecorder, ) return newVXLANManagerWithShims( ipsetsDataplane, rt, brt, deviceName, dpConfig, nlHandle, func(interfaceRegexes []string, ipVersion uint8, vxlan bool, netlinkTimeout time.Duration, deviceRouteSourceAddress net.IP, deviceRouteProtocol netlink.RouteProtocol, removeExternalRoutes bool) routeTable { return routetable.New(interfaceRegexes, ipVersion, vxlan, netlinkTimeout, deviceRouteSourceAddress, deviceRouteProtocol, removeExternalRoutes, 0, opRecorder) }, ) } func newVXLANManagerWithShims( ipsetsDataplane common.IPSetsDataplane, rt, brt routeTable, deviceName string, dpConfig Config, nlHandle netlinkHandle, noEncapRTConstruct func(interfacePrefixes []string, ipVersion uint8, vxlan bool, netlinkTimeout time.Duration, deviceRouteSourceAddress net.IP, deviceRouteProtocol netlink.RouteProtocol, removeExternalRoutes bool) routeTable, ) *vxlanManager { noEncapProtocol := defaultVXLANProto if dpConfig.DeviceRouteProtocol != syscall.RTPROT_BOOT { noEncapProtocol = dpConfig.DeviceRouteProtocol } return &vxlanManager{ ipsetsDataplane: ipsetsDataplane, ipSetMetadata: ipsets.IPSetMetadata{ MaxSize: dpConfig.MaxIPSetSize, SetID: rules.IPSetIDAllVXLANSourceNets, Type: ipsets.IPSetTypeHashNet, }, hostname: dpConfig.Hostname, routeTable: rt, blackholeRouteTable: brt, routesByDest: map[string]*proto.RouteUpdate{}, localIPAMBlocks: map[string]*proto.RouteUpdate{}, vtepsByNode: map[string]*proto.VXLANTunnelEndpointUpdate{}, vxlanDevice: deviceName, vxlanID: dpConfig.RulesConfig.VXLANVNI, vxlanPort: dpConfig.RulesConfig.VXLANPort, externalNodeCIDRs: dpConfig.ExternalNodesCidrs, routesDirty: true, vtepsDirty: true, dpConfig: dpConfig, nlHandle: nlHandle, noEncapProtocol: noEncapProtocol, noEncapRTConstruct: noEncapRTConstruct, } } func (m *vxlanManager) OnUpdate(protoBufMsg interface{}) { switch msg := protoBufMsg.(type) { case *proto.RouteUpdate: // In case the route changes type to one we no longer care about... 
m.deleteRoute(msg.Dst) if msg.Type == proto.RouteType_REMOTE_WORKLOAD && msg.IpPoolType == proto.IPPoolType_VXLAN { logrus.WithField("msg", msg).Debug("VXLAN data plane received route update") m.routesByDest[msg.Dst] = msg m.routesDirty = true } // Process IPAM blocks that aren't associated to a single or /32 local workload if routeIsLocalVXLANBlock(msg) { logrus.WithField("msg", msg).Debug("VXLAN data plane received route update for IPAM block") m.localIPAMBlocks[msg.Dst] = msg m.routesDirty = true } else if _, ok := m.localIPAMBlocks[msg.Dst]; ok { logrus.WithField("msg", msg).Debug("VXLAN data plane IPAM block changed to something else") delete(m.localIPAMBlocks, msg.Dst) m.routesDirty = true } case *proto.RouteRemove: m.deleteRoute(msg.Dst) case *proto.VXLANTunnelEndpointUpdate: logrus.WithField("msg", msg).Debug("VXLAN data plane received VTEP update") if msg.Node == m.hostname { m.setLocalVTEP(msg) } else { m.vtepsByNode[msg.Node] = msg } m.routesDirty = true m.vtepsDirty = true case *proto.VXLANTunnelEndpointRemove: logrus.WithField("msg", msg).Debug("VXLAN data plane received VTEP remove") if msg.Node == m.hostname { m.setLocalVTEP(nil) } else { delete(m.vtepsByNode, msg.Node) } m.routesDirty = true m.vtepsDirty = true } } func routeIsLocalVXLANBlock(msg *proto.RouteUpdate) bool { // RouteType_LOCAL_WORKLOAD means "local IPAM block _or_ /32 of workload" if msg.Type != proto.RouteType_LOCAL_WORKLOAD { return false } // Only care about VXLAN blocks. if msg.IpPoolType != proto.IPPoolType_VXLAN { return false } // Ignore routes that we know are from local workload endpoints. if msg.LocalWorkload { return false } // Ignore /32 routes in any case for two reasons: // * If we have a /32 block then our blackhole route would stop the CNI plugin from programming its /32 for a // newly added workload. // * If this isn't a /32 block then it must be a borrowed /32 from another block. In that case, we know we're // racing with CNI, adding a new workload. We've received the borrowed IP but not the workload endpoint yet. if strings.HasSuffix(msg.Dst, "/32") { return false } return true } func (m *vxlanManager) deleteRoute(dst string) { _, exists := m.routesByDest[dst] if exists { logrus.Debug("deleting route dst ", dst) // In case the route changes type to one we no longer care about... 
delete(m.routesByDest, dst) m.routesDirty = true } if _, exists := m.localIPAMBlocks[dst]; exists { logrus.Debug("deleting local ipam dst ", dst) delete(m.localIPAMBlocks, dst) m.routesDirty = true } } func (m *vxlanManager) setLocalVTEP(vtep *proto.VXLANTunnelEndpointUpdate) { m.Lock() defer m.Unlock() m.myVTEP = vtep } func (m *vxlanManager) getLocalVTEP() *proto.VXLANTunnelEndpointUpdate { m.Lock() defer m.Unlock() return m.myVTEP } func (m *vxlanManager) getLocalVTEPParent() (netlink.Link, error) { return m.getParentInterface(m.getLocalVTEP()) } func (m *vxlanManager) getNoEncapRouteTable() routeTable { m.Lock() defer m.Unlock() return m.noEncapRouteTable } func (m *vxlanManager) setNoEncapRouteTable(rt routeTable) { m.Lock() defer m.Unlock() m.noEncapRouteTable = rt } func (m *vxlanManager) GetRouteTableSyncers() []routeTableSyncer { rts := []routeTableSyncer{m.routeTable, m.blackholeRouteTable} noEncapRouteTable := m.getNoEncapRouteTable() if noEncapRouteTable != nil { rts = append(rts, noEncapRouteTable) } return rts } func (m *vxlanManager) blackholeRoutes() []routetable.Target { var rtt []routetable.Target for dst := range m.localIPAMBlocks { cidr, err := ip.CIDRFromString(dst) if err != nil { logrus.WithError(err).Warning( "Error processing IPAM block CIDR: ", dst, ) continue } rtt = append(rtt, routetable.Target{ Type: routetable.TargetTypeBlackhole, CIDR: cidr, }) } logrus.Debug("calculated blackholes ", rtt) return rtt } func (m *vxlanManager) CompleteDeferredWork() error { if !m.routesDirty { logrus.Debug("No change since last application, nothing to do") return nil } if m.vtepsDirty { var allowedVXLANSources []string if m.vtepsDirty { logrus.Debug("VTEPs are dirty, collecting the allowed VXLAN source set") allowedVXLANSources = append(allowedVXLANSources, m.externalNodeCIDRs...) } // The route table accepts the desired state. Start by setting the desired L2 "routes" by iterating // known VTEPs. var l2routes []routetable.L2Target for _, u := range m.vtepsByNode { mac, err := net.ParseMAC(u.Mac) if err != nil { // Don't block programming of other VTEPs if somehow we receive one with a bad mac. logrus.WithError(err).Warn("Failed to parse VTEP mac address") continue } l2routes = append(l2routes, routetable.L2Target{ VTEPMAC: mac, GW: ip.FromString(u.Ipv4Addr), IP: ip.FromString(u.ParentDeviceIp), }) allowedVXLANSources = append(allowedVXLANSources, u.ParentDeviceIp) } logrus.WithField("l2routes", l2routes).Debug("VXLAN manager sending L2 updates") m.routeTable.SetL2Routes(m.vxlanDevice, l2routes) m.ipsetsDataplane.AddOrReplaceIPSet(m.ipSetMetadata, allowedVXLANSources) m.vtepsDirty = false } if m.routesDirty { // Iterate through all of our L3 routes and send them through to the route table. var vxlanRoutes []routetable.Target var noEncapRoutes []routetable.Target for _, r := range m.routesByDest { logCtx := logrus.WithField("route", r) cidr, err := ip.CIDRFromString(r.Dst) if err != nil { // Don't block programming of other routes if somehow we receive one with a bad dst. 
logCtx.WithError(err).Warn("Failed to parse VXLAN route destination") continue } if r.GetSameSubnet() { if r.DstNodeIp == "" { logCtx.Debug("Can't program non-encap route since host IP is not known.") continue } defaultRoute := routetable.Target{ Type: routetable.TargetTypeNoEncap, CIDR: cidr, GW: ip.FromString(r.DstNodeIp), } noEncapRoutes = append(noEncapRoutes, defaultRoute) logCtx.WithField("route", r).Debug("adding no encap route to list for addition") } else { // Extract the gateway addr for this route based on its remote VTEP. vtep, ok := m.vtepsByNode[r.DstNodeName] if !ok { // When the VTEP arrives, it'll set routesDirty=true so this loop will execute again. logCtx.Debug("Dataplane has route with no corresponding VTEP") continue } vxlanRoute := routetable.Target{ Type: routetable.TargetTypeVXLAN, CIDR: cidr, GW: ip.FromString(vtep.Ipv4Addr), } vxlanRoutes = append(vxlanRoutes, vxlanRoute) logCtx.WithField("route", vxlanRoute).Debug("adding vxlan route to list for addition") } } logrus.WithField("vxlanroutes", vxlanRoutes).Debug("VXLAN manager sending VXLAN L3 updates") m.routeTable.SetRoutes(m.vxlanDevice, vxlanRoutes) m.blackholeRouteTable.SetRoutes(routetable.InterfaceNone, m.blackholeRoutes()) noEncapRouteTable := m.getNoEncapRouteTable() // only set the noEncapRouteTable table if it's nil, as you will lose the routes that are being managed already // and the new table will probably delete routes that were put in there by the previous table if noEncapRouteTable != nil { if parentDevice, err := m.getLocalVTEPParent(); err == nil { ifName := parentDevice.Attrs().Name log.WithField("link", parentDevice).WithField("routes", noEncapRoutes).Debug("VXLAN manager sending unencapsulated L3 updates") noEncapRouteTable.SetRoutes(ifName, noEncapRoutes) } else { return err } } else { return errors.New("no encap route table not set, will defer adding routes") } logrus.Info("VXLAN Manager completed deferred work") m.routesDirty = false } return nil } // KeepVXLANDeviceInSync is a goroutine that configures the VXLAN tunnel device, then periodically // checks that it is still correctly configured. func (m *vxlanManager) KeepVXLANDeviceInSync(mtu int, xsumBroken bool, wait time.Duration) { logrus.WithFields(logrus.Fields{ "mtu": mtu, "xsumBroken": xsumBroken, "wait": wait, }).Info("VXLAN tunnel device thread started.") logNextSuccess := true for { localVTEP := m.getLocalVTEP() if localVTEP == nil { logrus.Debug("Missing local VTEP information, retrying...") time.Sleep(1 * time.Second) continue } if parent, err := m.getLocalVTEPParent(); err != nil { logrus.WithError(err).Warn("Failed configure VXLAN tunnel device, retrying...") time.Sleep(1 * time.Second) continue } else { if m.getNoEncapRouteTable() == nil { noEncapRouteTable := m.noEncapRTConstruct([]string{"^" + parent.Attrs().Name + "$"}, 4, false, m.dpConfig.NetlinkTimeout, m.dpConfig.DeviceRouteSourceAddress, m.noEncapProtocol, false) m.setNoEncapRouteTable(noEncapRouteTable) } } err := m.configureVXLANDevice(mtu, localVTEP, xsumBroken) if err != nil { logrus.WithError(err).Warn("Failed configure VXLAN tunnel device, retrying...") logNextSuccess = true time.Sleep(1 * time.Second) continue } if logNextSuccess { logrus.Info("VXLAN tunnel device configured") logNextSuccess = false } time.Sleep(wait) } } // getParentInterface returns the parent interface for the given local VTEP based on IP address. 
This link returned is nil // if, and only if, an error occurred func (m *vxlanManager) getParentInterface(localVTEP *proto.VXLANTunnelEndpointUpdate) (netlink.Link, error) { links, err := m.nlHandle.LinkList() if err != nil { return nil, err } for _, link := range links { addrs, err := m.nlHandle.AddrList(link, netlink.FAMILY_V4) if err != nil { return nil, err } for _, addr := range addrs { if addr.IPNet.IP.String() == localVTEP.ParentDeviceIp { logrus.Debugf("Found parent interface: %s", link) return link, nil } } } return nil, fmt.Errorf("Unable to find parent interface with address %s", localVTEP.ParentDeviceIp) } // configureVXLANDevice ensures the VXLAN tunnel device is up and configured correctly. func (m *vxlanManager) configureVXLANDevice(mtu int, localVTEP *proto.VXLANTunnelEndpointUpdate, xsumBroken bool) error { logCxt := logrus.WithFields(logrus.Fields{"device": m.vxlanDevice}) logCxt.Debug("Configuring VXLAN tunnel device") parent, err := m.getParentInterface(localVTEP) if err != nil { return err } mac, err := net.ParseMAC(localVTEP.Mac) if err != nil { return err } vxlan := &netlink.Vxlan{ LinkAttrs: netlink.LinkAttrs{ Name: m.vxlanDevice, HardwareAddr: mac, }, VxlanId: m.vxlanID, Port: m.vxlanPort, VtepDevIndex: parent.Attrs().Index, SrcAddr: ip.FromString(localVTEP.ParentDeviceIp).AsNetIP(), } // Try to get the device. link, err := m.nlHandle.LinkByName(m.vxlanDevice) if err != nil { logrus.WithError(err).Info("Failed to get VXLAN tunnel device, assuming it isn't present") if err := m.nlHandle.LinkAdd(vxlan); err == syscall.EEXIST { // Device already exists - likely a race. logrus.Debug("VXLAN device already exists, likely created by someone else.") } else if err != nil { // Error other than "device exists" - return it. return err } // The device now exists - requery it to check that the link exists and is a vxlan device. link, err = m.nlHandle.LinkByName(m.vxlanDevice) if err != nil { return fmt.Errorf("can't locate created vxlan device %v", m.vxlanDevice) } } // At this point, we have successfully queried the existing device, or made sure it exists if it didn't // already. Check for mismatched configuration. If they don't match, recreate the device. if incompat := vxlanLinksIncompat(vxlan, link); incompat != "" { // Existing device doesn't match desired configuration - delete it and recreate. logrus.Warningf("%q exists with incompatible configuration: %v; recreating device", vxlan.Name, incompat) if err = m.nlHandle.LinkDel(link); err != nil { return fmt.Errorf("failed to delete interface: %v", err) } if err = m.nlHandle.LinkAdd(vxlan); err != nil { if err == syscall.EEXIST { log.Warnf("Failed to create VXLAN device. Another device with this VNI may already exist") } return fmt.Errorf("failed to create vxlan interface: %v", err) } link, err = m.nlHandle.LinkByName(vxlan.Name) if err != nil { return err } } // Make sure the MTU is set correctly. attrs := link.Attrs() oldMTU := attrs.MTU if oldMTU != mtu { logCxt.WithFields(logrus.Fields{"old": oldMTU, "new": mtu}).Info("VXLAN device MTU needs to be updated") if err := m.nlHandle.LinkSetMTU(link, mtu); err != nil { log.WithError(err).Warn("Failed to set vxlan tunnel device MTU") } else { logCxt.Info("Updated vxlan tunnel MTU") } } // Make sure the IP address is configured. if err := m.ensureV4AddressOnLink(localVTEP.Ipv4Addr, link); err != nil { return fmt.Errorf("failed to ensure address of interface: %s", err) } // If required, disable checksum offload. 
if xsumBroken { if err := ethtool.EthtoolTXOff(m.vxlanDevice); err != nil { return fmt.Errorf("failed to disable checksum offload: %s", err) } } // And the device is up. if err := m.nlHandle.LinkSetUp(link); err != nil { return fmt.Errorf("failed to set interface up: %s", err) } return nil } // ensureV4AddressOnLink ensures that the provided IPv4 address is configured on the provided Link. If there are other addresses, // this function will remove them, ensuring that the desired IPv4 address is the _only_ address on the Link. func (m *vxlanManager) ensureV4AddressOnLink(ipStr string, link netlink.Link) error { _, net, err := net.ParseCIDR(ipStr + "/32") if err != nil { return err } addr := netlink.Addr{IPNet: net} existingAddrs, err := m.nlHandle.AddrList(link, netlink.FAMILY_V4) if err != nil { return err } // Remove any addresses which we don't want. addrPresent := false for _, existing := range existingAddrs { if reflect.DeepEqual(existing.IPNet, addr.IPNet) { addrPresent = true continue } logrus.WithFields(logrus.Fields{"address": existing, "link": link.Attrs().Name}).Warn("Removing unwanted IP from VXLAN device") if err := m.nlHandle.AddrDel(link, &existing); err != nil { return fmt.Errorf("failed to remove IP address %s", existing) } } // Actually add the desired address to the interface if needed. if !addrPresent { logrus.WithFields(logrus.Fields{"address": addr}).Info("Assigning address to VXLAN device") if err := m.nlHandle.AddrAdd(link, &addr); err != nil { return fmt.Errorf("failed to add IP address") } } return nil } // vlanLinksIncompat takes two vxlan devices and compares them to make sure they match. If they do not match, // this function will return a mesasge indicating which configuration is mismatched. func vxlanLinksIncompat(l1, l2 netlink.Link) string { if l1.Type() != l2.Type() { return fmt.Sprintf("link type: %v vs %v", l1.Type(), l2.Type()) } v1 := l1.(*netlink.Vxlan) v2 := l2.(*netlink.Vxlan) if v1.VxlanId != v2.VxlanId { return fmt.Sprintf("vni: %v vs %v", v1.VxlanId, v2.VxlanId) } if v1.VtepDevIndex > 0 && v2.VtepDevIndex > 0 && v1.VtepDevIndex != v2.VtepDevIndex { return fmt.Sprintf("vtep (external) interface: %v vs %v", v1.VtepDevIndex, v2.VtepDevIndex) } if len(v1.SrcAddr) > 0 && len(v2.SrcAddr) > 0 && !v1.SrcAddr.Equal(v2.SrcAddr) { return fmt.Sprintf("vtep (external) IP: %v vs %v", v1.SrcAddr, v2.SrcAddr) } if len(v1.Group) > 0 && len(v2.Group) > 0 && !v1.Group.Equal(v2.Group) { return fmt.Sprintf("group address: %v vs %v", v1.Group, v2.Group) } if v1.L2miss != v2.L2miss { return fmt.Sprintf("l2miss: %v vs %v", v1.L2miss, v2.L2miss) } if v1.Port > 0 && v2.Port > 0 && v1.Port != v2.Port { return fmt.Sprintf("port: %v vs %v", v1.Port, v2.Port) } if v1.GBP != v2.GBP { return fmt.Sprintf("gbp: %v vs %v", v1.GBP, v2.GBP) } return "" }
1
19,551
I don't think we need these. Simpler just to put `routetable.TargetType...` inline below.
projectcalico-felix
go
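A sketch of what the review comment above is pointing at: construct the `routetable.Target` literal inline at the point of use instead of via the `noencTarget` / `bhTarget` temporaries. The `targets`, `cidr` and `gw` names below are hypothetical placeholders, since the call sites are not visible in this patch hunk.

// Build the target where it is consumed, mirroring how blackholeRoutes() already does it.
targets = append(targets, routetable.Target{
    Type: routetable.TargetTypeNoEncap,
    CIDR: cidr,
    GW:   gw,
})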
@@ -176,6 +176,11 @@ def get_port_from_custom_rules(method, path, data, headers): # assume that this is an S3 POST request with form parameters or multipart form in the body return config.PORT_S3 + if stripped and '/' in stripped: + if method == 'PUT': + # assume that this is an S3 PUT bucket object request with URL path `/<bucket>/object` + return config.PORT_S3 + # detect S3 requests sent from aws-cli using --no-sign-request option if 'aws-cli/' in headers.get('User-Agent', ''): return config.PORT_S3
1
import re import os import sys import json import logging from requests.models import Response from localstack import config from localstack.services import plugins from localstack.constants import ( HEADER_LOCALSTACK_TARGET, HEADER_LOCALSTACK_EDGE_URL, LOCALSTACK_ROOT_FOLDER, PATH_USER_REQUEST) from localstack.utils.common import run, is_root, TMP_THREADS, to_bytes, truncate, to_str, get_service_protocol from localstack.utils.common import safe_requests as requests from localstack.services.generic_proxy import ProxyListener, start_proxy_server from localstack.services.sqs.sqs_listener import is_sqs_queue_url LOG = logging.getLogger(__name__) # Header to indicate that the process should kill itself. This is required because if # this process is started as root, then we cannot kill it from a non-root process HEADER_KILL_SIGNAL = 'x-localstack-kill' class ProxyListenerEdge(ProxyListener): def forward_request(self, method, path, data, headers): if method == 'OPTIONS': return 200 if path.split('?')[0] == '/health': return serve_health_endpoint(method, path, data) # kill the process if we receive this header headers.get(HEADER_KILL_SIGNAL) and os._exit(0) target = headers.get('x-amz-target', '') auth_header = headers.get('authorization', '') host = headers.get('host', '') headers[HEADER_LOCALSTACK_EDGE_URL] = 'https://%s' % host # extract API details api, port, path, host = get_api_from_headers(headers, path) if port and int(port) < 0: return 404 if not port: port = get_port_from_custom_rules(method, path, data, headers) or port if not port: if api in ['', None, '_unknown_']: truncated = truncate(data) LOG.info(('Unable to find forwarding rule for host "%s", path "%s", ' 'target header "%s", auth header "%s", data "%s"') % (host, path, target, auth_header, truncated)) else: LOG.info(('Unable to determine forwarding port for API "%s" - please ' 'make sure this API is enabled via the SERVICES configuration') % api) response = Response() response.status_code = 404 response._content = '{"status": "running"}' return response connect_host = '%s:%s' % (config.HOSTNAME, port) url = '%s://%s%s' % (get_service_protocol(), connect_host, path) headers['Host'] = host function = getattr(requests, method.lower()) if isinstance(data, dict): data = json.dumps(data) response = function(url, data=data, headers=headers, verify=False, stream=True) return response def get_api_from_headers(headers, path=None): """ Determine API and backend port based on Authorization headers. """ target = headers.get('x-amz-target', '') host = headers.get('host', '') auth_header = headers.get('authorization', '') ls_target = headers.get(HEADER_LOCALSTACK_TARGET, '') path = path or '/' # initialize result result = '_unknown_', 0 # https://docs.aws.amazon.com/general/latest/gr/sigv4-signed-request-examples.html try: credential_scope = auth_header.split(',')[0].split()[1] _, _, _, service, _ = credential_scope.split('/') result = service, get_service_port_for_account(service, headers) except Exception: pass result_before = result # Fallback rules and route customizations applied below if host.endswith('cloudfront.net'): path = path or '/' result = 'cloudfront', config.PORT_CLOUDFRONT elif target.startswith('AWSCognitoIdentityProviderService') or 'cognito-idp.' in host: result = 'cognito-idp', config.PORT_COGNITO_IDP elif target.startswith('AWSCognitoIdentityService') or 'cognito-identity.' 
in host: result = 'cognito-identity', config.PORT_COGNITO_IDENTITY elif result[0] == 's3' or re.match(r'.*s3(\-website)?\.([^\.]+\.)?amazonaws.com', host): host = re.sub(r's3-website\..*\.amazonaws', 's3.amazonaws', host) result = 's3', config.PORT_S3 elif result[0] == 'states' in auth_header or host.startswith('states.'): result = 'stepfunctions', config.PORT_STEPFUNCTIONS elif result[0] == 'monitoring': result = 'cloudwatch', config.PORT_CLOUDWATCH elif '.execute-api.' in host: result = 'apigateway', config.PORT_APIGATEWAY elif target.startswith('DynamoDBStreams') or host.startswith('streams.dynamodb.'): # Note: DDB streams requests use ../dynamodb/.. auth header, hence we also need to update result_before result = result_before = 'dynamodbstreams', config.PORT_DYNAMODBSTREAMS elif ls_target == 'web' or path == '/graph': result = 'web', config.PORT_WEB_UI return result[0], result_before[1] or result[1], path, host def is_s3_form_data(data_bytes): if(to_bytes('key=') in data_bytes): return True if(to_bytes('Content-Disposition: form-data') in data_bytes and to_bytes('name="key"') in data_bytes): return True return False def serve_health_endpoint(method, path, data): if method == 'GET': reload = 'reload' in path return plugins.get_services_health(reload=reload) if method == 'PUT': data = json.loads(to_str(data)) plugins.set_services_health(data) return {'status': 'OK'} def get_port_from_custom_rules(method, path, data, headers): """ Determine backend port based on custom rules. """ # detect S3 presigned URLs if 'AWSAccessKeyId=' in path or 'Signature=' in path: return config.PORT_S3 # heuristic for SQS queue URLs if is_sqs_queue_url(path): return config.PORT_SQS # DynamoDB shell URLs if path.startswith('/shell') or path.startswith('/dynamodb/shell'): return config.PORT_DYNAMODB # API Gateway invocation URLs if ('/%s/' % PATH_USER_REQUEST) in path: return config.PORT_APIGATEWAY data_bytes = to_bytes(data or '') if path == '/' and to_bytes('QueueName=') in data_bytes: return config.PORT_SQS # TODO: move S3 public URLs to a separate port/endpoint, OR check ACLs here first stripped = path.strip('/') if method in ['GET', 'HEAD'] and '/' in stripped: # assume that this is an S3 GET request with URL path `/<bucket>/<key ...>` return config.PORT_S3 # detect S3 URLs if stripped and '/' not in stripped: if method == 'PUT': # assume that this is an S3 PUT bucket request with URL path `/<bucket>` return config.PORT_S3 if method == 'POST' and is_s3_form_data(data_bytes): # assume that this is an S3 POST request with form parameters or multipart form in the body return config.PORT_S3 # detect S3 requests sent from aws-cli using --no-sign-request option if 'aws-cli/' in headers.get('User-Agent', ''): return config.PORT_S3 def get_service_port_for_account(service, headers): # assume we're only using a single account, hence return the static port mapping from config.py return config.service_port(service) def do_start_edge(port, use_ssl, asynchronous=False): try: # start local DNS server, if present from localstack_ext.services import dns_server dns_server.start_servers() except Exception: pass # get port and start Edge print('Starting edge router (http%s port %s)...' 
% ('s' if use_ssl else '', port)) # use use=True here because our proxy allows both, HTTP and HTTPS traffic proxy = start_proxy_server(port, use_ssl=True, update_listener=ProxyListenerEdge()) if not asynchronous: proxy.join() return proxy def can_use_sudo(): try: run('echo | sudo -S echo', print_error=False) return True except Exception: return False def ensure_can_use_sudo(): if not is_root() and not can_use_sudo(): print('Please enter your sudo password (required to configure local network):') run('sudo echo', stdin=True) def start_edge(port=None, use_ssl=True, asynchronous=False): if not port: port = config.EDGE_PORT if config.EDGE_PORT_HTTP: do_start_edge(config.EDGE_PORT_HTTP, use_ssl=False, asynchronous=True) if port > 1024 or is_root(): return do_start_edge(port, use_ssl, asynchronous=asynchronous) # process requires priviledged port but we're not root -> try running as sudo class Terminator(object): def stop(self, quiet=True): try: url = 'http%s://localhost:%s' % ('s' if use_ssl else '', port) requests.verify_ssl = False requests.post(url, headers={HEADER_KILL_SIGNAL: 'kill'}) except Exception: pass # make sure we can run sudo commands ensure_can_use_sudo() # register a signal handler to terminate the sudo process later on TMP_THREADS.append(Terminator()) # start the process as sudo sudo_cmd = 'sudo ' python_cmd = sys.executable cmd = '%sPYTHONPATH=.:%s %s %s %s' % (sudo_cmd, LOCALSTACK_ROOT_FOLDER, python_cmd, __file__, port) process = run(cmd, asynchronous=asynchronous) return process if __name__ == '__main__': logging.basicConfig() start_edge(int(sys.argv[1]))
1
11,392
Nested if statements. You can merge the two nested statements into a single condition.
localstack-localstack
py
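The review comment above asks for the nested conditions in the patch to be collapsed; a minimal sketch of the merged form, keeping the same behaviour:

if stripped and '/' in stripped and method == 'PUT':
    # assume that this is an S3 PUT bucket object request with URL path `/<bucket>/object`
    return config.PORT_S3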
@@ -45,6 +45,15 @@ public abstract class AbstractTest { return System.getProperty("java.version").startsWith("9."); } + protected boolean isJava10() { + return System.getProperty("java.version").startsWith("10."); + } + + protected boolean isJavaVersionAbove9() { + String jdkVersion = System.getProperty("java.version"); + return Integer.parseInt(jdkVersion.substring(0,jdkVersion.indexOf('.'))) >= 9; + } + protected boolean isJava8() { return System.getProperty("java.version").startsWith("1.8."); }
1
/* * Copyright 2016 Federico Tomassetti * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.github.javaparser.symbolsolver; import com.github.javaparser.utils.CodeGenerationUtils; import java.io.File; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; public abstract class AbstractTest { protected static Path adaptPath(Path path) { if (Files.exists(path)) { return path.toAbsolutePath(); } Path underSymbolSolver = CodeGenerationUtils.mavenModuleRoot(AbstractTest.class).resolve("javaparser-symbol-solver-testing").resolve(path); if (Files.exists(underSymbolSolver)) { return underSymbolSolver; } else { throw new IllegalArgumentException("I cannot adapt the path " + path); } } protected static Path adaptPath(String path) { return adaptPath(Paths.get(path)); } protected boolean isJava9() { return System.getProperty("java.version").startsWith("9."); } protected boolean isJava8() { return System.getProperty("java.version").startsWith("1.8."); } }
1
12,541
Nitpick: add a space after the comma. Also, we should consider that the format of the version could change in the future, so we could end up with something that is not a parsable integer.
javaparser-javaparser
java
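One way the helper from the patch could be hardened along the lines of the review comment above; this is only a sketch under the assumption that an unparsable version string should make the check return false, not the project's actual fix:

protected boolean isJavaVersionAbove9() {
    String jdkVersion = System.getProperty("java.version");
    // handle "9", "9.0.4", "10.0.1" as well as legacy "1.8.0_171"
    int dot = jdkVersion.indexOf('.');
    String major = dot > 0 ? jdkVersion.substring(0, dot) : jdkVersion;
    try {
        return Integer.parseInt(major) >= 9;
    } catch (NumberFormatException e) {
        // e.g. early-access builds such as "11-ea"; be conservative
        return false;
    }
}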
@@ -31,7 +31,7 @@ TEST(Load, SSTLoad) { EXPECT_EQ(ResultCode::SUCCESSED, engine->ingest(files)); std::string result; - EXPECT_EQ(ResultCode::SUCCESSED, engine->get("key", result)); + EXPECT_EQ(ResultCode::SUCCESSED, engine->get("key", &result)); EXPECT_EQ(result, "value"); } } // namespace kvstore
1
/* Copyright (c) 2018 - present, VE Software Inc. All rights reserved * * This source code is licensed under Apache 2.0 License * (found in the LICENSE.Apache file in the root directory) */ #include "base/Base.h" #include <gtest/gtest.h> #include <rocksdb/db.h> #include <rocksdb/slice.h> #include <rocksdb/options.h> #include "kvstore/RocksdbEngine.h" #include "fs/TempFile.h" #include "fs/TempDir.h" namespace nebula { namespace kvstore { TEST(Load, SSTLoad) { rocksdb::Options options; rocksdb::SstFileWriter writer(rocksdb::EnvOptions(), options); fs::TempDir rootPath("/tmp/rocksdb_engine_test.XXXXXX"); auto file = folly::stringPrintf("%s/%s", rootPath.path(), "data.sst"); auto s = writer.Open(file); ASSERT_TRUE(s.ok()); writer.Put("key", "value"); writer.Finish(); auto engine = std::make_unique<RocksdbEngine>(0, rootPath.path()); std::vector<std::string> files = {file}; EXPECT_EQ(ResultCode::SUCCESSED, engine->ingest(files)); std::string result; EXPECT_EQ(ResultCode::SUCCESSED, engine->get("key", result)); EXPECT_EQ(result, "value"); } } // namespace kvstore } // namespace nebula int main(int argc, char** argv) { testing::InitGoogleTest(&argc, argv); folly::init(&argc, &argv, true); google::SetStderrLogging(google::INFO); return RUN_ALL_TESTS(); }
1
14,612
For future reference, I suggest using the `ASSERT_*` family.
vesoft-inc-nebula
cpp
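As the review comment above suggests, the `get()` check in this test is a good candidate for the `ASSERT_*` family, since a failed lookup leaves `result` unset; a minimal sketch of the relevant lines:

std::string result;
// ASSERT_EQ aborts the test body on failure, so the EXPECT_EQ below never
// compares against an unset string when get() fails.
ASSERT_EQ(ResultCode::SUCCESSED, engine->get("key", &result));
EXPECT_EQ(result, "value");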
@@ -68,6 +68,7 @@ #include "modify.h" #include "universe.h" #include "variable.h" +#include "fmt/format.h" #include <cstring>
1
/* ---------------------------------------------------------------------- LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator https://lammps.sandia.gov/, Sandia National Laboratories Steve Plimpton, [email protected] Copyright (2003) Sandia Corporation. Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains certain rights in this software. This software is distributed under the GNU General Public License. See the README file in the top-level LAMMPS directory. ------------------------------------------------------------------------- */ /* ---------------------------------------------------------------------- Contributing authors: Axel Kohlmeyer (Temple U), Ryan S. Elliott (UMN), Ellad B. Tadmor (UMN), Yaser Afshar (UMN) ------------------------------------------------------------------------- */ /* ---------------------------------------------------------------------- This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, see <https://www.gnu.org/licenses>. Linking LAMMPS statically or dynamically with other modules is making a combined work based on LAMMPS. Thus, the terms and conditions of the GNU General Public License cover the whole combination. In addition, as a special exception, the copyright holders of LAMMPS give you permission to combine LAMMPS with free software programs or libraries that are released under the GNU LGPL and with code included in the standard release of the "kim-api" under the CDDL (or modified versions of such code, with unchanged license). You may copy and distribute such a system following the terms of the GNU GPL for LAMMPS and the licenses of the other code concerned, provided that you include the source code of that other code when and as the GNU GPL requires distribution of source code. Note that people who make modified versions of LAMMPS are not obligated to grant this special exception for their modified versions; it is their choice whether to do so. The GNU General Public License gives permission to release a modified version without this exception; this exception also makes it possible to release a modified version which carries forward this exception. 
------------------------------------------------------------------------- */ /* ---------------------------------------------------------------------- Designed for use with the kim-api-2.1.0 (and newer) package ------------------------------------------------------------------------- */ #include "kim_init.h" #include "citeme.h" #include "comm.h" #include "domain.h" #include "error.h" #include "fix_store_kim.h" #include "input.h" #include "kim_units.h" #include "modify.h" #include "universe.h" #include "variable.h" #include <cstring> extern "C" { #include "KIM_SimulatorHeaders.h" } using namespace LAMMPS_NS; /* ---------------------------------------------------------------------- */ void KimInit::command(int narg, char **arg) { if ((narg < 2) || (narg > 3)) error->all(FLERR,"Illegal kim_init command"); if (domain->box_exist) error->all(FLERR,"Must use 'kim_init' command before " "simulation box is defined"); char *model_name = new char[strlen(arg[0])+1]; strcpy(model_name,arg[0]); char *user_units = new char[strlen(arg[1])+1]; strcpy(user_units,arg[1]); if (narg == 3) { if (strcmp(arg[2],"unit_conversion_mode")==0) unit_conversion_mode = true; else { error->all(FLERR,"Illegal kim_init command"); } } else unit_conversion_mode = false; char *model_units; KIM_Model *pkim = nullptr; if (universe->me == 0) std::remove("kim.log"); if (universe->nprocs > 1) MPI_Barrier(universe->uworld); determine_model_type_and_units(model_name, user_units, &model_units, pkim); write_log_cite(model_name); do_init(model_name, user_units, model_units, pkim); } /* ---------------------------------------------------------------------- */ namespace { void get_kim_unit_names( char const * const system, KIM_LengthUnit & lengthUnit, KIM_EnergyUnit & energyUnit, KIM_ChargeUnit & chargeUnit, KIM_TemperatureUnit & temperatureUnit, KIM_TimeUnit & timeUnit, Error * error) { if ((strcmp(system,"real")==0)) { lengthUnit = KIM_LENGTH_UNIT_A; energyUnit = KIM_ENERGY_UNIT_kcal_mol; chargeUnit = KIM_CHARGE_UNIT_e; temperatureUnit = KIM_TEMPERATURE_UNIT_K; timeUnit = KIM_TIME_UNIT_fs; } else if ((strcmp(system,"metal")==0)) { lengthUnit = KIM_LENGTH_UNIT_A; energyUnit = KIM_ENERGY_UNIT_eV; chargeUnit = KIM_CHARGE_UNIT_e; temperatureUnit = KIM_TEMPERATURE_UNIT_K; timeUnit = KIM_TIME_UNIT_ps; } else if ((strcmp(system,"si")==0)) { lengthUnit = KIM_LENGTH_UNIT_m; energyUnit = KIM_ENERGY_UNIT_J; chargeUnit = KIM_CHARGE_UNIT_C; temperatureUnit = KIM_TEMPERATURE_UNIT_K; timeUnit = KIM_TIME_UNIT_s; } else if ((strcmp(system,"cgs")==0)) { lengthUnit = KIM_LENGTH_UNIT_cm; energyUnit = KIM_ENERGY_UNIT_erg; chargeUnit = KIM_CHARGE_UNIT_statC; temperatureUnit = KIM_TEMPERATURE_UNIT_K; timeUnit = KIM_TIME_UNIT_s; } else if ((strcmp(system,"electron")==0)) { lengthUnit = KIM_LENGTH_UNIT_Bohr; energyUnit = KIM_ENERGY_UNIT_Hartree; chargeUnit = KIM_CHARGE_UNIT_e; temperatureUnit = KIM_TEMPERATURE_UNIT_K; timeUnit = KIM_TIME_UNIT_fs; } else if ((strcmp(system,"lj")==0)) { error->all(FLERR,"LAMMPS unit_style lj not supported by KIM models"); } else { error->all(FLERR,"Unknown unit_style"); } } } // namespace void KimInit::determine_model_type_and_units(char * model_name, char * user_units, char ** model_units, KIM_Model *&pkim) { KIM_LengthUnit lengthUnit; KIM_EnergyUnit energyUnit; KIM_ChargeUnit chargeUnit; KIM_TemperatureUnit temperatureUnit; KIM_TimeUnit timeUnit; int units_accepted; KIM_Collections * kim_Coll; KIM_CollectionItemType itemType; int kim_error = KIM_Collections_Create(&kim_Coll); if (kim_error) { error->all(FLERR,"Unable to access 
KIM Collections to find Model."); } kim_error = KIM_Collections_GetItemType(kim_Coll, model_name, &itemType); if (kim_error) { error->all(FLERR,"KIM Model name not found."); } KIM_Collections_Destroy(&kim_Coll); if (KIM_CollectionItemType_Equal(itemType, KIM_COLLECTION_ITEM_TYPE_portableModel)) { get_kim_unit_names(user_units, lengthUnit, energyUnit, chargeUnit, temperatureUnit, timeUnit, error); int kim_error = KIM_Model_Create(KIM_NUMBERING_zeroBased, lengthUnit, energyUnit, chargeUnit, temperatureUnit, timeUnit, model_name, &units_accepted, &pkim); if (kim_error) error->all(FLERR,"Unable to load KIM Simulator Model."); model_type = MO; if (units_accepted) { *model_units = new char[strlen(user_units)+1]; strcpy(*model_units,user_units); return; } else if (unit_conversion_mode) { KIM_Model_Destroy(&pkim); int const num_systems = 5; char const * const systems[num_systems] = {"metal", "real", "si", "cgs", "electron"}; for (int i=0; i < num_systems; ++i) { get_kim_unit_names(systems[i], lengthUnit, energyUnit, chargeUnit, temperatureUnit, timeUnit, error); kim_error = KIM_Model_Create(KIM_NUMBERING_zeroBased, lengthUnit, energyUnit, chargeUnit, temperatureUnit, timeUnit, model_name, &units_accepted, &pkim); if (units_accepted) { *model_units = new char[strlen(systems[i])+1]; strcpy(*model_units,systems[i]); return; } KIM_Model_Destroy(&pkim); } error->all(FLERR,"KIM Model does not support any lammps unit system"); } else { KIM_Model_Destroy(&pkim); error->all(FLERR,"KIM Model does not support the requested unit system"); } } else if (KIM_CollectionItemType_Equal( itemType, KIM_COLLECTION_ITEM_TYPE_simulatorModel)) { KIM_SimulatorModel * kim_SM; kim_error = KIM_SimulatorModel_Create(model_name, &kim_SM); if (kim_error) error->all(FLERR,"Unable to load KIM Simulator Model."); model_type = SM; int sim_fields; int sim_lines; char const * sim_field; char const * sim_value; KIM_SimulatorModel_GetNumberOfSimulatorFields(kim_SM, &sim_fields); KIM_SimulatorModel_CloseTemplateMap(kim_SM); for (int i=0; i < sim_fields; ++i) { KIM_SimulatorModel_GetSimulatorFieldMetadata( kim_SM,i,&sim_lines,&sim_field); if (0 == strcmp(sim_field,"units")) { KIM_SimulatorModel_GetSimulatorFieldLine(kim_SM,i,0,&sim_value); int len=strlen(sim_value)+1; *model_units = new char[len]; strcpy(*model_units,sim_value); break; } } KIM_SimulatorModel_Destroy(&kim_SM); if ((! unit_conversion_mode) && (strcmp(*model_units, user_units)!=0)) { std::string mesg("Incompatible units for KIM Simulator Model, " "required units = "); mesg += *model_units; error->all(FLERR,mesg); } } } /* ---------------------------------------------------------------------- */ void KimInit::do_init(char *model_name, char *user_units, char *model_units, KIM_Model *&pkim) { // create storage proxy fix. delete existing fix, if needed. 
int ifix = modify->find_fix("KIM_MODEL_STORE"); if (ifix >= 0) modify->delete_fix(ifix); modify->add_fix("KIM_MODEL_STORE all STORE/KIM"); ifix = modify->find_fix("KIM_MODEL_STORE"); FixStoreKIM *fix_store = (FixStoreKIM *) modify->fix[ifix]; fix_store->setptr("model_name", (void *) model_name); fix_store->setptr("user_units", (void *) user_units); fix_store->setptr("model_units", (void *) model_units); // Begin output to log file input->write_echo("#=== BEGIN kim-init ==========================================\n"); KIM_SimulatorModel * simulatorModel; if (model_type == SM) { int kim_error = KIM_SimulatorModel_Create(model_name,&simulatorModel); if (kim_error) error->all(FLERR,"Unable to load KIM Simulator Model."); char const *sim_name, *sim_version; KIM_SimulatorModel_GetSimulatorNameAndVersion( simulatorModel,&sim_name, &sim_version); if (0 != strcmp(sim_name,"LAMMPS")) error->all(FLERR,"Incompatible KIM Simulator Model"); if (comm->me == 0) { std::string mesg("# Using KIM Simulator Model : "); mesg += model_name; mesg += "\n"; mesg += "# For Simulator : "; mesg += std::string(sim_name) + " " + sim_version + "\n"; mesg += "# Running on : LAMMPS "; mesg += lmp->version; mesg += "\n"; mesg += "#\n"; utils::logmesg(lmp,mesg); } fix_store->setptr("simulator_model", (void *) simulatorModel); // need to call this to have access to (some) simulator model init data. KIM_SimulatorModel_CloseTemplateMap(simulatorModel); } // Define unit conversion factor variables and print to log if (unit_conversion_mode) do_variables(user_units, model_units); // set units std::string cmd("units "); cmd += model_units; input->one(cmd); if (model_type == SM) { int sim_fields, sim_lines; char const *sim_field, *sim_value; KIM_SimulatorModel_GetNumberOfSimulatorFields(simulatorModel, &sim_fields); // init model for (int i=0; i < sim_fields; ++i) { KIM_SimulatorModel_GetSimulatorFieldMetadata( simulatorModel,i,&sim_lines,&sim_field); if (0 == strcmp(sim_field,"model-init")) { for (int j=0; j < sim_lines; ++j) { KIM_SimulatorModel_GetSimulatorFieldLine( simulatorModel,i,j,&sim_value); input->one(sim_value); } break; } } // reset template map. KIM_SimulatorModel_OpenAndInitializeTemplateMap(simulatorModel); } else if (model_type == MO) { int numberOfParameters; KIM_Model_GetNumberOfParameters(pkim, &numberOfParameters); std::string mesg = "\nThis model has "; if (numberOfParameters) { KIM_DataType kim_DataType; int extent; char const *str_name = nullptr; char const *str_desc = nullptr; mesg += std::to_string(numberOfParameters) + " mutable parameters. \n"; int max_len(0); for (int i = 0; i < numberOfParameters; ++i) { KIM_Model_GetParameterMetadata(pkim, i, &kim_DataType, &extent, &str_name, &str_desc); max_len = MAX(max_len, (int)strlen(str_name)); } max_len = MAX(18,max_len+1); mesg += fmt::format(" No. | {:<{}} | data type | extent\n", "Parameter name", max_len); mesg += fmt::format("{:-<{}}\n","-",max_len+35); for (int i = 0; i < numberOfParameters; ++i) { KIM_Model_GetParameterMetadata(pkim, i, &kim_DataType, &extent, &str_name, &str_desc); auto data_type = std::string("\""); data_type += KIM_DataType_ToString(kim_DataType) + std::string("\""); mesg += fmt::format(" {:<8} | {:<{}} | {:<10} | {}\n",i+1,str_name, max_len,data_type,extent); } } else mesg += "No mutable parameters. 
\n"; KIM_Model_Destroy(&pkim); input->write_echo(mesg); } // End output to log file input->write_echo("#=== END kim-init ============================================\n\n"); } /* ---------------------------------------------------------------------- */ void KimInit::do_variables(const std::string &from, const std::string &to) { // refuse conversion from or to reduced units if ((from == "lj") || (to == "lj")) error->all(FLERR,"Cannot set up conversion variables for 'lj' units"); // get index to internal style variables. create, if needed. // set conversion factors for newly created variables. double conversion_factor; int ier; std::string var_str; int v_unit; const char *units[] = {"mass", "distance", "time", "energy", "velocity", "force", "torque", "temperature", "pressure", "viscosity", "charge", "dipole", "efield", "density", nullptr}; input->write_echo(fmt::format("# Conversion factors from {} to {}:\n", from,to)); auto variable = input->variable; for (int i = 0; units[i] != nullptr; ++i) { var_str = std::string("_u_") + units[i]; v_unit = variable->find(var_str.c_str()); if (v_unit < 0) { variable->set(var_str + " internal 1.0"); v_unit = variable->find(var_str.c_str()); } ier = lammps_unit_conversion(units[i], from, to, conversion_factor); if (ier != 0) error->all(FLERR,fmt::format("Unable to obtain conversion factor: " "unit = {}; from = {}; to = {}.", units[i], from, to)); variable->internal_set(v_unit,conversion_factor); input->write_echo(fmt::format("variable {:<15s} internal {:<15.12e}\n", var_str, conversion_factor)); } input->write_echo("#\n"); } /* ---------------------------------------------------------------------- */ void KimInit::write_log_cite(char *model_name) { KIM_Collections * coll; int err = KIM_Collections_Create(&coll); if (err) return; int extent; if (model_type == MO) { err = KIM_Collections_CacheListOfItemMetadataFiles( coll,KIM_COLLECTION_ITEM_TYPE_portableModel,model_name,&extent); } else if (model_type == SM) { err = KIM_Collections_CacheListOfItemMetadataFiles( coll,KIM_COLLECTION_ITEM_TYPE_simulatorModel,model_name,&extent); } else { error->all(FLERR,"Unknown model type."); } if (err) { KIM_Collections_Destroy(&coll); return; } for (int i = 0; i < extent; ++i) { char const * fileName; int availableAsString; char const * fileString; err = KIM_Collections_GetItemMetadataFile( coll,i,&fileName,nullptr,nullptr,&availableAsString,&fileString); if (err) continue; if (0 == strncmp("kimcite",fileName,7)) { if ((lmp->citeme) && (availableAsString)) lmp->citeme->add(fileString); } } KIM_Collections_Destroy(&coll); }
1
29,416
No need to include `fmt/format.h` here since the `KimInit` class is derived from `Pointers`. Any class derived from `Pointers` can assume that `lmptype.h`, `mpi.h`, `cstddef`, `cstdio`, `string`, `utils.h`, and `fmt/format.h` are already included through `pointers.h` (see the sketch after this entry).
lammps-lammps
cpp
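To make the review comment above concrete, here is a minimal sketch (the class, method, and message are hypothetical and not part of the LAMMPS sources) of a class derived from `Pointers` that uses `fmt::format` and `utils::logmesg` without a direct `#include "fmt/format.h"`, relying only on the headers the comment says `pointers.h` pulls in transitively.

```cpp
// Hypothetical sketch, not part of LAMMPS: a class derived from Pointers.
// Per the review comment, pointers.h already includes string, utils.h and
// fmt/format.h, so fmt::format and utils::logmesg are usable here without
// any further includes.
#include "pointers.h"

namespace LAMMPS_NS {

class ExampleInfo : protected Pointers {
 public:
  ExampleInfo(class LAMMPS *lmp) : Pointers(lmp) {}

  // Log a short formatted message; fmt comes in via pointers.h.
  void report(int nparams)
  {
    utils::logmesg(lmp, fmt::format("This model has {} mutable parameters.\n",
                                    nparams));
  }
};

}  // namespace LAMMPS_NS
```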
@@ -17,7 +17,10 @@ limitations under the License. package v1alpha1 import ( + "os" + "github.com/pkg/errors" + "k8s.io/apimachinery/pkg/types" apis "github.com/openebs/maya/pkg/apis/openebs.io/upgrade/v1alpha1" cast "github.com/openebs/maya/pkg/castemplate/v1alpha1"
1
/* Copyright 2019 The OpenEBS Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package v1alpha1 import ( "github.com/pkg/errors" apis "github.com/openebs/maya/pkg/apis/openebs.io/upgrade/v1alpha1" cast "github.com/openebs/maya/pkg/castemplate/v1alpha1" upgrade "github.com/openebs/maya/pkg/upgrade/v1alpha1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // Executor contains list of castEngine type Executor struct { engines []cast.Interface } // ExecutorBuilder helps to build Executor instance type ExecutorBuilder struct { object *Executor errors []error } // ExecutorBuilderForConfig returns an instance of ExecutorBuilder //It adds object in ExecutorBuilder struct with the help of config func ExecutorBuilderForConfig(cfg *apis.UpgradeConfig) (b *ExecutorBuilder) { b = &ExecutorBuilder{} castObj, err := cast.KubeClient(). Get(cfg.CASTemplate, metav1.GetOptions{}) if err != nil { b.errors = append(b.errors, errors.WithMessagef(err, "failed to instantiate executor builder: %s", cfg)) return } engines := []cast.Interface{} for _, resource := range cfg.Resources { resource := resource // pin it e, err := upgrade.NewCASTEngineBuilder(). WithCASTemplate(castObj). WithUnitOfUpgrade(&resource). WithRuntimeConfig(cfg.Data). Build() if err != nil { b.errors = append(b.errors, errors.WithMessagef(err, "failed to instantiate executor builder: %s: %s", resource, cfg)) return } engines = append(engines, e) } b.object = &Executor{engines: engines} return b } // Build builds a new instance of Executor with the help of // ExecutorBuilder instance func (eb *ExecutorBuilder) Build() (*Executor, error) { if len(eb.errors) != 0 { return nil, errors.Errorf("builder error: %s", eb.errors) } return eb.object, nil } // Execute runs list of castEngines. It returns error // if there is any while running these engines func (e *Executor) Execute() error { for _, engine := range e.engines { _, err := engine.Run() if err != nil { return errors.WithMessagef(err, "failed to run upgrade engine") } } return nil }
1
13,991
Can we have a prefix of `OPENEBS_IO`, e.g. `OPENEBS_IO_INSTANCE_NAME`? (See the sketch after this entry.)
openebs-maya
go
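As a rough illustration of the `OPENEBS_IO` prefix suggested above, the sketch below looks up `OPENEBS_IO_INSTANCE_NAME` from the environment. Only the variable name comes from the review comment; the helper name and the empty-string fallback are assumptions made for illustration.

```go
// Minimal sketch of the reviewer's suggestion: read the instance name from
// an environment variable carrying the OPENEBS_IO prefix. The helper name
// and fallback behaviour are illustrative assumptions, not part of the patch.
package main

import (
	"fmt"
	"os"
)

// instanceName returns the value of OPENEBS_IO_INSTANCE_NAME, or an empty
// string when the variable is not set.
func instanceName() string {
	if name, ok := os.LookupEnv("OPENEBS_IO_INSTANCE_NAME"); ok {
		return name
	}
	return ""
}

func main() {
	fmt.Printf("instance name: %q\n", instanceName())
}
```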
@@ -1059,8 +1059,14 @@ func (fbo *folderBranchOps) GetTLFCryptKeys(ctx context.Context, func (fbo *folderBranchOps) GetOrCreateRootNode( ctx context.Context, h *TlfHandle, branch BranchName) ( node Node, ei EntryInfo, err error) { - err = errors.New("GetOrCreateRootNode is not supported by " + - "folderBranchOps") + err = errors.New("GetOrCreateRootNode is not supported by folderBranchOps") + return +} + +func (fbo *folderBranchOps) GetRootNode( + ctx context.Context, h *TlfHandle, branch BranchName) ( + node Node, ei EntryInfo, err error) { + err = errors.New("GetRootNode is not supported by folderBranchOps") return }
1
// Copyright 2016 Keybase Inc. All rights reserved. // Use of this source code is governed by a BSD // license that can be found in the LICENSE file. package libkbfs import ( "errors" "fmt" "strings" "sync" "time" "github.com/keybase/backoff" "github.com/keybase/client/go/logger" keybase1 "github.com/keybase/client/go/protocol" "golang.org/x/net/context" ) // mdReqType indicates whether an operation makes MD modifications or not type mdReqType int const ( // A read request that doesn't need an identify to be // performed. mdReadNoIdentify mdReqType = iota // A read request that needs an identify to be performed (if // it hasn't been already). mdReadNeedIdentify // A write request. mdWrite // A rekey request. Doesn't need an identify to be performed, as // a rekey does its own (finer-grained) identifies. mdRekey ) type branchType int const ( standard branchType = iota // an online, read-write branch archive // an online, read-only branch offline // an offline, read-write branch archiveOffline // an offline, read-only branch ) // Constants used in this file. TODO: Make these configurable? const ( // MaxBlockSizeBytesDefault is the default maximum block size for KBFS. // 512K blocks by default, block changes embedded max == 8K. // Block size was chosen somewhat arbitrarily by trying to // minimize the overall size of the history written by a user when // appending 1KB writes to a file, up to a 1GB total file. Here // is the output of a simple script that approximates that // calculation: // // Total history size for 0065536-byte blocks: 1134341128192 bytes // Total history size for 0131072-byte blocks: 618945052672 bytes // Total history size for 0262144-byte blocks: 412786622464 bytes // Total history size for 0524288-byte blocks: 412786622464 bytes // Total history size for 1048576-byte blocks: 618945052672 bytes // Total history size for 2097152-byte blocks: 1134341128192 bytes // Total history size for 4194304-byte blocks: 2216672886784 bytes MaxBlockSizeBytesDefault = 512 << 10 // Maximum number of blocks that can be sent in parallel maxParallelBlockPuts = 100 // Max response size for a single DynamoDB query is 1MB. maxMDsAtATime = 10 // Time between checks for dirty files to flush, in case Sync is // never called. secondsBetweenBackgroundFlushes = 10 // Cap the number of times we retry after a recoverable error maxRetriesOnRecoverableErrors = 10 // When the number of dirty bytes exceeds this level, force a sync. dirtyBytesThreshold = maxParallelBlockPuts * MaxBlockSizeBytesDefault // The timeout for any background task. backgroundTaskTimeout = 1 * time.Minute ) type fboMutexLevel mutexLevel const ( fboMDWriter fboMutexLevel = 1 fboHead = 2 fboBlock = 3 ) func (o fboMutexLevel) String() string { switch o { case fboMDWriter: return "mdWriterLock" case fboHead: return "headLock" case fboBlock: return "blockLock" default: return fmt.Sprintf("Invalid fboMutexLevel %d", int(o)) } } func fboMutexLevelToString(o mutexLevel) string { return (fboMutexLevel(o)).String() } // Rules for working with lockState in FBO: // // - Every "execution flow" (i.e., program flow that happens // sequentially) needs its own lockState object. This usually means // that each "public" FBO method does: // // lState := makeFBOLockState() // // near the top. // // - Plumb lState through to all functions that hold any of the // relevant locks, or are called under those locks. // // This way, violations of the lock hierarchy will be detected at // runtime. 
func makeFBOLockState() *lockState { return makeLevelState(fboMutexLevelToString) } // blockLock is just like a sync.RWMutex, but with an extra operation // (DoRUnlockedIfPossible). type blockLock struct { leveledRWMutex locked bool } func (bl *blockLock) Lock(lState *lockState) { bl.leveledRWMutex.Lock(lState) bl.locked = true } func (bl *blockLock) Unlock(lState *lockState) { bl.locked = false bl.leveledRWMutex.Unlock(lState) } // DoRUnlockedIfPossible must be called when r- or w-locked. If // r-locked, r-unlocks, runs the given function, and r-locks after // it's done. Otherwise, just runs the given function. func (bl *blockLock) DoRUnlockedIfPossible(lState *lockState, f func(*lockState)) { if !bl.locked { bl.RUnlock(lState) defer bl.RLock(lState) } f(lState) } // folderBranchOps implements the KBFSOps interface for a specific // branch of a specific folder. It is go-routine safe for operations // within the folder. // // We use locks to protect against multiple goroutines accessing the // same folder-branch. The goal with our locking strategy is maximize // concurrent access whenever possible. See design/state_machine.md // for more details. There are three important locks: // // 1) mdWriterLock: Any "remote-sync" operation (one which modifies the // folder's metadata) must take this lock during the entirety of // its operation, to avoid forking the MD. // // 2) headLock: This is a read/write mutex. It must be taken for // reading before accessing any part of the current head MD. It // should be taken for the shortest time possible -- that means in // general that it should be taken, and the MD copied to a // goroutine-local variable, and then it can be released. // Remote-sync operations should take it for writing after pushing // all of the blocks and MD to the KBFS servers (i.e., all network // accesses), and then hold it until after all notifications have // been fired, to ensure that no concurrent "local" operations ever // see inconsistent state locally. // // 3) blockLock: This too is a read/write mutex. It must be taken for // reading before accessing any blocks in the block cache that // belong to this folder/branch. This includes checking their // dirty status. It should be taken for the shortest time possible // -- that means in general it should be taken, and then the blocks // that will be modified should be copied to local variables in the // goroutine, and then it should be released. The blocks should // then be modified locally, and then readied and pushed out // remotely. Only after the blocks have been pushed to the server // should a remote-sync operation take the lock again (this time // for writing) and put/finalize the blocks. Write and Truncate // should take blockLock for their entire lifetime, since they // don't involve writes over the network. Furthermore, if a block // is not in the cache and needs to be fetched, we should release // the mutex before doing the network operation, and lock it again // before writing the block back to the cache. // // We want to allow writes and truncates to a file that's currently // being sync'd, like any good networked file system. The tricky part // is making sure the changes can both: a) be read while the sync is // happening, and b) be applied to the new file path after the sync is // done. // // For now, we just do the dumb, brute force thing for now: if a block // is currently being sync'd, it copies the block and puts it back // into the cache as modified. 
Then, when the sync finishes, it // throws away the modified blocks and re-applies the change to the // new file path (which might have a completely different set of // blocks, so we can't just reuse the blocks that were modified during // the sync.) type folderBranchOps struct { config Config folderBranch FolderBranch bid BranchID // protected by mdWriterLock bType branchType observers *observerList // these locks, when locked concurrently by the same goroutine, // should only be taken in the following order to avoid deadlock: mdWriterLock leveledMutex // taken by any method making MD modifications // protects access to head and latestMergedRevision. headLock leveledRWMutex head ImmutableRootMetadata // latestMergedRevision tracks the latest heard merged revision on server latestMergedRevision MetadataRevision blocks folderBlockOps // nodeCache itself is goroutine-safe, but this object's use // of it has special requirements: // // - Reads can call PathFromNode() unlocked, since there are // no guarantees with concurrent reads. // // - Operations that takes mdWriterLock always needs the // most up-to-date paths, so those must call // PathFromNode() under mdWriterLock. // // - Block write operations (write/truncate/sync) need to // coordinate. Specifically, sync must make sure that // blocks referenced in a path (including all of the child // blocks) must exist in the cache during calls to // PathFromNode from write/truncate. This means that sync // must modify dirty file blocks only under blockLock, and // write/truncate must call PathFromNode() under // blockLock. // // Furthermore, calls to UpdatePointer() must happen // before the copy-on-write mode induced by Sync() is // finished. nodeCache NodeCache // Whether we've identified this TLF or not. identifyLock sync.Mutex identifyDone bool identifyTime time.Time // The current status summary for this folder status *folderBranchStatusKeeper // How to log log logger.Logger deferLog logger.Logger // Closed on shutdown shutdownChan chan struct{} // Can be used to turn off notifications for a while (e.g., for testing) updatePauseChan chan (<-chan struct{}) // After a shutdown, this channel will be closed when the register // goroutine completes. updateDoneChan chan struct{} // forceSyncChan is read from by the background sync process // to know when it should sync immediately. forceSyncChan <-chan struct{} // How to resolve conflicts cr *ConflictResolver // Helper class for archiving and cleaning up the blocks for this TLF fbm *folderBlockManager // rekeyWithPromptTimer tracks a timed function that will try to // rekey with a paper key prompt, if enough time has passed. // Protected by mdWriterLock rekeyWithPromptTimer *time.Timer editHistory *TlfEditHistory } var _ KBFSOps = (*folderBranchOps)(nil) var _ fbmHelper = (*folderBranchOps)(nil) // newFolderBranchOps constructs a new folderBranchOps object. func newFolderBranchOps(config Config, fb FolderBranch, bType branchType) *folderBranchOps { nodeCache := newNodeCacheStandard(fb) // make logger branchSuffix := "" if fb.Branch != MasterBranch { branchSuffix = " " + string(fb.Branch) } tlfStringFull := fb.Tlf.String() // Shorten the TLF ID for the module name. 8 characters should be // unique enough for a local node. log := config.MakeLogger(fmt.Sprintf("FBO %s%s", tlfStringFull[:8], branchSuffix)) // But print it out once in full, just in case. 
log.CInfof(nil, "Created new folder-branch for %s", tlfStringFull) observers := newObserverList() mdWriterLock := makeLeveledMutex(mutexLevel(fboMDWriter), &sync.Mutex{}) headLock := makeLeveledRWMutex(mutexLevel(fboHead), &sync.RWMutex{}) blockLockMu := makeLeveledRWMutex(mutexLevel(fboBlock), &sync.RWMutex{}) forceSyncChan := make(chan struct{}) fbo := &folderBranchOps{ config: config, folderBranch: fb, bid: BranchID{}, bType: bType, observers: observers, status: newFolderBranchStatusKeeper(config, nodeCache), mdWriterLock: mdWriterLock, headLock: headLock, blocks: folderBlockOps{ config: config, log: log, folderBranch: fb, observers: observers, forceSyncChan: forceSyncChan, blockLock: blockLock{ leveledRWMutex: blockLockMu, }, dirtyFiles: make(map[BlockPointer]*dirtyFile), unrefCache: make(map[blockRef]*syncInfo), deCache: make(map[blockRef]DirEntry), nodeCache: nodeCache, }, nodeCache: nodeCache, log: log, deferLog: log.CloneWithAddedDepth(1), shutdownChan: make(chan struct{}), updatePauseChan: make(chan (<-chan struct{})), forceSyncChan: forceSyncChan, } fbo.cr = NewConflictResolver(config, fbo) fbo.fbm = newFolderBlockManager(config, fb, fbo) fbo.editHistory = NewTlfEditHistory(config, fbo, log) if config.DoBackgroundFlushes() { go fbo.backgroundFlusher(secondsBetweenBackgroundFlushes * time.Second) } return fbo } // markForReIdentifyIfNeeded checks whether this tlf is identified and mark // it for lazy reidentification if it exceeds time limits. func (fbo *folderBranchOps) markForReIdentifyIfNeeded(now time.Time, maxValid time.Duration) { fbo.identifyLock.Lock() defer fbo.identifyLock.Unlock() if fbo.identifyDone && (now.Before(fbo.identifyTime) || fbo.identifyTime.Add(maxValid).Before(now)) { fbo.log.CDebugf(nil, "Expiring identify from %v", fbo.identifyTime) fbo.identifyDone = false } } // Shutdown safely shuts down any background goroutines that may have // been launched by folderBranchOps. func (fbo *folderBranchOps) Shutdown() error { if fbo.config.CheckStateOnShutdown() { ctx := context.TODO() lState := makeFBOLockState() if fbo.blocks.GetState(lState) == dirtyState { fbo.log.CDebugf(ctx, "Skipping state-checking due to dirty state") } else if !fbo.isMasterBranch(lState) { fbo.log.CDebugf(ctx, "Skipping state-checking due to being staged") } else { // Make sure we're up to date first if err := fbo.SyncFromServerForTesting(ctx, fbo.folderBranch); err != nil { return err } // Check the state for consistency before shutting down. sc := NewStateChecker(fbo.config) if err := sc.CheckMergedState(ctx, fbo.id()); err != nil { return err } } } close(fbo.shutdownChan) fbo.cr.Shutdown() fbo.fbm.shutdown() fbo.editHistory.Shutdown() // Wait for the update goroutine to finish, so that we don't have // any races with logging during test reporting. 
if fbo.updateDoneChan != nil { <-fbo.updateDoneChan } return nil } func (fbo *folderBranchOps) id() TlfID { return fbo.folderBranch.Tlf } func (fbo *folderBranchOps) branch() BranchName { return fbo.folderBranch.Branch } func (fbo *folderBranchOps) GetFavorites(ctx context.Context) ( []Favorite, error) { return nil, errors.New("GetFavorites is not supported by folderBranchOps") } func (fbo *folderBranchOps) RefreshCachedFavorites(ctx context.Context) { // no-op } func (fbo *folderBranchOps) DeleteFavorite(ctx context.Context, fav Favorite) error { return errors.New("DeleteFavorite is not supported by folderBranchOps") } func (fbo *folderBranchOps) AddFavorite(ctx context.Context, fav Favorite) error { return errors.New("AddFavorite is not supported by folderBranchOps") } func (fbo *folderBranchOps) addToFavorites(ctx context.Context, favorites *Favorites, created bool) (err error) { if _, _, err := fbo.config.KBPKI().GetCurrentUserInfo(ctx); err != nil { // Can't favorite while not logged in return nil } lState := makeFBOLockState() head := fbo.getHead(lState) if head == (ImmutableRootMetadata{}) { return OpsCantHandleFavorite{"Can't add a favorite without a handle"} } h := head.GetTlfHandle() favorites.AddAsync(ctx, h.toFavToAdd(created)) return nil } func (fbo *folderBranchOps) deleteFromFavorites(ctx context.Context, favorites *Favorites) error { if _, _, err := fbo.config.KBPKI().GetCurrentUserInfo(ctx); err != nil { // Can't unfavorite while not logged in return nil } lState := makeFBOLockState() head := fbo.getHead(lState) if head == (ImmutableRootMetadata{}) { // This can happen when identifies fail and the head is never set. return OpsCantHandleFavorite{"Can't delete a favorite without a handle"} } h := head.GetTlfHandle() return favorites.Delete(ctx, h.ToFavorite()) } func (fbo *folderBranchOps) getHead(lState *lockState) ImmutableRootMetadata { fbo.headLock.RLock(lState) defer fbo.headLock.RUnlock(lState) return fbo.head } // isMasterBranch should not be called if mdWriterLock is already taken. 
func (fbo *folderBranchOps) isMasterBranch(lState *lockState) bool { fbo.mdWriterLock.Lock(lState) defer fbo.mdWriterLock.Unlock(lState) return fbo.bid == NullBranchID } func (fbo *folderBranchOps) isMasterBranchLocked(lState *lockState) bool { fbo.mdWriterLock.AssertLocked(lState) return fbo.bid == NullBranchID } func (fbo *folderBranchOps) setBranchIDLocked(lState *lockState, bid BranchID) { fbo.mdWriterLock.AssertLocked(lState) fbo.bid = bid if bid == NullBranchID { fbo.status.setCRSummary(nil, nil) } } func (fbo *folderBranchOps) checkDataVersion(p path, ptr BlockPointer) error { if ptr.DataVer < FirstValidDataVer { return InvalidDataVersionError{ptr.DataVer} } // TODO: migrate back to fbo.config.DataVersion if ptr.DataVer > FilesWithHolesDataVer { return NewDataVersionError{p, ptr.DataVer} } return nil } func (fbo *folderBranchOps) setHeadLocked( ctx context.Context, lState *lockState, md ImmutableRootMetadata) error { fbo.mdWriterLock.AssertLocked(lState) fbo.headLock.AssertLocked(lState) isFirstHead := fbo.head == ImmutableRootMetadata{} wasReadable := false if !isFirstHead { wasReadable = fbo.head.IsReadable() if fbo.head.mdID == md.mdID { panic(fmt.Errorf("Re-putting the same MD: %s", md.mdID)) } } fbo.log.CDebugf(ctx, "Setting head revision to %d", md.Revision) err := fbo.config.MDCache().Put(md) if err != nil { return err } // If this is the first time the MD is being set, and we are // operating on unmerged data, initialize the state properly and // kick off conflict resolution. if isFirstHead && md.MergedStatus() == Unmerged { fbo.setBranchIDLocked(lState, md.BID) // Use uninitialized for the merged branch; the unmerged // revision is enough to trigger conflict resolution. fbo.cr.Resolve(md.Revision, MetadataRevisionUninitialized) } else if md.MergedStatus() == Merged { // If we are already merged through this write, the revision would be the // latestMergedRevision on server. fbo.setLatestMergedRevisionLocked(ctx, lState, md.Revision, false) } fbo.head = md fbo.status.setRootMetadata(md) if isFirstHead { // Start registering for updates right away, using this MD // as a starting point. For now only the master branch can // get updates if fbo.branch() == MasterBranch { fbo.updateDoneChan = make(chan struct{}) go fbo.registerAndWaitForUpdates() } } if !wasReadable && md.IsReadable() { // Let any listeners know that this folder is now readable, // which may indicate that a rekey successfully took place. fbo.config.Reporter().Notify(ctx, mdReadSuccessNotification( md.GetTlfHandle().GetCanonicalName(), md.ID.IsPublic())) } return nil } // setInitialHeadUntrustedLocked is for when the given RootMetadata // was fetched not due to a user action, i.e. via a Rekey // notification, and we don't have a TLF name to check against. func (fbo *folderBranchOps) setInitialHeadUntrustedLocked(ctx context.Context, lState *lockState, md ImmutableRootMetadata) error { fbo.mdWriterLock.AssertLocked(lState) fbo.headLock.AssertLocked(lState) if fbo.head != (ImmutableRootMetadata{}) { return errors.New("Unexpected non-nil head in setInitialHeadUntrustedLocked") } return fbo.setHeadLocked(ctx, lState, md) } // setNewInitialHeadLocked is for when we're creating a brand-new TLF. 
func (fbo *folderBranchOps) setNewInitialHeadLocked(ctx context.Context, lState *lockState, md ImmutableRootMetadata) error { fbo.mdWriterLock.AssertLocked(lState) fbo.headLock.AssertLocked(lState) if fbo.head != (ImmutableRootMetadata{}) { return errors.New("Unexpected non-nil head in setNewInitialHeadLocked") } if md.Revision != MetadataRevisionInitial { return fmt.Errorf("setNewInitialHeadLocked unexpectedly called with revision %d", md.Revision) } return fbo.setHeadLocked(ctx, lState, md) } // setInitialHeadUntrustedLocked is for when the given RootMetadata // was fetched due to a user action, and will be checked against the // TLF name. func (fbo *folderBranchOps) setInitialHeadTrustedLocked(ctx context.Context, lState *lockState, md ImmutableRootMetadata) error { fbo.mdWriterLock.AssertLocked(lState) fbo.headLock.AssertLocked(lState) if fbo.head != (ImmutableRootMetadata{}) { return errors.New("Unexpected non-nil head in setInitialHeadUntrustedLocked") } return fbo.setHeadLocked(ctx, lState, md) } // setHeadSuccessorLocked is for when we're applying updates from the // server or when we're applying new updates we created ourselves. func (fbo *folderBranchOps) setHeadSuccessorLocked(ctx context.Context, lState *lockState, md ImmutableRootMetadata, rebased bool) error { fbo.mdWriterLock.AssertLocked(lState) fbo.headLock.AssertLocked(lState) if fbo.head == (ImmutableRootMetadata{}) { // This can happen in tests via SyncFromServerForTesting(). return fbo.setInitialHeadTrustedLocked(ctx, lState, md) } if !rebased { err := fbo.head.CheckValidSuccessor(fbo.head.mdID, md.ReadOnly()) if err != nil { return err } } oldHandle := fbo.head.GetTlfHandle() newHandle := md.GetTlfHandle() // Newer handles should be equal or more resolved over time. // // TODO: In some cases, they shouldn't, e.g. if we're on an // unmerged branch. Add checks for this. resolvesTo, partialResolvedOldHandle, err := oldHandle.ResolvesTo( ctx, fbo.config.Codec(), fbo.config.KBPKI(), *newHandle) if err != nil { return err } oldName := oldHandle.GetCanonicalName() newName := newHandle.GetCanonicalName() if !resolvesTo { return IncompatibleHandleError{ oldName, partialResolvedOldHandle.GetCanonicalName(), newName, } } err = fbo.setHeadLocked(ctx, lState, md) if err != nil { return err } if oldName != newName { fbo.log.CDebugf(ctx, "Handle changed (%s -> %s)", oldName, newName) // If the handle has changed, send out a notification. fbo.observers.tlfHandleChange(ctx, fbo.head.GetTlfHandle()) // Also the folder should be re-identified given the // newly-resolved assertions. func() { fbo.identifyLock.Lock() defer fbo.identifyLock.Unlock() fbo.identifyDone = false }() } return nil } // setHeadPredecessorLocked is for when we're unstaging updates. 
func (fbo *folderBranchOps) setHeadPredecessorLocked(ctx context.Context, lState *lockState, md ImmutableRootMetadata) error { fbo.mdWriterLock.AssertLocked(lState) fbo.headLock.AssertLocked(lState) if fbo.head == (ImmutableRootMetadata{}) { return errors.New("Unexpected nil head in setHeadPredecessorLocked") } if fbo.head.Revision <= MetadataRevisionInitial { return fmt.Errorf("setHeadPredecessorLocked unexpectedly called with revision %d", fbo.head.Revision) } if fbo.head.MergedStatus() != Unmerged { return errors.New("Unexpected merged head in setHeadPredecessorLocked") } err := md.CheckValidSuccessor(md.mdID, fbo.head.ReadOnly()) if err != nil { return err } oldHandle := fbo.head.GetTlfHandle() newHandle := md.GetTlfHandle() // The two handles must be the same, since no rekeying is done // while unmerged. eq, err := oldHandle.Equals(fbo.config.Codec(), *newHandle) if err != nil { return err } if !eq { return fmt.Errorf( "head handle %v unexpectedly not equal to new handle = %v", oldHandle, newHandle) } return fbo.setHeadLocked(ctx, lState, md) } // setHeadConflictResolvedLocked is for when we're setting the merged // update with resolved conflicts. func (fbo *folderBranchOps) setHeadConflictResolvedLocked(ctx context.Context, lState *lockState, md ImmutableRootMetadata) error { fbo.mdWriterLock.AssertLocked(lState) fbo.headLock.AssertLocked(lState) if fbo.head.MergedStatus() != Unmerged { return errors.New("Unexpected merged head in setHeadConflictResolvedLocked") } if md.MergedStatus() != Merged { return errors.New("Unexpected unmerged update in setHeadConflictResolvedLocked") } return fbo.setHeadLocked(ctx, lState, md) } func (fbo *folderBranchOps) identifyOnce( ctx context.Context, md ReadOnlyRootMetadata) error { fbo.identifyLock.Lock() defer fbo.identifyLock.Unlock() if fbo.identifyDone { return nil } h := md.GetTlfHandle() fbo.log.CDebugf(ctx, "Running identifies on %s", h.GetCanonicalPath()) kbpki := fbo.config.KBPKI() err := identifyHandle(ctx, kbpki, kbpki, h) if err != nil { fbo.log.CDebugf(ctx, "Identify finished with error: %v", err) // For now, if the identify fails, let the // next function to hit this code path retry. return err } fbo.log.CDebugf(ctx, "Identify finished successfully") fbo.identifyDone = true fbo.identifyTime = fbo.config.Clock().Now() return nil } // if rtype == mdWrite || mdRekey, then mdWriterLock must be taken func (fbo *folderBranchOps) getMDLocked( ctx context.Context, lState *lockState, rtype mdReqType) ( md ImmutableRootMetadata, err error) { defer func() { if err != nil || rtype == mdReadNoIdentify || rtype == mdRekey { return } err = fbo.identifyOnce(ctx, md.ReadOnly()) }() md = fbo.getHead(lState) if md != (ImmutableRootMetadata{}) { return md, nil } // Unless we're in mdWrite or mdRekey mode, we can't safely fetch // the new MD without causing races, so bail. if rtype != mdWrite && rtype != mdRekey { return ImmutableRootMetadata{}, MDWriteNeededInRequest{} } // We go down this code path either due to a rekey // notification for an unseen TLF, or in some tests. // // TODO: Make tests not take this code path, and keep track of // the fact that MDs coming from rekey notifications are // untrusted. fbo.mdWriterLock.AssertLocked(lState) // Not in cache, fetch from server and add to cache. First, see // if this device has any unmerged commits -- take the latest one. 
mdops := fbo.config.MDOps() // get the head of the unmerged branch for this device (if any) md, err = mdops.GetUnmergedForTLF(ctx, fbo.id(), NullBranchID) if err != nil { return ImmutableRootMetadata{}, err } mergedMD, err := mdops.GetForTLF(ctx, fbo.id()) if err != nil { return ImmutableRootMetadata{}, err } if mergedMD == (ImmutableRootMetadata{}) { return ImmutableRootMetadata{}, fmt.Errorf("Got nil RMD for %s", fbo.id()) } if md == (ImmutableRootMetadata{}) { // There are no unmerged MDs for this device, so just use the current head. md = mergedMD } else { func() { fbo.headLock.Lock(lState) defer fbo.headLock.Unlock(lState) // We don't need to do this for merged head // because the setHeadLocked() already does // that anyway. fbo.setLatestMergedRevisionLocked(ctx, lState, mergedMD.Revision, false) }() } if md.data.Dir.Type != Dir && (!md.IsInitialized() || md.IsReadable()) { return ImmutableRootMetadata{}, fmt.Errorf("Got undecryptable RMD for %s: initialized=%t, readable=%t", fbo.id(), md.IsInitialized(), md.IsReadable()) } fbo.headLock.Lock(lState) defer fbo.headLock.Unlock(lState) err = fbo.setInitialHeadUntrustedLocked(ctx, lState, md) if err != nil { return ImmutableRootMetadata{}, err } return md, nil } func (fbo *folderBranchOps) getMDForReadHelper( ctx context.Context, lState *lockState, rtype mdReqType) (ImmutableRootMetadata, error) { md, err := fbo.getMDLocked(ctx, lState, rtype) if err != nil { return ImmutableRootMetadata{}, err } if !md.ID.IsPublic() { username, uid, err := fbo.config.KBPKI().GetCurrentUserInfo(ctx) if err != nil { return ImmutableRootMetadata{}, err } if !md.GetTlfHandle().IsReader(uid) { return ImmutableRootMetadata{}, NewReadAccessError(md.GetTlfHandle(), username) } } return md, nil } // getMDForFBM is a helper method for the folderBlockManager only. func (fbo *folderBranchOps) getMDForFBM(ctx context.Context) ( ImmutableRootMetadata, error) { lState := makeFBOLockState() return fbo.getMDForReadHelper(ctx, lState, mdReadNoIdentify) } func (fbo *folderBranchOps) getMDForReadNoIdentify( ctx context.Context, lState *lockState) (ImmutableRootMetadata, error) { return fbo.getMDForReadHelper(ctx, lState, mdReadNoIdentify) } func (fbo *folderBranchOps) getMDForReadNeedIdentify( ctx context.Context, lState *lockState) (ImmutableRootMetadata, error) { return fbo.getMDForReadHelper(ctx, lState, mdReadNeedIdentify) } // getMDForWriteLocked returns a new RootMetadata object with an // incremented version number for modification. If the returned object // is put to the MDServer (via MDOps), mdWriterLock must be held until // then. (See comments for mdWriterLock above.) func (fbo *folderBranchOps) getMDForWriteLocked( ctx context.Context, lState *lockState) (*RootMetadata, error) { fbo.mdWriterLock.AssertLocked(lState) md, err := fbo.getMDLocked(ctx, lState, mdWrite) if err != nil { return nil, err } username, uid, err := fbo.config.KBPKI().GetCurrentUserInfo(ctx) if err != nil { return nil, err } if !md.GetTlfHandle().IsWriter(uid) { return nil, NewWriteAccessError(md.GetTlfHandle(), username) } // Make a new successor of the current MD to hold the coming // writes. The caller must pass this into // syncBlockAndCheckEmbedLocked or the changes will be lost. 
newMd, err := md.MakeSuccessor(fbo.config, md.mdID, true) if err != nil { return nil, err } return newMd, nil } func (fbo *folderBranchOps) getMDForRekeyWriteLocked( ctx context.Context, lState *lockState) (rmd *RootMetadata, wasRekeySet bool, err error) { fbo.mdWriterLock.AssertLocked(lState) md, err := fbo.getMDLocked(ctx, lState, mdRekey) if err != nil { return nil, false, err } username, uid, err := fbo.config.KBPKI().GetCurrentUserInfo(ctx) if err != nil { return nil, false, err } handle := md.GetTlfHandle() // must be a reader or writer (it checks both.) if !handle.IsReader(uid) { return nil, false, NewRekeyPermissionError(md.GetTlfHandle(), username) } newMd, err := md.MakeSuccessor(fbo.config, md.mdID, handle.IsWriter(uid)) if err != nil { return nil, false, err } // readers shouldn't modify writer metadata if !handle.IsWriter(uid) && !newMd.IsWriterMetadataCopiedSet() { return nil, false, NewRekeyPermissionError(handle, username) } return newMd, md.IsRekeySet(), nil } func (fbo *folderBranchOps) nowUnixNano() int64 { return fbo.config.Clock().Now().UnixNano() } func (fbo *folderBranchOps) putBlockCheckQuota( ctx context.Context, tlfID TlfID, blockPtr BlockPointer, readyBlockData ReadyBlockData, tlfName CanonicalTlfName) error { err := fbo.config.BlockOps().Put(ctx, tlfID, blockPtr, readyBlockData) if qe, ok := err.(BServerErrorOverQuota); ok && !qe.Throttled { fbo.config.Reporter().ReportErr(ctx, tlfName, tlfID.IsPublic(), WriteMode, OverQuotaWarning{qe.Usage, qe.Limit}) return nil } return err } func (fbo *folderBranchOps) initMDLocked( ctx context.Context, lState *lockState, md *RootMetadata) error { fbo.mdWriterLock.AssertLocked(lState) // create a dblock since one doesn't exist yet username, uid, err := fbo.config.KBPKI().GetCurrentUserInfo(ctx) if err != nil { return err } handle := md.GetTlfHandle() // make sure we're a writer before rekeying or putting any blocks. 
if !handle.IsWriter(uid) { return NewWriteAccessError(handle, username) } newDblock := &DirBlock{ Children: make(map[string]DirEntry), } var expectedKeyGen KeyGen var tlfCryptKey *TLFCryptKey if md.ID.IsPublic() { expectedKeyGen = PublicKeyGen } else { var rekeyDone bool // create a new set of keys for this metadata rekeyDone, tlfCryptKey, err = fbo.config.KeyManager().Rekey(ctx, md, false) if err != nil { return err } if !rekeyDone { return fmt.Errorf("Initial rekey unexpectedly not done for private TLF %v", md.ID) } expectedKeyGen = FirstValidKeyGen } keyGen := md.LatestKeyGeneration() if keyGen != expectedKeyGen { return InvalidKeyGenerationError{md.ID, keyGen} } info, plainSize, readyBlockData, err := fbo.blocks.ReadyBlock( ctx, md.ReadOnly(), newDblock, uid) if err != nil { return err } now := fbo.nowUnixNano() md.data.Dir = DirEntry{ BlockInfo: info, EntryInfo: EntryInfo{ Type: Dir, Size: uint64(plainSize), Mtime: now, Ctime: now, }, } co := newCreateOpForRootDir() md.AddOp(co) md.AddRefBlock(md.data.Dir.BlockInfo) md.UnrefBytes = 0 if err = fbo.putBlockCheckQuota( ctx, md.ID, info.BlockPointer, readyBlockData, md.GetTlfHandle().GetCanonicalName()); err != nil { return err } if err = fbo.config.BlockCache().Put( info.BlockPointer, fbo.id(), newDblock, TransientEntry); err != nil { return err } // finally, write out the new metadata mdID, err := fbo.config.MDOps().Put(ctx, md) if err != nil { return err } fbo.headLock.Lock(lState) defer fbo.headLock.Unlock(lState) if fbo.head != (ImmutableRootMetadata{}) { return fmt.Errorf( "%v: Unexpected MD ID during new MD initialization: %v", md.ID, fbo.head.mdID) } fbo.setNewInitialHeadLocked(ctx, lState, MakeImmutableRootMetadata(md, mdID, fbo.config.Clock().Now())) if err != nil { return err } // cache any new TLF crypt key if tlfCryptKey != nil { err = fbo.config.KeyCache().PutTLFCryptKey(md.ID, keyGen, *tlfCryptKey) if err != nil { return err } } return nil } func (fbo *folderBranchOps) GetTLFCryptKeys(ctx context.Context, h *TlfHandle) (keys []TLFCryptKey, id TlfID, err error) { err = errors.New("GetTLFCryptKeys is not supported by folderBranchOps") return } func (fbo *folderBranchOps) GetOrCreateRootNode( ctx context.Context, h *TlfHandle, branch BranchName) ( node Node, ei EntryInfo, err error) { err = errors.New("GetOrCreateRootNode is not supported by " + "folderBranchOps") return } func (fbo *folderBranchOps) checkNode(node Node) error { fb := node.GetFolderBranch() if fb != fbo.folderBranch { return WrongOpsError{fbo.folderBranch, fb} } return nil } // SetInitialHeadFromServer sets the head to the given // ImmutableRootMetadata, which must be retrieved from the MD server. func (fbo *folderBranchOps) SetInitialHeadFromServer( ctx context.Context, md ImmutableRootMetadata) (err error) { fbo.log.CDebugf(ctx, "SetInitialHeadFromServer, revision=%d (%s)", md.Revision, md.MergedStatus()) defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }() if md.data.Dir.Type != Dir { // Not initialized. return fmt.Errorf("MD with revision=%d not initialized", md.Revision) } return runUnlessCanceled(ctx, func() error { fb := FolderBranch{md.ID, MasterBranch} if fb != fbo.folderBranch { return WrongOpsError{fbo.folderBranch, fb} } // Always identify first when trying to initialize the folder, // even if we turn out not to be a writer. (We can't rely on // the identifyOnce call in getMDLocked, because that isn't // called from the initialization code path when the local // user is not a valid writer.) 
Also, we want to make sure we // fail before we set the head, otherwise future calls will // succeed incorrectly. err = fbo.identifyOnce(ctx, md.ReadOnly()) if err != nil { return err } lState := makeFBOLockState() fbo.mdWriterLock.Lock(lState) defer fbo.mdWriterLock.Unlock(lState) if md.MergedStatus() == Unmerged { mdops := fbo.config.MDOps() mergedMD, err := mdops.GetForTLF(ctx, fbo.id()) if err != nil { return err } func() { fbo.headLock.Lock(lState) defer fbo.headLock.Unlock(lState) fbo.setLatestMergedRevisionLocked(ctx, lState, mergedMD.Revision, false) }() } fbo.headLock.Lock(lState) defer fbo.headLock.Unlock(lState) // Only update the head the first time; later it will be // updated either directly via writes or through the // background update processor. if fbo.head == (ImmutableRootMetadata{}) { err = fbo.setInitialHeadTrustedLocked(ctx, lState, md) if err != nil { return err } } return nil }) } // SetInitialHeadToNew creates a brand-new ImmutableRootMetadata // object and sets the head to that. func (fbo *folderBranchOps) SetInitialHeadToNew( ctx context.Context, id TlfID, handle *TlfHandle) (err error) { fbo.log.CDebugf(ctx, "SetInitialHeadToNew") defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }() bh, err := handle.ToBareHandle() if err != nil { return err } var rmd RootMetadata err = updateNewBareRootMetadata(&rmd.BareRootMetadata, id, bh) if err != nil { return err } // Need to keep the TLF handle around long enough to // rekey the metadata for the first time. rmd.tlfHandle = handle return runUnlessCanceled(ctx, func() error { fb := FolderBranch{rmd.ID, MasterBranch} if fb != fbo.folderBranch { return WrongOpsError{fbo.folderBranch, fb} } // Always identify first when trying to initialize the folder, // even if we turn out not to be a writer. (We can't rely on // the identifyOnce call in getMDLocked, because that isn't // called from the initialization code path when the local // user is not a valid writer.) Also, we want to make sure we // fail before we set the head, otherwise future calls will // succeed incorrectly. err = fbo.identifyOnce(ctx, rmd.ReadOnly()) if err != nil { return err } lState := makeFBOLockState() fbo.mdWriterLock.Lock(lState) defer fbo.mdWriterLock.Unlock(lState) return fbo.initMDLocked(ctx, lState, &rmd) }) } // execMDReadNoIdentifyThenMDWrite first tries to execute the // passed-in method in mdReadNoIdentify mode. If it fails with an // MDWriteNeededInRequest error, it re-executes the method as in // mdWrite mode. The passed-in method must note whether or not this // is an mdWrite call. // // This must only be used by getRootNode(). func (fbo *folderBranchOps) execMDReadNoIdentifyThenMDWrite( lState *lockState, f func(*lockState, mdReqType) error) error { err := f(lState, mdReadNoIdentify) // Redo as an MD write request if needed if _, ok := err.(MDWriteNeededInRequest); ok { fbo.mdWriterLock.Lock(lState) defer fbo.mdWriterLock.Unlock(lState) err = f(lState, mdWrite) } return err } func (fbo *folderBranchOps) getRootNode(ctx context.Context) ( node Node, ei EntryInfo, handle *TlfHandle, err error) { fbo.log.CDebugf(ctx, "getRootNode") defer func() { if err != nil { fbo.deferLog.CDebugf(ctx, "Error: %v", err) } else { // node may still be nil if we're unwinding // from a panic. 
fbo.deferLog.CDebugf(ctx, "Done: %v", node) } }() lState := makeFBOLockState() var md ImmutableRootMetadata err = fbo.execMDReadNoIdentifyThenMDWrite(lState, func(lState *lockState, rtype mdReqType) error { md, err = fbo.getMDLocked(ctx, lState, rtype) return err }) if err != nil { return nil, EntryInfo{}, nil, err } // we may be an unkeyed client if err := isReadableOrError(ctx, fbo.config, md.ReadOnly()); err != nil { return nil, EntryInfo{}, nil, err } handle = md.GetTlfHandle() node, err = fbo.nodeCache.GetOrCreate(md.data.Dir.BlockPointer, string(handle.GetCanonicalName()), nil) if err != nil { return nil, EntryInfo{}, nil, err } return node, md.Data().Dir.EntryInfo, handle, nil } type makeNewBlock func() Block // pathFromNodeHelper() shouldn't be called except by the helper // functions below. func (fbo *folderBranchOps) pathFromNodeHelper(n Node) (path, error) { p := fbo.nodeCache.PathFromNode(n) if !p.isValid() { return path{}, InvalidPathError{p} } return p, nil } // Helper functions to clarify uses of pathFromNodeHelper() (see // nodeCache comments). func (fbo *folderBranchOps) pathFromNodeForRead(n Node) (path, error) { return fbo.pathFromNodeHelper(n) } func (fbo *folderBranchOps) pathFromNodeForMDWriteLocked( lState *lockState, n Node) (path, error) { fbo.mdWriterLock.AssertLocked(lState) return fbo.pathFromNodeHelper(n) } func (fbo *folderBranchOps) GetDirChildren(ctx context.Context, dir Node) ( children map[string]EntryInfo, err error) { fbo.log.CDebugf(ctx, "GetDirChildren %p", dir.GetID()) defer func() { fbo.deferLog.CDebugf(ctx, "Done GetDirChildren: %v", err) }() err = fbo.checkNode(dir) if err != nil { return nil, err } err = runUnlessCanceled(ctx, func() error { var err error lState := makeFBOLockState() md, err := fbo.getMDForReadNeedIdentify(ctx, lState) if err != nil { return err } dirPath, err := fbo.pathFromNodeForRead(dir) if err != nil { return err } children, err = fbo.blocks.GetDirtyDirChildren( ctx, lState, md.ReadOnly(), dirPath) if err != nil { return err } return nil }) if err != nil { return nil, err } return children, nil } func (fbo *folderBranchOps) Lookup(ctx context.Context, dir Node, name string) ( node Node, ei EntryInfo, err error) { fbo.log.CDebugf(ctx, "Lookup %p %s", dir.GetID(), name) defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }() err = fbo.checkNode(dir) if err != nil { return nil, EntryInfo{}, err } var de DirEntry err = runUnlessCanceled(ctx, func() error { lState := makeFBOLockState() md, err := fbo.getMDForReadNeedIdentify(ctx, lState) if err != nil { return err } dirPath, err := fbo.pathFromNodeForRead(dir) if err != nil { return err } childPath := dirPath.ChildPathNoPtr(name) de, err = fbo.blocks.GetDirtyEntry( ctx, lState, md.ReadOnly(), childPath) if err != nil { return err } if de.Type == Sym { node = nil } else { err = fbo.checkDataVersion(childPath, de.BlockPointer) if err != nil { return err } node, err = fbo.nodeCache.GetOrCreate(de.BlockPointer, name, dir) if err != nil { return err } } return nil }) if err != nil { return nil, EntryInfo{}, err } return node, de.EntryInfo, nil } // statEntry is like Stat, but it returns a DirEntry. This is used by // tests. 
func (fbo *folderBranchOps) statEntry(ctx context.Context, node Node) ( de DirEntry, err error) { err = fbo.checkNode(node) if err != nil { return DirEntry{}, err } lState := makeFBOLockState() nodePath, err := fbo.pathFromNodeForRead(node) if err != nil { return DirEntry{}, err } var md ImmutableRootMetadata if nodePath.hasValidParent() { md, err = fbo.getMDForReadNeedIdentify(ctx, lState) } else { // If nodePath has no valid parent, it's just the TLF // root, so we don't need an identify in this case. md, err = fbo.getMDForReadNoIdentify(ctx, lState) } if err != nil { return DirEntry{}, err } if nodePath.hasValidParent() { de, err = fbo.blocks.GetDirtyEntry( ctx, lState, md.ReadOnly(), nodePath) if err != nil { return DirEntry{}, err } } else { // nodePath is just the root. de = md.data.Dir } return de, nil } var zeroPtr BlockPointer type blockState struct { blockPtr BlockPointer block Block readyBlockData ReadyBlockData syncedCb func() error } func (fbo *folderBranchOps) Stat(ctx context.Context, node Node) ( ei EntryInfo, err error) { fbo.log.CDebugf(ctx, "Stat %p", node.GetID()) defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }() var de DirEntry err = runUnlessCanceled(ctx, func() error { de, err = fbo.statEntry(ctx, node) return err }) if err != nil { return EntryInfo{}, err } return de.EntryInfo, nil } // blockPutState is an internal structure to track data when putting blocks type blockPutState struct { blockStates []blockState } func newBlockPutState(length int) *blockPutState { bps := &blockPutState{} bps.blockStates = make([]blockState, 0, length) return bps } // addNewBlock tracks a new block that will be put. If syncedCb is // non-nil, it will be called whenever the put for that block is // complete (whether or not the put resulted in an error). Currently // it will not be called if the block is never put (due to an earlier // error). func (bps *blockPutState) addNewBlock(blockPtr BlockPointer, block Block, readyBlockData ReadyBlockData, syncedCb func() error) { bps.blockStates = append(bps.blockStates, blockState{blockPtr, block, readyBlockData, syncedCb}) } func (bps *blockPutState) mergeOtherBps(other *blockPutState) { bps.blockStates = append(bps.blockStates, other.blockStates...) 
} func (bps *blockPutState) DeepCopy() *blockPutState { newBps := &blockPutState{} newBps.blockStates = make([]blockState, len(bps.blockStates)) copy(newBps.blockStates, bps.blockStates) return newBps } func (fbo *folderBranchOps) readyBlockMultiple(ctx context.Context, kmd KeyMetadata, currBlock Block, uid keybase1.UID, bps *blockPutState) (info BlockInfo, plainSize int, err error) { info, plainSize, readyBlockData, err := fbo.blocks.ReadyBlock(ctx, kmd, currBlock, uid) if err != nil { return } bps.addNewBlock(info.BlockPointer, currBlock, readyBlockData, nil) return } func (fbo *folderBranchOps) unembedBlockChanges( ctx context.Context, bps *blockPutState, md *RootMetadata, changes *BlockChanges, uid keybase1.UID) (err error) { buf, err := fbo.config.Codec().Encode(changes) if err != nil { return } block := NewFileBlock().(*FileBlock) block.Contents = buf info, _, err := fbo.readyBlockMultiple( ctx, md.ReadOnly(), block, uid, bps) if err != nil { return } md.data.cachedChanges = *changes changes.Info = info changes.Ops = nil md.RefBytes += uint64(info.EncodedSize) md.DiskUsage += uint64(info.EncodedSize) return } type localBcache map[BlockPointer]*DirBlock // syncBlock updates, and readies, the blocks along the path for the // given write, up to the root of the tree or stopAt (if specified). // When it updates the root of the tree, it also modifies the given // head object with a new revision number and root block ID. It first // checks the provided lbc for blocks that may have been modified by // previous syncBlock calls or the FS calls themselves. It returns // the updated path to the changed directory, the new or updated // directory entry created as part of the call, and a summary of all // the blocks that now must be put to the block server. // // This function is safe to use unlocked, but may modify MD to have // the same revision number as another one. All functions in this file // must call syncBlockLocked instead, which holds mdWriterLock and // thus serializes the revision numbers. Conflict resolution may call // syncBlockForConflictResolution, which doesn't hold the lock, since // it already handles conflicts correctly. // // entryType must not be Sym. // // TODO: deal with multiple nodes for indirect blocks func (fbo *folderBranchOps) syncBlock( ctx context.Context, lState *lockState, uid keybase1.UID, md *RootMetadata, newBlock Block, dir path, name string, entryType EntryType, mtime bool, ctime bool, stopAt BlockPointer, lbc localBcache) (path, DirEntry, *blockPutState, error) { // now ready each dblock and write the DirEntry for the next one // in the path currBlock := newBlock currName := name newPath := path{ FolderBranch: dir.FolderBranch, path: make([]pathNode, 0, len(dir.path)), } bps := newBlockPutState(len(dir.path)) refPath := dir.ChildPathNoPtr(name) var newDe DirEntry doSetTime := true now := fbo.nowUnixNano() for len(newPath.path) < len(dir.path)+1 { info, plainSize, err := fbo.readyBlockMultiple( ctx, md.ReadOnly(), currBlock, uid, bps) if err != nil { return path{}, DirEntry{}, nil, err } // prepend to path and setup next one newPath.path = append([]pathNode{{info.BlockPointer, currName}}, newPath.path...) 
// get the parent block prevIdx := len(dir.path) - len(newPath.path) var prevDblock *DirBlock var de DirEntry var nextName string nextDoSetTime := false if prevIdx < 0 { // root dir, update the MD instead de = md.data.Dir } else { prevDir := path{ FolderBranch: dir.FolderBranch, path: dir.path[:prevIdx+1], } // First, check the localBcache, which could contain // blocks that were modified across multiple calls to // syncBlock. var ok bool prevDblock, ok = lbc[prevDir.tailPointer()] if !ok { // If the block isn't in the local bcache, we // have to fetch it, possibly from the // network. Directory blocks are only ever // modified while holding mdWriterLock, so it's // safe to fetch them one at a time. prevDblock, err = fbo.blocks.GetDir( ctx, lState, md.ReadOnly(), prevDir, blockWrite) if err != nil { return path{}, DirEntry{}, nil, err } } // modify the direntry for currName; make one // if it doesn't exist (which should only // happen the first time around). // // TODO: Pull the creation out of here and // into createEntryLocked(). if de, ok = prevDblock.Children[currName]; !ok { // If this isn't the first time // around, we have an error. if len(newPath.path) > 1 { return path{}, DirEntry{}, nil, NoSuchNameError{currName} } // If this is a file, the size should be 0. (TODO: // Ensure this.) If this is a directory, the size will // be filled in below. The times will be filled in // below as well, since we should only be creating a // new directory entry when doSetTime is true. de = DirEntry{ EntryInfo: EntryInfo{ Type: entryType, Size: 0, }, } // If we're creating a new directory entry, the // parent's times must be set as well. nextDoSetTime = true } currBlock = prevDblock nextName = prevDir.tailName() } if de.Type == Dir { // TODO: When we use indirect dir blocks, // we'll have to calculate the size some other // way. de.Size = uint64(plainSize) } if prevIdx < 0 { md.AddUpdate(md.data.Dir.BlockInfo, info) } else if prevDe, ok := prevDblock.Children[currName]; ok { md.AddUpdate(prevDe.BlockInfo, info) } else { // this is a new block md.AddRefBlock(info) } if len(refPath.path) > 1 { refPath = *refPath.parentPath() } de.BlockInfo = info if doSetTime { if mtime { de.Mtime = now } if ctime { de.Ctime = now } } if !newDe.IsInitialized() { newDe = de } if prevIdx < 0 { md.data.Dir = de } else { prevDblock.Children[currName] = de } currName = nextName // Stop before we get to the common ancestor; it will be taken care of // on the next sync call if prevIdx >= 0 && dir.path[prevIdx].BlockPointer == stopAt { // Put this back into the cache as dirty -- the next // syncBlock call will ready it. dblock, ok := currBlock.(*DirBlock) if !ok { return path{}, DirEntry{}, nil, BadDataError{stopAt.ID} } lbc[stopAt] = dblock break } doSetTime = nextDoSetTime } return newPath, newDe, bps, nil } // syncBlockLock calls syncBlock under mdWriterLock. func (fbo *folderBranchOps) syncBlockLocked( ctx context.Context, lState *lockState, uid keybase1.UID, md *RootMetadata, newBlock Block, dir path, name string, entryType EntryType, mtime bool, ctime bool, stopAt BlockPointer, lbc localBcache) (path, DirEntry, *blockPutState, error) { fbo.mdWriterLock.AssertLocked(lState) return fbo.syncBlock(ctx, lState, uid, md, newBlock, dir, name, entryType, mtime, ctime, stopAt, lbc) } // syncBlockForConflictResolution calls syncBlock unlocked, since // conflict resolution can handle MD revision number conflicts // correctly. 
func (fbo *folderBranchOps) syncBlockForConflictResolution( ctx context.Context, lState *lockState, uid keybase1.UID, md *RootMetadata, newBlock Block, dir path, name string, entryType EntryType, mtime bool, ctime bool, stopAt BlockPointer, lbc localBcache) (path, DirEntry, *blockPutState, error) { return fbo.syncBlock( ctx, lState, uid, md, newBlock, dir, name, entryType, mtime, ctime, stopAt, lbc) } // entryType must not be Sym. func (fbo *folderBranchOps) syncBlockAndCheckEmbedLocked(ctx context.Context, lState *lockState, md *RootMetadata, newBlock Block, dir path, name string, entryType EntryType, mtime bool, ctime bool, stopAt BlockPointer, lbc localBcache) ( path, DirEntry, *blockPutState, error) { fbo.mdWriterLock.AssertLocked(lState) _, uid, err := fbo.config.KBPKI().GetCurrentUserInfo(ctx) if err != nil { return path{}, DirEntry{}, nil, err } newPath, newDe, bps, err := fbo.syncBlockLocked( ctx, lState, uid, md, newBlock, dir, name, entryType, mtime, ctime, stopAt, lbc) if err != nil { return path{}, DirEntry{}, nil, err } // do the block changes need their own blocks? bsplit := fbo.config.BlockSplitter() if !bsplit.ShouldEmbedBlockChanges(&md.data.Changes) { err = fbo.unembedBlockChanges(ctx, bps, md, &md.data.Changes, uid) if err != nil { return path{}, DirEntry{}, nil, err } } return newPath, newDe, bps, nil } func isRecoverableBlockError(err error) bool { _, isArchiveError := err.(BServerErrorBlockArchived) _, isDeleteError := err.(BServerErrorBlockDeleted) _, isRefError := err.(BServerErrorBlockNonExistent) _, isMaxExceededError := err.(BServerErrorMaxRefExceeded) return isArchiveError || isDeleteError || isRefError || isMaxExceededError } // Returns whether the given error is one that shouldn't block the // removal of a file or directory. // // TODO: Consider other errors recoverable, e.g. ones that arise from // present but corrupted blocks? func isRecoverableBlockErrorForRemoval(err error) bool { return isRecoverableBlockError(err) } func isRetriableError(err error, retries int) bool { _, isExclOnUnmergedError := err.(ExclOnUnmergedError) _, isUnmergedSelfConflictError := err.(UnmergedSelfConflictError) recoverable := isExclOnUnmergedError || isUnmergedSelfConflictError || isRecoverableBlockError(err) return recoverable && retries < maxRetriesOnRecoverableErrors } func (fbo *folderBranchOps) doOneBlockPut(ctx context.Context, tlfID TlfID, tlfName CanonicalTlfName, blockState blockState, errChan chan error, blocksToRemoveChan chan *FileBlock) { err := fbo.putBlockCheckQuota( ctx, tlfID, blockState.blockPtr, blockState.readyBlockData, tlfName) if err == nil && blockState.syncedCb != nil { err = blockState.syncedCb() } if err != nil { if isRecoverableBlockError(err) { fblock, ok := blockState.block.(*FileBlock) if ok && !fblock.IsInd { blocksToRemoveChan <- fblock } } // one error causes everything else to cancel select { case errChan <- err: default: return } } } // doBlockPuts writes all the pending block puts to the cache and // server. If the err returned by this function satisfies // isRecoverableBlockError(err), the caller should retry its entire // operation, starting from when the MD successor was created. // // Returns a slice of block pointers that resulted in recoverable // errors and should be removed by the caller from any saved state. 
func (fbo *folderBranchOps) doBlockPuts(ctx context.Context, tlfID TlfID, tlfName CanonicalTlfName, bps blockPutState) ( []BlockPointer, error) { errChan := make(chan error, 1) ctx, cancel := context.WithCancel(ctx) defer cancel() blocks := make(chan blockState, len(bps.blockStates)) var wg sync.WaitGroup numWorkers := len(bps.blockStates) if numWorkers > maxParallelBlockPuts { numWorkers = maxParallelBlockPuts } wg.Add(numWorkers) // A channel to list any blocks that have been archived or // deleted. Any of these will result in an error, so the maximum // we'll get is the same as the number of workers. blocksToRemoveChan := make(chan *FileBlock, numWorkers) worker := func() { defer wg.Done() for blockState := range blocks { fbo.doOneBlockPut(ctx, tlfID, tlfName, blockState, errChan, blocksToRemoveChan) select { // return early if the context has been canceled case <-ctx.Done(): return default: } } } for i := 0; i < numWorkers; i++ { go worker() } for _, blockState := range bps.blockStates { blocks <- blockState } close(blocks) go func() { wg.Wait() close(errChan) close(blocksToRemoveChan) }() err := <-errChan var blocksToRemove []BlockPointer if isRecoverableBlockError(err) { bcache := fbo.config.BlockCache() // Wait for all the outstanding puts to finish, to amortize // the work of re-doing the put. for fblock := range blocksToRemoveChan { for i, bs := range bps.blockStates { if bs.block == fblock { // Let the caller know which blocks shouldn't be // retried. blocksToRemove = append(blocksToRemove, bps.blockStates[i].blockPtr) } } // Remove each problematic block from the cache so the // redo can just make a new block instead. if err := bcache.DeleteKnownPtr(fbo.id(), fblock); err != nil { fbo.log.CWarningf(ctx, "Couldn't delete ptr for a block: %v", err) } } } return blocksToRemove, err } func (fbo *folderBranchOps) finalizeBlocks(bps *blockPutState) error { bcache := fbo.config.BlockCache() for _, blockState := range bps.blockStates { newPtr := blockState.blockPtr // only cache this block if we made a brand new block, not if // we just incref'd some other block. if !newPtr.IsFirstRef() { continue } if err := bcache.Put(newPtr, fbo.id(), blockState.block, TransientEntry); err != nil { return err } } return nil } // Returns true if the passed error indicates a revision conflict. func isRevisionConflict(err error) bool { if err == nil { return false } _, isConflictRevision := err.(MDServerErrorConflictRevision) _, isConflictPrevRoot := err.(MDServerErrorConflictPrevRoot) _, isConflictDiskUsage := err.(MDServerErrorConflictDiskUsage) _, isConditionFailed := err.(MDServerErrorConditionFailed) _, isConflictFolderMapping := err.(MDServerErrorConflictFolderMapping) _, isJournal := err.(MDJournalConflictError) return isConflictRevision || isConflictPrevRoot || isConflictDiskUsage || isConditionFailed || isConflictFolderMapping || isJournal } func (fbo *folderBranchOps) finalizeMDWriteLocked(ctx context.Context, lState *lockState, md *RootMetadata, bps *blockPutState, excl Excl) (err error) { fbo.mdWriterLock.AssertLocked(lState) // finally, write out the new metadata mdops := fbo.config.MDOps() doUnmergedPut := true mergedRev := MetadataRevisionUninitialized oldPrevRoot := md.PrevRoot var mdID MdID if fbo.isMasterBranchLocked(lState) { // only do a normal Put if we're not already staged. 
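// A Put that hits a revision conflict is handled just below: an
// exclusive create surfaces ExclOnUnmergedError (so the caller can
// fetch updates and retry), while other writes fall back to an
// unmerged put on a new branch.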
mdID, err = mdops.Put(ctx, md) if doUnmergedPut = isRevisionConflict(err); doUnmergedPut { fbo.log.CDebugf(ctx, "Conflict: %v", err) mergedRev = md.Revision if excl == WithExcl { // If this was caused by an exclusive create, we shouldn't do an // UnmergedPut, but rather try to get newest update from server, and // retry afterwards. err = fbo.getAndApplyMDUpdates(ctx, lState, fbo.applyMDUpdatesLocked) if err != nil { return err } return ExclOnUnmergedError{} } } else if err != nil { return err } } else if excl == WithExcl { return ExclOnUnmergedError{} } if doUnmergedPut { // We're out of date, and this is not an exclusive write, so put it as an // unmerged MD. mdID, err = mdops.PutUnmerged(ctx, md) if isRevisionConflict(err) { // Self-conflicts are retried in `doMDWriteWithRetry`. err = UnmergedSelfConflictError{err} } if err != nil { return err } bid := md.BID fbo.setBranchIDLocked(lState, bid) fbo.cr.Resolve(md.Revision, mergedRev) } else { fbo.setBranchIDLocked(lState, NullBranchID) if md.IsRekeySet() && !md.IsWriterMetadataCopiedSet() { // Queue this folder for rekey if the bit was set and it's not a copy. // This is for the case where we're coming out of conflict resolution. // So why don't we do this in finalizeResolution? Well, we do but we don't // want to block on a rekey so we queue it. Because of that it may fail // due to a conflict with some subsequent write. By also handling it here // we'll always retry if we notice we haven't been successful in clearing // the bit yet. Note that I haven't actually seen this happen but it seems // theoretically possible. defer fbo.config.RekeyQueue().Enqueue(md.ID) } } md.swapCachedBlockChanges() err = fbo.finalizeBlocks(bps) if err != nil { return err } rebased := (oldPrevRoot != md.PrevRoot) if rebased { bid := md.BID fbo.setBranchIDLocked(lState, bid) fbo.cr.Resolve(md.Revision, MetadataRevisionUninitialized) } fbo.headLock.Lock(lState) defer fbo.headLock.Unlock(lState) irmd := MakeImmutableRootMetadata(md, mdID, fbo.config.Clock().Now()) err = fbo.setHeadSuccessorLocked(ctx, lState, irmd, rebased) if err != nil { return err } // Archive the old, unref'd blocks fbo.fbm.archiveUnrefBlocks(irmd.ReadOnly()) fbo.notifyBatchLocked(ctx, lState, irmd) return nil } func (fbo *folderBranchOps) finalizeMDRekeyWriteLocked(ctx context.Context, lState *lockState, md *RootMetadata) (err error) { fbo.mdWriterLock.AssertLocked(lState) oldPrevRoot := md.PrevRoot // finally, write out the new metadata mdID, err := fbo.config.MDOps().Put(ctx, md) isConflict := isRevisionConflict(err) if err != nil && !isConflict { return err } if isConflict { // drop this block. we've probably collided with someone also // trying to rekey the same folder but that's not necessarily // the case. we'll queue another rekey just in case. it should // be safe as it's idempotent. we don't want any rekeys present // in unmerged history or that will just make a mess. 
fbo.config.RekeyQueue().Enqueue(md.ID) return RekeyConflictError{err} } fbo.setBranchIDLocked(lState, NullBranchID) rebased := (oldPrevRoot != md.PrevRoot) if rebased { bid := md.BID fbo.setBranchIDLocked(lState, bid) fbo.cr.Resolve(md.Revision, MetadataRevisionUninitialized) } fbo.headLock.Lock(lState) defer fbo.headLock.Unlock(lState) return fbo.setHeadSuccessorLocked(ctx, lState, MakeImmutableRootMetadata(md, mdID, fbo.config.Clock().Now()), rebased) } func (fbo *folderBranchOps) finalizeGCOp(ctx context.Context, gco *gcOp) ( err error) { lState := makeFBOLockState() // Lock the folder so we can get an internally-consistent MD // revision number. fbo.mdWriterLock.Lock(lState) defer fbo.mdWriterLock.Unlock(lState) md, err := fbo.getMDForWriteLocked(ctx, lState) if err != nil { return err } if md.MergedStatus() == Unmerged { return UnexpectedUnmergedPutError{} } md.AddOp(gco) if !fbo.config.BlockSplitter().ShouldEmbedBlockChanges(&md.data.Changes) { var uid keybase1.UID _, uid, err = fbo.config.KBPKI().GetCurrentUserInfo(ctx) if err != nil { return err } bps := newBlockPutState(1) err = fbo.unembedBlockChanges(ctx, bps, md, &md.data.Changes, uid) if err != nil { return err } defer func() { if err != nil { fbo.fbm.cleanUpBlockState( md.ReadOnly(), bps, blockDeleteOnMDFail) } }() ptrsToDelete, err := fbo.doBlockPuts( ctx, md.ID, md.GetTlfHandle().GetCanonicalName(), *bps) if err != nil { return err } if len(ptrsToDelete) > 0 { return fmt.Errorf("Unexpected pointers to delete after "+ "unembedding block changes in gc op: %v", ptrsToDelete) } } oldPrevRoot := md.PrevRoot // finally, write out the new metadata mdID, err := fbo.config.MDOps().Put(ctx, md) if err != nil { // Don't allow garbage collection to put us into a conflicting // state; just wait for the next period. return err } fbo.setBranchIDLocked(lState, NullBranchID) md.swapCachedBlockChanges() rebased := (oldPrevRoot != md.PrevRoot) if rebased { bid := md.BID fbo.setBranchIDLocked(lState, bid) fbo.cr.Resolve(md.Revision, MetadataRevisionUninitialized) } fbo.headLock.Lock(lState) defer fbo.headLock.Unlock(lState) irmd := MakeImmutableRootMetadata(md, mdID, fbo.config.Clock().Now()) err = fbo.setHeadSuccessorLocked(ctx, lState, irmd, rebased) if err != nil { return err } fbo.notifyBatchLocked(ctx, lState, irmd) return nil } func (fbo *folderBranchOps) syncBlockAndFinalizeLocked(ctx context.Context, lState *lockState, md *RootMetadata, newBlock Block, dir path, name string, entryType EntryType, mtime bool, ctime bool, stopAt BlockPointer, excl Excl) (de DirEntry, err error) { fbo.mdWriterLock.AssertLocked(lState) _, de, bps, err := fbo.syncBlockAndCheckEmbedLocked( ctx, lState, md, newBlock, dir, name, entryType, mtime, ctime, zeroPtr, nil) if err != nil { return DirEntry{}, err } defer func() { if err != nil { fbo.fbm.cleanUpBlockState( md.ReadOnly(), bps, blockDeleteOnMDFail) } }() _, err = fbo.doBlockPuts( ctx, md.ID, md.GetTlfHandle().GetCanonicalName(), *bps) if err != nil { return DirEntry{}, err } err = fbo.finalizeMDWriteLocked(ctx, lState, md, bps, excl) if err != nil { return DirEntry{}, err } return de, nil } func checkDisallowedPrefixes(name string) error { for _, prefix := range disallowedPrefixes { if strings.HasPrefix(name, prefix) { return DisallowedPrefixError{name, prefix} } } return nil } func (fbo *folderBranchOps) checkNewDirSize(ctx context.Context, lState *lockState, md ReadOnlyRootMetadata, dirPath path, newName string) error { // Check that the directory isn't past capacity already. 
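// A directory's Size is the plaintext encoded size of its block (set
// in syncBlock), so currSize plus the new name's length gives a rough
// post-create estimate to compare against MaxDirBytes.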
var currSize uint64 if dirPath.hasValidParent() { de, err := fbo.blocks.GetDirtyEntry(ctx, lState, md, dirPath) if err != nil { return err } currSize = de.Size } else { // dirPath is just the root. currSize = md.data.Dir.Size } // Just an approximation since it doesn't include the size of the // directory entry itself, but that's ok -- at worst it'll be an // off-by-one-entry error, and since there's a maximum name length // we can't get in too much trouble. if currSize+uint64(len(newName)) > fbo.config.MaxDirBytes() { return DirTooBigError{dirPath, currSize + uint64(len(newName)), fbo.config.MaxDirBytes()} } return nil } // entryType must not be Sym. func (fbo *folderBranchOps) createEntryLocked( ctx context.Context, lState *lockState, dir Node, name string, entryType EntryType, excl Excl) (Node, DirEntry, error) { fbo.mdWriterLock.AssertLocked(lState) if err := checkDisallowedPrefixes(name); err != nil { return nil, DirEntry{}, err } if uint32(len(name)) > fbo.config.MaxNameBytes() { return nil, DirEntry{}, NameTooLongError{name, fbo.config.MaxNameBytes()} } // verify we have permission to write md, err := fbo.getMDForWriteLocked(ctx, lState) if err != nil { return nil, DirEntry{}, err } dirPath, err := fbo.pathFromNodeForMDWriteLocked(lState, dir) if err != nil { return nil, DirEntry{}, err } dblock, err := fbo.blocks.GetDir( ctx, lState, md.ReadOnly(), dirPath, blockWrite) if err != nil { return nil, DirEntry{}, err } // does name already exist? if _, ok := dblock.Children[name]; ok { return nil, DirEntry{}, NameExistsError{name} } if err := fbo.checkNewDirSize( ctx, lState, md.ReadOnly(), dirPath, name); err != nil { return nil, DirEntry{}, err } co, err := newCreateOp(name, dirPath.tailPointer(), entryType) if err != nil { return nil, DirEntry{}, err } md.AddOp(co) // create new data block var newBlock Block // XXX: for now, put a unique ID in every new block, to make sure it // has a unique block ID. This may not be needed once we have encryption. if entryType == Dir { newBlock = &DirBlock{ Children: make(map[string]DirEntry), } } else { newBlock = &FileBlock{} } de, err := fbo.syncBlockAndFinalizeLocked( ctx, lState, md, newBlock, dirPath, name, entryType, true, true, zeroPtr, excl) if err != nil { return nil, DirEntry{}, err } node, err := fbo.nodeCache.GetOrCreate(de.BlockPointer, name, dir) if err != nil { return nil, DirEntry{}, err } return node, de, nil } func (fbo *folderBranchOps) doMDWriteWithRetry(ctx context.Context, lState *lockState, fn func(lState *lockState) error) error { doUnlock := false defer func() { if doUnlock { fbo.mdWriterLock.Unlock(lState) } }() for i := 0; ; i++ { fbo.mdWriterLock.Lock(lState) doUnlock = true // Make sure we haven't been canceled before doing anything // too serious. select { case <-ctx.Done(): return ctx.Err() default: } err := fn(lState) if isRetriableError(err, i) { fbo.log.CDebugf(ctx, "Trying again after retriable error: %v", err) // Release the lock to give someone else a chance doUnlock = false fbo.mdWriterLock.Unlock(lState) if _, ok := err.(ExclOnUnmergedError); ok { if err = fbo.cr.Wait(ctx); err != nil { return err } } else if _, ok := err.(UnmergedSelfConflictError); ok { // We can only get here if we are already on an // unmerged branch and an errored PutUnmerged did make // it to the mdserver. Let's force sync, with a fresh // context so the observer doesn't ignore the updates // (but tie the cancels together).
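// The goroutine below forwards a cancellation of the caller's
// context to the fresh context, so the forced sync still stops if
// the original operation is canceled.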
newCtx := fbo.ctxWithFBOID(context.Background()) newCtx, cancel := context.WithCancel(newCtx) defer cancel() go func() { select { case <-ctx.Done(): cancel() case <-newCtx.Done(): } }() fbo.log.CDebugf(ctx, "Got a revision conflict while unmerged "+ "(%v); forcing a sync", err) err = fbo.getAndApplyNewestUnmergedHead(newCtx, lState) if err != nil { return err } cancel() } continue } else if err != nil { return err } return nil } } func (fbo *folderBranchOps) doMDWriteWithRetryUnlessCanceled( ctx context.Context, fn func(lState *lockState) error) error { return runUnlessCanceled(ctx, func() error { lState := makeFBOLockState() return fbo.doMDWriteWithRetry(ctx, lState, fn) }) } func (fbo *folderBranchOps) CreateDir( ctx context.Context, dir Node, path string) ( n Node, ei EntryInfo, err error) { fbo.log.CDebugf(ctx, "CreateDir %p %s", dir.GetID(), path) defer func() { if err != nil { fbo.deferLog.CDebugf(ctx, "Error: %v", err) } else { fbo.deferLog.CDebugf(ctx, "Done: %p", n.GetID()) } }() err = fbo.checkNode(dir) if err != nil { return nil, EntryInfo{}, err } var retNode Node var retEntryInfo EntryInfo err = fbo.doMDWriteWithRetryUnlessCanceled(ctx, func(lState *lockState) error { node, de, err := fbo.createEntryLocked(ctx, lState, dir, path, Dir, NoExcl) // Don't set node and ei directly, as that can cause a // race when the Create is canceled. retNode = node retEntryInfo = de.EntryInfo return err }) if err != nil { return nil, EntryInfo{}, err } return retNode, retEntryInfo, nil } func (fbo *folderBranchOps) CreateFile( ctx context.Context, dir Node, path string, isExec bool, excl Excl) ( n Node, ei EntryInfo, err error) { fbo.log.CDebugf(ctx, "CreateFile %p %s isExec=%v Excl=%s", dir.GetID(), path, isExec, excl) defer func() { if err != nil { fbo.deferLog.CDebugf(ctx, "Error: %v", err) } else { fbo.deferLog.CDebugf(ctx, "Done: %p", n.GetID()) } }() err = fbo.checkNode(dir) if err != nil { return nil, EntryInfo{}, err } var entryType EntryType if isExec { entryType = Exec } else { entryType = File } if excl == WithExcl { if err = fbo.cr.Wait(ctx); err != nil { return nil, EntryInfo{}, err } } var retNode Node var retEntryInfo EntryInfo err = fbo.doMDWriteWithRetryUnlessCanceled(ctx, func(lState *lockState) error { // Don't set node and ei directly, as that can cause a // race when the Create is canceled. node, de, err := fbo.createEntryLocked(ctx, lState, dir, path, entryType, excl) retNode = node retEntryInfo = de.EntryInfo return err }) if err != nil { return nil, EntryInfo{}, err } return retNode, retEntryInfo, nil } func (fbo *folderBranchOps) createLinkLocked( ctx context.Context, lState *lockState, dir Node, fromName string, toPath string) (DirEntry, error) { fbo.mdWriterLock.AssertLocked(lState) if err := checkDisallowedPrefixes(fromName); err != nil { return DirEntry{}, err } if uint32(len(fromName)) > fbo.config.MaxNameBytes() { return DirEntry{}, NameTooLongError{fromName, fbo.config.MaxNameBytes()} } // verify we have permission to write md, err := fbo.getMDForWriteLocked(ctx, lState) if err != nil { return DirEntry{}, err } dirPath, err := fbo.pathFromNodeForMDWriteLocked(lState, dir) if err != nil { return DirEntry{}, err } dblock, err := fbo.blocks.GetDir( ctx, lState, md.ReadOnly(), dirPath, blockWrite) if err != nil { return DirEntry{}, err } // TODO: validate inputs // does name already exist? 
if _, ok := dblock.Children[fromName]; ok { return DirEntry{}, NameExistsError{fromName} } if err := fbo.checkNewDirSize(ctx, lState, md.ReadOnly(), dirPath, fromName); err != nil { return DirEntry{}, err } co, err := newCreateOp(fromName, dirPath.tailPointer(), Sym) if err != nil { return DirEntry{}, err } md.AddOp(co) // Create a direntry for the link, and then sync now := fbo.nowUnixNano() dblock.Children[fromName] = DirEntry{ EntryInfo: EntryInfo{ Type: Sym, Size: uint64(len(toPath)), SymPath: toPath, Mtime: now, Ctime: now, }, } _, err = fbo.syncBlockAndFinalizeLocked( ctx, lState, md, dblock, *dirPath.parentPath(), dirPath.tailName(), Dir, true, true, zeroPtr, NoExcl) if err != nil { return DirEntry{}, err } return dblock.Children[fromName], nil } func (fbo *folderBranchOps) CreateLink( ctx context.Context, dir Node, fromName string, toPath string) ( ei EntryInfo, err error) { fbo.log.CDebugf(ctx, "CreateLink %p %s -> %s", dir.GetID(), fromName, toPath) defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }() err = fbo.checkNode(dir) if err != nil { return EntryInfo{}, err } var retEntryInfo EntryInfo err = fbo.doMDWriteWithRetryUnlessCanceled(ctx, func(lState *lockState) error { // Don't set ei directly, as that can cause a race when // the Create is canceled. de, err := fbo.createLinkLocked(ctx, lState, dir, fromName, toPath) retEntryInfo = de.EntryInfo return err }) if err != nil { return EntryInfo{}, err } return retEntryInfo, nil } // unrefEntry modifies md to unreference all relevant blocks for the // given entry. func (fbo *folderBranchOps) unrefEntry(ctx context.Context, lState *lockState, md *RootMetadata, dir path, de DirEntry, name string) error { md.AddUnrefBlock(de.BlockInfo) // construct a path for the child so we can unlink with it. childPath := dir.ChildPath(name, de.BlockPointer) // If this is an indirect block, we need to delete all of its // children as well. NOTE: non-empty directories can't be // removed, so no need to check for indirect directory blocks // here. 
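// For files we also unreference every block reachable from the
// entry, since an indirect file's data lives in child blocks.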
if de.Type == File || de.Type == Exec { blockInfos, err := fbo.blocks.GetIndirectFileBlockInfos( ctx, lState, md.ReadOnly(), childPath) if isRecoverableBlockErrorForRemoval(err) { msg := fmt.Sprintf("Recoverable block error encountered for unrefEntry(%v); continuing", childPath) fbo.log.CWarningf(ctx, "%s", msg) fbo.log.CDebugf(ctx, "%s (err=%v)", msg, err) } else if err != nil { return err } for _, blockInfo := range blockInfos { md.AddUnrefBlock(blockInfo) } } return nil } func (fbo *folderBranchOps) removeEntryLocked(ctx context.Context, lState *lockState, md *RootMetadata, dir path, name string) error { fbo.mdWriterLock.AssertLocked(lState) pblock, err := fbo.blocks.GetDir( ctx, lState, md.ReadOnly(), dir, blockWrite) if err != nil { return err } // make sure the entry exists de, ok := pblock.Children[name] if !ok { return NoSuchNameError{name} } ro, err := newRmOp(name, dir.tailPointer()) if err != nil { return err } md.AddOp(ro) err = fbo.unrefEntry(ctx, lState, md, dir, de, name) if err != nil { return err } // the actual unlink delete(pblock.Children, name) // sync the parent directory _, err = fbo.syncBlockAndFinalizeLocked( ctx, lState, md, pblock, *dir.parentPath(), dir.tailName(), Dir, true, true, zeroPtr, NoExcl) if err != nil { return err } return nil } func (fbo *folderBranchOps) removeDirLocked(ctx context.Context, lState *lockState, dir Node, dirName string) (err error) { fbo.mdWriterLock.AssertLocked(lState) // verify we have permission to write md, err := fbo.getMDForWriteLocked(ctx, lState) if err != nil { return err } dirPath, err := fbo.pathFromNodeForMDWriteLocked(lState, dir) if err != nil { return err } pblock, err := fbo.blocks.GetDir( ctx, lState, md.ReadOnly(), dirPath, blockRead) de, ok := pblock.Children[dirName] if !ok { return NoSuchNameError{dirName} } // construct a path for the child so we can check for an empty dir childPath := dirPath.ChildPath(dirName, de.BlockPointer) childBlock, err := fbo.blocks.GetDir( ctx, lState, md.ReadOnly(), childPath, blockRead) if isRecoverableBlockErrorForRemoval(err) { msg := fmt.Sprintf("Recoverable block error encountered for removeDirLocked(%v); continuing", childPath) fbo.log.CWarningf(ctx, "%s", msg) fbo.log.CDebugf(ctx, "%s (err=%v)", msg, err) } else if err != nil { return err } else if len(childBlock.Children) > 0 { return DirNotEmptyError{dirName} } return fbo.removeEntryLocked(ctx, lState, md, dirPath, dirName) } func (fbo *folderBranchOps) RemoveDir( ctx context.Context, dir Node, dirName string) (err error) { fbo.log.CDebugf(ctx, "RemoveDir %p %s", dir.GetID(), dirName) defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }() err = fbo.checkNode(dir) if err != nil { return } return fbo.doMDWriteWithRetryUnlessCanceled(ctx, func(lState *lockState) error { return fbo.removeDirLocked(ctx, lState, dir, dirName) }) } func (fbo *folderBranchOps) RemoveEntry(ctx context.Context, dir Node, name string) (err error) { fbo.log.CDebugf(ctx, "RemoveEntry %p %s", dir.GetID(), name) defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }() err = fbo.checkNode(dir) if err != nil { return err } return fbo.doMDWriteWithRetryUnlessCanceled(ctx, func(lState *lockState) error { // verify we have permission to write md, err := fbo.getMDForWriteLocked(ctx, lState) if err != nil { return err } dirPath, err := fbo.pathFromNodeForMDWriteLocked(lState, dir) if err != nil { return err } return fbo.removeEntryLocked(ctx, lState, md, dirPath, name) }) } func (fbo *folderBranchOps) renameLocked( ctx context.Context, lState 
*lockState, oldParent path, oldName string, newParent path, newName string) (err error) { fbo.mdWriterLock.AssertLocked(lState) // verify we have permission to write md, err := fbo.getMDForWriteLocked(ctx, lState) if err != nil { return err } oldPBlock, newPBlock, newDe, lbc, err := fbo.blocks.PrepRename( ctx, lState, md, oldParent, oldName, newParent, newName) if err != nil { return err } // does name exist? if de, ok := newPBlock.Children[newName]; ok { // Usually higher-level programs check these, but just in case. if de.Type == Dir && newDe.Type != Dir { return NotDirError{newParent.ChildPathNoPtr(newName)} } else if de.Type != Dir && newDe.Type == Dir { return NotFileError{newParent.ChildPathNoPtr(newName)} } if de.Type == Dir { // The directory must be empty. oldTargetDir, err := fbo.blocks.GetDirBlockForReading(ctx, lState, md.ReadOnly(), de.BlockPointer, newParent.Branch, newParent.ChildPathNoPtr(newName)) if err != nil { return err } if len(oldTargetDir.Children) != 0 { fbo.log.CWarningf(ctx, "Renaming over a non-empty directory "+ " (%s/%s) not allowed.", newParent, newName) return DirNotEmptyError{newName} } } // Delete the old block pointed to by this direntry. err := fbo.unrefEntry(ctx, lState, md, newParent, de, newName) if err != nil { return err } } // only the ctime changes newDe.Ctime = fbo.nowUnixNano() newPBlock.Children[newName] = newDe delete(oldPBlock.Children, oldName) // find the common ancestor var i int found := false // the root block will always be the same, so start at number 1 for i = 1; i < len(oldParent.path) && i < len(newParent.path); i++ { if oldParent.path[i].ID != newParent.path[i].ID { found = true i-- break } } if !found { // if we couldn't find one, then the common ancestor is the // last node in the shorter path if len(oldParent.path) < len(newParent.path) { i = len(oldParent.path) - 1 } else { i = len(newParent.path) - 1 } } commonAncestor := oldParent.path[i].BlockPointer oldIsCommon := oldParent.tailPointer() == commonAncestor newIsCommon := newParent.tailPointer() == commonAncestor newOldPath := path{FolderBranch: oldParent.FolderBranch} var oldBps *blockPutState if oldIsCommon { if newIsCommon { // if old and new are both the common ancestor, there is // nothing to do (syncBlock will take care of everything) } else { // If the old one is common and the new one is // not, then the last // syncBlockAndCheckEmbedLocked call will need // to access the old one. lbc[oldParent.tailPointer()] = oldPBlock } } else { if newIsCommon { // If the new one is common, then the first // syncBlockAndCheckEmbedLocked call will need to access // it. lbc[newParent.tailPointer()] = newPBlock } // The old one is not the common ancestor, so we need to sync it. // TODO: optimize by pushing blocks from both paths in parallel newOldPath, _, oldBps, err = fbo.syncBlockAndCheckEmbedLocked( ctx, lState, md, oldPBlock, *oldParent.parentPath(), oldParent.tailName(), Dir, true, true, commonAncestor, lbc) if err != nil { return err } } newNewPath, _, newBps, err := fbo.syncBlockAndCheckEmbedLocked( ctx, lState, md, newPBlock, *newParent.parentPath(), newParent.tailName(), Dir, true, true, zeroPtr, lbc) if err != nil { return err } // newOldPath is really just a prefix now. A copy is necessary as an // append could cause the new path to contain nodes from the old path. newOldPath.path = append(make([]pathNode, i+1, i+1), newOldPath.path...) 
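// Fill in the shared prefix (everything up to and including the
// common ancestor) from the freshly synced new path.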
copy(newOldPath.path[:i+1], newNewPath.path[:i+1]) // merge and finalize the blockPutStates if oldBps != nil { newBps.mergeOtherBps(oldBps) } defer func() { if err != nil { fbo.fbm.cleanUpBlockState( md.ReadOnly(), newBps, blockDeleteOnMDFail) } }() _, err = fbo.doBlockPuts(ctx, md.ID, md.GetTlfHandle().GetCanonicalName(), *newBps) if err != nil { return err } return fbo.finalizeMDWriteLocked(ctx, lState, md, newBps, NoExcl) } func (fbo *folderBranchOps) Rename( ctx context.Context, oldParent Node, oldName string, newParent Node, newName string) (err error) { fbo.log.CDebugf(ctx, "Rename %p/%s -> %p/%s", oldParent.GetID(), oldName, newParent.GetID(), newName) defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }() err = fbo.checkNode(newParent) if err != nil { return err } return fbo.doMDWriteWithRetryUnlessCanceled(ctx, func(lState *lockState) error { oldParentPath, err := fbo.pathFromNodeForMDWriteLocked(lState, oldParent) if err != nil { return err } newParentPath, err := fbo.pathFromNodeForMDWriteLocked(lState, newParent) if err != nil { return err } // only works for paths within the same topdir if oldParentPath.FolderBranch != newParentPath.FolderBranch { return RenameAcrossDirsError{} } return fbo.renameLocked(ctx, lState, oldParentPath, oldName, newParentPath, newName) }) } func (fbo *folderBranchOps) Read( ctx context.Context, file Node, dest []byte, off int64) ( n int64, err error) { fbo.log.CDebugf(ctx, "Read %p %d %d", file.GetID(), len(dest), off) defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }() err = fbo.checkNode(file) if err != nil { return 0, err } // Don't let the goroutine below write directly to the return // variable, since if the context is canceled the goroutine might // outlast this function call, and end up in a read/write race // with the caller. var bytesRead int64 err = runUnlessCanceled(ctx, func() error { lState := makeFBOLockState() // verify we have permission to read md, err := fbo.getMDForReadNeedIdentify(ctx, lState) if err != nil { return err } filePath, err := fbo.pathFromNodeForRead(file) if err != nil { return err } bytesRead, err = fbo.blocks.Read( ctx, lState, md.ReadOnly(), filePath, dest, off) return err }) if err != nil { return 0, err } return bytesRead, nil } func (fbo *folderBranchOps) Write( ctx context.Context, file Node, data []byte, off int64) (err error) { fbo.log.CDebugf(ctx, "Write %p %d %d", file.GetID(), len(data), off) defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }() err = fbo.checkNode(file) if err != nil { return err } return runUnlessCanceled(ctx, func() error { lState := makeFBOLockState() // Get the MD for reading. We won't modify it; we'll track the // unref changes on the side, and put them into the MD during the // sync. md, err := fbo.getMDLocked(ctx, lState, mdReadNeedIdentify) if err != nil { return err } err = fbo.blocks.Write( ctx, lState, md.ReadOnly(), file, data, off) if err != nil { return err } fbo.status.addDirtyNode(file) return nil }) } func (fbo *folderBranchOps) Truncate( ctx context.Context, file Node, size uint64) (err error) { fbo.log.CDebugf(ctx, "Truncate %p %d", file.GetID(), size) defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }() err = fbo.checkNode(file) if err != nil { return err } return runUnlessCanceled(ctx, func() error { lState := makeFBOLockState() // Get the MD for reading. We won't modify it; we'll track the // unref changes on the side, and put them into the MD during the // sync. 
md, err := fbo.getMDLocked(ctx, lState, mdReadNeedIdentify) if err != nil { return err } err = fbo.blocks.Truncate( ctx, lState, md.ReadOnly(), file, size) if err != nil { return err } fbo.status.addDirtyNode(file) return nil }) } func (fbo *folderBranchOps) setExLocked( ctx context.Context, lState *lockState, file path, ex bool) (err error) { fbo.mdWriterLock.AssertLocked(lState) // verify we have permission to write md, err := fbo.getMDForWriteLocked(ctx, lState) if err != nil { return } dblock, de, err := fbo.blocks.GetDirtyParentAndEntry( ctx, lState, md.ReadOnly(), file) if err != nil { return err } // If the file is a symlink, do nothing (to match ext4 // behavior). if de.Type == Sym { return nil } if ex && (de.Type == File) { de.Type = Exec } else if !ex && (de.Type == Exec) { de.Type = File } // If the type isn't File or Exec, there's nothing to do, but // change the ctime anyway (to match ext4 behavior). de.Ctime = fbo.nowUnixNano() parentPath := file.parentPath() sao, err := newSetAttrOp(file.tailName(), parentPath.tailPointer(), exAttr, file.tailPointer()) if err != nil { return err } // If the MD doesn't match the MD expected by the path, that // implies we are using a cached path, which implies the node has // been unlinked. In that case, we can safely ignore this setex. if md.data.Dir.BlockPointer != file.path[0].BlockPointer { fbo.log.CDebugf(ctx, "Skipping setex for a removed file %v", file.tailPointer()) fbo.blocks.UpdateCachedEntryAttributesOnRemovedFile( ctx, lState, sao, de) return nil } md.AddOp(sao) dblock.Children[file.tailName()] = de _, err = fbo.syncBlockAndFinalizeLocked( ctx, lState, md, dblock, *parentPath.parentPath(), parentPath.tailName(), Dir, false, false, zeroPtr, NoExcl) return err } func (fbo *folderBranchOps) SetEx( ctx context.Context, file Node, ex bool) (err error) { fbo.log.CDebugf(ctx, "SetEx %p %t", file.GetID(), ex) defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }() err = fbo.checkNode(file) if err != nil { return } return fbo.doMDWriteWithRetryUnlessCanceled(ctx, func(lState *lockState) error { filePath, err := fbo.pathFromNodeForMDWriteLocked(lState, file) if err != nil { return err } return fbo.setExLocked(ctx, lState, filePath, ex) }) } func (fbo *folderBranchOps) setMtimeLocked( ctx context.Context, lState *lockState, file path, mtime *time.Time) error { fbo.mdWriterLock.AssertLocked(lState) // verify we have permission to write md, err := fbo.getMDForWriteLocked(ctx, lState) if err != nil { return err } dblock, de, err := fbo.blocks.GetDirtyParentAndEntry( ctx, lState, md.ReadOnly(), file) if err != nil { return err } de.Mtime = mtime.UnixNano() // setting the mtime counts as changing the file MD, so must set ctime too de.Ctime = fbo.nowUnixNano() parentPath := file.parentPath() sao, err := newSetAttrOp(file.tailName(), parentPath.tailPointer(), mtimeAttr, file.tailPointer()) if err != nil { return err } // If the MD doesn't match the MD expected by the path, that // implies we are using a cached path, which implies the node has // been unlinked. In that case, we can safely ignore this // setmtime. 
if md.data.Dir.BlockPointer != file.path[0].BlockPointer { fbo.log.CDebugf(ctx, "Skipping setmtime for a removed file %v", file.tailPointer()) fbo.blocks.UpdateCachedEntryAttributesOnRemovedFile( ctx, lState, sao, de) return nil } md.AddOp(sao) dblock.Children[file.tailName()] = de _, err = fbo.syncBlockAndFinalizeLocked( ctx, lState, md, dblock, *parentPath.parentPath(), parentPath.tailName(), Dir, false, false, zeroPtr, NoExcl) return err } func (fbo *folderBranchOps) SetMtime( ctx context.Context, file Node, mtime *time.Time) (err error) { fbo.log.CDebugf(ctx, "SetMtime %p %v", file.GetID(), mtime) defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }() if mtime == nil { // Can happen on some OSes (e.g. OSX) when trying to set the atime only return nil } err = fbo.checkNode(file) if err != nil { return } return fbo.doMDWriteWithRetryUnlessCanceled(ctx, func(lState *lockState) error { filePath, err := fbo.pathFromNodeForMDWriteLocked(lState, file) if err != nil { return err } return fbo.setMtimeLocked(ctx, lState, filePath, mtime) }) } func (fbo *folderBranchOps) syncLocked(ctx context.Context, lState *lockState, file path) (stillDirty bool, err error) { fbo.mdWriterLock.AssertLocked(lState) // if the cache for this file isn't dirty, we're done if !fbo.blocks.IsDirty(lState, file) { return false, nil } // Verify we have permission to write. We do this after the dirty // check because otherwise readers who sync clean files on close // would get an error. md, err := fbo.getMDForWriteLocked(ctx, lState) if err != nil { return true, err } // If the MD doesn't match the MD expected by the path, that // implies we are using a cached path, which implies the node has // been unlinked. In that case, we can safely ignore this sync. if md.data.Dir.BlockPointer != file.path[0].BlockPointer { fbo.log.CDebugf(ctx, "Skipping sync for a removed file %v", file.tailPointer()) // Removing the cached info here is a little sketchy, // since there's no guarantee that this sync comes // from closing the file, and we still want to serve // stat calls accurately if the user still has an open // handle to this file. TODO: Hook this in with the // node cache GC logic to be perfectly accurate. return true, fbo.blocks.ClearCacheInfo(lState, file) } _, uid, err := fbo.config.KBPKI().GetCurrentUserInfo(ctx) if err != nil { return true, err } // notify the daemon that a write is being performed fbo.config.Reporter().Notify(ctx, writeNotification(file, false)) defer fbo.config.Reporter().Notify(ctx, writeNotification(file, true)) // Filled in by doBlockPuts below. var blocksToRemove []BlockPointer fblock, bps, lbc, syncState, err := fbo.blocks.StartSync(ctx, lState, md, uid, file) defer func() { fbo.blocks.CleanupSyncState( ctx, lState, md.ReadOnly(), file, blocksToRemove, syncState, err) }() if err != nil { return true, err } newPath, _, newBps, err := fbo.syncBlockAndCheckEmbedLocked( ctx, lState, md, fblock, *file.parentPath(), file.tailName(), File, true, true, zeroPtr, lbc) if err != nil { return true, err } bps.mergeOtherBps(newBps) // Note: We explicitly don't call fbo.fbm.cleanUpBlockState here // when there's an error, because it's possible some of the blocks // will be reused in a future attempt at this same sync, and we // don't want them cleaned up in that case. Instead, the // FinishSync call below will take care of that. 
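// blocksToRemove is captured by the deferred CleanupSyncState call
// above, so pointers that hit recoverable put errors can be dropped
// from the saved sync state if this attempt fails.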
blocksToRemove, err = fbo.doBlockPuts( ctx, md.ID, md.GetTlfHandle().GetCanonicalName(), *bps) if err != nil { return true, err } err = fbo.finalizeMDWriteLocked(ctx, lState, md, bps, NoExcl) if err != nil { return true, err } // At this point, all reads through the old path (i.e., file) // see writes that happened since StartSync, whereas all reads // through the new path (newPath) don't. // // TODO: This isn't completely correct, since reads that // happen after a write should always see the new data. // // After FinishSync succeeds, then reads through both the old // and the new paths will see the writes that happened during // the sync. return fbo.blocks.FinishSync(ctx, lState, file, newPath, md.ReadOnly(), syncState, fbo.fbm) } func (fbo *folderBranchOps) Sync(ctx context.Context, file Node) (err error) { fbo.log.CDebugf(ctx, "Sync %p", file.GetID()) defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }() err = fbo.checkNode(file) if err != nil { return } var stillDirty bool err = fbo.doMDWriteWithRetryUnlessCanceled(ctx, func(lState *lockState) error { filePath, err := fbo.pathFromNodeForMDWriteLocked(lState, file) if err != nil { return err } stillDirty, err = fbo.syncLocked(ctx, lState, filePath) return err }) if err != nil { return err } if !stillDirty { fbo.status.rmDirtyNode(file) } return nil } func (fbo *folderBranchOps) FolderStatus( ctx context.Context, folderBranch FolderBranch) ( fbs FolderBranchStatus, updateChan <-chan StatusUpdate, err error) { fbo.log.CDebugf(ctx, "Status") defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }() if folderBranch != fbo.folderBranch { return FolderBranchStatus{}, nil, WrongOpsError{fbo.folderBranch, folderBranch} } return fbo.status.getStatus(ctx) } func (fbo *folderBranchOps) Status( ctx context.Context) ( fbs KBFSStatus, updateChan <-chan StatusUpdate, err error) { return KBFSStatus{}, nil, InvalidOpError{} } // RegisterForChanges registers a single Observer to receive // notifications about this folder/branch. func (fbo *folderBranchOps) RegisterForChanges(obs Observer) error { // It's the caller's responsibility to make sure // RegisterForChanges isn't called twice for the same Observer fbo.observers.add(obs) return nil } // UnregisterFromChanges stops an Observer from getting notifications // about the folder/branch. func (fbo *folderBranchOps) UnregisterFromChanges(obs Observer) error { fbo.observers.remove(obs) return nil } // notifyBatchLocked sends out a notification for the most recent op // in md. func (fbo *folderBranchOps) notifyBatchLocked( ctx context.Context, lState *lockState, md ImmutableRootMetadata) { fbo.headLock.AssertLocked(lState) lastOp := md.data.Changes.Ops[len(md.data.Changes.Ops)-1] fbo.notifyOneOpLocked(ctx, lState, lastOp, md) fbo.editHistory.UpdateHistory(ctx, []ImmutableRootMetadata{md}) } // searchForNode tries to figure out the path to the given // blockPointer, using only the block updates that happened as part of // a given MD update operation. func (fbo *folderBranchOps) searchForNode(ctx context.Context, ptr BlockPointer, md ReadOnlyRootMetadata) (Node, error) { // Record which pointers are new to this update, and thus worth // searching. 
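// Both updated pointers and newly referenced pointers count as
// "new" here.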
newPtrs := make(map[BlockPointer]bool) for _, op := range md.data.Changes.Ops { for _, update := range op.AllUpdates() { newPtrs[update.Ref] = true } for _, ref := range op.Refs() { newPtrs[ref] = true } } nodeMap, _, err := fbo.blocks.SearchForNodes(ctx, fbo.nodeCache, []BlockPointer{ptr}, newPtrs, md) if err != nil { return nil, err } n, ok := nodeMap[ptr] if !ok { return nil, NodeNotFoundError{ptr} } return n, nil } func (fbo *folderBranchOps) unlinkFromCache(op op, oldDir BlockPointer, node Node, name string) error { // The entry could be under any one of the unref'd blocks, and // it's safe to perform this when the pointer isn't real, so just // try them all to avoid the overhead of looking up the right // pointer in the old version of the block. p, err := fbo.pathFromNodeForRead(node) if err != nil { return err } childPath := p.ChildPathNoPtr(name) // revert the parent pointer childPath.path[len(childPath.path)-2].BlockPointer = oldDir for _, ptr := range op.Unrefs() { childPath.path[len(childPath.path)-1].BlockPointer = ptr fbo.nodeCache.Unlink(ptr.ref(), childPath) } return nil } func (fbo *folderBranchOps) notifyOneOpLocked(ctx context.Context, lState *lockState, op op, md ImmutableRootMetadata) { fbo.headLock.AssertLocked(lState) fbo.blocks.UpdatePointers(lState, op) var changes []NodeChange switch realOp := op.(type) { default: return case *createOp: node := fbo.nodeCache.Get(realOp.Dir.Ref.ref()) if node == nil { return } fbo.log.CDebugf(ctx, "notifyOneOp: create %s in node %p", realOp.NewName, node.GetID()) changes = append(changes, NodeChange{ Node: node, DirUpdated: []string{realOp.NewName}, }) case *rmOp: node := fbo.nodeCache.Get(realOp.Dir.Ref.ref()) if node == nil { return } fbo.log.CDebugf(ctx, "notifyOneOp: remove %s in node %p", realOp.OldName, node.GetID()) changes = append(changes, NodeChange{ Node: node, DirUpdated: []string{realOp.OldName}, }) // If this node exists, then the child node might exist too, // and we need to unlink it in the node cache. err := fbo.unlinkFromCache(op, realOp.Dir.Unref, node, realOp.OldName) if err != nil { fbo.log.CErrorf(ctx, "Couldn't unlink from cache: %v", err) return } case *renameOp: oldNode := fbo.nodeCache.Get(realOp.OldDir.Ref.ref()) if oldNode != nil { changes = append(changes, NodeChange{ Node: oldNode, DirUpdated: []string{realOp.OldName}, }) } var newNode Node if realOp.NewDir.Ref != zeroPtr { newNode = fbo.nodeCache.Get(realOp.NewDir.Ref.ref()) if newNode != nil { changes = append(changes, NodeChange{ Node: newNode, DirUpdated: []string{realOp.NewName}, }) } } else { newNode = oldNode if oldNode != nil { // Add another name to the existing NodeChange. changes[len(changes)-1].DirUpdated = append(changes[len(changes)-1].DirUpdated, realOp.NewName) } } if oldNode != nil { var newNodeID NodeID if newNode != nil { newNodeID = newNode.GetID() } fbo.log.CDebugf(ctx, "notifyOneOp: rename %v from %s/%p to %s/%p", realOp.Renamed, realOp.OldName, oldNode.GetID(), realOp.NewName, newNodeID) if newNode == nil { if childNode := fbo.nodeCache.Get(realOp.Renamed.ref()); childNode != nil { // if the childNode exists, we still have to update // its path to go through the new node. That means // creating nodes for all the intervening paths. // Unfortunately we don't have enough information to // know what the newPath is; we have to guess it from // the updates. 
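// searchForNode below reconstructs that path using only the
// pointers touched by this MD update.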
var err error newNode, err = fbo.searchForNode(ctx, realOp.NewDir.Ref, md.ReadOnly()) if newNode == nil { fbo.log.CErrorf(ctx, "Couldn't find the new node: %v", err) } } } if newNode != nil { // If new node exists as well, unlink any previously // existing entry and move the node. var unrefPtr BlockPointer if oldNode != newNode { unrefPtr = realOp.NewDir.Unref } else { unrefPtr = realOp.OldDir.Unref } err := fbo.unlinkFromCache(op, unrefPtr, newNode, realOp.NewName) if err != nil { fbo.log.CErrorf(ctx, "Couldn't unlink from cache: %v", err) return } err = fbo.nodeCache.Move(realOp.Renamed.ref(), newNode, realOp.NewName) if err != nil { fbo.log.CErrorf(ctx, "Couldn't move node in cache: %v", err) return } } } case *syncOp: node := fbo.nodeCache.Get(realOp.File.Ref.ref()) if node == nil { return } fbo.log.CDebugf(ctx, "notifyOneOp: sync %d writes in node %p", len(realOp.Writes), node.GetID()) changes = append(changes, NodeChange{ Node: node, FileUpdated: realOp.Writes, }) case *setAttrOp: node := fbo.nodeCache.Get(realOp.Dir.Ref.ref()) if node == nil { return } fbo.log.CDebugf(ctx, "notifyOneOp: setAttr %s for file %s in node %p", realOp.Attr, realOp.Name, node.GetID()) p, err := fbo.pathFromNodeForRead(node) if err != nil { return } childNode, err := fbo.blocks.UpdateCachedEntryAttributes( ctx, lState, md.ReadOnly(), p, realOp) if err != nil { // TODO: Log error? return } if childNode == nil { return } changes = append(changes, NodeChange{ Node: childNode, }) case *gcOp: // Unreferenced blocks in a gcOp mean that we shouldn't cache // them anymore bcache := fbo.config.BlockCache() for _, ptr := range realOp.Unrefs() { if err := bcache.DeleteTransient(ptr, fbo.id()); err != nil { fbo.log.CDebugf(ctx, "Couldn't delete transient entry for %v: %v", ptr, err) } } } fbo.observers.batchChanges(ctx, changes) } func (fbo *folderBranchOps) getCurrMDRevisionLocked(lState *lockState) MetadataRevision { fbo.headLock.AssertAnyLocked(lState) if fbo.head != (ImmutableRootMetadata{}) { return fbo.head.Revision } return MetadataRevisionUninitialized } func (fbo *folderBranchOps) getCurrMDRevision( lState *lockState) MetadataRevision { fbo.headLock.RLock(lState) defer fbo.headLock.RUnlock(lState) return fbo.getCurrMDRevisionLocked(lState) } type applyMDUpdatesFunc func(context.Context, *lockState, []ImmutableRootMetadata) error func (fbo *folderBranchOps) applyMDUpdatesLocked(ctx context.Context, lState *lockState, rmds []ImmutableRootMetadata) error { fbo.mdWriterLock.AssertLocked(lState) fbo.headLock.Lock(lState) defer fbo.headLock.Unlock(lState) // if we have staged changes, ignore all updates until conflict // resolution kicks in. TODO: cache these for future use. if !fbo.isMasterBranchLocked(lState) { if len(rmds) > 0 { // setHeadLocked takes care of merged case fbo.setLatestMergedRevisionLocked(ctx, lState, rmds[len(rmds)-1].Revision, false) unmergedRev := MetadataRevisionUninitialized if fbo.head != (ImmutableRootMetadata{}) { unmergedRev = fbo.head.Revision } fbo.cr.Resolve(unmergedRev, rmds[len(rmds)-1].Revision) } return UnmergedError{} } // Don't allow updates while we're in the dirty state; the next // sync will put us into an unmerged state anyway and we'll // require conflict resolution. 
if fbo.blocks.GetState(lState) != cleanState { return errors.New("Ignoring MD updates while writes are dirty") } appliedRevs := make([]ImmutableRootMetadata, 0, len(rmds)) for _, rmd := range rmds { // check that we're applying the expected MD revision if rmd.Revision <= fbo.getCurrMDRevisionLocked(lState) { // Already caught up! continue } if err := isReadableOrError(ctx, fbo.config, rmd.ReadOnly()); err != nil { return err } err := fbo.setHeadSuccessorLocked(ctx, lState, rmd, false) if err != nil { return err } // No new operations in these. if rmd.IsWriterMetadataCopiedSet() { continue } for _, op := range rmd.data.Changes.Ops { fbo.notifyOneOpLocked(ctx, lState, op, rmd) } appliedRevs = append(appliedRevs, rmd) } if len(appliedRevs) > 0 { fbo.editHistory.UpdateHistory(ctx, appliedRevs) } return nil } func (fbo *folderBranchOps) undoMDUpdatesLocked(ctx context.Context, lState *lockState, rmds []ImmutableRootMetadata) error { fbo.mdWriterLock.AssertLocked(lState) fbo.headLock.Lock(lState) defer fbo.headLock.Unlock(lState) // Don't allow updates while we're in the dirty state; the next // sync will put us into an unmerged state anyway and we'll // require conflict resolution. if fbo.blocks.GetState(lState) != cleanState { return NotPermittedWhileDirtyError{} } // go backwards through the updates for i := len(rmds) - 1; i >= 0; i-- { rmd := rmds[i] // on undo, it's ok to re-apply the current revision since you // need to invert all of its ops. // // This duplicates a check in // fbo.setHeadPredecessorLocked. TODO: Remove this // duplication. if rmd.Revision != fbo.getCurrMDRevisionLocked(lState) && rmd.Revision != fbo.getCurrMDRevisionLocked(lState)-1 { return MDUpdateInvertError{rmd.Revision, fbo.getCurrMDRevisionLocked(lState)} } // TODO: Check that the revisions are equal only for // the first iteration. if rmd.Revision < fbo.getCurrMDRevisionLocked(lState) { err := fbo.setHeadPredecessorLocked(ctx, lState, rmd) if err != nil { return err } } // iterate the ops in reverse and invert each one ops := rmd.data.Changes.Ops for j := len(ops) - 1; j >= 0; j-- { io, err := invertOpForLocalNotifications(ops[j]) if err != nil { fbo.log.CWarningf(ctx, "got error %v when invert op %v; "+ "skipping. Open file handles "+ "may now be in an invalid "+ "state, which can be fixed by "+ "either closing them all or "+ "restarting KBFS.", err, ops[j]) continue } fbo.notifyOneOpLocked(ctx, lState, io, rmd) } } // TODO: update the edit history? return nil } func (fbo *folderBranchOps) applyMDUpdates(ctx context.Context, lState *lockState, rmds []ImmutableRootMetadata) error { fbo.mdWriterLock.Lock(lState) defer fbo.mdWriterLock.Unlock(lState) return fbo.applyMDUpdatesLocked(ctx, lState, rmds) } func (fbo *folderBranchOps) getLatestMergedRevision(lState *lockState) MetadataRevision { fbo.headLock.RLock(lState) defer fbo.headLock.RUnlock(lState) return fbo.latestMergedRevision } // caller should have held fbo.headLock func (fbo *folderBranchOps) setLatestMergedRevisionLocked(ctx context.Context, lState *lockState, rev MetadataRevision, allowBackward bool) { fbo.headLock.AssertLocked(lState) if fbo.latestMergedRevision < rev || allowBackward { fbo.latestMergedRevision = rev fbo.log.CDebugf(ctx, "Updated latestMergedRevision to %d.", rev) } else { fbo.log.CDebugf(ctx, "Local latestMergedRevision (%d) is higher than "+ "the new revision (%d); won't update.", fbo.latestMergedRevision, rev) } } // Assumes all necessary locking is either already done by caller, or // is done by applyFunc. 
func (fbo *folderBranchOps) getAndApplyMDUpdates(ctx context.Context, lState *lockState, applyFunc applyMDUpdatesFunc) error { // first look up all MD revisions newer than my current head start := fbo.getLatestMergedRevision(lState) + 1 rmds, err := getMergedMDUpdates(ctx, fbo.config, fbo.id(), start) if err != nil { return err } err = applyFunc(ctx, lState, rmds) if err != nil { return err } return nil } func (fbo *folderBranchOps) getAndApplyNewestUnmergedHead(ctx context.Context, lState *lockState) error { fbo.log.CDebugf(ctx, "Fetching the newest unmerged head") bid := func() BranchID { fbo.mdWriterLock.Lock(lState) defer fbo.mdWriterLock.Unlock(lState) return fbo.bid }() // We can only ever be at most one revision behind, so fetch the // latest unmerged revision and apply it as a successor. md, err := fbo.config.MDOps().GetUnmergedForTLF(ctx, fbo.id(), bid) if err != nil { return err } if md == (ImmutableRootMetadata{}) { // There is no unmerged revision, oops! return errors.New("Couldn't find an unmerged head") } fbo.mdWriterLock.Lock(lState) defer fbo.mdWriterLock.Unlock(lState) if fbo.bid != bid { // The branches switched (apparently CR completed), so just // try again. fbo.log.CDebugf(ctx, "Branches switched while fetching unmerged head") return nil } fbo.headLock.Lock(lState) defer fbo.headLock.Unlock(lState) if err := fbo.setHeadSuccessorLocked(ctx, lState, md, false); err != nil { return err } fbo.notifyBatchLocked(ctx, lState, md) if err := fbo.config.MDCache().Put(md); err != nil { return err } return nil } // getUnmergedMDUpdates returns a slice of the unmerged MDs for this // TLF's current unmerged branch, between the merge point for the // branch and the current head. The returned MDs are the same // instances that are stored in the MD cache, so they should be // modified with care. func (fbo *folderBranchOps) getUnmergedMDUpdates( ctx context.Context, lState *lockState) ( MetadataRevision, []ImmutableRootMetadata, error) { // acquire mdWriterLock to read the current branch ID. bid := func() BranchID { fbo.mdWriterLock.Lock(lState) defer fbo.mdWriterLock.Unlock(lState) return fbo.bid }() return getUnmergedMDUpdates(ctx, fbo.config, fbo.id(), bid, fbo.getCurrMDRevision(lState)) } func (fbo *folderBranchOps) getUnmergedMDUpdatesLocked( ctx context.Context, lState *lockState) ( MetadataRevision, []ImmutableRootMetadata, error) { fbo.mdWriterLock.AssertLocked(lState) return getUnmergedMDUpdates(ctx, fbo.config, fbo.id(), fbo.bid, fbo.getCurrMDRevision(lState)) } // Returns a list of block pointers that were created during the // staged era. func (fbo *folderBranchOps) undoUnmergedMDUpdatesLocked( ctx context.Context, lState *lockState) ([]BlockPointer, error) { fbo.mdWriterLock.AssertLocked(lState) currHead, unmergedRmds, err := fbo.getUnmergedMDUpdatesLocked(ctx, lState) if err != nil { return nil, err } err = fbo.undoMDUpdatesLocked(ctx, lState, unmergedRmds) if err != nil { return nil, err } // We have arrived at the branch point. The new root is // the previous revision from the current head. Find it // and apply. TODO: somehow fake the current head into // being currHead-1, so that future calls to // applyMDUpdates will fetch this along with the rest of // the updates.
fbo.setBranchIDLocked(lState, NullBranchID) rmds, err := getMDRange(ctx, fbo.config, fbo.id(), NullBranchID, currHead, currHead, Merged) if err != nil { return nil, err } if len(rmds) == 0 { return nil, fmt.Errorf("Couldn't find the branch point %d", currHead) } err = func() error { fbo.headLock.Lock(lState) defer fbo.headLock.Unlock(lState) err = fbo.setHeadPredecessorLocked(ctx, lState, rmds[0]) if err != nil { return err } fbo.setLatestMergedRevisionLocked(ctx, lState, rmds[0].Revision, true) return nil }() if err != nil { return nil, err } // Return all new refs var unmergedPtrs []BlockPointer for _, rmd := range unmergedRmds { for _, op := range rmd.data.Changes.Ops { for _, ptr := range op.Refs() { if ptr != zeroPtr { unmergedPtrs = append(unmergedPtrs, ptr) } } for _, update := range op.AllUpdates() { if update.Ref != zeroPtr { unmergedPtrs = append(unmergedPtrs, update.Ref) } } } } return unmergedPtrs, nil } func (fbo *folderBranchOps) unstageLocked(ctx context.Context, lState *lockState) error { fbo.mdWriterLock.AssertLocked(lState) // fetch all of my unstaged updates, and undo them one at a time bid, wasMasterBranch := fbo.bid, fbo.isMasterBranchLocked(lState) unmergedPtrs, err := fbo.undoUnmergedMDUpdatesLocked(ctx, lState) if err != nil { return err } // let the server know we no longer have need if !wasMasterBranch { err = fbo.config.MDOps().PruneBranch(ctx, fbo.id(), bid) if err != nil { return err } } // now go forward in time, if possible err = fbo.getAndApplyMDUpdates(ctx, lState, fbo.applyMDUpdatesLocked) if err != nil { return err } md, err := fbo.getMDForWriteLocked(ctx, lState) if err != nil { return err } // Finally, create a resolutionOp with the newly-unref'd pointers. resOp := newResolutionOp() for _, ptr := range unmergedPtrs { resOp.AddUnrefBlock(ptr) } md.AddOp(resOp) return fbo.finalizeMDWriteLocked(ctx, lState, md, &blockPutState{}, NoExcl) } // TODO: remove once we have automatic conflict resolution func (fbo *folderBranchOps) UnstageForTesting( ctx context.Context, folderBranch FolderBranch) (err error) { fbo.log.CDebugf(ctx, "UnstageForTesting") defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }() if folderBranch != fbo.folderBranch { return WrongOpsError{fbo.folderBranch, folderBranch} } return runUnlessCanceled(ctx, func() error { lState := makeFBOLockState() if fbo.isMasterBranch(lState) { // no-op return nil } if fbo.blocks.GetState(lState) != cleanState { return NotPermittedWhileDirtyError{} } // launch unstaging in a new goroutine, because we don't want to // use the provided context because upper layers might ignore our // notifications if we do. But we still want to wait for the // context to cancel. c := make(chan error, 1) ctxWithTags := fbo.ctxWithFBOID(context.Background()) freshCtx, cancel := context.WithCancel(ctxWithTags) defer cancel() fbo.log.CDebugf(freshCtx, "Launching new context for UnstageForTesting") go func() { lState := makeFBOLockState() c <- fbo.doMDWriteWithRetry(ctx, lState, func(lState *lockState) error { return fbo.unstageLocked(freshCtx, lState) }) }() select { case err := <-c: return err case <-ctx.Done(): return ctx.Err() } }) } // mdWriterLock must be taken by the caller. 
func (fbo *folderBranchOps) rekeyLocked(ctx context.Context, lState *lockState, promptPaper bool) (err error) { fbo.mdWriterLock.AssertLocked(lState) if !fbo.isMasterBranchLocked(lState) { return errors.New("Can't rekey while staged.") } head := fbo.getHead(lState) if head != (ImmutableRootMetadata{}) { // If we already have a cached revision, make sure we're // up-to-date with the latest revision before inspecting the // metadata, since Rekey doesn't let us go into CR mode, and // we don't actually get folder update notifications when the // rekey bit is set, just a "folder needs rekey" update. if err := fbo.getAndApplyMDUpdates( ctx, lState, fbo.applyMDUpdatesLocked); err != nil { if applyErr, ok := err.(MDRevisionMismatch); !ok || applyErr.rev != applyErr.curr { return err } } } md, rekeyWasSet, err := fbo.getMDForRekeyWriteLocked(ctx, lState) if err != nil { return err } if fbo.rekeyWithPromptTimer != nil { if !promptPaper { fbo.log.CDebugf(ctx, "rekeyWithPrompt superseded before it fires.") } else if !md.IsRekeySet() { fbo.rekeyWithPromptTimer.Stop() fbo.rekeyWithPromptTimer = nil // If the rekey bit isn't set, then some other device // already took care of our request, and we can stop // early. Note that if this FBO never registered for // updates, then we might not yet have seen the update, in // which case we'll still try to rekey but it will fail as // a conflict. fbo.log.CDebugf(ctx, "rekeyWithPrompt not needed because the "+ "rekey bit was already unset.") return nil } } rekeyDone, tlfCryptKey, err := fbo.config.KeyManager(). Rekey(ctx, md, promptPaper) stillNeedsRekey := false switch err.(type) { case nil: // TODO: implement a "forced" option that rekeys even when the // devices haven't changed? if !rekeyDone { fbo.log.CDebugf(ctx, "No rekey necessary") return nil } // Clear the rekey bit if any. md.Flags &= ^MetadataFlagRekey md.clearLastRevision() case RekeyIncompleteError: if !rekeyDone && rekeyWasSet { // The rekey bit was already set, and there's nothing else // we can to do, so don't put any new revisions. fbo.log.CDebugf(ctx, "No further rekey possible by this user.") return nil } // Rekey incomplete, fallthrough without early exit, to ensure // we write the metadata with any potential changes fbo.log.CDebugf(ctx, "Rekeyed reader devices, but still need writer rekey") case NeedOtherRekeyError: stillNeedsRekey = true case NeedSelfRekeyError: stillNeedsRekey = true default: if err == context.DeadlineExceeded { fbo.log.CDebugf(ctx, "Paper key prompt timed out") // Reschedule the prompt in the timeout case. stillNeedsRekey = true } else { return err } } if stillNeedsRekey { fbo.log.CDebugf(ctx, "Device doesn't have access to rekey") // If we didn't have read access, then we don't have any // unlocked paper keys. Wait for some time, and then if we // still aren't rekeyed, try again but this time prompt the // user for any known paper keys. We do this even if the // rekey bit is already set, since we may have restarted since // the previous rekey attempt, before prompting for the paper // key. Only schedule this as a one-time event, since direct // folder accesses from the user will also cause a // rekeyWithPrompt. // // Only ever set the timer once. 
if fbo.rekeyWithPromptTimer == nil { d := fbo.config.RekeyWithPromptWaitTime() fbo.log.CDebugf(ctx, "Scheduling a rekeyWithPrompt in %s", d) fbo.rekeyWithPromptTimer = time.AfterFunc(d, fbo.rekeyWithPrompt) } if rekeyWasSet { // Devices not yet keyed shouldn't set the rekey bit again fbo.log.CDebugf(ctx, "Rekey bit already set") return nil } // This device hasn't been keyed yet, fall through to set the rekey bit } // add an empty operation to satisfy assumptions elsewhere md.AddOp(newRekeyOp()) // we still let readers push a new md block that we validate against reader // permissions err = fbo.finalizeMDRekeyWriteLocked(ctx, lState, md) if err != nil { return err } // cache any new TLF crypt key if tlfCryptKey != nil { keyGen := md.LatestKeyGeneration() err = fbo.config.KeyCache().PutTLFCryptKey(md.ID, keyGen, *tlfCryptKey) if err != nil { return err } } // send rekey finish notification handle := md.GetTlfHandle() fbo.config.Reporter().Notify(ctx, rekeyNotification(ctx, fbo.config, handle, true)) if !stillNeedsRekey && fbo.rekeyWithPromptTimer != nil { fbo.log.CDebugf(ctx, "Scheduled rekey timer no longer needed") fbo.rekeyWithPromptTimer.Stop() fbo.rekeyWithPromptTimer = nil } return nil } func (fbo *folderBranchOps) rekeyWithPrompt() { var err error ctx := ctxWithRandomID(context.Background(), CtxRekeyIDKey, CtxRekeyOpID, fbo.log) // Only give the user limited time to enter their paper key, so we // don't wait around forever. d := fbo.config.RekeyWithPromptWaitTime() ctx, cancel := context.WithTimeout(ctx, d) defer cancel() fbo.log.CDebugf(ctx, "rekeyWithPrompt") defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }() err = fbo.doMDWriteWithRetryUnlessCanceled(ctx, func(lState *lockState) error { return fbo.rekeyLocked(ctx, lState, true) }) } // Rekey rekeys the given folder. func (fbo *folderBranchOps) Rekey(ctx context.Context, tlf TlfID) (err error) { fbo.log.CDebugf(ctx, "Rekey") defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }() fb := FolderBranch{tlf, MasterBranch} if fb != fbo.folderBranch { return WrongOpsError{fbo.folderBranch, fb} } return fbo.doMDWriteWithRetryUnlessCanceled(ctx, func(lState *lockState) error { return fbo.rekeyLocked(ctx, lState, false) }) } func (fbo *folderBranchOps) SyncFromServerForTesting( ctx context.Context, folderBranch FolderBranch) (err error) { fbo.log.CDebugf(ctx, "SyncFromServerForTesting") defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }() if folderBranch != fbo.folderBranch { return WrongOpsError{fbo.folderBranch, folderBranch} } lState := makeFBOLockState() if !fbo.isMasterBranch(lState) { if err := fbo.cr.Wait(ctx); err != nil { return err } // If we are still staged after the wait, then we have a problem. if !fbo.isMasterBranch(lState) { return fmt.Errorf("Conflict resolution didn't take us out of " + "staging.") } } dirtyRefs := fbo.blocks.GetDirtyRefs(lState) if len(dirtyRefs) > 0 { for _, ref := range dirtyRefs { fbo.log.CDebugf(ctx, "DeCache entry left: %v", ref) } return errors.New("Can't sync from server while dirty.") } if err := fbo.getAndApplyMDUpdates(ctx, lState, fbo.applyMDUpdates); err != nil { if applyErr, ok := err.(MDRevisionMismatch); ok { if applyErr.rev == applyErr.curr { fbo.log.CDebugf(ctx, "Already up-to-date with server") return nil } } return err } // Wait for all the asynchronous block archiving and quota // reclamation to hit the block server. 
if err := fbo.fbm.waitForArchives(ctx); err != nil { return err } if err := fbo.fbm.waitForDeletingBlocks(ctx); err != nil { return err } if err := fbo.editHistory.Wait(ctx); err != nil { return err } return fbo.fbm.waitForQuotaReclamations(ctx) } // CtxFBOTagKey is the type used for unique context tags within folderBranchOps type CtxFBOTagKey int const ( // CtxFBOIDKey is the type of the tag for unique operation IDs // within folderBranchOps. CtxFBOIDKey CtxFBOTagKey = iota ) // CtxFBOOpID is the display name for the unique operation // folderBranchOps ID tag. const CtxFBOOpID = "FBOID" func (fbo *folderBranchOps) ctxWithFBOID(ctx context.Context) context.Context { return ctxWithRandomID(ctx, CtxFBOIDKey, CtxFBOOpID, fbo.log) } // Run the passed function with a context that's canceled on shutdown. func (fbo *folderBranchOps) runUnlessShutdown(fn func(ctx context.Context) error) error { ctx := fbo.ctxWithFBOID(context.Background()) ctx, cancelFunc := context.WithCancel(ctx) defer cancelFunc() errChan := make(chan error, 1) go func() { errChan <- fn(ctx) }() select { case err := <-errChan: return err case <-fbo.shutdownChan: return ShutdownHappenedError{} } } func (fbo *folderBranchOps) registerAndWaitForUpdates() { defer close(fbo.updateDoneChan) childDone := make(chan struct{}) err := fbo.runUnlessShutdown(func(ctx context.Context) error { defer close(childDone) // If we fail to register for or process updates, try again // with an exponential backoff, so we don't overwhelm the // server or ourselves with too many attempts in a hopeless // situation. expBackoff := backoff.NewExponentialBackOff() // Never give up hope until we shut down expBackoff.MaxElapsedTime = 0 // Register and wait in a loop unless we hit an unrecoverable error for { err := backoff.RetryNotifyWithContext(ctx, func() error { // Replace the FBOID one with a fresh id for every attempt newCtx := fbo.ctxWithFBOID(ctx) updateChan, err := fbo.registerForUpdates(newCtx) if err != nil { select { case <-ctx.Done(): // Shortcut the retry, we're done. return nil default: return err } } err = fbo.waitForAndProcessUpdates(newCtx, updateChan) if _, ok := err.(UnmergedError); ok { // skip the back-off timer and continue directly to next // registerForUpdates return nil } select { case <-ctx.Done(): // Shortcut the retry, we're done. 
return nil default: return err } }, expBackoff, func(err error, nextTime time.Duration) { fbo.log.CDebugf(ctx, "Retrying registerForUpdates in %s due to err: %v", nextTime, err) }) if err != nil { return err } } }) if err != nil && err != context.Canceled { fbo.log.CWarningf(context.Background(), "registerAndWaitForUpdates failed unexpectedly with an error: %v", err) } <-childDone } func (fbo *folderBranchOps) registerForUpdates(ctx context.Context) ( updateChan <-chan error, err error) { lState := makeFBOLockState() currRev := fbo.getCurrMDRevision(lState) fbo.log.CDebugf(ctx, "Registering for updates (curr rev = %d)", currRev) defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }() // RegisterForUpdate will itself retry on connectivity issues return fbo.config.MDServer().RegisterForUpdate(ctx, fbo.id(), fbo.getLatestMergedRevision(lState)) } func (fbo *folderBranchOps) waitForAndProcessUpdates( ctx context.Context, updateChan <-chan error) (err error) { // successful registration; now, wait for an update or a shutdown fbo.log.CDebugf(ctx, "Waiting for updates") defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }() lState := makeFBOLockState() for { select { case err := <-updateChan: fbo.log.CDebugf(ctx, "Got an update: %v", err) if err != nil { return err } // Getting and applying the updates requires holding // locks, so make sure it doesn't take too long. ctx, cancel := context.WithTimeout(ctx, backgroundTaskTimeout) defer cancel() err = fbo.getAndApplyMDUpdates(ctx, lState, fbo.applyMDUpdates) if err != nil { fbo.log.CDebugf(ctx, "Got an error while applying "+ "updates: %v", err) return err } return nil case unpause := <-fbo.updatePauseChan: fbo.log.CInfof(ctx, "Updates paused") // wait to be unpaused select { case <-unpause: fbo.log.CInfof(ctx, "Updates unpaused") case <-ctx.Done(): return ctx.Err() } case <-ctx.Done(): return ctx.Err() } } } func (fbo *folderBranchOps) backgroundFlusher(betweenFlushes time.Duration) { ticker := time.NewTicker(betweenFlushes) defer ticker.Stop() lState := makeFBOLockState() for { doSelect := true if fbo.blocks.GetState(lState) == dirtyState && fbo.config.DirtyBlockCache().ShouldForceSync() { // We have dirty files, and the system has a full buffer, // so don't bother waiting for a signal, just get right to // the main attraction. doSelect = false } if doSelect { select { case <-ticker.C: case <-fbo.forceSyncChan: case <-fbo.shutdownChan: return } } dirtyRefs := fbo.blocks.GetDirtyRefs(lState) fbo.runUnlessShutdown(func(ctx context.Context) (err error) { // Denote that these are coming from a background // goroutine, not directly from any user. ctx = context.WithValue(ctx, CtxBackgroundSyncKey, "1") // Just in case network access or a bug gets stuck for a // long time, time out the sync eventually. longCtx, longCancel := context.WithTimeout(ctx, backgroundTaskTimeout) defer longCancel() // Make sure this loop doesn't starve user requests for // too long. But use the longer-timeout version in the // actual Sync command, to avoid unnecessary errors. shortCtx, shortCancel := context.WithTimeout(ctx, 1*time.Second) defer shortCancel() for _, ref := range dirtyRefs { select { case <-shortCtx.Done(): fbo.log.CDebugf(ctx, "Stopping background sync early due to timeout") return nil default: } node := fbo.nodeCache.Get(ref) if node == nil { continue } err := fbo.Sync(longCtx, node) if err != nil { // Just log the warning and keep trying to // sync the rest of the dirty files. 
p := fbo.nodeCache.PathFromNode(node) fbo.log.CWarningf(ctx, "Couldn't sync dirty file with "+ "ref=%v, nodeID=%p, and path=%v: %v", ref, node.GetID(), p, err) } } return nil }) } } func (fbo *folderBranchOps) blockUnmergedWrites(lState *lockState) { fbo.mdWriterLock.Lock(lState) } func (fbo *folderBranchOps) unblockUnmergedWrites(lState *lockState) { fbo.mdWriterLock.Unlock(lState) } func (fbo *folderBranchOps) finalizeResolutionLocked(ctx context.Context, lState *lockState, md *RootMetadata, bps *blockPutState, newOps []op) error { fbo.mdWriterLock.AssertLocked(lState) // Put the blocks into the cache so that, even if we fail below, // future attempts may reuse the blocks. err := fbo.finalizeBlocks(bps) if err != nil { return err } // Last chance to get pre-empted. select { case <-ctx.Done(): return ctx.Err() default: } // Put the MD. If there's a conflict, abort the whole process and // let CR restart itself. mdID, err := fbo.config.MDOps().Put(ctx, md) doUnmergedPut := isRevisionConflict(err) if doUnmergedPut { fbo.log.CDebugf(ctx, "Got a conflict after resolution; aborting CR") return err } if err != nil { return err } err = fbo.config.MDOps().PruneBranch(ctx, fbo.id(), fbo.bid) if err != nil { return err } // Queue a rekey if the bit was set. if md.IsRekeySet() { defer fbo.config.RekeyQueue().Enqueue(md.ID) } // Set the head to the new MD. fbo.headLock.Lock(lState) defer fbo.headLock.Unlock(lState) irmd := MakeImmutableRootMetadata(md, mdID, fbo.config.Clock().Now()) err = fbo.setHeadConflictResolvedLocked(ctx, lState, irmd) if err != nil { fbo.log.CWarningf(ctx, "Couldn't set local MD head after a "+ "successful put: %v", err) return err } fbo.setBranchIDLocked(lState, NullBranchID) // Archive the old, unref'd blocks fbo.fbm.archiveUnrefBlocks(irmd.ReadOnly()) // notifyOneOp for every fixed-up merged op. for _, op := range newOps { fbo.notifyOneOpLocked(ctx, lState, op, irmd) } fbo.editHistory.UpdateHistory(ctx, []ImmutableRootMetadata{irmd}) return nil } // finalizeResolution caches all the blocks, and writes the new MD to // the merged branch, failing if there is a conflict. It also sends // out the given newOps notifications locally. This is used for // completing conflict resolution. func (fbo *folderBranchOps) finalizeResolution(ctx context.Context, lState *lockState, md *RootMetadata, bps *blockPutState, newOps []op) error { // Take the writer lock. fbo.mdWriterLock.Lock(lState) defer fbo.mdWriterLock.Unlock(lState) return fbo.finalizeResolutionLocked(ctx, lState, md, bps, newOps) } func (fbo *folderBranchOps) unstageAfterFailedResolution(ctx context.Context, lState *lockState) error { // Take the writer lock. fbo.mdWriterLock.Lock(lState) defer fbo.mdWriterLock.Unlock(lState) // Last chance to get pre-empted. 
select { case <-ctx.Done(): return ctx.Err() default: } fbo.log.CWarningf(ctx, "Unstaging branch %s after a resolution failure", fbo.bid) return fbo.unstageLocked(ctx, lState) } // GetUpdateHistory implements the KBFSOps interface for folderBranchOps func (fbo *folderBranchOps) GetUpdateHistory(ctx context.Context, folderBranch FolderBranch) (history TLFUpdateHistory, err error) { fbo.log.CDebugf(ctx, "GetUpdateHistory") defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }() if folderBranch != fbo.folderBranch { return TLFUpdateHistory{}, WrongOpsError{fbo.folderBranch, folderBranch} } rmds, err := getMergedMDUpdates(ctx, fbo.config, fbo.id(), MetadataRevisionInitial) if err != nil { return TLFUpdateHistory{}, err } if len(rmds) > 0 { rmd := rmds[len(rmds)-1] history.ID = rmd.ID.String() history.Name = rmd.GetTlfHandle().GetCanonicalPath() } history.Updates = make([]UpdateSummary, 0, len(rmds)) writerNames := make(map[keybase1.UID]string) for _, rmd := range rmds { writer, ok := writerNames[rmd.LastModifyingWriter] if !ok { name, err := fbo.config.KBPKI(). GetNormalizedUsername(ctx, rmd.LastModifyingWriter) if err != nil { return TLFUpdateHistory{}, err } writer = string(name) writerNames[rmd.LastModifyingWriter] = writer } updateSummary := UpdateSummary{ Revision: rmd.Revision, Date: time.Unix(0, rmd.data.Dir.Mtime), Writer: writer, LiveBytes: rmd.DiskUsage, Ops: make([]OpSummary, 0, len(rmd.data.Changes.Ops)), } for _, op := range rmd.data.Changes.Ops { opSummary := OpSummary{ Op: op.String(), Refs: make([]string, 0, len(op.Refs())), Unrefs: make([]string, 0, len(op.Unrefs())), Updates: make(map[string]string), } for _, ptr := range op.Refs() { opSummary.Refs = append(opSummary.Refs, ptr.String()) } for _, ptr := range op.Unrefs() { opSummary.Unrefs = append(opSummary.Unrefs, ptr.String()) } for _, update := range op.AllUpdates() { opSummary.Updates[update.Unref.String()] = update.Ref.String() } updateSummary.Ops = append(updateSummary.Ops, opSummary) } history.Updates = append(history.Updates, updateSummary) } return history, nil } // GetEditHistory implements the KBFSOps interface for folderBranchOps func (fbo *folderBranchOps) GetEditHistory(ctx context.Context, folderBranch FolderBranch) (edits TlfWriterEdits, err error) { fbo.log.CDebugf(ctx, "GetEditHistory") defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }() if folderBranch != fbo.folderBranch { return nil, WrongOpsError{fbo.folderBranch, folderBranch} } lState := makeFBOLockState() head, err := fbo.getMDForReadHelper(ctx, lState, mdReadNeedIdentify) if err != nil { return nil, err } return fbo.editHistory.GetComplete(ctx, head) } // PushConnectionStatusChange pushes human readable connection status changes. func (fbo *folderBranchOps) PushConnectionStatusChange(service string, newStatus error) { fbo.config.KBFSOps().PushConnectionStatusChange(service, newStatus) }
1
11838
Might as well fix these bare returns by making them `return errors.New(...)` directly.
keybase-kbfs
go
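The review note two fields above asks for bare returns to be replaced with a direct error construction. A minimal sketch of that pattern in Go, for illustration only — the function names, package name, and message below are hypothetical and are not taken from the kbfs patch:

package kbfsexample

import "errors"

// Before: a named error return plus a bare "return" obscures where the
// error value is actually set.
func findHeadBefore(found bool) (err error) {
	if !found {
		err = errors.New("couldn't find an unmerged head")
		return // bare return
	}
	return
}

// After: construct and return the error directly, as the reviewer suggests.
func findHeadAfter(found bool) error {
	if !found {
		return errors.New("couldn't find an unmerged head")
	}
	return nil
}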
@@ -66,7 +66,7 @@ public class Benchmarks { .sender(Address.ZERO) .value(Wei.ZERO) .apparentValue(Wei.ZERO) - .code(new Code(Bytes.EMPTY)) + .code(new Code(Bytes.EMPTY, org.hyperledger.besu.datatypes.Hash.EMPTY)) .depth(1) .completer(__ -> {}) .address(Address.ZERO)
1
/* * Copyright ConsenSys AG. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. * * SPDX-License-Identifier: Apache-2.0 * */ package org.hyperledger.besu.evm.precompile; import static java.nio.charset.StandardCharsets.UTF_8; import static org.hyperledger.besu.crypto.Hash.keccak256; import static org.mockito.Mockito.mock; import org.hyperledger.besu.crypto.Hash; import org.hyperledger.besu.crypto.KeyPair; import org.hyperledger.besu.crypto.SECPPrivateKey; import org.hyperledger.besu.crypto.SECPSignature; import org.hyperledger.besu.crypto.SignatureAlgorithm; import org.hyperledger.besu.crypto.SignatureAlgorithmFactory; import org.hyperledger.besu.datatypes.Address; import org.hyperledger.besu.datatypes.Wei; import org.hyperledger.besu.evm.Code; import org.hyperledger.besu.evm.Gas; import org.hyperledger.besu.evm.frame.BlockValues; import org.hyperledger.besu.evm.frame.MessageFrame; import org.hyperledger.besu.evm.gascalculator.BerlinGasCalculator; import org.hyperledger.besu.evm.gascalculator.IstanbulGasCalculator; import org.hyperledger.besu.evm.worldstate.WorldUpdater; import java.math.BigInteger; import java.util.ArrayDeque; import java.util.Map; import java.util.Random; import java.util.concurrent.TimeUnit; import com.google.common.base.Stopwatch; import com.google.common.collect.ImmutableMap; import org.apache.tuweni.bytes.Bytes; import org.apache.tuweni.bytes.Bytes32; public class Benchmarks { static final Random random = new Random(); static final long GAS_PER_SECOND_STANDARD = 35_000_000L; static final int HASH_WARMUP = 1_000_000; static final int HASH_ITERATIONS = 10_000; static final int MATH_WARMUP = 10_000; static final int MATH_ITERATIONS = 1_000; static final MessageFrame fakeFrame = MessageFrame.builder() .type(MessageFrame.Type.CONTRACT_CREATION) .contract(Address.ZERO) .inputData(Bytes.EMPTY) .sender(Address.ZERO) .value(Wei.ZERO) .apparentValue(Wei.ZERO) .code(new Code(Bytes.EMPTY)) .depth(1) .completer(__ -> {}) .address(Address.ZERO) .blockHashLookup(n -> null) .blockValues(mock(BlockValues.class)) .gasPrice(Wei.ZERO) .messageFrameStack(new ArrayDeque<>()) .miningBeneficiary(Address.ZERO) .originator(Address.ZERO) .initialGas(Gas.of(100000)) .worldUpdater(mock(WorldUpdater.class)) .build(); public static void benchSecp256k1Recover() { final SignatureAlgorithm signatureAlgorithm = SignatureAlgorithmFactory.getInstance(); final SECPPrivateKey privateKey = signatureAlgorithm.createPrivateKey( new BigInteger("c85ef7d79691fe79573b1a7064c19c1a9819ebdbd1faaab1a8ec92344438aaf4", 16)); final KeyPair keyPair = signatureAlgorithm.createKeyPair(privateKey); final Bytes data = Bytes.wrap("This is an example of a signed message.".getBytes(UTF_8)); final Bytes32 dataHash = keccak256(data); final SECPSignature signature = signatureAlgorithm.sign(dataHash, keyPair); for (int i = 0; i < MATH_WARMUP; i++) { signatureAlgorithm.recoverPublicKeyFromSignature(dataHash, signature); } final Stopwatch timer = Stopwatch.createStarted(); for (int i = 0; i < MATH_ITERATIONS; i++) { 
signatureAlgorithm.recoverPublicKeyFromSignature(dataHash, signature); } timer.stop(); final double elapsed = timer.elapsed(TimeUnit.NANOSECONDS) / 1.0e9D; final double perCall = elapsed / MATH_ITERATIONS; final double gasSpent = perCall * GAS_PER_SECOND_STANDARD; System.out.printf("secp256k1 signature recovery for %,d gas.%n", (int) gasSpent); } public static void benchSha256() { final SHA256PrecompiledContract contract = new SHA256PrecompiledContract(new IstanbulGasCalculator()); final byte[] warmupData = new byte[240]; final Bytes warmupBytes = Bytes.wrap(warmupData); for (int i = 0; i < HASH_WARMUP; i++) { contract.compute(warmupBytes, fakeFrame); } for (int len = 0; len <= 256; len += 8) { final byte[] data = new byte[len]; random.nextBytes(data); final Bytes bytes = Bytes.wrap(data); final Stopwatch timer = Stopwatch.createStarted(); for (int i = 0; i < HASH_ITERATIONS; i++) { contract.compute(bytes, fakeFrame); } timer.stop(); final double elapsed = timer.elapsed(TimeUnit.NANOSECONDS) / 1.0e9D; final double perCall = elapsed / HASH_ITERATIONS; final double gasSpent = perCall * GAS_PER_SECOND_STANDARD; System.out.printf( "sha256 %,d bytes for %,d gas. Charging %,d gas.%n", len, (int) gasSpent, contract.gasRequirement(bytes).asUInt256().toLong()); } } private static void benchKeccak256() { final byte[] warmupData = new byte[240]; final Bytes warmupBytes = Bytes.wrap(warmupData); for (int i = 0; i < HASH_WARMUP; i++) { Hash.keccak256(warmupBytes); } for (int len = 0; len <= 512; len += 8) { final byte[] data = new byte[len]; random.nextBytes(data); final Bytes bytes = Bytes.wrap(data); final Stopwatch timer = Stopwatch.createStarted(); for (int i = 0; i < HASH_ITERATIONS; i++) { Hash.keccak256(bytes); } timer.stop(); final double elapsed = timer.elapsed(TimeUnit.NANOSECONDS) / 1.0e9D; final double perCall = elapsed / HASH_ITERATIONS; final double gasSpent = perCall * GAS_PER_SECOND_STANDARD; System.out.printf("keccak256 %,d bytes for %,d gas.%n", len, (int) gasSpent); } } private static void benchRipeMD() { final RIPEMD160PrecompiledContract contract = new RIPEMD160PrecompiledContract(new IstanbulGasCalculator()); final byte[] warmupData = new byte[240]; final Bytes warmupBytes = Bytes.wrap(warmupData); for (int i = 0; i < HASH_WARMUP; i++) { contract.compute(warmupBytes, fakeFrame); } for (int len = 0; len <= 256; len += 8) { final byte[] data = new byte[len]; random.nextBytes(data); final Bytes bytes = Bytes.wrap(data); final Stopwatch timer = Stopwatch.createStarted(); for (int i = 0; i < HASH_ITERATIONS; i++) { contract.compute(bytes, fakeFrame); } timer.stop(); final double elapsed = timer.elapsed(TimeUnit.NANOSECONDS) / 1.0e9D; final double perCall = elapsed / HASH_ITERATIONS; final double gasSpent = perCall * GAS_PER_SECOND_STANDARD; System.out.printf( "ripemd %,d bytes for %,d gas. 
Charging %,d gas.%n", len, (int) gasSpent, contract.gasRequirement(bytes).asUInt256().toLong()); } } private static void benchModExp() { final Map<String, Bytes> testcases = new ImmutableMap.Builder<String, Bytes>() .put( "eip_example1", Bytes.fromHexString( "0000000000000000000000000000000000000000000000000000000000000001" + "0000000000000000000000000000000000000000000000000000000000000020" + "0000000000000000000000000000000000000000000000000000000000000020" + "03" + "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2e" + "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f")) .put( "eip_example2", Bytes.fromHexString( "0000000000000000000000000000000000000000000000000000000000000000" + "0000000000000000000000000000000000000000000000000000000000000020" + "0000000000000000000000000000000000000000000000000000000000000020" + "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2e" + "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f")) .put( "nagydani-1-square", Bytes.fromHexString( "000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040e09ad9675465c53a109fac66a445c91b292d2bb2c5268addb30cd82f80fcb0033ff97c80a5fc6f39193ae969c6ede6710a6b7ac27078a06d90ef1c72e5c85fb502fc9e1f6beb81516545975218075ec2af118cd8798df6e08a147c60fd6095ac2bb02c2908cf4dd7c81f11c289e4bce98f3553768f392a80ce22bf5c4f4a248c6b")) .put( "nagydani-1-qube", Bytes.fromHexString( "000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040e09ad9675465c53a109fac66a445c91b292d2bb2c5268addb30cd82f80fcb0033ff97c80a5fc6f39193ae969c6ede6710a6b7ac27078a06d90ef1c72e5c85fb503fc9e1f6beb81516545975218075ec2af118cd8798df6e08a147c60fd6095ac2bb02c2908cf4dd7c81f11c289e4bce98f3553768f392a80ce22bf5c4f4a248c6b")) .put( "nagydani-1-pow0x10001", Bytes.fromHexString( "000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000000000000000000000000000000000040e09ad9675465c53a109fac66a445c91b292d2bb2c5268addb30cd82f80fcb0033ff97c80a5fc6f39193ae969c6ede6710a6b7ac27078a06d90ef1c72e5c85fb5010001fc9e1f6beb81516545975218075ec2af118cd8798df6e08a147c60fd6095ac2bb02c2908cf4dd7c81f11c289e4bce98f3553768f392a80ce22bf5c4f4a248c6b")) .put( "nagydani-2-square", Bytes.fromHexString( "000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000080cad7d991a00047dd54d3399b6b0b937c718abddef7917c75b6681f40cc15e2be0003657d8d4c34167b2f0bbbca0ccaa407c2a6a07d50f1517a8f22979ce12a81dcaf707cc0cebfc0ce2ee84ee7f77c38b9281b9822a8d3de62784c089c9b18dcb9a2a5eecbede90ea788a862a9ddd9d609c2c52972d63e289e28f6a590ffbf5102e6d893b80aeed5e6e9ce9afa8a5d5675c93a32ac05554cb20e9951b2c140e3ef4e433068cf0fb73bc9f33af1853f64aa27a0028cbf570d7ac9048eae5dc7b28c87c31e5810f1e7fa2cda6adf9f1076dbc1ec1238560071e7efc4e9565c49be9e7656951985860a558a754594115830bcdb421f741408346dd5997bb01c287087")) .put( "nagydani-2-qube", Bytes.fromHexString( 
"000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000080cad7d991a00047dd54d3399b6b0b937c718abddef7917c75b6681f40cc15e2be0003657d8d4c34167b2f0bbbca0ccaa407c2a6a07d50f1517a8f22979ce12a81dcaf707cc0cebfc0ce2ee84ee7f77c38b9281b9822a8d3de62784c089c9b18dcb9a2a5eecbede90ea788a862a9ddd9d609c2c52972d63e289e28f6a590ffbf5103e6d893b80aeed5e6e9ce9afa8a5d5675c93a32ac05554cb20e9951b2c140e3ef4e433068cf0fb73bc9f33af1853f64aa27a0028cbf570d7ac9048eae5dc7b28c87c31e5810f1e7fa2cda6adf9f1076dbc1ec1238560071e7efc4e9565c49be9e7656951985860a558a754594115830bcdb421f741408346dd5997bb01c287087")) .put( "nagydani-2-pow0x10001", Bytes.fromHexString( "000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000000000000000000000000000000000080cad7d991a00047dd54d3399b6b0b937c718abddef7917c75b6681f40cc15e2be0003657d8d4c34167b2f0bbbca0ccaa407c2a6a07d50f1517a8f22979ce12a81dcaf707cc0cebfc0ce2ee84ee7f77c38b9281b9822a8d3de62784c089c9b18dcb9a2a5eecbede90ea788a862a9ddd9d609c2c52972d63e289e28f6a590ffbf51010001e6d893b80aeed5e6e9ce9afa8a5d5675c93a32ac05554cb20e9951b2c140e3ef4e433068cf0fb73bc9f33af1853f64aa27a0028cbf570d7ac9048eae5dc7b28c87c31e5810f1e7fa2cda6adf9f1076dbc1ec1238560071e7efc4e9565c49be9e7656951985860a558a754594115830bcdb421f741408346dd5997bb01c287087")) .put( "nagydani-3-square", Bytes.fromHexString( "000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000100c9130579f243e12451760976261416413742bd7c91d39ae087f46794062b8c239f2a74abf3918605a0e046a7890e049475ba7fbb78f5de6490bd22a710cc04d30088179a919d86c2da62cf37f59d8f258d2310d94c24891be2d7eeafaa32a8cb4b0cfe5f475ed778f45907dc8916a73f03635f233f7a77a00a3ec9ca6761a5bbd558a2318ecd0caa1c5016691523e7e1fa267dd35e70c66e84380bdcf7c0582f540174e572c41f81e93da0b757dff0b0fe23eb03aa19af0bdec3afb474216febaacb8d0381e631802683182b0fe72c28392539850650b70509f54980241dc175191a35d967288b532a7a8223ce2440d010615f70df269501944d4ec16fe4a3cb02d7a85909174757835187cb52e71934e6c07ef43b4c46fc30bbcd0bc72913068267c54a4aabebb493922492820babdeb7dc9b1558fcf7bd82c37c82d3147e455b623ab0efa752fe0b3a67ca6e4d126639e645a0bf417568adbb2a6a4eef62fa1fa29b2a5a43bebea1f82193a7dd98eb483d09bb595af1fa9c97c7f41f5649d976aee3e5e59e2329b43b13bea228d4a93f16ba139ccb511de521ffe747aa2eca664f7c9e33da59075cc335afcd2bf3ae09765f01ab5a7c3e3938ec168b74724b5074247d200d9970382f683d6059b94dbc336603d1dfee714e4b447ac2fa1d99ecb4961da2854e03795ed758220312d101e1e3d87d5313a6d052aebde75110363d")) .put( "nagydani-3-qube", Bytes.fromHexString( 
"000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000100c9130579f243e12451760976261416413742bd7c91d39ae087f46794062b8c239f2a74abf3918605a0e046a7890e049475ba7fbb78f5de6490bd22a710cc04d30088179a919d86c2da62cf37f59d8f258d2310d94c24891be2d7eeafaa32a8cb4b0cfe5f475ed778f45907dc8916a73f03635f233f7a77a00a3ec9ca6761a5bbd558a2318ecd0caa1c5016691523e7e1fa267dd35e70c66e84380bdcf7c0582f540174e572c41f81e93da0b757dff0b0fe23eb03aa19af0bdec3afb474216febaacb8d0381e631802683182b0fe72c28392539850650b70509f54980241dc175191a35d967288b532a7a8223ce2440d010615f70df269501944d4ec16fe4a3cb03d7a85909174757835187cb52e71934e6c07ef43b4c46fc30bbcd0bc72913068267c54a4aabebb493922492820babdeb7dc9b1558fcf7bd82c37c82d3147e455b623ab0efa752fe0b3a67ca6e4d126639e645a0bf417568adbb2a6a4eef62fa1fa29b2a5a43bebea1f82193a7dd98eb483d09bb595af1fa9c97c7f41f5649d976aee3e5e59e2329b43b13bea228d4a93f16ba139ccb511de521ffe747aa2eca664f7c9e33da59075cc335afcd2bf3ae09765f01ab5a7c3e3938ec168b74724b5074247d200d9970382f683d6059b94dbc336603d1dfee714e4b447ac2fa1d99ecb4961da2854e03795ed758220312d101e1e3d87d5313a6d052aebde75110363d")) .put( "nagydani-3-pow0x10001", Bytes.fromHexString( "000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000000000000000000000000000000000100c9130579f243e12451760976261416413742bd7c91d39ae087f46794062b8c239f2a74abf3918605a0e046a7890e049475ba7fbb78f5de6490bd22a710cc04d30088179a919d86c2da62cf37f59d8f258d2310d94c24891be2d7eeafaa32a8cb4b0cfe5f475ed778f45907dc8916a73f03635f233f7a77a00a3ec9ca6761a5bbd558a2318ecd0caa1c5016691523e7e1fa267dd35e70c66e84380bdcf7c0582f540174e572c41f81e93da0b757dff0b0fe23eb03aa19af0bdec3afb474216febaacb8d0381e631802683182b0fe72c28392539850650b70509f54980241dc175191a35d967288b532a7a8223ce2440d010615f70df269501944d4ec16fe4a3cb010001d7a85909174757835187cb52e71934e6c07ef43b4c46fc30bbcd0bc72913068267c54a4aabebb493922492820babdeb7dc9b1558fcf7bd82c37c82d3147e455b623ab0efa752fe0b3a67ca6e4d126639e645a0bf417568adbb2a6a4eef62fa1fa29b2a5a43bebea1f82193a7dd98eb483d09bb595af1fa9c97c7f41f5649d976aee3e5e59e2329b43b13bea228d4a93f16ba139ccb511de521ffe747aa2eca664f7c9e33da59075cc335afcd2bf3ae09765f01ab5a7c3e3938ec168b74724b5074247d200d9970382f683d6059b94dbc336603d1dfee714e4b447ac2fa1d99ecb4961da2854e03795ed758220312d101e1e3d87d5313a6d052aebde75110363d")) .put( "nagydani-4-square", Bytes.fromHexString( 
"000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000200db34d0e438249c0ed685c949cc28776a05094e1c48691dc3f2dca5fc3356d2a0663bd376e4712839917eb9a19c670407e2c377a2de385a3ff3b52104f7f1f4e0c7bf7717fb913896693dc5edbb65b760ef1b00e42e9d8f9af17352385e1cd742c9b006c0f669995cb0bb21d28c0aced2892267637b6470d8cee0ab27fc5d42658f6e88240c31d6774aa60a7ebd25cd48b56d0da11209f1928e61005c6eb709f3e8e0aaf8d9b10f7d7e296d772264dc76897ccdddadc91efa91c1903b7232a9e4c3b941917b99a3bc0c26497dedc897c25750af60237aa67934a26a2bc491db3dcc677491944bc1f51d3e5d76b8d846a62db03dedd61ff508f91a56d71028125035c3a44cbb041497c83bf3e4ae2a9613a401cc721c547a2afa3b16a2969933d3626ed6d8a7428648f74122fd3f2a02a20758f7f693892c8fd798b39abac01d18506c45e71432639e9f9505719ee822f62ccbf47f6850f096ff77b5afaf4be7d772025791717dbe5abf9b3f40cff7d7aab6f67e38f62faf510747276e20a42127e7500c444f9ed92baf65ade9e836845e39c4316d9dce5f8e2c8083e2c0acbb95296e05e51aab13b6b8f53f06c9c4276e12b0671133218cc3ea907da3bd9a367096d9202128d14846cc2e20d56fc8473ecb07cecbfb8086919f3971926e7045b853d85a69d026195c70f9f7a823536e2a8f4b3e12e94d9b53a934353451094b8102df3143a0057457d75e8c708b6337a6f5a4fd1a06727acf9fb93e2993c62f3378b37d56c85e7b1e00f0145ebf8e4095bd723166293c60b6ac1252291ef65823c9e040ddad14969b3b340a4ef714db093a587c37766d68b8d6b5016e741587e7e6bf7e763b44f0247e64bae30f994d248bfd20541a333e5b225ef6a61199e301738b1e688f70ec1d7fb892c183c95dc543c3e12adf8a5e8b9ca9d04f9445cced3ab256f29e998e69efaa633a7b60e1db5a867924ccab0a171d9d6e1098dfa15acde9553de599eaa56490c8f411e4985111f3d40bddfc5e301edb01547b01a886550a61158f7e2033c59707789bf7c854181d0c2e2a42a93cf09209747d7082e147eb8544de25c3eb14f2e35559ea0c0f5877f2f3fc92132c0ae9da4e45b2f6c866a224ea6d1f28c05320e287750fbc647368d41116e528014cc1852e5531d53e4af938374daba6cee4baa821ed07117253bb3601ddd00d59a3d7fb2ef1f5a2fbba7c429f0cf9a5b3462410fd833a69118f8be9c559b1000cc608fd877fb43f8e65c2d1302622b944462579056874b387208d90623fcdaf93920ca7a9e4ba64ea208758222ad868501cc2c345e2d3a5ea2a17e5069248138c8a79c0251185d29ee73e5afab5354769142d2bf0cb6712727aa6bf84a6245fcdae66e4938d84d1b9dd09a884818622080ff5f98942fb20acd7e0c916c2d5ea7ce6f7e173315384518f")) .put( "nagydani-4-qube", Bytes.fromHexString( 
"000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000200db34d0e438249c0ed685c949cc28776a05094e1c48691dc3f2dca5fc3356d2a0663bd376e4712839917eb9a19c670407e2c377a2de385a3ff3b52104f7f1f4e0c7bf7717fb913896693dc5edbb65b760ef1b00e42e9d8f9af17352385e1cd742c9b006c0f669995cb0bb21d28c0aced2892267637b6470d8cee0ab27fc5d42658f6e88240c31d6774aa60a7ebd25cd48b56d0da11209f1928e61005c6eb709f3e8e0aaf8d9b10f7d7e296d772264dc76897ccdddadc91efa91c1903b7232a9e4c3b941917b99a3bc0c26497dedc897c25750af60237aa67934a26a2bc491db3dcc677491944bc1f51d3e5d76b8d846a62db03dedd61ff508f91a56d71028125035c3a44cbb041497c83bf3e4ae2a9613a401cc721c547a2afa3b16a2969933d3626ed6d8a7428648f74122fd3f2a02a20758f7f693892c8fd798b39abac01d18506c45e71432639e9f9505719ee822f62ccbf47f6850f096ff77b5afaf4be7d772025791717dbe5abf9b3f40cff7d7aab6f67e38f62faf510747276e20a42127e7500c444f9ed92baf65ade9e836845e39c4316d9dce5f8e2c8083e2c0acbb95296e05e51aab13b6b8f53f06c9c4276e12b0671133218cc3ea907da3bd9a367096d9202128d14846cc2e20d56fc8473ecb07cecbfb8086919f3971926e7045b853d85a69d026195c70f9f7a823536e2a8f4b3e12e94d9b53a934353451094b8103df3143a0057457d75e8c708b6337a6f5a4fd1a06727acf9fb93e2993c62f3378b37d56c85e7b1e00f0145ebf8e4095bd723166293c60b6ac1252291ef65823c9e040ddad14969b3b340a4ef714db093a587c37766d68b8d6b5016e741587e7e6bf7e763b44f0247e64bae30f994d248bfd20541a333e5b225ef6a61199e301738b1e688f70ec1d7fb892c183c95dc543c3e12adf8a5e8b9ca9d04f9445cced3ab256f29e998e69efaa633a7b60e1db5a867924ccab0a171d9d6e1098dfa15acde9553de599eaa56490c8f411e4985111f3d40bddfc5e301edb01547b01a886550a61158f7e2033c59707789bf7c854181d0c2e2a42a93cf09209747d7082e147eb8544de25c3eb14f2e35559ea0c0f5877f2f3fc92132c0ae9da4e45b2f6c866a224ea6d1f28c05320e287750fbc647368d41116e528014cc1852e5531d53e4af938374daba6cee4baa821ed07117253bb3601ddd00d59a3d7fb2ef1f5a2fbba7c429f0cf9a5b3462410fd833a69118f8be9c559b1000cc608fd877fb43f8e65c2d1302622b944462579056874b387208d90623fcdaf93920ca7a9e4ba64ea208758222ad868501cc2c345e2d3a5ea2a17e5069248138c8a79c0251185d29ee73e5afab5354769142d2bf0cb6712727aa6bf84a6245fcdae66e4938d84d1b9dd09a884818622080ff5f98942fb20acd7e0c916c2d5ea7ce6f7e173315384518f")) .put( "nagydani-4-pow0x10001", Bytes.fromHexString( 
"000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000000000000000000000000000000000200db34d0e438249c0ed685c949cc28776a05094e1c48691dc3f2dca5fc3356d2a0663bd376e4712839917eb9a19c670407e2c377a2de385a3ff3b52104f7f1f4e0c7bf7717fb913896693dc5edbb65b760ef1b00e42e9d8f9af17352385e1cd742c9b006c0f669995cb0bb21d28c0aced2892267637b6470d8cee0ab27fc5d42658f6e88240c31d6774aa60a7ebd25cd48b56d0da11209f1928e61005c6eb709f3e8e0aaf8d9b10f7d7e296d772264dc76897ccdddadc91efa91c1903b7232a9e4c3b941917b99a3bc0c26497dedc897c25750af60237aa67934a26a2bc491db3dcc677491944bc1f51d3e5d76b8d846a62db03dedd61ff508f91a56d71028125035c3a44cbb041497c83bf3e4ae2a9613a401cc721c547a2afa3b16a2969933d3626ed6d8a7428648f74122fd3f2a02a20758f7f693892c8fd798b39abac01d18506c45e71432639e9f9505719ee822f62ccbf47f6850f096ff77b5afaf4be7d772025791717dbe5abf9b3f40cff7d7aab6f67e38f62faf510747276e20a42127e7500c444f9ed92baf65ade9e836845e39c4316d9dce5f8e2c8083e2c0acbb95296e05e51aab13b6b8f53f06c9c4276e12b0671133218cc3ea907da3bd9a367096d9202128d14846cc2e20d56fc8473ecb07cecbfb8086919f3971926e7045b853d85a69d026195c70f9f7a823536e2a8f4b3e12e94d9b53a934353451094b81010001df3143a0057457d75e8c708b6337a6f5a4fd1a06727acf9fb93e2993c62f3378b37d56c85e7b1e00f0145ebf8e4095bd723166293c60b6ac1252291ef65823c9e040ddad14969b3b340a4ef714db093a587c37766d68b8d6b5016e741587e7e6bf7e763b44f0247e64bae30f994d248bfd20541a333e5b225ef6a61199e301738b1e688f70ec1d7fb892c183c95dc543c3e12adf8a5e8b9ca9d04f9445cced3ab256f29e998e69efaa633a7b60e1db5a867924ccab0a171d9d6e1098dfa15acde9553de599eaa56490c8f411e4985111f3d40bddfc5e301edb01547b01a886550a61158f7e2033c59707789bf7c854181d0c2e2a42a93cf09209747d7082e147eb8544de25c3eb14f2e35559ea0c0f5877f2f3fc92132c0ae9da4e45b2f6c866a224ea6d1f28c05320e287750fbc647368d41116e528014cc1852e5531d53e4af938374daba6cee4baa821ed07117253bb3601ddd00d59a3d7fb2ef1f5a2fbba7c429f0cf9a5b3462410fd833a69118f8be9c559b1000cc608fd877fb43f8e65c2d1302622b944462579056874b387208d90623fcdaf93920ca7a9e4ba64ea208758222ad868501cc2c345e2d3a5ea2a17e5069248138c8a79c0251185d29ee73e5afab5354769142d2bf0cb6712727aa6bf84a6245fcdae66e4938d84d1b9dd09a884818622080ff5f98942fb20acd7e0c916c2d5ea7ce6f7e173315384518f")) .put( "nagydani-5-square", Bytes.fromHexString( 
"000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000400c5a1611f8be90071a43db23cc2fe01871cc4c0e8ab5743f6378e4fef77f7f6db0095c0727e20225beb665645403453e325ad5f9aeb9ba99bf3c148f63f9c07cf4fe8847ad5242d6b7d4499f93bd47056ddab8f7dee878fc2314f344dbee2a7c41a5d3db91eff372c730c2fdd3a141a4b61999e36d549b9870cf2f4e632c4d5df5f024f81c028000073a0ed8847cfb0593d36a47142f578f05ccbe28c0c06aeb1b1da027794c48db880278f79ba78ae64eedfea3c07d10e0562668d839749dc95f40467d15cf65b9cfc52c7c4bcef1cda3596dd52631aac942f146c7cebd46065131699ce8385b0db1874336747ee020a5698a3d1a1082665721e769567f579830f9d259cec1a836845109c21cf6b25da572512bf3c42fd4b96e43895589042ab60dd41f497db96aec102087fe784165bb45f942859268fd2ff6c012d9d00c02ba83eace047cc5f7b2c392c2955c58a49f0338d6fc58749c9db2155522ac17914ec216ad87f12e0ee95574613942fa615898c4d9e8a3be68cd6afa4e7a003dedbdf8edfee31162b174f965b20ae752ad89c967b3068b6f722c16b354456ba8e280f987c08e0a52d40a2e8f3a59b94d590aeef01879eb7a90b3ee7d772c839c85519cbeaddc0c193ec4874a463b53fcaea3271d80ebfb39b33489365fc039ae549a17a9ff898eea2f4cb27b8dbee4c17b998438575b2b8d107e4a0d66ba7fca85b41a58a8d51f191a35c856dfbe8aef2b00048a694bbccff832d23c8ca7a7ff0b6c0b3011d00b97c86c0628444d267c951d9e4fb8f83e154b8f74fb51aa16535e498235c5597dac9606ed0be3173a3836baa4e7d756ffe1e2879b415d3846bccd538c05b847785699aefde3e305decb600cd8fb0e7d8de5efc26971a6ad4e6d7a2d91474f1023a0ac4b78dc937da0ce607a45974d2cac1c33a2631ff7fe6144a3b2e5cf98b531a9627dea92c1dc82204d09db0439b6a11dd64b484e1263aa45fd9539b6020b55e3baece3986a8bffc1003406348f5c61265099ed43a766ee4f93f5f9c5abbc32a0fd3ac2b35b87f9ec26037d88275bd7dd0a54474995ee34ed3727f3f97c48db544b1980193a4b76a8a3ddab3591ce527f16d91882e67f0103b5cda53f7da54d489fc4ac08b6ab358a5a04aa9daa16219d50bd672a7cb804ed769d218807544e5993f1c27427104b349906a0b654df0bf69328afd3013fbe430155339c39f236df5557bf92f1ded7ff609a8502f49064ec3d1dbfb6c15d3a4c11a4f8acd12278cbf68acd5709463d12e3338a6eddb8c112f199645e23154a8e60879d2a654e3ed9296aa28f134168619691cd2c6b9e2eba4438381676173fc63c2588a3c5910dc149cf3760f0aa9fa9c3f5faa9162b0bf1aac9dd32b706a60ef53cbdb394b6b40222b5bc80eea82ba8958386672564cae3794f977871ab62337cf02e30049201ec12937e7ce79d0f55d9c810e20acf52212aca1d3888949e0e4830aad88d804161230eb89d4d329cc83570fe257217d2119134048dd2ed167646975fc7d77136919a049ea74cf08ddd2b896890bb24a0ba18094a22baa351bf29ad96c66bbb1a598f2ca391749620e62d61c3561a7d3653ccc8892c7b99baaf76bf836e2991cb06d6bc0514568ff0d1ec8bb4b3d6984f5eaefb17d3ea2893722375d3ddb8e389a8eef7d7d198f8e687d6a513983df906099f9a2d23f4f9dec6f8ef2f11fc0a21fac45353b94e00486f5e17d386af42502d09db33cf0cf28310e049c07e88682aeeb00cb833c5174266e62407a57583f1f88b304b7c6e0c84bbe1c0fd423072d37a5bd0aacf764229e5c7cd02473460ba3645cd8e8ae144065bf02d0dd238593d8e230354f67e0b2f23012c23274f80e3ee31e35e2606a4a3f31d94ab755e6d163cff52cbb36b6d0cc67ffc512aeed1dce4d7a0d70ce82f2baba12e8d514dc92a056f994adfb17b5b9712bd5186f27a2fda1f7039c5df2c8587fdc62f5627580c13234b55be4df3056050e2d1ef3218f0dd66cb05265fe1acfb0989d8213f2c19d1735a7cf3fa65d88dad5af52dc2bba22b7abf46c3bc77b5091baab9e8f0ddc4d5e581037de91a9f8dcbc69309be29cc815cf19a20a7585b8b3073edf51fc9baeb3e509b97fa4ecfd621e0fd57bd61cac1b895c03248ff12bdbc57509250df3517e8a3fe1d776836b34ab352b973d932ef708b14f7418f9eceb1d87667e61e3e758649cb083f01b133d37ab2f5afa96d6c84bcacf4efc3851ad308c1e7d9113624fce29fab460ab9d2a48d92cdb281103a5250ad44cb2ff6e67ac670c02fdafb3e0f1353953d6d7d5646ca1568dea55275a050ec501b7c6250444f7219f1ba7521ba3b93d089727ca5f3bbe
0d6c1300b423377004954c5628fdb65770b18ced5c9b23a4a5a6d6ef25fe01b4ce278de0bcc4ed86e28a0a68818ffa40970128cf2c38740e80037984428c1bd5113f40ff47512ee6f4e4d8f9b8e8e1b3040d2928d003bd1c1329dc885302fbce9fa81c23b4dc49c7c82d29b52957847898676c89aa5d32b5b0e1c0d5a2b79a19d67562f407f19425687971a957375879d90c5f57c857136c17106c9ab1b99d80e69c8c954ed386493368884b55c939b8d64d26f643e800c56f90c01079d7c534e3b2b7ae352cefd3016da55f6a85eb803b85e2304915fd2001f77c74e28746293c46e4f5f0fd49cf988aafd0026b8e7a3bab2da5cdce1ea26c2e29ec03f4807fac432662b2d6c060be1c7be0e5489de69d0a6e03a4b9117f9244b34a0f1ecba89884f781c6320412413a00c4980287409a2a78c2cd7e65cecebbe4ec1c28cac4dd95f6998e78fc6f1392384331c9436aa10e10e2bf8ad2c4eafbcf276aa7bae64b74428911b3269c749338b0fc5075ad")) .put( "nagydani-5-qube", Bytes.fromHexString( "000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000400c5a1611f8be90071a43db23cc2fe01871cc4c0e8ab5743f6378e4fef77f7f6db0095c0727e20225beb665645403453e325ad5f9aeb9ba99bf3c148f63f9c07cf4fe8847ad5242d6b7d4499f93bd47056ddab8f7dee878fc2314f344dbee2a7c41a5d3db91eff372c730c2fdd3a141a4b61999e36d549b9870cf2f4e632c4d5df5f024f81c028000073a0ed8847cfb0593d36a47142f578f05ccbe28c0c06aeb1b1da027794c48db880278f79ba78ae64eedfea3c07d10e0562668d839749dc95f40467d15cf65b9cfc52c7c4bcef1cda3596dd52631aac942f146c7cebd46065131699ce8385b0db1874336747ee020a5698a3d1a1082665721e769567f579830f9d259cec1a836845109c21cf6b25da572512bf3c42fd4b96e43895589042ab60dd41f497db96aec102087fe784165bb45f942859268fd2ff6c012d9d00c02ba83eace047cc5f7b2c392c2955c58a49f0338d6fc58749c9db2155522ac17914ec216ad87f12e0ee95574613942fa615898c4d9e8a3be68cd6afa4e7a003dedbdf8edfee31162b174f965b20ae752ad89c967b3068b6f722c16b354456ba8e280f987c08e0a52d40a2e8f3a59b94d590aeef01879eb7a90b3ee7d772c839c85519cbeaddc0c193ec4874a463b53fcaea3271d80ebfb39b33489365fc039ae549a17a9ff898eea2f4cb27b8dbee4c17b998438575b2b8d107e4a0d66ba7fca85b41a58a8d51f191a35c856dfbe8aef2b00048a694bbccff832d23c8ca7a7ff0b6c0b3011d00b97c86c0628444d267c951d9e4fb8f83e154b8f74fb51aa16535e498235c5597dac9606ed0be3173a3836baa4e7d756ffe1e2879b415d3846bccd538c05b847785699aefde3e305decb600cd8fb0e7d8de5efc26971a6ad4e6d7a2d91474f1023a0ac4b78dc937da0ce607a45974d2cac1c33a2631ff7fe6144a3b2e5cf98b531a9627dea92c1dc82204d09db0439b6a11dd64b484e1263aa45fd9539b6020b55e3baece3986a8bffc1003406348f5c61265099ed43a766ee4f93f5f9c5abbc32a0fd3ac2b35b87f9ec26037d88275bd7dd0a54474995ee34ed3727f3f97c48db544b1980193a4b76a8a3ddab3591ce527f16d91882e67f0103b5cda53f7da54d489fc4ac08b6ab358a5a04aa9daa16219d50bd672a7cb804ed769d218807544e5993f1c27427104b349906a0b654df0bf69328afd3013fbe430155339c39f236df5557bf92f1ded7ff609a8502f49064ec3d1dbfb6c15d3a4c11a4f8acd12278cbf68acd5709463d12e3338a6eddb8c112f199645e23154a8e60879d2a654e3ed9296aa28f134168619691cd2c6b9e2eba4438381676173fc63c2588a3c5910dc149cf3760f0aa9fa9c3f5faa9162b0bf1aac9dd32b706a60ef53cbdb394b6b40222b5bc80eea82ba8958386672564cae3794f977871ab62337cf03e30049201ec12937e7ce79d0f55d9c810e20acf52212aca1d3888949e0e4830aad88d804161230eb89d4d329cc83570fe257217d2119134048dd2ed167646975fc7d77136919a049ea74cf08ddd2b896890bb24a0ba18094a22baa351bf29ad96c66bbb1a598f2ca391749620e62d61c3561a7d3653ccc8892c7b99baaf76bf836e2991cb06d6bc0514568ff0d1ec8bb4b3d6984f5eaefb17d3ea2893722375d3ddb8e389a8eef7d7d198f8e687d6a513983df906099f9a2d23f4f9dec6f8ef2f11fc0a21fac45353b94e00486f5e17d386af42502d09db33cf0cf28310e049c07e88682aeeb00cb833c5174266e62407a57583f1f88b304b7c6e0c84bbe1c0fd423072d37a5bd
0aacf764229e5c7cd02473460ba3645cd8e8ae144065bf02d0dd238593d8e230354f67e0b2f23012c23274f80e3ee31e35e2606a4a3f31d94ab755e6d163cff52cbb36b6d0cc67ffc512aeed1dce4d7a0d70ce82f2baba12e8d514dc92a056f994adfb17b5b9712bd5186f27a2fda1f7039c5df2c8587fdc62f5627580c13234b55be4df3056050e2d1ef3218f0dd66cb05265fe1acfb0989d8213f2c19d1735a7cf3fa65d88dad5af52dc2bba22b7abf46c3bc77b5091baab9e8f0ddc4d5e581037de91a9f8dcbc69309be29cc815cf19a20a7585b8b3073edf51fc9baeb3e509b97fa4ecfd621e0fd57bd61cac1b895c03248ff12bdbc57509250df3517e8a3fe1d776836b34ab352b973d932ef708b14f7418f9eceb1d87667e61e3e758649cb083f01b133d37ab2f5afa96d6c84bcacf4efc3851ad308c1e7d9113624fce29fab460ab9d2a48d92cdb281103a5250ad44cb2ff6e67ac670c02fdafb3e0f1353953d6d7d5646ca1568dea55275a050ec501b7c6250444f7219f1ba7521ba3b93d089727ca5f3bbe0d6c1300b423377004954c5628fdb65770b18ced5c9b23a4a5a6d6ef25fe01b4ce278de0bcc4ed86e28a0a68818ffa40970128cf2c38740e80037984428c1bd5113f40ff47512ee6f4e4d8f9b8e8e1b3040d2928d003bd1c1329dc885302fbce9fa81c23b4dc49c7c82d29b52957847898676c89aa5d32b5b0e1c0d5a2b79a19d67562f407f19425687971a957375879d90c5f57c857136c17106c9ab1b99d80e69c8c954ed386493368884b55c939b8d64d26f643e800c56f90c01079d7c534e3b2b7ae352cefd3016da55f6a85eb803b85e2304915fd2001f77c74e28746293c46e4f5f0fd49cf988aafd0026b8e7a3bab2da5cdce1ea26c2e29ec03f4807fac432662b2d6c060be1c7be0e5489de69d0a6e03a4b9117f9244b34a0f1ecba89884f781c6320412413a00c4980287409a2a78c2cd7e65cecebbe4ec1c28cac4dd95f6998e78fc6f1392384331c9436aa10e10e2bf8ad2c4eafbcf276aa7bae64b74428911b3269c749338b0fc5075ad")) .put( "nagydani-5-pow0x10001", Bytes.fromHexString( "000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000000000000000000000000000000000400c5a1611f8be90071a43db23cc2fe01871cc4c0e8ab5743f6378e4fef77f7f6db0095c0727e20225beb665645403453e325ad5f9aeb9ba99bf3c148f63f9c07cf4fe8847ad5242d6b7d4499f93bd47056ddab8f7dee878fc2314f344dbee2a7c41a5d3db91eff372c730c2fdd3a141a4b61999e36d549b9870cf2f4e632c4d5df5f024f81c028000073a0ed8847cfb0593d36a47142f578f05ccbe28c0c06aeb1b1da027794c48db880278f79ba78ae64eedfea3c07d10e0562668d839749dc95f40467d15cf65b9cfc52c7c4bcef1cda3596dd52631aac942f146c7cebd46065131699ce8385b0db1874336747ee020a5698a3d1a1082665721e769567f579830f9d259cec1a836845109c21cf6b25da572512bf3c42fd4b96e43895589042ab60dd41f497db96aec102087fe784165bb45f942859268fd2ff6c012d9d00c02ba83eace047cc5f7b2c392c2955c58a49f0338d6fc58749c9db2155522ac17914ec216ad87f12e0ee95574613942fa615898c4d9e8a3be68cd6afa4e7a003dedbdf8edfee31162b174f965b20ae752ad89c967b3068b6f722c16b354456ba8e280f987c08e0a52d40a2e8f3a59b94d590aeef01879eb7a90b3ee7d772c839c85519cbeaddc0c193ec4874a463b53fcaea3271d80ebfb39b33489365fc039ae549a17a9ff898eea2f4cb27b8dbee4c17b998438575b2b8d107e4a0d66ba7fca85b41a58a8d51f191a35c856dfbe8aef2b00048a694bbccff832d23c8ca7a7ff0b6c0b3011d00b97c86c0628444d267c951d9e4fb8f83e154b8f74fb51aa16535e498235c5597dac9606ed0be3173a3836baa4e7d756ffe1e2879b415d3846bccd538c05b847785699aefde3e305decb600cd8fb0e7d8de5efc26971a6ad4e6d7a2d91474f1023a0ac4b78dc937da0ce607a45974d2cac1c33a2631ff7fe6144a3b2e5cf98b531a9627dea92c1dc82204d09db0439b6a11dd64b484e1263aa45fd9539b6020b55e3baece3986a8bffc1003406348f5c61265099ed43a766ee4f93f5f9c5abbc32a0fd3ac2b35b87f9ec26037d88275bd7dd0a54474995ee34ed3727f3f97c48db544b1980193a4b76a8a3ddab3591ce527f16d91882e67f0103b5cda53f7da54d489fc4ac08b6ab358a5a04aa9daa16219d50bd672a7cb804ed769d218807544e5993f1c27427104b349906a0b654df0bf69328afd3013fbe430155339c39f236df5557bf92f1ded7ff609a8502f49064ec3d1dbfb6c15
d3a4c11a4f8acd12278cbf68acd5709463d12e3338a6eddb8c112f199645e23154a8e60879d2a654e3ed9296aa28f134168619691cd2c6b9e2eba4438381676173fc63c2588a3c5910dc149cf3760f0aa9fa9c3f5faa9162b0bf1aac9dd32b706a60ef53cbdb394b6b40222b5bc80eea82ba8958386672564cae3794f977871ab62337cf010001e30049201ec12937e7ce79d0f55d9c810e20acf52212aca1d3888949e0e4830aad88d804161230eb89d4d329cc83570fe257217d2119134048dd2ed167646975fc7d77136919a049ea74cf08ddd2b896890bb24a0ba18094a22baa351bf29ad96c66bbb1a598f2ca391749620e62d61c3561a7d3653ccc8892c7b99baaf76bf836e2991cb06d6bc0514568ff0d1ec8bb4b3d6984f5eaefb17d3ea2893722375d3ddb8e389a8eef7d7d198f8e687d6a513983df906099f9a2d23f4f9dec6f8ef2f11fc0a21fac45353b94e00486f5e17d386af42502d09db33cf0cf28310e049c07e88682aeeb00cb833c5174266e62407a57583f1f88b304b7c6e0c84bbe1c0fd423072d37a5bd0aacf764229e5c7cd02473460ba3645cd8e8ae144065bf02d0dd238593d8e230354f67e0b2f23012c23274f80e3ee31e35e2606a4a3f31d94ab755e6d163cff52cbb36b6d0cc67ffc512aeed1dce4d7a0d70ce82f2baba12e8d514dc92a056f994adfb17b5b9712bd5186f27a2fda1f7039c5df2c8587fdc62f5627580c13234b55be4df3056050e2d1ef3218f0dd66cb05265fe1acfb0989d8213f2c19d1735a7cf3fa65d88dad5af52dc2bba22b7abf46c3bc77b5091baab9e8f0ddc4d5e581037de91a9f8dcbc69309be29cc815cf19a20a7585b8b3073edf51fc9baeb3e509b97fa4ecfd621e0fd57bd61cac1b895c03248ff12bdbc57509250df3517e8a3fe1d776836b34ab352b973d932ef708b14f7418f9eceb1d87667e61e3e758649cb083f01b133d37ab2f5afa96d6c84bcacf4efc3851ad308c1e7d9113624fce29fab460ab9d2a48d92cdb281103a5250ad44cb2ff6e67ac670c02fdafb3e0f1353953d6d7d5646ca1568dea55275a050ec501b7c6250444f7219f1ba7521ba3b93d089727ca5f3bbe0d6c1300b423377004954c5628fdb65770b18ced5c9b23a4a5a6d6ef25fe01b4ce278de0bcc4ed86e28a0a68818ffa40970128cf2c38740e80037984428c1bd5113f40ff47512ee6f4e4d8f9b8e8e1b3040d2928d003bd1c1329dc885302fbce9fa81c23b4dc49c7c82d29b52957847898676c89aa5d32b5b0e1c0d5a2b79a19d67562f407f19425687971a957375879d90c5f57c857136c17106c9ab1b99d80e69c8c954ed386493368884b55c939b8d64d26f643e800c56f90c01079d7c534e3b2b7ae352cefd3016da55f6a85eb803b85e2304915fd2001f77c74e28746293c46e4f5f0fd49cf988aafd0026b8e7a3bab2da5cdce1ea26c2e29ec03f4807fac432662b2d6c060be1c7be0e5489de69d0a6e03a4b9117f9244b34a0f1ecba89884f781c6320412413a00c4980287409a2a78c2cd7e65cecebbe4ec1c28cac4dd95f6998e78fc6f1392384331c9436aa10e10e2bf8ad2c4eafbcf276aa7bae64b74428911b3269c749338b0fc5075ad")) .build(); final BigIntegerModularExponentiationPrecompiledContract contract = new BigIntegerModularExponentiationPrecompiledContract(new BerlinGasCalculator()); for (final Map.Entry<String, Bytes> testCase : testcases.entrySet()) { final double gasSpent = runBenchmark(testCase.getValue(), contract); System.out.printf( "ModEXP %s for \t%,d gas. Charging %,d gas.%n", testCase.getKey(), (int) gasSpent, contract.gasRequirement(testCase.getValue()).asUInt256().toLong()); } } private static void benchBNADD() { final Bytes g1Point0 = Bytes.concatenate( Bytes.fromHexString( "0x17c139df0efee0f766bc0204762b774362e4ded88953a39ce849a8a7fa163fa9"), Bytes.fromHexString( "0x01e0559bacb160664764a357af8a9fe70baa9258e0b959273ffc5718c6d4cc7c")); final Bytes g1Point1 = Bytes.concatenate( Bytes.fromHexString( "0x17c139df0efee0f766bc0204762b774362e4ded88953a39ce849a8a7fa163fa9"), Bytes.fromHexString( "0x2e83f8d734803fc370eba25ed1f6b8768bd6d83887b87165fc2434fe11a830cb")); final Bytes arg = Bytes.concatenate(g1Point0, g1Point1); final AltBN128AddPrecompiledContract contract = AltBN128AddPrecompiledContract.istanbul(new IstanbulGasCalculator()); final double gasSpent = runBenchmark(arg, contract); System.out.printf( "BNADD for %,d gas. 
Charging %,d gas.%n", (int) gasSpent, contract.gasRequirement(arg).asUInt256().toLong()); } private static void benchBNMUL() { final Bytes g1Point1 = Bytes.concatenate( Bytes.fromHexString( "0x0000000000000000000000000000000000000000000000000000000000000001"), Bytes.fromHexString( "0x30644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd45")); final Bytes scalar = Bytes.fromHexString("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"); final Bytes arg = Bytes.concatenate(g1Point1, scalar); final AltBN128MulPrecompiledContract contract = AltBN128MulPrecompiledContract.istanbul(new IstanbulGasCalculator()); final double gasSpent = runBenchmark(arg, contract); System.out.printf( "BNMUL for %,d gas. Charging %,d gas.%n", (int) gasSpent, contract.gasRequirement(arg).asUInt256().toLong()); } private static void benchBNPairing() { final Bytes[] args = { Bytes.fromHexString( "0x0fc6ebd1758207e311a99674dc77d28128643c057fb9ca2c92b4205b6bf57ed2" + "1e50042f97b7a1f2768fa15f6683eca9ee7fa8ee655d94246ab85fb1da3f0b90" + "198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c2" + "1800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed" + "090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b" + "12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa"), Bytes.fromHexString( "0x2b101be01b2f064cba109e065dc0b5e5bf6b64ed4054b82af3a7e6e34c1e2005" + "1a4d9ceecf9115a98efd147c4abb2684102d3e925938989153b9ff330523cdb4" + "08d554bf59102bbb961ba81107ec71785ef9ce6638e5332b6c1a58b87447d181" + "01cf7cc93bfbf7b2c5f04a3bc9cb8b72bbcf2defcabdceb09860c493bdf1588d" + "02cb2a424885c9e412b94c40905b359e3043275cd29f5b557f008cd0a3e0c0dc" + "204e5d81d86c561f9344ad5f122a625f259996b065b80cbbe74a9ad97b6d7cc2" + "07402fdc3bc28a434909f24695adea3e9418d9857efc8c71f67a470a17f3cf12" + "255dbc3a8b5c2c1a7a3f8c59e2f5b6e04bc4d7b7bb82fcbe18b2294305c8473b" + "19156e854972d656d1020003e5781972d84081309cdf71baacf6c6e29272f5ff" + "2acded377df8902b7a75de6c0f53c161f3a2ff3f374470b78d5b3c4d826d84d5" + "1731ef3b84913296c30a649461b2ca35e3fcc2e3031ea2386d32f885ff096559" + "0919e7685f6ea605db14f311dede6e83f21937f05cfc53ac1dbe45891c47bf2a"), Bytes.fromHexString( "0x1a3fabea802788c8aa88741c6a68f271b221eb75838bb1079381f3f1ae414f40" + "126308d6cdb6b7efceb1ec0016b99cf7a1e5780f5a9a775d43bc7f2b6fd510e2" + "11b35cf2c85531eab64b96eb2eef487e0eb60fb9207fe4763e7f6e02dcead646" + "2cbea52f3417b398aed9e355ed16934a81b72d2646e3bf90dbc2dcba294b631d" + "2c6518cd26310e541a799357d1ae8bc477b162f2040407b965ecd777e26d31f7" + "125170b5860fb8f8da2c43e00ea4a83bcc1a974e47e59fcd657851d2b0dd1655" + "130a2183533392b5fd031857eb4c199a19382f39fcb666d6133b3a6e5784d6a5" + "2cca76f2bc625d2e61a41b5f382eadf1df1756dd392f639c3d9f3513099e63f9" + "07ecba8131b3fb354272c86d01577e228c5bd5fb6404bbaf106d7f4858dc2996" + "1c5d49a9ae291a2a2213da57a76653391fa1fc0fa7c534afa124ad71b7fdd719" + "10f1a73f94a8f077f478d069d7cf1c49444f64cd20ed75d4f6de3d8986147cf8" + "0d5816f2f116c5cc0be7dfc4c0b4c592204864acb70ad5f789013389a0092ce4" + "2650b89e5540eea1375b27dfd9081a0622e03352e5c6a7593df72e2113328e64" + "21991b3e5100845cd9b8f0fa16c7fe5f40152e702e61f4cdf0d98e7f213b1a47" + "10520008be7609bdb92145596ac6bf37da0269f7460e04e8e4701c3afbae0e52" + "0664e736b2af7bf9125f69fe5c3706cd893cd769b1dae8a6e3d639e2d76e66e2" + "1cacce8776f5ada6b35036f9343faab26c91b9aea83d3cb59cf5628ffe18ab1b" + "03b48ca7e6d84fca619aaf81745fbf9c30e5a78ed4766cc62b0f12aea5044f56") }; final AltBN128PairingPrecompiledContract contract = AltBN128PairingPrecompiledContract.istanbul(new 
IstanbulGasCalculator()); for (int i = 0; i < args.length; i++) { final double gasSpent = runBenchmark(args[i], contract); System.out.printf( "BNPairings %d pairs for %,d gas. Charging %,d gas.%n", i * 2 + 2, (int) gasSpent, contract.gasRequirement(args[i]).asUInt256().toLong()); } } public static void benchBLS12G1Add() { final Bytes arg = Bytes.fromHexString( "0000000000000000000000000000000012196c5a43d69224d8713389285f26b98f86ee910ab3dd668e413738282003cc5b7357af9a7af54bb713d62255e80f56" + "0000000000000000000000000000000006ba8102bfbeea4416b710c73e8cce3032c31c6269c44906f8ac4f7874ce99fb17559992486528963884ce429a992fee" + "000000000000000000000000000000000001101098f5c39893765766af4512a0c74e1bb89bc7e6fdf14e3e7337d257cc0f94658179d83320b99f31ff94cd2bac" + "0000000000000000000000000000000003e1a9f9f44ca2cdab4f43a1a3ee3470fdf90b2fc228eb3b709fcd72f014838ac82a6d797aeefed9a0804b22ed1ce8f7"); final BLS12G1AddPrecompiledContract contract = new BLS12G1AddPrecompiledContract(); final double gasSpent = runBenchmark(arg, contract); System.out.printf( "G1ADD for %,d gas. Charging %,d gas.%n", (int) gasSpent, contract.gasRequirement(arg).asUInt256().toLong()); } private static void benchBLS12G1Mul() { final Bytes arg = Bytes.fromHexString( "0000000000000000000000000000000017f1d3a73197d7942695638c4fa9ac0fc3688c4f9774b905a14e3a3f171bac586c55e83ff97a1aeffb3af00adb22c6bb" + "0000000000000000000000000000000008b3f481e3aaa0f1a09e30ed741d8ae4fcf5e095d5d00af600db18cb2c04b3edd03cc744a2888ae40caa232946c5e7e1" + "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"); final BLS12G1MulPrecompiledContract contract = new BLS12G1MulPrecompiledContract(); contract.compute(arg, fakeFrame); final double gasSpent = runBenchmark(arg, contract); System.out.printf( "G1MUL for %,d gas. 
Charging %,d gas.%n", (int) gasSpent, contract.gasRequirement(arg).asUInt256().toLong()); } private static void benchBLS12G1MultiExp() { final Bytes[] args = { Bytes.fromHexString( "0000000000000000000000000000000012196c5a43d69224d8713389285f26b98f86ee910ab3dd668e413738282003cc5b7357af9a7af54bb713d62255e80f560000000000000000000000000000000006ba8102bfbeea4416b710c73e8cce3032c31c6269c44906f8ac4f7874ce99fb17559992486528963884ce429a992fee" + "b3c940fe79b6966489b527955de7599194a9ac69a6ff58b8d99e7b1084f0464e"), Bytes.fromHexString( "00000000000000000000000000000000117dbe419018f67844f6a5e1b78a1e597283ad7b8ee7ac5e58846f5a5fd68d0da99ce235a91db3ec1cf340fe6b7afcdb0000000000000000000000000000000013316f23de032d25e912ae8dc9b54c8dba1be7cecdbb9d2228d7e8f652011d46be79089dd0a6080a73c82256ce5e4ed2" + "4d0e25bf3f6fc9f4da25d21fdc71773f1947b7a8a775b8177f7eca990b05b71d" + "0000000000000000000000000000000008ab7b556c672db7883ec47efa6d98bb08cec7902ebb421aac1c31506b177ac444ffa2d9b400a6f1cbdc6240c607ee110000000000000000000000000000000016b7fa9adf4addc2192271ce7ad3c8d8f902d061c43b7d2e8e26922009b777855bffabe7ed1a09155819eabfa87f276f" + "973f40c12c92b703d7b7848ef8b4466d40823aad3943a312b57432b91ff68be1"), Bytes.fromHexString( "0000000000000000000000000000000015ff9a232d9b5a8020a85d5fe08a1dcfb73ece434258fe0e2fddf10ddef0906c42dcb5f5d62fc97f934ba900f17beb330000000000000000000000000000000009cfe4ee2241d9413c616462d7bac035a6766aeaab69c81e094d75b840df45d7e0dfac0265608b93efefb9a8728b98e4" + "4c51f97bcdda93904ae26991b471e9ea942e2b5b8ed26055da11c58bc7b5002a" + "0000000000000000000000000000000017a17b82e3bfadf3250210d8ef572c02c3610d65ab4d7366e0b748768a28ee6a1b51f77ed686a64f087f36f641e7dca900000000000000000000000000000000077ea73d233ccea51dc4d5acecf6d9332bf17ae51598f4b394a5f62fb387e9c9aa1d6823b64a074f5873422ca57545d3" + "8964d5867927bc3e35a0b4c457482373969bff5edff8a781d65573e07fd87b89" + "000000000000000000000000000000000c1243478f4fbdc21ea9b241655947a28accd058d0cdb4f9f0576d32f09dddaf0850464550ff07cab5927b3e4c863ce90000000000000000000000000000000015fb54db10ffac0b6cd374eb7168a8cb3df0a7d5f872d8e98c1f623deb66df5dd08ff4c3658f2905ec8bd02598bd4f90" + "787c38b944eadbd03fd3187f450571740f6cd00e5b2e560165846eb800e5c944"), Bytes.fromHexString( "000000000000000000000000000000000328f09584b6d6c98a709fc22e184123994613aca95a28ac53df8523b92273eb6f4e2d9b2a7dcebb474604d54a210719000000000000000000000000000000001220ebde579911fe2e707446aaad8d3789fae96ae2e23670a4fd856ed82daaab704779eb4224027c1ed9460f39951a1b" + "aaee7ae2a237e8e53560c79e7baa9adf9c00a0ea4d6f514e7a6832eb15cef1e1" + "0000000000000000000000000000000002ebfa98aa92c32a29ebe17fcb1819ba82e686abd9371fcee8ea793b4c72b6464085044f818f1f5902396df0122830cb00000000000000000000000000000000001184715b8432ed190b459113977289a890f68f6085ea111466af15103c9c02467da33e01d6bff87fd57db6ccba442a" + "dac6ed3ef45c1d7d3028f0f89e5458797996d3294b95bebe049b76c7d0db317c" + "0000000000000000000000000000000009d6424e002439998e91cd509f85751ad25e574830c564e7568347d19e3f38add0cab067c0b4b0801785a78bcbeaf246000000000000000000000000000000000ef6d7db03ee654503b46ff0dbc3297536a422e963bda9871a8da8f4eeb98dedebd6071c4880b4636198f4c2375dc795" + "bb30985756c3ca075114c92f231575d6befafe4084517f1166a47376867bd108" + "0000000000000000000000000000000002d1cdb93191d1f9f0308c2c55d0208a071f5520faca7c52ab0311dbc9ba563bd33b5dd6baa77bf45ac2c3269e945f4800000000000000000000000000000000072a52106e6d7b92c594c4dacd20ef5fab7141e45c231457cd7e71463b2254ee6e72689e516fa6a8f29f2a173ce0a190" + "fb730105809f64ea522983d6bbb62f7e2e8cbf702685e9be10e2ef71f8187672"), 
Bytes.fromHexString( "0000000000000000000000000000000000641642f6801d39a09a536f506056f72a619c50d043673d6d39aa4af11d8e3ded38b9c3bbc970dbc1bd55d68f94b50d0000000000000000000000000000000009ab050de356a24aea90007c6b319614ba2f2ed67223b972767117769e3c8e31ee4056494628fb2892d3d37afb6ac943" + "b6a9408625b0ca8fcbfb21d34eec2d8e24e9a30d2d3b32d7a37d110b13afbfea" + "000000000000000000000000000000000fd4893addbd58fb1bf30b8e62bef068da386edbab9541d198e8719b2de5beb9223d87387af82e8b55bd521ff3e47e2d000000000000000000000000000000000f3a923b76473d5b5a53501790cb02597bb778bdacb3805a9002b152d22241ad131d0f0d6a260739cbab2c2fe602870e" + "3b77283d0a7bb9e17a27e66851792fdd605cc0a339028b8985390fd024374c76" + "0000000000000000000000000000000002cb4b24c8aa799fd7cb1e4ab1aab1372113200343d8526ea7bc64dfaf926baf5d90756a40e35617854a2079cd07fba40000000000000000000000000000000003327ca22bd64ebd673cc6d5b02b2a8804d5353c9d251637c4273ad08d581cc0d58da9bea27c37a0b3f4961dbafd276b" + "dd994eae929aee7428fdda2e44f8cb12b10b91c83b22abc8bbb561310b62257c" + "00000000000000000000000000000000024ad70f2b2105ca37112858e84c6f5e3ffd4a8b064522faae1ecba38fabd52a6274cb46b00075deb87472f11f2e67d90000000000000000000000000000000010a502c8b2a68aa30d2cb719273550b9a3c283c35b2e18a01b0b765344ffaaa5cb30a1e3e6ecd3a53ab67658a5787681" + "7010b134989c8368c7f831f9dd9f9a890e2c1435681107414f2e8637153bbf6a" + "0000000000000000000000000000000000704cc57c8e0944326ddc7c747d9e7347a7f6918977132eea269f161461eb64066f773352f293a3ac458dc3ccd5026a000000000000000000000000000000001099d3c2bb2d082f2fdcbed013f7ac69e8624f4fcf6dfab3ee9dcf7fbbdb8c49ee79de40e887c0b6828d2496e3a6f768" + "94c68bc8d91ac8c489ee87dbfc4b94c93c8bbd5fc04c27db8b02303f3a659054") }; final BLS12G1MultiExpPrecompiledContract contract = new BLS12G1MultiExpPrecompiledContract(); for (int i = 0; i < args.length; i++) { final double gasSpent = runBenchmark(args[i], contract); System.out.printf( "G1MULTIEXP %d for %,d gas. Charging %,d gas.%n", i + 1, (int) gasSpent, contract.gasRequirement(args[i]).asUInt256().toLong()); } } public static void benchBLS12G2Add() { final Bytes arg = Bytes.fromHexString( "0000000000000000000000000000000018c0ada6351b70661f053365deae56910798bd2ace6e2bf6ba4192d1a229967f6af6ca1c9a8a11ebc0a232344ee0f6d6000000000000000000000000000000000cc70a587f4652039d8117b6103858adcd9728f6aebe230578389a62da0042b7623b1c0436734f463cfdd187d2090324" + "0000000000000000000000000000000009f50bd7beedb23328818f9ffdafdb6da6a4dd80c5a9048ab8b154df3cad938ccede829f1156f769d9e149791e8e0cd900000000000000000000000000000000079ba50d2511631b20b6d6f3841e616e9d11b68ec3368cd60129d9d4787ab56c4e9145a38927e51c9cd6271d493d9388" + "00000000000000000000000000000000192fa5d8732ff9f38e0b1cf12eadfd2608f0c7a39aced7746837833ae253bb57ef9c0d98a4b69eeb2950901917e99d1e0000000000000000000000000000000009aeb10c372b5ef1010675c6a4762fda33636489c23b581c75220589afbc0cc46249f921eea02dd1b761e036ffdbae22" + "0000000000000000000000000000000002d225447600d49f932b9dd3ca1e6959697aa603e74d8666681a2dca8160c3857668ae074440366619eb8920256c4e4a00000000000000000000000000000000174882cdd3551e0ce6178861ff83e195fecbcffd53a67b6f10b4431e423e28a480327febe70276036f60bb9c99cf7633"); final BLS12G2AddPrecompiledContract contract = new BLS12G2AddPrecompiledContract(); final double gasSpent = runBenchmark(arg, contract); System.out.printf( "G2ADD for %,d gas. 
Charging %,d gas.%n", (int) gasSpent, contract.gasRequirement(arg).asUInt256().toLong()); } private static void benchBLS12G2Mul() { final Bytes arg = Bytes.fromHexString( "00000000000000000000000000000000024aa2b2f08f0a91260805272dc51051c6e47ad4fa403b02b4510b647ae3d1770bac0326a805bbefd48056c8c121bdb80000000000000000000000000000000013e02b6052719f607dacd3a088274f65596bd0d09920b61ab5da61bbdc7f5049334cf11213945d57e5ac7d055d042b7e" + "000000000000000000000000000000000ce5d527727d6e118cc9cdc6da2e351aadfd9baa8cbdd3a76d429a695160d12c923ac9cc3baca289e193548608b82801000000000000000000000000000000000606c4a02ea734cc32acd2b02bc28b99cb3e287e85a763af267492ab572e99ab3f370d275cec1da1aaa9075ff05f79be" + "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"); final BLS12G2MulPrecompiledContract contract = new BLS12G2MulPrecompiledContract(); final double gasSpent = runBenchmark(arg, contract); System.out.printf( "G2MUL for %,d gas. Charging %,d gas.%n", (int) gasSpent, contract.gasRequirement(arg).asUInt256().toLong()); } private static void benchBLS12G2MultiExp() { final Bytes[] args = { Bytes.fromHexString( "00000000000000000000000000000000039b10ccd664da6f273ea134bb55ee48f09ba585a7e2bb95b5aec610631ac49810d5d616f67ba0147e6d1be476ea220e0000000000000000000000000000000000fbcdff4e48e07d1f73ec42fe7eb026f5c30407cfd2f22bbbfe5b2a09e8a7bb4884178cb6afd1c95f80e646929d30040000000000000000000000000000000001ed3b0e71acb0adbf44643374edbf4405af87cfc0507db7e8978889c6c3afbe9754d1182e98ac3060d64994d31ef576000000000000000000000000000000001681a2bf65b83be5a2ca50430949b6e2a099977482e9405b593f34d2ed877a3f0d1bddc37d0cec4d59d7df74b2b8f2df" + "b3c940fe79b6966489b527955de7599194a9ac69a6ff58b8d99e7b1084f0464e"), Bytes.fromHexString( "0000000000000000000000000000000018c0ada6351b70661f053365deae56910798bd2ace6e2bf6ba4192d1a229967f6af6ca1c9a8a11ebc0a232344ee0f6d6000000000000000000000000000000000cc70a587f4652039d8117b6103858adcd9728f6aebe230578389a62da0042b7623b1c0436734f463cfdd187d20903240000000000000000000000000000000009f50bd7beedb23328818f9ffdafdb6da6a4dd80c5a9048ab8b154df3cad938ccede829f1156f769d9e149791e8e0cd900000000000000000000000000000000079ba50d2511631b20b6d6f3841e616e9d11b68ec3368cd60129d9d4787ab56c4e9145a38927e51c9cd6271d493d9388" + "4d0e25bf3f6fc9f4da25d21fdc71773f1947b7a8a775b8177f7eca990b05b71d" + "0000000000000000000000000000000003632695b09dbf86163909d2bb25995b36ad1d137cf252860fd4bb6c95749e19eb0c1383e9d2f93f2791cb0cf6c8ed9d000000000000000000000000000000001688a855609b0bbff4452d146396558ff18777f329fd4f76a96859dabfc6a6f6977c2496280dbe3b1f8923990c1d6407000000000000000000000000000000000c8567fee05d05af279adc67179468a29d7520b067dbb348ee315a99504f70a206538b81a457cce855f4851ad48b7e80000000000000000000000000000000001238dcdfa80ea46e1500026ea5feadb421de4409f4992ffbf5ae59fa67fd82f38452642a50261b849e74b4a33eed70cc" + "973f40c12c92b703d7b7848ef8b4466d40823aad3943a312b57432b91ff68be1"), Bytes.fromHexString( "000000000000000000000000000000000149704960cccf9d5ea414c73871e896b1d4cf0a946b0db72f5f2c5df98d2ec4f3adbbc14c78047961bc9620cb6cfb5900000000000000000000000000000000140c5d25e534fb1bfdc19ba4cecaabe619f6e0cd3d60b0f17dafd7bcd27b286d4f4477d00c5e1af22ee1a0c67fbf177c00000000000000000000000000000000029a1727041590b8459890de736df15c00d80ab007c3aee692ddcdf75790c9806d198e9f4502bec2f0a623491c3f877d0000000000000000000000000000000008a94c98baa9409151030d4fae2bd4a64c6f11ea3c99b9661fdaed226b9a7c2a7d609be34afda5d18b8911b6e015bf49" + "4c51f97bcdda93904ae26991b471e9ea942e2b5b8ed26055da11c58bc7b5002a" + 
"000000000000000000000000000000001156d478661337478ab0cbc877a99d9e4d9824a2b3f605d41404d6b557b3ffabbf42635b0bbcb854cf9ed8b8637561a8000000000000000000000000000000001147ed317d5642e699787a7b47e6795c9a8943a34a694007e44f8654ba96390cf19f010dcf695e22c21874022c6ce291000000000000000000000000000000000c6dccdf920fd5e7fae284115511952633744c6ad94120d9cae6acda8a7c23c48bd912cba6c38de5159587e1e6cad519000000000000000000000000000000001944227d462bc2e5dcc6f6db0f83dad411ba8895262836f975b2b91e06fd0e2138862162acc04e9e65050b34ccbd1a4e" + "8964d5867927bc3e35a0b4c457482373969bff5edff8a781d65573e07fd87b89" + "0000000000000000000000000000000019c31e3ab8cc9c920aa8f56371f133b6cb8d7b0b74b23c0c7201aca79e5ae69dc01f1f74d2492dcb081895b17d106b4e000000000000000000000000000000001789b0d371bd63077ccde3dbbebf3531368feb775bced187fb31cc6821481664600978e323ff21085b8c08e0f21daf72000000000000000000000000000000000009eacfe8f4a2a9bae6573424d07f42bd6af8a9d55f71476a7e3c7a4b2b898550c1e72ec13afd4eff22421a03af1d31000000000000000000000000000000000410bd4ea74dcfa33f2976aa1b571c67cbb596ab10f76a8aaf4548f1097e55b3373bff02683f806cb84e1e0e877819e2" + "787c38b944eadbd03fd3187f450571740f6cd00e5b2e560165846eb800e5c944"), Bytes.fromHexString( "00000000000000000000000000000000147f09986691f2e57073378e8bfd58804241eed7934f6adfe6d0a6bac4da0b738495778a303e52113e1c80e698476d50000000000000000000000000000000000762348b84c92a8ca6de319cf1f8f11db296a71b90fe13e1e4bcd25903829c00a5d2ad4b1c8d98c37eaad7e042ab023d0000000000000000000000000000000011d1d94530d4a2daf0e902a5c3382cd135938557f94b04bccea5e16ea089c5e020e13524c854a316662bd68784fe31f300000000000000000000000000000000070828522bec75b6a492fd9bca7b54dac6fbbf4f0bc3179d312bb65c647439e3868e4d5b21af5a64c93aeee8a9b7e46e" + "aaee7ae2a237e8e53560c79e7baa9adf9c00a0ea4d6f514e7a6832eb15cef1e1" + "000000000000000000000000000000000690a0869204c8dced5ba0ce13554b2703a3f18afb8fa8fa1c457d79c58fdc25471ae85bafad52e506fc1917fc3becff0000000000000000000000000000000010f7dbb16f8571ede1cec79e3f9ea03ae6468d7285984713f19607f5cab902b9a6b7cbcfd900be5c2e407cc093ea0e6700000000000000000000000000000000151caf87968433cb1f85fc1854c57049be22c26497a86bfbd66a2b3af121d894dba8004a17c6ff96a5843c2719fa32d10000000000000000000000000000000011f0270f2b039409f70392879bcc2c67c836c100cf9883d3dc48d7adbcd52037d270539e863a951acd47ecaa1ca4db12" + "dac6ed3ef45c1d7d3028f0f89e5458797996d3294b95bebe049b76c7d0db317c" + "0000000000000000000000000000000017fae043c8fd4c520a90d4a6bd95f5b0484acc279b899e7b1d8f7f7831cc6ba37cd5965c4dc674768f5805842d433af30000000000000000000000000000000008ddd7b41b8fa4d29fb931830f29b46f4015ec202d51cb969d7c832aafc0995c875cd45eff4a083e2d5ecb5ad185b64f0000000000000000000000000000000015d384ab7e52420b83a69827257cb52b00f0199ed2240a142812b46cf67e92b99942ac59fb9f9efd7dd822f5a36c799f00000000000000000000000000000000074b3a16a9cc4be9da0ac8e2e7003d9c1ec89244d2c33441b31af76716cce439f805843a9a44701203231efdca551d5b" + "bb30985756c3ca075114c92f231575d6befafe4084517f1166a47376867bd108" + "000000000000000000000000000000000e25365988664e8b6ade2e5a40da49c11ff1e084cc0f8dca51f0d0578555d39e3617c8cadb2abc2633b28c5895ab0a9e00000000000000000000000000000000169f5fd768152169c403475dee475576fd2cc3788179453b0039ff3cb1b7a5a0fff8f82d03f56e65cad579218486c3b600000000000000000000000000000000087ccd7f92032febc1f75c7115111ede4acbb2e429cbccf3959524d0b79c449d431ff65485e1aecb442b53fec80ecb4000000000000000000000000000000000135d63f264360003b2eb28f126c6621a40088c6eb15acc4aea89d6068e9d5a47f842aa4b4300f5cda5cc5831edb81596" + "fb730105809f64ea522983d6bbb62f7e2e8cbf702685e9be10e2ef71f8187672"), 
Bytes.fromHexString( "00000000000000000000000000000000159da74f15e4c614b418997f81a1b8a3d9eb8dd80d94b5bad664bff271bb0f2d8f3c4ceb947dc6300d5003a2f7d7a829000000000000000000000000000000000cdd4d1d4666f385dd54052cf5c1966328403251bebb29f0d553a9a96b5ade350c8493270e9b5282d8a06f9fa8d7b1d900000000000000000000000000000000189f8d3c94fdaa72cc67a7f93d35f91e22206ff9e97eed9601196c28d45b69c802ae92bcbf582754717b0355e08d37c000000000000000000000000000000000054b0a282610f108fc7f6736b8c22c8778d082bf4b0d0abca5a228198eba6a868910dd5c5c440036968e977955054196" + "b6a9408625b0ca8fcbfb21d34eec2d8e24e9a30d2d3b32d7a37d110b13afbfea" + "000000000000000000000000000000000f29b0d2b6e3466668e1328048e8dbc782c1111ab8cbe718c85d58ded992d97ca8ba20b9d048feb6ed0aa1b4139d02d3000000000000000000000000000000000d1f0dae940b99fbfc6e4a58480cac8c4e6b2fe33ce6f39c7ac1671046ce94d9e16cba2bb62c6749ef73d45bea21501a000000000000000000000000000000001902ccece1c0c763fd06934a76d1f2f056563ae6d8592bafd589cfebd6f057726fd908614ccd6518a21c66ecc2f78b660000000000000000000000000000000017f6b113f8872c3187d20b0c765d73b850b54244a719cf461fb318796c0b8f310b5490959f9d9187f99c8ed3e25e42a9" + "3b77283d0a7bb9e17a27e66851792fdd605cc0a339028b8985390fd024374c76" + "000000000000000000000000000000000576b8cf1e69efdc277465c344cadf7f8cceffacbeca83821f3ff81717308b97f4ac046f1926e7c2eb42677d7afc257c000000000000000000000000000000000cc1524531e96f3c00e4250dd351aedb5a4c3184aff52ec8c13d470068f5967f3674fe173ee239933e67501a9decc6680000000000000000000000000000000001610cfcaea414c241b44cf6f3cc319dcb51d6b8de29c8a6869ff7c1ebb7b747d881e922b42e8fab96bde7cf23e8e4cd0000000000000000000000000000000017d4444dc8b6893b681cf10dac8169054f9d2f61d3dd5fd785ae7afa49d18ebbde9ce8dde5641adc6b38173173459836" + "dd994eae929aee7428fdda2e44f8cb12b10b91c83b22abc8bbb561310b62257c" + "000000000000000000000000000000000ca8f961f86ee6c46fc88fbbf721ba760186f13cd4cce743f19dc60a89fd985cb3feee34dcc4656735a326f515a729e400000000000000000000000000000000174baf466b809b1155d524050f7ee58c7c5cf728c674e0ce549f5551047a4479ca15bdf69b403b03fa74eb1b26bbff6c0000000000000000000000000000000000e8c8b587c171b1b292779abfef57202ed29e7fe94ade9634ec5a2b3b4692a4f3c15468e3f6418b144674be70780d5b000000000000000000000000000000001865e99cf97d88bdf56dae32314eb32295c39a1e755cd7d1478bea8520b9ff21c39b683b92ae15568420c390c42b123b" + "7010b134989c8368c7f831f9dd9f9a890e2c1435681107414f2e8637153bbf6a" + "0000000000000000000000000000000017eccd446f10018219a1bd111b8786cf9febd49f9e7e754e82dd155ead59b819f0f20e42f4635d5044ec5d550d847623000000000000000000000000000000000403969d2b8f914ff2ea3bf902782642e2c6157bd2a343acf60ff9125b48b558d990a74c6d4d6398e7a3cc2a16037346000000000000000000000000000000000bd45f61f142bd78619fb520715320eb5e6ebafa8b078ce796ba62fe1a549d5fb9df57e92d8d2795988eb6ae18cf9d9300000000000000000000000000000000097db1314e064b8e670ec286958f17065bce644cf240ab1b1b220504560d36a0b43fc18453ff3a2bb315e219965f5bd3" + "94c68bc8d91ac8c489ee87dbfc4b94c93c8bbd5fc04c27db8b02303f3a659054") }; final BLS12G2MultiExpPrecompiledContract contract = new BLS12G2MultiExpPrecompiledContract(); for (int i = 0; i < args.length; i++) { final double gasSpent = runBenchmark(args[i], contract); System.out.printf( "G2MULTIEXP %d for %,d gas. 
Charging %,d gas.%n", i + 1, (int) gasSpent, contract.gasRequirement(args[i]).asUInt256().toLong()); } } public static void benchBLS12Pair() { final Bytes[] args = { Bytes.fromHexString( "0000000000000000000000000000000012196c5a43d69224d8713389285f26b98f86ee910ab3dd668e413738282003cc5b7357af9a7af54bb713d62255e80f56" + "0000000000000000000000000000000006ba8102bfbeea4416b710c73e8cce3032c31c6269c44906f8ac4f7874ce99fb17559992486528963884ce429a992fee0000000000000000000000000000000017c9fcf0504e62d3553b2f089b64574150aa5117bd3d2e89a8c1ed59bb7f70fb83215975ef31976e757abf60a75a1d9f" + "0000000000000000000000000000000008f5a53d704298fe0cfc955e020442874fe87d5c729c7126abbdcbed355eef6c8f07277bee6d49d56c4ebaf334848624" + "000000000000000000000000000000001302dcc50c6ce4c28086f8e1b43f9f65543cf598be440123816765ab6bc93f62bceda80045fbcad8598d4f32d03ee8fa000000000000000000000000000000000bbb4eb37628d60b035a3e0c45c0ea8c4abef5a6ddc5625e0560097ef9caab208221062e81cd77ef72162923a1906a40"), Bytes.fromHexString( "000000000000000000000000000000001830f52d9bff64a623c6f5259e2cd2c2a08ea17a8797aaf83174ea1e8c3bd3955c2af1d39bfa474815bfe60714b7cd80" + "000000000000000000000000000000000874389c02d4cf1c61bc54c4c24def11dfbe7880bc998a95e70063009451ee8226fec4b278aade3a7cea55659459f1d500000000000000000000000000000000197737f831d4dc7e708475f4ca7ca15284db2f3751fcaac0c17f517f1ddab35e1a37907d7b99b39d6c8d9001cd50e79e" + "000000000000000000000000000000000af1a3f6396f0c983e7c2d42d489a3ae5a3ff0a553d93154f73ac770cd0af7467aa0cef79f10bbd34621b3ec9583a834" + "000000000000000000000000000000001918cb6e448ed69fb906145de3f11455ee0359d030e90d673ce050a360d796de33ccd6a941c49a1414aca1c26f9e699e0000000000000000000000000000000019a915154a13249d784093facc44520e7f3a18410ab2a3093e0b12657788e9419eec25729944f7945e732104939e7a9e" + "000000000000000000000000000000001830f52d9bff64a623c6f5259e2cd2c2a08ea17a8797aaf83174ea1e8c3bd3955c2af1d39bfa474815bfe60714b7cd80" + "00000000000000000000000000000000118cd94e36ab177de95f52f180fdbdc584b8d30436eb882980306fa0625f07a1f7ad3b4c38a921c53d14aa9a6ba5b8d600000000000000000000000000000000197737f831d4dc7e708475f4ca7ca15284db2f3751fcaac0c17f517f1ddab35e1a37907d7b99b39d6c8d9001cd50e79e" + "000000000000000000000000000000000af1a3f6396f0c983e7c2d42d489a3ae5a3ff0a553d93154f73ac770cd0af7467aa0cef79f10bbd34621b3ec9583a834" + "000000000000000000000000000000001918cb6e448ed69fb906145de3f11455ee0359d030e90d673ce050a360d796de33ccd6a941c49a1414aca1c26f9e699e0000000000000000000000000000000019a915154a13249d784093facc44520e7f3a18410ab2a3093e0b12657788e9419eec25729944f7945e732104939e7a9e"), Bytes.fromHexString( "00000000000000000000000000000000189bf269a72de2872706983835afcbd09f6f4dfcabe0241b4e9fe1965a250d230d6f793ab17ce7cac456af7be4376be6" + "000000000000000000000000000000000d4441801d287ba8de0e2fb6b77f766dbff07b4027098ce463cab80e01eb31d9f5dbd7ac935703d68c7032fa5128ff170000000000000000000000000000000011798ea9c137acf6ef9483b489c0273d4f69296959922a352b079857953263372b8d339115f0576cfabedc185abf2086" + "000000000000000000000000000000001498b1412f52b07a0e4f91cbf5e1852ea38fc111613523f1e61b97ebf1fd7fd2cdf36d7f73f1e33719c0b63d7bf66b8f" + "0000000000000000000000000000000004c56d3ee9931f7582d7eebeb598d1be208e3b333ab976dc7bb271969fa1d6caf8f467eb7cbee4af5d30e5c66d00a4e2000000000000000000000000000000000de29857dae126c0acbe966da6f50342837ef5dd9994ad929d75814f6f33f77e5b33690945bf6e980031ddd90ebc76ce" + "00000000000000000000000000000000189bf269a72de2872706983835afcbd09f6f4dfcabe0241b4e9fe1965a250d230d6f793ab17ce7cac456af7be4376be6" + 
"000000000000000000000000000000000cbcd06a1c576af16d0d77ff8bcc3669a486d044cc7b85db03661a92f4c5c44a28d028521dfcfc292d8ecd05aed6ab940000000000000000000000000000000011798ea9c137acf6ef9483b489c0273d4f69296959922a352b079857953263372b8d339115f0576cfabedc185abf2086" + "000000000000000000000000000000001498b1412f52b07a0e4f91cbf5e1852ea38fc111613523f1e61b97ebf1fd7fd2cdf36d7f73f1e33719c0b63d7bf66b8f" + "0000000000000000000000000000000004c56d3ee9931f7582d7eebeb598d1be208e3b333ab976dc7bb271969fa1d6caf8f467eb7cbee4af5d30e5c66d00a4e2000000000000000000000000000000000de29857dae126c0acbe966da6f50342837ef5dd9994ad929d75814f6f33f77e5b33690945bf6e980031ddd90ebc76ce" + "00000000000000000000000000000000189bf269a72de2872706983835afcbd09f6f4dfcabe0241b4e9fe1965a250d230d6f793ab17ce7cac456af7be4376be6" + "000000000000000000000000000000000d4441801d287ba8de0e2fb6b77f766dbff07b4027098ce463cab80e01eb31d9f5dbd7ac935703d68c7032fa5128ff170000000000000000000000000000000011798ea9c137acf6ef9483b489c0273d4f69296959922a352b079857953263372b8d339115f0576cfabedc185abf2086" + "000000000000000000000000000000001498b1412f52b07a0e4f91cbf5e1852ea38fc111613523f1e61b97ebf1fd7fd2cdf36d7f73f1e33719c0b63d7bf66b8f" + "00000000000000000000000000000000153ba4ab4fecc724c843b8f78db2db1943e91051b8cb9be2eb7e610a570f1f5925b7981334951b505cce1a3992ff05c9000000000000000000000000000000000c1e79925e9ebfd99e5d11489c56a994e0f855a759f0652cc9bb5151877cfea5c37896f56b949167b9cd2226f14333dd"), }; final BLS12PairingPrecompiledContract contract = new BLS12PairingPrecompiledContract(); for (int i = 0; i < args.length; i++) { final double gasSpent = runBenchmark(args[i], contract); System.out.printf( "BLS pairings %d pairs for %,d gas. Charging %,d gas.%n", i * 2 + 2, (int) gasSpent, contract.gasRequirement(args[i]).asUInt256().toLong()); } } private static void benchBLS12MapFPTOG1() { final Bytes arg = Bytes.fromHexString( "0000000000000000000000000000000014406e5bfb9209256a3820879a29ac2f62d6aca82324bf3ae2aa7d3c54792043bd8c791fccdb080c1a52dc68b8b69350"); final BLS12MapFpToG1PrecompiledContract contract = new BLS12MapFpToG1PrecompiledContract(); final double gasSpent = runBenchmark(arg, contract); System.out.printf( "MAPFPTOG1 for %,d gas. Charging %,d gas.%n", (int) gasSpent, contract.gasRequirement(arg).asUInt256().toLong()); } private static void benchBLS12MapFP2TOG2() { final Bytes arg = Bytes.fromHexString( "0000000000000000000000000000000014406e5bfb9209256a3820879a29ac2f62d6aca82324bf3ae2aa7d3c54792043bd8c791fccdb080c1a52dc68b8b69350000000000000000000000000000000000e885bb33996e12f07da69073e2c0cc880bc8eff26d2a724299eb12d54f4bcf26f4748bb020e80a7e3794a7b0e47a641"); final BLS12MapFp2ToG2PrecompiledContract contract = new BLS12MapFp2ToG2PrecompiledContract(); final double gasSpent = runBenchmark(arg, contract); System.out.printf( "MAPFP2TOG2 for %,d gas. 
Charging %,d gas.%n", (int) gasSpent, contract.gasRequirement(arg).asUInt256().toLong()); } private static double runBenchmark(final Bytes arg, final PrecompiledContract contract) { if (contract.compute(arg, fakeFrame) == null) { throw new RuntimeException("Input is Invalid"); } for (int i = 0; i < MATH_WARMUP; i++) { contract.compute(arg, fakeFrame); } final Stopwatch timer = Stopwatch.createStarted(); for (int i = 0; i < MATH_ITERATIONS; i++) { contract.compute(arg, fakeFrame); } timer.stop(); final double elapsed = timer.elapsed(TimeUnit.NANOSECONDS) / 1.0e9D; final double perCall = elapsed / MATH_ITERATIONS; return perCall * GAS_PER_SECOND_STANDARD; } public static void main(final String[] args) { benchSecp256k1Recover(); benchSha256(); benchKeccak256(); benchRipeMD(); benchBNADD(); benchBNMUL(); benchBNPairing(); benchModExp(); benchBLS12G1Add(); benchBLS12G1Mul(); benchBLS12G1MultiExp(); benchBLS12G2Add(); benchBLS12G2Mul(); benchBLS12G2MultiExp(); benchBLS12Pair(); benchBLS12MapFPTOG1(); benchBLS12MapFP2TOG2(); } }
1
26,086
we can use an import here
hyperledger-besu
java
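The review comment on the row above is terse; given the multi-kilobyte ModExp hex vectors inlined as Java string literals in this benchmark file, one plausible reading is that those inputs could be imported from a resource file rather than embedded in the source (it could also simply mean replacing a fully-qualified name with an import statement, which this excerpt cannot confirm). The sketch below illustrates only the resource-loading reading; the helper class name, the resource path, and the assumption that the benchmark's Bytes type is org.apache.tuweni.bytes.Bytes are illustrative and not part of the original file or patch.

import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;

import org.apache.tuweni.bytes.Bytes;

// Hypothetical helper: reads a hex-encoded test vector from a classpath resource
// (for example a file under src/test/resources) and decodes it into Bytes.
final class BenchmarkInputs {

  private BenchmarkInputs() {}

  static Bytes fromResource(final String resourceName) {
    try (InputStream in = BenchmarkInputs.class.getResourceAsStream(resourceName)) {
      if (in == null) {
        throw new IllegalArgumentException("Missing benchmark resource: " + resourceName);
      }
      // The resource is assumed to hold a single hex string, with or without a 0x prefix.
      final String hex = new String(in.readAllBytes(), StandardCharsets.UTF_8).trim();
      return Bytes.fromHexString(hex);
    } catch (final IOException e) {
      throw new RuntimeException("Failed to read benchmark resource: " + resourceName, e);
    }
  }
}

With a helper like this, a ModExp test case could be registered as testcases.put("nagydani-5-square", BenchmarkInputs.fromResource("/modexp-nagydani-5-square.hex")), keeping the long vectors out of the benchmark source; the resource name here is hypothetical.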
@@ -35,8 +35,6 @@ func EchoBehavior(addr string) Result { ctx, // TODO get Service from client &json.Request{ - Caller: "client", - Service: "yarpc-test", Procedure: "echo", Body: &server.EchoRequest{Token: token}, TTL: 3 * time.Second,
1
package client import ( "crypto/rand" "encoding/base64" "fmt" "io" "io/ioutil" "time" "github.com/yarpc/yarpc-go" "github.com/yarpc/yarpc-go/crossdock/server" "github.com/yarpc/yarpc-go/encoding/json" "github.com/yarpc/yarpc-go/transport" "github.com/yarpc/yarpc-go/transport/http" "golang.org/x/net/context" ) // EchoBehavior asserts that a server response is the same as the request func EchoBehavior(addr string) Result { yarpc := yarpc.New(yarpc.Config{ Name: "client", Outbounds: transport.Outbounds{ "yarpc-test": http.NewOutbound(fmt.Sprintf("http://%v:8081", addr)), }, }) client := json.New(yarpc.Channel("yarpc-test")) ctx, _ := context.WithTimeout(context.Background(), 3*time.Second) var response server.EchoResponse token := randString(5) _, err := client.Call( ctx, // TODO get Service from client &json.Request{ Caller: "client", Service: "yarpc-test", Procedure: "echo", Body: &server.EchoRequest{Token: token}, TTL: 3 * time.Second, }, &response, ) if err != nil { return Result{ Passed: false, Message: fmt.Sprintf("Got err: %v", err), } } if response.Token != token { return Result{ Passed: false, Message: fmt.Sprintf("Got %v, wanted %v", response.Token, token), } } return Result{ Passed: true, Message: fmt.Sprintf("Server said: %v", response.Token), } } func randString(length int64) string { bs, err := ioutil.ReadAll(io.LimitReader(rand.Reader, length)) if err != nil { panic(err) } return base64.RawStdEncoding.EncodeToString(bs) }
1
9,226
thx for removing these
yarpc-yarpc-go
go
@@ -55,7 +55,11 @@ public final class XmlReportFailuresSupplier implements FailuresSupplier { @Override public List<Failure> getFailures() throws IOException { File sourceReport = reporting.getReports().findByName("xml").getDestination(); - return XmlUtils.parseXml(reportHandler, new FileInputStream(sourceReport)).failures(); + try { + return XmlUtils.parseXml(reportHandler, new FileInputStream(sourceReport)).failures(); + } catch (Exception e) { + throw new RuntimeException(String.format("Failed to parse failures XML: %s", sourceReport), e); + } } @Override
1
/* * (c) Copyright 2017 Palantir Technologies Inc. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.palantir.gradle.junit; import java.io.File; import java.io.FileInputStream; import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; import java.util.List; import java.util.UUID; import org.gradle.api.Action; import org.gradle.api.Task; import org.gradle.api.reporting.ReportContainer; import org.gradle.api.reporting.Reporting; import org.gradle.api.reporting.SingleFileReport; public final class XmlReportFailuresSupplier implements FailuresSupplier { public static <T extends Task & Reporting<? extends ReportContainer<SingleFileReport>>> XmlReportFailuresSupplier create(final T task, final ReportHandler<T> reportHandler) { // Ensure any necessary output is enabled task.doFirst(new Action<Task>() { @Override public void execute(Task ignored) { reportHandler.configureTask(task); } }); return new XmlReportFailuresSupplier(task, reportHandler); } private final Reporting<? extends ReportContainer<SingleFileReport>> reporting; private final ReportHandler<?> reportHandler; private XmlReportFailuresSupplier( Reporting<? extends ReportContainer<SingleFileReport>> reporting, ReportHandler<?> reportHandler) { this.reporting = reporting; this.reportHandler = reportHandler; } @Override public List<Failure> getFailures() throws IOException { File sourceReport = reporting.getReports().findByName("xml").getDestination(); return XmlUtils.parseXml(reportHandler, new FileInputStream(sourceReport)).failures(); } @Override public RuntimeException handleInternalFailure(Path reportDir, RuntimeException ex) { Path rawReportsDir = reportDir.resolve(UUID.randomUUID().toString()); try { Files.createDirectories(rawReportsDir); } catch (IOException e) { throw new RuntimeException(e); } for (SingleFileReport rawReport : reporting.getReports()) { if (rawReport.isEnabled()) { rawReport.getDestination() .renameTo(rawReportsDir.resolve(rawReport.getDestination().getName()).toFile()); } } return new RuntimeException( "Finalizer failed; raw report files can be found at " + rawReportsDir.getFileName().toString(), ex); } }
1
7,401
I snuck this one in as well: checkstyle crashing (on files from resources) caused an unfinished XML to be written, and it wasn't obvious where that XML was
palantir-gradle-baseline
java
@@ -542,6 +542,7 @@ func TestCreateInstanceValidateMachineType(t *testing.T) { shouldErr bool }{ {"good case", fmt.Sprintf("projects/%s/zones/%s/machineTypes/%s", testProject, testZone, testMachineType), false}, + {"good case2", fmt.Sprintf("projects/%s/zones/%s/machineTypes/%s", testProject, testZone, testMachineType), false}, {"bad machine type case", fmt.Sprintf("projects/%s/zones/%s/machineTypes/bad-mt", testProject, testZone), true}, {"bad project case", fmt.Sprintf("projects/p2/zones/%s/machineTypes/%s", testZone, testMachineType), true}, {"bad zone case", fmt.Sprintf("projects/%s/zones/z2/machineTypes/%s", testProject, testMachineType), true},
1
// Copyright 2017 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package daisy import ( "bytes" "context" "errors" "fmt" "log" "net/http" "path" "reflect" "sort" "testing" "time" daisyCompute "github.com/GoogleCloudPlatform/compute-image-tools/daisy/compute" "github.com/kylelemons/godebug/pretty" compute "google.golang.org/api/compute/v1" ) func TestLogSerialOutput(t *testing.T) { ctx := context.Background() w := testWorkflow() instances[w].m = map[string]*resource{ "i1": {real: w.genName("i1"), link: "link"}, "i2": {real: w.genName("i2"), link: "link"}, "i3": {real: w.genName("i3"), link: "link"}, } w.ComputeClient.(*daisyCompute.TestClient).GetSerialPortOutputFn = func(_, _, n string, _, s int64) (*compute.SerialPortOutput, error) { if n == "i3" && s == 0 { return &compute.SerialPortOutput{Contents: "", Next: 1}, nil } return nil, errors.New("fail") } w.ComputeClient.(*daisyCompute.TestClient).InstanceStoppedFn = func(_, _, n string) (bool, error) { if n == "i2" { return false, nil } return true, nil } w.bucket = "test-bucket" var buf bytes.Buffer w.logger = log.New(&buf, "", 0) tests := []struct { test, want, name string }{ { "Error but instance stopped", "CreateInstances: streaming instance \"i1\" serial port 0 output to gs://test-bucket/i1-serial-port0.log\n", "i1", }, { "Error but instance running", "CreateInstances: streaming instance \"i2\" serial port 0 output to gs://test-bucket/i2-serial-port0.log\nCreateInstances: instance \"i2\": error getting serial port: fail\n", "i2", }, { "Normal flow", "CreateInstances: streaming instance \"i3\" serial port 0 output to gs://test-bucket/i3-serial-port0.log\n", "i3", }, { "Error but instance deleted", "CreateInstances: streaming instance \"i4\" serial port 0 output to gs://test-bucket/i4-serial-port0.log\n", "i4", }, } for _, tt := range tests { buf.Reset() logSerialOutput(ctx, w, tt.name, 0, 1*time.Microsecond) if buf.String() != tt.want { t.Errorf("%s: got: %q, want: %q", tt.test, buf.String(), tt.want) } } } func TestCreateInstancePopulate(t *testing.T) { ctx := context.Background() w := testWorkflow() desc := "desc" defP := w.Project defZ := w.Zone defMT := fmt.Sprintf("projects/%s/zones/%s/machineTypes/n1-standard-1", defP, defZ) defDM := defaultDiskMode defDs := []*compute.AttachedDisk{{Boot: true, Source: "foo", Mode: defDM}} defAcs := []*compute.AccessConfig{{Type: defaultAccessConfigType}} defNs := []*compute.NetworkInterface{{Network: fmt.Sprintf("projects/%s/global/networks/default", defP), AccessConfigs: defAcs}} defMD := map[string]string{"daisy-sources-path": "gs://", "daisy-logs-path": "gs://", "daisy-outs-path": "gs://"} defSs := []string{"https://www.googleapis.com/auth/devstorage.read_only"} defSAs := []*compute.ServiceAccount{{Email: "default", Scopes: defSs}} tests := []struct { desc string ci, want *CreateInstance shouldErr bool }{ { "defaults, non exact name case", &CreateInstance{Instance: compute.Instance{Name: "foo", Description: desc, Disks: []*compute.AttachedDisk{{Source: "foo"}}}}, 
&CreateInstance{Instance: compute.Instance{Name: w.genName("foo"), Description: desc, Disks: defDs, MachineType: defMT, NetworkInterfaces: defNs, ServiceAccounts: defSAs}, Metadata: defMD, Scopes: defSs, Project: defP, Zone: defZ, daisyName: "foo"}, false, }, { "nondefault zone/project case", &CreateInstance{ Instance: compute.Instance{Name: "foo", Description: desc, Disks: []*compute.AttachedDisk{{Source: "foo"}}}, Project: "pfoo", Zone: "zfoo", RealName: "inst-pfoo", }, &CreateInstance{ Instance: compute.Instance{ Name: "inst-pfoo", Description: desc, Disks: []*compute.AttachedDisk{{Boot: true, Source: "foo", Mode: defDM}}, MachineType: "projects/pfoo/zones/zfoo/machineTypes/n1-standard-1", NetworkInterfaces: []*compute.NetworkInterface{{Network: "projects/pfoo/global/networks/default", AccessConfigs: defAcs}}, ServiceAccounts: defSAs, }, Metadata: defMD, Scopes: defSs, Project: "pfoo", Zone: "zfoo", daisyName: "foo", RealName: "inst-pfoo", }, false, }, } for _, tt := range tests { s, _ := w.NewStep(tt.desc) s.CreateInstances = &CreateInstances{tt.ci} err := s.CreateInstances.populate(ctx, s) if tt.shouldErr { if err == nil { t.Errorf("%s: should have returned error but didn't", tt.desc) } } else if err != nil { t.Errorf("%s: unexpected error: %v", tt.desc, err) } else { tt.ci.Instance.Metadata = nil // This is undeterministic, but we can check tt.input.Metadata. if diff := pretty.Compare(tt.ci, tt.want); diff != "" { t.Errorf("%s: CreateInstance not modified as expected: (-got +want)\n%s", tt.desc, diff) } } } } func TestCreateInstancePopulateDisks(t *testing.T) { w := testWorkflow() iName := "foo" defDT := fmt.Sprintf("projects/%s/zones/%s/diskTypes/%s", testProject, testZone, defaultDiskType) tests := []struct { desc string ad, wantAd []*compute.AttachedDisk }{ { "normal case", []*compute.AttachedDisk{{Source: "d1"}}, []*compute.AttachedDisk{{Boot: true, Source: "d1", Mode: defaultDiskMode}}, }, { "multiple disks case", []*compute.AttachedDisk{{Source: "d1"}, {Source: "d2"}}, []*compute.AttachedDisk{{Boot: true, Source: "d1", Mode: defaultDiskMode}, {Boot: false, Source: "d2", Mode: defaultDiskMode}}, }, { "mode specified case", []*compute.AttachedDisk{{Source: "d1", Mode: diskModeRO}}, []*compute.AttachedDisk{{Boot: true, Source: "d1", Mode: diskModeRO}}, }, { "init params daisy image (and other defaults)", []*compute.AttachedDisk{{InitializeParams: &compute.AttachedDiskInitializeParams{SourceImage: "i"}}}, []*compute.AttachedDisk{{InitializeParams: &compute.AttachedDiskInitializeParams{DiskName: iName, SourceImage: "i", DiskType: defDT}, Mode: defaultDiskMode, Boot: true}}, }, { "init params image short url", []*compute.AttachedDisk{{InitializeParams: &compute.AttachedDiskInitializeParams{SourceImage: "global/images/i"}}}, []*compute.AttachedDisk{{InitializeParams: &compute.AttachedDiskInitializeParams{DiskName: iName, SourceImage: fmt.Sprintf("projects/%s/global/images/i", testProject), DiskType: defDT}, Mode: defaultDiskMode, Boot: true}}, }, { "init params image extended url", []*compute.AttachedDisk{{InitializeParams: &compute.AttachedDiskInitializeParams{SourceImage: fmt.Sprintf("projects/%s/global/images/i", testProject)}}}, []*compute.AttachedDisk{{InitializeParams: &compute.AttachedDiskInitializeParams{DiskName: iName, SourceImage: fmt.Sprintf("projects/%s/global/images/i", testProject), DiskType: defDT}, Mode: defaultDiskMode, Boot: true}}, }, { "init params disk type short url", []*compute.AttachedDisk{{InitializeParams: &compute.AttachedDiskInitializeParams{SourceImage: 
"i", DiskType: fmt.Sprintf("zones/%s/diskTypes/dt", testZone)}}}, []*compute.AttachedDisk{{InitializeParams: &compute.AttachedDiskInitializeParams{DiskName: iName, SourceImage: "i", DiskType: fmt.Sprintf("projects/%s/zones/%s/diskTypes/dt", testProject, testZone)}, Mode: defaultDiskMode, Boot: true}}, }, { "init params disk type extended url", []*compute.AttachedDisk{{InitializeParams: &compute.AttachedDiskInitializeParams{SourceImage: "i", DiskType: fmt.Sprintf("projects/%s/zones/%s/diskTypes/dt", testProject, testZone)}}}, []*compute.AttachedDisk{{InitializeParams: &compute.AttachedDiskInitializeParams{DiskName: iName, SourceImage: "i", DiskType: fmt.Sprintf("projects/%s/zones/%s/diskTypes/dt", testProject, testZone)}, Mode: defaultDiskMode, Boot: true}}, }, { "init params name suffixes", []*compute.AttachedDisk{ {InitializeParams: &compute.AttachedDiskInitializeParams{SourceImage: "i"}}, {Source: "d"}, {InitializeParams: &compute.AttachedDiskInitializeParams{DiskName: "foo", SourceImage: "i"}}, {InitializeParams: &compute.AttachedDiskInitializeParams{SourceImage: "i"}}, }, []*compute.AttachedDisk{ {InitializeParams: &compute.AttachedDiskInitializeParams{DiskName: iName, SourceImage: "i", DiskType: defDT}, Mode: defaultDiskMode, Boot: true}, {Source: "d", Mode: defaultDiskMode}, {InitializeParams: &compute.AttachedDiskInitializeParams{DiskName: "foo", SourceImage: "i", DiskType: defDT}, Mode: defaultDiskMode}, {InitializeParams: &compute.AttachedDiskInitializeParams{DiskName: fmt.Sprintf("%s-2", iName), SourceImage: "i", DiskType: defDT}, Mode: defaultDiskMode}, }, }, } for _, tt := range tests { ci := CreateInstance{Instance: compute.Instance{Name: iName, Disks: tt.ad}, Project: testProject, Zone: testZone} err := ci.populateDisks(w) if err != nil { t.Errorf("%s: populateDisks returned an unexpected error: %v", tt.desc, err) } else if diff := pretty.Compare(tt.ad, tt.wantAd); diff != "" { t.Errorf("%s: AttachedDisks not modified as expected: (-got +want)\n%s", tt.desc, diff) } } } func TestCreateInstancePopulateMachineType(t *testing.T) { tests := []struct { desc, mt, wantMt string shouldErr bool }{ {"normal case", "mt", "projects/foo/zones/bar/machineTypes/mt", false}, {"expand case", "zones/bar/machineTypes/mt", "projects/foo/zones/bar/machineTypes/mt", false}, } for _, tt := range tests { ci := CreateInstance{Instance: compute.Instance{MachineType: tt.mt}, Project: "foo", Zone: "bar"} err := ci.populateMachineType() if tt.shouldErr && err == nil { t.Errorf("%s: populateMachineType should have erred but didn't", tt.desc) } else if !tt.shouldErr && err != nil { t.Errorf("%s: populateMachineType returned an unexpected error: %v", tt.desc, err) } else if err == nil && ci.MachineType != tt.wantMt { t.Errorf("%s: MachineType not modified as expected: got: %q, want: %q", tt.desc, ci.MachineType, tt.wantMt) } } } func TestCreateInstancePopulateMetadata(t *testing.T) { w := testWorkflow() w.populate(context.Background()) w.Sources = map[string]string{"file": "foo/bar"} filePath := "gs://" + path.Join(w.bucket, w.sourcesPath, "file") baseMd := map[string]string{ "daisy-sources-path": "gs://" + path.Join(w.bucket, w.sourcesPath), "daisy-logs-path": "gs://" + path.Join(w.bucket, w.logsPath), "daisy-outs-path": "gs://" + path.Join(w.bucket, w.outsPath), } getWantMd := func(md map[string]string) *compute.Metadata { for k, v := range baseMd { md[k] = v } result := &compute.Metadata{} for k, v := range md { vCopy := v result.Items = append(result.Items, &compute.MetadataItems{Key: k, Value: &vCopy}) 
} return result } tests := []struct { desc string md map[string]string startupScript string wantMd *compute.Metadata shouldErr bool }{ {"defaults case", nil, "", getWantMd(map[string]string{}), false}, {"startup script case", nil, "file", getWantMd(map[string]string{"startup-script-url": filePath, "windows-startup-script-url": filePath}), false}, {"bad startup script case", nil, "foo", nil, true}, } for _, tt := range tests { ci := CreateInstance{Metadata: tt.md, StartupScript: tt.startupScript} err := ci.populateMetadata(w) if err == nil { if tt.shouldErr { t.Errorf("%s: populateMetadata should have erred but didn't", tt.desc) } else { compFactory := func(items []*compute.MetadataItems) func(i, j int) bool { return func(i, j int) bool { return items[i].Key < items[j].Key } } sort.Slice(ci.Instance.Metadata.Items, compFactory(ci.Instance.Metadata.Items)) sort.Slice(tt.wantMd.Items, compFactory(tt.wantMd.Items)) if diff := pretty.Compare(ci.Instance.Metadata, tt.wantMd); diff != "" { t.Errorf("%s: Metadata not modified as expected: (-got +want)\n%s", tt.desc, diff) } } } else if !tt.shouldErr { t.Errorf("%s: populateMetadata returned an unexpected error: %v", tt.desc, err) } } } func TestCreateInstancePopulateNetworks(t *testing.T) { defaultAcs := []*compute.AccessConfig{{Type: "ONE_TO_ONE_NAT"}} tests := []struct { desc string input, want []*compute.NetworkInterface }{ {"default case", nil, []*compute.NetworkInterface{{Network: fmt.Sprintf("projects/%s/global/networks/default", testProject), AccessConfigs: defaultAcs}}}, {"default AccessConfig case", []*compute.NetworkInterface{{Network: "global/networks/foo"}}, []*compute.NetworkInterface{{Network: fmt.Sprintf("projects/%s/global/networks/foo", testProject), AccessConfigs: defaultAcs}}}, {"network URL resolution case", []*compute.NetworkInterface{{Network: "foo", AccessConfigs: []*compute.AccessConfig{}}}, []*compute.NetworkInterface{{Network: fmt.Sprintf("projects/%s/global/networks/foo", testProject), AccessConfigs: []*compute.AccessConfig{}}}}, } for _, tt := range tests { ci := &CreateInstance{Instance: compute.Instance{NetworkInterfaces: tt.input}, Project: testProject} err := ci.populateNetworks() if err != nil { t.Errorf("%s: should have returned an error", tt.desc) } else if diff := pretty.Compare(ci.NetworkInterfaces, tt.want); diff != "" { t.Errorf("%s: NetworkInterfaces not modified as expected: (-got +want)\n%s", tt.desc, diff) } } } func TestCreateInstancePopulateScopes(t *testing.T) { defaultScopes := []string{"https://www.googleapis.com/auth/devstorage.read_only"} tests := []struct { desc string input []string inputSas, want []*compute.ServiceAccount shouldErr bool }{ {"default case", nil, nil, []*compute.ServiceAccount{{Email: "default", Scopes: defaultScopes}}, false}, {"nondefault case", []string{"foo"}, nil, []*compute.ServiceAccount{{Email: "default", Scopes: []string{"foo"}}}, false}, {"service accounts override case", []string{"foo"}, []*compute.ServiceAccount{}, []*compute.ServiceAccount{}, false}, } for _, tt := range tests { ci := &CreateInstance{Scopes: tt.input, Instance: compute.Instance{ServiceAccounts: tt.inputSas}} err := ci.populateScopes() if err == nil { if tt.shouldErr { t.Errorf("%s: should have returned an error", tt.desc) } else if diff := pretty.Compare(ci.ServiceAccounts, tt.want); diff != "" { t.Errorf("%s: NetworkInterfaces not modified as expected: (-got +want)\n%s", tt.desc, diff) } } else if !tt.shouldErr { t.Errorf("%s: unexpected error: %v", tt.desc, err) } } } func TestCreateInstancesRun(t 
*testing.T) { ctx := context.Background() var createErr error w := testWorkflow() w.ComputeClient.(*daisyCompute.TestClient).CreateInstanceFn = func(p, z string, i *compute.Instance) error { i.SelfLink = "insertedLink" return createErr } s := &Step{w: w} w.Sources = map[string]string{"file": "gs://some/file"} disks[w].m = map[string]*resource{ "d0": {real: w.genName("d0"), link: "diskLink0"}, } // Good case: check disk link gets resolved. Check instance reference map updates. i0 := &CreateInstance{daisyName: "i0", Instance: compute.Instance{Name: "realI0", MachineType: "foo-type", Disks: []*compute.AttachedDisk{{Source: "d0"}}}} i1 := &CreateInstance{daisyName: "i1", Project: "foo", Zone: "bar", Instance: compute.Instance{Name: "realI1", MachineType: "foo-type", Disks: []*compute.AttachedDisk{{Source: "other"}}}} ci := &CreateInstances{i0, i1} if err := ci.run(ctx, s); err != nil { t.Errorf("unexpected error running CreateInstances.run(): %v", err) } if i0.Disks[0].Source != disks[w].m["d0"].link { t.Errorf("instance disk link did not resolve properly: want: %q, got: %q", disks[w].m["d0"].link, i0.Disks[0].Source) } if i1.Disks[0].Source != "other" { t.Errorf("instance disk link did not resolve properly: want: %q, got: %q", "other", i1.Disks[0].Source) } // Bad case: compute client CreateInstance error. Check instance ref map doesn't update. instances[w].m = map[string]*resource{} createErr = errors.New("client error") ci = &CreateInstances{ {daisyName: "i0", Instance: compute.Instance{Name: "realI0", MachineType: "foo-type", Disks: []*compute.AttachedDisk{{Source: "d0"}}}}, } if err := ci.run(ctx, s); err != createErr { t.Errorf("CreateInstances.run() should have return compute client error: %v != %v", err, createErr) } } func TestCreateInstanceValidateDisks(t *testing.T) { // Test: // - good case // - no disks bad case // - bad disk mode case ctx := context.Background() w := testWorkflow() disks[w].m = map[string]*resource{"d": {link: fmt.Sprintf("projects/%s/zones/%s/disks/d", testProject, testZone)}} m := defaultDiskMode tests := []struct { desc string ci *CreateInstance shouldErr bool }{ {"good case", &CreateInstance{Instance: compute.Instance{Name: "foo", Disks: []*compute.AttachedDisk{{Source: "d", Mode: m}}}, Project: testProject, Zone: testZone}, false}, {"good case 2", &CreateInstance{Instance: compute.Instance{Name: "foo", Disks: []*compute.AttachedDisk{{Source: fmt.Sprintf("projects/%s/zones/%s/disks/d", testProject, testZone), Mode: m}}}, Project: testProject, Zone: testZone}, false}, {"bad no disks case", &CreateInstance{Instance: compute.Instance{Name: "foo"}}, true}, {"bad disk mode case", &CreateInstance{Instance: compute.Instance{Name: "foo", Disks: []*compute.AttachedDisk{{Source: "d", Mode: "bad mode!"}}}, Project: testProject, Zone: testZone}, true}, } for _, tt := range tests { s, _ := w.NewStep(tt.desc) s.CreateInstances = &CreateInstances{tt.ci} if err := tt.ci.validateDisks(ctx, s); tt.shouldErr && err == nil { t.Errorf("%s: should have returned an error", tt.desc) } else if !tt.shouldErr && err != nil { t.Errorf("%s: unexpected error: %v", tt.desc, err) } } } func TestCreateInstanceValidateDiskSource(t *testing.T) { // Test: // - good case // - disk dne // - disk has wrong project/zone w := testWorkflow() disks[w].m = map[string]*resource{"d": {link: fmt.Sprintf("projects/%s/zones/%s/disks/d", testProject, testZone)}} m := defaultDiskMode p := testProject z := testZone tests := []struct { desc string ads []*compute.AttachedDisk shouldErr bool }{ {"good case", 
[]*compute.AttachedDisk{{Source: "d", Mode: m}}, false}, {"disk dne case", []*compute.AttachedDisk{{Source: "dne", Mode: m}}, true}, {"bad project case", []*compute.AttachedDisk{{Source: fmt.Sprintf("projects/bad/zones/%s/disks/d", z), Mode: m}}, true}, {"bad zone case", []*compute.AttachedDisk{{Source: fmt.Sprintf("projects/%s/zones/bad/disks/d", p), Mode: m}}, true}, } for _, tt := range tests { s, _ := w.NewStep(tt.desc) ci := &CreateInstance{Instance: compute.Instance{Disks: tt.ads}, Project: p, Zone: z} s.CreateInstances = &CreateInstances{ci} err := ci.validateDiskSource(tt.ads[0], s) if tt.shouldErr && err == nil { t.Errorf("%s: should have returned an error but didn't", tt.desc) } else if !tt.shouldErr && err != nil { t.Errorf("%s: unexpected error: %v", tt.desc, err) } } } func TestCreateInstanceValidateDiskInitializeParams(t *testing.T) { // Test: // - good case // - bad disk name // - duplicate disk // - bad source given // - bad disk types (wrong project/zone) // - check that disks are created w := testWorkflow() images[w].m = map[string]*resource{"i": {link: "iLink"}} dt := fmt.Sprintf("projects/%s/zones/%s/diskTypes/pd-ssd", testProject, testZone) tests := []struct { desc string p *compute.AttachedDiskInitializeParams shouldErr bool }{ {"good case", &compute.AttachedDiskInitializeParams{DiskName: "foo", SourceImage: "i", DiskType: dt}, false}, {"bad disk name case", &compute.AttachedDiskInitializeParams{DiskName: "bad!", SourceImage: "i", DiskType: dt}, true}, {"bad dupe disk case", &compute.AttachedDiskInitializeParams{DiskName: "foo", SourceImage: "i", DiskType: dt}, true}, {"bad source case", &compute.AttachedDiskInitializeParams{DiskName: "bar", SourceImage: "i2", DiskType: dt}, true}, {"bad disk type case", &compute.AttachedDiskInitializeParams{DiskName: "bar", SourceImage: "i2", DiskType: fmt.Sprintf("projects/bad/zones/%s/diskTypes/pd-ssd", testZone)}, true}, {"bad disk type case 2", &compute.AttachedDiskInitializeParams{DiskName: "bar", SourceImage: "i2", DiskType: fmt.Sprintf("projects/%s/zones/bad/diskTypes/pd-ssd", testProject)}, true}, } for _, tt := range tests { s, _ := w.NewStep(tt.desc) ci := &CreateInstance{Instance: compute.Instance{Disks: []*compute.AttachedDisk{{InitializeParams: tt.p}}}, Project: testProject, Zone: testZone} s.CreateInstances = &CreateInstances{ci} if err := ci.validateDiskInitializeParams(ci.Disks[0], s); err == nil { if tt.shouldErr { t.Errorf("%s: should have returned an error but didn't", tt.desc) } } else if !tt.shouldErr { t.Errorf("%s: unexpected error: %v", tt.desc, err) } } // Check good disks were created. wantCreator := w.Steps["good case"] wantLink := fmt.Sprintf("projects/%s/zones/%s/disks/foo", testProject, testZone) wantFoo := &resource{real: "foo", link: wantLink, creator: wantCreator} if gotFoo, ok := disks[w].m["foo"]; !ok || !reflect.DeepEqual(gotFoo, wantFoo) { t.Errorf("foo resource not added as expected: got: %+v, want: %+v", gotFoo, wantFoo) } // Check proper image user registrations. 
wantU := w.Steps["good case"] found := false for _, u := range images[w].m["i"].users { if u == wantU { found = true } } if !found { t.Error("good case should have been a registered user of image \"i\"") } } func TestCreateInstanceValidateMachineType(t *testing.T) { c, err := newTestGCEClient() if err != nil { t.Fatalf("error creating test client: %v", err) } tests := []struct { desc string mt string shouldErr bool }{ {"good case", fmt.Sprintf("projects/%s/zones/%s/machineTypes/%s", testProject, testZone, testMachineType), false}, {"bad machine type case", fmt.Sprintf("projects/%s/zones/%s/machineTypes/bad-mt", testProject, testZone), true}, {"bad project case", fmt.Sprintf("projects/p2/zones/%s/machineTypes/%s", testZone, testMachineType), true}, {"bad zone case", fmt.Sprintf("projects/%s/zones/z2/machineTypes/%s", testProject, testMachineType), true}, {"bad zone case 2", "zones/z2/machineTypes/mt", true}, } for _, tt := range tests { ci := &CreateInstance{Instance: compute.Instance{MachineType: tt.mt}, Project: testProject, Zone: testZone} if err := ci.validateMachineType(c); tt.shouldErr && err == nil { t.Errorf("%s: should have returned an error", tt.desc) } else if !tt.shouldErr && err != nil { t.Errorf("%s: unexpected error: %v", tt.desc, err) } } } func TestCreateInstanceValidateNetworks(t *testing.T) { acs := []*compute.AccessConfig{{Type: "ONE_TO_ONE_NAT"}} tests := []struct { desc string nis []*compute.NetworkInterface shouldErr bool }{ {"good case", []*compute.NetworkInterface{{Network: "projects/p/global/networks/n", AccessConfigs: acs}}, false}, {"good case 2", []*compute.NetworkInterface{{Network: "projects/p/global/networks/n", AccessConfigs: acs}}, false}, {"bad name case", []*compute.NetworkInterface{{Network: "projects/p/global/networks/bad!", AccessConfigs: acs}}, true}, {"bad project case", []*compute.NetworkInterface{{Network: "projects/bad-project/global/networks/n", AccessConfigs: acs}}, true}, } for _, tt := range tests { ci := &CreateInstance{Instance: compute.Instance{NetworkInterfaces: tt.nis}, Project: "p"} if err := ci.validateNetworks(); tt.shouldErr && err == nil { t.Errorf("%s: should have returned an error", tt.desc) } else if !tt.shouldErr && err != nil { t.Errorf("%s: unexpected error: %v", tt.desc, err) } } } func TestCreateInstancesValidate(t *testing.T) { ctx := context.Background() w := testWorkflow() _, c, err := daisyCompute.NewTestClient(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.Method == "GET" && r.URL.String() == "/p/zones/z?alt=json" { fmt.Fprintln(w, `{}`) } else if r.Method == "GET" && r.URL.String() == "/p/zones/z/machineTypes/mt?alt=json" { fmt.Fprintln(w, `{}`) } else if r.Method == "GET" && r.URL.String() == "/p?alt=json" { fmt.Fprintln(w, `{}`) } else { w.WriteHeader(http.StatusBadRequest) fmt.Fprintf(w, "bad request: %+v", r) } })) if err != nil { t.Fatalf("error creating test client: %v", err) } w.ComputeClient = c p := "p" z := "z" ad := []*compute.AttachedDisk{{Source: "d", Mode: defaultDiskMode}} mt := fmt.Sprintf("projects/%s/zones/%s/machineTypes/mt", p, z) dCreator := &Step{name: "dCreator", w: w} w.Steps["dCreator"] = dCreator disks[w].registerCreation("d", &resource{link: fmt.Sprintf("projects/%s/zones/%s/disks/d", p, z)}, dCreator) tests := []struct { desc string input *CreateInstance shouldErr bool }{ {"normal case", &CreateInstance{Instance: compute.Instance{Name: "foo", Disks: ad, MachineType: mt}, Project: p, Zone: z}, false}, {"bad dupe case", &CreateInstance{Instance: compute.Instance{Name: 
"foo", Disks: ad, MachineType: mt}, Project: p, Zone: z}, true}, {"bad name case", &CreateInstance{Instance: compute.Instance{Name: "bad!", Disks: ad, MachineType: mt}, Project: p, Zone: z}, true}, {"bad project case", &CreateInstance{Instance: compute.Instance{Name: "bar", Disks: ad, MachineType: mt}, Project: "bad!", Zone: z}, true}, {"bad zone case", &CreateInstance{Instance: compute.Instance{Name: "baz", Disks: ad, MachineType: mt}, Project: p, Zone: "bad!"}, true}, {"machine type validation fails case", &CreateInstance{Instance: compute.Instance{Name: "gaz", Disks: ad, MachineType: "bad machine type!"}, Project: p, Zone: z, daisyName: "gaz"}, true}, } for _, tt := range tests { s, _ := w.NewStep(tt.desc) w.AddDependency(tt.desc, "dCreator") s.CreateInstances = &CreateInstances{tt.input} if err := s.CreateInstances.validate(ctx, s); tt.shouldErr && err == nil { t.Errorf("%s: should have returned an error", tt.desc) } else if !tt.shouldErr && err != nil { t.Errorf("%s: unexpected error: %v", tt.desc, err) } } }
1
6,819
How is this different than "good case"?
GoogleCloudPlatform-compute-image-tools
go
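The question above refers to the "good case 2" row of TestCreateInstanceValidateNetworks, which is byte-for-byte identical to "good case". A hedged sketch of one way the row could be made to exercise a distinct input; the Subnetwork value is purely illustrative and not taken from the actual change:

// Hypothetical replacement for the duplicated table row; only the added
// Subnetwork field distinguishes it from "good case".
{"good case 2", []*compute.NetworkInterface{{
	Network:       "projects/p/global/networks/n",
	Subnetwork:    "projects/p/regions/r/subnetworks/sn",
	AccessConfigs: acs,
}}, false},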
@@ -33,7 +33,7 @@ const ( // Client is wrapper of ECS client. type Client interface { - ServiceExists(ctx context.Context, clusterName string, services []string) (bool, error) + ServiceExists(ctx context.Context, clusterName string, services string) (bool, error) CreateService(ctx context.Context, service types.Service) (*types.Service, error) UpdateService(ctx context.Context, service types.Service) (*types.Service, error) RegisterTaskDefinition(ctx context.Context, taskDefinition types.TaskDefinition) (*types.TaskDefinition, error)
1
// Copyright 2020 The PipeCD Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package ecs import ( "context" "path/filepath" "sync" "github.com/aws/aws-sdk-go-v2/service/ecs/types" "go.uber.org/zap" "golang.org/x/sync/singleflight" "github.com/pipe-cd/pipe/pkg/config" ) const ( defaultTaskDefinitionFilename = "taskdef.yaml" defaultserviceDefinitionFilename = "servicedef.yaml" ) // Client is wrapper of ECS client. type Client interface { ServiceExists(ctx context.Context, clusterName string, services []string) (bool, error) CreateService(ctx context.Context, service types.Service) (*types.Service, error) UpdateService(ctx context.Context, service types.Service) (*types.Service, error) RegisterTaskDefinition(ctx context.Context, taskDefinition types.TaskDefinition) (*types.TaskDefinition, error) DeregisterTaskDefinition(ctx context.Context, taskDefinition types.TaskDefinition) (*types.TaskDefinition, error) CreateTaskSet(ctx context.Context, service types.Service, taskDefinition types.TaskDefinition) (*types.TaskSet, error) DeleteTaskSet(ctx context.Context, service types.Service, taskSet types.TaskSet) (*types.TaskSet, error) } // Registry holds a pool of aws client wrappers. type Registry interface { Client(name string, cfg *config.CloudProviderLambdaConfig, logger *zap.Logger) (Client, error) } // LoadServiceDefinition returns ServiceDefinition object from a given service definition file. func LoadServiceDefinition(appDir, serviceDefinitionFilename string) (types.Service, error) { if serviceDefinitionFilename == "" { serviceDefinitionFilename = defaultserviceDefinitionFilename } path := filepath.Join(appDir, serviceDefinitionFilename) return loadServiceDefinition(path) } // LoadTaskDefinition returns TaskDefinition object from a given task definition file. func LoadTaskDefinition(appDir, serviceDefinitionFilename string) (types.TaskDefinition, error) { if serviceDefinitionFilename == "" { serviceDefinitionFilename = defaultserviceDefinitionFilename } path := filepath.Join(appDir, serviceDefinitionFilename) return loadTaskDefinition(path) } type registry struct { clients map[string]Client mu sync.RWMutex newGroup *singleflight.Group } func (r *registry) Client(name string, cfg *config.CloudProviderLambdaConfig, logger *zap.Logger) (Client, error) { r.mu.RLock() client, ok := r.clients[name] r.mu.RUnlock() if ok { return client, nil } c, err, _ := r.newGroup.Do(name, func() (interface{}, error) { return newClient(cfg.Region, cfg.Profile, cfg.CredentialsFile, cfg.RoleARN, cfg.TokenFile, logger) }) if err != nil { return nil, err } client = c.(Client) r.mu.Lock() r.clients[name] = client r.mu.Unlock() return client, nil } var defaultRegistry = &registry{ clients: make(map[string]Client), newGroup: &singleflight.Group{}, } // DefaultRegistry returns a pool of aws clients and a mutex associated with it. func DefaultRegistry() Registry { return defaultRegistry }
1
15,335
The last parameter name should be `serviceName`, as it is in the implementation.
pipe-cd-pipe
go
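The comment above asks for the last parameter of ServiceExists to carry the same name as in the implementation. A minimal sketch of the interface with that rename applied, assuming the concrete client really does name the parameter serviceName:

package ecs

import "context"

// Sketch of the suggested rename; relative to the diff above, only the last
// parameter of ServiceExists changes.
type Client interface {
	ServiceExists(ctx context.Context, clusterName string, serviceName string) (bool, error)
	// ... remaining methods unchanged
}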
@@ -178,7 +178,7 @@ public class PrettyPrintVisitorTest { @Test public void multilineJavadocGetsFormatted() { CompilationUnit cu = new CompilationUnit(); - cu.addClass("X").addMethod("abc").setJavadocComment(" line1\n line2 *\n * line3"); + cu.addClass("X").addMethod("abc").setJavadocComment("line1\n line2 *\n * line3"); assertEqualsNoEol("public class X {\n" + "\n" +
1
/* * Copyright (C) 2007-2010 Júlio Vilmar Gesser. * Copyright (C) 2011, 2013-2016 The JavaParser Team. * * This file is part of JavaParser. * * JavaParser can be used either under the terms of * a) the GNU Lesser General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * b) the terms of the Apache License * * You should have received a copy of both licenses in LICENCE.LGPL and * LICENCE.APACHE. Please refer to those files for details. * * JavaParser is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. */ package com.github.javaparser.printer; import com.github.javaparser.JavaParser; import com.github.javaparser.ast.CompilationUnit; import com.github.javaparser.ast.Node; import com.github.javaparser.ast.body.MethodDeclaration; import com.github.javaparser.ast.comments.LineComment; import com.github.javaparser.ast.expr.CastExpr; import com.github.javaparser.ast.expr.ClassExpr; import com.github.javaparser.ast.expr.Expression; import com.github.javaparser.ast.expr.VariableDeclarationExpr; import com.github.javaparser.ast.type.Type; import org.junit.Test; import static com.github.javaparser.utils.TestUtils.assertEqualsNoEol; import static com.github.javaparser.utils.Utils.EOL; import static org.junit.Assert.assertEquals; public class PrettyPrintVisitorTest { @Test public void getMaximumCommonTypeWithoutAnnotations() { VariableDeclarationExpr vde1 = JavaParser.parseVariableDeclarationExpr("int a[], b[]"); assertEquals("int[]", vde1.getMaximumCommonType().get().toString()); VariableDeclarationExpr vde2 = JavaParser.parseVariableDeclarationExpr("int[][] a[], b[]"); assertEquals("int[][][]", vde2.getMaximumCommonType().get().toString()); VariableDeclarationExpr vde3 = JavaParser.parseVariableDeclarationExpr("int[][] a, b[]"); assertEquals("int[][]", vde3.getMaximumCommonType().get().toString()); } @Test public void getMaximumCommonTypeWithAnnotations() { VariableDeclarationExpr vde1 = JavaParser.parseVariableDeclarationExpr("int a @Foo [], b[]"); assertEquals("int", vde1.getMaximumCommonType().get().toString()); VariableDeclarationExpr vde2 = JavaParser.parseVariableDeclarationExpr("int[]@Foo [] a[], b[]"); assertEquals("int[] @Foo [][]", vde2.getMaximumCommonType().get().toString()); } private String print(Node node) { return new PrettyPrinter().print(node); } @Test public void printSimpleClassExpr() { ClassExpr expr = JavaParser.parseExpression("Foo.class"); assertEquals("Foo.class", print(expr)); } @Test public void printArrayClassExpr() { ClassExpr expr = JavaParser.parseExpression("Foo[].class"); assertEquals("Foo[].class", print(expr)); } @Test public void printGenericClassExpr() { ClassExpr expr = JavaParser.parseExpression("Foo<String>.class"); assertEquals("Foo<String>.class", print(expr)); } @Test public void printSimplestClass() { Node node = JavaParser.parse("class A {}"); assertEquals("class A {" + EOL + "}" + EOL, print(node)); } @Test public void printAClassWithField() { Node node = JavaParser.parse("class A { int a; }"); assertEquals("class A {" + EOL + EOL + " int a;" + EOL + "}" + EOL, print(node)); } @Test public void printAReceiverParameter() { Node node = JavaParser.parseBodyDeclaration("int x(@O X A.B.this, int y) { }"); assertEquals("int x(@O X A.B.this, int y) {" + EOL + "}", print(node)); } @Test public void 
printLambdaIntersectionTypeAssignment() { String code = "class A {" + EOL + " void f() {" + EOL + " Runnable r = (Runnable & Serializable) (() -> {});" + EOL + " r = (Runnable & Serializable)() -> {};" + EOL + " r = (Runnable & I)() -> {};" + EOL + " }}"; CompilationUnit cu = JavaParser.parse(code); MethodDeclaration methodDeclaration = (MethodDeclaration) cu.getType(0).getMember(0); assertEquals("Runnable r = (Runnable & Serializable) (() -> {" + EOL + "});", print(methodDeclaration.getBody().get().getStatements().get(0))); } @Test public void printIntersectionType() { String code = "(Runnable & Serializable) (() -> {})"; Expression expression = JavaParser.parseExpression(code); Type type = ((CastExpr) expression).getType(); assertEquals("Runnable & Serializable", print(type)); } @Test public void printLambdaIntersectionTypeReturn() { String code = "class A {" + EOL + " Object f() {" + EOL + " return (Comparator<Map.Entry<K, V>> & Serializable)(c1, c2) -> c1.getKey().compareTo(c2.getKey()); " + EOL + "}}"; CompilationUnit cu = JavaParser.parse(code); MethodDeclaration methodDeclaration = (MethodDeclaration) cu.getType(0).getMember(0); assertEquals("return (Comparator<Map.Entry<K, V>> & Serializable) (c1, c2) -> c1.getKey().compareTo(c2.getKey());", print(methodDeclaration.getBody().get().getStatements().get(0))); } @Test public void printClassWithoutJavaDocButWithComment() { String code = String.format("/** javadoc */ public class A { %s// stuff%s}", EOL, EOL); CompilationUnit cu = JavaParser.parse(code); PrettyPrinterConfiguration ignoreJavaDoc = new PrettyPrinterConfiguration().setPrintJavadoc(false); String content = cu.toString(ignoreJavaDoc); assertEquals(String.format("public class A {%s // stuff%s}%s", EOL, EOL, EOL), content); } @Test public void printImportsDefaultOrder() { String code = "import x.y.z;import a.b.c;import static b.c.d;class c {}"; CompilationUnit cu = JavaParser.parse(code); String content = cu.toString(); assertEqualsNoEol("import x.y.z;\n" + "import a.b.c;\n" + "import static b.c.d;\n" + "\n" + "class c {\n" + "}\n", content); } @Test public void printImportsOrdered() { String code = "import x.y.z;import a.b.c;import static b.c.d;class c {}"; CompilationUnit cu = JavaParser.parse(code); PrettyPrinterConfiguration orderImports = new PrettyPrinterConfiguration().setOrderImports(true); String content = cu.toString(orderImports); assertEqualsNoEol("import static b.c.d;\n" + "import a.b.c;\n" + "import x.y.z;\n" + "\n" + "class c {\n" + "}\n", content); } @Test public void multilineJavadocGetsFormatted() { CompilationUnit cu = new CompilationUnit(); cu.addClass("X").addMethod("abc").setJavadocComment(" line1\n line2 *\n * line3"); assertEqualsNoEol("public class X {\n" + "\n" + " /**\n" + " * line1\n" + " * line2 *\n" + " * line3\n" + " */\n" + " void abc() {\n" + " }\n" + "}\n", cu.toString()); } @Test public void emptyJavadocGetsFormatted() { CompilationUnit cu = new CompilationUnit(); cu.addClass("X").addMethod("abc").setJavadocComment(""); assertEqualsNoEol("public class X {\n" + "\n" + " /**\n" + " */\n" + " void abc() {\n" + " }\n" + "}\n", cu.toString()); } @Test public void multilineJavadocWithLotsOfEmptyLinesGetsFormattedNeatly() { CompilationUnit cu = new CompilationUnit(); cu.addClass("X").addMethod("abc").setJavadocComment("\n\n\n ab\n\n\n cd\n\n\n"); assertEqualsNoEol("public class X {\n" + "\n" + " /**\n" + " * ab\n" + " *\n" + " * cd\n" + " */\n" + " void abc() {\n" + " }\n" + "}\n", cu.toString()); } @Test public void singlelineJavadocGetsFormatted() 
{ CompilationUnit cu = new CompilationUnit(); cu.addClass("X").addMethod("abc").setJavadocComment(" line1"); assertEqualsNoEol("public class X {\n" + "\n" + " /**\n" + " * line1\n" + " */\n" + " void abc() {\n" + " }\n" + "}\n", cu.toString()); } @Test public void singlelineCommentGetsFormatted() { CompilationUnit cu = new CompilationUnit(); cu.addClass("X").addMethod("abc").setComment(new LineComment(" line1 \n ")); assertEqualsNoEol("public class X {\n" + "\n" + " // line1\n" + " void abc() {\n" + " }\n" + "}\n", cu.toString()); } @Test public void blockcommentGetsNoFormatting() { CompilationUnit cu = JavaParser.parse("class A {\n" + " public void helloWorld(String greeting, String name) {\n" + " //sdfsdfsdf\n" + " //sdfds\n" + " /*\n" + " dgfdgfdgfdgfdgfd\n" + " */\n" + " }\n" + "}\n"); assertEqualsNoEol("class A {\n" + "\n" + " public void helloWorld(String greeting, String name) {\n" + " // sdfsdfsdf\n" + " // sdfds\n" + " /*\n" + " dgfdgfdgfdgfdgfd\n" + " */\n" + " }\n" + "}\n", cu.toString()); } }
1
12,161
@ftomassetti - okay, this is a little meh.
javaparser-javaparser
java
@@ -11,7 +11,8 @@ import ( // Portable analogs of some common system call errors. var ( - ErrUnsupported = errors.New("operation not supported") + errUnsupported = errors.New("operation not supported") + notImplemented = errors.New("os: not implemented") ) // Stdin, Stdout, and Stderr are open Files pointing to the standard input,
1
// Package os implements a subset of the Go "os" package. See // https://godoc.org/os for details. // // Note that the current implementation is blocking. This limitation should be // removed in a future version. package os import ( "errors" ) // Portable analogs of some common system call errors. var ( ErrUnsupported = errors.New("operation not supported") ) // Stdin, Stdout, and Stderr are open Files pointing to the standard input, // standard output, and standard error file descriptors. var ( Stdin = &File{0, "/dev/stdin"} Stdout = &File{1, "/dev/stdout"} Stderr = &File{2, "/dev/stderr"} ) // File represents an open file descriptor. type File struct { fd uintptr name string } // NewFile returns a new File with the given file descriptor and name. func NewFile(fd uintptr, name string) *File { return &File{fd, name} } // Fd returns the integer Unix file descriptor referencing the open file. The // file descriptor is valid only until f.Close is called. func (f *File) Fd() uintptr { return f.fd }
1
7,314
Note, changed initial capitalisation of this for consistency. Can do it the other way around too if that'd be better. :wink:
tinygo-org-tinygo
go
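The rename in the diff above unexports the sentinel error, so code outside the package can no longer reference it directly; that is the capitalisation change the comment mentions. A minimal sketch of how the unexported values would then be surfaced; the Chdir stub is hypothetical and only illustrates returning the sentinel as a value:

package os

import "errors"

// Portable analogs of some common system call errors, unexported after the
// rename discussed above.
var (
	errUnsupported = errors.New("operation not supported")
	notImplemented = errors.New("os: not implemented")
)

// Hypothetical stub: with the sentinels unexported, package functions return
// them as error values instead of exposing the identifiers to callers.
func Chdir(dir string) error {
	return notImplemented
}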
@@ -11,7 +11,7 @@ if (typeof define === 'function' && define.amd) { }); } if (typeof module === 'object' && module.exports && typeof axeFunction.toString === 'function') { - axe.source = '(' + axeFunction.toString() + ')(this, this.document);'; + axe.source = '(' + axeFunction.toString() + ')(typeof window === "object" ? window : this);'; module.exports = axe; } if (typeof window.getComputedStyle === 'function') {
1
/*exported axe, commons */ /*global axeFunction, module, define */ // exported namespace for aXe var axe = axe || {}; axe.version = '<%= pkg.version %>'; if (typeof define === 'function' && define.amd) { define([], function () { 'use strict'; return axe; }); } if (typeof module === 'object' && module.exports && typeof axeFunction.toString === 'function') { axe.source = '(' + axeFunction.toString() + ')(this, this.document);'; module.exports = axe; } if (typeof window.getComputedStyle === 'function') { window.axe = axe; } // local namespace for common functions var commons; function SupportError(error) { this.name = 'SupportError'; this.cause = error.cause; this.message = `\`${error.cause}\` - feature unsupported in your environment.`; if (error.ruleId) { this.ruleId = error.ruleId; this.message += ` Skipping ${this.ruleId} rule.`; } this.stack = (new Error()).stack; } SupportError.prototype = Object.create(Error.prototype); SupportError.prototype.constructor = SupportError;
1
11,121
hey, aren't we supposed to be passing in two parameters here?
dequelabs-axe-core
js
@@ -18,6 +18,7 @@ bitcore.encoding.BufferWriter = require('./lib/encoding/bufferwriter'); bitcore.encoding.Varint = require('./lib/encoding/varint'); // main bitcoin library +bitcore.Unit = require('./lib/unit'); bitcore.Address = require('./lib/address'); bitcore.BIP32 = require('./lib/bip32'); bitcore.Block = require('./lib/block');
1
var bitcore = module.exports; // crypto bitcore.crypto = {}; bitcore.crypto.BN = require('./lib/crypto/bn'); bitcore.crypto.ECDSA = require('./lib/crypto/ecdsa'); bitcore.crypto.Hash = require('./lib/crypto/hash'); bitcore.crypto.Random = require('./lib/crypto/random'); bitcore.crypto.Point = require('./lib/crypto/point'); // encoding bitcore.encoding = {}; bitcore.encoding.Base58 = require('./lib/encoding/base58'); bitcore.encoding.Base58Check = require('./lib/encoding/base58check'); bitcore.encoding.BufferReader = require('./lib/encoding/bufferreader'); bitcore.encoding.BufferWriter = require('./lib/encoding/bufferwriter'); bitcore.encoding.Varint = require('./lib/encoding/varint'); // main bitcoin library bitcore.Address = require('./lib/address'); bitcore.BIP32 = require('./lib/bip32'); bitcore.Block = require('./lib/block'); bitcore.Blockheader = require('./lib/blockheader'); bitcore.Networks = require('./lib/networks'); bitcore.Opcode = require('./lib/opcode'); bitcore.PrivateKey = require('./lib/privatekey'); bitcore.PublicKey = require('./lib/publickey'); bitcore.Script = require('./lib/script'); bitcore.Signature = require('./lib/signature'); bitcore.Transaction = require('./lib/transaction'); bitcore.Txin = require('./lib/txin'); bitcore.Txout = require('./lib/txout'); //dependencies, subject to change bitcore.deps = {}; bitcore.deps.bnjs = require('bn.js'); bitcore.deps.bs58 = require('bs58'); bitcore.deps.Buffer = Buffer; bitcore.deps.elliptic = require('elliptic'); //bitcore.scriptexec = require('lib/scriptexec'); //bitcore.tx = require('lib/tx'); //bitcore.txpartial = require('lib/txpartial'); //bitcore.bip70 = require('lib/bip70');
1
13,195
please keep alphabetical ordering :)
bitpay-bitcore
js
@@ -43,11 +43,14 @@ import org.openqa.selenium.remote.internal.CircularOutputStream; import java.io.File; import java.io.IOException; +import java.io.InputStream; import java.io.InputStreamReader; +import java.io.OutputStream; import java.io.PrintWriter; import java.io.Reader; import java.net.ConnectException; import java.net.Socket; +import java.nio.charset.StandardCharsets; import java.util.List; import java.util.Map;
1
// Licensed to the Software Freedom Conservancy (SFC) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The SFC licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. package org.openqa.selenium.firefox.internal; import com.google.common.collect.ImmutableMap; import com.google.common.collect.Lists; import com.google.common.collect.Maps; import org.openqa.selenium.Beta; import org.openqa.selenium.WebDriverException; import org.openqa.selenium.WebElement; import org.openqa.selenium.firefox.ExtensionConnection; import org.openqa.selenium.firefox.FirefoxBinary; import org.openqa.selenium.firefox.FirefoxDriver; import org.openqa.selenium.firefox.FirefoxProfile; import org.openqa.selenium.internal.Lock; import org.openqa.selenium.logging.LocalLogs; import org.openqa.selenium.logging.NeedsLocalLogs; import org.openqa.selenium.net.PortProber; import org.openqa.selenium.remote.BeanToJsonConverter; import org.openqa.selenium.remote.Command; import org.openqa.selenium.remote.DriverCommand; import org.openqa.selenium.remote.ErrorCodes; import org.openqa.selenium.remote.JsonToBeanConverter; import org.openqa.selenium.remote.Response; import org.openqa.selenium.remote.SessionId; import org.openqa.selenium.remote.internal.CircularOutputStream; import java.io.File; import java.io.IOException; import java.io.InputStreamReader; import java.io.PrintWriter; import java.io.Reader; import java.net.ConnectException; import java.net.Socket; import java.util.List; import java.util.Map; @Beta public class MarionetteConnection implements ExtensionConnection, NeedsLocalLogs { private final static int BUFFER_SIZE = 4096; private final long connectTimeout; private final FirefoxBinary process; private final FirefoxProfile profile; private final String host; private final Lock lock; private File profileDir; private static Map<String, String> seleniumToMarionetteCommandMap = ImmutableMap.<String, String>builder() .put(DriverCommand.GET, "get") .put(DriverCommand.GET_ALERT_TEXT, "getTextFromDialog") .put(DriverCommand.ACCEPT_ALERT, "acceptDialog") .put(DriverCommand.DISMISS_ALERT, "dismissDialog") .put(DriverCommand.SET_ALERT_VALUE, "sendKeysToDialog") .put(DriverCommand.GET_CURRENT_WINDOW_HANDLE, "getWindow") .put(DriverCommand.GET_WINDOW_HANDLES, "getWindows") .put(DriverCommand.CLOSE, "closeWindow") .put(DriverCommand.GET_CURRENT_URL, "getUrl") .put(DriverCommand.FIND_CHILD_ELEMENT, "findElement") .put(DriverCommand.FIND_CHILD_ELEMENTS, "findElements") .put(DriverCommand.GET_ELEMENT_LOCATION, "getElementPosition") .put(DriverCommand.GET_ALL_COOKIES, "getAllCookies") .put(DriverCommand.QUIT, "deleteSession") .build(); private Socket socket; private PrintWriter writer; private Reader reader; private String marionetteId; private LocalLogs logs = LocalLogs.getNullLogger(); public MarionetteConnection(Lock lock, FirefoxBinary binary, FirefoxProfile profile, String host) throws Exception { this.host = host; 
this.connectTimeout = binary.getTimeout(); this.lock = lock; this.profile = profile; this.process = binary; } public void start() throws IOException { int port = PortProber.findFreePort(); profile.setPreference("marionette.defaultPrefs.enabled", true); profile.setPreference("marionette.defaultPrefs.port", port); profile.setPreference("browser.warnOnQuit", false); lock.lock(connectTimeout); try { profileDir = profile.layoutOnDisk(); process.clean(profile, profileDir); String firefoxLogFile = System.getProperty(FirefoxDriver.SystemProperty.BROWSER_LOGFILE); if (firefoxLogFile != null) { if ("/dev/stdout".equals(firefoxLogFile)) { process.setOutputWatcher(System.out); } else { File logFile = new File(firefoxLogFile); process.setOutputWatcher(new CircularOutputStream(logFile, BUFFER_SIZE)); } } process.startProfile(profile, profileDir, "-foreground", "-marionette"); // Just for the record; the critical section is all along while firefox is starting with the // profile. // There is currently no mechanism for the profile to notify us when it has started // successfully and is ready for requests. Instead, we must loop until we're able to // open a connection with the server, at which point it should be safe to continue // (since the extension shouldn't accept connections until it is ready for requests). long waitUntil = System.currentTimeMillis() + connectTimeout; while (!isConnected()) { tryToConnect(host, port); if (waitUntil < System.currentTimeMillis()) { throw new Error("Can't connect to " + host + ":" + port + "\n" + process.getConsoleOutput()); } try { Thread.sleep(100); } catch (InterruptedException ignored) { // Do nothing } } } catch (IOException e) { e.printStackTrace(); throw new WebDriverException( String.format("Failed to connect to binary %s on port %d; process output follows: \n%s", process.toString(), port, process.getConsoleOutput()), e); } catch (WebDriverException e) { throw new WebDriverException( String.format("Failed to connect to binary %s on port %d; process output follows: \n%s", process.toString(), port, process.getConsoleOutput()), e); } catch (Exception e) { throw new WebDriverException(e); } finally { lock.unlock(); } // Marionette sends back an initial acknowledgement response upon first // connect. We need to read that response before we can proceed. String rawResponse = receiveResponse(); // This initializes the "actor" for future communication with this instance. 
sendCommand(serializeCommand(new Command(null, "getMarionetteID"))); String getMarionetteIdRawResponse = receiveResponse(); System.out.println(getMarionetteIdRawResponse); Map<String, Object> map = new JsonToBeanConverter().convert(Map.class, getMarionetteIdRawResponse); marionetteId = map.get("id").toString(); } private void tryToConnect(String host, int port) { try { socket = new Socket(host, port); writer = new PrintWriter(socket.getOutputStream(), true); reader = new InputStreamReader(socket.getInputStream()); } catch (ConnectException ex) { socket = null; writer = null; reader = null; } catch (IOException ex) { socket = null; writer = null; reader = null; } } public Response execute(Command command) throws IOException { String commandAsString = serializeCommand(command); sendCommand(commandAsString); String rawResponse = receiveResponse(); Map<String, Object> map = new JsonToBeanConverter().convert(Map.class, rawResponse); Response response; if (DriverCommand.NEW_SESSION.equals(command.getName())) { response = new Response(new SessionId(map.get("sessionId").toString())); response.setValue(map.get("value")); } else { if (map.containsKey("error")) { // *************************************************************** // Marionette Compliance Issue: Error responses should, at a // minimum, put the status property at the root of the object. // In other words: // { // status: 7, // value: // { // message: "Did not find element with id=foo", // stackTrace: <stack trace goes here> // } // } // *************************************************************** response = new Response(); Object value = map.get("error"); if (value != null) { if (value instanceof Map) { Map<String, Object> errorMap = (Map<String, Object>) value; if (errorMap != null) { response.setStatus(Integer.parseInt(errorMap.get("status").toString())); errorMap.remove("status"); response.setValue(errorMap); } } else { response.setStatus(ErrorCodes.UNHANDLED_ERROR); response.setValue(value + ": " + map.get("message")); } } } else { response = new JsonToBeanConverter().convert(Response.class, rawResponse); // *************************************************************** // Marionette Compliance Issue: Responses from findElements // are returned with raw element IDs as the value. // This should be a JSON object with the following structure: // // { "ELEMENT": "<element ID goes here>" } // // This is to allow the client bindings to distinguish between // a raw string and an element reference returned from the // executeScript command. 
// *************************************************************** if (DriverCommand.GET_ACTIVE_ELEMENT.equals(command.getName())) { if (response.getStatus() == ErrorCodes.SUCCESS) { Map<String, Object> wrappedElement = Maps.newHashMap(); wrappedElement.put("ELEMENT", response.getValue().toString()); response.setValue(wrappedElement); } } } } return response; } private String serializeCommand(Command command) { // System.out.println("Command " + command); String commandName = command.getName(); Map<String, Object> params = Maps.newHashMap(); params.putAll(command.getParameters()); if (DriverCommand.NEW_SESSION.equals(commandName)) { params.remove("desiredCapabilities"); } else if (DriverCommand.SET_TIMEOUT.equals(commandName)) { String timeoutType = (String) params.get("type"); if ("implicit".equals(timeoutType)) { commandName = "setSearchTimeout"; } else if ("script".equals(timeoutType)) { commandName = "setScriptTimeout"; } params.remove("type"); } else if (DriverCommand.FIND_CHILD_ELEMENT.equals(commandName) || DriverCommand.FIND_CHILD_ELEMENTS.equals(commandName)) { renameParameter(params, "id", "element"); } else if (DriverCommand.CLICK.equals(commandName) || DriverCommand.DOUBLE_CLICK.equals(commandName) || DriverCommand.MOUSE_DOWN.equals(commandName) || DriverCommand.MOUSE_UP.equals(commandName) || DriverCommand.MOVE_TO.equals(commandName)) { String actionName = commandName; commandName = "actionChain"; List<Object> action = Lists.newArrayList(); action.add(actionName); if (params.containsKey("element")) { action.add(params.get("element")); params.remove("element"); } List<Object> actions = Lists.newArrayList(); actions.add(action); params.put("chain", actions); } else if (DriverCommand.SET_ALERT_VALUE.equals(commandName)) { renameParameter(params, "text", "value"); } else if (DriverCommand.SWITCH_TO_FRAME.equals(commandName)) { // https://bugzilla.mozilla.org/show_bug.cgi?id=1143908 if (params.get("id") instanceof Map) { params.put("element", ((Map<String, Object>) params.get("id")).get("ELEMENT")); params.remove("id"); } } if (seleniumToMarionetteCommandMap.containsKey(commandName)) { commandName = seleniumToMarionetteCommandMap.get(commandName); } Map<String, Object> map = Maps.newHashMap(); map.put("to", marionetteId != null ? 
marionetteId : "root"); map.put("name", commandName); if (command.getSessionId() != null) { map.put("sessionId", command.getSessionId().toString()); } map.put("parameters", params); return new BeanToJsonConverter().convert(map); } private void renameParameter(Map<String, Object> params, String origParName, String newParName) { Object o = params.get(origParName); params.put(newParName, o); params.remove(origParName); } private void sendCommand(String commandAsString) { String line = "" + commandAsString.length() + ":" + commandAsString; System.out.println(line); writer.write(line); writer.flush(); } private String receiveResponse() throws IOException { StringBuilder response = new StringBuilder(); char[] buf = new char[1024]; int len = reader.read(buf); response.append(buf, 0, len); String[] parts = response.toString().split(":", 2); int contentLength = Integer.parseInt(parts[0]); while (response.length() < contentLength + ":".length() + parts[0].length()) { buf = new char[1024]; len = reader.read(buf); if (len > 0) { response.append(buf, 0, len); } else { try { Thread.sleep(100); } catch (InterruptedException e) { } } } System.out.println("<- |" + response.toString() + "|"); parts = response.toString().split(":", 2); return parts[1].substring(0, contentLength); } public void quit() { try { writer.close(); reader.close(); socket.close(); } catch (IOException e) { e.printStackTrace(); } socket = null; // This should only be called after the QUIT command has been sent, // so go ahead and clean up our process and profile. process.quit(); if (profileDir != null) { profile.clean(profileDir); } } public boolean isConnected() { return socket != null && socket.isConnected(); } public void setLocalLogs(LocalLogs logs) { this.logs = logs; } }
1
12,020
Selenium must compile against Java 6. Revert this line.
SeleniumHQ-selenium
java
@@ -147,7 +147,7 @@ func (r *Reconciler) reconcilePullSubscription(ctx context.Context, source *v1al logging.FromContext(ctx).Desugar().Error("Failed to get PullSubscription", zap.Error(err)) return nil, fmt.Errorf("failed to get PullSubscription: %w", err) } - newPS := resources.MakePullSubscription(source.Namespace, source.Name, &source.Spec.PubSubSpec, source, source.Spec.Topic, r.receiveAdapterName, resourceGroup) + newPS := resources.MakePullSubscription(source.Namespace, source.Name, &source.Spec.PubSubSpec, source, source.Spec.Topic, r.receiveAdapterName, "", resourceGroup) logging.FromContext(ctx).Desugar().Debug("Creating PullSubscription", zap.Any("ps", newPS)) ps, err = r.RunClientSet.PubsubV1alpha1().PullSubscriptions(newPS.Namespace).Create(newPS) if err != nil {
1
/* Copyright 2019 Google LLC Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package pubsub import ( "context" "fmt" "time" "go.uber.org/zap" corev1 "k8s.io/api/core/v1" apierrs "k8s.io/apimachinery/pkg/api/errors" "k8s.io/client-go/tools/cache" "knative.dev/pkg/apis" "knative.dev/pkg/controller" "knative.dev/pkg/logging" "github.com/google/knative-gcp/pkg/apis/events/v1alpha1" pubsubv1alpha1 "github.com/google/knative-gcp/pkg/apis/pubsub/v1alpha1" listers "github.com/google/knative-gcp/pkg/client/listers/events/v1alpha1" pubsublisters "github.com/google/knative-gcp/pkg/client/listers/pubsub/v1alpha1" "github.com/google/knative-gcp/pkg/reconciler" "github.com/google/knative-gcp/pkg/reconciler/pubsub/resources" "k8s.io/apimachinery/pkg/api/equality" ) const ( finalizerName = controllerAgentName resourceGroup = "pubsubs.events.cloud.google.com" ) // Reconciler is the controller implementation for the PubSub source. type Reconciler struct { *reconciler.Base // pubsubLister for reading pubsubs. pubsubLister listers.PubSubLister // pullsubscriptionLister for reading pullsubscriptions. pullsubscriptionLister pubsublisters.PullSubscriptionLister receiveAdapterName string } // Check that we implement the controller.Reconciler interface. var _ controller.Reconciler = (*Reconciler)(nil) // Reconcile compares the actual state with the desired, and attempts to // converge the two. It then updates the Status block of the PubSub resource // with the current status of the resource. func (r *Reconciler) Reconcile(ctx context.Context, key string) error { // Convert the namespace/name string into a distinct namespace and name namespace, name, err := cache.SplitMetaNamespaceKey(key) if err != nil { logging.FromContext(ctx).Desugar().Error("Invalid resource key") return nil } // Get the PubSub resource with this namespace/name original, err := r.pubsubLister.PubSubs(namespace).Get(name) if apierrs.IsNotFound(err) { // The PubSub resource may no longer exist, in which case we stop processing. logging.FromContext(ctx).Desugar().Error("PubSub in work queue no longer exists") return nil } else if err != nil { return err } // Don't modify the informers copy pubsub := original.DeepCopy() reconcileErr := r.reconcile(ctx, pubsub) // If no error is returned, mark the observed generation. if reconcileErr == nil { pubsub.Status.ObservedGeneration = pubsub.Generation } if equality.Semantic.DeepEqual(original.Status, pubsub.Status) { // If we didn't change anything then don't call updateStatus. // This is important because the copy we loaded from the informer's // cache may be stale and we don't want to overwrite a prior update // to status with this stale state. } else if _, uErr := r.updateStatus(ctx, pubsub); uErr != nil { logging.FromContext(ctx).Desugar().Warn("Failed to update PubSub status", zap.Error(uErr)) r.Recorder.Eventf(pubsub, corev1.EventTypeWarning, "UpdateFailed", "Failed to update status for PubSub %q: %v", pubsub.Name, uErr) return uErr } else if reconcileErr == nil { // There was a difference and updateStatus did not return an error. 
r.Recorder.Eventf(pubsub, corev1.EventTypeNormal, "Updated", "Updated PubSub %q", pubsub.Name) } if reconcileErr != nil { r.Recorder.Event(pubsub, corev1.EventTypeWarning, "InternalError", reconcileErr.Error()) } return reconcileErr } func (r *Reconciler) reconcile(ctx context.Context, pubsub *v1alpha1.PubSub) error { ctx = logging.WithLogger(ctx, r.Logger.With(zap.Any("pubsub", pubsub))) pubsub.Status.InitializeConditions() if pubsub.DeletionTimestamp != nil { // No finalizer needed, the pullsubscription will be garbage collected. return nil } ps, err := r.reconcilePullSubscription(ctx, pubsub) if err != nil { pubsub.Status.MarkPullSubscriptionNotReady("PullSubscriptionReconcileFailed", "Failed to reconcile PullSubscription: %s", err.Error()) return err } pubsub.Status.PropagatePullSubscriptionStatus(ps.Status.GetCondition(apis.ConditionReady)) // Sink has been resolved from the underlying PullSubscription, set it here. sinkURI, err := apis.ParseURL(ps.Status.SinkURI) if err != nil { pubsub.Status.SinkURI = nil return err } else { pubsub.Status.SinkURI = sinkURI } return nil } func (r *Reconciler) reconcilePullSubscription(ctx context.Context, source *v1alpha1.PubSub) (*pubsubv1alpha1.PullSubscription, error) { ps, err := r.pullsubscriptionLister.PullSubscriptions(source.Namespace).Get(source.Name) if err != nil { if !apierrs.IsNotFound(err) { logging.FromContext(ctx).Desugar().Error("Failed to get PullSubscription", zap.Error(err)) return nil, fmt.Errorf("failed to get PullSubscription: %w", err) } newPS := resources.MakePullSubscription(source.Namespace, source.Name, &source.Spec.PubSubSpec, source, source.Spec.Topic, r.receiveAdapterName, resourceGroup) logging.FromContext(ctx).Desugar().Debug("Creating PullSubscription", zap.Any("ps", newPS)) ps, err = r.RunClientSet.PubsubV1alpha1().PullSubscriptions(newPS.Namespace).Create(newPS) if err != nil { logging.FromContext(ctx).Desugar().Error("Failed to create PullSubscription", zap.Error(err)) return nil, fmt.Errorf("failed to create PullSubscription: %w", err) } } return ps, nil } func (r *Reconciler) updateStatus(ctx context.Context, desired *v1alpha1.PubSub) (*v1alpha1.PubSub, error) { source, err := r.pubsubLister.PubSubs(desired.Namespace).Get(desired.Name) if err != nil { return nil, err } // Check if there is anything to update. if equality.Semantic.DeepEqual(source.Status, desired.Status) { return source, nil } becomesReady := desired.Status.IsReady() && !source.Status.IsReady() // Don't modify the informers copy. existing := source.DeepCopy() existing.Status = desired.Status src, err := r.RunClientSet.EventsV1alpha1().PubSubs(desired.Namespace).UpdateStatus(existing) if err == nil && becomesReady { // TODO compute duration since last non-ready. See https://github.com/google/knative-gcp/issues/455. duration := time.Since(src.ObjectMeta.CreationTimestamp.Time) logging.FromContext(ctx).Desugar().Info("PubSub became ready", zap.Any("after", duration)) r.Recorder.Event(source, corev1.EventTypeNormal, "ReadinessChanged", fmt.Sprintf("PubSub %q became ready", source.Name)) if metricErr := r.StatsReporter.ReportReady("PubSub", source.Namespace, source.Name, duration); metricErr != nil { logging.FromContext(ctx).Desugar().Error("Failed to record ready for PubSub", zap.Error(metricErr)) } } return src, err }
1
10,076
What is that `""`, the adapterType? Can you add `"" /* adapterType */`, in other places as well? It might be cleaner if it's a pointer or some varargs at the end, but I don't have any strong preference.
google-knative-gcp
go
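The comment above suggests either annotating the positional empty string as "" /* adapterType */ or replacing the trailing positional arguments with something more self-describing, such as varargs. A self-contained sketch of the varargs idea using functional options; every identifier below is hypothetical and not part of the knative-gcp code:

package main

import "fmt"

// Hypothetical options standing in for the trailing positional arguments.
type psOption func(*psArgs)

type psArgs struct {
	adapterType   string
	resourceGroup string
}

func withAdapterType(t string) psOption   { return func(a *psArgs) { a.adapterType = t } }
func withResourceGroup(g string) psOption { return func(a *psArgs) { a.resourceGroup = g } }

// makePullSubscription mimics the constructor's tail: options replace the
// hard-to-read "" placeholder argument.
func makePullSubscription(name string, opts ...psOption) psArgs {
	args := psArgs{}
	for _, o := range opts {
		o(&args)
	}
	fmt.Printf("PullSubscription %s: %+v\n", name, args)
	return args
}

func main() {
	// No adapter type: simply omit the option instead of passing "".
	makePullSubscription("source-a", withResourceGroup("pubsubs.events.cloud.google.com"))
	// With an adapter type.
	makePullSubscription("source-b", withAdapterType("source"), withResourceGroup("pubsubs.events.cloud.google.com"))
}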
@@ -2003,7 +2003,8 @@ Document.prototype.$__dirty = function() { // gh-2558: if we had to set a default and the value is not undefined, // we have to save as well all = all.concat(this.$__.activePaths.map('default', function(path) { - if (path === '_id' || !_this.getValue(path)) { + var pType = typeof _this.getValue(path); + if (path === '_id' || (!_this.getValue(path) && pType !== 'boolean')) { return; } return {
1
'use strict'; /*! * Module dependencies. */ const EventEmitter = require('events').EventEmitter; const InternalCache = require('./internal'); const MongooseError = require('./error'); const MixedSchema = require('./schema/mixed'); const Schema = require('./schema'); const ObjectExpectedError = require('./error/objectExpected'); const ObjectParameterError = require('./error/objectParameter'); const StrictModeError = require('./error/strict'); const ValidatorError = require('./schematype').ValidatorError; const VirtualType = require('./virtualtype'); const cleanModifiedSubpaths = require('./services/document/cleanModifiedSubpaths'); const compile = require('./services/document/compile').compile; const defineKey = require('./services/document/compile').defineKey; const flatten = require('./services/common').flatten; const get = require('lodash.get'); const idGetter = require('./plugins/idGetter'); const isDefiningProjection = require('./services/projection/isDefiningProjection'); const isExclusive = require('./services/projection/isExclusive'); const inspect = require('util').inspect; const internalToObjectOptions = require('./options').internalToObjectOptions; const mpath = require('mpath'); const utils = require('./utils'); const ValidationError = MongooseError.ValidationError; const clone = utils.clone; const deepEqual = utils.deepEqual; const isMongooseObject = utils.isMongooseObject; let DocumentArray; let MongooseArray; let Embedded; /** * Document constructor. * * @param {Object} obj the values to set * @param {Object} [fields] optional object containing the fields which were selected in the query returning this document and any populated paths data * @param {Boolean} [skipId] bool, should we auto create an ObjectId _id * @inherits NodeJS EventEmitter http://nodejs.org/api/events.html#events_class_events_eventemitter * @event `init`: Emitted on a document after it has was retreived from the db and fully hydrated by Mongoose. * @event `save`: Emitted when the document is successfully saved * @api private */ function Document(obj, fields, skipId, options) { if (typeof skipId === 'object' && skipId != null) { options = skipId; skipId = options.skipId; } options = options || {}; this.$__ = new InternalCache; this.$__.emitter = new EventEmitter(); this.isNew = 'isNew' in options ? options.isNew : true; this.errors = undefined; this.$__.$options = options || {}; if (obj != null && typeof obj !== 'object') { throw new ObjectParameterError(obj, 'obj', 'Document'); } var schema = this.schema; if (typeof fields === 'boolean') { this.$__.strictMode = fields; fields = undefined; } else { this.$__.strictMode = schema.options && schema.options.strict; this.$__.selected = fields; } var required = schema.requiredPaths(true); for (var i = 0; i < required.length; ++i) { this.$__.activePaths.require(required[i]); } this.$__.emitter.setMaxListeners(0); let exclude = null; // determine if this doc is a result of a query with // excluded fields if (fields && utils.getFunctionName(fields.constructor) === 'Object') { exclude = isExclusive(fields); } let hasIncludedChildren = exclude === false && fields ? 
$__hasIncludedChildren(fields) : {}; this.$__buildDoc(obj, fields, skipId, exclude, hasIncludedChildren, false); // By default, defaults get applied **before** setting initial values // Re: gh-6155 $__applyDefaults(this, fields, skipId, exclude, hasIncludedChildren, true); if (obj) { if (obj instanceof Document) { this.isNew = obj.isNew; } // Skip set hooks if (this.$__original_set) { this.$__original_set(obj, undefined, true); } else { this.$set(obj, undefined, true); } } // Function defaults get applied **after** setting initial values so they // see the full doc rather than an empty one, unless they opt out. // Re: gh-3781, gh-6155 $__applyDefaults(this, fields, skipId, exclude, hasIncludedChildren, false, options.skipDefaults); this.$__._id = this._id; if (!schema.options.strict && obj) { var _this = this, keys = Object.keys(this._doc); keys.forEach(function(key) { if (!(key in schema.tree)) { defineKey(key, null, _this); } }); } applyQueue(this); } /*! * Document exposes the NodeJS event emitter API, so you can use * `on`, `once`, etc. */ utils.each( ['on', 'once', 'emit', 'listeners', 'removeListener', 'setMaxListeners', 'removeAllListeners', 'addListener'], function(emitterFn) { Document.prototype[emitterFn] = function() { return this.$__.emitter[emitterFn].apply(this.$__.emitter, arguments); }; }); Document.prototype.constructor = Document; /** * The documents schema. * * @api public * @property schema * @memberOf Document */ Document.prototype.schema; /** * Boolean flag specifying if the document is new. * * @api public * @property isNew * @memberOf Document */ Document.prototype.isNew; /** * The string version of this documents _id. * * ####Note: * * This getter exists on all documents by default. The getter can be disabled by setting the `id` [option](/docs/guide.html#id) of its `Schema` to false at construction time. * * new Schema({ name: String }, { id: false }); * * @api public * @see Schema options /docs/guide.html#options * @property id * @memberOf Document */ Document.prototype.id; /** * Hash containing current validation errors. * * @api public * @property errors * @memberOf Document */ Document.prototype.errors; /*! * ignore */ function $__hasIncludedChildren(fields) { let hasIncludedChildren = {}; let keys = Object.keys(fields); for (var j = 0; j < keys.length; ++j) { let parts = keys[j].split('.'); let c = []; for (var k = 0; k < parts.length; ++k) { c.push(parts[k]); hasIncludedChildren[c.join('.')] = 1; } } return hasIncludedChildren; } /*! * ignore */ function $__applyDefaults(doc, fields, skipId, exclude, hasIncludedChildren, isBeforeSetters, pathsToSkip) { const paths = Object.keys(doc.schema.paths); const plen = paths.length; for (let i = 0; i < plen; ++i) { let def; let curPath = ''; let p = paths[i]; if (p === '_id' && skipId) { continue; } let type = doc.schema.paths[p]; let path = p.split('.'); let len = path.length; let included = false; let doc_ = doc._doc; for (let j = 0; j < len; ++j) { if (doc_ == null) { break; } let piece = path[j]; curPath += (!curPath.length ? 
'' : '.') + piece; if (exclude === true) { if (curPath in fields) { break; } } else if (exclude === false && fields && !included) { if (curPath in fields) { included = true; } else if (!hasIncludedChildren[curPath]) { break; } } if (j === len - 1) { if (doc_[piece] !== void 0) { break; } if (typeof type.defaultValue === 'function') { if (!type.defaultValue.$runBeforeSetters && isBeforeSetters) { break; } if (type.defaultValue.$runBeforeSetters && !isBeforeSetters) { break; } } else if (!isBeforeSetters) { // Non-function defaults should always run **before** setters continue; } if (pathsToSkip && pathsToSkip[curPath]) { break; } if (fields && exclude !== null) { if (exclude === true) { // apply defaults to all non-excluded fields if (p in fields) { continue; } def = type.getDefault(doc, false); if (typeof def !== 'undefined') { doc_[piece] = def; doc.$__.activePaths.default(p); } } else if (included) { // selected field def = type.getDefault(doc, false); if (typeof def !== 'undefined') { doc_[piece] = def; doc.$__.activePaths.default(p); } } } else { def = type.getDefault(doc, false); if (typeof def !== 'undefined') { doc_[piece] = def; doc.$__.activePaths.default(p); } } } else { doc_ = doc_[piece]; } } } } /** * Builds the default doc structure * * @param {Object} obj * @param {Object} [fields] * @param {Boolean} [skipId] * @api private * @method $__buildDoc * @memberOf Document */ Document.prototype.$__buildDoc = function(obj, fields, skipId, exclude, hasIncludedChildren) { const doc = {}; const paths = Object.keys(this.schema.paths). // Don't build up any paths that are underneath a map, we don't know // what the keys will be filter(p => !p.includes('$*')); const plen = paths.length; let ii = 0; for (; ii < plen; ++ii) { var p = paths[ii]; if (p === '_id') { if (skipId) { continue; } if (obj && '_id' in obj) { continue; } } const path = p.split('.'); const len = path.length; const last = len - 1; let curPath = ''; let doc_ = doc; let included = false; for (let i = 0; i < len; ++i) { const piece = path[i]; curPath += (!curPath.length ? '' : '.') + piece; // support excluding intermediary levels if (exclude === true) { if (curPath in fields) { break; } } else if (exclude === false && fields && !included) { if (curPath in fields) { included = true; } else if (!hasIncludedChildren[curPath]) { break; } } if (i < last) { doc_ = doc_[piece] || (doc_[piece] = {}); } } } this._doc = doc; }; /*! * Converts to POJO when you use the document for querying */ Document.prototype.toBSON = function() { return this.toObject(internalToObjectOptions); }; /** * Initializes the document without setters or marking anything modified. * * Called internally after a document is returned from mongodb. * * @param {Object} doc document returned by mongo * @api public * @memberOf Document */ Document.prototype.init = function(doc, opts, fn) { if (typeof opts === 'function') { fn = opts; opts = null; } this.$__init(doc, opts); if (fn) { fn(null, this); } return this; }; /*! 
* ignore */ Document.prototype.$__init = function(doc, opts) { this.isNew = false; this.$init = true; // handle docs with populated paths // If doc._id is not null or undefined if (doc._id !== null && doc._id !== undefined && opts && opts.populated && opts.populated.length) { var id = String(doc._id); for (var i = 0; i < opts.populated.length; ++i) { var item = opts.populated[i]; if (item.isVirtual) { this.populated(item.path, utils.getValue(item.path, doc), item); } else { this.populated(item.path, item._docs[id], item); } } } init(this, doc, this._doc); this.emit('init', this); this.constructor.emit('init', this); this.$__._id = this._id; return this; }; /*! * Init helper. * * @param {Object} self document instance * @param {Object} obj raw mongodb doc * @param {Object} doc object we are initializing * @api private */ function init(self, obj, doc, prefix) { prefix = prefix || ''; var keys = Object.keys(obj); var len = keys.length; var schema; var path; var i; var index = 0; while (index < len) { _init(index++); } function _init(index) { i = keys[index]; path = prefix + i; schema = self.schema.path(path); // Should still work if not a model-level discriminator, but should not be // necessary. This is *only* to catch the case where we queried using the // base model and the discriminated model has a projection if (self.schema.$isRootDiscriminator && !self.isSelected(path)) { return; } if (!schema && utils.isObject(obj[i]) && (!obj[i].constructor || utils.getFunctionName(obj[i].constructor) === 'Object')) { // assume nested object if (!doc[i]) { doc[i] = {}; } init(self, obj[i], doc[i], path + '.'); } else if (!schema) { doc[i] = obj[i]; } else { if (obj[i] === null) { doc[i] = null; } else if (obj[i] !== undefined) { let intCache = obj[i].$__ || {}; let wasPopulated = intCache.wasPopulated || null; if (schema && !wasPopulated) { try { doc[i] = schema.cast(obj[i], self, true); } catch (e) { self.invalidate(e.path, new ValidatorError({ path: e.path, message: e.message, type: 'cast', value: e.value })); } } else { doc[i] = obj[i]; } } // mark as hydrated if (!self.isModified(path)) { self.$__.activePaths.init(path); } } } } /** * Sends an update command with this document `_id` as the query selector. * * ####Example: * * weirdCar.update({$inc: {wheels:1}}, { w: 1 }, callback); * * ####Valid options: * * - same as in [Model.update](#model_Model.update) * * @see Model.update #model_Model.update * @param {Object} doc * @param {Object} options * @param {Function} callback * @return {Query} * @api public * @memberOf Document */ Document.prototype.update = function update() { var args = utils.args(arguments); args.unshift({_id: this._id}); return this.constructor.update.apply(this.constructor, args); }; /** * Getter/setter around the session associated with this document. Used to * automatically set `session` if you `save()` a doc that you got from a * query with an associated session. 
* * @param {ClientSession} [session] overwrite the current session * @return {ClientSession} * @method $session * @api public * @memberOf Document */ Document.prototype.$session = function $session(session) { if (arguments.length === 0) { return this.$__.session; } this.$__.session = session; return session; }; /** * Alias for `set()`, used internally to avoid conflicts * * @param {String|Object} path path or object of key/vals to set * @param {Any} val the value to set * @param {Schema|String|Number|Buffer|*} [type] optionally specify a type for "on-the-fly" attributes * @param {Object} [options] optionally specify options that modify the behavior of the set * @method $set * @name $set * @memberOf Document * @api public */ Document.prototype.$set = function $set(path, val, type, options) { if (type && utils.getFunctionName(type.constructor) === 'Object') { options = type; type = undefined; } options = options || {}; var merge = options.merge; var adhoc = type && type !== true; var constructing = type === true; var adhocs; var strict = 'strict' in options ? options.strict : this.$__.strictMode; if (adhoc) { adhocs = this.$__.adhocPaths || (this.$__.adhocPaths = {}); adhocs[path] = Schema.interpretAsType(path, type, this.schema.options); } if (typeof path !== 'string') { // new Document({ key: val }) if (path === null || path === void 0) { var _ = path; path = val; val = _; } else { var prefix = val ? val + '.' : ''; if (path instanceof Document) { if (path.$__isNested) { path = path.toObject(); } else { path = path._doc; } } var keys = Object.keys(path); var len = keys.length; var i = 0; var pathtype; var key; if (len === 0 && !this.schema.options.minimize) { if (val) { this.$set(val, {}); } return this; } while (i < len) { _handleIndex.call(this, i++); } return this; } } function _handleIndex(i) { key = keys[i]; var pathName = prefix + key; pathtype = this.schema.pathType(pathName); if (path[key] !== null && path[key] !== void 0 // need to know if plain object - no Buffer, ObjectId, ref, etc && utils.isObject(path[key]) && (!path[key].constructor || utils.getFunctionName(path[key].constructor) === 'Object') && pathtype !== 'virtual' && pathtype !== 'real' && !(this.$__path(pathName) instanceof MixedSchema) && !(this.schema.paths[pathName] && this.schema.paths[pathName].options && this.schema.paths[pathName].options.ref)) { this.$set(path[key], prefix + key, constructing); } else if (strict) { // Don't overwrite defaults with undefined keys (gh-3981) if (constructing && path[key] === void 0 && this.get(key) !== void 0) { return; } if (pathtype === 'real' || pathtype === 'virtual') { // Check for setting single embedded schema to document (gh-3535) var p = path[key]; if (this.schema.paths[pathName] && this.schema.paths[pathName].$isSingleNested && path[key] instanceof Document) { p = p.toObject({ virtuals: false, transform: false }); } this.$set(prefix + key, p, constructing); } else if (pathtype === 'nested' && path[key] instanceof Document) { this.$set(prefix + key, path[key].toObject({transform: false}), constructing); } else if (strict === 'throw') { if (pathtype === 'nested') { throw new ObjectExpectedError(key, path[key]); } else { throw new StrictModeError(key); } } } else if (path[key] !== void 0) { this.$set(prefix + key, path[key], constructing); } } var pathType = this.schema.pathType(path); if (pathType === 'nested' && val) { if (utils.isObject(val) && (!val.constructor || utils.getFunctionName(val.constructor) === 'Object')) { if (!merge) { this.setValue(path, null); 
cleanModifiedSubpaths(this, path); } if (Object.keys(val).length === 0) { this.setValue(path, {}); this.markModified(path); cleanModifiedSubpaths(this, path); } else { this.$set(val, path, constructing); } return this; } this.invalidate(path, new MongooseError.CastError('Object', val, path)); return this; } var schema; var parts = path.split('.'); if (pathType === 'adhocOrUndefined' && strict) { // check for roots that are Mixed types var mixed; for (i = 0; i < parts.length; ++i) { var subpath = parts.slice(0, i + 1).join('.'); schema = this.schema.path(subpath); if (schema instanceof MixedSchema) { // allow changes to sub paths of mixed types mixed = true; break; } // If path is underneath a virtual, bypass everything and just set it. if (i + 1 < parts.length && this.schema.pathType(subpath) === 'virtual') { mpath.set(path, val, this); return this; } } if (!mixed) { if (strict === 'throw') { throw new StrictModeError(path); } return this; } } else if (pathType === 'virtual') { schema = this.schema.virtualpath(path); schema.applySetters(val, this); return this; } else { schema = this.$__path(path); } // gh-4578, if setting a deeply nested path that doesn't exist yet, create it var cur = this._doc; var curPath = ''; for (i = 0; i < parts.length - 1; ++i) { cur = cur[parts[i]]; curPath += (curPath.length > 0 ? '.' : '') + parts[i]; if (!cur) { this.$set(curPath, {}); // Hack re: gh-5800. If nested field is not selected, it probably exists // so `MongoError: cannot use the part (nested of nested.num) to // traverse the element ({nested: null})` is not likely. If user gets // that error, its their fault for now. We should reconsider disallowing // modifying not selected paths for v5. if (!this.isSelected(curPath)) { this.unmarkModified(curPath); } cur = this.getValue(curPath); } } var pathToMark; // When using the $set operator the path to the field must already exist. // Else mongodb throws: "LEFT_SUBFIELD only supports Object" if (parts.length <= 1) { pathToMark = path; } else { for (i = 0; i < parts.length; ++i) { subpath = parts.slice(0, i + 1).join('.'); if (this.isDirectModified(subpath) // earlier prefixes that are already // marked as dirty have precedence || this.get(subpath) === null) { pathToMark = subpath; break; } } if (!pathToMark) { pathToMark = path; } } // if this doc is being constructed we should not trigger getters var priorVal = constructing ? 
undefined : this.getValue(path); if (!schema) { this.$__set(pathToMark, path, constructing, parts, schema, val, priorVal); return this; } var shouldSet = true; try { // If the user is trying to set a ref path to a document with // the correct model name, treat it as populated var didPopulate = false; if (schema.options && schema.options.ref && val instanceof Document && (schema.options.ref === val.constructor.modelName || schema.options.ref === val.constructor.baseModelName)) { if (this.ownerDocument) { this.ownerDocument().populated(this.$__fullPath(path), val._id, {model: val.constructor}); } else { this.populated(path, val._id, {model: val.constructor}); } didPopulate = true; } var popOpts; if (schema.options && Array.isArray(schema.options[this.schema.options.typeKey]) && schema.options[this.schema.options.typeKey].length && schema.options[this.schema.options.typeKey][0].ref && Array.isArray(val) && val.length > 0 && val[0] instanceof Document && val[0].constructor.modelName && (schema.options[this.schema.options.typeKey][0].ref === val[0].constructor.baseModelName || schema.options[this.schema.options.typeKey][0].ref === val[0].constructor.modelName)) { if (this.ownerDocument) { popOpts = { model: val[0].constructor }; this.ownerDocument().populated(this.$__fullPath(path), val.map(function(v) { return v._id; }), popOpts); } else { popOpts = { model: val[0].constructor }; this.populated(path, val.map(function(v) { return v._id; }), popOpts); } didPopulate = true; } var setterContext = constructing && this.$__.$options.priorDoc ? this.$__.$options.priorDoc : this; val = schema.applySetters(val, setterContext, false, priorVal); if (!didPopulate && this.$__.populated) { delete this.$__.populated[path]; } this.$markValid(path); } catch (e) { this.invalidate(path, new MongooseError.CastError(schema.instance, val, path, e)); shouldSet = false; } if (shouldSet) { this.$__set(pathToMark, path, constructing, parts, schema, val, priorVal); } if (schema.$isSingleNested && (this.isDirectModified(path) || val == null)) { cleanModifiedSubpaths(this, path); } return this; }; /** * Sets the value of a path, or many paths. * * ####Example: * * // path, value * doc.set(path, value) * * // object * doc.set({ * path : value * , path2 : { * path : value * } * }) * * // on-the-fly cast to number * doc.set(path, value, Number) * * // on-the-fly cast to string * doc.set(path, value, String) * * // changing strict mode behavior * doc.set(path, value, { strict: false }); * * @param {String|Object} path path or object of key/vals to set * @param {Any} val the value to set * @param {Schema|String|Number|Buffer|*} [type] optionally specify a type for "on-the-fly" attributes * @param {Object} [options] optionally specify options that modify the behavior of the set * @api public * @method set * @memberOf Document */ Document.prototype.set = Document.prototype.$set; /** * Determine if we should mark this change as modified. * * @return {Boolean} * @api private * @method $__shouldModify * @memberOf Document */ Document.prototype.$__shouldModify = function(pathToMark, path, constructing, parts, schema, val, priorVal) { if (this.isNew) { return true; } if (undefined === val && !this.isSelected(path)) { // when a path is not selected in a query, its initial // value will be undefined. 
return true; } if (undefined === val && path in this.$__.activePaths.states.default) { // we're just unsetting the default value which was never saved return false; } // gh-3992: if setting a populated field to a doc, don't mark modified // if they have the same _id if (this.populated(path) && val instanceof Document && deepEqual(val._id, priorVal)) { return false; } if (!deepEqual(val, priorVal || this.get(path))) { return true; } if (!constructing && val !== null && val !== undefined && path in this.$__.activePaths.states.default && deepEqual(val, schema.getDefault(this, constructing))) { // a path with a default was $unset on the server // and the user is setting it to the same value again return true; } return false; }; /** * Handles the actual setting of the value and marking the path modified if appropriate. * * @api private * @method $__set * @memberOf Document */ Document.prototype.$__set = function(pathToMark, path, constructing, parts, schema, val, priorVal) { Embedded = Embedded || require('./types/embedded'); var shouldModify = this.$__shouldModify(pathToMark, path, constructing, parts, schema, val, priorVal); var _this = this; if (shouldModify) { this.markModified(pathToMark); // handle directly setting arrays (gh-1126) MongooseArray || (MongooseArray = require('./types/array')); if (val && val.isMongooseArray) { val._registerAtomic('$set', val); // Update embedded document parent references (gh-5189) if (val.isMongooseDocumentArray) { val.forEach(function(item) { item && item.__parentArray && (item.__parentArray = val); }); } // Small hack for gh-1638: if we're overwriting the entire array, ignore // paths that were modified before the array overwrite this.$__.activePaths.forEach(function(modifiedPath) { if (modifiedPath.indexOf(path + '.') === 0) { _this.$__.activePaths.ignore(modifiedPath); } }); } } var obj = this._doc; var i = 0; var l = parts.length; var cur = ''; for (; i < l; i++) { var next = i + 1; var last = next === l; cur += (cur ? '.' + parts[i] : parts[i]); if (last) { if (obj instanceof Map) { obj.set(parts[i], val); } else { obj[parts[i]] = val; } } else { if (obj[parts[i]] && utils.getFunctionName(obj[parts[i]].constructor) === 'Object') { obj = obj[parts[i]]; } else if (obj[parts[i]] && obj[parts[i]] instanceof Embedded) { obj = obj[parts[i]]; } else if (obj[parts[i]] && obj[parts[i]].$isSingleNested) { obj = obj[parts[i]]; } else if (obj[parts[i]] && Array.isArray(obj[parts[i]])) { obj = obj[parts[i]]; } else { obj[parts[i]] = obj[parts[i]] || {}; obj = obj[parts[i]]; } } } }; /** * Gets a raw value from a path (no getters) * * @param {String} path * @api private */ Document.prototype.getValue = function(path) { return utils.getValue(path, this._doc); }; /** * Sets a raw value for a path (no casting, setters, transformations) * * @param {String} path * @param {Object} value * @api private */ Document.prototype.setValue = function(path, val) { utils.setValue(path, val, this._doc); return this; }; /** * Returns the value of a path. 
* * ####Example * * // path * doc.get('age') // 47 * * // dynamic casting to a string * doc.get('age', String) // "47" * * @param {String} path * @param {Schema|String|Number|Buffer|*} [type] optionally specify a type for on-the-fly attributes * @api public */ Document.prototype.get = function(path, type, options) { var adhoc; options = options || {}; if (type) { adhoc = Schema.interpretAsType(path, type, this.schema.options); } var schema = this.$__path(path) || this.schema.virtualpath(path); var pieces = path.split('.'); var obj = this._doc; if (schema instanceof VirtualType) { if (schema.getters.length === 0) { return void 0; } return schema.applyGetters(null, this); } for (var i = 0, l = pieces.length; i < l; i++) { if (obj == null) { obj = void 0; } else if (obj instanceof Map) { obj = obj.get(pieces[i]); } else { obj = obj[pieces[i]]; } } if (adhoc) { obj = adhoc.cast(obj); } if (schema) { obj = schema.applyGetters(obj, this); } else if (this.schema.nested[path] && options.virtuals) { // Might need to apply virtuals if this is a nested path return applyGetters(this, utils.clone(obj), 'virtuals', { path: path }); } return obj; }; /** * Returns the schematype for the given `path`. * * @param {String} path * @api private * @method $__path * @memberOf Document */ Document.prototype.$__path = function(path) { var adhocs = this.$__.adhocPaths, adhocType = adhocs && adhocs[path]; if (adhocType) { return adhocType; } return this.schema.path(path); }; /** * Marks the path as having pending changes to write to the db. * * _Very helpful when using [Mixed](./schematypes.html#mixed) types._ * * ####Example: * * doc.mixed.type = 'changed'; * doc.markModified('mixed.type'); * doc.save() // changes to mixed.type are now persisted * * @param {String} path the path to mark modified * @param {Document} [scope] the scope to run validators with * @api public */ Document.prototype.markModified = function(path, scope) { this.$__.activePaths.modify(path); if (scope != null && !this.ownerDocument) { this.$__.pathsToScopes[path] = scope; } }; /** * Clears the modified state on the specified path. * * ####Example: * * doc.foo = 'bar'; * doc.unmarkModified('foo'); * doc.save() // changes to foo will not be persisted * * @param {String} path the path to unmark modified * @api public */ Document.prototype.unmarkModified = function(path) { this.$__.activePaths.init(path); delete this.$__.pathsToScopes[path]; }; /** * Don't run validation on this path or persist changes to this path. * * ####Example: * * doc.foo = null; * doc.$ignore('foo'); * doc.save() // changes to foo will not be persisted and validators won't be run * * @memberOf Document * @method $ignore * @param {String} path the path to ignore * @api public */ Document.prototype.$ignore = function(path) { this.$__.activePaths.ignore(path); }; /** * Returns the list of paths that have been modified. * * @param {Object} [options] * @param {Boolean} [options.includeChildren=false] if true, returns children of modified paths as well. 
For example, if false, the list of modified paths for `doc.colors = { primary: 'blue' };` will **not** contain `colors.primary` * @return {Array} * @api public */ Document.prototype.modifiedPaths = function(options) { options = options || {}; var directModifiedPaths = Object.keys(this.$__.activePaths.states.modify); var _this = this; return directModifiedPaths.reduce(function(list, path) { var parts = path.split('.'); list = list.concat(parts.reduce(function(chains, part, i) { return chains.concat(parts.slice(0, i).concat(part).join('.')); }, []).filter(function(chain) { return (list.indexOf(chain) === -1); })); if (!options.includeChildren) { return list; } var cur = _this.get(path); if (cur != null && typeof cur === 'object') { if (cur._doc) { cur = cur._doc; } Object.keys(cur). filter(function(key) { return list.indexOf(path + '.' + key) === -1; }). forEach(function(key) { list.push(path + '.' + key); }); } return list; }, []); }; /** * Returns true if this document was modified, else false. * * If `path` is given, checks if a path or any full path containing `path` as part of its path chain has been modified. * * ####Example * * doc.set('documents.0.title', 'changed'); * doc.isModified() // true * doc.isModified('documents') // true * doc.isModified('documents.0.title') // true * doc.isModified('documents otherProp') // true * doc.isDirectModified('documents') // false * * @param {String} [path] optional * @return {Boolean} * @api public */ Document.prototype.isModified = function(paths, modifiedPaths) { if (paths) { if (!Array.isArray(paths)) { paths = paths.split(' '); } var modified = modifiedPaths || this.modifiedPaths(); var directModifiedPaths = Object.keys(this.$__.activePaths.states.modify); var isModifiedChild = paths.some(function(path) { return !!~modified.indexOf(path); }); return isModifiedChild || paths.some(function(path) { return directModifiedPaths.some(function(mod) { return mod === path || path.indexOf(mod + '.') === 0; }); }); } return this.$__.activePaths.some('modify'); }; /** * Checks if a path is set to its default. * * ####Example * * MyModel = mongoose.model('test', { name: { type: String, default: 'Val '} }); * var m = new MyModel(); * m.$isDefault('name'); // true * * @memberOf Document * @method $isDefault * @param {String} [path] * @return {Boolean} * @api public */ Document.prototype.$isDefault = function(path) { return (path in this.$__.activePaths.states.default); }; /** * Getter/setter, determines whether the document was removed or not. * * ####Example: * product.remove(function (err, product) { * product.isDeleted(); // true * product.remove(); // no-op, doesn't send anything to the db * * product.isDeleted(false); * product.isDeleted(); // false * product.remove(); // will execute a remove against the db * }) * * @param {Boolean} [val] optional, overrides whether mongoose thinks the doc is deleted * @return {Boolean} whether mongoose thinks this doc is deleted. * @method $isDeleted * @memberOf Document * @api public */ Document.prototype.$isDeleted = function(val) { if (arguments.length === 0) { return !!this.$__.isDeleted; } this.$__.isDeleted = !!val; return this; }; /** * Returns true if `path` was directly set and modified, else false. 
* * ####Example * * doc.set('documents.0.title', 'changed'); * doc.isDirectModified('documents.0.title') // true * doc.isDirectModified('documents') // false * * @param {String} path * @return {Boolean} * @api public */ Document.prototype.isDirectModified = function(path) { return (path in this.$__.activePaths.states.modify); }; /** * Checks if `path` was initialized. * * @param {String} path * @return {Boolean} * @api public */ Document.prototype.isInit = function(path) { return (path in this.$__.activePaths.states.init); }; /** * Checks if `path` was selected in the source query which initialized this document. * * ####Example * * Thing.findOne().select('name').exec(function (err, doc) { * doc.isSelected('name') // true * doc.isSelected('age') // false * }) * * @param {String} path * @return {Boolean} * @api public */ Document.prototype.isSelected = function isSelected(path) { if (this.$__.selected) { if (path === '_id') { return this.$__.selected._id !== 0; } var paths = Object.keys(this.$__.selected); var i = paths.length; var inclusive = null; var cur; if (i === 1 && paths[0] === '_id') { // only _id was selected. return this.$__.selected._id === 0; } while (i--) { cur = paths[i]; if (cur === '_id') { continue; } if (!isDefiningProjection(this.$__.selected[cur])) { continue; } inclusive = !!this.$__.selected[cur]; break; } if (inclusive === null) { return true; } if (path in this.$__.selected) { return inclusive; } i = paths.length; var pathDot = path + '.'; while (i--) { cur = paths[i]; if (cur === '_id') { continue; } if (cur.indexOf(pathDot) === 0) { return inclusive || cur !== pathDot; } if (pathDot.indexOf(cur + '.') === 0) { return inclusive; } } return !inclusive; } return true; }; /** * Checks if `path` was explicitly selected. If no projection, always returns * true. * * ####Example * * Thing.findOne().select('nested.name').exec(function (err, doc) { * doc.isDirectSelected('nested.name') // true * doc.isDirectSelected('nested.otherName') // false * doc.isDirectSelected('nested') // false * }) * * @param {String} path * @return {Boolean} * @api public */ Document.prototype.isDirectSelected = function isDirectSelected(path) { if (this.$__.selected) { if (path === '_id') { return this.$__.selected._id !== 0; } var paths = Object.keys(this.$__.selected); var i = paths.length; var inclusive = null; var cur; if (i === 1 && paths[0] === '_id') { // only _id was selected. return this.$__.selected._id === 0; } while (i--) { cur = paths[i]; if (cur === '_id') { continue; } if (!isDefiningProjection(this.$__.selected[cur])) { continue; } inclusive = !!this.$__.selected[cur]; break; } if (inclusive === null) { return true; } if (path in this.$__.selected) { return inclusive; } return !inclusive; } return true; }; /** * Executes registered validation rules for this document. * * ####Note: * * This method is called `pre` save and if a validation rule is violated, [save](#model_Model-save) is aborted and the error is returned to your `callback`. 
* * ####Example: * * doc.validate(function (err) { * if (err) handleError(err); * else // validation passed * }); * * @param {Object} optional options internal options * @param {Function} callback optional callback called after validation completes, passing an error if one occurred * @return {Promise} Promise * @api public */ Document.prototype.validate = function(options, callback) { if (typeof options === 'function') { callback = options; options = null; } return utils.promiseOrCallback(callback, cb => this.$__validate(function(error) { cb(error); })); }; /*! * ignore */ function _getPathsToValidate(doc) { var i; var len; var skipSchemaValidators = {}; // only validate required fields when necessary var paths = Object.keys(doc.$__.activePaths.states.require).filter(function(path) { if (!doc.isSelected(path) && !doc.isModified(path)) { return false; } var p = doc.schema.path(path); if (typeof p.originalRequiredValue === 'function') { return p.originalRequiredValue.call(doc); } return true; }); paths = paths.concat(Object.keys(doc.$__.activePaths.states.init)); paths = paths.concat(Object.keys(doc.$__.activePaths.states.modify)); paths = paths.concat(Object.keys(doc.$__.activePaths.states.default)); if (!doc.ownerDocument) { var subdocs = doc.$__getAllSubdocs(); var subdoc; len = subdocs.length; var modifiedPaths = doc.modifiedPaths(); for (i = 0; i < len; ++i) { subdoc = subdocs[i]; if (doc.isModified(subdoc.$basePath, modifiedPaths) && !doc.isDirectModified(subdoc.$basePath)) { // Remove child paths for now, because we'll be validating the whole // subdoc paths = paths.filter(function(p) { return p != null && p.indexOf(subdoc.$basePath + '.') !== 0; }); paths.push(subdoc.$basePath); skipSchemaValidators[subdoc.$basePath] = true; } } } // gh-661: if a whole array is modified, make sure to run validation on all // the children as well len = paths.length; for (i = 0; i < len; ++i) { var path = paths[i]; var _pathType = doc.schema.path(path); if (!_pathType || !_pathType.$isMongooseArray || // To avoid potential performance issues, skip doc arrays whose children // are not required. `getPositionalPathType()` may be slow, so avoid // it unless we have a case of #6364 (_pathType.$isMongooseDocumentArray && !get(_pathType, 'schemaOptions.required'))) { continue; } var val = doc.getValue(path); if (val) { var numElements = val.length; for (var j = 0; j < numElements; ++j) { paths.push(path + '.' + j); } } } var flattenOptions = { skipArrays: true }; len = paths.length; for (i = 0; i < len; ++i) { var pathToCheck = paths[i]; if (doc.schema.nested[pathToCheck]) { var _v = doc.getValue(pathToCheck); if (isMongooseObject(_v)) { _v = _v.toObject({ transform: false }); } var flat = flatten(_v, '', flattenOptions); var _subpaths = Object.keys(flat).map(function(p) { return pathToCheck + '.' + p; }); paths = paths.concat(_subpaths); } } len = paths.length; for (i = 0; i < len; ++i) { const path = paths[i]; const _pathType = doc.schema.path(path); if (!_pathType || !_pathType.$isSchemaMap) { continue; } const val = doc.getValue(path); if (val == null) { continue; } for (let key of val.keys()) { paths.push(path + '.' + key); } } return [paths, skipSchemaValidators]; } /*! 
* ignore */ Document.prototype.$__validate = function(callback) { const _this = this; const _complete = function() { var err = _this.$__.validationError; _this.$__.validationError = undefined; _this.emit('validate', _this); _this.constructor.emit('validate', _this); if (err) { for (var key in err.errors) { // Make sure cast errors persist if (!_this.__parent && err.errors[key] instanceof MongooseError.CastError) { _this.invalidate(key, err.errors[key]); } } return err; } }; // only validate required fields when necessary const pathDetails = _getPathsToValidate(this); const paths = pathDetails[0]; const skipSchemaValidators = pathDetails[1]; if (paths.length === 0) { return process.nextTick(function() { const error = _complete(); if (error) { return _this.schema.s.hooks.execPost('validate:error', _this, [ _this], { error: error }, function(error) { callback(error); }); } callback(null, _this); }); } const validated = {}; let total = 0; var complete = function() { const error = _complete(); if (error) { return _this.schema.s.hooks.execPost('validate:error', _this, [ _this], { error: error }, function(error) { callback(error); }); } callback(null, _this); }; var validatePath = function(path) { if (path == null || validated[path]) { return; } validated[path] = true; total++; process.nextTick(function() { const p = _this.schema.path(path); if (!p) { return --total || complete(); } // If user marked as invalid or there was a cast error, don't validate if (!_this.$isValid(path)) { --total || complete(); return; } const val = _this.getValue(path); const scope = path in _this.$__.pathsToScopes ? _this.$__.pathsToScopes[path] : _this; p.doValidate(val, function(err) { if (err) { _this.invalidate(path, err, undefined, true); } --total || complete(); }, scope, { skipSchemaValidators: skipSchemaValidators[path] }); }); }; const numPaths = paths.length; for (let i = 0; i < numPaths; ++i) { validatePath(paths[i]); } }; /** * Executes registered validation rules (skipping asynchronous validators) for this document. * * ####Note: * * This method is useful if you need synchronous validation. * * ####Example: * * var err = doc.validateSync(); * if ( err ){ * handleError( err ); * } else { * // validation passed * } * * @param {Array|string} pathsToValidate only validate the given paths * @return {MongooseError|undefined} MongooseError if there are errors during validation, or undefined if there is no error. 
* @api public */ Document.prototype.validateSync = function(pathsToValidate) { const _this = this; if (typeof pathsToValidate === 'string') { pathsToValidate = pathsToValidate.split(' '); } // only validate required fields when necessary const pathDetails = _getPathsToValidate(this); let paths = pathDetails[0]; const skipSchemaValidators = pathDetails[1]; if (pathsToValidate && pathsToValidate.length) { var tmp = []; for (var i = 0; i < paths.length; ++i) { if (pathsToValidate.indexOf(paths[i]) !== -1) { tmp.push(paths[i]); } } paths = tmp; } var validating = {}; paths.forEach(function(path) { if (validating[path]) { return; } validating[path] = true; var p = _this.schema.path(path); if (!p) { return; } if (!_this.$isValid(path)) { return; } var val = _this.getValue(path); var err = p.doValidateSync(val, _this, { skipSchemaValidators: skipSchemaValidators[path] }); if (err) { _this.invalidate(path, err, undefined, true); } }); var err = _this.$__.validationError; _this.$__.validationError = undefined; _this.emit('validate', _this); _this.constructor.emit('validate', _this); if (err) { for (var key in err.errors) { // Make sure cast errors persist if (err.errors[key] instanceof MongooseError.CastError) { _this.invalidate(key, err.errors[key]); } } } return err; }; /** * Marks a path as invalid, causing validation to fail. * * The `errorMsg` argument will become the message of the `ValidationError`. * * The `value` argument (if passed) will be available through the `ValidationError.value` property. * * doc.invalidate('size', 'must be less than 20', 14); * doc.validate(function (err) { * console.log(err) * // prints * { message: 'Validation failed', * name: 'ValidationError', * errors: * { size: * { message: 'must be less than 20', * name: 'ValidatorError', * path: 'size', * type: 'user defined', * value: 14 } } } * }) * * @param {String} path the field to invalidate * @param {String|Error} errorMsg the error which states the reason `path` was invalid * @param {Object|String|Number|any} value optional invalid value * @param {String} [kind] optional `kind` property for the error * @return {ValidationError} the current ValidationError, with all currently invalidated paths * @api public */ Document.prototype.invalidate = function(path, err, val, kind) { if (!this.$__.validationError) { this.$__.validationError = new ValidationError(this); } if (this.$__.validationError.errors[path]) { return; } if (!err || typeof err === 'string') { err = new ValidatorError({ path: path, message: err, type: kind || 'user defined', value: val }); } if (this.$__.validationError === err) { return this.$__.validationError; } this.$__.validationError.addError(path, err); return this.$__.validationError; }; /** * Marks a path as valid, removing existing validation errors. * * @param {String} path the field to mark as valid * @api public * @memberOf Document * @method $markValid */ Document.prototype.$markValid = function(path) { if (!this.$__.validationError || !this.$__.validationError.errors[path]) { return; } delete this.$__.validationError.errors[path]; if (Object.keys(this.$__.validationError.errors).length === 0) { this.$__.validationError = null; } }; /** * Saves this document. * * ####Example: * * product.sold = Date.now(); * product.save(function (err, product) { * if (err) .. * }) * * The callback will receive three parameters * * 1. `err` if an error occurred * 2. `product` which is the saved `product` * * As an extra measure of flow control, save will return a Promise. 
* ####Example: * product.save().then(function(product) { * ... * }); * * @param {Object} [options] options optional options * @param {Object} [options.safe] overrides [schema's safe option](http://mongoosejs.com//docs/guide.html#safe) * @param {Boolean} [options.validateBeforeSave] set to false to save without validating. * @param {Function} [fn] optional callback * @method save * @memberOf Document * @return {Promise} Promise * @api public * @see middleware http://mongoosejs.com/docs/middleware.html */ /** * Checks if a path is invalid * * @param {String} path the field to check * @method $isValid * @memberOf Document * @api private */ Document.prototype.$isValid = function(path) { return !this.$__.validationError || !this.$__.validationError.errors[path]; }; /** * Resets the internal modified state of this document. * * @api private * @return {Document} * @method $__reset * @memberOf Document */ Document.prototype.$__reset = function reset() { var _this = this; DocumentArray || (DocumentArray = require('./types/documentarray')); this.$__.activePaths .map('init', 'modify', function(i) { return _this.getValue(i); }) .filter(function(val) { return val && val instanceof Array && val.isMongooseDocumentArray && val.length; }) .forEach(function(array) { var i = array.length; while (i--) { var doc = array[i]; if (!doc) { continue; } doc.$__reset(); } }); this.$__.activePaths. map('init', 'modify', function(i) { return _this.getValue(i); }). filter(function(val) { return val && val.$isSingleNested; }). forEach(function(doc) { doc.$__reset(); }); // clear atomics this.$__dirty().forEach(function(dirt) { var type = dirt.value; if (type && type._atomics) { type._atomics = {}; } }); // Clear 'dirty' cache this.$__.activePaths.clear('modify'); this.$__.activePaths.clear('default'); this.$__.validationError = undefined; this.errors = undefined; _this = this; this.schema.requiredPaths().forEach(function(path) { _this.$__.activePaths.require(path); }); return this; }; /** * Returns this documents dirty paths / vals. * * @api private * @method $__dirty * @memberOf Document */ Document.prototype.$__dirty = function() { var _this = this; var all = this.$__.activePaths.map('modify', function(path) { return { path: path, value: _this.getValue(path), schema: _this.$__path(path) }; }); // gh-2558: if we had to set a default and the value is not undefined, // we have to save as well all = all.concat(this.$__.activePaths.map('default', function(path) { if (path === '_id' || !_this.getValue(path)) { return; } return { path: path, value: _this.getValue(path), schema: _this.$__path(path) }; })); // Sort dirty paths in a flat hierarchy. all.sort(function(a, b) { return (a.path < b.path ? -1 : (a.path > b.path ? 1 : 0)); }); // Ignore "foo.a" if "foo" is dirty already. var minimal = [], lastPath, top; all.forEach(function(item) { if (!item) { return; } if (item.path.indexOf(lastPath) !== 0) { lastPath = item.path + '.'; minimal.push(item); top = item; } else { // special case for top level MongooseArrays if (top.value && top.value._atomics && top.value.hasAtomics()) { // the `top` array itself and a sub path of `top` are being modified. // the only way to honor all of both modifications is through a $set // of entire array. top.value._atomics = {}; top.value._atomics.$set = top.value; } } }); top = lastPath = null; return minimal; }; /** * Assigns/compiles `schema` into this documents prototype. 
* * @param {Schema} schema * @api private * @method $__setSchema * @memberOf Document */ Document.prototype.$__setSchema = function(schema) { schema.plugin(idGetter, { deduplicate: true }); compile(schema.tree, this, undefined, schema.options); this.schema = schema; }; /** * Get active path that were changed and are arrays * * @api private * @method $__getArrayPathsToValidate * @memberOf Document */ Document.prototype.$__getArrayPathsToValidate = function() { DocumentArray || (DocumentArray = require('./types/documentarray')); // validate all document arrays. return this.$__.activePaths .map('init', 'modify', function(i) { return this.getValue(i); }.bind(this)) .filter(function(val) { return val && val instanceof Array && val.isMongooseDocumentArray && val.length; }).reduce(function(seed, array) { return seed.concat(array); }, []) .filter(function(doc) { return doc; }); }; /** * Get all subdocs (by bfs) * * @api private * @method $__getAllSubdocs * @memberOf Document */ Document.prototype.$__getAllSubdocs = function() { DocumentArray || (DocumentArray = require('./types/documentarray')); Embedded = Embedded || require('./types/embedded'); function docReducer(doc, seed, path) { var val = doc[path]; if (val instanceof Embedded) { seed.push(val); } if (val && val.$isSingleNested) { seed = Object.keys(val._doc).reduce(function(seed, path) { return docReducer(val._doc, seed, path); }, seed); seed.push(val); } if (val && val.isMongooseDocumentArray) { val.forEach(function _docReduce(doc) { if (!doc || !doc._doc) { return; } if (doc instanceof Embedded) { seed.push(doc); } seed = Object.keys(doc._doc).reduce(function(seed, path) { return docReducer(doc._doc, seed, path); }, seed); }); } else if (val instanceof Document && val.$__isNested) { if (val) { seed = Object.keys(val).reduce(function(seed, path) { return docReducer(val, seed, path); }, seed); } } return seed; } var _this = this; var subDocs = Object.keys(this._doc).reduce(function(seed, path) { return docReducer(_this, seed, path); }, []); return subDocs; }; /*! * Runs queued functions */ function applyQueue(doc) { var q = doc.schema && doc.schema.callQueue; if (!q.length) { return; } var pair; for (var i = 0; i < q.length; ++i) { pair = q[i]; if (pair[0] !== 'pre' && pair[0] !== 'post' && pair[0] !== 'on') { doc[pair[0]].apply(doc, pair[1]); } } } /*! * ignore */ Document.prototype.$__handleReject = function handleReject(err) { // emit on the Model if listening if (this.listeners('error').length) { this.emit('error', err); } else if (this.constructor.listeners && this.constructor.listeners('error').length) { this.constructor.emit('error', err); } else if (this.listeners && this.listeners('error').length) { this.emit('error', err); } }; /** * Internal helper for toObject() and toJSON() that doesn't manipulate options * * @api private * @method $toObject * @memberOf Document */ Document.prototype.$toObject = function(options, json) { let defaultOptions = { transform: true, flattenDecimals: true }; // merge base default options with Schema's set default options if available. // `clone` is necessary here because `utils.options` directly modifies the second input. 
if (json && this.schema.options.toJSON) { defaultOptions = utils.options(defaultOptions, clone(this.schema.options.toJSON)); } else if (this.schema.options.toObject) { defaultOptions = utils.options(defaultOptions, clone(this.schema.options.toObject)); } // If options do not exist or is not an object, set it to empty object options = options && utils.getFunctionName(options.constructor) === 'Object' ? clone(options) : {}; let _minimize; if (options.minimize != null) { _minimize = options.minimize; } else if (defaultOptions.minimize != null) { _minimize = defaultOptions.minimize; } else { _minimize = this.schema.options.minimize; } // The original options that will be passed to `clone()`. Important because // `clone()` will recursively call `$toObject()` on embedded docs, so we // need the original options the user passed in, plus `_isNested` and // `_parentOptions` for checking whether we need to depopulate. const cloneOptions = Object.assign(utils.clone(options), { _isNested: true, json: json, minimize: _minimize }); const depopulate = options.depopulate || get(options, '_parentOptions.depopulate', false); // _isNested will only be true if this is not the top level document, we // should never depopulate if (depopulate && options._isNested && this.$__.wasPopulated) { // populated paths that we set to a document return clone(this._id, cloneOptions); } // merge default options with input options. options = utils.options(defaultOptions, options); options._isNested = true; options.json = json; options.minimize = _minimize; cloneOptions._parentOptions = options; // remember the root transform function // to save it from being overwritten by sub-transform functions var originalTransform = options.transform; var ret = clone(this._doc, cloneOptions) || {}; if (options.getters) { applyGetters(this, ret, 'paths', cloneOptions); // applyGetters for paths will add nested empty objects; // if minimize is set, we need to remove them. if (options.minimize) { ret = minimize(ret) || {}; } } if (options.virtuals || options.getters && options.virtuals !== false) { applyGetters(this, ret, 'virtuals', cloneOptions); } if (options.versionKey === false && this.schema.options.versionKey) { delete ret[this.schema.options.versionKey]; } var transform = options.transform; // In the case where a subdocument has its own transform function, we need to // check and see if the parent has a transform (options.transform) and if the // child schema has a transform (this.schema.options.toObject) In this case, // we need to adjust options.transform to be the child schema's transform and // not the parent schema's if (transform === true || (this.schema.options.toObject && transform)) { var opts = options.json ? this.schema.options.toJSON : this.schema.options.toObject; if (opts) { transform = (typeof options.transform === 'function' ? options.transform : opts.transform); } } else { options.transform = originalTransform; } if (typeof transform === 'function') { var xformed = transform(this, ret, options); if (typeof xformed !== 'undefined') { ret = xformed; } } return ret; }; /** * Converts this document into a plain javascript object, ready for storage in MongoDB. * * Buffers are converted to instances of [mongodb.Binary](http://mongodb.github.com/node-mongodb-native/api-bson-generated/binary.html) for proper storage. 
* * ####Options: * * - `getters` apply all getters (path and virtual getters) * - `virtuals` apply virtual getters (can override `getters` option) * - `minimize` remove empty objects (defaults to true) * - `transform` a transform function to apply to the resulting document before returning * - `depopulate` depopulate any populated paths, replacing them with their original refs (defaults to false) * - `versionKey` whether to include the version key (defaults to true) * * ####Getters/Virtuals * * Example of only applying path getters * * doc.toObject({ getters: true, virtuals: false }) * * Example of only applying virtual getters * * doc.toObject({ virtuals: true }) * * Example of applying both path and virtual getters * * doc.toObject({ getters: true }) * * To apply these options to every document of your schema by default, set your [schemas](#schema_Schema) `toObject` option to the same argument. * * schema.set('toObject', { virtuals: true }) * * ####Transform * * We may need to perform a transformation of the resulting object based on some criteria, say to remove some sensitive information or return a custom object. In this case we set the optional `transform` function. * * Transform functions receive three arguments * * function (doc, ret, options) {} * * - `doc` The mongoose document which is being converted * - `ret` The plain object representation which has been converted * - `options` The options in use (either schema options or the options passed inline) * * ####Example * * // specify the transform schema option * if (!schema.options.toObject) schema.options.toObject = {}; * schema.options.toObject.transform = function (doc, ret, options) { * // remove the _id of every document before returning the result * delete ret._id; * return ret; * } * * // without the transformation in the schema * doc.toObject(); // { _id: 'anId', name: 'Wreck-it Ralph' } * * // with the transformation * doc.toObject(); // { name: 'Wreck-it Ralph' } * * With transformations we can do a lot more than remove properties. 
We can even return completely new customized objects: * * if (!schema.options.toObject) schema.options.toObject = {}; * schema.options.toObject.transform = function (doc, ret, options) { * return { movie: ret.name } * } * * // without the transformation in the schema * doc.toObject(); // { _id: 'anId', name: 'Wreck-it Ralph' } * * // with the transformation * doc.toObject(); // { movie: 'Wreck-it Ralph' } * * _Note: if a transform function returns `undefined`, the return value will be ignored._ * * Transformations may also be applied inline, overridding any transform set in the options: * * function xform (doc, ret, options) { * return { inline: ret.name, custom: true } * } * * // pass the transform as an inline option * doc.toObject({ transform: xform }); // { inline: 'Wreck-it Ralph', custom: true } * * If you want to skip transformations, use `transform: false`: * * if (!schema.options.toObject) schema.options.toObject = {}; * schema.options.toObject.hide = '_id'; * schema.options.toObject.transform = function (doc, ret, options) { * if (options.hide) { * options.hide.split(' ').forEach(function (prop) { * delete ret[prop]; * }); * } * return ret; * } * * var doc = new Doc({ _id: 'anId', secret: 47, name: 'Wreck-it Ralph' }); * doc.toObject(); // { secret: 47, name: 'Wreck-it Ralph' } * doc.toObject({ hide: 'secret _id', transform: false });// { _id: 'anId', secret: 47, name: 'Wreck-it Ralph' } * doc.toObject({ hide: 'secret _id', transform: true }); // { name: 'Wreck-it Ralph' } * * Transforms are applied _only to the document and are not applied to sub-documents_. * * Transforms, like all of these options, are also available for `toJSON`. * * See [schema options](/docs/guide.html#toObject) for some more details. * * _During save, no custom options are applied to the document before being sent to the database._ * * @param {Object} [options] * @return {Object} js object * @see mongodb.Binary http://mongodb.github.com/node-mongodb-native/api-bson-generated/binary.html * @api public * @memberOf Document */ Document.prototype.toObject = function(options) { return this.$toObject(options); }; /*! * Minimizes an object, removing undefined values and empty objects * * @param {Object} object to minimize * @return {Object} */ function minimize(obj) { var keys = Object.keys(obj), i = keys.length, hasKeys, key, val; while (i--) { key = keys[i]; val = obj[key]; if (utils.isObject(val) && !Buffer.isBuffer(val)) { obj[key] = minimize(val); } if (undefined === obj[key]) { delete obj[key]; continue; } hasKeys = true; } return hasKeys ? obj : undefined; } /*! * Applies virtuals properties to `json`. * * @param {Document} self * @param {Object} json * @param {String} type either `virtuals` or `paths` * @return {Object} `json` */ function applyGetters(self, json, type, options) { var schema = self.schema; var paths = Object.keys(schema[type]); var i = paths.length; var numPaths = i; var path; var assignPath; var cur = self._doc; var v; if (!cur) { return json; } if (type === 'virtuals') { options = options || {}; for (i = 0; i < numPaths; ++i) { path = paths[i]; // We may be applying virtuals to a nested object, for example if calling // `doc.nestedProp.toJSON()`. If so, the path we assign to, `assignPath`, // will be a trailing substring of the `path`. 
assignPath = path; if (options.path != null) { if (!path.startsWith(options.path + '.')) { continue; } assignPath = path.substr(options.path.length + 1); } parts = assignPath.split('.'); v = clone(self.get(path), options); if (v === void 0) { continue; } plen = parts.length; cur = json; for (var j = 0; j < plen - 1; ++j) { cur[parts[j]] = cur[parts[j]] || {}; cur = cur[parts[j]]; } cur[parts[plen - 1]] = v; } return json; } while (i--) { path = paths[i]; var parts = path.split('.'); var plen = parts.length; var last = plen - 1; var branch = json; var part; cur = self._doc; for (var ii = 0; ii < plen; ++ii) { part = parts[ii]; v = cur[part]; if (ii === last) { branch[part] = clone(self.get(path), options); } else if (v == null) { if (part in cur) { branch[part] = v; } break; } else { branch = branch[part] || (branch[part] = {}); } cur = v; } } return json; } /** * The return value of this method is used in calls to JSON.stringify(doc). * * This method accepts the same options as [Document#toObject](#document_Document-toObject). To apply the options to every document of your schema by default, set your [schemas](#schema_Schema) `toJSON` option to the same argument. * * schema.set('toJSON', { virtuals: true }) * * See [schema options](/docs/guide.html#toJSON) for details. * * @param {Object} options * @return {Object} * @see Document#toObject #document_Document-toObject * @api public * @memberOf Document */ Document.prototype.toJSON = function(options) { return this.$toObject(options, true); }; /** * Helper for console.log * * @api public * @method inspect * @memberOf Document */ Document.prototype.inspect = function(options) { var isPOJO = options && utils.getFunctionName(options.constructor) === 'Object'; var opts; if (isPOJO) { opts = options; opts.minimize = false; } return this.toObject(opts); }; /** * Helper for console.log * * @api public * @method toString * @memberOf Document */ Document.prototype.toString = function() { return inspect(this.inspect()); }; /** * Returns true if the Document stores the same data as doc. * * Documents are considered equal when they have matching `_id`s, unless neither * document has an `_id`, in which case this function falls back to using * `deepEqual()`. * * @param {Document} doc a document to compare * @return {Boolean} * @api public * @memberOf Document */ Document.prototype.equals = function(doc) { if (!doc) { return false; } var tid = this.get('_id'); var docid = doc.get ? doc.get('_id') : doc; if (!tid && !docid) { return deepEqual(this, doc); } return tid && tid.equals ? tid.equals(docid) : tid === docid; }; /** * Populates document references, executing the `callback` when complete. * If you want to use promises instead, use this function with * [`execPopulate()`](#document_Document-execPopulate) * * ####Example: * * doc * .populate('company') * .populate({ * path: 'notes', * match: /airline/, * select: 'text', * model: 'modelName' * options: opts * }, function (err, user) { * assert(doc._id === user._id) // the document itself is passed * }) * * // summary * doc.populate(path) // not executed * doc.populate(options); // not executed * doc.populate(path, callback) // executed * doc.populate(options, callback); // executed * doc.populate(callback); // executed * doc.populate(options).execPopulate() // executed, returns promise * * * ####NOTE: * * Population does not occur unless a `callback` is passed *or* you explicitly * call `execPopulate()`. * Passing the same path a second time will overwrite the previous path options. 
* See [Model.populate()](#model_Model.populate) for explaination of options. * * @see Model.populate #model_Model.populate * @see Document.execPopulate #document_Document-execPopulate * @param {String|Object} [path] The path to populate or an options object * @param {Function} [callback] When passed, population is invoked * @api public * @return {Document} this * @memberOf Document */ Document.prototype.populate = function populate() { if (arguments.length === 0) { return this; } var pop = this.$__.populate || (this.$__.populate = {}); var args = utils.args(arguments); var fn; if (typeof args[args.length - 1] === 'function') { fn = args.pop(); } // allow `doc.populate(callback)` if (args.length) { // use hash to remove duplicate paths var res = utils.populate.apply(null, args); for (var i = 0; i < res.length; ++i) { pop[res[i].path] = res[i]; } } if (fn) { var paths = utils.object.vals(pop); this.$__.populate = undefined; paths.__noPromise = true; var topLevelModel = this.constructor; if (this.$__isNested) { topLevelModel = this.$__.scope.constructor; var nestedPath = this.$__.nestedPath; paths.forEach(function(populateOptions) { populateOptions.path = nestedPath + '.' + populateOptions.path; }); } topLevelModel.populate(this, paths, fn); } return this; }; /** * Explicitly executes population and returns a promise. Useful for ES2015 * integration. * * ####Example: * * var promise = doc. * populate('company'). * populate({ * path: 'notes', * match: /airline/, * select: 'text', * model: 'modelName' * options: opts * }). * execPopulate(); * * // summary * doc.execPopulate().then(resolve, reject); * * * @see Document.populate #document_Document-populate * @api public * @return {Promise} promise that resolves to the document when population is done * @memberOf Document */ Document.prototype.execPopulate = function() { return utils.promiseOrCallback(null, cb => { this.populate(cb); }); }; /** * Gets _id(s) used during population of the given `path`. * * ####Example: * * Model.findOne().populate('author').exec(function (err, doc) { * console.log(doc.author.name) // Dr.Seuss * console.log(doc.populated('author')) // '5144cf8050f071d979c118a7' * }) * * If the path was not populated, undefined is returned. * * @param {String} path * @return {Array|ObjectId|Number|Buffer|String|undefined} * @memberOf Document * @api public */ Document.prototype.populated = function(path, val, options) { // val and options are internal if (val === null || val === void 0) { if (!this.$__.populated) { return undefined; } var v = this.$__.populated[path]; if (v) { return v.value; } return undefined; } // internal if (val === true) { if (!this.$__.populated) { return undefined; } return this.$__.populated[path]; } this.$__.populated || (this.$__.populated = {}); this.$__.populated[path] = {value: val, options: options}; return val; }; /** * Takes a populated field and returns it to its unpopulated state. * * ####Example: * * Model.findOne().populate('author').exec(function (err, doc) { * console.log(doc.author.name); // Dr.Seuss * console.log(doc.depopulate('author')); * console.log(doc.author); // '5144cf8050f071d979c118a7' * }) * * If the path was not populated, this is a no-op. 
* * @param {String} path * @return {Document} this * @see Document.populate #document_Document-populate * @api public * @memberOf Document */ Document.prototype.depopulate = function(path) { if (typeof path === 'string') { path = path.split(' '); } var i; var populatedIds; if (arguments.length === 0) { // Depopulate all var keys = Object.keys(this.$__.populated); for (i = 0; i < keys.length; i++) { populatedIds = this.populated(keys[i]); if (!populatedIds) { continue; } delete this.$__.populated[keys[i]]; this.$set(keys[i], populatedIds); } return this; } for (i = 0; i < path.length; i++) { populatedIds = this.populated(path[i]); if (!populatedIds) { continue; } delete this.$__.populated[path[i]]; this.$set(path[i], populatedIds); } return this; }; /** * Returns the full path to this document. * * @param {String} [path] * @return {String} * @api private * @method $__fullPath * @memberOf Document */ Document.prototype.$__fullPath = function(path) { // overridden in SubDocuments return path || ''; }; /*! * Module exports. */ Document.ValidationError = ValidationError; module.exports = exports = Document;
1
13,765
This check is a little odd, and will still mess up on empty strings `''`, because an empty string is falsy. Can we change this to `|| _this.getValue(path) == null`?
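A minimal sketch of the distinction this comment points at (a standalone illustration, not code from the mongoose source; `shouldSkip` and `shouldSkipNullish` are hypothetical names standing in for the `!_this.getValue(path)` style check and the suggested `== null` check): a truthiness test drops `''`, `0`, and `false` along with `null`/`undefined`, whereas a loose `== null` comparison drops only the latter two.

// Hypothetical stand-ins for the two styles of check; only the comparison differs.
function shouldSkip(value) {
  // truthiness check: also skips '', 0 and false, which may be legitimate values
  return !value;
}

function shouldSkipNullish(value) {
  // loose equality with null: skips only null and undefined
  return value == null;
}

console.log(shouldSkip(''));               // true  -> empty-string value is skipped
console.log(shouldSkipNullish(''));        // false -> empty-string value is kept
console.log(shouldSkipNullish(null));      // true
console.log(shouldSkipNullish(undefined)); // true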
Automattic-mongoose
js
@@ -0,0 +1,13 @@ +/** + * Note: + * Check + * - if element is focusable + * - if element is in focus order via `tabindex` + */ +const isFocusable = virtualNode.isFocusable; + +let tabIndex = virtualNode.actualNode.getAttribute('tabindex'); +tabIndex = + tabIndex && !isNaN(parseInt(tabIndex, 10)) ? parseInt(tabIndex) : null; + +return tabIndex ? isFocusable && tabIndex >= 0 : isFocusable;
1
1
14,080
Just a minor suggestion: if you `parseInt` when you access the attribute, you shouldn't have to do it twice in the ternary.
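A sketch of the simplification the comment suggests, wrapped in a hypothetical helper (`focusableInTabOrder` is an invented name; `virtualNode.isFocusable` and `actualNode.getAttribute('tabindex')` come from the patch above). Parsing the attribute once means `parseInt` no longer appears twice, and a single `isNaN` test covers the missing-attribute case because `parseInt(null, 10)` is `NaN`.

// Hypothetical refactor of the check above: parse tabindex a single time.
function focusableInTabOrder(virtualNode) {
  const isFocusable = virtualNode.isFocusable;

  // parseInt returns NaN for a missing or non-numeric attribute, so one
  // isNaN test replaces the truthiness check plus the second parseInt.
  const tabIndex = parseInt(
    virtualNode.actualNode.getAttribute('tabindex'),
    10
  );

  return isNaN(tabIndex) ? isFocusable : isFocusable && tabIndex >= 0;
}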
dequelabs-axe-core
js
@@ -241,6 +241,7 @@ describe "BoltServer::TransportApp" do password: target[:password], port: target[:port] }) + body[:target]['host-key-check'] = false if transport == 'ssh' post(path, JSON.generate(body), 'CONTENT_TYPE' => 'text/json') end
1
# frozen_string_literal: true require 'spec_helper' require 'bolt_spec/bolt_server' require 'bolt_spec/conn' require 'bolt_spec/file_cache' require 'bolt_server/config' require 'bolt_server/transport_app' require 'json' require 'rack/test' describe "BoltServer::TransportApp" do include BoltSpec::BoltServer include BoltSpec::Conn include BoltSpec::FileCache include Rack::Test::Methods def app moduledir = File.join(__dir__, '..', 'fixtures', 'modules') mock_file_cache(moduledir) config = BoltServer::Config.new(default_config) BoltServer::TransportApp.new(config) end def file_data(file) { 'uri' => { 'path' => "/tasks/#{File.basename(file)}", 'params' => { 'param' => 'val' } }, 'filename' => File.basename(file), 'sha256' => Digest::SHA256.file(file), 'size' => File.size(file) } end it 'responds ok' do get '/' expect(last_response).to be_ok expect(last_response.status).to eq(200) end context 'when raising errors' do it 'returns non-html 404 when the endpoint is not found' do post '/ssh/run_tasksss', JSON.generate({}), 'CONTENT_TYPE' => 'text/json' expect(last_response).not_to be_ok expect(last_response.status).to eq(404) result = JSON.parse(last_response.body) expect(result['msg']).to eq("Could not find route /ssh/run_tasksss") expect(result['kind']).to eq("boltserver/not-found") end it 'returns non-html 500 when the request times out' do get '/500_error' expect(last_response).not_to be_ok expect(last_response.status).to eq(500) result = JSON.parse(last_response.body) expect(result['msg']).to eq('500: Unknown error: Unexpected error') expect(result['kind']).to eq('boltserver/server-error') end end describe 'transport routes' do let(:action) { 'run_task' } let(:result) { double(Bolt::Result, status_hash: { status: 'test_status' }) } before(:each) do allow_any_instance_of(BoltServer::TransportApp) .to receive(action.to_sym).and_return( Bolt::ResultSet.new([result]) ) end describe '/ssh/*' do let(:path) { "/ssh/#{action}" } let(:target) { conn_info('ssh') } it 'returns a non-html 404 if the action does not exist' do post('/ssh/not_an_action', JSON.generate({}), 'CONTENT_TYPE' => 'text/json') expect(last_response).not_to be_ok expect(last_response.status).to eq(404) result = JSON.parse(last_response.body) expect(result['kind']).to eq('boltserver/not-found') end it 'errors if both password and private-key-content are present' do body = { target: { password: 'password', 'private-key-content': 'private-key-content' } } post(path, JSON.generate(body), 'CONTENT_TYPE' => 'text/json') expect(last_response).not_to be_ok expect(last_response.status).to eq(400) result = JSON.parse(last_response.body) regex = %r{The property '#/target' of type object matched more than one of the required schemas} expect(result['details'].join).to match(regex) end it 'fails if no authorization is present' do body = { target: { hostname: target[:host], user: target[:user], port: target[:port], 'host-key-check': false } } post(path, JSON.generate(body), 'CONTENT_TYPE' => 'text/json') expect(last_response).not_to be_ok expect(last_response.status).to eq(400) result = last_response.body expect(result).to match(%r{The property '#/target' of type object did not match any of the required schemas}) end it 'performs the action when using a password and scrubs any stack traces' do body = { 'target': { 'hostname': target[:host], 'user': target[:user], 'password': target[:password], 'port': target[:port], 'host-key-check': false } } expect_any_instance_of(BoltServer::TransportApp) .to 
receive(:scrub_stack_trace).with(result.status_hash).and_return({}) post(path, JSON.generate(body), 'CONTENT_TYPE' => 'text/json') expect(last_response).to be_ok expect(last_response.status).to eq(200) end it 'performs an action when using a private key and scrubs any stack traces' do private_key = ENV['BOLT_SSH_KEY'] || Dir["spec/fixtures/keys/id_rsa"][0] private_key_content = File.read(private_key) body = { 'target': { 'hostname': target[:host], 'user': target[:user], 'private-key-content': private_key_content, 'port': target[:port], 'host-key-check': false } } expect_any_instance_of(BoltServer::TransportApp) .to receive(:scrub_stack_trace).with(result.status_hash).and_return({}) post(path, JSON.generate(body), 'CONTENT_TYPE' => 'text/json') expect(last_response).to be_ok expect(last_response.status).to eq(200) end end describe '/winrm/*' do let(:path) { "/winrm/#{action}" } let(:target) { conn_info('winrm') } it 'returns a non-html 404 if the action does not exist' do post('/winrm/not_an_action', JSON.generate({}), 'CONTENT_TYPE' => 'text/json') expect(last_response).not_to be_ok expect(last_response.status).to eq(404) result = JSON.parse(last_response.body) expect(result['kind']).to eq('boltserver/not-found') end it 'fails if no authorization is present' do body = { target: { hostname: target[:host], user: target[:user], port: target[:port] } } post(path, JSON.generate(body), 'CONTENT_TYPE' => 'text/json') expect(last_response).not_to be_ok expect(last_response.status).to eq(400) result = last_response.body expect(result).to match(%r{The property '#/target' did not contain a required property of 'password'}) end it 'fails if either port or connect-timeout is a string' do body = { target: { hostname: target[:host], uaser: target[:user], password: target[:password], port: 'port', 'connect-timeout': 'timeout' } } post(path, JSON.generate(body), 'CONTENT_TYPE' => 'text/json') expect(last_response).not_to be_ok expect(last_response.status).to eq(400) result = last_response.body [ %r{The property '#/target/port' of type string did not match the following type: integer}, %r{The property '#/target/connect-timeout' of type string did not match the following type: integer} ].each do |re| expect(result).to match(re) end end it 'performs the action and scrubs any stack traces from the result' do body = { target: { hostname: target[:host], user: target[:user], password: target[:password], port: target[:port] } } expect_any_instance_of(BoltServer::TransportApp) .to receive(:scrub_stack_trace).with(result.status_hash).and_return({}) post(path, JSON.generate(body), 'CONTENT_TYPE' => 'text/json') expect(last_response).to be_ok expect(last_response.status).to eq(200) end end end describe 'action endpoint' do # Helper to set the transport on a body hash, and then post # to an action endpoint (/ssh/<action> or /winrm/<action>) def post_over_transport(transport, action, body_defaults = {}) path = "/#{transport}/#{action}" target = conn_info(transport) body = body_defaults.merge(target: { hostname: target[:host], user: target[:user], password: target[:password], port: target[:port] }) post(path, JSON.generate(body), 'CONTENT_TYPE' => 'text/json') end describe 'run_task' do it 'runs a simple echo task over SSH', :ssh do example_task = { task: { name: 'sample::echo', metadata: { description: 'Echo a message', parameters: { message: 'Default message' } }, files: [{ filename: "echo.sh", sha256: "foo", uri: { path: 'foo', params: { environment: 'foo' } } }] }, parameters: { message: "Hello!" 
} } post_over_transport('ssh', 'run_task', example_task) expect(last_response).to be_ok expect(last_response.status).to eq(200) result = JSON.parse(last_response.body) expect(result).to include('status' => 'success') expect(result['result']['_output']).to match(/got passed the message: Hello!/) end it "runs a simple echo task over WinRM", :winrm do example_task = { task: { name: 'sample::wininput', metadata: { description: 'Echo a message', input_method: 'stdin' }, files: [{ filename: 'wininput.ps1', sha256: 'foo', uri: { path: 'foo', params: { environment: 'foo' } } }] }, parameters: { input: 'Hello!' } } post_over_transport('winrm', 'run_task', example_task) expect(last_response).to be_ok expect(last_response.status).to eq(200) result = JSON.parse(last_response.body) expect(result).to include('status' => 'success') expect(result['result']['_output']).to match(/INPUT.*Hello!/) end end end end
1
11,751
This change in particular is strange. I'm not sure why it would now be necessary. The previous default would've been true, and the default behavior without a new net-ssh version should be unchanged.
puppetlabs-bolt
rb
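A minimal sketch (in Go, not Bolt's actual Ruby implementation) of the distinction the review comment above is probing: an option such as `host-key-check` only needs to be sent explicitly if the server cannot tell "omitted, use the default" apart from "explicitly false". All names here are illustrative and not part of bolt-server.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Hypothetical transport defaults; bolt-server's real defaulting lives elsewhere.
var defaults = map[string]interface{}{
	"host-key-check": true,
}

// resolveOption falls back to the default only when the request body
// omitted the key entirely, so an explicit false is never clobbered.
func resolveOption(target map[string]interface{}, key string) interface{} {
	if v, ok := target[key]; ok {
		return v // explicit value, including an explicit false
	}
	return defaults[key]
}

func main() {
	var withFlag, withoutFlag map[string]interface{}
	_ = json.Unmarshal([]byte(`{"hostname":"h","host-key-check":false}`), &withFlag)
	_ = json.Unmarshal([]byte(`{"hostname":"h"}`), &withoutFlag)

	fmt.Println(resolveOption(withFlag, "host-key-check"))    // false: explicit override
	fmt.Println(resolveOption(withoutFlag, "host-key-check")) // true: default applies
}
```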
@@ -25,7 +25,7 @@ class BatchActionFormModel /** * @Assert\Type("boolean") */ - public bool $autoEndOnErrors = true; + public ?bool $autoEndOnErrors = true; /** * @Assert\Valid()
1
<?php /** * Copyright © Bold Brand Commerce Sp. z o.o. All rights reserved. * See LICENSE.txt for license details. */ declare(strict_types=1); namespace Ergonode\BatchAction\Application\Form\Model; use Ergonode\BatchAction\Application\Validator\AllFilterDisabled; use Symfony\Component\Validator\Constraints as Assert; class BatchActionFormModel { /** * @Assert\NotBlank(message="Batch action type is required") * @Assert\Length( * max=20, * maxMessage="Batch action type is too long. It should contain {{ limit }} characters or less." * ) */ public ?string $type = null; /** * @Assert\Type("boolean") */ public bool $autoEndOnErrors = true; /** * @Assert\Valid() * @Assert\NotBlank() * @AllFilterDisabled() * * @var string|BatchActionFilterFormModel $filter */ public $filter = null; /** * @Assert\Valid() * * @var mixed */ public $payload = null; }
1
9,631
Why is that? It should be redundant, since the default value already exists.
ergonode-backend
php
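A rough Go analogy (not Symfony's form component) for what `?bool` buys over `bool` in the diff above: with a non-nullable typed property, "not submitted" and "submitted as false" collapse, and mapping a submitted null onto the property would fail even though a default exists. A nullable/pointer field lets the default survive an omitted key. Field names are illustrative.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// AutoEndOnErrors is a pointer so an omitted key stays nil and the default
// can be applied afterwards — the role ?bool plays for the PHP property.
type batchActionForm struct {
	AutoEndOnErrors *bool `json:"autoEndOnErrors"`
}

func autoEnd(f batchActionForm) bool {
	if f.AutoEndOnErrors == nil {
		return true // default
	}
	return *f.AutoEndOnErrors
}

func main() {
	var omitted, explicit batchActionForm
	_ = json.Unmarshal([]byte(`{}`), &omitted)
	_ = json.Unmarshal([]byte(`{"autoEndOnErrors": false}`), &explicit)

	fmt.Println(autoEnd(omitted))  // true: default survives omission
	fmt.Println(autoEnd(explicit)) // false: the caller's choice is kept
}
```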
@@ -252,6 +252,7 @@ type Config struct { DebugSimulateCalcGraphHangAfter time.Duration `config:"seconds;0"` DebugSimulateDataplaneHangAfter time.Duration `config:"seconds;0"` DebugPanicAfter time.Duration `config:"seconds;0"` + DebugSimulateDataRace bool `config:"bool;false"` // Configure where Felix gets its routing information. // - workloadIPs: use workload endpoints to construct routes.
1
// Copyright (c) 2020 Tigera, Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package config import ( "errors" "fmt" "net" "os" "reflect" "regexp" "strconv" "strings" "time" log "github.com/sirupsen/logrus" "github.com/projectcalico/libcalico-go/lib/apiconfig" "github.com/projectcalico/libcalico-go/lib/names" "github.com/projectcalico/libcalico-go/lib/numorstring" "github.com/projectcalico/felix/idalloc" ) var ( // RegexpIfaceElemRegexp matches an individual element in the overall interface list; // assumes the value represents a regular expression and is marked by '/' at the start // and end and cannot have spaces RegexpIfaceElemRegexp = regexp.MustCompile(`^\/[^\s]+\/$`) // NonRegexpIfaceElemRegexp matches an individual element in the overall interface list; // assumes the value is between 1-15 chars long and only be alphanumeric or - or _ NonRegexpIfaceElemRegexp = regexp.MustCompile(`^[a-zA-Z0-9_-]{1,15}$`) IfaceListRegexp = regexp.MustCompile(`^[a-zA-Z0-9_-]{1,15}(,[a-zA-Z0-9_-]{1,15})*$`) AuthorityRegexp = regexp.MustCompile(`^[^:/]+:\d+$`) HostnameRegexp = regexp.MustCompile(`^[a-zA-Z0-9_.-]+$`) StringRegexp = regexp.MustCompile(`^.*$`) IfaceParamRegexp = regexp.MustCompile(`^[a-zA-Z0-9:._+-]{1,15}$`) // Hostname have to be valid ipv4, ipv6 or strings up to 64 characters. HostAddressRegexp = regexp.MustCompile(`^[a-zA-Z0-9:._+-]{1,64}$`) ) const ( maxUint = ^uint(0) maxInt = int(maxUint >> 1) minInt = -maxInt - 1 ) // Source of a config value. Values from higher-numbered sources override // those from lower-numbered sources. Note: some parameters (such as those // needed to connect to the datastore) can only be set from a local source. type Source uint8 const ( Default = iota DatastoreGlobal DatastorePerHost ConfigFile EnvironmentVariable InternalOverride ) var SourcesInDescendingOrder = []Source{InternalOverride, EnvironmentVariable, ConfigFile, DatastorePerHost, DatastoreGlobal} func (source Source) String() string { switch source { case Default: return "<default>" case DatastoreGlobal: return "datastore (global)" case DatastorePerHost: return "datastore (per-host)" case ConfigFile: return "config file" case EnvironmentVariable: return "environment variable" case InternalOverride: return "internal override" } return fmt.Sprintf("<unknown(%v)>", uint8(source)) } func (source Source) Local() bool { switch source { case Default, ConfigFile, EnvironmentVariable, InternalOverride: return true default: return false } } // Config contains the best, parsed config values loaded from the various sources. // We use tags to control the parsing and validation. type Config struct { // Configuration parameters. 
UseInternalDataplaneDriver bool `config:"bool;true"` DataplaneDriver string `config:"file(must-exist,executable);calico-iptables-plugin;non-zero,die-on-fail,skip-default-validation"` // Wireguard configuration WireguardEnabled bool `config:"bool;false"` WireguardListeningPort int `config:"int;51820"` WireguardRoutingRulePriority int `config:"int;99"` WireguardInterfaceName string `config:"iface-param;wireguard.cali;non-zero"` WireguardMTU int `config:"int;1420;non-zero"` BPFEnabled bool `config:"bool;false"` BPFDisableUnprivileged bool `config:"bool;true"` BPFLogLevel string `config:"oneof(off,info,debug);off;non-zero"` BPFDataIfacePattern *regexp.Regexp `config:"regexp;^(en.*|eth.*|tunl0$)"` BPFConnectTimeLoadBalancingEnabled bool `config:"bool;true"` BPFExternalServiceMode string `config:"oneof(tunnel,dsr);tunnel;non-zero"` BPFKubeProxyIptablesCleanupEnabled bool `config:"bool;true"` BPFKubeProxyMinSyncPeriod time.Duration `config:"seconds;1"` BPFKubeProxyEndpointSlicesEnabled bool `config:"bool;false"` // DebugBPFCgroupV2 controls the cgroup v2 path that we apply the connect-time load balancer to. Most distros // are configured for cgroup v1, which prevents all but hte root cgroup v2 from working so this is only useful // for development right now. DebugBPFCgroupV2 string `config:"string;;local"` // DebugBPFMapRepinEnabled can be used to prevent Felix from repinning its BPF maps at startup. This is useful for // testing with multiple Felix instances running on one host. DebugBPFMapRepinEnabled bool `config:"bool;true;local"` DatastoreType string `config:"oneof(kubernetes,etcdv3);etcdv3;non-zero,die-on-fail,local"` FelixHostname string `config:"hostname;;local,non-zero"` EtcdAddr string `config:"authority;127.0.0.1:2379;local"` EtcdScheme string `config:"oneof(http,https);http;local"` EtcdKeyFile string `config:"file(must-exist);;local"` EtcdCertFile string `config:"file(must-exist);;local"` EtcdCaFile string `config:"file(must-exist);;local"` EtcdEndpoints []string `config:"endpoint-list;;local"` TyphaAddr string `config:"authority;;local"` TyphaK8sServiceName string `config:"string;;local"` TyphaK8sNamespace string `config:"string;kube-system;non-zero,local"` TyphaReadTimeout time.Duration `config:"seconds;30;local"` TyphaWriteTimeout time.Duration `config:"seconds;10;local"` // Client-side TLS config for Felix's communication with Typha. If any of these are // specified, they _all_ must be - except that either TyphaCN or TyphaURISAN may be left // unset. Felix will then initiate a secure (TLS) connection to Typha. Typha must present // a certificate signed by a CA in TyphaCAFile, and with CN matching TyphaCN or URI SAN // matching TyphaURISAN. 
TyphaKeyFile string `config:"file(must-exist);;local"` TyphaCertFile string `config:"file(must-exist);;local"` TyphaCAFile string `config:"file(must-exist);;local"` TyphaCN string `config:"string;;local"` TyphaURISAN string `config:"string;;local"` Ipv6Support bool `config:"bool;true"` IptablesBackend string `config:"oneof(legacy,nft,auto);legacy"` RouteRefreshInterval time.Duration `config:"seconds;90"` DeviceRouteSourceAddress net.IP `config:"ipv4;"` DeviceRouteProtocol int `config:"int;3"` RemoveExternalRoutes bool `config:"bool;true"` IptablesRefreshInterval time.Duration `config:"seconds;90"` IptablesPostWriteCheckIntervalSecs time.Duration `config:"seconds;1"` IptablesLockFilePath string `config:"file;/run/xtables.lock"` IptablesLockTimeoutSecs time.Duration `config:"seconds;0"` IptablesLockProbeIntervalMillis time.Duration `config:"millis;50"` IpsetsRefreshInterval time.Duration `config:"seconds;10"` MaxIpsetSize int `config:"int;1048576;non-zero"` XDPRefreshInterval time.Duration `config:"seconds;90"` PolicySyncPathPrefix string `config:"file;;"` NetlinkTimeoutSecs time.Duration `config:"seconds;10"` MetadataAddr string `config:"hostname;127.0.0.1;die-on-fail"` MetadataPort int `config:"int(0,65535);8775;die-on-fail"` OpenstackRegion string `config:"region;;die-on-fail"` InterfacePrefix string `config:"iface-list;cali;non-zero,die-on-fail"` InterfaceExclude []*regexp.Regexp `config:"iface-list-regexp;kube-ipvs0"` ChainInsertMode string `config:"oneof(insert,append);insert;non-zero,die-on-fail"` DefaultEndpointToHostAction string `config:"oneof(DROP,RETURN,ACCEPT);DROP;non-zero,die-on-fail"` IptablesFilterAllowAction string `config:"oneof(ACCEPT,RETURN);ACCEPT;non-zero,die-on-fail"` IptablesMangleAllowAction string `config:"oneof(ACCEPT,RETURN);ACCEPT;non-zero,die-on-fail"` LogPrefix string `config:"string;calico-packet"` LogFilePath string `config:"file;/var/log/calico/felix.log;die-on-fail"` LogSeverityFile string `config:"oneof(DEBUG,INFO,WARNING,ERROR,FATAL);INFO"` LogSeverityScreen string `config:"oneof(DEBUG,INFO,WARNING,ERROR,FATAL);INFO"` LogSeveritySys string `config:"oneof(DEBUG,INFO,WARNING,ERROR,FATAL);INFO"` VXLANEnabled bool `config:"bool;false"` VXLANPort int `config:"int;4789"` VXLANVNI int `config:"int;4096"` VXLANMTU int `config:"int;1410;non-zero"` IPv4VXLANTunnelAddr net.IP `config:"ipv4;"` VXLANTunnelMACAddr string `config:"string;"` IpInIpEnabled bool `config:"bool;false"` IpInIpMtu int `config:"int;1440;non-zero"` IpInIpTunnelAddr net.IP `config:"ipv4;"` ReportingIntervalSecs time.Duration `config:"seconds;30"` ReportingTTLSecs time.Duration `config:"seconds;90"` EndpointReportingEnabled bool `config:"bool;false"` EndpointReportingDelaySecs time.Duration `config:"seconds;1"` IptablesMarkMask uint32 `config:"mark-bitmask;0xffff0000;non-zero,die-on-fail"` DisableConntrackInvalidCheck bool `config:"bool;false"` HealthEnabled bool `config:"bool;false"` HealthPort int `config:"int(0,65535);9099"` HealthHost string `config:"host-address;localhost"` PrometheusMetricsEnabled bool `config:"bool;false"` PrometheusMetricsHost string `config:"host-address;"` PrometheusMetricsPort int `config:"int(0,65535);9091"` PrometheusGoMetricsEnabled bool `config:"bool;true"` PrometheusProcessMetricsEnabled bool `config:"bool;true"` FailsafeInboundHostPorts []ProtoPort `config:"port-list;tcp:22,udp:68,tcp:179,tcp:2379,tcp:2380,tcp:6443,tcp:6666,tcp:6667;die-on-fail"` FailsafeOutboundHostPorts []ProtoPort 
`config:"port-list;udp:53,udp:67,tcp:179,tcp:2379,tcp:2380,tcp:6443,tcp:6666,tcp:6667;die-on-fail"` KubeNodePortRanges []numorstring.Port `config:"portrange-list;30000:32767"` NATPortRange numorstring.Port `config:"portrange;"` NATOutgoingAddress net.IP `config:"ipv4;"` UsageReportingEnabled bool `config:"bool;true"` UsageReportingInitialDelaySecs time.Duration `config:"seconds;300"` UsageReportingIntervalSecs time.Duration `config:"seconds;86400"` ClusterGUID string `config:"string;baddecaf"` ClusterType string `config:"string;"` CalicoVersion string `config:"string;"` ExternalNodesCIDRList []string `config:"cidr-list;;die-on-fail"` DebugMemoryProfilePath string `config:"file;;"` DebugCPUProfilePath string `config:"file;/tmp/felix-cpu-<timestamp>.pprof;"` DebugDisableLogDropping bool `config:"bool;false"` DebugSimulateCalcGraphHangAfter time.Duration `config:"seconds;0"` DebugSimulateDataplaneHangAfter time.Duration `config:"seconds;0"` DebugPanicAfter time.Duration `config:"seconds;0"` // Configure where Felix gets its routing information. // - workloadIPs: use workload endpoints to construct routes. // - calicoIPAM: use IPAM data to contruct routes. RouteSource string `config:"oneof(WorkloadIPs,CalicoIPAM);CalicoIPAM"` RouteTableRange idalloc.IndexRange `config:"route-table-range;1-250;die-on-fail"` IptablesNATOutgoingInterfaceFilter string `config:"iface-param;"` SidecarAccelerationEnabled bool `config:"bool;false"` XDPEnabled bool `config:"bool;true"` GenericXDPEnabled bool `config:"bool;false"` // State tracking. // internalOverrides contains our highest priority config source, generated from internal constraints // such as kernel version support. internalOverrides map[string]string // sourceToRawConfig maps each source to the set of config that was give to us via UpdateFrom. sourceToRawConfig map[Source]map[string]string // rawValues maps keys to the current highest-priority raw value. rawValues map[string]string // Err holds the most recent error from a config update. Err error loadClientConfigFromEnvironment func() (*apiconfig.CalicoAPIConfig, error) useNodeResourceUpdates bool } type ProtoPort struct { Protocol string Port uint16 } // Load parses and merges the rawData from one particular source into this config object. // If there is a config value already loaded from a higher-priority source, then // the new value will be ignored (after validation). func (config *Config) UpdateFrom(rawData map[string]string, source Source) (changed bool, err error) { log.Infof("Merging in config from %v: %v", source, rawData) // Defensively take a copy of the raw data, in case we've been handed // a mutable map by mistake. rawDataCopy := make(map[string]string) for k, v := range rawData { if v == "" { log.WithFields(log.Fields{ "name": k, "source": source, }).Info("Ignoring empty configuration parameter. Use value 'none' if " + "your intention is to explicitly disable the default value.") continue } rawDataCopy[k] = v } config.sourceToRawConfig[source] = rawDataCopy changed, err = config.resolve() return } func (c *Config) InterfacePrefixes() []string { return strings.Split(c.InterfacePrefix, ",") } func (config *Config) OpenstackActive() bool { if strings.Contains(strings.ToLower(config.ClusterType), "openstack") { // OpenStack is explicitly known to be present. Newer versions of the OpenStack plugin // set this flag. 
log.Debug("Cluster type contains OpenStack") return true } // If we get here, either OpenStack isn't present or we're running against an old version // of the OpenStack plugin, which doesn't set the flag. Use heuristics based on the // presence of the OpenStack-related parameters. if config.MetadataAddr != "" && config.MetadataAddr != "127.0.0.1" { log.Debug("OpenStack metadata IP set to non-default, assuming OpenStack active") return true } if config.MetadataPort != 0 && config.MetadataPort != 8775 { log.Debug("OpenStack metadata port set to non-default, assuming OpenStack active") return true } for _, prefix := range config.InterfacePrefixes() { if prefix == "tap" { log.Debug("Interface prefix list contains 'tap', assuming OpenStack") return true } } log.Debug("No evidence this is an OpenStack deployment; disabling OpenStack special-cases") return false } func (config *Config) resolve() (changed bool, err error) { newRawValues := make(map[string]string) // Map from lower-case version of name to the highest-priority source found so far. // We use the lower-case version of the name since we can calculate it both for // expected and "raw" parameters, which may be used by plugins. nameToSource := make(map[string]Source) for _, source := range SourcesInDescendingOrder { valueLoop: for rawName, rawValue := range config.sourceToRawConfig[source] { lowerCaseName := strings.ToLower(rawName) currentSource := nameToSource[lowerCaseName] param, ok := knownParams[lowerCaseName] if !ok { if source >= currentSource { // Stash the raw value in case it's useful for an external // dataplane driver. Use the raw name since the driver may // want it. newRawValues[rawName] = rawValue nameToSource[lowerCaseName] = source } log.WithField("raw name", rawName).Info( "Ignoring unknown config param.") continue valueLoop } metadata := param.GetMetadata() name := metadata.Name if metadata.Local && !source.Local() { log.Warningf("Ignoring local-only configuration for %v from %v", name, source) continue valueLoop } log.Infof("Parsing value for %v: %v (from %v)", name, rawValue, source) var value interface{} if strings.ToLower(rawValue) == "none" { // Special case: we allow a value of "none" to force the value to // the zero value for a field. The zero value often differs from // the default value. Typically, the zero value means "turn off // the feature". if metadata.NonZero { err = errors.New("non-zero field cannot be set to none") log.Errorf( "Failed to parse value for %v: %v from source %v. 
%v", name, rawValue, source, err) config.Err = err return } value = metadata.ZeroValue log.Infof("Value set to 'none', replacing with zero-value: %#v.", value) } else { value, err = param.Parse(rawValue) if err != nil { logCxt := log.WithError(err).WithField("source", source) if metadata.DieOnParseFailure { logCxt.Error("Invalid (required) config value.") config.Err = err return } else { logCxt.WithField("default", metadata.Default).Warn( "Replacing invalid value with default") value = metadata.Default err = nil } } } log.Infof("Parsed value for %v: %v (from %v)", name, value, source) if source < currentSource { log.Infof("Skipping config value for %v from %v; "+ "already have a value from %v", name, source, currentSource) continue } field := reflect.ValueOf(config).Elem().FieldByName(name) field.Set(reflect.ValueOf(value)) newRawValues[name] = rawValue nameToSource[lowerCaseName] = source } } changed = !reflect.DeepEqual(newRawValues, config.rawValues) config.rawValues = newRawValues return } func (config *Config) setBy(name string, source Source) bool { _, set := config.sourceToRawConfig[source][name] return set } func (config *Config) setByConfigFileOrEnvironment(name string) bool { return config.setBy(name, ConfigFile) || config.setBy(name, EnvironmentVariable) } func (config *Config) DatastoreConfig() apiconfig.CalicoAPIConfig { // We want Felix's datastore connection to be fully configurable using the same // CALICO_XXX_YYY (or just XXX_YYY) environment variables that work for any libcalico-go // client - for both the etcdv3 and KDD cases. However, for the etcd case, Felix has for a // long time supported FELIX_XXXYYY environment variables, and we want those to keep working // too. // To achieve that, first build a CalicoAPIConfig using libcalico-go's // LoadClientConfigFromEnvironment - which means incorporating defaults and CALICO_XXX_YYY // and XXX_YYY variables. cfg, err := config.loadClientConfigFromEnvironment() if err != nil { log.WithError(err).Panic("Failed to create datastore config") } // Now allow FELIX_XXXYYY variables or XxxYyy config file settings to override that, in the // etcd case. Note that that etcd options are set even if the DatastoreType isn't etcdv3. // This allows the user to rely the default DatastoreType being etcdv3 and still being able // to configure the other etcdv3 options. As of the time of this code change, the etcd options // have no affect if the DatastoreType is not etcdv3. // Datastore type, either etcdv3 or kubernetes if config.setByConfigFileOrEnvironment("DatastoreType") { log.Infof("Overriding DatastoreType from felix config to %s", config.DatastoreType) if config.DatastoreType == string(apiconfig.EtcdV3) { cfg.Spec.DatastoreType = apiconfig.EtcdV3 } else if config.DatastoreType == string(apiconfig.Kubernetes) { cfg.Spec.DatastoreType = apiconfig.Kubernetes } } // Endpoints. if config.setByConfigFileOrEnvironment("EtcdEndpoints") && len(config.EtcdEndpoints) > 0 { log.Infof("Overriding EtcdEndpoints from felix config to %s", config.EtcdEndpoints) cfg.Spec.EtcdEndpoints = strings.Join(config.EtcdEndpoints, ",") } else if config.setByConfigFileOrEnvironment("EtcdAddr") { etcdEndpoints := config.EtcdScheme + "://" + config.EtcdAddr log.Infof("Overriding EtcdEndpoints from felix config to %s", etcdEndpoints) cfg.Spec.EtcdEndpoints = etcdEndpoints } // TLS. 
if config.setByConfigFileOrEnvironment("EtcdKeyFile") { log.Infof("Overriding EtcdKeyFile from felix config to %s", config.EtcdKeyFile) cfg.Spec.EtcdKeyFile = config.EtcdKeyFile } if config.setByConfigFileOrEnvironment("EtcdCertFile") { log.Infof("Overriding EtcdCertFile from felix config to %s", config.EtcdCertFile) cfg.Spec.EtcdCertFile = config.EtcdCertFile } if config.setByConfigFileOrEnvironment("EtcdCaFile") { log.Infof("Overriding EtcdCaFile from felix config to %s", config.EtcdCaFile) cfg.Spec.EtcdCACertFile = config.EtcdCaFile } if !(config.IpInIpEnabled || config.VXLANEnabled || config.BPFEnabled) { // Polling k8s for node updates is expensive (because we get many superfluous // updates) so disable if we don't need it. log.Info("Encap disabled, disabling node poll (if KDD is in use).") cfg.Spec.K8sDisableNodePoll = true } return *cfg } // Validate() performs cross-field validation. func (config *Config) Validate() (err error) { if config.FelixHostname == "" { err = errors.New("Failed to determine hostname") } if config.DatastoreType == "etcdv3" && len(config.EtcdEndpoints) == 0 { if config.EtcdScheme == "" { err = errors.New("EtcdEndpoints and EtcdScheme both missing") } if config.EtcdAddr == "" { err = errors.New("EtcdEndpoints and EtcdAddr both missing") } } // If any client-side TLS config parameters are specified, they _all_ must be - except that // either TyphaCN or TyphaURISAN may be left unset. if config.TyphaCAFile != "" || config.TyphaCertFile != "" || config.TyphaKeyFile != "" || config.TyphaCN != "" || config.TyphaURISAN != "" { // Some TLS config specified. if config.TyphaKeyFile == "" || config.TyphaCertFile == "" || config.TyphaCAFile == "" || (config.TyphaCN == "" && config.TyphaURISAN == "") { err = errors.New("If any Felix-Typha TLS config parameters are specified," + " they _all_ must be" + " - except that either TyphaCN or TyphaURISAN may be left unset.") } } if err != nil { config.Err = err } return } var knownParams map[string]param func loadParams() { knownParams = make(map[string]param) config := Config{} kind := reflect.TypeOf(config) metaRegexp := regexp.MustCompile(`^([^;(]+)(?:\(([^)]*)\))?;` + `([^;]*)(?:;` + `([^;]*))?$`) for ii := 0; ii < kind.NumField(); ii++ { field := kind.Field(ii) tag := field.Tag.Get("config") if tag == "" { continue } captures := metaRegexp.FindStringSubmatch(tag) if len(captures) == 0 { log.Panicf("Failed to parse metadata for config param %v", field.Name) } log.Debugf("%v: metadata captures: %#v", field.Name, captures) kind := captures[1] // Type: "int|oneof|bool|port-list|..." kindParams := captures[2] // Parameters for the type: e.g. 
for oneof "http,https" defaultStr := captures[3] // Default value e.g "1.0" flags := captures[4] var param param var err error switch kind { case "bool": param = &BoolParam{} case "int": min := minInt max := maxInt if kindParams != "" { minAndMax := strings.Split(kindParams, ",") min, err = strconv.Atoi(minAndMax[0]) if err != nil { log.Panicf("Failed to parse min value for %v", field.Name) } max, err = strconv.Atoi(minAndMax[1]) if err != nil { log.Panicf("Failed to parse max value for %v", field.Name) } } param = &IntParam{Min: min, Max: max} case "int32": param = &Int32Param{} case "mark-bitmask": param = &MarkBitmaskParam{} case "float": param = &FloatParam{} case "seconds": param = &SecondsParam{} case "millis": param = &MillisParam{} case "iface-list": param = &RegexpParam{Regexp: IfaceListRegexp, Msg: "invalid Linux interface name"} case "iface-list-regexp": param = &RegexpPatternListParam{ NonRegexpElemRegexp: NonRegexpIfaceElemRegexp, RegexpElemRegexp: RegexpIfaceElemRegexp, Delimiter: ",", Msg: "list contains invalid Linux interface name or regex pattern", } case "regexp": param = &RegexpPatternParam{} case "iface-param": param = &RegexpParam{Regexp: IfaceParamRegexp, Msg: "invalid Linux interface parameter"} case "file": param = &FileParam{ MustExist: strings.Contains(kindParams, "must-exist"), Executable: strings.Contains(kindParams, "executable"), } case "authority": param = &RegexpParam{Regexp: AuthorityRegexp, Msg: "invalid URL authority"} case "ipv4": param = &Ipv4Param{} case "endpoint-list": param = &EndpointListParam{} case "port-list": param = &PortListParam{} case "portrange": param = &PortRangeParam{} case "portrange-list": param = &PortRangeListParam{} case "hostname": param = &RegexpParam{Regexp: HostnameRegexp, Msg: "invalid hostname"} case "host-address": param = &RegexpParam{Regexp: HostAddressRegexp, Msg: "invalid host address"} case "region": param = &RegionParam{} case "oneof": options := strings.Split(kindParams, ",") lowerCaseToCanon := make(map[string]string) for _, option := range options { lowerCaseToCanon[strings.ToLower(option)] = option } param = &OneofListParam{ lowerCaseOptionsToCanonical: lowerCaseToCanon} case "string": param = &RegexpParam{Regexp: StringRegexp, Msg: "invalid string"} case "cidr-list": param = &CIDRListParam{} case "route-table-range": param = &RouteTableRangeParam{} default: log.Panicf("Unknown type of parameter: %v", kind) } metadata := param.GetMetadata() metadata.Name = field.Name metadata.ZeroValue = reflect.ValueOf(config).FieldByName(field.Name).Interface() if strings.Contains(flags, "non-zero") { metadata.NonZero = true } if strings.Contains(flags, "die-on-fail") { metadata.DieOnParseFailure = true } if strings.Contains(flags, "local") { metadata.Local = true } if defaultStr != "" { if strings.Contains(flags, "skip-default-validation") { metadata.Default = defaultStr } else { // Parse the default value and save it in the metadata. Doing // that here ensures that we syntax-check the defaults now. 
defaultVal, err := param.Parse(defaultStr) if err != nil { log.Panicf("Invalid default value: %v", err) } metadata.Default = defaultVal } } else { metadata.Default = metadata.ZeroValue } knownParams[strings.ToLower(field.Name)] = param } } func (config *Config) SetUseNodeResourceUpdates(b bool) { config.useNodeResourceUpdates = b } func (config *Config) UseNodeResourceUpdates() bool { return config.useNodeResourceUpdates } func (config *Config) RawValues() map[string]string { return config.rawValues } func (config *Config) SetLoadClientConfigFromEnvironmentFunction(fnc func() (*apiconfig.CalicoAPIConfig, error)) { config.loadClientConfigFromEnvironment = fnc } // OverrideParam installs a maximum priority parameter override for the given parameter. This is useful for // disabling features that are found to be unsupported, for example. By using an extra priority class, the // override will persist even if the host/global config is updated. func (config *Config) OverrideParam(name, value string) (bool, error) { config.internalOverrides[name] = value return config.UpdateFrom(config.internalOverrides, InternalOverride) } func New() *Config { if knownParams == nil { loadParams() } p := &Config{ rawValues: map[string]string{}, sourceToRawConfig: map[Source]map[string]string{}, internalOverrides: map[string]string{}, } for _, param := range knownParams { param.setDefault(p) } hostname, err := names.Hostname() if err != nil { log.Warningf("Failed to get hostname from kernel, "+ "trying HOSTNAME variable: %v", err) hostname = strings.ToLower(os.Getenv("HOSTNAME")) } p.FelixHostname = hostname p.loadClientConfigFromEnvironment = apiconfig.LoadClientConfigFromEnvironment return p } type param interface { GetMetadata() *Metadata Parse(raw string) (result interface{}, err error) setDefault(*Config) }
1
18,007
Are all fields beginning with "Debug" automatically `;local`? (I guess so, but just checking.)
projectcalico-felix
c
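To the question in the review comment: in the file above, locality is decided purely by the `local` flag inside the `config:"..."` struct tag (see `strings.Contains(flags, "local")` in `loadParams()`), not by a `Debug` name prefix — several existing `Debug*` fields carry no `local` flag, and the new `DebugSimulateDataRace` field does not either. A stripped-down sketch of that tag handling (simplified parsing, not Felix's real regex):

```go
package main

import (
	"fmt"
	"reflect"
	"strings"
)

type config struct {
	DebugPanicAfter       int    `config:"seconds;0"`     // no flags: not local
	DebugBPFCgroupV2      string `config:"string;;local"` // local only because the flag is present
	DebugSimulateDataRace bool   `config:"bool;false"`    // the new field: not local either
}

func main() {
	t := reflect.TypeOf(config{})
	for i := 0; i < t.NumField(); i++ {
		f := t.Field(i)
		parts := strings.Split(f.Tag.Get("config"), ";")
		flags := ""
		if len(parts) > 2 {
			flags = parts[len(parts)-1] // third segment holds flags such as "local,die-on-fail"
		}
		local := strings.Contains(flags, "local")
		fmt.Printf("%-22s local=%v\n", f.Name, local)
	}
}
```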
@@ -115,6 +115,13 @@ public interface Table { */ UpdateSchema updateSchema(); + /** + * Create a new {@link UpdateNameMapping} to update name mapping of this table and commit the change. + * + * @return a new {@link UpdateNameMapping} + */ + UpdateNameMapping updateNameMapping(); + /** * Create a new {@link UpdateProperties} to update table properties and commit the changes. *
1
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.iceberg; import java.util.List; import java.util.Map; import org.apache.iceberg.encryption.EncryptionManager; import org.apache.iceberg.io.FileIO; import org.apache.iceberg.io.LocationProvider; /** * Represents a table. */ public interface Table { /** * Refresh the current table metadata. */ void refresh(); /** * Create a new {@link TableScan scan} for this table. * <p> * Once a table scan is created, it can be refined to project columns and filter data. * * @return a table scan for this table */ TableScan newScan(); /** * Return the {@link Schema schema} for this table. * * @return this table's schema */ Schema schema(); /** * Return the {@link PartitionSpec partition spec} for this table. * * @return this table's partition spec */ PartitionSpec spec(); /** * Return a map of {@link PartitionSpec partition specs} for this table. * * @return this table's partition specs map */ Map<Integer, PartitionSpec> specs(); /** * Return a map of string properties for this table. * * @return this table's properties map */ Map<String, String> properties(); /** * Return the table's base location. * * @return this table's location */ String location(); /** * Get the current {@link Snapshot snapshot} for this table, or null if there are no snapshots. * * @return the current table Snapshot. */ Snapshot currentSnapshot(); /** * Get the {@link Snapshot snapshot} of this table with the given id, or null if there is no * matching snapshot. * * @return the {@link Snapshot} with the given id. */ Snapshot snapshot(long snapshotId); /** * Get the {@link Snapshot snapshots} of this table. * * @return an Iterable of snapshots of this table. */ Iterable<Snapshot> snapshots(); /** * Get the snapshot history of this table. * * @return a list of {@link HistoryEntry history entries} */ List<HistoryEntry> history(); /** * Create a new {@link UpdateSchema} to alter the columns of this table and commit the change. * * @return a new {@link UpdateSchema} */ UpdateSchema updateSchema(); /** * Create a new {@link UpdateProperties} to update table properties and commit the changes. * * @return a new {@link UpdateProperties} */ UpdateProperties updateProperties(); /** * Create a new {@link UpdateLocation} to update table location and commit the changes. * * @return a new {@link UpdateLocation} */ UpdateLocation updateLocation(); /** * Create a new {@link AppendFiles append API} to add files to this table and commit. * * @return a new {@link AppendFiles} */ AppendFiles newAppend(); /** * Create a new {@link AppendFiles append API} to add files to this table and commit. * <p> * Using this method signals to the underlying implementation that the append should not perform * extra work in order to commit quickly. 
Fast appends are not recommended for normal writes * because the fast commit may cause split planning to slow down over time. * <p> * Implementations may not support fast appends, in which case this will return the same appender * as {@link #newAppend()}. * * @return a new {@link AppendFiles} */ default AppendFiles newFastAppend() { return newAppend(); } /** * Create a new {@link RewriteFiles rewrite API} to replace files in this table and commit. * * @return a new {@link RewriteFiles} */ RewriteFiles newRewrite(); /** * Create a new {@link RewriteManifests rewrite manifests API} to replace manifests for this * table and commit. * * @return a new {@link RewriteManifests} */ RewriteManifests rewriteManifests(); /** * Create a new {@link OverwriteFiles overwrite API} to overwrite files by a filter expression. * * @return a new {@link OverwriteFiles} */ OverwriteFiles newOverwrite(); /** * Not recommended: Create a new {@link ReplacePartitions replace partitions API} to dynamically * overwrite partitions in the table with new data. * <p> * This is provided to implement SQL compatible with Hive table operations but is not recommended. * Instead, use the {@link OverwriteFiles overwrite API} to explicitly overwrite data. * * @return a new {@link ReplacePartitions} */ ReplacePartitions newReplacePartitions(); /** * Create a new {@link DeleteFiles delete API} to replace files in this table and commit. * * @return a new {@link DeleteFiles} */ DeleteFiles newDelete(); /** * Create a new {@link ExpireSnapshots expire API} to manage snapshots in this table and commit. * * @return a new {@link ExpireSnapshots} */ ExpireSnapshots expireSnapshots(); /** * Create a new {@link Rollback rollback API} to roll back to a previous snapshot and commit. * * @return a new {@link Rollback} * @deprecated Replaced by {@link #manageSnapshots()} */ @Deprecated Rollback rollback(); /** * Create a new {@link ManageSnapshots manage snapshots API} to manage snapshots in this table and commit. * @return a new {@link ManageSnapshots} */ ManageSnapshots manageSnapshots(); /** * Create a new {@link Transaction transaction API} to commit multiple table operations at once. * * @return a new {@link Transaction} */ Transaction newTransaction(); /** * @return a {@link FileIO} to read and write table data and metadata files */ FileIO io(); /** * @return an {@link org.apache.iceberg.encryption.EncryptionManager} to encrypt and decrypt * data files. */ EncryptionManager encryption(); /** * @return a {@link LocationProvider} to provide locations for new data files */ LocationProvider locationProvider(); }
1
18,141
While I think it makes sense to update the mapping programmatically, I don't see much value in exposing it as part of the table API. We want to keep the Table API small, so if we can handle this by using a separate API that consumes and produces JSON, then that is preferred. Also, we may have more than one mapping in the future, so keeping this decoupled also helps out for those use cases. For now, let's build a utility class to perform these updates. We can always add this to the public API later if we choose to.
apache-iceberg
java
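A hedged sketch of the decoupled approach the review comment suggests, transliterated into Go rather than Iceberg's actual Java classes: a standalone utility consumes and produces the mapping as JSON, and the result is committed through the existing UpdateProperties path instead of a new Table method. The property key and helper names below are assumptions for illustration, not Iceberg's API.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// mappedField is a simplified stand-in for one name-mapping entry.
type mappedField struct {
	FieldID int      `json:"field-id"`
	Names   []string `json:"names"`
}

// addAlias returns updated mapping JSON with an extra name for fieldID.
// This is the "utility that consumes and produces JSON" shape: the table
// interface itself never learns about name mappings.
func addAlias(mappingJSON []byte, fieldID int, alias string) ([]byte, error) {
	var fields []mappedField
	if err := json.Unmarshal(mappingJSON, &fields); err != nil {
		return nil, err
	}
	for i := range fields {
		if fields[i].FieldID == fieldID {
			fields[i].Names = append(fields[i].Names, alias)
		}
	}
	return json.Marshal(fields)
}

func main() {
	current := []byte(`[{"field-id":1,"names":["id"]}]`)
	updated, _ := addAlias(current, 1, "identifier")

	// Commit via the existing properties API, e.g. (pseudo-usage):
	//   table.updateProperties().set("schema.name-mapping.default", string(updated)).commit()
	// The "schema.name-mapping.default" key is assumed here for illustration.
	fmt.Println(string(updated))
}
```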
@@ -540,7 +540,7 @@ static Int64 SikGcInterval = -1; void SsmpGlobals::work() { - getIpcEnv()->getAllConnections()->waitOnAll(getStatsMergeTimeout()); + getIpcEnv()->getAllConnections()->waitOnAll(0); finishPendingSscpMessages(); // Cleanup IpcEnvironment
1
/********************************************************************** // @@@ START COPYRIGHT @@@ // // Licensed to the Apache Software Foundation (ASF) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The ASF licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. // // @@@ END COPYRIGHT @@@ // File: ssmpipc.cpp // Description: Class declaration for SSCP IPC infrastructure // // Created: 5/02/2006 **********************************************************************/ #include "Platform.h" #include "ex_stdh.h" #include "ssmpipc.h" #include "ComCextdecs.h" #include <semaphore.h> #include "nsk/nskport.h" #include "zsysc.h" #include "NAStdlib.h" #include "Ex_esp_msg.h" #include "ComQueue.h" #include "ComRtUtils.h" #include "ComSqlId.h" #include "Globals.h" #include "SqlStats.h" #include "ex_stdh.h" #include "ExStats.h" #include "ComDiags.h" #include "PortProcessCalls.h" #include "Statement.h" #include "ComSqlId.h" ExSsmpManager::ExSsmpManager(IpcEnvironment *env) : env_(env) { ssmpServerClass_ = new(env->getHeap()) IpcServerClass( env_, IPC_SQLSSMP_SERVER, IPC_USE_PROCESS); ssmps_ = new (env->getHeap()) HashQueue(env->getHeap(), 16); deletedSsmps_ = new(env->getHeap()) NAList<IpcServer *>(env->getHeap()); } ExSsmpManager::~ExSsmpManager() { cleanupDeletedSsmpServers(); NADELETE(deletedSsmps_, NAList, env_->getHeap()); ssmps_->position(); NADELETE(ssmps_, HashQueue, env_->getHeap()); if (ssmpServerClass_) { NADELETE(ssmpServerClass_, IpcServerClass, env_->getHeap()); } } IpcServer *ExSsmpManager::getSsmpServer(NAHeap *heap, char *nodeName, short cpuNum, ComDiagsArea *&diagsArea) { char ssmpProcessName[50]; IpcServer *ssmpServer = NULL; Int32 processNameLen = 0; char *tmpProcessName; tmpProcessName = ssmpServerClass_->getProcessName(nodeName, (short) str_len(nodeName), cpuNum, ssmpProcessName); ex_assert(tmpProcessName != NULL, "ProcessName can't be null"); processNameLen = str_len(tmpProcessName); // Check if we already have this SSMP server ssmps_->position(tmpProcessName, processNameLen); ssmpServer = (IpcServer *) ssmps_->getNext(); while (ssmpServer != NULL) { if (str_cmp(ssmpServer->castToIpcGuardianServer()->getProcessName(), tmpProcessName, processNameLen) == 0) { GuaConnectionToServer *cbGCTS = ssmpServer->getControlConnection()->castToGuaConnectionToServer(); // We need to keep 2 entries free - To send QueryFinishedMessage and to get the response for query started message if (cbGCTS->numReceiveCallbacksPending()+2 >= cbGCTS->getNowaitDepth()) { if (diagsArea == NULL) diagsArea = ComDiagsArea::allocate(heap); *diagsArea << DgSqlCode(-2026) << DgString0(tmpProcessName) << DgInt0(GetCliGlobals()->myCpu()) << DgInt1(GetCliGlobals()->myPin()); return NULL; } return ssmpServer; } ssmpServer = (IpcServer *) ssmps_->getNext(); } // We don't have this SSMP server, so we'll try to allocate one. 
ssmpServer = ssmpServerClass_->allocateServerProcess(&diagsArea, env_->getHeap(), nodeName, cpuNum, IPC_PRIORITY_DONT_CARE, FALSE); if (ssmpServer != NULL && ssmpServer->castToIpcGuardianServer()->isReady()) { tmpProcessName = (char *)ssmpServer->castToIpcGuardianServer()->getProcessName(); ssmps_->insert(tmpProcessName, str_len(tmpProcessName), ssmpServer); } else { if (ssmpServer != NULL) { ssmpServerClass_->freeServerProcess(ssmpServer); ssmpServer = NULL; } } return ssmpServer; } void ExSsmpManager::removeSsmpServer(char *nodeName, short cpuNum) { char ssmpProcessName[50]; IpcServer *ssmpServer = NULL; Int32 processNameLen = 0; char *tmpProcessName; tmpProcessName = ssmpServerClass_->getProcessName(nodeName, (short) str_len(nodeName), cpuNum, ssmpProcessName); ex_assert(tmpProcessName != NULL, "ProcessName can't be null"); processNameLen = str_len(tmpProcessName); ssmps_->position(tmpProcessName, processNameLen); ssmpServer = (IpcServer *)ssmps_->getNext(); while (ssmpServer != NULL) { //Only remove the returned ssmpServer if its processName matches the processName //we passed in. if (str_cmp(ssmpServer->castToIpcGuardianServer()->getProcessName(), tmpProcessName, processNameLen) == 0) { ssmps_->remove(ssmpServer); deletedSsmps_->insert(ssmpServer); break; } ssmpServer = (IpcServer *)ssmps_->getNext(); } } void ExSsmpManager::cleanupDeletedSsmpServers() { IpcServer *ssmp; while (deletedSsmps_->getFirst(ssmp)) { ssmpServerClass_->freeServerProcess(ssmp); } } SsmpGlobals::SsmpGlobals(NAHeap *ssmpheap, IpcEnvironment *ipcEnv, StatsGlobals *statsGlobals) : heap_(ssmpheap), statsGlobals_(statsGlobals), ipcEnv_(ipcEnv), recipients_(ipcEnv_->getAllConnections(),ipcEnv_->getHeap()), activeQueryMgr_(ipcEnv_, ssmpheap), pendingQueryMgr_(this, ssmpheap) { sscpServerClass_ = NULL; sscps_ = new (heap_) HashQueue(heap_, 16); deletedSscps_ = new(heap_) NAList<IpcServer *>(heap_); #ifdef _DEBUG_RTS statsCollectionInterval_ = 5 * 60; //in seconds #else statsCollectionInterval_ = 30;// in seconds #endif char defineName[24+1]; int error; short mergeInterval, statsTimeout, sqlSrcLen; //Check to see if the user wants to use a different merge interval (default is 30 seocnds). //Set this define as follows: ADD DEFINE =_MX_RTS_MERGE_INTERVAL, CLASS DEFAULTS, VOLUME $A.Bnnnnn //where nnnnn is the interval in seconds. char *ln_attrValue = getenv("_MX_RTS_MERGE_INTERVAL"); if (ln_attrValue) { mergeInterval = atoi(ln_attrValue); statsCollectionInterval_ = (Int64) MAXOF (mergeInterval, 30); } statsTimeout_ = 300; // in Centi-seconds //Set this define as follows: ADD DEFINE =_MX_RTS_MERGE_TIMEOUT, CLASS DEFAULTS, VOLUME $A.Bnnnnn //where nnnnn is the max number of queries. 
ln_attrValue = getenv("_MX_RTS_MERGE_TIMEOUT"); if (ln_attrValue) { statsTimeout = atoi(ln_attrValue); if (statsTimeout > 1) statsTimeout_ = statsTimeout; else statsTimeout_ = 300; } storeSqlSrcLen_ = RMS_STORE_SQL_SOURCE_LEN; //Set this define as follows: ADD DEFINE =_MX_RTS_SQL_SOURCE_LEN, CLASS DEFAULTS, VOLUME $A.Bnnnnn //where nnnnn is the length of the source string ln_attrValue = getenv("_MX_RTS_SQL_SOURCE_LEN"); if (ln_attrValue) { sqlSrcLen = atoi(ln_attrValue); if (sqlSrcLen < 0) storeSqlSrcLen_ = 0; else storeSqlSrcLen_ = sqlSrcLen; } CliGlobals *cliGlobals = GetCliGlobals(); char programDir[100]; short processType; char myNodeName[MAX_SEGMENT_NAME_LEN+1]; Lng32 myNodeNumber; short myNodeNameLen = MAX_SEGMENT_NAME_LEN; Int64 myStartTime; short pri; char myProcessName[PROCESSNAME_STRING_LEN]; error = statsGlobals_->openStatsSemaphore(semId_); ex_assert(error == 0, "BINSEM_OPEN returned an error"); if (ComRtGetProgramInfo(programDir, 100, processType, myCpu_, myPin_, myNodeNumber, myNodeName, myNodeNameLen, myStartTime, myProcessName)) { ex_assert(0,"Error in ComRtGetProgramInfo"); } pri = 0; error = statsGlobals_->getStatsSemaphore(semId_, myPin_); NAProcessHandle phandle; (void)phandle.getmine(statsGlobals->getSsmpProcHandle()); statsGlobals_->setSsmpPid(myPin_); statsGlobals_->setSsmpPriority(pri); statsGlobals_->setSsmpTimestamp(myStartTime); statsGlobals_->setStoreSqlSrcLen(storeSqlSrcLen_); statsGlobals_->setSsmpProcSemId(semId_); cliGlobals->setSemId(semId_); statsHeap_ = (NAHeap *)statsGlobals->getStatsHeap()->allocateHeapMemory(sizeof *statsHeap_); statsHeap_ = new (statsHeap_, statsGlobals->getStatsHeap()) NAHeap("Process Stats Heap", statsGlobals->getStatsHeap(), 8192, 0); statsGlobals_->setSscpOpens(0); statsGlobals_->setSscpDeletedOpens(0); statsGlobals_->releaseStatsSemaphore(semId_, myPin_); deallocatedSscps_ = new (heap_) Queue(heap_); doingGC_ = FALSE; // Debug code to force merge if the =_MX_SSMP_FORCE_MERGE define // is specified.. 
ln_attrValue = getenv("_MX_SSMP_FORCE_MERGE"); if (ln_attrValue) forceMerge_ = TRUE; else forceMerge_ = FALSE; pendingSscpMessages_ = new (heap_) Queue(heap_); } SsmpGlobals::~SsmpGlobals() { cleanupDeletedSscpServers(); NADELETE(deletedSscps_, NAList, heap_); NADELETE(sscps_, HashQueue, heap_); if (sscpServerClass_ != NULL) { NADELETE(sscpServerClass_, IpcServerClass, heap_); } sem_close((sem_t *)semId_); } ULng32 SsmpGlobals::allocateServers() { // Attempt connect to all SSCPs if (sscpServerClass_ == NULL) { Int32 noOfNodes; Int32 *cpuArray = NULL; noOfNodes = ComRtGetCPUArray(cpuArray, heap_); if (noOfNodes == 0) return 0; statsGlobals_->setNodesInCluster(noOfNodes); sscpServerClass_ = new(heap_) IpcServerClass(ipcEnv_, IPC_SQLSSCP_SERVER, IPC_USE_PROCESS); for (Int32 i = 0 ; i < noOfNodes ; i++) { allocateServer(NULL, 0, cpuArray[i]); } NADELETEBASIC(cpuArray, heap_); } else { ServerId *serverId; IpcServer *server; deallocatedSscps_->position(); while ((serverId = (ServerId *)deallocatedSscps_->getNext()) != NULL) { server = allocateServer(serverId->nodeName_, (short)str_len(serverId->nodeName_), serverId->cpuNum_); if (server != NULL) { deallocatedSscps_->remove(NULL); statsGlobals_->setSscpDeletedOpens(getNumDeallocatedServers()); NADELETEBASIC(serverId, heap_); } } } return sscps_->entries(); } IpcServer *SsmpGlobals::allocateServer(char *nodeName, short nodeNameLen, short cpuNum) { short len; IpcServer *server; const char *processName; ComDiagsArea *diagsArea = NULL; ServerId serverId; // No first connection yet if (sscpServerClass_ == NULL) return NULL; serverId.nodeName_[0] = '\0'; serverId.cpuNum_ = cpuNum; IpcAllocateDiagsArea(diagsArea, heap_); server = sscpServerClass_->allocateServerProcess(&diagsArea, heap_, serverId.nodeName_, serverId.cpuNum_, IPC_PRIORITY_DONT_CARE, 1, // espLevel FALSE); if (server != NULL && server->castToIpcGuardianServer()->isReady()) { processName = server->castToIpcGuardianServer()->getProcessName(); sscps_->insert(processName, str_len(processName), server); statsGlobals_->setSscpOpens(getNumAllocatedServers()); recipients_ += server->getControlConnection()->getId(); } else { if (server != NULL) { sscpServerClass_->freeServerProcess(server); server = NULL; } insertDeallocatedSscp(serverId.nodeName_, serverId.cpuNum_); } diagsArea->decrRefCount(); return server; } void SsmpGlobals::insertDeallocatedSscp(char *nodeName, short cpuNum ) { ServerId *serverId; short len = strlen(nodeName); deallocatedSscps_->position(); while ((serverId = (ServerId *)deallocatedSscps_->getNext()) != NULL) { // ServerId already exists in deallocatedSscps_ list and hence don't add again // But, do delete it if the cpu is Down if ((str_cmp(serverId->nodeName_, nodeName, len) == 0) && serverId->cpuNum_ == cpuNum) { if (!ComRtGetCpuStatus(nodeName, cpuNum)) { deallocatedSscps_->remove(NULL); statsGlobals_->setSscpDeletedOpens(getNumDeallocatedServers()); NADELETEBASIC(serverId, heap_); } return; } } // If the CPU is DOWN, then we don’t // want to insert it into deallocateSscps_ until we // get a CPUUP message later. Because there is no point // trying to reopen the process until its CPU is up. 
if (ComRtGetCpuStatus(nodeName, cpuNum)) { ServerId *entry = new (heap_) ServerId; str_cpy_all(entry->nodeName_, nodeName, len); entry->nodeName_[len] = '\0'; entry->cpuNum_ = cpuNum; deallocatedSscps_->insert(entry, sizeof(ServerId)); statsGlobals_->setSscpDeletedOpens(getNumDeallocatedServers()); } } ULng32 SsmpGlobals::deAllocateServer(char *nodeName, short nodeNameLen, short cpuNum ) { char sscpProcessName[50]; ServerId serverId; short len; IpcServer *sscpServer; if (sscpServerClass_ == NULL) return 0; serverId.nodeName_[0] = '\0'; serverId.cpuNum_ = cpuNum; char *tmpProcessName; tmpProcessName = sscpServerClass_->getProcessName(serverId.nodeName_, (short) str_len(serverId.nodeName_), cpuNum, sscpProcessName); ex_assert(tmpProcessName != NULL, "ProcessName can't be null"); sscps_->position(tmpProcessName, str_len(tmpProcessName)); while ((sscpServer = (IpcServer *)sscps_->getNext()) != NULL) { //Only remove the returned sscpServer if its processName matches the processName //we passed in. if (str_cmp(sscpServer->castToIpcGuardianServer()->getProcessName(), tmpProcessName, str_len(tmpProcessName)) == 0) break; } if (sscpServer != NULL) { // Remove the sscpServer if it is in the list, i.e., it has not already // been removed. sscps_->remove(sscpServer); deletedSscps_->insert(sscpServer); statsGlobals_->setSscpOpens(getNumAllocatedServers()); insertDeallocatedSscp(serverId.nodeName_, serverId.cpuNum_); } return sscps_->entries(); } void SsmpGlobals::cleanupDeletedSscpServers() { NAList<IpcServer *> notReadyToCleanup(heap_); IpcServer *sscp; while (deletedSscps_->getFirst(sscp)) { IpcConnection *conn = sscp->getControlConnection(); if (conn->numQueuedSendMessages() || conn->numQueuedReceiveMessages() || conn->numReceiveCallbacksPending() || conn->hasActiveIOs()) { notReadyToCleanup.insert(sscp); } else { sscpServerClass_->freeServerProcess(sscp); } } deletedSscps_->insert(notReadyToCleanup); } void SsmpGlobals::allocateServerOnNextRequest(char *nodeName, short nodeNameLen, short cpuNum) { char sscpProcessName[50]; ServerId serverId; short len; IpcServer *sscpServer; if (sscpServerClass_ == NULL) return; serverId.nodeName_[0] = '\0'; serverId.cpuNum_ = cpuNum; // Next, the code will do an integrity check, to make sure that // this server isn't already allocated. If we fail the test, // we will pretend that we got a NodeDown and NodeUp message and // issue an EMS event. char *tmpProcessName; tmpProcessName = sscpServerClass_->getProcessName(serverId.nodeName_, (short) str_len(serverId.nodeName_), cpuNum, sscpProcessName); ex_assert(tmpProcessName != NULL, "ProcessName can't be null"); sscps_->position(tmpProcessName, str_len(tmpProcessName)); while ((sscpServer = (IpcServer *)sscps_->getNext()) != NULL) { if (str_cmp(sscpServer->castToIpcGuardianServer()->getProcessName(), tmpProcessName, str_len(tmpProcessName)) == 0) break; } if (sscpServer != NULL) { // We don't seem to have gotten a Node Down message. // Pretend we got one and do the Node Down message processing. // Note that deAllocateServer will execute insertDeallocatedSscp. deAllocateServer(nodeName, nodeNameLen, cpuNum); // Issue an EMS event to help track occurrences of this phenomenon char msg[100]; str_sprintf(msg,"Node UP received before Node DOWN for nid: %d", cpuNum); SQLMXLoggingArea::logExecRtInfo(__FILE__, __LINE__, msg, 0); } else { // By placing the server into the deallocatedSscps_, we ensure that // the next time we need to send a request to the server for stats, // we will first allocate it. 
insertDeallocatedSscp(serverId.nodeName_, serverId.cpuNum_); } return; } static Int64 GcInterval = -1; static Int64 SikGcInterval = -1; void SsmpGlobals::work() { getIpcEnv()->getAllConnections()->waitOnAll(getStatsMergeTimeout()); finishPendingSscpMessages(); // Cleanup IpcEnvironment cleanupDeletedSscpServers(); getIpcEnv()->deleteCompletedMessages(); //Perform cancel escalation, as needed. pendingQueryMgr_.killPendingCanceled(); StatsGlobals *statsGlobals = getStatsGlobals(); statsGlobals->cleanupDanglingSemaphore(TRUE); // Every time we get here either because we timed out or because a request came in, check whether // any GC needs to be done to deallocate space for the master executor for any final stats. If // it has been at least 20 minutes since the last time we did GC, and it has been at least 15 // minutes since a collector has requested stats for a query, and the master stats for that // query has its canBeGCed flag set to true, we'll clean it up. Int64 currTime = NA_JulianTimestamp(); Int64 temp2 = (Int64)(currTime - statsGlobals->getLastGCTime()); if (GcInterval < 0) { // call getenv once per process char *sct = getenv("RMS_GC_INTERVAL_SECONDS"); if (sct) { GcInterval = ((Int64) str_atoi(sct, str_len(sct))) * 1000 * 1000; if (GcInterval < 10*1000*1000) GcInterval = 10*1000*1000; } else GcInterval = 10 * 60 * 1000 * 1000; // 10 minutes } if (SikGcInterval < 0) { // Note: If you change this logic see also the logic to update // siKeyGCinterval_ in optimizer/opt.cpp. // call getenv once per process char *sct = getenv("RMS_SIK_GC_INTERVAL_SECONDS"); if (sct) { SikGcInterval = ((Int64) str_atoi(sct, str_len(sct))) * 1000 * 1000; if (SikGcInterval < 10*1000*1000) SikGcInterval = 10*1000*1000; } else SikGcInterval = (Int64)24 * 60 * 60 * 1000 * 1000; // 24 hours } if ((temp2 > GcInterval) && !doingGC()) { // It's been more than 20 minutes since we did a full GC. Do it again now // and update the time of last GC. 
statsGlobals->checkForDeadProcesses(myPin_); setDoingGC(TRUE); int error = statsGlobals->getStatsSemaphore(semId_, myPin_); statsGlobals->doFullGC(); statsGlobals->setLastGCTime(NA_JulianTimestamp()); statsGlobals->cleanupOldSikeys(SikGcInterval); setDoingGC(FALSE); statsGlobals->releaseStatsSemaphore(semId_, myPin_); } } void SsmpGlobals::addRecipients(SscpClientMsgStream *msgStream) { IpcServer *server; sscps_->position(); while ((server = (IpcServer *)sscps_->getNext()) != NULL) { msgStream->addRecipient(server->getControlConnection()); msgStream->incNumOfClientRequestsSent(); } } void SsmpGlobals::finishPendingSscpMessages() { SscpClientMsgStream *sscpClientMsgStream; if (pendingSscpMessages_->numEntries() == 0) return; pendingSscpMessages_->position(); Int64 currTimestamp = NA_JulianTimestamp(); while ((sscpClientMsgStream = (SscpClientMsgStream *)pendingSscpMessages_->getNext()) != NULL) { if ((currTimestamp - sscpClientMsgStream->getMergeStartTime()) >= (getStatsMergeTimeout() * 1000)) { sscpClientMsgStream->sendMergedStats(); pendingSscpMessages_->remove(NULL); } } } void SsmpGlobals::removePendingSscpMessage(SscpClientMsgStream *sscpClientMsgStream) { SscpClientMsgStream *lcSscpClientMsgStream; pendingSscpMessages_->position(); while ((lcSscpClientMsgStream = (SscpClientMsgStream *)pendingSscpMessages_->getNext()) != NULL) { if (lcSscpClientMsgStream == sscpClientMsgStream) { pendingSscpMessages_->remove(NULL); break; } } } bool SsmpGlobals::getQidFromPid( Int32 pid, // IN Int32 minimumAge, // IN char *queryId, // OUT Lng32 &queryIdLen // OUT ) { bool foundQid = false; StatsGlobals *statsGlobals = getStatsGlobals(); short error = statsGlobals->getStatsSemaphore(getSemId(), myPin()); SyncHashQueue *ssList = statsGlobals->getStmtStatsList(); ssList->position(); StmtStats *ss = (StmtStats *)ssList->getNext(); while (ss != NULL) { if (ss->getPid() == pid && ss->getMasterStats() && (ss->getMasterStats()->timeSinceBlocking(minimumAge) > 0)) { bool finishedSearch = true; ExMasterStats *m = ss->getMasterStats(); char *parentQid = m->getParentQid(); Lng32 parentQidLen = m->getParentQidLen(); if (parentQid != NULL) { // If this query has a parent in this cpu, will keep looking. Int64 parentCpu = -1; ComSqlId::getSqlQueryIdAttr(ComSqlId::SQLQUERYID_CPUNUM, parentQid, parentQidLen, parentCpu, NULL); if (parentCpu == statsGlobals->getCpu()) { finishedSearch = false; } else ; // Even tho this query has a parent, it is not // executing on the same node as this query's // process, so following the semantics of // cancel-by-pid, we will not attempt to // cancel the parent. } if (finishedSearch) { queryIdLen = ss->getQueryIdLen(); str_cpy_all(queryId, ss->getQueryId(), queryIdLen); foundQid = true; break; } } ss = (StmtStats *)ssList->getNext(); } statsGlobals->releaseStatsSemaphore(getSemId(), myPin()); return foundQid; } bool SsmpGlobals::cancelQueryTree(char *queryId, Lng32 queryIdLen, CancelQueryRequest *request, ComDiagsArea **diags) { bool didCancel = false; bool hasChildQid = false; char childQid[ComSqlId::MAX_QUERY_ID_LEN + 1]; Lng32 childQidLen = 0; StatsGlobals *statsGlobals = getStatsGlobals(); int error = statsGlobals->getStatsSemaphore(getSemId(), myPin()); StmtStats *cqStmtStats = statsGlobals->getMasterStmtStats( queryId, queryIdLen, RtsQueryId::ANY_QUERY_); ExMasterStats *m = cqStmtStats ? 
cqStmtStats->getMasterStats(): NULL; if (m && m->getChildQid()) { hasChildQid = true; str_cpy_all(childQid, m->getChildQid(), m->getChildQidLen()); childQid[m->getChildQidLen()] = '\0'; childQidLen = m->getChildQidLen(); } statsGlobals->releaseStatsSemaphore(getSemId(), myPin()); if (cqStmtStats == NULL) { ; // race condition return false; } if (hasChildQid) { if (request->getCancelLogging()) { char thisQid[ComSqlId::MAX_QUERY_ID_LEN + 1]; str_cpy_all(thisQid, queryId, queryIdLen); thisQid[queryIdLen] = '\0'; char msg[120 + // the constant text ComSqlId::MAX_QUERY_ID_LEN + ComSqlId::MAX_QUERY_ID_LEN ]; str_sprintf(msg, "cancelQueryTree for %s , found a child qid %s", thisQid, childQid); SQLMXLoggingArea::logExecRtInfo(__FILE__, __LINE__, msg, 0); } didCancel = cancelQueryTree(childQid,childQidLen, request, diags); } if (cancelQuery(queryId, queryIdLen, request, diags)) didCancel = true; return didCancel; } bool SsmpGlobals::cancelQuery(char *queryId, Lng32 queryIdLen, CancelQueryRequest *request, ComDiagsArea **diags) { bool didAttemptCancel = false; Int64 cancelStartTime = request->getCancelStartTime (); Int32 ceFirstInterval = request->getFirstEscalationInterval(); Int32 ceSecondInterval = request->getSecondEscalationInterval(); NABoolean ceSaveabend = request->getCancelEscalationSaveabend(); bool cancelLogging = request->getCancelLogging(); short sqlErrorCode = 0; const char *sqlErrorDesc = NULL; StatsGlobals *statsGlobals = getStatsGlobals(); int error; char tempQid[ComSqlId::MAX_QUERY_ID_LEN+1]; ActiveQueryEntry * aq = (queryId ? getActiveQueryMgr().getActiveQuery( queryId, queryIdLen) : NULL); if (aq == NULL) { error = statsGlobals->getStatsSemaphore(getSemId(), myPin()); StmtStats *cqStmtStats = statsGlobals->getMasterStmtStats( queryId, queryIdLen, RtsQueryId::ANY_QUERY_); if (cqStmtStats == NULL) sqlErrorCode = -EXE_CANCEL_QID_NOT_FOUND; else { ExMasterStats * cMasterStats = cqStmtStats->getMasterStats(); if (cMasterStats) { Statement::State stmtState = (Statement::State)cMasterStats->getState(); if (stmtState != Statement::OPEN_ && stmtState != Statement::FETCH_ && stmtState != Statement::STMT_EXECUTE_) { sqlErrorCode = -EXE_CANCEL_NOT_POSSIBLE; sqlErrorDesc = "The query is not in OPEN or FETCH or EXECUTE state"; } else { sqlErrorCode = -EXE_CANCEL_NOT_POSSIBLE; sqlErrorDesc = "The query is not registered with the cancel broker"; } } else { sqlErrorCode = -EXE_CANCEL_NOT_POSSIBLE; sqlErrorDesc = "The query state is not known"; } } statsGlobals->releaseStatsSemaphore(getSemId(), myPin()); } else if (aq && (aq->getQueryStartTime() <= cancelStartTime)) { didAttemptCancel = true; // Make sure query is activated first. If it is already activated, // some error conditions will be raised. Ignore these. ComDiagsArea *unimportantDiags = NULL; activateFromQid(queryId, queryIdLen, ACTIVATE, unimportantDiags, cancelLogging); if (unimportantDiags) unimportantDiags->decrRefCount(); StatsGlobals *statsGlobals = getStatsGlobals(); int error = statsGlobals->getStatsSemaphore(getSemId(), myPin()); StmtStats *cqStmtStats = statsGlobals->getMasterStmtStats( queryId, queryIdLen, RtsQueryId::ANY_QUERY_); if (cqStmtStats == NULL) { ; // race condition - query is gone, but we haven't cleanup up // active query entry yet. 
} else { ExMasterStats * cMasterStats = cqStmtStats->getMasterStats(); if (cMasterStats) { cMasterStats->setCanceledTime(JULIANTIMESTAMP()); cMasterStats->setCancelComment(request->getComment()); } } statsGlobals->releaseStatsSemaphore(getSemId(), myPin()); // Set up for escalation later. if ((ceFirstInterval != 0) || (ceSecondInterval != 0)) { getPendingQueryMgr().addPendingQuery(aq, ceFirstInterval, ceSecondInterval, ceSaveabend, cancelLogging); } // This call makes the reply to the Query Started message. getActiveQueryMgr().rmActiveQuery( queryId, queryIdLen, getHeap(), CB_CANCEL, cancelLogging); } else { sqlErrorCode = -EXE_CANCEL_NOT_POSSIBLE; sqlErrorDesc = "You tried to cancel the subsequent execution of the query"; } ComDiagsArea *lcDiags = NULL; if (sqlErrorCode != 0) { str_cpy_all(tempQid, queryId, queryIdLen); tempQid[queryIdLen] = '\0'; lcDiags = ComDiagsArea::allocate(getHeap()); if (sqlErrorDesc != NULL) { *lcDiags << DgSqlCode(sqlErrorCode) << DgString0(tempQid) << DgString1(sqlErrorDesc); } else *lcDiags << DgSqlCode(sqlErrorCode) << DgString0(tempQid); } *diags = lcDiags; return didAttemptCancel; } void SsmpGlobals::suspendOrActivate( char *queryId, Lng32 qidLen, SuspendOrActivate sOrA, bool suspendLogging) { allocateServers(); SscpClientMsgStream *sscpMsgStream = new (getHeap()) SscpClientMsgStream((NAHeap *)getHeap(), getIpcEnv(), this, NULL); sscpMsgStream->setUsedToSendCbMsgs(); addRecipients(sscpMsgStream); sscpMsgStream->clearAllObjects(); SuspendActivateServersRequest *requestForSscp = new (getHeap()) SuspendActivateServersRequest((RtsHandle) sscpMsgStream, getHeap(), (sOrA == SUSPEND), suspendLogging ); *sscpMsgStream << *requestForSscp; RtsQueryId *qidObjForSscp = new (getHeap()) RtsQueryId( getHeap(), queryId, qidLen); *sscpMsgStream << *qidObjForSscp; // Send the Message to all // Do not do this? ssmpGlobals_->addPendingSscpMessage(sscpMsgStream); sscpMsgStream->send(FALSE); requestForSscp->decrRefCount(); qidObjForSscp->decrRefCount(); } bool SsmpGlobals::activateFromQid( char *qid, Lng32 qidLen, SuspendOrActivate /* sOrAOrC */, ComDiagsArea *&diags, bool suspendLogging) { bool doAttemptActivate = true; // Find the query. StatsGlobals *statsGlobals = getStatsGlobals(); int error = statsGlobals->getStatsSemaphore(getSemId(), myPin()); SyncHashQueue *stmtStatsList = statsGlobals->getStmtStatsList(); stmtStatsList->position(qid, qidLen); StmtStats *kqStmtStats = NULL; ExMasterStats *masterStats = NULL; while (NULL != (kqStmtStats = (StmtStats *)stmtStatsList->getNext())) { if (str_cmp(kqStmtStats->getQueryId(), qid, qidLen) == 0) { // Can control only queries which are executing. 
masterStats = kqStmtStats->getMasterStats(); if ( masterStats && (masterStats->getExeStartTime() != -1) && (masterStats->getExeEndTime() == -1) ) break; } } if (!masterStats) { doAttemptActivate = false; if (diags == NULL) diags = ComDiagsArea::allocate(getHeap()); *diags << DgSqlCode(-EXE_RTS_QID_NOT_FOUND) << DgString0(qid); } else if (!masterStats->isQuerySuspended()) { doAttemptActivate = false; if (diags == NULL) diags = ComDiagsArea::allocate(getHeap()); if (!masterStats->isReadyToSuspend()) *diags << DgSqlCode(-EXE_SUSPEND_QID_NOT_ACTIVE); else *diags << DgSqlCode(-EXE_SUSPEND_NOT_SUSPENDED); } if (doAttemptActivate) { suspendOrActivate(qid, qidLen, ACTIVATE, suspendLogging); masterStats->setQuerySuspended(false); } statsGlobals->releaseStatsSemaphore(getSemId(), myPin()); if (doAttemptActivate && suspendLogging) { char msg[80 + // the constant text ComSqlId::MAX_QUERY_ID_LEN]; str_sprintf(msg, "MXSSMP has processed a request to reactivate query %s.", qid); SQLMXLoggingArea::logExecRtInfo(__FILE__, __LINE__, msg, 0); } return doAttemptActivate; } void SsmpGuaReceiveControlConnection::actOnSystemMessage( short messageNum, IpcMessageBufferPtr sysMsg, IpcMessageObjSize sysMsgLen, short clientFileNumber, const GuaProcessHandle &clientPhandle, GuaConnectionToClient *connection) { switch (messageNum) { case ZSYS_VAL_SMSG_OPEN: { SsmpNewIncomingConnectionStream *newStream = new(getEnv()->getHeap()) SsmpNewIncomingConnectionStream((NAHeap *)getEnv()->getHeap(), getEnv(),getSsmpGlobals()); ex_assert(connection != NULL,"Must create connection for open sys msg"); newStream->addRecipient(connection); newStream->receive(FALSE); } initialized_ = TRUE; break; case ZSYS_VAL_SMSG_CLOSE: ssmpGlobals_->getActiveQueryMgr().clientIsGone(clientPhandle, clientFileNumber); ssmpGlobals_->getPendingQueryMgr().clientIsGone(clientPhandle, clientFileNumber); break; case ZSYS_VAL_SMSG_PROCDEATH: { zsys_ddl_smsg_procdeath_def *msg = (zsys_ddl_smsg_procdeath_def *) sysMsg; SB_Phandle_Type *phandle = (SB_Phandle_Type *)&msg->z_phandle; Int32 cpu; pid_t pid; SB_Int64_Type seqNum = 0; if (XZFIL_ERR_OK == XPROCESSHANDLE_DECOMPOSE_( phandle, &cpu, &pid , NULL // nodeNumber , NULL // nodeName , 0 // nodeNameLen input , NULL // nodeNameLen output , NULL // processName , 0 // processNameLen input , NULL // processNameLen output , &seqNum )) { if (cpu == ssmpGlobals_->myCpu()) ssmpGlobals_->getStatsGlobals()->verifyAndCleanup(pid, seqNum); } } break; case ZSYS_VAL_SMSG_CPUDOWN: { zsys_ddl_smsg_cpudown_def *msg = (zsys_ddl_smsg_cpudown_def *) sysMsg; #ifdef _DEBUG cout << "Cpu Down received for NULL " << msg->z_cpunumber << endl; #endif ssmpGlobals_->deAllocateServer(NULL, 0, msg->z_cpunumber); } break; case ZSYS_VAL_SMSG_REMOTECPUDOWN: { zsys_ddl_smsg_remotecpudown_def *msg = (zsys_ddl_smsg_remotecpudown_def *) sysMsg; #ifdef _DEBUG char la_nodename[msg->z_nodename_len + 1]; memcpy(la_nodename, msg->z_nodename, (size_t) msg->z_nodename_len); la_nodename[msg->z_nodename_len] = '\0'; cout << "Remote CPU DOWN received for " << la_nodename << " " << msg->z_cpunumber << endl; #endif ssmpGlobals_->deAllocateServer(msg->z_nodename, msg->z_nodename_len, msg->z_cpunumber); } break; case ZSYS_VAL_SMSG_CPUUP: { zsys_ddl_smsg_cpuup_def *msg = (zsys_ddl_smsg_cpuup_def *) sysMsg; #ifdef _DEBUG cout << "CPU UP received for NULL " << msg->z_cpunumber << endl; #endif ssmpGlobals_->allocateServerOnNextRequest(NULL, 0, msg->z_cpunumber); } break; case ZSYS_VAL_SMSG_REMOTECPUUP: { zsys_ddl_smsg_remotecpuup_def *msg = 
(zsys_ddl_smsg_remotecpuup_def *) sysMsg; #ifdef _DEBUG char la_nodename[msg->z_nodename_len + 1]; memcpy( la_nodename, msg->z_nodename, (size_t) msg->z_nodename_len ); la_nodename[msg->z_nodename_len] = '\0'; cout << "Remote CPU UP received for " << la_nodename << " " << msg->z_cpunumber; #endif ssmpGlobals_->allocateServerOnNextRequest(msg->z_nodename, msg->z_nodename_len, msg->z_cpunumber); } break; case ZSYS_VAL_SMSG_NODEUP: break; case ZSYS_VAL_SMSG_NODEDOWN: ssmpGlobals_->releaseOrphanEntries(); break; case XZSYS_VAL_SMSG_SHUTDOWN: NAExit(0); break; default: // do nothing for all other kinds of system messages break; } // switch } SsmpNewIncomingConnectionStream::~SsmpNewIncomingConnectionStream() { if (sscpDiagsArea_) { sscpDiagsArea_->decrRefCount(); sscpDiagsArea_ = NULL; } } void SsmpNewIncomingConnectionStream::actOnSend(IpcConnection *connection) { // check for OS errors if (connection->getErrorInfo() == 0) ssmpGlobals_->incSsmpReplyMsg(connection->getLastSentMsg()->getMessageLength()); } void SsmpNewIncomingConnectionStream::actOnSendAllComplete() { // Wait for the next request for the same stream clearAllObjects(); receive(FALSE); } void SsmpNewIncomingConnectionStream::actOnReceive(IpcConnection *connection) { // check for OS errors if (connection->getErrorInfo() != 0) return; ssmpGlobals_->incSsmpReqMsg(connection->getLastReceivedMsg()->getMessageLength()); // take a look at the type of the first object in the message switch(getNextObjType()) { case RTS_MSG_STATS_REQ: actOnStatsReq(connection); break; case RTS_MSG_CPU_STATS_REQ: actOnCpuStatsReq(connection); break; case RTS_MSG_EXPLAIN_REQ: actOnExplainReq(connection); break; case CANCEL_QUERY_STARTED_REQ: actOnQueryStartedReq(connection); break; case CANCEL_QUERY_FINISHED_REQ: actOnQueryFinishedReq(connection); break; case CANCEL_QUERY_REQ: actOnCancelQueryReq(connection); break; case SUSPEND_QUERY_REQ: actOnSuspendQueryReq(connection); break; case ACTIVATE_QUERY_REQ: actOnActivateQueryReq(connection); break; case SECURITY_INVALID_KEY_REQ: actOnSecInvalidKeyReq(connection); break; case LOB_LOCK_REQ: actOnLobLockReq(connection); break; default: ex_assert(FALSE,"Invalid request from client"); } } void SsmpNewIncomingConnectionStream::actOnReceiveAllComplete() { if (getState() == ERROR_STATE) addToCompletedList(); } void SsmpNewIncomingConnectionStream::actOnQueryStartedReq(IpcConnection *connection) { IpcMessageObjVersion msgVer; msgVer = getNextObjVersion(); ex_assert(msgVer <= currRtsStatsReqVersionNumber, "Up-rev message received."); QueryStarted *request = new (getHeap()) QueryStarted(INVALID_RTS_HANDLE, getHeap()); *this >> *request; setHandle(request->getHandle()); if (moreObjects()) { RtsMessageObjType objType = (RtsMessageObjType) getNextObjType(); switch (objType) { case RTS_QUERY_ID: { RtsQueryId *queryId = new (getHeap()) RtsQueryId(getHeap()); // Get the query Id from IPC *this >> *queryId; clearAllObjects(); ssmpGlobals_->getActiveQueryMgr().addActiveQuery( queryId->getQid(), queryId->getQueryIdLen(), request->getStartTime(), request->getMasterPhandle(), request->getExecutionCount(), this, connection); request->decrRefCount(); queryId->decrRefCount(); } break; default: ex_assert(0, "something besides an RTS_QUERY_ID followed QueryStarted"); break; } } else ex_assert(0, "expected an RTS_QUERY_ID following a QueryStarted"); // start another receive operation for the next request receive(FALSE); return; } void SsmpNewIncomingConnectionStream::actOnQueryFinishedReq( IpcConnection *connection) { 
ex_assert(getNextObjVersion() <= currRtsStatsReqVersionNumber, "Up-rev message received."); QueryFinished *request = new (getHeap()) QueryFinished(INVALID_RTS_HANDLE, getHeap()); *this >> *request; setHandle(request->getHandle()); request->decrRefCount(); if (moreObjects()) { RtsMessageObjType objType = (RtsMessageObjType) getNextObjType(); switch (objType) { case RTS_QUERY_ID: { RtsQueryId *queryId = new (getHeap()) RtsQueryId(getHeap()); // Get the query Id from IPC *this >> *queryId; clearAllObjects(); // This call makes the reply to the Query Started message. ssmpGlobals_->getActiveQueryMgr().rmActiveQuery(queryId->getQid(), queryId->getQueryIdLen(), getHeap(), CB_COMPLETE, false /*no cancel logging */); queryId->decrRefCount(); // Now, make a reply to the Query Finished message. RmsGenericReply *qfReply = new (getHeap()) RmsGenericReply(getHeap()); *this << *qfReply; qfReply->decrRefCount(); send(FALSE); } break; default: ex_assert(0, "something besides an RTS_QUERY_ID followed QueryFinished"); break; } } else ex_assert(0, "expected an RTS_QUERY_ID following a QueryFinished"); } void SsmpNewIncomingConnectionStream::actOnCancelQueryReq( IpcConnection *connection) { ex_assert(getNextObjVersion() <= currRtsStatsReqVersionNumber, "Up-rev message received."); CancelQueryRequest *request = new (getHeap()) CancelQueryRequest(INVALID_RTS_HANDLE, getHeap()); *this >> *request; setHandle(request->getHandle()); Int32 minimumAge = request->getMinimumAge(); bool cancelByPid = request->getCancelByPid(); char queryId[200]; Lng32 queryIdLen = 0; bool didAttemptCancel = false; bool haveAQid = false; if (cancelByPid) { haveAQid = ssmpGlobals_->getQidFromPid(request->getCancelPid(), minimumAge, queryId, queryIdLen); } else { ex_assert(moreObjects(), "expected an RTS_QUERY_ID following a CancelQuery"); RtsMessageObjType objType = (RtsMessageObjType) getNextObjType(); ex_assert(RTS_QUERY_ID == objType, "something besides an RTS_QUERY_ID followed CancelQuery"); RtsQueryId *msgQueryId = new (getHeap()) RtsQueryId(getHeap()); // Get the query Id from IPC *this >> *msgQueryId; ex_assert(msgQueryId->getQueryIdLen() <= sizeof(queryId), "query id received is too long"); queryIdLen = msgQueryId->getQueryIdLen(); str_cpy_all(queryId, msgQueryId->getQid(), queryIdLen); msgQueryId->decrRefCount(); haveAQid = true; } clearAllObjects(); ComDiagsArea *diags = NULL; if (haveAQid) didAttemptCancel = getSsmpGlobals()->cancelQueryTree( queryId, queryIdLen, request, &diags); // Now, make a reply to the Cancel Query message. 
RtsHandle rtsHandle = (RtsHandle) this; ControlQueryReply *cqReply = new (getHeap()) ControlQueryReply(rtsHandle, getHeap(), didAttemptCancel || cancelByPid); *this << *cqReply; if (!(didAttemptCancel ||cancelByPid)) { if (diags == NULL) { diags = ComDiagsArea::allocate(getHeap()); *diags << DgSqlCode(-EXE_CANCEL_QID_NOT_FOUND); } *this << *diags; } send(FALSE); cqReply->decrRefCount(); if (diags) diags->decrRefCount(); request->decrRefCount(); } void SsmpNewIncomingConnectionStream::actOnSuspendQueryReq( IpcConnection *connection) { ex_assert(getNextObjVersion() <= CurrSuspendQueryReplyVersionNumber, "Up-rev message received."); SuspendQueryRequest *request = new (getHeap()) SuspendQueryRequest(INVALID_RTS_HANDLE, getHeap()); *this >> *request; setHandle(request->getHandle()); if (moreObjects()) { RtsMessageObjType objType = (RtsMessageObjType) getNextObjType(); switch (objType) { case RTS_QUERY_ID: { bool doAttemptSuspend = true; ComDiagsArea *diags = NULL; RtsQueryId *queryId = new (getHeap()) RtsQueryId(getHeap()); // Get the query Id from IPC *this >> *queryId; clearAllObjects(); char *qid = queryId->getQid(); Lng32 qidLen = queryId->getQueryIdLen(); // Find the query. StatsGlobals *statsGlobals = ssmpGlobals_->getStatsGlobals(); int error = statsGlobals->getStatsSemaphore( ssmpGlobals_->getSemId(), ssmpGlobals_->myPin()); SyncHashQueue *stmtStatsList = statsGlobals->getStmtStatsList(); stmtStatsList->position(qid, qidLen); StmtStats *kqStmtStats = NULL; while (NULL != (kqStmtStats = (StmtStats *)stmtStatsList->getNext())) { if (str_cmp(kqStmtStats->getQueryId(), qid, qidLen) == 0) { // Can control only queries which have an ExMasterStats. if (NULL != kqStmtStats->getMasterStats()) break; } } ExMasterStats *masterStats = NULL; if (kqStmtStats) masterStats = kqStmtStats->getMasterStats(); if (masterStats) { if(!masterStats->isReadyToSuspend()) { doAttemptSuspend = false; diags = ComDiagsArea::allocate(getHeap()); *diags << DgSqlCode(-EXE_SUSPEND_QID_NOT_ACTIVE); } else if (!request->getIsForced()) { // See if safe to suspend. if (masterStats && masterStats->getSuspendMayHaveAuditPinned()) { doAttemptSuspend = false; diags = ComDiagsArea::allocate(getHeap()); *diags << DgSqlCode(-EXE_SUSPEND_AUDIT); } else if (masterStats && masterStats->getSuspendMayHoldLock()) { doAttemptSuspend = false; diags = ComDiagsArea::allocate(getHeap()); *diags << DgSqlCode(-EXE_SUSPEND_LOCKS); } } if (doAttemptSuspend && masterStats->isQuerySuspended()) { doAttemptSuspend = false; diags = ComDiagsArea::allocate(getHeap()); *diags << DgSqlCode(-EXE_SUSPEND_ALREADY_SUSPENDED); } if (doAttemptSuspend) { // sanity checking here - may be better for MXSSMP to fail now // than for MXSSCPs to fail later. 
ExStatisticsArea *statsArea = kqStmtStats->getStatsArea(); ex_assert(statsArea, "Eligible subject query has no ExStatisticsArea"); ExOperStats *rootStats = statsArea->getRootStats(); ex_assert(rootStats, "Eligible subject query has no root ExOperStats"); ex_assert((rootStats->statType() == ExOperStats::ROOT_OPER_STATS) || (rootStats->statType() == ExOperStats::MEAS_STATS), "Eligible subject query does not have correct stats."); ssmpGlobals_->suspendOrActivate(qid, qidLen, SUSPEND, request->getSuspendLogging()); masterStats->setQuerySuspended(true); } } else { doAttemptSuspend = false; diags = ComDiagsArea::allocate(getHeap()); *diags << DgSqlCode(-EXE_RTS_QID_NOT_FOUND) << DgString0(queryId->getQid()); } statsGlobals->releaseStatsSemaphore(ssmpGlobals_->getSemId(), ssmpGlobals_->myPin()); if (doAttemptSuspend && request->getSuspendLogging()) { char msg[80 + // the constant text ComSqlId::MAX_QUERY_ID_LEN]; str_sprintf(msg, "MXSSMP has processed a request to suspend query %s.", qid); SQLMXLoggingArea::logExecRtInfo(__FILE__, __LINE__, msg, 0); } queryId->decrRefCount(); // Now, make a reply to the Suspend Query message. RtsHandle rtsHandle = (RtsHandle) this; ControlQueryReply *cqReply = new (getHeap()) ControlQueryReply(rtsHandle, getHeap(), doAttemptSuspend); *this << *cqReply; cqReply->decrRefCount(); if (diags) { *this << *diags; diags->decrRefCount(); } send(FALSE); } break; default: ex_assert(0, "something besides an RTS_QUERY_ID followed SuspendQueryRequest"); break; } } else ex_assert(0, "expected an RTS_QUERY_ID following a SuspendQueryRequest"); request->decrRefCount(); } void SsmpNewIncomingConnectionStream::actOnActivateQueryReq( IpcConnection *connection) { ex_assert(getNextObjVersion() <= CurrSuspendQueryReplyVersionNumber, "Up-rev message received."); ActivateQueryRequest *request = new (getHeap()) ActivateQueryRequest(INVALID_RTS_HANDLE, getHeap()); *this >> *request; setHandle(request->getHandle()); bool suspendLogging = request->getSuspendLogging(); request->decrRefCount(); if (moreObjects()) { RtsMessageObjType objType = (RtsMessageObjType) getNextObjType(); switch (objType) { case RTS_QUERY_ID: { ComDiagsArea *diags = NULL; RtsQueryId *queryId = new (getHeap()) RtsQueryId(getHeap()); // Get the query Id from IPC *this >> *queryId; clearAllObjects(); char *qid = queryId->getQid(); Lng32 qidLen = queryId->getQueryIdLen(); bool didAttemptActivate = ssmpGlobals_->activateFromQid(qid, qidLen, ACTIVATE, diags, suspendLogging); queryId->decrRefCount(); // Now, make a reply to the Activate Query message. 
RtsHandle rtsHandle = (RtsHandle) this; ControlQueryReply *cqReply = new (getHeap()) ControlQueryReply(rtsHandle, getHeap(), didAttemptActivate); *this << *cqReply; cqReply->decrRefCount(); if (diags) { *this << *diags; diags->decrRefCount(); } send(FALSE); } break; default: ex_assert(0, "something besides an RTS_QUERY_ID followed SuspendQueryRequest"); break; } } else ex_assert(0, "expected an RTS_QUERY_ID following a SuspendQueryRequest"); } void SsmpNewIncomingConnectionStream::actOnLobLockReq( IpcConnection *connection) { IpcMessageObjVersion msgVer = getNextObjVersion(); StatsGlobals *statsGlobals; NABoolean releasingLock = FALSE; CliGlobals *cliGlobals = GetCliGlobals(); ex_assert(msgVer <= CurrLobLockVersionNumber, "Up-rev message received."); LobLockRequest *llReq= new (getHeap()) LobLockRequest(getHeap()); *this >> *llReq; setHandle(llReq->getHandle()); ex_assert(!moreObjects(),"Unexpected objects following LobLockRequest"); clearAllObjects(); //check and set the lock in the local shared segment statsGlobals = ssmpGlobals_->getStatsGlobals(); char *inLobLockId = NULL; inLobLockId = llReq->getLobLockId(); if (inLobLockId[0] == '-') //we are releasing this lock. No need to check. inLobLockId = NULL; else { inLobLockId = &inLobLockId[1]; statsGlobals->checkLobLock(cliGlobals, inLobLockId); } if (inLobLockId) { //It's already set, don't propagate if (sscpDiagsArea_== NULL) sscpDiagsArea_ = ComDiagsArea::allocate(ssmpGlobals_->getHeap()); *sscpDiagsArea_<< DgSqlCode(-EXE_LOB_CONCURRENT_ACCESS_ERROR); RmsGenericReply *reply = new(getHeap()) RmsGenericReply(getHeap()); *this << *reply; *this << *sscpDiagsArea_; this->clearSscpDiagsArea(); send(FALSE); reply->decrRefCount(); } else { // Forward request to all mxsscps. ssmpGlobals_->allocateServers(); SscpClientMsgStream *sscpMsgStream = new (heap_) SscpClientMsgStream(heap_, getIpcEnv(), ssmpGlobals_, this); sscpMsgStream->setUsedToSendLLMsgs(); ssmpGlobals_->addRecipients(sscpMsgStream); sscpMsgStream->clearAllObjects(); *sscpMsgStream << *llReq; llReq->decrRefCount(); sscpMsgStream->send(FALSE); } // Reply to client when the msgs to mxsscp have all completed. The reply // is made from the sscpMsgStream's callback. } void SsmpNewIncomingConnectionStream::actOnSecInvalidKeyReq( IpcConnection *connection) { IpcMessageObjVersion msgVer = getNextObjVersion(); ex_assert(msgVer <= CurrSecurityInvalidKeyVersionNumber, "Up-rev message received."); SecInvalidKeyRequest *sikReq = new (getHeap()) SecInvalidKeyRequest(getHeap()); *this >> *sikReq; setHandle(sikReq->getHandle()); ex_assert( !moreObjects(), "Unexpected objects following SecInvalidKeyRequest"); clearAllObjects(); // Forward request to all mxsscps. ssmpGlobals_->allocateServers(); SscpClientMsgStream *sscpMsgStream = new (heap_) SscpClientMsgStream(heap_, getIpcEnv(), ssmpGlobals_, this); sscpMsgStream->setUsedToSendSikMsgs(); ssmpGlobals_->addRecipients(sscpMsgStream); sscpMsgStream->clearAllObjects(); *sscpMsgStream << *sikReq; sikReq->decrRefCount(); sscpMsgStream->send(FALSE); // Reply to client when the msgs to mxsscp have all completed. The reply // is made from the sscpMsgStream's callback. 
} void SsmpNewIncomingConnectionStream::actOnStatsReq(IpcConnection *connection) { IpcMessageObjVersion msgVer; StatsGlobals *statsGlobals; int error; char *qid; pid_t pid = 0; short cpu; Int64 timeStamp; Lng32 queryNumber; RtsQueryId *rtsQueryId = NULL; StmtStats *stmtStats = NULL; msgVer = getNextObjVersion(); ex_assert(msgVer <= currRtsStatsReqVersionNumber, "Up-rev message received."); RtsStatsReq *request = new (getHeap()) RtsStatsReq(INVALID_RTS_HANDLE, getHeap()); *this >> *request; setHandle(request->getHandle()); setWmsProcess(request->getWmsProcess()); if (moreObjects()) { RtsMessageObjType objType = (RtsMessageObjType) getNextObjType(); switch (objType) { case RTS_QUERY_ID: { RtsQueryId *queryId = new (getHeap()) RtsQueryId(getHeap()); // Get the query Id from IPC *this >> *queryId; clearAllObjects(); statsGlobals = ssmpGlobals_->getStatsGlobals(); short reqType = queryId->getStatsReqType(); short subReqType = queryId->getSubReqType(); switch (reqType) { case SQLCLI_STATS_REQ_QID: qid = queryId->getQid(); error = statsGlobals->getStatsSemaphore(ssmpGlobals_->getSemId(), ssmpGlobals_->myPin()); stmtStats = statsGlobals->getMasterStmtStats(qid, str_len(qid), queryId->getActiveQueryNum()); if (stmtStats != NULL) stmtStats->setStmtStatsUsed(TRUE); statsGlobals->releaseStatsSemaphore(ssmpGlobals_->getSemId(), ssmpGlobals_->myPin()); if (stmtStats != NULL) { getMergedStats(request, queryId, stmtStats, reqType, queryId->getStatsMergeType()); } break; case SQLCLI_STATS_REQ_QID_DETAIL: qid = queryId->getQid(); error = statsGlobals->getStatsSemaphore(ssmpGlobals_->getSemId(), ssmpGlobals_->myPin()); stmtStats = statsGlobals->getMasterStmtStats(qid, str_len(qid), 1); if (stmtStats != NULL) stmtStats->setStmtStatsUsed(TRUE); statsGlobals->releaseStatsSemaphore(ssmpGlobals_->getSemId(), ssmpGlobals_->myPin()); if (stmtStats != NULL) { getMergedStats(request, queryId, stmtStats, reqType, queryId->getStatsMergeType()); } break; case SQLCLI_STATS_REQ_PROCESS_INFO: pid = queryId->getPid(); getProcessStats(reqType, subReqType, pid); break; case SQLCLI_STATS_REQ_PID: error = statsGlobals->getStatsSemaphore(ssmpGlobals_->getSemId(), ssmpGlobals_->myPin()); stmtStats = statsGlobals->getStmtStats(pid, queryId->getActiveQueryNum()); if (stmtStats != NULL) stmtStats->setStmtStatsUsed(TRUE); statsGlobals->releaseStatsSemaphore(ssmpGlobals_->getSemId(), ssmpGlobals_->myPin()); if (stmtStats != NULL) { if (stmtStats->isMaster()) { getMergedStats(request, NULL, stmtStats, reqType, queryId->getStatsMergeType()); } else { RtsStatsReply *reply = new (getHeap()) RtsStatsReply(request->getHandle(), getHeap()); clearAllObjects(); setType(IPC_MSG_SSMP_REPLY); setVersion(CurrSsmpReplyMessageVersion); *this << *reply; rtsQueryId = new (getHeap()) RtsQueryId(getHeap(), stmtStats->getQueryId(), stmtStats->getQueryIdLen(), // We need to use ANY_QUERY_ otherwise you might get QUERY_ID not found // if the query gets finished before stats is merged (UInt16)SQLCLI_SAME_STATS, RtsQueryId::ANY_QUERY_); *this << *(rtsQueryId); send(FALSE); stmtStats->setStmtStatsUsed(FALSE); reply->decrRefCount(); rtsQueryId->decrRefCount(); } } break; case SQLCLI_STATS_REQ_QID_INTERNAL: cpu = queryId->getCpu(); pid = queryId->getPid(); timeStamp = queryId->getTimeStamp(); queryNumber = queryId->getQueryNumber(); error = statsGlobals->getStatsSemaphore(ssmpGlobals_->getSemId(), ssmpGlobals_->myPin()); stmtStats = statsGlobals->getStmtStats(cpu, pid, timeStamp, queryNumber); if (stmtStats != NULL) stmtStats->setStmtStatsUsed(TRUE); 
statsGlobals->releaseStatsSemaphore(ssmpGlobals_->getSemId(), ssmpGlobals_->myPin()); if (stmtStats != NULL) { ex_assert(stmtStats->isMaster() == TRUE, "Should be Master here"); getMergedStats(request, NULL, stmtStats, reqType, queryId->getStatsMergeType()); } break; case SQLCLI_STATS_REQ_CPU: error = statsGlobals->getStatsSemaphore(ssmpGlobals_->getSemId(), ssmpGlobals_->myPin()); stmtStats = statsGlobals->getStmtStats(queryId->getActiveQueryNum()); if (stmtStats != NULL) stmtStats->setStmtStatsUsed(TRUE); statsGlobals->releaseStatsSemaphore(ssmpGlobals_->getSemId(), ssmpGlobals_->myPin()); if (stmtStats != NULL) { ex_assert(stmtStats->isMaster() == TRUE, "Should be Master here"); getMergedStats(request, NULL, stmtStats, reqType, queryId->getStatsMergeType()); } break; default: break; } request->decrRefCount(); queryId->decrRefCount(); // If there is no stmt stats, reply back with empty stats if (stmtStats == NULL && reqType != SQLCLI_STATS_REQ_PROCESS_INFO) sendMergedStats(NULL, 0, reqType, NULL, FALSE); } break; default: break; } } } void SsmpNewIncomingConnectionStream::getProcessStats(short reqType, short subReqType, pid_t pid) { ExStatisticsArea *mergedStats=NULL; ExProcessStats *exProcessStats=NULL; ExProcessStats *tmpExProcessStats=NULL; StatsGlobals *statsGlobals = ssmpGlobals_->getStatsGlobals(); exProcessStats = statsGlobals->getExProcessStats(pid); if (exProcessStats != NULL) { mergedStats = new (getHeap()) ExStatisticsArea(getHeap(), 0, ComTdb::OPERATOR_STATS, ComTdb::OPERATOR_STATS); mergedStats->setStatsEnabled(TRUE); mergedStats->setSubReqType(subReqType); tmpExProcessStats = new (mergedStats->getHeap()) ExProcessStats(mergedStats->getHeap()); tmpExProcessStats->copyContents(exProcessStats); mergedStats->insert(tmpExProcessStats); } sendMergedStats(mergedStats, 0, reqType, NULL, FALSE); } void SsmpNewIncomingConnectionStream::getMergedStats(RtsStatsReq *request, RtsQueryId *queryId, StmtStats *stmtStats, short reqType, UInt16 statsMergeType) { Int64 currTime = NA_JulianTimestamp(); RtsQueryId *rtsQueryId = NULL; ExStatisticsArea *mergedStats; ExStatisticsArea *srcMergedStats; // MergedStats_ in stmtStats can't be used directly in Ipc layer // since the stats->entries_ will be traversed at the time of send // This can cause corruption since hashQueue is not protected using semaphore // So, use local stats area instead when the statsArea is returned within the // RTS_MERGE_INTERVAL_TIMEOUT from SSMP itself. When the statsArea is merged from SSCPs // set the mergedStats in stmtStats_ after it has been shipped out, to avoid other processes // accessing the mergedStats_ while it is being traversed in Ipc layer if (reqType == SQLCLI_STATS_REQ_QID && (ssmpGlobals_->getForceMerge() == FALSE) && (ssmpGlobals_->getNumDeallocatedServers() == 0) && (stmtStats->mergeReqd() == FALSE) && ((currTime - stmtStats->getLastMergedTime()) < (ssmpGlobals_->getStatsCollectionInterval() * 1000000)) && isWmsProcess()) { if ((srcMergedStats = stmtStats->getMergedStats()) != NULL) { ComTdb::CollectStatsType tempStatsMergeType = (statsMergeType == SQLCLI_SAME_STATS ? 
srcMergedStats->getOrigCollectStatsType() : (ComTdb::CollectStatsType)statsMergeType); if (srcMergedStats->getCollectStatsType() == tempStatsMergeType) { mergedStats = new (getHeap()) ExStatisticsArea(getHeap(), 0, tempStatsMergeType, srcMergedStats->getOrigCollectStatsType()); mergedStats->setStatsEnabled(TRUE); StatsGlobals *statsGlobals = ssmpGlobals_->getStatsGlobals(); int error = statsGlobals->getStatsSemaphore(ssmpGlobals_->getSemId(), ssmpGlobals_->myPin()); mergedStats->merge(srcMergedStats, statsMergeType); statsGlobals->releaseStatsSemaphore(ssmpGlobals_->getSemId(), ssmpGlobals_->myPin()); sendMergedStats(mergedStats, 0, reqType, stmtStats, FALSE); return; } } } ssmpGlobals_->allocateServers(); // Give the received message to SscpClientMsgStream SscpClientMsgStream *sscpMsgStream = new (heap_) SscpClientMsgStream(heap_, getIpcEnv(), ssmpGlobals_, this); ssmpGlobals_->addRecipients(sscpMsgStream); sscpMsgStream->clearAllObjects(); *sscpMsgStream << *request; // If the incoming RtsQueryId (queryId) is null, construct the // RtsQueryId from stmtStats if (queryId == NULL) { rtsQueryId = new (getHeap()) RtsQueryId(getHeap(), stmtStats->getQueryId(), stmtStats->getQueryIdLen(), statsMergeType, RtsQueryId::ANY_QUERY_); *sscpMsgStream << *rtsQueryId; sscpMsgStream->setReqType(rtsQueryId->getStatsReqType()); sscpMsgStream->setDetailLevel(rtsQueryId->getDetailLevel()); } else { *sscpMsgStream << *queryId ; sscpMsgStream->setReqType(queryId->getStatsReqType()); sscpMsgStream->setDetailLevel(queryId->getDetailLevel()); } sscpMsgStream->setStmtStats(stmtStats); sscpMsgStream->setMergeStartTime(NA_JulianTimestamp()); // Send the Message to all ssmpGlobals_->addPendingSscpMessage(sscpMsgStream); sscpMsgStream->send(FALSE); if (rtsQueryId != NULL) rtsQueryId->decrRefCount(); return; } void SsmpNewIncomingConnectionStream::actOnCpuStatsReq(IpcConnection *connection) { IpcMessageObjVersion msgVer; StmtStats *stmtStats; StatsGlobals *statsGlobals; ExStatisticsArea *stats; ExStatisticsArea *cpuStats = NULL; ExMasterStats *masterStats; short currQueryNum = 0; msgVer = getNextObjVersion(); if (msgVer > currRtsCpuStatsReqVersionNumber) // Send Error ; RtsCpuStatsReq *request = new (getHeap()) RtsCpuStatsReq(INVALID_RTS_HANDLE, getHeap()); *this >> *request; setHandle(request->getHandle()); short reqType = request->getReqType(); short noOfQueries = request->getNoOfQueries(); Lng32 filter = request->getFilter(); short subReqType = request->getSubReqType(); clearAllObjects(); if (request->getCpu() != -1) { setType(IPC_MSG_SSMP_REPLY); setVersion(CurrSsmpReplyMessageVersion); RtsStatsReply *reply = new (getHeap()) RtsStatsReply(request->getHandle(), getHeap()); *this << *reply; statsGlobals = ssmpGlobals_->getStatsGlobals(); switch (reqType) { case SQLCLI_STATS_REQ_RMS_INFO: { cpuStats = new (getHeap()) ExStatisticsArea(getHeap(), 0, ComTdb::RMS_INFO_STATS, ComTdb::RMS_INFO_STATS); cpuStats->setSubReqType(subReqType); cpuStats->setStatsEnabled(TRUE); ExRMSStats *rmsStats = new (getHeap()) ExRMSStats(getHeap()); rmsStats->copyContents(statsGlobals->getRMSStats()); NAHeap *statsHeap = statsGlobals->getStatsHeap(); rmsStats->setGlobalStatsHeapAlloc(statsHeap->getTotalSize()); rmsStats->setGlobalStatsHeapUsed(statsHeap->getAllocSize()); rmsStats->setStatsHeapWaterMark(statsHeap->getHighWaterMark()); rmsStats->setNoOfStmtStats(statsGlobals->getStmtStatsList()->numEntries()); rmsStats->setSemPid(statsGlobals->getSemPid()); rmsStats->setSscpOpens(ssmpGlobals_->getNumAllocatedServers()); 
rmsStats->setSscpDeletedOpens(ssmpGlobals_->getNumDeallocatedServers()); rmsStats->setNumQueryInvKeys(statsGlobals->getRecentSikeys()->entries()); cpuStats->insert(rmsStats); if (request->getNoOfQueries() == RtsCpuStatsReq::INIT_RMS_STATS_) statsGlobals->getRMSStats()->reset(); break; } case SQLCLI_STATS_REQ_ET_OFFENDER: { cpuStats = new (getHeap()) ExStatisticsArea(getHeap(), 0, ComTdb::ET_OFFENDER_STATS, ComTdb::ET_OFFENDER_STATS); cpuStats->setStatsEnabled(TRUE); int error = statsGlobals->getStatsSemaphore(ssmpGlobals_->getSemId(), ssmpGlobals_->myPin()); SyncHashQueue *stmtStatsList = statsGlobals->getStmtStatsList(); stmtStatsList->position(); Int64 currTimestamp = NA_JulianTimestamp(); while ((stmtStats = (StmtStats *)stmtStatsList->getNext()) != NULL) { masterStats = stmtStats->getMasterStats(); if (masterStats != NULL) cpuStats->appendCpuStats(masterStats, FALSE, subReqType, filter, currTimestamp); } statsGlobals->releaseStatsSemaphore(ssmpGlobals_->getSemId(), ssmpGlobals_->myPin()); break; } case SQLCLI_STATS_REQ_MEM_OFFENDER: { cpuStats = new (getHeap()) ExStatisticsArea(getHeap(), 0, ComTdb::MEM_OFFENDER_STATS, ComTdb::MEM_OFFENDER_STATS); cpuStats->setStatsEnabled(TRUE); cpuStats->setSubReqType(subReqType); statsGlobals->getMemOffender(cpuStats, filter); } break; case SQLCLI_STATS_REQ_CPU_OFFENDER: { short noOfQueries = request->getNoOfQueries(); int error = statsGlobals->getStatsSemaphore(ssmpGlobals_->getSemId(), ssmpGlobals_->myPin()); SyncHashQueue * stmtStatsList = statsGlobals->getStmtStatsList(); stmtStatsList->position(); switch (noOfQueries) { case RtsCpuStatsReq::INIT_CPU_STATS_HISTORY_: while ((stmtStats = (StmtStats *)stmtStatsList->getNext()) != NULL) { stats = stmtStats->getStatsArea(); if (stats != NULL) stats->setCpuStatsHistory(); } break; default: if ( noOfQueries == RtsCpuStatsReq::ALL_ACTIVE_QUERIES_) noOfQueries = 32767; cpuStats = new (getHeap()) ExStatisticsArea(getHeap(), 0, ComTdb::CPU_OFFENDER_STATS, ComTdb::CPU_OFFENDER_STATS); cpuStats->setStatsEnabled(TRUE); while ((stmtStats = (StmtStats *)stmtStatsList->getNext()) != NULL && currQueryNum <= noOfQueries) { stats = stmtStats->getStatsArea(); if (stats != NULL) { if (cpuStats->appendCpuStats(stats)) currQueryNum++; } } break; } statsGlobals->releaseStatsSemaphore(ssmpGlobals_->getSemId(), ssmpGlobals_->myPin()); break; } default: break; } if (cpuStats != NULL && cpuStats->numEntries() > 0) *this << *(cpuStats); send(FALSE); reply->decrRefCount(); if (cpuStats != NULL) NADELETE(cpuStats, ExStatisticsArea, cpuStats->getHeap()); } else { ssmpGlobals_->allocateServers(); // Give the received message to SscpClientMsgStream SscpClientMsgStream *sscpMsgStream = new (heap_) SscpClientMsgStream(heap_, getIpcEnv(), ssmpGlobals_, this); ssmpGlobals_->addRecipients(sscpMsgStream); sscpMsgStream->clearAllObjects(); *sscpMsgStream << *request; sscpMsgStream->setMergeStartTime(NA_JulianTimestamp()); sscpMsgStream->setReqType(request->getReqType()); sscpMsgStream->setSubReqType(request->getSubReqType()); ssmpGlobals_->addPendingSscpMessage(sscpMsgStream); sscpMsgStream->send(FALSE); } request->decrRefCount(); } void SsmpNewIncomingConnectionStream::actOnExplainReq(IpcConnection *connection) { IpcMessageObjVersion msgVer; StmtStats *stmtStats; StatsGlobals *statsGlobals; RtsExplainFrag *explainFrag = NULL; RtsExplainFrag *srcExplainFrag; short currQueryNum = 0; msgVer = getNextObjVersion(); if (msgVer > currRtsExplainReqVersionNumber) // Send Error ; RtsExplainReq *request = new (getHeap()) 
RtsExplainReq(INVALID_RTS_HANDLE, getHeap()); *this >> *request; setHandle(request->getHandle()); clearAllObjects(); setType(RTS_MSG_EXPLAIN_REPLY); setVersion(currRtsExplainReplyVersionNumber); RtsExplainReply *reply = new (getHeap()) RtsExplainReply(request->getHandle(), getHeap()); *this << *reply; statsGlobals = ssmpGlobals_->getStatsGlobals(); int error = statsGlobals->getStatsSemaphore(ssmpGlobals_->getSemId(), ssmpGlobals_->myPin()); stmtStats = statsGlobals->getMasterStmtStats(request->getQid(), request->getQidLen(), RtsQueryId::ANY_QUERY_); if (stmtStats != NULL) { srcExplainFrag = stmtStats->getExplainInfo(); if (srcExplainFrag != NULL) explainFrag = new (getHeap()) RtsExplainFrag(getHeap(), srcExplainFrag); } statsGlobals->releaseStatsSemaphore(ssmpGlobals_->getSemId(), ssmpGlobals_->myPin()); if (explainFrag) *this << *(explainFrag); send(FALSE); reply->decrRefCount(); request->decrRefCount(); if (explainFrag) explainFrag->decrRefCount(); } void SsmpNewIncomingConnectionStream::sscpIpcError(IpcConnection *conn) { if (sscpDiagsArea_ == NULL) sscpDiagsArea_ = ComDiagsArea::allocate(ssmpGlobals_->getHeap()); conn->populateDiagsArea(sscpDiagsArea_, ssmpGlobals_->getHeap()); } void SscpClientMsgStream::actOnStatsReply(IpcConnection *connection) { IpcMessageObjVersion msgVer; msgVer = getNextObjVersion(); int error; if (msgVer > currRtsStatsReplyVersionNumber) // Send Error ; RtsStatsReply *reply = new (getHeap()) RtsStatsReply(INVALID_RTS_HANDLE, getHeap()); *this >> *reply; incNumSqlProcs(reply->getNumSqlProcs()); incNumCpus(reply->getNumCpus()); while (moreObjects()) { RtsMessageObjType objType = (RtsMessageObjType) getNextObjType(); short reqType = getReqType(); switch (objType) { case IPC_SQL_STATS_AREA: { ExStatisticsArea *stats = new (getHeap()) ExStatisticsArea(getHeap()); // Get the query Id from IPC *this >> *stats; switch (reqType) { case SQLCLI_STATS_REQ_QID: { StatsGlobals *statsGlobals = ssmpGlobals_->getStatsGlobals(); error = statsGlobals->getStatsSemaphore(ssmpGlobals_->getSemId(), ssmpGlobals_->myPin()); if (mergedStats_ == NULL) { if (isReplySent()) mergedStats_ = new (getHeap()) ExStatisticsArea(getHeap(), 0, stats->getCollectStatsType(), stats->getOrigCollectStatsType()); else mergedStats_ = new (getSsmpGlobals()->getStatsHeap()) ExStatisticsArea(getSsmpGlobals()->getStatsHeap(), 0, stats->getCollectStatsType(), stats->getOrigCollectStatsType()); // Can we always assume that the stats is enabled ????? // The stats should be enabled for the DISPLAY STATISTICS or SELECT ...TABLE(STATSISTICS...) 
// to display something mergedStats_->setStatsEnabled(TRUE); } mergedStats_->merge(stats); statsGlobals->releaseStatsSemaphore(ssmpGlobals_->getSemId(), ssmpGlobals_->myPin()); break; } case SQLCLI_STATS_REQ_QID_DETAIL: case SQLCLI_STATS_REQ_CPU_OFFENDER: case SQLCLI_STATS_REQ_SE_OFFENDER: case SQLCLI_STATS_REQ_ET_OFFENDER: case SQLCLI_STATS_REQ_MEM_OFFENDER: case SQLCLI_STATS_REQ_RMS_INFO: { if (mergedStats_ == NULL) { mergedStats_ = new (getHeap()) ExStatisticsArea(getHeap(), 0, stats->getCollectStatsType(), stats->getOrigCollectStatsType()); mergedStats_->setStatsEnabled(TRUE); mergedStats_->setSubReqType(subReqType_); } mergedStats_->setDetailLevel(getDetailLevel()); mergedStats_->appendCpuStats(stats, TRUE); if (reqType == SQLCLI_STATS_REQ_QID_DETAIL && stats->getMasterStats() != NULL) { ExMasterStats *masterStats = new (getHeap()) ExMasterStats((NAHeap *)getHeap()); masterStats->copyContents(stats->getMasterStats()); mergedStats_->setMasterStats(masterStats); } break; } default: break; } NADELETE(stats, ExStatisticsArea, getHeap()); } break; default: break; } } reply->decrRefCount(); #ifdef _DEBUG_RTS cerr << "Ssmp Merged Stats " << mergedStats_ << " \n"; #endif } SscpClientMsgStream::~SscpClientMsgStream() { if (mergedStats_ != NULL) { NADELETE(mergedStats_, ExStatisticsArea, mergedStats_->getHeap()); } } // method called upon send complete void SscpClientMsgStream::actOnSendAllComplete() { // once all sends have completed, initiate a nowait receive from all // SSCPs that we sent this message to clearAllObjects(); receive(FALSE); } void SscpClientMsgStream::actOnReceive(IpcConnection* connection) { IpcMessageObjType replyType; if (connection->getErrorInfo() != 0) { if (ssmpStream_) ssmpStream_->sscpIpcError(connection); delinkConnection(connection); return; } else { numOfClientRequestsSent_--; // check for protocol errors ex_assert(getType() == IPC_MSG_SSCP_REPLY AND getVersion() == CurrSscpReplyMessageVersion AND moreObjects(), "Invalid message from client"); // take a look at the type of the first object in the message replyType = getNextObjType(); switch(replyType) { case RTS_MSG_STATS_REPLY: actOnStatsReply(connection); break; case CANCEL_QUERY_KILL_SERVERS_REPLY: { CancelQueryKillServersReply *reply = new (getHeap()) CancelQueryKillServersReply(INVALID_RTS_HANDLE, getHeap()); *this >> *reply; reply->decrRefCount(); break; } case IPC_MSG_RMS_REPLY: { RmsGenericReply *reply = new (getHeap()) RmsGenericReply(getHeap()); *this >> *reply; reply->decrRefCount(); break; } default: ex_assert(FALSE,"Invalid reply from client"); } } } void SscpClientMsgStream::actOnReceiveAllComplete() { // If we have received responses to all requests we sent out, we mark this stream as having // completed its work. The IPCEnv will call the destructor at a time when it is safe to do so. if (! isReplySent()) { switch (completionProcessing_) { case STATS: { sendMergedStats(); break; } case CB: { // Control broker - cancel/suspend/activate. Nothing to do. break; } case SIK: { replySik(); break; } case LL: { replyLL(); break; } default: { ex_assert(FALSE, "Unknown completionProcessing_ flag."); break; } } ssmpGlobals_->removePendingSscpMessage(this); } addToCompletedList(); } void SscpClientMsgStream::replySik() { RmsGenericReply *reply = new(getHeap()) RmsGenericReply(getHeap()); *ssmpStream_ << *reply; if (ssmpStream_->getSscpDiagsArea()) { // Pass errors from communication w/ SSCPs back to the // client. 
*ssmpStream_ << *(ssmpStream_->getSscpDiagsArea()); ssmpStream_->clearSscpDiagsArea(); } ssmpStream_->send(FALSE); reply->decrRefCount(); } void SscpClientMsgStream::replyLL() { RmsGenericReply *reply = new(getHeap()) RmsGenericReply(getHeap()); *ssmpStream_ << *reply; if (ssmpStream_->getSscpDiagsArea()) { // Pass errors from communication w/ SSCPs back to the // client. *ssmpStream_ << *(ssmpStream_->getSscpDiagsArea()); ssmpStream_->clearSscpDiagsArea(); } ssmpStream_->send(FALSE); reply->decrRefCount(); } void SscpClientMsgStream::sendMergedStats() { StmtStats *stmtStats; ExStatisticsArea *mergedStats; int error; stmtStats = getStmtStats(); short reqType = getReqType(); if (mergedStats_ == NULL && reqType == SQLCLI_STATS_REQ_QID && stmtStats != NULL && stmtStats->getMasterStats() != NULL) { StatsGlobals *statsGlobals = ssmpGlobals_->getStatsGlobals(); error = statsGlobals->getStatsSemaphore(ssmpGlobals_->getSemId(), ssmpGlobals_->myPin()); mergedStats_ = new (ssmpGlobals_->getStatsHeap()) ExStatisticsArea(ssmpGlobals_->getStatsHeap(), 0, stmtStats->getMasterStats()->getCollectStatsType()); // Can we always assume that the stats is enabled ????? // The stats should be enabled for the DISPLAY STATISTICS or SELECT ...TABLE(STATSISTICS...) // to display something mergedStats_->setStatsEnabled(TRUE); ExMasterStats *masterStats = new (ssmpGlobals_->getStatsHeap()) ExMasterStats(ssmpGlobals_->getStatsHeap()); masterStats->copyContents(stmtStats->getMasterStats()); mergedStats_->setMasterStats(masterStats); statsGlobals->releaseStatsSemaphore(ssmpGlobals_->getSemId(), ssmpGlobals_->myPin()); } ExMasterStats *masterStats; if (mergedStats_ != NULL) { masterStats = mergedStats_->getMasterStats(); if (masterStats != NULL) { masterStats->setNumCpus(getNumCpus()); } } // Get a reference to mergedStats_ before setReplySent() sets it zero mergedStats = mergedStats_; setReplySent(); ssmpStream_->sendMergedStats(mergedStats, getNumOfErrorRequests()+getNumOfClientRequestsPending()+ getSsmpGlobals()->getNumDeallocatedServers(), reqType, stmtStats, (reqType == SQLCLI_STATS_REQ_QID)); } void SscpClientMsgStream::delinkConnection(IpcConnection *conn) { char nodeName[MAX_SEGMENT_NAME_LEN+1]; IpcCpuNum cpu; numOfErrorRequests_++; numOfClientRequestsSent_--; conn->getOtherEnd().getNodeName().getNodeNameAsString(nodeName); cpu = conn->getOtherEnd().getCpuNum(); #ifdef _DEBUG cout << "delinking " << nodeName << " in CPU " << cpu << endl; #endif ssmpGlobals_->deAllocateServer(nodeName, (short)str_len(nodeName), (short)cpu); } void SsmpNewIncomingConnectionStream::sendMergedStats(ExStatisticsArea *mergedStats, short numErrors, short reqType, StmtStats *stmtStats, NABoolean updateMergeStats) { StatsGlobals *statsGlobals; int error; RtsStatsReply *reply = new (getHeap()) RtsStatsReply(getHandle(), getHeap()); reply->setNumSscpErrors(numErrors); setType(IPC_MSG_SSMP_REPLY); setVersion(CurrSsmpReplyMessageVersion); *this << *reply; switch (reqType) { case SQLCLI_STATS_REQ_QID: if (mergedStats != NULL) *this << *(mergedStats); send (FALSE); statsGlobals = ssmpGlobals_->getStatsGlobals(); error = statsGlobals->getStatsSemaphore(ssmpGlobals_->getSemId(), ssmpGlobals_->myPin()); if (stmtStats != NULL) { if (updateMergeStats) stmtStats->setMergedStats(mergedStats); stmtStats->setStmtStatsUsed(FALSE); if (isWmsProcess() && stmtStats->canbeGCed()) statsGlobals->removeQuery(stmtStats->getPid(), stmtStats, TRUE); } statsGlobals->releaseStatsSemaphore(ssmpGlobals_->getSemId(), ssmpGlobals_->myPin()); break; case 
SQLCLI_STATS_REQ_QID_DETAIL: if (mergedStats != NULL) *this << *(mergedStats); send (FALSE); if (stmtStats != NULL) stmtStats->setStmtStatsUsed(FALSE); break; case SQLCLI_STATS_REQ_CPU_OFFENDER: case SQLCLI_STATS_REQ_SE_OFFENDER: case SQLCLI_STATS_REQ_ET_OFFENDER: case SQLCLI_STATS_REQ_RMS_INFO: case SQLCLI_STATS_REQ_MEM_OFFENDER: case SQLCLI_STATS_REQ_PROCESS_INFO: if (mergedStats != NULL) *this << *(mergedStats); send (FALSE); break; case SQLCLI_STATS_REQ_PID: case SQLCLI_STATS_REQ_CPU: case SQLCLI_STATS_REQ_QID_INTERNAL: if (mergedStats != NULL && mergedStats->getMasterStats() != NULL) *this << *(mergedStats); send (FALSE); if (stmtStats != NULL) stmtStats->setStmtStatsUsed(FALSE); break; } reply->decrRefCount(); if (!(updateMergeStats) && mergedStats != NULL) { NADELETE(mergedStats, ExStatisticsArea, mergedStats->getHeap()); } } void SsmpClientMsgStream::actOnSend(IpcConnection* connection) { if (connection->getErrorInfo() != 0) stats_ = NULL; } void SsmpClientMsgStream::actOnSendAllComplete() { clearAllObjects(); receive(FALSE); } void SsmpClientMsgStream::actOnReceive(IpcConnection* connection) { replyRecvd_ = TRUE; if (connection->getErrorInfo() != 0) { stats_ = NULL; if (diagsForClient_) connection->populateDiagsArea(diagsForClient_, getHeap()); delinkConnection(connection); return; } // take a look at the type of the first object in the message switch(getNextObjType()) { case RTS_MSG_STATS_REPLY: actOnStatsReply(connection); break; case RTS_MSG_EXPLAIN_REPLY: actOnExplainReply(connection); break; case IPC_MSG_RMS_REPLY: actOnGenericReply(); break; default: ex_assert(FALSE,"Invalid reply from first client message"); } } void SsmpClientMsgStream::actOnReceiveAllComplete() { // We mark this stream as having completed its work. The IPCEnv will // call the destructor at a time when it is safe to do so. 
addToCompletedList(); } void SsmpClientMsgStream::actOnStatsReply(IpcConnection* connection) { IpcMessageObjVersion msgVer; msgVer = getNextObjVersion(); if (msgVer > currRtsStatsReplyVersionNumber) // Send Error ; RtsStatsReply *reply = new (getHeap()) RtsStatsReply(INVALID_RTS_HANDLE, getHeap()); *this >> *reply; numSscpReqFailed_ = reply->getNumSscpErrors(); if (moreObjects()) { RtsMessageObjType objType = (RtsMessageObjType) getNextObjType(); switch (objType) { case IPC_SQL_STATS_AREA: { stats_ = new (getHeap()) ExStatisticsArea(getHeap()); *this >> *stats_; } break; case RTS_QUERY_ID: { rtsQueryId_ = new (getHeap()) RtsQueryId(getHeap()); *this >> *rtsQueryId_; } break; default: break; } } else stats_ = NULL; reply->decrRefCount(); // we don't want to decrement the stats, since we want to pass it on } void SsmpClientMsgStream::actOnExplainReply(IpcConnection* connection) { IpcMessageObjVersion msgVer; msgVer = getNextObjVersion(); if (msgVer > currRtsExplainReplyVersionNumber) // Send Error ; RtsExplainReply *reply = new (getHeap()) RtsExplainReply(INVALID_RTS_HANDLE, getHeap()); *this >> *reply; numSscpReqFailed_ = 0; if (moreObjects()) { RtsMessageObjType objType = (RtsMessageObjType) getNextObjType(); switch (objType) { case RTS_EXPLAIN_FRAG: { explainFrag_ = new (getHeap()) RtsExplainFrag(getHeap()); *this >> *explainFrag_; } break; default: break; } } else explainFrag_ = NULL; reply->decrRefCount(); } void SsmpClientMsgStream::delinkConnection(IpcConnection *conn) { char nodeName[MAX_SEGMENT_NAME_LEN+1]; IpcCpuNum cpu; conn->getOtherEnd().getNodeName().getNodeNameAsString(nodeName); cpu = conn->getOtherEnd().getCpuNum(); ssmpManager_->removeSsmpServer(nodeName, (short)cpu); } void SsmpClientMsgStream::actOnGenericReply() { RmsGenericReply *reply = new (getHeap()) RmsGenericReply(getHeap()); *this >> *reply; while (moreObjects()) { IpcMessageObjType objType = getNextObjType(); ex_assert(objType == IPC_SQL_DIAG_AREA, "Unknown object returned from mxssmp."); if (diagsForClient_) *this >> *diagsForClient_; } reply->decrRefCount(); }
1
22,806
We would want ssmp to wake up every 3 seconds if there are no other requests to it and do some cleanup tasks. So it is not clear why you want to set this to 0.
apache-trafodion
cpp
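The review comment above describes an idle-wakeup pattern: when no requests arrive, the ssmp process should still wake up on a short interval (3 seconds) and run its cleanup tasks, which is why a zero wait value is questionable. The following is a minimal, hypothetical Go sketch of that pattern only, not the Trafodion C++ code; the names workCh, runCleanup, and idleWakeup are invented for illustration.

package main

import (
	"fmt"
	"time"
)

// serve handles requests as they arrive, but never blocks longer than
// idleWakeup, so housekeeping still runs while the process is idle.
func serve(workCh <-chan string, stop <-chan struct{}) {
	const idleWakeup = 3 * time.Second // the interval the reviewer refers to

	for {
		select {
		case req := <-workCh:
			fmt.Println("handling request:", req)
		case <-time.After(idleWakeup):
			runCleanup() // e.g. finish timed-out merges, release stale entries
		case <-stop:
			return
		}
	}
}

func runCleanup() {
	fmt.Println("idle wakeup: running cleanup tasks")
}

func main() {
	work := make(chan string)
	stop := make(chan struct{})
	go serve(work, stop)

	work <- "query-1"           // one request is handled immediately
	time.Sleep(4 * time.Second) // long enough to observe one idle wakeup
	close(stop)
}

In this sketch, shrinking idleWakeup to zero turns the timeout case into a busy loop, and dropping it entirely leaves the loop blocked until the next request, so the periodic cleanup the reviewer mentions would no longer run on a predictable schedule.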
@@ -252,6 +252,13 @@ func Compile(pkgName string, machine llvm.TargetMachine, config *compileopts.Con c.loadASTComments(lprogram) + // Forcibly preload special types. + runtimePkg := c.ir.Program.ImportedPackage("runtime") + c.getLLVMType(runtimePkg.Type("_interface").Type()) + c.getLLVMType(runtimePkg.Type("_string").Type()) + c.getLLVMType(runtimePkg.Type("hashmap").Type()) + c.getLLVMType(runtimePkg.Type("channel").Type()) + // Declare runtime types. // TODO: lazily create runtime types in getLLVMRuntimeType when they are // needed. Eventually this will be required anyway, when packages are
1
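The patch above forces the runtime's special named types (_interface, _string, hashmap, channel) to be created before the general type-declaration loop runs. In the file below, getLLVMRuntimeType finds these structs via mod.GetTypeByName and panics when the name is missing, so the named LLVM struct has to exist before anything refers to it. Below is a small, hypothetical sketch of that create-before-lookup dependency using the same tinygo.org/x/go-llvm calls the file itself uses (StructCreateNamed, GetTypeByName); the module name and the struct body are invented for illustration and do not match the real runtime layout.

package main

import (
	"fmt"

	"tinygo.org/x/go-llvm"
)

func main() {
	ctx := llvm.NewContext()
	mod := ctx.NewModule("preload-demo")

	// Before the type is created, a lookup by name comes back nil; this is
	// the case the compiler guards against with a panic in getLLVMRuntimeType.
	if mod.GetTypeByName("runtime._string").IsNil() {
		fmt.Println("runtime._string is not declared yet")
	}

	// "Preload" the named struct, which is what calling getLLVMType on the
	// runtime type achieves in the patch.
	str := ctx.StructCreateNamed("runtime._string")
	str.StructSetBody([]llvm.Type{llvm.PointerType(ctx.Int8Type(), 0), ctx.Int64Type()}, false)

	// After preloading, the lookup by name succeeds and later types can
	// safely embed or point to it.
	if !mod.GetTypeByName("runtime._string").IsNil() {
		fmt.Println("runtime._string is now available")
	}
}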
package compiler import ( "debug/dwarf" "errors" "fmt" "go/ast" "go/build" "go/constant" "go/token" "go/types" "os" "path/filepath" "strconv" "strings" "github.com/tinygo-org/tinygo/compileopts" "github.com/tinygo-org/tinygo/compiler/llvmutil" "github.com/tinygo-org/tinygo/goenv" "github.com/tinygo-org/tinygo/ir" "github.com/tinygo-org/tinygo/loader" "golang.org/x/tools/go/ssa" "tinygo.org/x/go-llvm" ) func init() { llvm.InitializeAllTargets() llvm.InitializeAllTargetMCs() llvm.InitializeAllTargetInfos() llvm.InitializeAllAsmParsers() llvm.InitializeAllAsmPrinters() } // The TinyGo import path. const tinygoPath = "github.com/tinygo-org/tinygo" // compilerContext contains function-independent data that should still be // available while compiling every function. It is not strictly read-only, but // must not contain function-dependent data such as an IR builder. type compilerContext struct { *compileopts.Config mod llvm.Module ctx llvm.Context dibuilder *llvm.DIBuilder cu llvm.Metadata difiles map[string]llvm.Metadata ditypes map[types.Type]llvm.Metadata machine llvm.TargetMachine targetData llvm.TargetData intType llvm.Type i8ptrType llvm.Type // for convenience funcPtrAddrSpace int uintptrType llvm.Type ir *ir.Program diagnostics []error astComments map[string]*ast.CommentGroup } // builder contains all information relevant to build a single function. type builder struct { *compilerContext llvm.Builder fn *ir.Function locals map[ssa.Value]llvm.Value // local variables blockEntries map[*ssa.BasicBlock]llvm.BasicBlock // a *ssa.BasicBlock may be split up blockExits map[*ssa.BasicBlock]llvm.BasicBlock // these are the exit blocks currentBlock *ssa.BasicBlock phis []Phi taskHandle llvm.Value deferPtr llvm.Value difunc llvm.Metadata dilocals map[*types.Var]llvm.Metadata allDeferFuncs []interface{} deferFuncs map[*ir.Function]int deferInvokeFuncs map[string]int deferClosureFuncs map[*ir.Function]int selectRecvBuf map[*ssa.Select]llvm.Value } type Phi struct { ssa *ssa.Phi llvm llvm.Value } // NewTargetMachine returns a new llvm.TargetMachine based on the passed-in // configuration. It is used by the compiler and is needed for machine code // emission. func NewTargetMachine(config *compileopts.Config) (llvm.TargetMachine, error) { target, err := llvm.GetTargetFromTriple(config.Triple()) if err != nil { return llvm.TargetMachine{}, err } features := strings.Join(config.Features(), ",") machine := target.CreateTargetMachine(config.Triple(), config.CPU(), features, llvm.CodeGenLevelDefault, llvm.RelocStatic, llvm.CodeModelDefault) return machine, nil } // Compile the given package path or .go file path. Return an error when this // fails (in any stage). If successful it returns the LLVM module and a list of // extra C files to be compiled. If not, one or more errors will be returned. // // The fact that it returns a list of filenames to compile is a layering // violation. Eventually, this Compile function should only compile a single // package and not the whole program, and loading of the program (including CGo // processing) should be moved outside the compiler package. 
func Compile(pkgName string, machine llvm.TargetMachine, config *compileopts.Config) (llvm.Module, []string, []error) { c := &compilerContext{ Config: config, difiles: make(map[string]llvm.Metadata), ditypes: make(map[types.Type]llvm.Metadata), machine: machine, targetData: machine.CreateTargetData(), } c.ctx = llvm.NewContext() c.mod = c.ctx.NewModule(pkgName) c.mod.SetTarget(config.Triple()) c.mod.SetDataLayout(c.targetData.String()) if c.Debug() { c.dibuilder = llvm.NewDIBuilder(c.mod) } c.uintptrType = c.ctx.IntType(c.targetData.PointerSize() * 8) if c.targetData.PointerSize() <= 4 { // 8, 16, 32 bits targets c.intType = c.ctx.Int32Type() } else if c.targetData.PointerSize() == 8 { // 64 bits target c.intType = c.ctx.Int64Type() } else { panic("unknown pointer size") } c.i8ptrType = llvm.PointerType(c.ctx.Int8Type(), 0) dummyFuncType := llvm.FunctionType(c.ctx.VoidType(), nil, false) dummyFunc := llvm.AddFunction(c.mod, "tinygo.dummy", dummyFuncType) c.funcPtrAddrSpace = dummyFunc.Type().PointerAddressSpace() dummyFunc.EraseFromParentAsFunction() // Prefix the GOPATH with the system GOROOT, as GOROOT is already set to // the TinyGo root. overlayGopath := goenv.Get("GOPATH") if overlayGopath == "" { overlayGopath = goenv.Get("GOROOT") } else { overlayGopath = goenv.Get("GOROOT") + string(filepath.ListSeparator) + overlayGopath } wd, err := os.Getwd() if err != nil { return c.mod, nil, []error{err} } lprogram := &loader.Program{ Build: &build.Context{ GOARCH: c.GOARCH(), GOOS: c.GOOS(), GOROOT: goenv.Get("GOROOT"), GOPATH: goenv.Get("GOPATH"), CgoEnabled: c.CgoEnabled(), UseAllFiles: false, Compiler: "gc", // must be one of the recognized compilers BuildTags: c.BuildTags(), }, OverlayBuild: &build.Context{ GOARCH: c.GOARCH(), GOOS: c.GOOS(), GOROOT: goenv.Get("TINYGOROOT"), GOPATH: overlayGopath, CgoEnabled: c.CgoEnabled(), UseAllFiles: false, Compiler: "gc", // must be one of the recognized compilers BuildTags: c.BuildTags(), }, OverlayPath: func(path string) string { // Return the (overlay) import path when it should be overlaid, and // "" if it should not. if strings.HasPrefix(path, tinygoPath+"/src/") { // Avoid issues with packages that are imported twice, one from // GOPATH and one from TINYGOPATH. 
path = path[len(tinygoPath+"/src/"):] } switch path { case "machine", "os", "reflect", "runtime", "runtime/interrupt", "runtime/volatile", "sync", "testing", "internal/reflectlite", "internal/task": return path default: if strings.HasPrefix(path, "device/") || strings.HasPrefix(path, "examples/") { return path } else if path == "syscall" { for _, tag := range c.BuildTags() { if tag == "baremetal" || tag == "darwin" { return path } } } } return "" }, TypeChecker: types.Config{ Sizes: &StdSizes{ IntSize: int64(c.targetData.TypeAllocSize(c.intType)), PtrSize: int64(c.targetData.PointerSize()), MaxAlign: int64(c.targetData.PrefTypeAlignment(c.i8ptrType)), }, }, Dir: wd, TINYGOROOT: goenv.Get("TINYGOROOT"), CFlags: c.CFlags(), ClangHeaders: c.ClangHeaders, } if strings.HasSuffix(pkgName, ".go") { _, err = lprogram.ImportFile(pkgName) if err != nil { return c.mod, nil, []error{err} } } else { _, err = lprogram.Import(pkgName, wd, token.Position{ Filename: "build command-line-arguments", }) if err != nil { return c.mod, nil, []error{err} } } _, err = lprogram.Import("runtime", "", token.Position{ Filename: "build default import", }) if err != nil { return c.mod, nil, []error{err} } err = lprogram.Parse(c.TestConfig.CompileTestBinary) if err != nil { return c.mod, nil, []error{err} } c.ir = ir.NewProgram(lprogram, pkgName) // Run a simple dead code elimination pass. c.ir.SimpleDCE() // Initialize debug information. if c.Debug() { c.cu = c.dibuilder.CreateCompileUnit(llvm.DICompileUnit{ Language: 0xb, // DW_LANG_C99 (0xc, off-by-one?) File: pkgName, Dir: "", Producer: "TinyGo", Optimized: true, }) } c.loadASTComments(lprogram) // Declare runtime types. // TODO: lazily create runtime types in getLLVMRuntimeType when they are // needed. Eventually this will be required anyway, when packages are // compiled independently (and the runtime types are not available). for _, member := range c.ir.Program.ImportedPackage("runtime").Members { if member, ok := member.(*ssa.Type); ok { if typ, ok := member.Type().(*types.Named); ok { if _, ok := typ.Underlying().(*types.Struct); ok { c.getLLVMType(typ) } } } } // Declare all functions. for _, f := range c.ir.Functions { c.createFunctionDeclaration(f) } // Add definitions to declarations. var initFuncs []llvm.Value irbuilder := c.ctx.NewBuilder() defer irbuilder.Dispose() for _, f := range c.ir.Functions { if f.Synthetic == "package initializer" { initFuncs = append(initFuncs, f.LLVMFn) } if f.CName() != "" { continue } if f.Blocks == nil { continue // external function } // Create the function definition. b := builder{ compilerContext: c, Builder: irbuilder, fn: f, locals: make(map[ssa.Value]llvm.Value), dilocals: make(map[*types.Var]llvm.Metadata), blockEntries: make(map[*ssa.BasicBlock]llvm.BasicBlock), blockExits: make(map[*ssa.BasicBlock]llvm.BasicBlock), } b.createFunctionDefinition() } // After all packages are imported, add a synthetic initializer function // that calls the initializer of each package. 
	initFn := c.ir.GetFunction(c.ir.Program.ImportedPackage("runtime").Members["initAll"].(*ssa.Function))
	initFn.LLVMFn.SetLinkage(llvm.InternalLinkage)
	initFn.LLVMFn.SetUnnamedAddr(true)
	if c.Debug() {
		difunc := c.attachDebugInfo(initFn)
		pos := c.ir.Program.Fset.Position(initFn.Pos())
		irbuilder.SetCurrentDebugLocation(uint(pos.Line), uint(pos.Column), difunc, llvm.Metadata{})
	}
	block := c.ctx.AddBasicBlock(initFn.LLVMFn, "entry")
	irbuilder.SetInsertPointAtEnd(block)
	for _, fn := range initFuncs {
		irbuilder.CreateCall(fn, []llvm.Value{llvm.Undef(c.i8ptrType), llvm.Undef(c.i8ptrType)}, "")
	}
	irbuilder.CreateRetVoid()

	// Conserve for goroutine lowering. Without marking these as external, they
	// would be optimized away.
	realMain := c.mod.NamedFunction(c.ir.MainPkg().Pkg.Path() + ".main")
	realMain.SetLinkage(llvm.ExternalLinkage) // keep alive until goroutine lowering

	// Replace callMain placeholder with actual main function.
	c.mod.NamedFunction("runtime.callMain").ReplaceAllUsesWith(realMain)

	// Load some attributes
	getAttr := func(attrName string) llvm.Attribute {
		attrKind := llvm.AttributeKindID(attrName)
		return c.ctx.CreateEnumAttribute(attrKind, 0)
	}
	nocapture := getAttr("nocapture")
	readonly := getAttr("readonly")

	// Tell the optimizer that runtime.alloc is an allocator, meaning that it
	// returns values that are never null and never alias to an existing value.
	for _, attrName := range []string{"noalias", "nonnull"} {
		c.mod.NamedFunction("runtime.alloc").AddAttributeAtIndex(0, getAttr(attrName))
	}

	// On *nix systems, the "abort" function in libc is used to handle fatal panics.
	// Mark it as noreturn so LLVM can optimize away code.
	if abort := c.mod.NamedFunction("abort"); !abort.IsNil() && abort.IsDeclaration() {
		abort.AddFunctionAttr(getAttr("noreturn"))
	}

	// This function is necessary for tracking pointers on the stack in a
	// portable way (see gc.go). Indicate to the optimizer that the only thing
	// we'll do is read the pointer.
	trackPointer := c.mod.NamedFunction("runtime.trackPointer")
	if !trackPointer.IsNil() {
		trackPointer.AddAttributeAtIndex(1, nocapture)
		trackPointer.AddAttributeAtIndex(1, readonly)
	}

	// see: https://reviews.llvm.org/D18355
	if c.Debug() {
		c.mod.AddNamedMetadataOperand("llvm.module.flags",
			c.ctx.MDNode([]llvm.Metadata{
				llvm.ConstInt(c.ctx.Int32Type(), 1, false).ConstantAsMetadata(), // Error on mismatch
				llvm.GlobalContext().MDString("Debug Info Version"),
				llvm.ConstInt(c.ctx.Int32Type(), 3, false).ConstantAsMetadata(), // DWARF version
			}),
		)
		c.mod.AddNamedMetadataOperand("llvm.module.flags",
			c.ctx.MDNode([]llvm.Metadata{
				llvm.ConstInt(c.ctx.Int32Type(), 1, false).ConstantAsMetadata(),
				llvm.GlobalContext().MDString("Dwarf Version"),
				llvm.ConstInt(c.ctx.Int32Type(), 4, false).ConstantAsMetadata(),
			}),
		)
		c.dibuilder.Finalize()
	}

	// Gather the list of (C) file paths that should be included in the build.
	var extraFiles []string
	for _, pkg := range c.ir.LoaderProgram.Sorted() {
		for _, file := range pkg.CFiles {
			extraFiles = append(extraFiles, filepath.Join(pkg.Package.Dir, file))
		}
	}

	return c.mod, extraFiles, c.diagnostics
}

// getLLVMRuntimeType obtains a named type from the runtime package and returns
// it as a LLVM type, creating it if necessary. It is a shorthand for
// getLLVMType(getRuntimeType(name)).
func (c *compilerContext) getLLVMRuntimeType(name string) llvm.Type {
	fullName := "runtime."
+ name typ := c.mod.GetTypeByName(fullName) if typ.IsNil() { println(c.mod.String()) panic("could not find runtime type: " + fullName) } return typ } // getLLVMType creates and returns a LLVM type for a Go type. In the case of // named struct types (or Go types implemented as named LLVM structs such as // strings) it also creates it first if necessary. func (c *compilerContext) getLLVMType(goType types.Type) llvm.Type { switch typ := goType.(type) { case *types.Array: elemType := c.getLLVMType(typ.Elem()) return llvm.ArrayType(elemType, int(typ.Len())) case *types.Basic: switch typ.Kind() { case types.Bool, types.UntypedBool: return c.ctx.Int1Type() case types.Int8, types.Uint8: return c.ctx.Int8Type() case types.Int16, types.Uint16: return c.ctx.Int16Type() case types.Int32, types.Uint32: return c.ctx.Int32Type() case types.Int, types.Uint: return c.intType case types.Int64, types.Uint64: return c.ctx.Int64Type() case types.Float32: return c.ctx.FloatType() case types.Float64: return c.ctx.DoubleType() case types.Complex64: return c.ctx.StructType([]llvm.Type{c.ctx.FloatType(), c.ctx.FloatType()}, false) case types.Complex128: return c.ctx.StructType([]llvm.Type{c.ctx.DoubleType(), c.ctx.DoubleType()}, false) case types.String, types.UntypedString: return c.getLLVMRuntimeType("_string") case types.Uintptr: return c.uintptrType case types.UnsafePointer: return c.i8ptrType default: panic("unknown basic type: " + typ.String()) } case *types.Chan: return llvm.PointerType(c.getLLVMRuntimeType("channel"), 0) case *types.Interface: return c.getLLVMRuntimeType("_interface") case *types.Map: return llvm.PointerType(c.getLLVMRuntimeType("hashmap"), 0) case *types.Named: if st, ok := typ.Underlying().(*types.Struct); ok { // Structs are a special case. While other named types are ignored // in LLVM IR, named structs are implemented as named structs in // LLVM. This is because it is otherwise impossible to create // self-referencing types such as linked lists. llvmName := typ.Obj().Pkg().Path() + "." + typ.Obj().Name() llvmType := c.mod.GetTypeByName(llvmName) if llvmType.IsNil() { llvmType = c.ctx.StructCreateNamed(llvmName) underlying := c.getLLVMType(st) llvmType.StructSetBody(underlying.StructElementTypes(), false) } return llvmType } return c.getLLVMType(typ.Underlying()) case *types.Pointer: ptrTo := c.getLLVMType(typ.Elem()) return llvm.PointerType(ptrTo, 0) case *types.Signature: // function value return c.getFuncType(typ) case *types.Slice: elemType := c.getLLVMType(typ.Elem()) members := []llvm.Type{ llvm.PointerType(elemType, 0), c.uintptrType, // len c.uintptrType, // cap } return c.ctx.StructType(members, false) case *types.Struct: members := make([]llvm.Type, typ.NumFields()) for i := 0; i < typ.NumFields(); i++ { members[i] = c.getLLVMType(typ.Field(i).Type()) } return c.ctx.StructType(members, false) case *types.Tuple: members := make([]llvm.Type, typ.Len()) for i := 0; i < typ.Len(); i++ { members[i] = c.getLLVMType(typ.At(i).Type()) } return c.ctx.StructType(members, false) default: panic("unknown type: " + goType.String()) } } // Is this a pointer type of some sort? Can be unsafe.Pointer or any *T pointer. func isPointer(typ types.Type) bool { if _, ok := typ.(*types.Pointer); ok { return true } else if typ, ok := typ.(*types.Basic); ok && typ.Kind() == types.UnsafePointer { return true } else { return false } } // Get the DWARF type for this Go type. 
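// Results are cached in c.ditypes, and createDIType breaks cycles for struct
// types with a temporary placeholder node, so self-referencing types terminate.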
func (c *compilerContext) getDIType(typ types.Type) llvm.Metadata { if md, ok := c.ditypes[typ]; ok { return md } md := c.createDIType(typ) c.ditypes[typ] = md return md } // createDIType creates a new DWARF type. Don't call this function directly, // call getDIType instead. func (c *compilerContext) createDIType(typ types.Type) llvm.Metadata { llvmType := c.getLLVMType(typ) sizeInBytes := c.targetData.TypeAllocSize(llvmType) switch typ := typ.(type) { case *types.Array: return c.dibuilder.CreateArrayType(llvm.DIArrayType{ SizeInBits: sizeInBytes * 8, AlignInBits: uint32(c.targetData.ABITypeAlignment(llvmType)) * 8, ElementType: c.getDIType(typ.Elem()), Subscripts: []llvm.DISubrange{ llvm.DISubrange{ Lo: 0, Count: typ.Len(), }, }, }) case *types.Basic: var encoding llvm.DwarfTypeEncoding if typ.Info()&types.IsBoolean != 0 { encoding = llvm.DW_ATE_boolean } else if typ.Info()&types.IsFloat != 0 { encoding = llvm.DW_ATE_float } else if typ.Info()&types.IsComplex != 0 { encoding = llvm.DW_ATE_complex_float } else if typ.Info()&types.IsUnsigned != 0 { encoding = llvm.DW_ATE_unsigned } else if typ.Info()&types.IsInteger != 0 { encoding = llvm.DW_ATE_signed } else if typ.Kind() == types.UnsafePointer { return c.dibuilder.CreatePointerType(llvm.DIPointerType{ Name: "unsafe.Pointer", SizeInBits: c.targetData.TypeAllocSize(llvmType) * 8, AlignInBits: uint32(c.targetData.ABITypeAlignment(llvmType)) * 8, AddressSpace: 0, }) } else if typ.Info()&types.IsString != 0 { return c.dibuilder.CreateStructType(llvm.Metadata{}, llvm.DIStructType{ Name: "string", SizeInBits: sizeInBytes * 8, AlignInBits: uint32(c.targetData.ABITypeAlignment(llvmType)) * 8, Elements: []llvm.Metadata{ c.dibuilder.CreateMemberType(llvm.Metadata{}, llvm.DIMemberType{ Name: "ptr", SizeInBits: c.targetData.TypeAllocSize(c.i8ptrType) * 8, AlignInBits: uint32(c.targetData.ABITypeAlignment(c.i8ptrType)) * 8, OffsetInBits: 0, Type: c.getDIType(types.NewPointer(types.Typ[types.Byte])), }), c.dibuilder.CreateMemberType(llvm.Metadata{}, llvm.DIMemberType{ Name: "len", SizeInBits: c.targetData.TypeAllocSize(c.uintptrType) * 8, AlignInBits: uint32(c.targetData.ABITypeAlignment(c.uintptrType)) * 8, OffsetInBits: c.targetData.ElementOffset(llvmType, 1) * 8, Type: c.getDIType(types.Typ[types.Uintptr]), }), }, }) } else { panic("unknown basic type") } return c.dibuilder.CreateBasicType(llvm.DIBasicType{ Name: typ.String(), SizeInBits: sizeInBytes * 8, Encoding: encoding, }) case *types.Chan: return c.getDIType(types.NewPointer(c.ir.Program.ImportedPackage("runtime").Members["channel"].(*ssa.Type).Type())) case *types.Interface: return c.getDIType(c.ir.Program.ImportedPackage("runtime").Members["_interface"].(*ssa.Type).Type()) case *types.Map: return c.getDIType(types.NewPointer(c.ir.Program.ImportedPackage("runtime").Members["hashmap"].(*ssa.Type).Type())) case *types.Named: return c.dibuilder.CreateTypedef(llvm.DITypedef{ Type: c.getDIType(typ.Underlying()), Name: typ.String(), }) case *types.Pointer: return c.dibuilder.CreatePointerType(llvm.DIPointerType{ Pointee: c.getDIType(typ.Elem()), SizeInBits: c.targetData.TypeAllocSize(llvmType) * 8, AlignInBits: uint32(c.targetData.ABITypeAlignment(llvmType)) * 8, AddressSpace: 0, }) case *types.Signature: // actually a closure fields := llvmType.StructElementTypes() return c.dibuilder.CreateStructType(llvm.Metadata{}, llvm.DIStructType{ SizeInBits: sizeInBytes * 8, AlignInBits: uint32(c.targetData.ABITypeAlignment(llvmType)) * 8, Elements: []llvm.Metadata{ 
c.dibuilder.CreateMemberType(llvm.Metadata{}, llvm.DIMemberType{ Name: "context", SizeInBits: c.targetData.TypeAllocSize(fields[1]) * 8, AlignInBits: uint32(c.targetData.ABITypeAlignment(fields[1])) * 8, OffsetInBits: 0, Type: c.getDIType(types.Typ[types.UnsafePointer]), }), c.dibuilder.CreateMemberType(llvm.Metadata{}, llvm.DIMemberType{ Name: "fn", SizeInBits: c.targetData.TypeAllocSize(fields[0]) * 8, AlignInBits: uint32(c.targetData.ABITypeAlignment(fields[0])) * 8, OffsetInBits: c.targetData.ElementOffset(llvmType, 1) * 8, Type: c.getDIType(types.Typ[types.UnsafePointer]), }), }, }) case *types.Slice: fields := llvmType.StructElementTypes() return c.dibuilder.CreateStructType(llvm.Metadata{}, llvm.DIStructType{ Name: typ.String(), SizeInBits: sizeInBytes * 8, AlignInBits: uint32(c.targetData.ABITypeAlignment(llvmType)) * 8, Elements: []llvm.Metadata{ c.dibuilder.CreateMemberType(llvm.Metadata{}, llvm.DIMemberType{ Name: "ptr", SizeInBits: c.targetData.TypeAllocSize(fields[0]) * 8, AlignInBits: uint32(c.targetData.ABITypeAlignment(fields[0])) * 8, OffsetInBits: 0, Type: c.getDIType(types.NewPointer(typ.Elem())), }), c.dibuilder.CreateMemberType(llvm.Metadata{}, llvm.DIMemberType{ Name: "len", SizeInBits: c.targetData.TypeAllocSize(c.uintptrType) * 8, AlignInBits: uint32(c.targetData.ABITypeAlignment(c.uintptrType)) * 8, OffsetInBits: c.targetData.ElementOffset(llvmType, 1) * 8, Type: c.getDIType(types.Typ[types.Uintptr]), }), c.dibuilder.CreateMemberType(llvm.Metadata{}, llvm.DIMemberType{ Name: "cap", SizeInBits: c.targetData.TypeAllocSize(c.uintptrType) * 8, AlignInBits: uint32(c.targetData.ABITypeAlignment(c.uintptrType)) * 8, OffsetInBits: c.targetData.ElementOffset(llvmType, 2) * 8, Type: c.getDIType(types.Typ[types.Uintptr]), }), }, }) case *types.Struct: // Placeholder metadata node, to be replaced afterwards. temporaryMDNode := c.dibuilder.CreateReplaceableCompositeType(llvm.Metadata{}, llvm.DIReplaceableCompositeType{ Tag: dwarf.TagStructType, SizeInBits: sizeInBytes * 8, AlignInBits: uint32(c.targetData.ABITypeAlignment(llvmType)) * 8, }) c.ditypes[typ] = temporaryMDNode elements := make([]llvm.Metadata, typ.NumFields()) for i := range elements { field := typ.Field(i) fieldType := field.Type() llvmField := c.getLLVMType(fieldType) elements[i] = c.dibuilder.CreateMemberType(llvm.Metadata{}, llvm.DIMemberType{ Name: field.Name(), SizeInBits: c.targetData.TypeAllocSize(llvmField) * 8, AlignInBits: uint32(c.targetData.ABITypeAlignment(llvmField)) * 8, OffsetInBits: c.targetData.ElementOffset(llvmType, i) * 8, Type: c.getDIType(fieldType), }) } md := c.dibuilder.CreateStructType(llvm.Metadata{}, llvm.DIStructType{ SizeInBits: sizeInBytes * 8, AlignInBits: uint32(c.targetData.ABITypeAlignment(llvmType)) * 8, Elements: elements, }) temporaryMDNode.ReplaceAllUsesWith(md) return md default: panic("unknown type while generating DWARF debug type: " + typ.String()) } } // getLocalVariable returns a debug info entry for a local variable, which may // either be a parameter or a regular variable. It will create a new metadata // entry if there isn't one for the variable yet. func (b *builder) getLocalVariable(variable *types.Var) llvm.Metadata { if dilocal, ok := b.dilocals[variable]; ok { // DILocalVariable was already created, return it directly. return dilocal } pos := b.ir.Program.Fset.Position(variable.Pos()) // Check whether this is a function parameter. for i, param := range b.fn.Params { if param.Object().(*types.Var) == variable { // Yes it is, create it as a function parameter. 
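// Note that DWARF numbers formal parameters starting at 1, hence ArgNo: i + 1 below.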
dilocal := b.dibuilder.CreateParameterVariable(b.difunc, llvm.DIParameterVariable{ Name: param.Name(), File: b.getDIFile(pos.Filename), Line: pos.Line, Type: b.getDIType(variable.Type()), AlwaysPreserve: true, ArgNo: i + 1, }) b.dilocals[variable] = dilocal return dilocal } } // No, it's not a parameter. Create a regular (auto) variable. dilocal := b.dibuilder.CreateAutoVariable(b.difunc, llvm.DIAutoVariable{ Name: variable.Name(), File: b.getDIFile(pos.Filename), Line: pos.Line, Type: b.getDIType(variable.Type()), AlwaysPreserve: true, }) b.dilocals[variable] = dilocal return dilocal } // createFunctionDeclaration creates a LLVM function declaration without body. // It can later be filled with frame.createFunctionDefinition(). func (c *compilerContext) createFunctionDeclaration(f *ir.Function) { var retType llvm.Type if f.Signature.Results() == nil { retType = c.ctx.VoidType() } else if f.Signature.Results().Len() == 1 { retType = c.getLLVMType(f.Signature.Results().At(0).Type()) } else { results := make([]llvm.Type, 0, f.Signature.Results().Len()) for i := 0; i < f.Signature.Results().Len(); i++ { results = append(results, c.getLLVMType(f.Signature.Results().At(i).Type())) } retType = c.ctx.StructType(results, false) } var paramTypes []llvm.Type var paramTypeVariants []paramFlags for _, param := range f.Params { paramType := c.getLLVMType(param.Type()) paramTypeFragments, paramTypeFragmentVariants := expandFormalParamType(paramType, param.Type()) paramTypes = append(paramTypes, paramTypeFragments...) paramTypeVariants = append(paramTypeVariants, paramTypeFragmentVariants...) } // Add an extra parameter as the function context. This context is used in // closures and bound methods, but should be optimized away when not used. if !f.IsExported() { paramTypes = append(paramTypes, c.i8ptrType) // context paramTypes = append(paramTypes, c.i8ptrType) // parent coroutine paramTypeVariants = append(paramTypeVariants, 0, 0) } fnType := llvm.FunctionType(retType, paramTypes, false) name := f.LinkName() f.LLVMFn = c.mod.NamedFunction(name) if f.LLVMFn.IsNil() { f.LLVMFn = llvm.AddFunction(c.mod, name, fnType) } dereferenceableOrNullKind := llvm.AttributeKindID("dereferenceable_or_null") for i, typ := range paramTypes { if paramTypeVariants[i]&paramIsDeferenceableOrNull == 0 { continue } if typ.TypeKind() == llvm.PointerTypeKind { el := typ.ElementType() size := c.targetData.TypeAllocSize(el) if size == 0 { // dereferenceable_or_null(0) appears to be illegal in LLVM. continue } dereferenceableOrNull := c.ctx.CreateEnumAttribute(dereferenceableOrNullKind, size) f.LLVMFn.AddAttributeAtIndex(i+1, dereferenceableOrNull) } } // External/exported functions may not retain pointer values. // https://golang.org/cmd/cgo/#hdr-Passing_pointers if f.IsExported() { // Set the wasm-import-module attribute if the function's module is set. if f.Module() != "" { wasmImportModuleAttr := c.ctx.CreateStringAttribute("wasm-import-module", f.Module()) f.LLVMFn.AddFunctionAttr(wasmImportModuleAttr) } nocaptureKind := llvm.AttributeKindID("nocapture") nocapture := c.ctx.CreateEnumAttribute(nocaptureKind, 0) for i, typ := range paramTypes { if typ.TypeKind() == llvm.PointerTypeKind { f.LLVMFn.AddAttributeAtIndex(i+1, nocapture) } } } } // attachDebugInfo adds debug info to a function declaration. It returns the // DISubprogram metadata node. 
func (c *compilerContext) attachDebugInfo(f *ir.Function) llvm.Metadata { pos := c.ir.Program.Fset.Position(f.Syntax().Pos()) return c.attachDebugInfoRaw(f, f.LLVMFn, "", pos.Filename, pos.Line) } // attachDebugInfo adds debug info to a function declaration. It returns the // DISubprogram metadata node. This method allows some more control over how // debug info is added to the function. func (c *compilerContext) attachDebugInfoRaw(f *ir.Function, llvmFn llvm.Value, suffix, filename string, line int) llvm.Metadata { // Debug info for this function. diparams := make([]llvm.Metadata, 0, len(f.Params)) for _, param := range f.Params { diparams = append(diparams, c.getDIType(param.Type())) } diFuncType := c.dibuilder.CreateSubroutineType(llvm.DISubroutineType{ File: c.getDIFile(filename), Parameters: diparams, Flags: 0, // ? }) difunc := c.dibuilder.CreateFunction(c.getDIFile(filename), llvm.DIFunction{ Name: f.RelString(nil) + suffix, LinkageName: f.LinkName() + suffix, File: c.getDIFile(filename), Line: line, Type: diFuncType, LocalToUnit: true, IsDefinition: true, ScopeLine: 0, Flags: llvm.FlagPrototyped, Optimized: true, }) llvmFn.SetSubprogram(difunc) return difunc } // getDIFile returns a DIFile metadata node for the given filename. It tries to // use one that was already created, otherwise it falls back to creating a new // one. func (c *compilerContext) getDIFile(filename string) llvm.Metadata { if _, ok := c.difiles[filename]; !ok { dir, file := filepath.Split(filename) if dir != "" { dir = dir[:len(dir)-1] } c.difiles[filename] = c.dibuilder.CreateFile(file, dir) } return c.difiles[filename] } // createFunctionDefinition builds the LLVM IR implementation for this function. // The function must be declared but not yet defined, otherwise this function // will create a diagnostic. func (b *builder) createFunctionDefinition() { if b.DumpSSA() { fmt.Printf("\nfunc %s:\n", b.fn.Function) } if !b.fn.LLVMFn.IsDeclaration() { errValue := b.fn.Name() + " redeclared in this program" fnPos := getPosition(b.fn.LLVMFn) if fnPos.IsValid() { errValue += "\n\tprevious declaration at " + fnPos.String() } b.addError(b.fn.Pos(), errValue) return } if !b.fn.IsExported() { b.fn.LLVMFn.SetLinkage(llvm.InternalLinkage) b.fn.LLVMFn.SetUnnamedAddr(true) } // Some functions have a pragma controlling the inlining level. switch b.fn.Inline() { case ir.InlineHint: // Add LLVM inline hint to functions with //go:inline pragma. inline := b.ctx.CreateEnumAttribute(llvm.AttributeKindID("inlinehint"), 0) b.fn.LLVMFn.AddFunctionAttr(inline) case ir.InlineNone: // Add LLVM attribute to always avoid inlining this function. noinline := b.ctx.CreateEnumAttribute(llvm.AttributeKindID("noinline"), 0) b.fn.LLVMFn.AddFunctionAttr(noinline) } // Add debug info, if needed. if b.Debug() { if b.fn.Synthetic == "package initializer" { // Package initializers have no debug info. Create some fake debug // info to at least have *something*. b.difunc = b.attachDebugInfoRaw(b.fn, b.fn.LLVMFn, "", "", 0) } else if b.fn.Syntax() != nil { // Create debug info file if needed. b.difunc = b.attachDebugInfo(b.fn) } pos := b.ir.Program.Fset.Position(b.fn.Pos()) b.SetCurrentDebugLocation(uint(pos.Line), uint(pos.Column), b.difunc, llvm.Metadata{}) } // Pre-create all basic blocks in the function. 
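// Creating them all up front ensures that forward branches and the phi edges
// resolved at the end of this function always have an existing target block.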
for _, block := range b.fn.DomPreorder() { llvmBlock := b.ctx.AddBasicBlock(b.fn.LLVMFn, block.Comment) b.blockEntries[block] = llvmBlock b.blockExits[block] = llvmBlock } entryBlock := b.blockEntries[b.fn.Blocks[0]] b.SetInsertPointAtEnd(entryBlock) // Load function parameters llvmParamIndex := 0 for _, param := range b.fn.Params { llvmType := b.getLLVMType(param.Type()) fields := make([]llvm.Value, 0, 1) fieldFragments, _ := expandFormalParamType(llvmType, nil) for range fieldFragments { fields = append(fields, b.fn.LLVMFn.Param(llvmParamIndex)) llvmParamIndex++ } b.locals[param] = b.collapseFormalParam(llvmType, fields) // Add debug information to this parameter (if available) if b.Debug() && b.fn.Syntax() != nil { dbgParam := b.getLocalVariable(param.Object().(*types.Var)) loc := b.GetCurrentDebugLocation() if len(fields) == 1 { expr := b.dibuilder.CreateExpression(nil) b.dibuilder.InsertValueAtEnd(fields[0], dbgParam, expr, loc, entryBlock) } else { fieldOffsets := b.expandFormalParamOffsets(llvmType) for i, field := range fields { expr := b.dibuilder.CreateExpression([]int64{ 0x1000, // DW_OP_LLVM_fragment int64(fieldOffsets[i]) * 8, // offset in bits int64(b.targetData.TypeAllocSize(field.Type())) * 8, // size in bits }) b.dibuilder.InsertValueAtEnd(field, dbgParam, expr, loc, entryBlock) } } } } // Load free variables from the context. This is a closure (or bound // method). var context llvm.Value if !b.fn.IsExported() { parentHandle := b.fn.LLVMFn.LastParam() parentHandle.SetName("parentHandle") context = llvm.PrevParam(parentHandle) context.SetName("context") } if len(b.fn.FreeVars) != 0 { // Get a list of all variable types in the context. freeVarTypes := make([]llvm.Type, len(b.fn.FreeVars)) for i, freeVar := range b.fn.FreeVars { freeVarTypes[i] = b.getLLVMType(freeVar.Type()) } // Load each free variable from the context pointer. // A free variable is always a pointer when this is a closure, but it // can be another type when it is a wrapper for a bound method (these // wrappers are generated by the ssa package). for i, val := range b.emitPointerUnpack(context, freeVarTypes) { b.locals[b.fn.FreeVars[i]] = val } } if b.fn.Recover != nil { // This function has deferred function calls. Set some things up for // them. b.deferInitFunc() } // Fill blocks with instructions. for _, block := range b.fn.DomPreorder() { if b.DumpSSA() { fmt.Printf("%d: %s:\n", block.Index, block.Comment) } b.SetInsertPointAtEnd(b.blockEntries[block]) b.currentBlock = block for _, instr := range block.Instrs { if instr, ok := instr.(*ssa.DebugRef); ok { if !b.Debug() { continue } object := instr.Object() variable, ok := object.(*types.Var) if !ok { // Not a local variable. continue } if instr.IsAddr { // TODO, this may happen for *ssa.Alloc and *ssa.FieldAddr // for example. 
continue } dbgVar := b.getLocalVariable(variable) pos := b.ir.Program.Fset.Position(instr.Pos()) b.dibuilder.InsertValueAtEnd(b.getValue(instr.X), dbgVar, b.dibuilder.CreateExpression(nil), llvm.DebugLoc{ Line: uint(pos.Line), Col: uint(pos.Column), Scope: b.difunc, }, b.GetInsertBlock()) continue } if b.DumpSSA() { if val, ok := instr.(ssa.Value); ok && val.Name() != "" { fmt.Printf("\t%s = %s\n", val.Name(), val.String()) } else { fmt.Printf("\t%s\n", instr.String()) } } b.createInstruction(instr) } if b.fn.Name() == "init" && len(block.Instrs) == 0 { b.CreateRetVoid() } } // Resolve phi nodes for _, phi := range b.phis { block := phi.ssa.Block() for i, edge := range phi.ssa.Edges { llvmVal := b.getValue(edge) llvmBlock := b.blockExits[block.Preds[i]] phi.llvm.AddIncoming([]llvm.Value{llvmVal}, []llvm.BasicBlock{llvmBlock}) } } } // createInstruction builds the LLVM IR equivalent instructions for the // particular Go SSA instruction. func (b *builder) createInstruction(instr ssa.Instruction) { if b.Debug() { pos := b.ir.Program.Fset.Position(instr.Pos()) b.SetCurrentDebugLocation(uint(pos.Line), uint(pos.Column), b.difunc, llvm.Metadata{}) } switch instr := instr.(type) { case ssa.Value: if value, err := b.createExpr(instr); err != nil { // This expression could not be parsed. Add the error to the list // of diagnostics and continue with an undef value. // The resulting IR will be incorrect (but valid). However, // compilation can proceed which is useful because there may be // more compilation errors which can then all be shown together to // the user. b.diagnostics = append(b.diagnostics, err) b.locals[instr] = llvm.Undef(b.getLLVMType(instr.Type())) } else { b.locals[instr] = value if len(*instr.Referrers()) != 0 && b.NeedsStackObjects() { b.trackExpr(instr, value) } } case *ssa.DebugRef: // ignore case *ssa.Defer: b.createDefer(instr) case *ssa.Go: // Get all function parameters to pass to the goroutine. var params []llvm.Value for _, param := range instr.Call.Args { params = append(params, b.getValue(param)) } // Start a new goroutine. if callee := instr.Call.StaticCallee(); callee != nil { // Static callee is known. This makes it easier to start a new // goroutine. calleeFn := b.ir.GetFunction(callee) var context llvm.Value switch value := instr.Call.Value.(type) { case *ssa.Function: // Goroutine call is regular function call. No context is necessary. context = llvm.Undef(b.i8ptrType) case *ssa.MakeClosure: // A goroutine call on a func value, but the callee is trivial to find. For // example: immediately applied functions. funcValue := b.getValue(value) context = b.extractFuncContext(funcValue) default: panic("StaticCallee returned an unexpected value") } params = append(params, context) // context parameter b.createGoInstruction(calleeFn.LLVMFn, params) } else if !instr.Call.IsInvoke() { // This is a function pointer. // At the moment, two extra params are passed to the newly started // goroutine: // * The function context, for closures. // * The function pointer (for tasks). funcPtr, context := b.decodeFuncValue(b.getValue(instr.Call.Value), instr.Call.Value.Type().(*types.Signature)) params = append(params, context) // context parameter switch b.Scheduler() { case "none", "coroutines": // There are no additional parameters needed for the goroutine start operation. case "tasks": // Add the function pointer as a parameter to start the goroutine. 
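// (Only the "tasks" scheduler needs the entry function pointer passed
// explicitly; the other schedulers above get by with the context alone.)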
params = append(params, funcPtr) default: panic("unknown scheduler type") } b.createGoInstruction(funcPtr, params) } else { b.addError(instr.Pos(), "todo: go on interface call") } case *ssa.If: cond := b.getValue(instr.Cond) block := instr.Block() blockThen := b.blockEntries[block.Succs[0]] blockElse := b.blockEntries[block.Succs[1]] b.CreateCondBr(cond, blockThen, blockElse) case *ssa.Jump: blockJump := b.blockEntries[instr.Block().Succs[0]] b.CreateBr(blockJump) case *ssa.MapUpdate: m := b.getValue(instr.Map) key := b.getValue(instr.Key) value := b.getValue(instr.Value) mapType := instr.Map.Type().Underlying().(*types.Map) b.createMapUpdate(mapType.Key(), m, key, value, instr.Pos()) case *ssa.Panic: value := b.getValue(instr.X) b.createRuntimeCall("_panic", []llvm.Value{value}, "") b.CreateUnreachable() case *ssa.Return: if len(instr.Results) == 0 { b.CreateRetVoid() } else if len(instr.Results) == 1 { b.CreateRet(b.getValue(instr.Results[0])) } else { // Multiple return values. Put them all in a struct. retVal := llvm.ConstNull(b.fn.LLVMFn.Type().ElementType().ReturnType()) for i, result := range instr.Results { val := b.getValue(result) retVal = b.CreateInsertValue(retVal, val, i, "") } b.CreateRet(retVal) } case *ssa.RunDefers: b.createRunDefers() case *ssa.Send: b.createChanSend(instr) case *ssa.Store: llvmAddr := b.getValue(instr.Addr) llvmVal := b.getValue(instr.Val) b.createNilCheck(instr.Addr, llvmAddr, "store") if b.targetData.TypeAllocSize(llvmVal.Type()) == 0 { // nothing to store return } b.CreateStore(llvmVal, llvmAddr) default: b.addError(instr.Pos(), "unknown instruction: "+instr.String()) } } // createBuiltin lowers a builtin Go function (append, close, delete, etc.) to // LLVM IR. It uses runtime calls for some builtins. func (b *builder) createBuiltin(args []ssa.Value, callName string, pos token.Pos) (llvm.Value, error) { switch callName { case "append": src := b.getValue(args[0]) elems := b.getValue(args[1]) srcBuf := b.CreateExtractValue(src, 0, "append.srcBuf") srcPtr := b.CreateBitCast(srcBuf, b.i8ptrType, "append.srcPtr") srcLen := b.CreateExtractValue(src, 1, "append.srcLen") srcCap := b.CreateExtractValue(src, 2, "append.srcCap") elemsBuf := b.CreateExtractValue(elems, 0, "append.elemsBuf") elemsPtr := b.CreateBitCast(elemsBuf, b.i8ptrType, "append.srcPtr") elemsLen := b.CreateExtractValue(elems, 1, "append.elemsLen") elemType := srcBuf.Type().ElementType() elemSize := llvm.ConstInt(b.uintptrType, b.targetData.TypeAllocSize(elemType), false) result := b.createRuntimeCall("sliceAppend", []llvm.Value{srcPtr, elemsPtr, srcLen, srcCap, elemsLen, elemSize}, "append.new") newPtr := b.CreateExtractValue(result, 0, "append.newPtr") newBuf := b.CreateBitCast(newPtr, srcBuf.Type(), "append.newBuf") newLen := b.CreateExtractValue(result, 1, "append.newLen") newCap := b.CreateExtractValue(result, 2, "append.newCap") newSlice := llvm.Undef(src.Type()) newSlice = b.CreateInsertValue(newSlice, newBuf, 0, "") newSlice = b.CreateInsertValue(newSlice, newLen, 1, "") newSlice = b.CreateInsertValue(newSlice, newCap, 2, "") return newSlice, nil case "cap": value := b.getValue(args[0]) var llvmCap llvm.Value switch args[0].Type().(type) { case *types.Chan: // Channel. Buffered channels haven't been implemented yet so always // return 0. 
llvmCap = llvm.ConstInt(b.intType, 0, false) case *types.Slice: llvmCap = b.CreateExtractValue(value, 2, "cap") default: return llvm.Value{}, b.makeError(pos, "todo: cap: unknown type") } if b.targetData.TypeAllocSize(llvmCap.Type()) < b.targetData.TypeAllocSize(b.intType) { llvmCap = b.CreateZExt(llvmCap, b.intType, "len.int") } return llvmCap, nil case "close": b.createChanClose(args[0]) return llvm.Value{}, nil case "complex": r := b.getValue(args[0]) i := b.getValue(args[1]) t := args[0].Type().Underlying().(*types.Basic) var cplx llvm.Value switch t.Kind() { case types.Float32: cplx = llvm.Undef(b.ctx.StructType([]llvm.Type{b.ctx.FloatType(), b.ctx.FloatType()}, false)) case types.Float64: cplx = llvm.Undef(b.ctx.StructType([]llvm.Type{b.ctx.DoubleType(), b.ctx.DoubleType()}, false)) default: return llvm.Value{}, b.makeError(pos, "unsupported type in complex builtin: "+t.String()) } cplx = b.CreateInsertValue(cplx, r, 0, "") cplx = b.CreateInsertValue(cplx, i, 1, "") return cplx, nil case "copy": dst := b.getValue(args[0]) src := b.getValue(args[1]) dstLen := b.CreateExtractValue(dst, 1, "copy.dstLen") srcLen := b.CreateExtractValue(src, 1, "copy.srcLen") dstBuf := b.CreateExtractValue(dst, 0, "copy.dstArray") srcBuf := b.CreateExtractValue(src, 0, "copy.srcArray") elemType := dstBuf.Type().ElementType() dstBuf = b.CreateBitCast(dstBuf, b.i8ptrType, "copy.dstPtr") srcBuf = b.CreateBitCast(srcBuf, b.i8ptrType, "copy.srcPtr") elemSize := llvm.ConstInt(b.uintptrType, b.targetData.TypeAllocSize(elemType), false) return b.createRuntimeCall("sliceCopy", []llvm.Value{dstBuf, srcBuf, dstLen, srcLen, elemSize}, "copy.n"), nil case "delete": m := b.getValue(args[0]) key := b.getValue(args[1]) return llvm.Value{}, b.createMapDelete(args[1].Type(), m, key, pos) case "imag": cplx := b.getValue(args[0]) return b.CreateExtractValue(cplx, 1, "imag"), nil case "len": value := b.getValue(args[0]) var llvmLen llvm.Value switch args[0].Type().Underlying().(type) { case *types.Basic, *types.Slice: // string or slice llvmLen = b.CreateExtractValue(value, 1, "len") case *types.Chan: // Channel. Buffered channels haven't been implemented yet so always // return 0. 
llvmLen = llvm.ConstInt(b.intType, 0, false) case *types.Map: llvmLen = b.createRuntimeCall("hashmapLen", []llvm.Value{value}, "len") default: return llvm.Value{}, b.makeError(pos, "todo: len: unknown type") } if b.targetData.TypeAllocSize(llvmLen.Type()) < b.targetData.TypeAllocSize(b.intType) { llvmLen = b.CreateZExt(llvmLen, b.intType, "len.int") } return llvmLen, nil case "print", "println": for i, arg := range args { if i >= 1 && callName == "println" { b.createRuntimeCall("printspace", nil, "") } value := b.getValue(arg) typ := arg.Type().Underlying() switch typ := typ.(type) { case *types.Basic: switch typ.Kind() { case types.String, types.UntypedString: b.createRuntimeCall("printstring", []llvm.Value{value}, "") case types.Uintptr: b.createRuntimeCall("printptr", []llvm.Value{value}, "") case types.UnsafePointer: ptrValue := b.CreatePtrToInt(value, b.uintptrType, "") b.createRuntimeCall("printptr", []llvm.Value{ptrValue}, "") default: // runtime.print{int,uint}{8,16,32,64} if typ.Info()&types.IsInteger != 0 { name := "print" if typ.Info()&types.IsUnsigned != 0 { name += "uint" } else { name += "int" } name += strconv.FormatUint(b.targetData.TypeAllocSize(value.Type())*8, 10) b.createRuntimeCall(name, []llvm.Value{value}, "") } else if typ.Kind() == types.Bool { b.createRuntimeCall("printbool", []llvm.Value{value}, "") } else if typ.Kind() == types.Float32 { b.createRuntimeCall("printfloat32", []llvm.Value{value}, "") } else if typ.Kind() == types.Float64 { b.createRuntimeCall("printfloat64", []llvm.Value{value}, "") } else if typ.Kind() == types.Complex64 { b.createRuntimeCall("printcomplex64", []llvm.Value{value}, "") } else if typ.Kind() == types.Complex128 { b.createRuntimeCall("printcomplex128", []llvm.Value{value}, "") } else { return llvm.Value{}, b.makeError(pos, "unknown basic arg type: "+typ.String()) } } case *types.Interface: b.createRuntimeCall("printitf", []llvm.Value{value}, "") case *types.Map: b.createRuntimeCall("printmap", []llvm.Value{value}, "") case *types.Pointer: ptrValue := b.CreatePtrToInt(value, b.uintptrType, "") b.createRuntimeCall("printptr", []llvm.Value{ptrValue}, "") default: return llvm.Value{}, b.makeError(pos, "unknown arg type: "+typ.String()) } } if callName == "println" { b.createRuntimeCall("printnl", nil, "") } return llvm.Value{}, nil // print() or println() returns void case "real": cplx := b.getValue(args[0]) return b.CreateExtractValue(cplx, 0, "real"), nil case "recover": return b.createRuntimeCall("_recover", nil, ""), nil case "ssa:wrapnilchk": // TODO: do an actual nil check? return b.getValue(args[0]), nil default: return llvm.Value{}, b.makeError(pos, "todo: builtin: "+callName) } } // createFunctionCall lowers a Go SSA call instruction (to a simple function, // closure, function pointer, builtin, method, etc.) to LLVM IR, usually a call // instruction. // // This is also where compiler intrinsics are implemented. func (b *builder) createFunctionCall(instr *ssa.CallCommon) (llvm.Value, error) { if instr.IsInvoke() { fnCast, args := b.getInvokeCall(instr) return b.createCall(fnCast, args, ""), nil } // Try to call the function directly for trivially static calls. var callee, context llvm.Value exported := false if fn := instr.StaticCallee(); fn != nil { // Direct function call, either to a named or anonymous (directly // applied) function call. If it is anonymous, it may be a closure. 
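		// Compiler intrinsics are dispatched on the SSA name below. For example
		// (illustrative), a user call such as
		//
		//	arm.Asm("wfi")
		//
		// has the name "device/arm.Asm" and is lowered via createInlineAsm rather
		// than emitted as a regular call.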
		name := fn.RelString(nil)
		switch {
		case name == "runtime.memcpy" || name == "runtime.memmove" || name == "reflect.memcpy":
			return b.createMemoryCopyCall(fn, instr.Args)
		case name == "runtime.memzero":
			return b.createMemoryZeroCall(instr.Args)
		case name == "device/arm.ReadRegister" || name == "device/riscv.ReadRegister":
			return b.createReadRegister(name, instr.Args)
		case name == "device/arm.Asm" || name == "device/avr.Asm" || name == "device/riscv.Asm":
			return b.createInlineAsm(instr.Args)
		case name == "device/arm.AsmFull" || name == "device/avr.AsmFull" || name == "device/riscv.AsmFull":
			return b.createInlineAsmFull(instr)
		case strings.HasPrefix(name, "device/arm.SVCall"):
			return b.emitSVCall(instr.Args)
		case strings.HasPrefix(name, "(device/riscv.CSR)."):
			return b.emitCSROperation(instr)
		case strings.HasPrefix(name, "syscall.Syscall"):
			return b.createSyscall(instr)
		case strings.HasPrefix(name, "runtime/volatile.Load"):
			return b.createVolatileLoad(instr)
		case strings.HasPrefix(name, "runtime/volatile.Store"):
			return b.createVolatileStore(instr)
		case name == "runtime/interrupt.New":
			return b.createInterruptGlobal(instr)
		}
		targetFunc := b.ir.GetFunction(fn)
		if targetFunc.LLVMFn.IsNil() {
			return llvm.Value{}, b.makeError(instr.Pos(), "undefined function: "+targetFunc.LinkName())
		}
		switch value := instr.Value.(type) {
		case *ssa.Function:
			// Regular function call. No context is necessary.
			context = llvm.Undef(b.i8ptrType)
		case *ssa.MakeClosure:
			// A call on a func value, but the callee is trivial to find. For
			// example: immediately applied functions.
			funcValue := b.getValue(value)
			context = b.extractFuncContext(funcValue)
		default:
			panic("StaticCallee returned an unexpected value")
		}
		callee = targetFunc.LLVMFn
		exported = targetFunc.IsExported()
	} else if call, ok := instr.Value.(*ssa.Builtin); ok {
		// Builtin function (append, close, delete, etc.).
		return b.createBuiltin(instr.Args, call.Name(), instr.Pos())
	} else {
		// Function pointer.
		value := b.getValue(instr.Value)
		// This is a func value, which cannot be called directly. We have to
		// extract the function pointer and context first from the func value.
		callee, context = b.decodeFuncValue(value, instr.Value.Type().Underlying().(*types.Signature))
		b.createNilCheck(instr.Value, callee, "fpcall")
	}

	var params []llvm.Value
	for _, param := range instr.Args {
		params = append(params, b.getValue(param))
	}

	if !exported {
		// This function takes a context parameter.
		// Add it to the end of the parameter list.
		params = append(params, context)

		// Parent coroutine handle.
		params = append(params, llvm.Undef(b.i8ptrType))
	}

	return b.createCall(callee, params, ""), nil
}

// getValue returns the LLVM value of a constant, function value, global, or
// already processed SSA expression.
func (b *builder) getValue(expr ssa.Value) llvm.Value { switch expr := expr.(type) { case *ssa.Const: return b.createConst(b.fn.LinkName(), expr) case *ssa.Function: fn := b.ir.GetFunction(expr) if fn.IsExported() { b.addError(expr.Pos(), "cannot use an exported function as value: "+expr.String()) return llvm.Undef(b.getLLVMType(expr.Type())) } return b.createFuncValue(fn.LLVMFn, llvm.Undef(b.i8ptrType), fn.Signature) case *ssa.Global: value := b.getGlobal(expr) if value.IsNil() { b.addError(expr.Pos(), "global not found: "+expr.RelString(nil)) return llvm.Undef(b.getLLVMType(expr.Type())) } return value default: // other (local) SSA value if value, ok := b.locals[expr]; ok { return value } else { // indicates a compiler bug panic("local has not been parsed: " + expr.String()) } } } // createExpr translates a Go SSA expression to LLVM IR. This can be zero, one, // or multiple LLVM IR instructions and/or runtime calls. func (b *builder) createExpr(expr ssa.Value) (llvm.Value, error) { if _, ok := b.locals[expr]; ok { // sanity check panic("instruction has already been created: " + expr.String()) } switch expr := expr.(type) { case *ssa.Alloc: typ := b.getLLVMType(expr.Type().Underlying().(*types.Pointer).Elem()) if expr.Heap { size := b.targetData.TypeAllocSize(typ) // Calculate ^uintptr(0) maxSize := llvm.ConstNot(llvm.ConstInt(b.uintptrType, 0, false)).ZExtValue() if size > maxSize { // Size would be truncated if truncated to uintptr. return llvm.Value{}, b.makeError(expr.Pos(), fmt.Sprintf("value is too big (%v bytes)", size)) } sizeValue := llvm.ConstInt(b.uintptrType, size, false) buf := b.createRuntimeCall("alloc", []llvm.Value{sizeValue}, expr.Comment) buf = b.CreateBitCast(buf, llvm.PointerType(typ, 0), "") return buf, nil } else { buf := llvmutil.CreateEntryBlockAlloca(b.Builder, typ, expr.Comment) if b.targetData.TypeAllocSize(typ) != 0 { b.CreateStore(llvm.ConstNull(typ), buf) // zero-initialize var } return buf, nil } case *ssa.BinOp: x := b.getValue(expr.X) y := b.getValue(expr.Y) return b.createBinOp(expr.Op, expr.X.Type(), x, y, expr.Pos()) case *ssa.Call: return b.createFunctionCall(expr.Common()) case *ssa.ChangeInterface: // Do not change between interface types: always use the underlying // (concrete) type in the type number of the interface. Every method // call on an interface will do a lookup which method to call. // This is different from how the official Go compiler works, because of // heap allocation and because it's easier to implement, see: // https://research.swtch.com/interfaces return b.getValue(expr.X), nil case *ssa.ChangeType: // This instruction changes the type, but the underlying value remains // the same. This is often a no-op, but sometimes we have to change the // LLVM type as well. x := b.getValue(expr.X) llvmType := b.getLLVMType(expr.Type()) if x.Type() == llvmType { // Different Go type but same LLVM type (for example, named int). // This is the common case. return x, nil } // Figure out what kind of type we need to cast. switch llvmType.TypeKind() { case llvm.StructTypeKind: // Unfortunately, we can't just bitcast structs. We have to // actually create a new struct of the correct type and insert the // values from the previous struct in there. value := llvm.Undef(llvmType) for i := 0; i < llvmType.StructElementTypesCount(); i++ { field := b.CreateExtractValue(x, i, "changetype.field") value = b.CreateInsertValue(value, field, i, "changetype.struct") } return value, nil case llvm.PointerTypeKind: // This can happen with pointers to structs. 
This case is easy: // simply bitcast the pointer to the destination type. return b.CreateBitCast(x, llvmType, "changetype.pointer"), nil default: return llvm.Value{}, errors.New("todo: unknown ChangeType type: " + expr.X.Type().String()) } case *ssa.Const: panic("const is not an expression") case *ssa.Convert: x := b.getValue(expr.X) return b.createConvert(expr.X.Type(), expr.Type(), x, expr.Pos()) case *ssa.Extract: if _, ok := expr.Tuple.(*ssa.Select); ok { return b.getChanSelectResult(expr), nil } value := b.getValue(expr.Tuple) return b.CreateExtractValue(value, expr.Index, ""), nil case *ssa.Field: value := b.getValue(expr.X) result := b.CreateExtractValue(value, expr.Field, "") return result, nil case *ssa.FieldAddr: val := b.getValue(expr.X) // Check for nil pointer before calculating the address, from the spec: // > For an operand x of type T, the address operation &x generates a // > pointer of type *T to x. [...] If the evaluation of x would cause a // > run-time panic, then the evaluation of &x does too. b.createNilCheck(expr.X, val, "gep") // Do a GEP on the pointer to get the field address. indices := []llvm.Value{ llvm.ConstInt(b.ctx.Int32Type(), 0, false), llvm.ConstInt(b.ctx.Int32Type(), uint64(expr.Field), false), } return b.CreateInBoundsGEP(val, indices, ""), nil case *ssa.Function: panic("function is not an expression") case *ssa.Global: panic("global is not an expression") case *ssa.Index: array := b.getValue(expr.X) index := b.getValue(expr.Index) // Check bounds. arrayLen := expr.X.Type().(*types.Array).Len() arrayLenLLVM := llvm.ConstInt(b.uintptrType, uint64(arrayLen), false) b.createLookupBoundsCheck(arrayLenLLVM, index, expr.Index.Type()) // Can't load directly from array (as index is non-constant), so have to // do it using an alloca+gep+load. alloca, allocaPtr, allocaSize := b.createTemporaryAlloca(array.Type(), "index.alloca") b.CreateStore(array, alloca) zero := llvm.ConstInt(b.ctx.Int32Type(), 0, false) ptr := b.CreateInBoundsGEP(alloca, []llvm.Value{zero, index}, "index.gep") result := b.CreateLoad(ptr, "index.load") b.emitLifetimeEnd(allocaPtr, allocaSize) return result, nil case *ssa.IndexAddr: val := b.getValue(expr.X) index := b.getValue(expr.Index) // Get buffer pointer and length var bufptr, buflen llvm.Value switch ptrTyp := expr.X.Type().Underlying().(type) { case *types.Pointer: typ := expr.X.Type().Underlying().(*types.Pointer).Elem().Underlying() switch typ := typ.(type) { case *types.Array: bufptr = val buflen = llvm.ConstInt(b.uintptrType, uint64(typ.Len()), false) // Check for nil pointer before calculating the address, from // the spec: // > For an operand x of type T, the address operation &x // > generates a pointer of type *T to x. [...] If the // > evaluation of x would cause a run-time panic, then the // > evaluation of &x does too. b.createNilCheck(expr.X, bufptr, "gep") default: return llvm.Value{}, b.makeError(expr.Pos(), "todo: indexaddr: "+typ.String()) } case *types.Slice: bufptr = b.CreateExtractValue(val, 0, "indexaddr.ptr") buflen = b.CreateExtractValue(val, 1, "indexaddr.len") default: return llvm.Value{}, b.makeError(expr.Pos(), "todo: indexaddr: "+ptrTyp.String()) } // Bounds check. 
b.createLookupBoundsCheck(buflen, index, expr.Index.Type()) switch expr.X.Type().Underlying().(type) { case *types.Pointer: indices := []llvm.Value{ llvm.ConstInt(b.ctx.Int32Type(), 0, false), index, } return b.CreateInBoundsGEP(bufptr, indices, ""), nil case *types.Slice: return b.CreateInBoundsGEP(bufptr, []llvm.Value{index}, ""), nil default: panic("unreachable") } case *ssa.Lookup: value := b.getValue(expr.X) index := b.getValue(expr.Index) switch xType := expr.X.Type().Underlying().(type) { case *types.Basic: // Value type must be a string, which is a basic type. if xType.Info()&types.IsString == 0 { panic("lookup on non-string?") } // Bounds check. length := b.CreateExtractValue(value, 1, "len") b.createLookupBoundsCheck(length, index, expr.Index.Type()) // Lookup byte buf := b.CreateExtractValue(value, 0, "") bufPtr := b.CreateInBoundsGEP(buf, []llvm.Value{index}, "") return b.CreateLoad(bufPtr, ""), nil case *types.Map: valueType := expr.Type() if expr.CommaOk { valueType = valueType.(*types.Tuple).At(0).Type() } return b.createMapLookup(xType.Key(), valueType, value, index, expr.CommaOk, expr.Pos()) default: panic("unknown lookup type: " + expr.String()) } case *ssa.MakeChan: return b.createMakeChan(expr), nil case *ssa.MakeClosure: return b.parseMakeClosure(expr) case *ssa.MakeInterface: val := b.getValue(expr.X) return b.createMakeInterface(val, expr.X.Type(), expr.Pos()), nil case *ssa.MakeMap: return b.createMakeMap(expr) case *ssa.MakeSlice: sliceLen := b.getValue(expr.Len) sliceCap := b.getValue(expr.Cap) sliceType := expr.Type().Underlying().(*types.Slice) llvmElemType := b.getLLVMType(sliceType.Elem()) elemSize := b.targetData.TypeAllocSize(llvmElemType) elemSizeValue := llvm.ConstInt(b.uintptrType, elemSize, false) // Calculate (^uintptr(0)) >> 1, which is the max value that fits in // uintptr if uintptr were signed. maxSize := llvm.ConstLShr(llvm.ConstNot(llvm.ConstInt(b.uintptrType, 0, false)), llvm.ConstInt(b.uintptrType, 1, false)) if elemSize > maxSize.ZExtValue() { // This seems to be checked by the typechecker already, but let's // check it again just to be sure. return llvm.Value{}, b.makeError(expr.Pos(), fmt.Sprintf("slice element type is too big (%v bytes)", elemSize)) } // Bounds checking. lenType := expr.Len.Type().(*types.Basic) capType := expr.Cap.Type().(*types.Basic) b.createSliceBoundsCheck(maxSize, sliceLen, sliceCap, sliceCap, lenType, capType, capType) // Allocate the backing array. sliceCapCast, err := b.createConvert(expr.Cap.Type(), types.Typ[types.Uintptr], sliceCap, expr.Pos()) if err != nil { return llvm.Value{}, err } sliceSize := b.CreateBinOp(llvm.Mul, elemSizeValue, sliceCapCast, "makeslice.cap") slicePtr := b.createRuntimeCall("alloc", []llvm.Value{sliceSize}, "makeslice.buf") slicePtr = b.CreateBitCast(slicePtr, llvm.PointerType(llvmElemType, 0), "makeslice.array") // Extend or truncate if necessary. This is safe as we've already done // the bounds check. sliceLen, err = b.createConvert(expr.Len.Type(), types.Typ[types.Uintptr], sliceLen, expr.Pos()) if err != nil { return llvm.Value{}, err } sliceCap, err = b.createConvert(expr.Cap.Type(), types.Typ[types.Uintptr], sliceCap, expr.Pos()) if err != nil { return llvm.Value{}, err } // Create the slice. 
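// The result is the slice header struct {ptr, len, cap}, i.e. what a Go
// expression like make([]byte, n, c) ultimately lowers to here (field order
// as in getLLVMType for *types.Slice).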
slice := b.ctx.ConstStruct([]llvm.Value{ llvm.Undef(slicePtr.Type()), llvm.Undef(b.uintptrType), llvm.Undef(b.uintptrType), }, false) slice = b.CreateInsertValue(slice, slicePtr, 0, "") slice = b.CreateInsertValue(slice, sliceLen, 1, "") slice = b.CreateInsertValue(slice, sliceCap, 2, "") return slice, nil case *ssa.Next: rangeVal := expr.Iter.(*ssa.Range).X llvmRangeVal := b.getValue(rangeVal) it := b.getValue(expr.Iter) if expr.IsString { return b.createRuntimeCall("stringNext", []llvm.Value{llvmRangeVal, it}, "range.next"), nil } else { // map llvmKeyType := b.getLLVMType(rangeVal.Type().Underlying().(*types.Map).Key()) llvmValueType := b.getLLVMType(rangeVal.Type().Underlying().(*types.Map).Elem()) mapKeyAlloca, mapKeyPtr, mapKeySize := b.createTemporaryAlloca(llvmKeyType, "range.key") mapValueAlloca, mapValuePtr, mapValueSize := b.createTemporaryAlloca(llvmValueType, "range.value") ok := b.createRuntimeCall("hashmapNext", []llvm.Value{llvmRangeVal, it, mapKeyPtr, mapValuePtr}, "range.next") tuple := llvm.Undef(b.ctx.StructType([]llvm.Type{b.ctx.Int1Type(), llvmKeyType, llvmValueType}, false)) tuple = b.CreateInsertValue(tuple, ok, 0, "") tuple = b.CreateInsertValue(tuple, b.CreateLoad(mapKeyAlloca, ""), 1, "") tuple = b.CreateInsertValue(tuple, b.CreateLoad(mapValueAlloca, ""), 2, "") b.emitLifetimeEnd(mapKeyPtr, mapKeySize) b.emitLifetimeEnd(mapValuePtr, mapValueSize) return tuple, nil } case *ssa.Phi: phi := b.CreatePHI(b.getLLVMType(expr.Type()), "") b.phis = append(b.phis, Phi{expr, phi}) return phi, nil case *ssa.Range: var iteratorType llvm.Type switch typ := expr.X.Type().Underlying().(type) { case *types.Basic: // string iteratorType = b.getLLVMRuntimeType("stringIterator") case *types.Map: iteratorType = b.getLLVMRuntimeType("hashmapIterator") default: panic("unknown type in range: " + typ.String()) } it, _, _ := b.createTemporaryAlloca(iteratorType, "range.it") b.CreateStore(llvm.ConstNull(iteratorType), it) return it, nil case *ssa.Select: return b.createSelect(expr), nil case *ssa.Slice: value := b.getValue(expr.X) var lowType, highType, maxType *types.Basic var low, high, max llvm.Value if expr.Low != nil { lowType = expr.Low.Type().Underlying().(*types.Basic) low = b.getValue(expr.Low) if low.Type().IntTypeWidth() < b.uintptrType.IntTypeWidth() { if lowType.Info()&types.IsUnsigned != 0 { low = b.CreateZExt(low, b.uintptrType, "") } else { low = b.CreateSExt(low, b.uintptrType, "") } } } else { lowType = types.Typ[types.Uintptr] low = llvm.ConstInt(b.uintptrType, 0, false) } if expr.High != nil { highType = expr.High.Type().Underlying().(*types.Basic) high = b.getValue(expr.High) if high.Type().IntTypeWidth() < b.uintptrType.IntTypeWidth() { if highType.Info()&types.IsUnsigned != 0 { high = b.CreateZExt(high, b.uintptrType, "") } else { high = b.CreateSExt(high, b.uintptrType, "") } } } else { highType = types.Typ[types.Uintptr] } if expr.Max != nil { maxType = expr.Max.Type().Underlying().(*types.Basic) max = b.getValue(expr.Max) if max.Type().IntTypeWidth() < b.uintptrType.IntTypeWidth() { if maxType.Info()&types.IsUnsigned != 0 { max = b.CreateZExt(max, b.uintptrType, "") } else { max = b.CreateSExt(max, b.uintptrType, "") } } } else { maxType = types.Typ[types.Uintptr] } switch typ := expr.X.Type().Underlying().(type) { case *types.Pointer: // pointer to array // slice an array length := typ.Elem().Underlying().(*types.Array).Len() llvmLen := llvm.ConstInt(b.uintptrType, uint64(length), false) if high.IsNil() { high = llvmLen } if max.IsNil() { max = llvmLen } indices 
:= []llvm.Value{ llvm.ConstInt(b.ctx.Int32Type(), 0, false), low, } b.createSliceBoundsCheck(llvmLen, low, high, max, lowType, highType, maxType) // Truncate ints bigger than uintptr. This is after the bounds // check so it's safe. if b.targetData.TypeAllocSize(low.Type()) > b.targetData.TypeAllocSize(b.uintptrType) { low = b.CreateTrunc(low, b.uintptrType, "") } if b.targetData.TypeAllocSize(high.Type()) > b.targetData.TypeAllocSize(b.uintptrType) { high = b.CreateTrunc(high, b.uintptrType, "") } if b.targetData.TypeAllocSize(max.Type()) > b.targetData.TypeAllocSize(b.uintptrType) { max = b.CreateTrunc(max, b.uintptrType, "") } sliceLen := b.CreateSub(high, low, "slice.len") slicePtr := b.CreateInBoundsGEP(value, indices, "slice.ptr") sliceCap := b.CreateSub(max, low, "slice.cap") slice := b.ctx.ConstStruct([]llvm.Value{ llvm.Undef(slicePtr.Type()), llvm.Undef(b.uintptrType), llvm.Undef(b.uintptrType), }, false) slice = b.CreateInsertValue(slice, slicePtr, 0, "") slice = b.CreateInsertValue(slice, sliceLen, 1, "") slice = b.CreateInsertValue(slice, sliceCap, 2, "") return slice, nil case *types.Slice: // slice a slice oldPtr := b.CreateExtractValue(value, 0, "") oldLen := b.CreateExtractValue(value, 1, "") oldCap := b.CreateExtractValue(value, 2, "") if high.IsNil() { high = oldLen } if max.IsNil() { max = oldCap } b.createSliceBoundsCheck(oldCap, low, high, max, lowType, highType, maxType) // Truncate ints bigger than uintptr. This is after the bounds // check so it's safe. if b.targetData.TypeAllocSize(low.Type()) > b.targetData.TypeAllocSize(b.uintptrType) { low = b.CreateTrunc(low, b.uintptrType, "") } if b.targetData.TypeAllocSize(high.Type()) > b.targetData.TypeAllocSize(b.uintptrType) { high = b.CreateTrunc(high, b.uintptrType, "") } if b.targetData.TypeAllocSize(max.Type()) > b.targetData.TypeAllocSize(b.uintptrType) { max = b.CreateTrunc(max, b.uintptrType, "") } newPtr := b.CreateInBoundsGEP(oldPtr, []llvm.Value{low}, "") newLen := b.CreateSub(high, low, "") newCap := b.CreateSub(max, low, "") slice := b.ctx.ConstStruct([]llvm.Value{ llvm.Undef(newPtr.Type()), llvm.Undef(b.uintptrType), llvm.Undef(b.uintptrType), }, false) slice = b.CreateInsertValue(slice, newPtr, 0, "") slice = b.CreateInsertValue(slice, newLen, 1, "") slice = b.CreateInsertValue(slice, newCap, 2, "") return slice, nil case *types.Basic: if typ.Info()&types.IsString == 0 { return llvm.Value{}, b.makeError(expr.Pos(), "unknown slice type: "+typ.String()) } // slice a string if expr.Max != nil { // This might as well be a panic, as the frontend should have // handled this already. return llvm.Value{}, b.makeError(expr.Pos(), "slicing a string with a max parameter is not allowed by the spec") } oldPtr := b.CreateExtractValue(value, 0, "") oldLen := b.CreateExtractValue(value, 1, "") if high.IsNil() { high = oldLen } b.createSliceBoundsCheck(oldLen, low, high, high, lowType, highType, maxType) // Truncate ints bigger than uintptr. This is after the bounds // check so it's safe. 
if b.targetData.TypeAllocSize(low.Type()) > b.targetData.TypeAllocSize(b.uintptrType) { low = b.CreateTrunc(low, b.uintptrType, "") } if b.targetData.TypeAllocSize(high.Type()) > b.targetData.TypeAllocSize(b.uintptrType) { high = b.CreateTrunc(high, b.uintptrType, "") } newPtr := b.CreateInBoundsGEP(oldPtr, []llvm.Value{low}, "") newLen := b.CreateSub(high, low, "") str := llvm.Undef(b.getLLVMRuntimeType("_string")) str = b.CreateInsertValue(str, newPtr, 0, "") str = b.CreateInsertValue(str, newLen, 1, "") return str, nil default: return llvm.Value{}, b.makeError(expr.Pos(), "unknown slice type: "+typ.String()) } case *ssa.TypeAssert: return b.createTypeAssert(expr), nil case *ssa.UnOp: return b.createUnOp(expr) default: return llvm.Value{}, b.makeError(expr.Pos(), "todo: unknown expression: "+expr.String()) } } // createBinOp creates a LLVM binary operation (add, sub, mul, etc) for a Go // binary operation. This is almost a direct mapping, but there are some subtle // differences such as the requirement in LLVM IR that both sides must have the // same type, even for bitshifts. Also, signedness in Go is encoded in the type // and is encoded in the operation in LLVM IR: this is important for some // operations such as divide. func (b *builder) createBinOp(op token.Token, typ types.Type, x, y llvm.Value, pos token.Pos) (llvm.Value, error) { switch typ := typ.Underlying().(type) { case *types.Basic: if typ.Info()&types.IsInteger != 0 { // Operations on integers signed := typ.Info()&types.IsUnsigned == 0 switch op { case token.ADD: // + return b.CreateAdd(x, y, ""), nil case token.SUB: // - return b.CreateSub(x, y, ""), nil case token.MUL: // * return b.CreateMul(x, y, ""), nil case token.QUO: // / if signed { return b.CreateSDiv(x, y, ""), nil } else { return b.CreateUDiv(x, y, ""), nil } case token.REM: // % if signed { return b.CreateSRem(x, y, ""), nil } else { return b.CreateURem(x, y, ""), nil } case token.AND: // & return b.CreateAnd(x, y, ""), nil case token.OR: // | return b.CreateOr(x, y, ""), nil case token.XOR: // ^ return b.CreateXor(x, y, ""), nil case token.SHL, token.SHR: sizeX := b.targetData.TypeAllocSize(x.Type()) sizeY := b.targetData.TypeAllocSize(y.Type()) if sizeX > sizeY { // x and y must have equal sizes, make Y bigger in this case. // y is unsigned, this has been checked by the Go type checker. y = b.CreateZExt(y, x.Type(), "") } else if sizeX < sizeY { // What about shifting more than the integer width? // I'm not entirely sure what the Go spec is on that, but as // Intel CPUs have undefined behavior when shifting more // than the integer width I'm assuming it is also undefined // in Go. y = b.CreateTrunc(y, x.Type(), "") } switch op { case token.SHL: // << return b.CreateShl(x, y, ""), nil case token.SHR: // >> if signed { return b.CreateAShr(x, y, ""), nil } else { return b.CreateLShr(x, y, ""), nil } default: panic("unreachable") } case token.EQL: // == return b.CreateICmp(llvm.IntEQ, x, y, ""), nil case token.NEQ: // != return b.CreateICmp(llvm.IntNE, x, y, ""), nil case token.AND_NOT: // &^ // Go specific. 
Calculate "and not" with x & (~y) inv := b.CreateNot(y, "") // ~y return b.CreateAnd(x, inv, ""), nil case token.LSS: // < if signed { return b.CreateICmp(llvm.IntSLT, x, y, ""), nil } else { return b.CreateICmp(llvm.IntULT, x, y, ""), nil } case token.LEQ: // <= if signed { return b.CreateICmp(llvm.IntSLE, x, y, ""), nil } else { return b.CreateICmp(llvm.IntULE, x, y, ""), nil } case token.GTR: // > if signed { return b.CreateICmp(llvm.IntSGT, x, y, ""), nil } else { return b.CreateICmp(llvm.IntUGT, x, y, ""), nil } case token.GEQ: // >= if signed { return b.CreateICmp(llvm.IntSGE, x, y, ""), nil } else { return b.CreateICmp(llvm.IntUGE, x, y, ""), nil } default: panic("binop on integer: " + op.String()) } } else if typ.Info()&types.IsFloat != 0 { // Operations on floats switch op { case token.ADD: // + return b.CreateFAdd(x, y, ""), nil case token.SUB: // - return b.CreateFSub(x, y, ""), nil case token.MUL: // * return b.CreateFMul(x, y, ""), nil case token.QUO: // / return b.CreateFDiv(x, y, ""), nil case token.EQL: // == return b.CreateFCmp(llvm.FloatUEQ, x, y, ""), nil case token.NEQ: // != return b.CreateFCmp(llvm.FloatUNE, x, y, ""), nil case token.LSS: // < return b.CreateFCmp(llvm.FloatULT, x, y, ""), nil case token.LEQ: // <= return b.CreateFCmp(llvm.FloatULE, x, y, ""), nil case token.GTR: // > return b.CreateFCmp(llvm.FloatUGT, x, y, ""), nil case token.GEQ: // >= return b.CreateFCmp(llvm.FloatUGE, x, y, ""), nil default: panic("binop on float: " + op.String()) } } else if typ.Info()&types.IsComplex != 0 { r1 := b.CreateExtractValue(x, 0, "r1") r2 := b.CreateExtractValue(y, 0, "r2") i1 := b.CreateExtractValue(x, 1, "i1") i2 := b.CreateExtractValue(y, 1, "i2") switch op { case token.EQL: // == req := b.CreateFCmp(llvm.FloatOEQ, r1, r2, "") ieq := b.CreateFCmp(llvm.FloatOEQ, i1, i2, "") return b.CreateAnd(req, ieq, ""), nil case token.NEQ: // != req := b.CreateFCmp(llvm.FloatOEQ, r1, r2, "") ieq := b.CreateFCmp(llvm.FloatOEQ, i1, i2, "") neq := b.CreateAnd(req, ieq, "") return b.CreateNot(neq, ""), nil case token.ADD, token.SUB: var r, i llvm.Value switch op { case token.ADD: r = b.CreateFAdd(r1, r2, "") i = b.CreateFAdd(i1, i2, "") case token.SUB: r = b.CreateFSub(r1, r2, "") i = b.CreateFSub(i1, i2, "") default: panic("unreachable") } cplx := llvm.Undef(b.ctx.StructType([]llvm.Type{r.Type(), i.Type()}, false)) cplx = b.CreateInsertValue(cplx, r, 0, "") cplx = b.CreateInsertValue(cplx, i, 1, "") return cplx, nil case token.MUL: // Complex multiplication follows the current implementation in // the Go compiler, with the difference that complex64 // components are not first scaled up to float64 for increased // precision. // https://github.com/golang/go/blob/170b8b4b12be50eeccbcdadb8523fb4fc670ca72/src/cmd/compile/internal/gc/ssa.go#L2089-L2127 // The implementation is as follows: // r := real(a) * real(b) - imag(a) * imag(b) // i := real(a) * imag(b) + imag(a) * real(b) // Note: this does NOT follow the C11 specification (annex G): // http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1548.pdf#page=549 // See https://github.com/golang/go/issues/29846 for a related // discussion. r := b.CreateFSub(b.CreateFMul(r1, r2, ""), b.CreateFMul(i1, i2, ""), "") i := b.CreateFAdd(b.CreateFMul(r1, i2, ""), b.CreateFMul(i1, r2, ""), "") cplx := llvm.Undef(b.ctx.StructType([]llvm.Type{r.Type(), i.Type()}, false)) cplx = b.CreateInsertValue(cplx, r, 0, "") cplx = b.CreateInsertValue(cplx, i, 1, "") return cplx, nil case token.QUO: // Complex division. 
// Do this in a library call because it's too difficult to do // inline. switch r1.Type().TypeKind() { case llvm.FloatTypeKind: return b.createRuntimeCall("complex64div", []llvm.Value{x, y}, ""), nil case llvm.DoubleTypeKind: return b.createRuntimeCall("complex128div", []llvm.Value{x, y}, ""), nil default: panic("unexpected complex type") } default: panic("binop on complex: " + op.String()) } } else if typ.Info()&types.IsBoolean != 0 { // Operations on booleans switch op { case token.EQL: // == return b.CreateICmp(llvm.IntEQ, x, y, ""), nil case token.NEQ: // != return b.CreateICmp(llvm.IntNE, x, y, ""), nil default: panic("binop on bool: " + op.String()) } } else if typ.Kind() == types.UnsafePointer { // Operations on pointers switch op { case token.EQL: // == return b.CreateICmp(llvm.IntEQ, x, y, ""), nil case token.NEQ: // != return b.CreateICmp(llvm.IntNE, x, y, ""), nil default: panic("binop on pointer: " + op.String()) } } else if typ.Info()&types.IsString != 0 { // Operations on strings switch op { case token.ADD: // + return b.createRuntimeCall("stringConcat", []llvm.Value{x, y}, ""), nil case token.EQL: // == return b.createRuntimeCall("stringEqual", []llvm.Value{x, y}, ""), nil case token.NEQ: // != result := b.createRuntimeCall("stringEqual", []llvm.Value{x, y}, "") return b.CreateNot(result, ""), nil case token.LSS: // < return b.createRuntimeCall("stringLess", []llvm.Value{x, y}, ""), nil case token.LEQ: // <= result := b.createRuntimeCall("stringLess", []llvm.Value{y, x}, "") return b.CreateNot(result, ""), nil case token.GTR: // > result := b.createRuntimeCall("stringLess", []llvm.Value{x, y}, "") return b.CreateNot(result, ""), nil case token.GEQ: // >= return b.createRuntimeCall("stringLess", []llvm.Value{y, x}, ""), nil default: panic("binop on string: " + op.String()) } } else { return llvm.Value{}, b.makeError(pos, "todo: unknown basic type in binop: "+typ.String()) } case *types.Signature: // Get raw scalars from the function value and compare those. // Function values may be implemented in multiple ways, but they all // have some way of getting a scalar value identifying the function. // This is safe: function pointers are generally not comparable // against each other, only against nil. So one of these has to be nil. x = b.extractFuncScalar(x) y = b.extractFuncScalar(y) switch op { case token.EQL: // == return b.CreateICmp(llvm.IntEQ, x, y, ""), nil case token.NEQ: // != return b.CreateICmp(llvm.IntNE, x, y, ""), nil default: return llvm.Value{}, b.makeError(pos, "binop on signature: "+op.String()) } case *types.Interface: switch op { case token.EQL, token.NEQ: // ==, != result := b.createRuntimeCall("interfaceEqual", []llvm.Value{x, y}, "") if op == token.NEQ { result = b.CreateNot(result, "") } return result, nil default: return llvm.Value{}, b.makeError(pos, "binop on interface: "+op.String()) } case *types.Chan, *types.Map, *types.Pointer: // Maps are in general not comparable, but can be compared against nil // (which is a nil pointer). This means they can be trivially compared // by treating them as a pointer. // Channels behave as pointers in that they are equal as long as they // are created with the same call to make or if both are nil. 
switch op { case token.EQL: // == return b.CreateICmp(llvm.IntEQ, x, y, ""), nil case token.NEQ: // != return b.CreateICmp(llvm.IntNE, x, y, ""), nil default: return llvm.Value{}, b.makeError(pos, "todo: binop on pointer: "+op.String()) } case *types.Slice: // Slices are in general not comparable, but can be compared against // nil. Assume at least one of them is nil to make the code easier. xPtr := b.CreateExtractValue(x, 0, "") yPtr := b.CreateExtractValue(y, 0, "") switch op { case token.EQL: // == return b.CreateICmp(llvm.IntEQ, xPtr, yPtr, ""), nil case token.NEQ: // != return b.CreateICmp(llvm.IntNE, xPtr, yPtr, ""), nil default: return llvm.Value{}, b.makeError(pos, "todo: binop on slice: "+op.String()) } case *types.Array: // Compare each array element and combine the result. From the spec: // Array values are comparable if values of the array element type // are comparable. Two array values are equal if their corresponding // elements are equal. result := llvm.ConstInt(b.ctx.Int1Type(), 1, true) for i := 0; i < int(typ.Len()); i++ { xField := b.CreateExtractValue(x, i, "") yField := b.CreateExtractValue(y, i, "") fieldEqual, err := b.createBinOp(token.EQL, typ.Elem(), xField, yField, pos) if err != nil { return llvm.Value{}, err } result = b.CreateAnd(result, fieldEqual, "") } switch op { case token.EQL: // == return result, nil case token.NEQ: // != return b.CreateNot(result, ""), nil default: return llvm.Value{}, b.makeError(pos, "unknown: binop on struct: "+op.String()) } case *types.Struct: // Compare each struct field and combine the result. From the spec: // Struct values are comparable if all their fields are comparable. // Two struct values are equal if their corresponding non-blank // fields are equal. result := llvm.ConstInt(b.ctx.Int1Type(), 1, true) for i := 0; i < typ.NumFields(); i++ { if typ.Field(i).Name() == "_" { // skip blank fields continue } fieldType := typ.Field(i).Type() xField := b.CreateExtractValue(x, i, "") yField := b.CreateExtractValue(y, i, "") fieldEqual, err := b.createBinOp(token.EQL, fieldType, xField, yField, pos) if err != nil { return llvm.Value{}, err } result = b.CreateAnd(result, fieldEqual, "") } switch op { case token.EQL: // == return result, nil case token.NEQ: // != return b.CreateNot(result, ""), nil default: return llvm.Value{}, b.makeError(pos, "unknown: binop on struct: "+op.String()) } default: return llvm.Value{}, b.makeError(pos, "todo: binop type: "+typ.String()) } } // createConst creates a LLVM constant value from a Go constant. 
func (b *builder) createConst(prefix string, expr *ssa.Const) llvm.Value { switch typ := expr.Type().Underlying().(type) { case *types.Basic: llvmType := b.getLLVMType(typ) if typ.Info()&types.IsBoolean != 0 { b := constant.BoolVal(expr.Value) n := uint64(0) if b { n = 1 } return llvm.ConstInt(llvmType, n, false) } else if typ.Info()&types.IsString != 0 { str := constant.StringVal(expr.Value) strLen := llvm.ConstInt(b.uintptrType, uint64(len(str)), false) objname := prefix + "$string" global := llvm.AddGlobal(b.mod, llvm.ArrayType(b.ctx.Int8Type(), len(str)), objname) global.SetInitializer(b.ctx.ConstString(str, false)) global.SetLinkage(llvm.InternalLinkage) global.SetGlobalConstant(true) global.SetUnnamedAddr(true) zero := llvm.ConstInt(b.ctx.Int32Type(), 0, false) strPtr := b.CreateInBoundsGEP(global, []llvm.Value{zero, zero}, "") strObj := llvm.ConstNamedStruct(b.getLLVMRuntimeType("_string"), []llvm.Value{strPtr, strLen}) return strObj } else if typ.Kind() == types.UnsafePointer { if !expr.IsNil() { value, _ := constant.Uint64Val(expr.Value) return llvm.ConstIntToPtr(llvm.ConstInt(b.uintptrType, value, false), b.i8ptrType) } return llvm.ConstNull(b.i8ptrType) } else if typ.Info()&types.IsUnsigned != 0 { n, _ := constant.Uint64Val(expr.Value) return llvm.ConstInt(llvmType, n, false) } else if typ.Info()&types.IsInteger != 0 { // signed n, _ := constant.Int64Val(expr.Value) return llvm.ConstInt(llvmType, uint64(n), true) } else if typ.Info()&types.IsFloat != 0 { n, _ := constant.Float64Val(expr.Value) return llvm.ConstFloat(llvmType, n) } else if typ.Kind() == types.Complex64 { r := b.createConst(prefix, ssa.NewConst(constant.Real(expr.Value), types.Typ[types.Float32])) i := b.createConst(prefix, ssa.NewConst(constant.Imag(expr.Value), types.Typ[types.Float32])) cplx := llvm.Undef(b.ctx.StructType([]llvm.Type{b.ctx.FloatType(), b.ctx.FloatType()}, false)) cplx = b.CreateInsertValue(cplx, r, 0, "") cplx = b.CreateInsertValue(cplx, i, 1, "") return cplx } else if typ.Kind() == types.Complex128 { r := b.createConst(prefix, ssa.NewConst(constant.Real(expr.Value), types.Typ[types.Float64])) i := b.createConst(prefix, ssa.NewConst(constant.Imag(expr.Value), types.Typ[types.Float64])) cplx := llvm.Undef(b.ctx.StructType([]llvm.Type{b.ctx.DoubleType(), b.ctx.DoubleType()}, false)) cplx = b.CreateInsertValue(cplx, r, 0, "") cplx = b.CreateInsertValue(cplx, i, 1, "") return cplx } else { panic("unknown constant of basic type: " + expr.String()) } case *types.Chan: if expr.Value != nil { panic("expected nil chan constant") } return llvm.ConstNull(b.getLLVMType(expr.Type())) case *types.Signature: if expr.Value != nil { panic("expected nil signature constant") } return llvm.ConstNull(b.getLLVMType(expr.Type())) case *types.Interface: if expr.Value != nil { panic("expected nil interface constant") } // Create a generic nil interface with no dynamic type (typecode=0). 
fields := []llvm.Value{ llvm.ConstInt(b.uintptrType, 0, false), llvm.ConstPointerNull(b.i8ptrType), } return llvm.ConstNamedStruct(b.getLLVMRuntimeType("_interface"), fields) case *types.Pointer: if expr.Value != nil { panic("expected nil pointer constant") } return llvm.ConstPointerNull(b.getLLVMType(typ)) case *types.Slice: if expr.Value != nil { panic("expected nil slice constant") } elemType := b.getLLVMType(typ.Elem()) llvmPtr := llvm.ConstPointerNull(llvm.PointerType(elemType, 0)) llvmLen := llvm.ConstInt(b.uintptrType, 0, false) slice := b.ctx.ConstStruct([]llvm.Value{ llvmPtr, // backing array llvmLen, // len llvmLen, // cap }, false) return slice case *types.Map: if !expr.IsNil() { // I believe this is not allowed by the Go spec. panic("non-nil map constant") } llvmType := b.getLLVMType(typ) return llvm.ConstNull(llvmType) default: panic("unknown constant: " + expr.String()) } } // createConvert creates a Go type conversion instruction. func (b *builder) createConvert(typeFrom, typeTo types.Type, value llvm.Value, pos token.Pos) (llvm.Value, error) { llvmTypeFrom := value.Type() llvmTypeTo := b.getLLVMType(typeTo) // Conversion between unsafe.Pointer and uintptr. isPtrFrom := isPointer(typeFrom.Underlying()) isPtrTo := isPointer(typeTo.Underlying()) if isPtrFrom && !isPtrTo { return b.CreatePtrToInt(value, llvmTypeTo, ""), nil } else if !isPtrFrom && isPtrTo { if !value.IsABinaryOperator().IsNil() && value.InstructionOpcode() == llvm.Add { // This is probably a pattern like the following: // unsafe.Pointer(uintptr(ptr) + index) // Used in functions like memmove etc. for lack of pointer // arithmetic. Convert it to real pointer arithmatic here. ptr := value.Operand(0) index := value.Operand(1) if !index.IsAPtrToIntInst().IsNil() { // Swap if necessary, if ptr and index are reversed. ptr, index = index, ptr } if !ptr.IsAPtrToIntInst().IsNil() { origptr := ptr.Operand(0) if origptr.Type() == b.i8ptrType { // This pointer can be calculated from the original // ptrtoint instruction with a GEP. The leftover inttoptr // instruction is trivial to optimize away. // Making it an in bounds GEP even though it's easy to // create a GEP that is not in bounds. However, we're // talking about unsafe code here so the programmer has to // be careful anyway. return b.CreateInBoundsGEP(origptr, []llvm.Value{index}, ""), nil } } } return b.CreateIntToPtr(value, llvmTypeTo, ""), nil } // Conversion between pointers and unsafe.Pointer. if isPtrFrom && isPtrTo { return b.CreateBitCast(value, llvmTypeTo, ""), nil } switch typeTo := typeTo.Underlying().(type) { case *types.Basic: sizeFrom := b.targetData.TypeAllocSize(llvmTypeFrom) if typeTo.Info()&types.IsString != 0 { switch typeFrom := typeFrom.Underlying().(type) { case *types.Basic: // Assume a Unicode code point, as that is the only possible // value here. // Cast to an i32 value as expected by // runtime.stringFromUnicode. 
if sizeFrom > 4 { value = b.CreateTrunc(value, b.ctx.Int32Type(), "") } else if sizeFrom < 4 && typeTo.Info()&types.IsUnsigned != 0 { value = b.CreateZExt(value, b.ctx.Int32Type(), "") } else if sizeFrom < 4 { value = b.CreateSExt(value, b.ctx.Int32Type(), "") } return b.createRuntimeCall("stringFromUnicode", []llvm.Value{value}, ""), nil case *types.Slice: switch typeFrom.Elem().(*types.Basic).Kind() { case types.Byte: return b.createRuntimeCall("stringFromBytes", []llvm.Value{value}, ""), nil case types.Rune: return b.createRuntimeCall("stringFromRunes", []llvm.Value{value}, ""), nil default: return llvm.Value{}, b.makeError(pos, "todo: convert to string: "+typeFrom.String()) } default: return llvm.Value{}, b.makeError(pos, "todo: convert to string: "+typeFrom.String()) } } typeFrom := typeFrom.Underlying().(*types.Basic) sizeTo := b.targetData.TypeAllocSize(llvmTypeTo) if typeFrom.Info()&types.IsInteger != 0 && typeTo.Info()&types.IsInteger != 0 { // Conversion between two integers. if sizeFrom > sizeTo { return b.CreateTrunc(value, llvmTypeTo, ""), nil } else if typeFrom.Info()&types.IsUnsigned != 0 { // if unsigned return b.CreateZExt(value, llvmTypeTo, ""), nil } else { // if signed return b.CreateSExt(value, llvmTypeTo, ""), nil } } if typeFrom.Info()&types.IsFloat != 0 && typeTo.Info()&types.IsFloat != 0 { // Conversion between two floats. if sizeFrom > sizeTo { return b.CreateFPTrunc(value, llvmTypeTo, ""), nil } else if sizeFrom < sizeTo { return b.CreateFPExt(value, llvmTypeTo, ""), nil } else { return value, nil } } if typeFrom.Info()&types.IsFloat != 0 && typeTo.Info()&types.IsInteger != 0 { // Conversion from float to int. if typeTo.Info()&types.IsUnsigned != 0 { // if unsigned return b.CreateFPToUI(value, llvmTypeTo, ""), nil } else { // if signed return b.CreateFPToSI(value, llvmTypeTo, ""), nil } } if typeFrom.Info()&types.IsInteger != 0 && typeTo.Info()&types.IsFloat != 0 { // Conversion from int to float. if typeFrom.Info()&types.IsUnsigned != 0 { // if unsigned return b.CreateUIToFP(value, llvmTypeTo, ""), nil } else { // if signed return b.CreateSIToFP(value, llvmTypeTo, ""), nil } } if typeFrom.Kind() == types.Complex128 && typeTo.Kind() == types.Complex64 { // Conversion from complex128 to complex64. r := b.CreateExtractValue(value, 0, "real.f64") i := b.CreateExtractValue(value, 1, "imag.f64") r = b.CreateFPTrunc(r, b.ctx.FloatType(), "real.f32") i = b.CreateFPTrunc(i, b.ctx.FloatType(), "imag.f32") cplx := llvm.Undef(b.ctx.StructType([]llvm.Type{b.ctx.FloatType(), b.ctx.FloatType()}, false)) cplx = b.CreateInsertValue(cplx, r, 0, "") cplx = b.CreateInsertValue(cplx, i, 1, "") return cplx, nil } if typeFrom.Kind() == types.Complex64 && typeTo.Kind() == types.Complex128 { // Conversion from complex64 to complex128. 
r := b.CreateExtractValue(value, 0, "real.f32") i := b.CreateExtractValue(value, 1, "imag.f32") r = b.CreateFPExt(r, b.ctx.DoubleType(), "real.f64") i = b.CreateFPExt(i, b.ctx.DoubleType(), "imag.f64") cplx := llvm.Undef(b.ctx.StructType([]llvm.Type{b.ctx.DoubleType(), b.ctx.DoubleType()}, false)) cplx = b.CreateInsertValue(cplx, r, 0, "") cplx = b.CreateInsertValue(cplx, i, 1, "") return cplx, nil } return llvm.Value{}, b.makeError(pos, "todo: convert: basic non-integer type: "+typeFrom.String()+" -> "+typeTo.String()) case *types.Slice: if basic, ok := typeFrom.(*types.Basic); !ok || basic.Info()&types.IsString == 0 { panic("can only convert from a string to a slice") } elemType := typeTo.Elem().Underlying().(*types.Basic) // must be byte or rune switch elemType.Kind() { case types.Byte: return b.createRuntimeCall("stringToBytes", []llvm.Value{value}, ""), nil case types.Rune: return b.createRuntimeCall("stringToRunes", []llvm.Value{value}, ""), nil default: panic("unexpected type in string to slice conversion") } default: return llvm.Value{}, b.makeError(pos, "todo: convert "+typeTo.String()+" <- "+typeFrom.String()) } } // createUnOp creates LLVM IR for a given Go unary operation. // Most unary operators are pretty simple, such as the not and minus operator // which can all be directly lowered to IR. However, there is also the channel // receive operator which is handled in the runtime directly. func (b *builder) createUnOp(unop *ssa.UnOp) (llvm.Value, error) { x := b.getValue(unop.X) switch unop.Op { case token.NOT: // !x return b.CreateNot(x, ""), nil case token.SUB: // -x if typ, ok := unop.X.Type().Underlying().(*types.Basic); ok { if typ.Info()&types.IsInteger != 0 { return b.CreateSub(llvm.ConstInt(x.Type(), 0, false), x, ""), nil } else if typ.Info()&types.IsFloat != 0 { return b.CreateFSub(llvm.ConstFloat(x.Type(), 0.0), x, ""), nil } else { return llvm.Value{}, b.makeError(unop.Pos(), "todo: unknown basic type for negate: "+typ.String()) } } else { return llvm.Value{}, b.makeError(unop.Pos(), "todo: unknown type for negate: "+unop.X.Type().Underlying().String()) } case token.MUL: // *x, dereference pointer unop.X.Type().Underlying().(*types.Pointer).Elem() if b.targetData.TypeAllocSize(x.Type().ElementType()) == 0 { // zero-length data return llvm.ConstNull(x.Type().ElementType()), nil } else if strings.HasSuffix(unop.X.String(), "$funcaddr") { // CGo function pointer. The cgo part has rewritten CGo function // pointers as stub global variables of the form: // var C.add unsafe.Pointer // Instead of a load from the global, create a bitcast of the // function pointer itself. globalName := b.getGlobalInfo(unop.X.(*ssa.Global)).linkName name := globalName[:len(globalName)-len("$funcaddr")] fn := b.mod.NamedFunction(name) if fn.IsNil() { return llvm.Value{}, b.makeError(unop.Pos(), "cgo function not found: "+name) } return b.CreateBitCast(fn, b.i8ptrType, ""), nil } else { b.createNilCheck(unop.X, x, "deref") load := b.CreateLoad(x, "") return load, nil } case token.XOR: // ^x, toggle all bits in integer return b.CreateXor(x, llvm.ConstInt(x.Type(), ^uint64(0), false), ""), nil case token.ARROW: // <-x, receive from channel return b.createChanRecv(unop), nil default: return llvm.Value{}, b.makeError(unop.Pos(), "todo: unknown unop") } }
1
9,256
Is this really necessary? I would expect these types to be included with the loop below.
tinygo-org-tinygo
go
@@ -41,8 +41,17 @@ namespace OpenTelemetry.Exporter.Jaeger.Implementation traceId.CopyTo(bytes); - this.High = BitConverter.ToInt64(bytes, 0); - this.Low = BitConverter.ToInt64(bytes, 8); + if (BitConverter.IsLittleEndian) + { + Array.Reverse(bytes); + this.High = BitConverter.ToInt64(bytes, 8); + this.Low = BitConverter.ToInt64(bytes, 0); + } + else + { + this.High = BitConverter.ToInt64(bytes, 0); + this.Low = BitConverter.ToInt64(bytes, 8); + } } public long High { get; set; }
1
// <copyright file="Int128.cs" company="OpenTelemetry Authors"> // Copyright 2018, OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // </copyright> using System; using System.Diagnostics; namespace OpenTelemetry.Exporter.Jaeger.Implementation { public struct Int128 { public static Int128 Empty = default; private const int SpanIdBytes = 8; private const int TraceIdBytes = 16; public Int128(ActivitySpanId spanId) { var bytes = new byte[SpanIdBytes]; spanId.CopyTo(bytes); this.High = 0; this.Low = BitConverter.ToInt64(bytes, 0); } public Int128(ActivityTraceId traceId) { var bytes = new byte[TraceIdBytes]; traceId.CopyTo(bytes); this.High = BitConverter.ToInt64(bytes, 0); this.Low = BitConverter.ToInt64(bytes, 8); } public long High { get; set; } public long Low { get; set; } } }
1
12,536
You could use Span<byte> and its magic of typecasting to int for better efficiency
open-telemetry-opentelemetry-dotnet
.cs
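A minimal sketch of the Span<byte> idea raised in the review comment above (an assumption about what the reviewer means, not part of the actual patch or the OpenTelemetry code base): copy the trace id into a stack-allocated span and read both halves big-endian, which avoids the temporary byte[] and the Array.Reverse/endianness branch from the diff.

using System;
using System.Buffers.Binary;
using System.Diagnostics;

namespace Int128SpanSketch
{
    // Hypothetical alternative constructor; the type name and layout are illustrative only.
    public readonly struct Int128Sketch
    {
        public Int128Sketch(ActivityTraceId traceId)
        {
            // 16 bytes on the stack instead of a heap-allocated byte[16].
            Span<byte> bytes = stackalloc byte[16];
            traceId.CopyTo(bytes);
            // ReadInt64BigEndian is endianness-aware, so no IsLittleEndian branch is needed.
            this.High = BinaryPrimitives.ReadInt64BigEndian(bytes.Slice(0, 8));
            this.Low = BinaryPrimitives.ReadInt64BigEndian(bytes.Slice(8, 8));
        }

        public long High { get; }
        public long Low { get; }
    }
}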
@@ -87,8 +87,9 @@ Object.keys(rulesGroupByDocumentFragment).forEach(key => { return; } - // check if transform style exists - const transformStyleValue = cssRule.style.transform || false; + // check if transform style exists (don't forget vendor prefixes) + const transformStyleValue = + cssRule.style.transform || cssRule.style.webkitTransform || false; // transformStyleValue -> is the value applied to property // eg: "rotate(-90deg)" if (!transformStyleValue) {
1
/* global context */ // extract asset of type `cssom` from context const { cssom = undefined } = context || {}; // if there is no cssom <- return incomplete if (!cssom || !cssom.length) { return undefined; } // combine all rules from each sheet into one array const rulesGroupByDocumentFragment = cssom.reduce( (out, { sheet, root, shadowId }) => { // construct key based on shadowId or top level document const key = shadowId ? shadowId : 'topDocument'; // init property if does not exist if (!out[key]) { out[key] = { root, rules: [] }; } // check if sheet and rules exist if (!sheet || !sheet.cssRules) { //return return out; } const rules = Array.from(sheet.cssRules); // add rules into same document fragment out[key].rules = out[key].rules.concat(rules); //return return out; }, {} ); // Note: // Some of these functions can be extracted to utils, but best to do it when other cssom rules are authored. // extract styles for each orientation rule to verify transform is applied let isLocked = false; let relatedElements = []; Object.keys(rulesGroupByDocumentFragment).forEach(key => { const { root, rules } = rulesGroupByDocumentFragment[key]; // filter media rules from all rules const mediaRules = rules.filter(r => { // doc: https://developer.mozilla.org/en-US/docs/Web/API/CSSMediaRule // type value of 4 (CSSRule.MEDIA_RULE) pertains to media rules return r.type === 4; }); if (!mediaRules || !mediaRules.length) { return; } // narrow down to media rules with `orientation` as a keyword const orientationRules = mediaRules.filter(r => { // conditionText exists on media rules, which contains only the @media condition // eg: screen and (max-width: 767px) and (min-width: 320px) and (orientation: landscape) const cssText = r.cssText; return ( /orientation:\s+landscape/i.test(cssText) || /orientation:\s+portrait/i.test(cssText) ); }); if (!orientationRules || !orientationRules.length) { return; } orientationRules.forEach(r => { // r.cssRules is a RULEList and not an array if (!r.cssRules.length) { return; } // cssRules ia a list of rules // a media query has framents of css styles applied to various selectors // iteration through cssRules and see if orientation lock has been applied Array.from(r.cssRules).forEach(cssRule => { // ensure selectorText exists if (!cssRule.selectorText) { return; } // ensure the given selector has styles declared (non empty selector) if (cssRule.style.length <= 0) { return; } // check if transform style exists const transformStyleValue = cssRule.style.transform || false; // transformStyleValue -> is the value applied to property // eg: "rotate(-90deg)" if (!transformStyleValue) { return; } const rotate = transformStyleValue.match(/rotate\(([^)]+)deg\)/); const deg = parseInt((rotate && rotate[1]) || 0); const locked = deg % 90 === 0 && deg % 180 !== 0; // if locked // and not root HTML // preserve as relatedNodes if (locked && cssRule.selectorText.toUpperCase() !== 'HTML') { const selector = cssRule.selectorText; const elms = Array.from(root.querySelectorAll(selector)); if (elms && elms.length) { relatedElements = relatedElements.concat(elms); } } // set locked boolean isLocked = locked; }); }); }); if (!isLocked) { // return return true; } // set relatedNodes if (relatedElements.length) { this.relatedNodes(relatedElements); } // return fail return false;
1
14,518
Looks like you've covered `-webkit-transform`, but what about `-ms-transform`?
dequelabs-axe-core
js
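A hedged sketch of the prefix-coverage point made in the review comment above (the helper name is made up, and whether the -ms- prefix is actually needed depends on the browsers axe-core supports). CSSOM exposes vendor-prefixed properties in camel case, so -ms-transform is read as style.msTransform alongside style.webkitTransform:

// Illustrative helper: check the unprefixed property first, then vendor prefixes.
function getTransformValue(style) {
  return style.transform || style.webkitTransform || style.msTransform || false;
}

// usage inside the rule iteration:
// const transformStyleValue = getTransformValue(cssRule.style);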
@@ -241,15 +241,10 @@ namespace MvvmCross.Droid.Support.V7.RecyclerView protected virtual void OnItemsSourceCollectionChanged(object sender, NotifyCollectionChangedEventArgs e) { - if (Looper.MainLooper == Looper.MyLooper()) - { - NotifyDataSetChanged(e); - } - else - { - var h = new Handler(Looper.MainLooper); - h.Post(() => NotifyDataSetChanged(e)); - } + if (Looper.MainLooper != Looper.MyLooper()) + MvxBindingLog.Error("All collection changes "); + + NotifyDataSetChanged(e); } public virtual void NotifyDataSetChanged(NotifyCollectionChangedEventArgs e)
1
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MS-PL license. // See the LICENSE file in the project root for more information. using System; using System.Collections; using System.Collections.Specialized; using System.Windows.Input; using Android.OS; using Android.Runtime; using Android.Views; using Android.Widget; using MvvmCross.Binding; using MvvmCross.Binding.Attributes; using MvvmCross.Binding.Extensions; using MvvmCross.Droid.Support.V7.RecyclerView.ItemTemplates; using MvvmCross.Droid.Support.V7.RecyclerView.Model; using MvvmCross.Exceptions; using MvvmCross.Logging; using MvvmCross.Platforms.Android.Binding.BindingContext; using MvvmCross.WeakSubscription; using Object = Java.Lang.Object; namespace MvvmCross.Droid.Support.V7.RecyclerView { [Register("mvvmcross.droid.support.v7.recyclerview.MvxRecyclerAdapter")] public class MvxRecyclerAdapter : Android.Support.V7.Widget.RecyclerView.Adapter, IMvxRecyclerAdapter, IMvxRecyclerAdapterBindableHolder { private readonly IMvxAndroidBindingContext _bindingContext; private ICommand _itemClick, _itemLongClick; private IEnumerable _itemsSource; private IDisposable _subscription; private IMvxTemplateSelector _itemTemplateSelector; protected IMvxAndroidBindingContext BindingContext => _bindingContext; public MvxRecyclerAdapter() : this(MvxAndroidBindingContextHelpers.Current()) { } public MvxRecyclerAdapter(IMvxAndroidBindingContext bindingContext) { _bindingContext = bindingContext; } public MvxRecyclerAdapter(IntPtr javaReference, JniHandleOwnership transfer) : base(javaReference, transfer) { } public bool ReloadOnAllItemsSourceSets { get; set; } public ICommand ItemClick { get { return _itemClick; } set { if (ReferenceEquals(_itemClick, value)) { return; } if (_itemClick != null) { MvxAndroidLog.Instance.Warn("Changing ItemClick may cause inconsistencies where some items still call the old command."); } _itemClick = value; } } public ICommand ItemLongClick { get { return _itemLongClick; } set { if (ReferenceEquals(_itemLongClick, value)) { return; } if (_itemLongClick != null) { MvxAndroidLog.Instance.Warn("Changing ItemLongClick may cause inconsistencies where some items still call the old command."); } _itemLongClick = value; } } [MvxSetToNullAfterBinding] public virtual IEnumerable ItemsSource { get { return _itemsSource; } set { SetItemsSource(value); } } public virtual IMvxTemplateSelector ItemTemplateSelector { get { return _itemTemplateSelector; } set { if (ReferenceEquals(_itemTemplateSelector, value)) return; _itemTemplateSelector = value; // since the template selector has changed then let's force the list to redisplay by firing NotifyDataSetChanged() if (_itemsSource != null) NotifyDataSetChanged(); } } public override void OnViewAttachedToWindow(Object holder) { base.OnViewAttachedToWindow(holder); var viewHolder = (IMvxRecyclerViewHolder)holder; viewHolder.OnAttachedToWindow(); } public override void OnViewDetachedFromWindow(Object holder) { base.OnViewDetachedFromWindow(holder); var viewHolder = (IMvxRecyclerViewHolder)holder; viewHolder.OnDetachedFromWindow(); } public override void OnViewRecycled(Object holder) { base.OnViewRecycled(holder); var viewHolder = (IMvxRecyclerViewHolder)holder; viewHolder.OnViewRecycled(); } public override Android.Support.V7.Widget.RecyclerView.ViewHolder OnCreateViewHolder(ViewGroup parent, int viewType) { var itemBindingContext = new MvxAndroidBindingContext(parent.Context, BindingContext.LayoutInflaterHolder); var vh 
= new MvxRecyclerViewHolder(InflateViewForHolder(parent, viewType, itemBindingContext), itemBindingContext) { Click = ItemClick, LongClick = ItemLongClick, Id = ItemTemplateSelector.GetItemLayoutId(viewType) }; return vh; } public override int GetItemViewType(int position) { var itemAtPosition = GetItem(position); return ItemTemplateSelector.GetItemViewType(itemAtPosition); } protected virtual View InflateViewForHolder(ViewGroup parent, int viewType, IMvxAndroidBindingContext bindingContext) { var layoutId = ItemTemplateSelector.GetItemLayoutId(viewType); return bindingContext.BindingInflate(layoutId, parent, false); } public override void OnBindViewHolder(Android.Support.V7.Widget.RecyclerView.ViewHolder holder, int position) { var dataContext = GetItem(position); if (((IMvxRecyclerViewHolder) holder).Id == global::Android.Resource.Layout.SimpleListItem1) ((TextView) holder.ItemView).Text = dataContext?.ToString(); ((IMvxRecyclerViewHolder)holder).DataContext = dataContext; OnMvxViewHolderBound(new MvxViewHolderBoundEventArgs(position, dataContext, holder)); } public override int ItemCount => _itemsSource.Count(); public virtual object GetItem(int viewPosition) { var itemsSourcePosition = GetItemsSourcePosition(viewPosition); if (itemsSourcePosition >= 0 && itemsSourcePosition < _itemsSource.Count()) { return _itemsSource.ElementAt(itemsSourcePosition); } return null; } protected virtual int GetViewPosition(object item) { var itemsSourcePosition = _itemsSource.GetPosition(item); return GetViewPosition(itemsSourcePosition); } protected virtual int GetViewPosition(int itemsSourcePosition) { return itemsSourcePosition; } protected virtual int GetItemsSourcePosition(int viewPosition) { return viewPosition; } public int ItemTemplateId { get; set; } protected virtual void SetItemsSource(IEnumerable value) { if (ReferenceEquals(_itemsSource, value) && !ReloadOnAllItemsSourceSets) { return; } _subscription?.Dispose(); _subscription = null; _itemsSource = value; if (_itemsSource != null && !(_itemsSource is IList)) { MvxBindingLog.Warning("Binding to IEnumerable rather than IList - this can be inefficient, especially for large lists"); } var newObservable = _itemsSource as INotifyCollectionChanged; if (newObservable != null) { _subscription = newObservable.WeakSubscribe(OnItemsSourceCollectionChanged); } NotifyDataSetChanged(); } protected virtual void OnItemsSourceCollectionChanged(object sender, NotifyCollectionChangedEventArgs e) { if (Looper.MainLooper == Looper.MyLooper()) { NotifyDataSetChanged(e); } else { var h = new Handler(Looper.MainLooper); h.Post(() => NotifyDataSetChanged(e)); } } public virtual void NotifyDataSetChanged(NotifyCollectionChangedEventArgs e) { try { switch (e.Action) { case NotifyCollectionChangedAction.Add: NotifyItemRangeInserted(GetViewPosition(e.NewStartingIndex), e.NewItems.Count); break; case NotifyCollectionChangedAction.Move: for (int i = 0; i < e.NewItems.Count; i++) NotifyItemMoved(GetViewPosition(e.OldStartingIndex + i), GetViewPosition(e.NewStartingIndex + i)); break; case NotifyCollectionChangedAction.Replace: NotifyItemRangeChanged(GetViewPosition(e.NewStartingIndex), e.NewItems.Count); break; case NotifyCollectionChangedAction.Remove: NotifyItemRangeRemoved(GetViewPosition(e.OldStartingIndex), e.OldItems.Count); break; case NotifyCollectionChangedAction.Reset: NotifyDataSetChanged(); break; } } catch (Exception exception) { MvxAndroidLog.Instance.Warn( "Exception masked during Adapter RealNotifyDataSetChanged {0}. 
Are you trying to update your collection from a background task? See http://goo.gl/0nW0L6", exception.ToLongString()); } } public event Action<MvxViewHolderBoundEventArgs> MvxViewHolderBound; protected virtual void OnMvxViewHolderBound(MvxViewHolderBoundEventArgs obj) { MvxViewHolderBound?.Invoke(obj); } } }
1
14,876
Is there something missing from the message here?
MvvmCross-MvvmCross
.cs
@@ -48,3 +48,14 @@ from luigi import event from luigi.event import Event from .tools import range # just makes the tool classes available from command line + + +__all__ = [ + 'task', 'Task', 'Config', 'ExternalTask', 'WrapperTask', 'namespace', + 'target', 'Target', 'File', 'LocalTarget', 'rpc', 'RemoteScheduler', + 'RPCError', 'parameter', 'Parameter', 'DateParameter', 'MonthParameter', + 'YearParameter', 'DateHourParameter', 'DateMinuteParameter', 'range', + 'DateIntervalParameter', 'TimeDeltaParameter', 'IntParameter', + 'FloatParameter', 'BooleanParameter', 'BoolParameter', 'TaskParameter', + 'configuration', 'interface', 'file', 'run', 'build', 'event', 'Event' +]
1
# -*- coding: utf-8 -*- # # Copyright 2012-2015 Spotify AB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ Package containing core luigi functionality. """ from luigi import task from luigi.task import Task, Config, ExternalTask, WrapperTask, namespace from luigi import target from luigi.target import Target from luigi import file # wtf @ naming from luigi.file import File, LocalTarget from luigi import rpc from luigi.rpc import RemoteScheduler, RPCError from luigi import parameter from luigi.parameter import ( Parameter, DateParameter, MonthParameter, YearParameter, DateHourParameter, DateMinuteParameter, DateIntervalParameter, TimeDeltaParameter, IntParameter, FloatParameter, BooleanParameter, BoolParameter, TaskParameter, ) from luigi import configuration from luigi import interface from luigi.interface import run, build from luigi import event from luigi.event import Event from .tools import range # just makes the tool classes available from command line
1
12,824
Hmm, what does this syntax mean?
spotify-luigi
py
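On the question in the review comment above: `__all__` is a module-level list of names that defines what `from luigi import *` exports (and documents the intended public API of the package). A small illustration with a hypothetical package, not luigi code:

# mypkg/__init__.py  (hypothetical package, for illustration)
def run():
    return "run"

def _helper():
    return "private"

__all__ = ['run']  # only names listed here are pulled in by `from mypkg import *`

# elsewhere:
#   from mypkg import *
#   run()      # available
#   _helper()  # NameError: not exported via *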
@@ -98,7 +98,12 @@ func (r *nDCActivityReplicatorImpl) SyncActivity( RunId: request.RunId, } - context, release, err := r.historyCache.getOrCreateWorkflowExecution(ctx, namespaceID, execution) + context, release, err := r.historyCache.getOrCreateWorkflowExecution( + ctx, + namespaceID, + execution, + callerTypeAPI, + ) if err != nil { // for get workflow execution context, with valid run id // err will not be of type EntityNotExistsError
1
// The MIT License // // Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. // // Copyright (c) 2020 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. //go:generate mockgen -copyright_file ../../LICENSE -package $GOPACKAGE -source $GOFILE -destination nDCActivityReplicator_mock.go package history import ( "context" "time" commonpb "go.temporal.io/api/common/v1" "go.temporal.io/api/serviceerror" enumsspb "go.temporal.io/server/api/enums/v1" historyspb "go.temporal.io/server/api/history/v1" "go.temporal.io/server/api/historyservice/v1" persistencespb "go.temporal.io/server/api/persistence/v1" "go.temporal.io/server/common" "go.temporal.io/server/common/clock" "go.temporal.io/server/common/cluster" "go.temporal.io/server/common/log" "go.temporal.io/server/common/log/tag" "go.temporal.io/server/common/persistence" "go.temporal.io/server/common/persistence/versionhistory" "go.temporal.io/server/common/primitives/timestamp" serviceerrors "go.temporal.io/server/common/serviceerror" "go.temporal.io/server/service/history/shard" ) const ( resendMissingEventMessage = "Resend missed sync activity events" resendHigherVersionMessage = "Resend sync activity events due to a higher version received" ) type ( nDCActivityReplicator interface { SyncActivity( ctx context.Context, request *historyservice.SyncActivityRequest, ) error } nDCActivityReplicatorImpl struct { historyCache *historyCache clusterMetadata cluster.Metadata logger log.Logger } ) func newNDCActivityReplicator( shard shard.Context, historyCache *historyCache, logger log.Logger, ) *nDCActivityReplicatorImpl { return &nDCActivityReplicatorImpl{ historyCache: historyCache, clusterMetadata: shard.GetService().GetClusterMetadata(), logger: log.With(logger, tag.ComponentHistoryReplicator), } } func (r *nDCActivityReplicatorImpl) SyncActivity( ctx context.Context, request *historyservice.SyncActivityRequest, ) (retError error) { // sync activity info will only be sent from active side, when // 1. activity has retry policy and activity got started // 2. 
activity heart beat // no sync activity task will be sent when active side fail / timeout activity, // since standby side does not have activity retry timer namespaceID := request.GetNamespaceId() execution := commonpb.WorkflowExecution{ WorkflowId: request.WorkflowId, RunId: request.RunId, } context, release, err := r.historyCache.getOrCreateWorkflowExecution(ctx, namespaceID, execution) if err != nil { // for get workflow execution context, with valid run id // err will not be of type EntityNotExistsError return err } defer func() { release(retError) }() mutableState, err := context.loadWorkflowExecution() if err != nil { if _, ok := err.(*serviceerror.NotFound); !ok { return err } // this can happen if the workflow start event and this sync activity task are out of order // or the target workflow is long gone // the safe solution to this is to throw away the sync activity task // or otherwise, worker attempt will exceeds limit and put this message to DLQ return nil } scheduleID := request.GetScheduledId() shouldApply, err := r.testVersionHistory( namespaceID, execution.GetWorkflowId(), execution.GetRunId(), scheduleID, mutableState, request.GetVersionHistory(), ) if err != nil || !shouldApply { return err } activityInfo, ok := mutableState.GetActivityInfo(scheduleID) if !ok { // this should not retry, can be caused by out of order delivery // since the activity is already finished return nil } if shouldApply := r.testActivity( request.GetVersion(), request.GetAttempt(), timestamp.TimeValue(request.GetLastHeartbeatTime()), activityInfo, ); !shouldApply { return nil } refreshTask := r.testRefreshActivityTimerTaskMask( request.GetVersion(), request.GetAttempt(), activityInfo, ) err = mutableState.ReplicateActivityInfo(request, refreshTask) if err != nil { return err } // see whether we need to refresh the activity timer eventTime := timestamp.TimeValue(request.GetScheduledTime()) startedTime := timestamp.TimeValue(request.GetStartedTime()) lastHeartbeatTime := timestamp.TimeValue(request.GetLastHeartbeatTime()) if eventTime.Before(startedTime) { eventTime = startedTime } if eventTime.Before(lastHeartbeatTime) { eventTime = lastHeartbeatTime } // passive logic need to explicitly call create timer now := eventTime if _, err := newTimerSequence( clock.NewEventTimeSource().Update(now), mutableState, ).createNextActivityTimer(); err != nil { return err } updateMode := persistence.UpdateWorkflowModeUpdateCurrent if state, _ := mutableState.GetWorkflowStateStatus(); state == enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE { updateMode = persistence.UpdateWorkflowModeBypassCurrent } return context.updateWorkflowExecutionWithNew( now, updateMode, nil, // no new workflow nil, // no new workflow transactionPolicyPassive, nil, ) } func (r *nDCActivityReplicatorImpl) testRefreshActivityTimerTaskMask( version int64, attempt int32, activityInfo *persistencespb.ActivityInfo, ) bool { // calculate whether to reset the activity timer task status bits // reset timer task status bits if // 1. same source cluster & attempt changes // 2. 
different source cluster if !r.clusterMetadata.IsVersionFromSameCluster(version, activityInfo.Version) { return true } else if activityInfo.Attempt != attempt { return true } return false } func (r *nDCActivityReplicatorImpl) testActivity( version int64, attempt int32, lastHeartbeatTime time.Time, activityInfo *persistencespb.ActivityInfo, ) bool { if activityInfo.Version > version { // this should not retry, can be caused by failover or reset return false } if activityInfo.Version < version { // incoming version larger then local version, should update activity return true } // activityInfo.Version == version if activityInfo.Attempt > attempt { // this should not retry, can be caused by failover or reset return false } // activityInfo.Version == version if activityInfo.Attempt < attempt { // version equal & attempt larger then existing, should update activity return true } // activityInfo.Version == version & activityInfo.Attempt == attempt // last heartbeat after existing heartbeat & should update activity if activityInfo.LastHeartbeatUpdateTime != nil && activityInfo.LastHeartbeatUpdateTime.After(lastHeartbeatTime) { // this should not retry, can be caused by out of order delivery return false } return true } func (r *nDCActivityReplicatorImpl) testVersionHistory( namespaceID string, workflowID string, runID string, scheduleID int64, mutableState mutableState, incomingVersionHistory *historyspb.VersionHistory, ) (bool, error) { currentVersionHistory, err := versionhistory.GetCurrentVersionHistory( mutableState.GetExecutionInfo().GetVersionHistories(), ) if err != nil { return false, err } lastLocalItem, err := versionhistory.GetLastVersionHistoryItem(currentVersionHistory) if err != nil { return false, err } lastIncomingItem, err := versionhistory.GetLastVersionHistoryItem(incomingVersionHistory) if err != nil { return false, err } lcaItem, err := versionhistory.FindLCAVersionHistoryItem(currentVersionHistory, incomingVersionHistory) if err != nil { return false, err } // case 1: local version history is superset of incoming version history // or incoming version history is superset of local version history // resend the missing event if local version history doesn't have the schedule event // case 2: local version history and incoming version history diverged // case 2-1: local version history has the higher version and discard the incoming event // case 2-2: incoming version history has the higher version and resend the missing incoming events if versionhistory.IsLCAVersionHistoryItemAppendable(currentVersionHistory, lcaItem) || versionhistory.IsLCAVersionHistoryItemAppendable(incomingVersionHistory, lcaItem) { // case 1 if scheduleID > lcaItem.GetEventId() { return false, serviceerrors.NewRetryReplication( resendMissingEventMessage, namespaceID, workflowID, runID, lcaItem.GetEventId(), lcaItem.GetVersion(), common.EmptyEventID, common.EmptyVersion, ) } } else { // case 2 if lastIncomingItem.GetVersion() < lastLocalItem.GetVersion() { // case 2-1 return false, nil } else if lastIncomingItem.GetVersion() > lastLocalItem.GetVersion() { // case 2-2 return false, serviceerrors.NewRetryReplication( resendHigherVersionMessage, namespaceID, workflowID, runID, lcaItem.GetEventId(), lcaItem.GetVersion(), common.EmptyEventID, common.EmptyVersion, ) } } state, _ := mutableState.GetWorkflowStateStatus() return state != enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED, nil }
1
11,844
should this be callerTypeTask?
temporalio-temporal
go
@@ -56,6 +56,10 @@ class PlanPolicy < ApplicationPolicy @plan.administerable_by?(@user.id) end + def set_test? + @plan.administerable_by?(@user.id) + end + # TODO: These routes are no lonmger used =begin def section_answers?
1
class PlanPolicy < ApplicationPolicy attr_reader :user attr_reader :plan def initialize(user, plan) raise Pundit::NotAuthorizedError, "must be logged in" unless user @user = user @plan = plan end def show? @plan.readable_by?(@user.id) end def edit? @plan.readable_by?(@user.id) end def update_guidance_choices? @plan.editable_by?(@user.id) end def share? @plan.readable_by?(@user.id) end def export? @plan.readable_by?(@user.id) end def show_export? @plan.readable_by?(@user.id) end def update? @plan.editable_by?(@user.id) end def destroy? @plan.editable_by?(@user.id) end def status? @plan.readable_by?(@user.id) end def possible_templates? @plan.id.nil? end def duplicate? @plan.editable_by?(@user.id) end def visibility? @plan.administerable_by?(@user.id) end # TODO: These routes are no lonmger used =begin def section_answers? @plan.readable_by?(@user.id) end def locked? @plan.readable_by?(@user.id) end def delete_recent_locks? @plan.editable_by?(@user.id) end def unlock_all_sections? @plan.editable_by?(@user.id) end def lock_section? @plan.editable_by?(@user.id) end def unlock_section? @plan.editable_by?(@user.id) end =end def answer? @plan.readable_by?(@user.id) end end
1
16,784
Currently update is set as @plan.editable_by?(@user.id). Which one is the correct behavior? I can see a case for only owners/co-owners being able to set visibility, test status, and other plan details.
DMPRoadmap-roadmap
rb
@@ -47,8 +47,7 @@ namespace Nethermind.Blockchain private readonly LruCache<Keccak, BlockHeader> _headerCache = new LruCache<Keccak, BlockHeader>(CacheSize); private readonly LruCache<long, ChainLevelInfo> _blockInfoCache = new LruCache<long, ChainLevelInfo>(CacheSize); - private const int MaxQueueSize = 10_000_000; - + private const int BestKnownSearchLimit = 256_000_000; public const int DbLoadBatchSize = 4000; private long _currentDbLoadBatchEnd;
1
/* * Copyright (c) 2018 Demerzel Solutions Limited * This file is part of the Nethermind library. * * The Nethermind library is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The Nethermind library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with the Nethermind. If not, see <http://www.gnu.org/licenses/>. */ using System; using System.Collections; using System.Collections.Concurrent; using System.Collections.Generic; using System.Diagnostics; using System.IO; using System.Linq; using System.Numerics; using System.Threading; using System.Threading.Tasks; using Nethermind.Blockchain.TxPools; using Nethermind.Blockchain.Validators; using Nethermind.Core; using Nethermind.Core.Crypto; using Nethermind.Core.Encoding; using Nethermind.Core.Extensions; using Nethermind.Core.Json; using Nethermind.Core.Specs; using Nethermind.Logging; using Nethermind.Store; namespace Nethermind.Blockchain { [Todo(Improve.Refactor, "After the fast sync work there are some duplicated code parts for the 'by header' and 'by block' approaches.")] public class BlockTree : IBlockTree { private const int CacheSize = 64; private readonly LruCache<Keccak, Block> _blockCache = new LruCache<Keccak, Block>(CacheSize); private readonly LruCache<Keccak, BlockHeader> _headerCache = new LruCache<Keccak, BlockHeader>(CacheSize); private readonly LruCache<long, ChainLevelInfo> _blockInfoCache = new LruCache<long, ChainLevelInfo>(CacheSize); private const int MaxQueueSize = 10_000_000; public const int DbLoadBatchSize = 4000; private long _currentDbLoadBatchEnd; private ReaderWriterLockSlim _blockInfoLock = new ReaderWriterLockSlim(); private object _batchInsertLock = new object(); private readonly IDb _blockDb; private readonly IDb _headerDb; private ConcurrentDictionary<long, HashSet<Keccak>> _invalidBlocks = new ConcurrentDictionary<long, HashSet<Keccak>>(); private readonly BlockDecoder _blockDecoder = new BlockDecoder(); private readonly HeaderDecoder _headerDecoder = new HeaderDecoder(); private readonly IDb _blockInfoDb; private readonly ILogger _logger; private readonly ISpecProvider _specProvider; private readonly ITxPool _txPool; private readonly ISyncConfig _syncConfig; public BlockHeader Genesis { get; private set; } public BlockHeader Head { get; private set; } public BlockHeader BestSuggestedHeader { get; private set; } public Block BestSuggestedBody { get; private set; } public BlockHeader LowestInsertedHeader { get; private set; } public Block LowestInsertedBody { get; private set; } public long BestKnownNumber { get; private set; } public int ChainId => _specProvider.ChainId; public bool CanAcceptNewBlocks { get; private set; } = true; // no need to sync it at the moment public BlockTree( IDb blockDb, IDb headerDb, IDb blockInfoDb, ISpecProvider specProvider, ITxPool txPool, ILogManager logManager) : this(blockDb, headerDb, blockInfoDb, specProvider, txPool, new SyncConfig(), logManager) { } public BlockTree( IDb blockDb, IDb headerDb, IDb blockInfoDb, ISpecProvider specProvider, ITxPool txPool, ISyncConfig syncConfig, ILogManager logManager) { _logger = 
logManager?.GetClassLogger() ?? throw new ArgumentNullException(nameof(logManager)); _blockDb = blockDb ?? throw new ArgumentNullException(nameof(blockDb)); _headerDb = headerDb ?? throw new ArgumentNullException(nameof(headerDb)); _blockInfoDb = blockInfoDb ?? throw new ArgumentNullException(nameof(blockInfoDb)); _specProvider = specProvider; _txPool = txPool ?? throw new ArgumentNullException(nameof(txPool)); _syncConfig = syncConfig ?? throw new ArgumentNullException(nameof(syncConfig)); ChainLevelInfo genesisLevel = LoadLevel(0, true); if (genesisLevel != null) { if (genesisLevel.BlockInfos.Length != 1) { // just for corrupted test bases genesisLevel.BlockInfos = new[] {genesisLevel.BlockInfos[0]}; PersistLevel(0, genesisLevel); //throw new InvalidOperationException($"Genesis level in DB has {genesisLevel.BlockInfos.Length} blocks"); } LoadLowestInsertedHeader(); LoadLowestInsertedBody(); LoadBestKnown(); if (genesisLevel.BlockInfos[0].WasProcessed) { BlockHeader genesisHeader = FindHeader(genesisLevel.BlockInfos[0].BlockHash, BlockTreeLookupOptions.None); Genesis = genesisHeader; LoadHeadBlockAtStart(); } } if (_logger.IsInfo) _logger.Info($"Block tree initialized, last processed is {Head?.ToString(BlockHeader.Format.Short) ?? "0"}, best queued is {BestSuggestedHeader?.Number.ToString() ?? "0"}, best known is {BestKnownNumber}, lowest inserted header {LowestInsertedHeader?.Number}, body {LowestInsertedBody?.Number}"); } private void LoadBestKnown() { long headNumber = Head?.Number ?? -1; long left = Math.Max(LowestInsertedHeader?.Number ?? 0, headNumber); long right = headNumber + MaxQueueSize; while (left != right) { long index = left + (right - left) / 2; ChainLevelInfo level = LoadLevel(index, true); if (level == null) { right = index; } else { left = index + 1; } } long result = left - 1; BestKnownNumber = result; if (BestKnownNumber < 0) { throw new InvalidOperationException($"Best known is {BestKnownNumber}"); } } private void LoadLowestInsertedHeader() { long left = 0L; long right = LongConverter.FromString(_syncConfig.PivotNumber ?? "0x0"); ChainLevelInfo lowestInsertedLevel = null; while (left != right) { if (_logger.IsTrace) _logger.Trace($"Finding lowest inserted header - L {left} | R {right}"); long index = left + (right - left) / 2 + 1; ChainLevelInfo level = LoadLevel(index, true); if (level == null) { left = index; } else { lowestInsertedLevel = level; right = index - 1L; } } if (lowestInsertedLevel == null) { if (_logger.IsTrace) _logger.Trace($"Lowest inserted header is null - L {left} | R {right}"); LowestInsertedHeader = null; } else { BlockInfo blockInfo = lowestInsertedLevel.BlockInfos[0]; LowestInsertedHeader = FindHeader(blockInfo.BlockHash, BlockTreeLookupOptions.None); if (_logger.IsDebug) _logger.Debug($"Lowest inserted header is {LowestInsertedHeader?.ToString(BlockHeader.Format.Short)} {right} - L {left} | R {right}"); } } private void LoadLowestInsertedBody() { long left = 0L; long right = LongConverter.FromString(_syncConfig.PivotNumber ?? "0x0"); Block lowestInsertedBlock = null; while (left != right) { if (_logger.IsDebug) _logger.Debug($"Finding lowest inserted body - L {left} | R {right}"); long index = left + (right - left) / 2 + 1; ChainLevelInfo level = LoadLevel(index, true); Block block = level == null ? 
null : FindBlock(level.BlockInfos[0].BlockHash, BlockTreeLookupOptions.TotalDifficultyNotNeeded); if (block == null) { left = index; } else { lowestInsertedBlock = block; right = index - 1; } } if (lowestInsertedBlock == null) { if (_logger.IsTrace) _logger.Trace($"Lowest inserted body is null - L {left} | R {right}"); LowestInsertedBody = null; } else { if (_logger.IsDebug) _logger.Debug($"Lowest inserted body is {LowestInsertedBody?.ToString(Block.Format.Short)} {right} - L {left} | R {right}"); LowestInsertedBody = lowestInsertedBlock; } } private async Task VisitBlocks(long startNumber, long blocksToVisit, Func<Block, Task<bool>> blockFound, Func<BlockHeader, Task<bool>> headerFound, Func<long, Task<bool>> noneFound, CancellationToken cancellationToken) { long blockNumber = startNumber; for (long i = 0; i < blocksToVisit; i++) { if (cancellationToken.IsCancellationRequested) { break; } ChainLevelInfo level = LoadLevel(blockNumber); if (level == null) { _logger.Warn($"Missing level - {blockNumber}"); break; } BigInteger maxDifficultySoFar = 0; BlockInfo maxDifficultyBlock = null; for (int blockIndex = 0; blockIndex < level.BlockInfos.Length; blockIndex++) { if (level.BlockInfos[blockIndex].TotalDifficulty > maxDifficultySoFar) { maxDifficultyBlock = level.BlockInfos[blockIndex]; maxDifficultySoFar = maxDifficultyBlock.TotalDifficulty; } } level = null; // ReSharper disable once ConditionIsAlwaysTrueOrFalse if (level != null) // ReSharper disable once HeuristicUnreachableCode { // ReSharper disable once HeuristicUnreachableCode throw new InvalidOperationException("just be aware that this level can be deleted by another thread after here"); } if (maxDifficultyBlock == null) { throw new InvalidOperationException($"Expected at least one block at level {blockNumber}"); } Block block = FindBlock(maxDifficultyBlock.BlockHash, BlockTreeLookupOptions.None); if (block == null) { BlockHeader header = FindHeader(maxDifficultyBlock.BlockHash, BlockTreeLookupOptions.None); if (header == null) { bool shouldContinue = await noneFound(blockNumber); if (!shouldContinue) { break; } } else { bool shouldContinue = await headerFound(header); if (!shouldContinue) { break; } } } else { bool shouldContinue = await blockFound(block); if (!shouldContinue) { break; } } blockNumber++; } if (cancellationToken.IsCancellationRequested) { _logger.Info($"Canceled visiting blocks in DB at block {blockNumber}"); } if (_logger.IsInfo) { _logger.Info($"Completed visiting blocks in DB at block {blockNumber} - best known {BestKnownNumber}"); } } public async Task LoadBlocksFromDb( CancellationToken cancellationToken, long? startBlockNumber = null, int batchSize = DbLoadBatchSize, int maxBlocksToLoad = int.MaxValue) { try { CanAcceptNewBlocks = false; byte[] deletePointer = _blockInfoDb.Get(DeletePointerAddressInDb); if (deletePointer != null) { Keccak deletePointerHash = new Keccak(deletePointer); if (_logger.IsInfo) _logger.Info($"Cleaning invalid blocks starting from {deletePointer}"); DeleteBlocks(deletePointerHash); } if (startBlockNumber == null) { startBlockNumber = Head?.Number ?? 0; } else { Head = startBlockNumber == 0 ? 
null : FindBlock(startBlockNumber.Value - 1, BlockTreeLookupOptions.RequireCanonical)?.Header; } long blocksToLoad = Math.Min(CountKnownAheadOfHead(), maxBlocksToLoad); if (blocksToLoad == 0) { if (_logger.IsInfo) _logger.Info("Found no blocks to load from DB"); return; } if (_logger.IsInfo) _logger.Info($"Found {blocksToLoad} blocks to load from DB starting from current head block {Head?.ToString(BlockHeader.Format.Short)}"); Task<bool> NoneFound(long number) { _blockInfoDb.Delete(number); BestKnownNumber = number - 1; return Task.FromResult(false); } Task<bool> HeaderFound(BlockHeader header) { BestSuggestedHeader = header; long i = header.Number - startBlockNumber.Value; // copy paste from below less batching if (i % batchSize == batchSize - 1 && i != blocksToLoad - 1 && Head.Number + batchSize < header.Number) { if (_logger.IsInfo) _logger.Info($"Loaded {i + 1} out of {blocksToLoad} headers from DB."); } return Task.FromResult(true); } async Task<bool> BlockFound(Block block) { BestSuggestedHeader = block.Header; BestSuggestedBody = block; NewBestSuggestedBlock?.Invoke(this, new BlockEventArgs(block)); long i = block.Number - startBlockNumber.Value; if (i % batchSize == batchSize - 1 && i != blocksToLoad - 1 && Head.Number + batchSize < block.Number) { if (_logger.IsInfo) { _logger.Info($"Loaded {i + 1} out of {blocksToLoad} blocks from DB into processing queue, waiting for processor before loading more."); } _dbBatchProcessed = new TaskCompletionSource<object>(); using (cancellationToken.Register(() => _dbBatchProcessed.SetCanceled())) { _currentDbLoadBatchEnd = block.Number - batchSize; await _dbBatchProcessed.Task; } } return true; } await VisitBlocks(startBlockNumber.Value, blocksToLoad, BlockFound, HeaderFound, NoneFound, cancellationToken); } finally { CanAcceptNewBlocks = true; } } public AddBlockResult Insert(BlockHeader header) { if (!CanAcceptNewBlocks) { return AddBlockResult.CannotAccept; } if (header.Number == 0) { throw new InvalidOperationException("Genesis block should not be inserted."); } // validate hash here Rlp newRlp = _headerDecoder.Encode(header); _headerDb.Set(header.Hash, newRlp.Bytes); BlockInfo blockInfo = new BlockInfo(header.Hash, header.TotalDifficulty ?? 0); try { _blockInfoLock.EnterWriteLock(); ChainLevelInfo chainLevel = new ChainLevelInfo(false, new[] {blockInfo}); PersistLevel(header.Number, chainLevel); } finally { _blockInfoLock.ExitWriteLock(); } if (header.Number < (LowestInsertedHeader?.Number ?? long.MaxValue)) { LowestInsertedHeader = header; } if (header.Number > BestKnownNumber) { BestKnownNumber = header.Number; } return AddBlockResult.Added; } public AddBlockResult Insert(Block block) { if (!CanAcceptNewBlocks) { return AddBlockResult.CannotAccept; } if (block.Number == 0) { throw new InvalidOperationException("Genesis block should not be inserted."); } Rlp newRlp = _blockDecoder.Encode(block); _blockDb.Set(block.Hash, newRlp.Bytes); long expectedNumber = (LowestInsertedBody?.Number - 1 ?? LongConverter.FromString(_syncConfig.PivotNumber ?? "0")); if (block.Number != expectedNumber) { throw new InvalidOperationException($"Trying to insert out of order block {block.Number} when expected number was {expectedNumber}"); } if (block.Number < (LowestInsertedBody?.Number ?? 
long.MaxValue)) { LowestInsertedBody = block; } return AddBlockResult.Added; } public void Insert(IEnumerable<Block> blocks) { lock (_batchInsertLock) { try { _blockDb.StartBatch(); foreach (Block block in blocks) { Insert(block); } } finally { _blockDb.CommitBatch(); } } } private AddBlockResult Suggest(Block block, BlockHeader header, bool shouldProcess = true) { #if DEBUG /* this is just to make sure that we do not fall into this trap when creating tests */ if (header.StateRoot == null && !header.IsGenesis) { throw new InvalidDataException($"State root is null in {header.ToString(BlockHeader.Format.Short)}"); } #endif if (!CanAcceptNewBlocks) { return AddBlockResult.CannotAccept; } if (_invalidBlocks.ContainsKey(header.Number) && _invalidBlocks[header.Number].Contains(header.Hash)) { return AddBlockResult.InvalidBlock; } bool isKnown = IsKnownBlock(header.Number, header.Hash); if (header.Number == 0) { if (BestSuggestedHeader != null) { throw new InvalidOperationException("Genesis block should be added only once"); } } else if (isKnown && (BestSuggestedHeader?.Number ?? 0) >= header.Number) { if (_logger.IsTrace) { _logger.Trace($"Block {header.Hash} already known."); } return AddBlockResult.AlreadyKnown; } else if (!IsKnownBlock(header.Number - 1, header.ParentHash)) { if (_logger.IsTrace) { _logger.Trace($"Could not find parent ({header.ParentHash}) of block {header.Hash}"); } return AddBlockResult.UnknownParent; } SetTotalDifficulty(header); if (block != null && !isKnown) { Rlp newRlp = _blockDecoder.Encode(block); _blockDb.Set(block.Hash, newRlp.Bytes); } if (!isKnown) { Rlp newRlp = _headerDecoder.Encode(header); _headerDb.Set(header.Hash, newRlp.Bytes); BlockInfo blockInfo = new BlockInfo(header.Hash, header.TotalDifficulty ?? 0); try { _blockInfoLock.EnterWriteLock(); UpdateOrCreateLevel(header.Number, blockInfo); } finally { _blockInfoLock.ExitWriteLock(); } } if (header.IsGenesis || header.TotalDifficulty > (BestSuggestedHeader?.TotalDifficulty ?? 0)) { if (header.IsGenesis) { Genesis = header; } BestSuggestedHeader = header; if (block != null && shouldProcess) { BestSuggestedBody = block; NewBestSuggestedBlock?.Invoke(this, new BlockEventArgs(block)); } } return AddBlockResult.Added; } public AddBlockResult SuggestHeader(BlockHeader header) { return Suggest(null, header); } public AddBlockResult SuggestBlock(Block block, bool shouldProcess = true) { if (Genesis == null && !block.IsGenesis) { throw new InvalidOperationException("Block tree should be initialized with genesis before suggesting other blocks."); } return Suggest(block, block.Header, shouldProcess); } public BlockHeader FindHeader(long number, BlockTreeLookupOptions options) { Keccak blockHash = GetBlockHashOnMainOrOnlyHash(number); return blockHash == null ? 
null : FindHeader(blockHash, options); } public BlockHeader FindHeader(Keccak blockHash, BlockTreeLookupOptions options) { if (blockHash == null || blockHash == Keccak.Zero) { // TODO: would be great to check why this is still needed (maybe it is something archaic) return null; } BlockHeader header = _headerCache.Get(blockHash); if (header == null) { IDbWithSpan spanHeaderDb = _headerDb as IDbWithSpan; if (spanHeaderDb != null) { Span<byte> data = spanHeaderDb.GetSpan(blockHash); if (data == null) { return null; } header = _headerDecoder.Decode(data.AsRlpValueContext(), RlpBehaviors.AllowExtraData); spanHeaderDb.DangerousReleaseMemory(data); } else { byte[] data = _headerDb.Get(blockHash); if (data == null) { return null; } header = _headerDecoder.Decode(data.AsRlpStream(), RlpBehaviors.AllowExtraData); } } bool totalDifficultyNeeded = (options & BlockTreeLookupOptions.TotalDifficultyNotNeeded) == BlockTreeLookupOptions.None; bool requiresCanonical = (options & BlockTreeLookupOptions.RequireCanonical) == BlockTreeLookupOptions.RequireCanonical; if ((totalDifficultyNeeded && header.TotalDifficulty == null) || requiresCanonical) { (BlockInfo blockInfo, ChainLevelInfo level) = LoadInfo(header.Number, header.Hash); if (level == null || blockInfo == null) { // TODO: this is here because storing block data is not transactional // TODO: would be great to remove it, he? SetTotalDifficulty(header); blockInfo = new BlockInfo(header.Hash, header.TotalDifficulty.Value); try { _blockInfoLock.EnterWriteLock(); UpdateOrCreateLevel(header.Number, blockInfo); } finally { _blockInfoLock.ExitWriteLock(); } (_, level) = LoadInfo(header.Number, header.Hash); } else { header.TotalDifficulty = blockInfo.TotalDifficulty; } if (requiresCanonical) { bool isMain = level.HasBlockOnMainChain && level.BlockInfos[0].BlockHash.Equals(blockHash); header = isMain ? header : null; } } if (header != null && ShouldCache(header.Number)) { _headerCache.Set(blockHash, header); } return header; } public Keccak FindHash(long number) { return GetBlockHashOnMainOrOnlyHash(number); } public BlockHeader[] FindHeaders(Keccak blockHash, int numberOfBlocks, int skip, bool reverse) { if (numberOfBlocks == 0) { return Array.Empty<BlockHeader>(); } if (blockHash == null) { return new BlockHeader[numberOfBlocks]; } BlockHeader startHeader = FindHeader(blockHash, BlockTreeLookupOptions.TotalDifficultyNotNeeded); if (startHeader == null) { return new BlockHeader[numberOfBlocks]; } if (numberOfBlocks == 1) { return new[] {startHeader}; } if (skip == 0) { /* if we do not skip and we have the last block then we can assume that all the blocks are there and we can use the fact that we can use parent hash and that searching by hash is much faster as it does not require the step of resolving number -> hash */ BlockHeader endHeader = FindHeader(startHeader.Number + numberOfBlocks - 1, BlockTreeLookupOptions.TotalDifficultyNotNeeded); if (endHeader != null) { return FindHeadersReversedFull(endHeader, numberOfBlocks); } } BlockHeader[] result = new BlockHeader[numberOfBlocks]; BlockHeader current = startHeader; int directionMultiplier = reverse ? 
-1 : 1; int responseIndex = 0; do { result[responseIndex] = current; responseIndex++; long nextNumber = startHeader.Number + directionMultiplier * (responseIndex * skip + responseIndex); if (nextNumber < 0) { break; } current = FindHeader(nextNumber, BlockTreeLookupOptions.TotalDifficultyNotNeeded); } while (current != null && responseIndex < numberOfBlocks); return result; } private BlockHeader[] FindHeadersReversedFull(BlockHeader startHeader, int numberOfBlocks) { if (startHeader == null) throw new ArgumentNullException(nameof(startHeader)); if (numberOfBlocks == 1) { return new[] {startHeader}; } BlockHeader[] result = new BlockHeader[numberOfBlocks]; BlockHeader current = startHeader; int responseIndex = numberOfBlocks - 1; do { result[responseIndex] = current; responseIndex--; if (responseIndex < 0) { break; } current = FindHeader(current.ParentHash, BlockTreeLookupOptions.TotalDifficultyNotNeeded); } while (current != null && responseIndex < numberOfBlocks); return result; } private Keccak GetBlockHashOnMainOrOnlyHash(long blockNumber) { if (blockNumber < 0) { throw new ArgumentException($"{nameof(blockNumber)} must be greater or equal zero and is {blockNumber}", nameof(blockNumber)); } ChainLevelInfo level = LoadLevel(blockNumber); if (level == null) { return null; } if (level.HasBlockOnMainChain) { return level.BlockInfos[0].BlockHash; } if (level.BlockInfos.Length != 1) { if (_logger.IsError) _logger.Error($"Invalid request for block {blockNumber} ({level.BlockInfos.Length} blocks at the same level)."); throw new InvalidOperationException($"Unexpected request by number for a block {blockNumber} that is not on the main chain and is not the only hash on chain"); } return level.BlockInfos[0].BlockHash; } public Block FindBlock(long blockNumber, BlockTreeLookupOptions options) { Keccak hash = GetBlockHashOnMainOrOnlyHash(blockNumber); return FindBlock(hash, options); } public void DeleteInvalidBlock(Block invalidBlock) { if (_logger.IsDebug) _logger.Debug($"Deleting invalid block {invalidBlock.ToString(Block.Format.FullHashAndNumber)}"); _invalidBlocks.AddOrUpdate( invalidBlock.Number, number => new HashSet<Keccak> {invalidBlock.Hash}, (number, set) => { set.Add(invalidBlock.Hash); return set; }); BestSuggestedHeader = Head; BestSuggestedBody = Head == null ? null : FindBlock(Head.Hash, BlockTreeLookupOptions.None); try { CanAcceptNewBlocks = false; } finally { DeleteBlocks(invalidBlock.Hash); CanAcceptNewBlocks = true; } } private void DeleteBlocks(Keccak deletePointer) { BlockHeader deleteHeader = FindHeader(deletePointer, BlockTreeLookupOptions.TotalDifficultyNotNeeded); long currentNumber = deleteHeader.Number; Keccak currentHash = deleteHeader.Hash; Keccak nextHash = null; ChainLevelInfo nextLevel = null; while (true) { ChainLevelInfo currentLevel = nextLevel ?? 
LoadLevel(currentNumber); nextLevel = LoadLevel(currentNumber + 1); bool shouldRemoveLevel = false; if (currentLevel != null) // preparing update of the level (removal of the invalid branch block) { if (currentLevel.BlockInfos.Length == 1) { shouldRemoveLevel = true; } else { for (int i = 0; i < currentLevel.BlockInfos.Length; i++) { if (currentLevel.BlockInfos[0].BlockHash == currentHash) { currentLevel.BlockInfos = currentLevel.BlockInfos.Where(bi => bi.BlockHash != currentHash).ToArray(); break; } } } } // just finding what the next descendant will be if (nextLevel != null) { nextHash = FindChild(nextLevel, currentHash); } UpdateDeletePointer(nextHash); try { _blockInfoLock.EnterWriteLock(); if (shouldRemoveLevel) { BestKnownNumber = Math.Min(BestKnownNumber, currentNumber - 1); _blockInfoCache.Delete(currentNumber); _blockInfoDb.Delete(currentNumber); } else { PersistLevel(currentNumber, currentLevel); } } finally { _blockInfoLock.ExitWriteLock(); } if (_logger.IsInfo) _logger.Info($"Deleting invalid block {currentHash} at level {currentNumber}"); _blockCache.Delete(currentHash); _blockDb.Delete(currentHash); _headerCache.Delete(currentHash); _headerDb.Delete(currentHash); if (nextHash == null) { break; } currentNumber++; currentHash = nextHash; nextHash = null; } } private Keccak FindChild(ChainLevelInfo level, Keccak parentHash) { Keccak childHash = null; for (int i = 0; i < level.BlockInfos.Length; i++) { BlockHeader potentialChild = FindHeader(level.BlockInfos[i].BlockHash, BlockTreeLookupOptions.TotalDifficultyNotNeeded); if (potentialChild.ParentHash == parentHash) { childHash = potentialChild.Hash; break; } } return childHash; } public bool IsMainChain(Keccak blockHash) { long number = LoadNumberOnly(blockHash); ChainLevelInfo level = LoadLevel(number); return level.HasBlockOnMainChain && level.BlockInfos[0].BlockHash.Equals(blockHash); } public bool WasProcessed(long number, Keccak blockHash) { ChainLevelInfo levelInfo = LoadLevel(number); int? index = FindIndex(blockHash, levelInfo); if (index == null) { throw new InvalidOperationException($"Not able to find block {blockHash} index on the chain level"); } return levelInfo.BlockInfos[index.Value].WasProcessed; } public void UpdateMainChain(Block[] processedBlocks) { if (processedBlocks.Length == 0) { return; } bool ascendingOrder = true; if (processedBlocks.Length > 1) { if (processedBlocks[processedBlocks.Length - 1].Number < processedBlocks[0].Number) { ascendingOrder = false; } } #if DEBUG for (int i = 0; i < processedBlocks.Length; i++) { if (i != 0) { if (ascendingOrder && processedBlocks[i].Number != processedBlocks[i - 1].Number + 1) { throw new InvalidOperationException("Update main chain invoked with gaps"); } if (!ascendingOrder && processedBlocks[i - 1].Number != processedBlocks[i].Number + 1) { throw new InvalidOperationException("Update main chain invoked with gaps"); } } } #endif long lastNumber = ascendingOrder ? processedBlocks[processedBlocks.Length - 1].Number : processedBlocks[0].Number; long previousHeadNumber = Head?.Number ?? 
0L; try { _blockInfoLock.EnterWriteLock(); if (previousHeadNumber > lastNumber) { for (long i = 0; i < previousHeadNumber - lastNumber; i++) { long levelNumber = previousHeadNumber - i; ChainLevelInfo level = LoadLevel(levelNumber); level.HasBlockOnMainChain = false; PersistLevel(levelNumber, level); } } for (int i = 0; i < processedBlocks.Length; i++) { Block block = processedBlocks[i]; if (ShouldCache(block.Number)) { _blockCache.Set(block.Hash, processedBlocks[i]); _headerCache.Set(block.Hash, block.Header); } MoveToMain(processedBlocks[i]); } } finally { _blockInfoLock.ExitWriteLock(); } } private TaskCompletionSource<object> _dbBatchProcessed; private void MoveToMain(Block block) { if (_logger.IsTrace) _logger.Trace($"Moving {block.ToString(Block.Format.Short)} to main"); ChainLevelInfo level = LoadLevel(block.Number); int? index = FindIndex(block.Hash, level); if (index == null) { throw new InvalidOperationException($"Cannot move unknown block {block.ToString(Block.Format.FullHashAndNumber)} to main"); } Keccak hashOfThePreviousMainBlock = level.HasBlockOnMainChain ? level.BlockInfos[0].BlockHash : null; BlockInfo info = level.BlockInfos[index.Value]; info.WasProcessed = true; if (index.Value != 0) { (level.BlockInfos[index.Value], level.BlockInfos[0]) = (level.BlockInfos[0], level.BlockInfos[index.Value]); } level.HasBlockOnMainChain = true; PersistLevel(block.Number, level); BlockAddedToMain?.Invoke(this, new BlockEventArgs(block)); if (block.IsGenesis || block.TotalDifficulty > (Head?.TotalDifficulty ?? 0)) { if (block.Number == 0) { Genesis = block.Header; } if (block.TotalDifficulty == null) { throw new InvalidOperationException("Head block with null total difficulty"); } UpdateHeadBlock(block); } for (int i = 0; i < block.Transactions.Length; i++) { _txPool.RemoveTransaction(block.Transactions[i].Hash, block.Number); } // the hash will only be the same during perf test runs / modified DB states if (hashOfThePreviousMainBlock != null && hashOfThePreviousMainBlock != block.Hash) { Block previous = FindBlock(hashOfThePreviousMainBlock, BlockTreeLookupOptions.TotalDifficultyNotNeeded); for (int i = 0; i < previous.Transactions.Length; i++) { Transaction tx = previous.Transactions[i]; _txPool.AddTransaction(tx, previous.Number); } } if (_logger.IsTrace) _logger.Trace($"Block {block.ToString(Block.Format.Short)} added to main chain"); } [Todo(Improve.Refactor, "Look at this magic -1 behaviour, never liked it, now when it is split between BestKnownNumber and Head it is even worse")] private long CountKnownAheadOfHead() { long headNumber = Head?.Number ?? -1; return BestKnownNumber - headNumber; } private void LoadHeadBlockAtStart() { byte[] data = _blockInfoDb.Get(HeadAddressInDb); if (data != null) { BlockHeader headBlockHeader = data.Length == 32 ? FindHeader(new Keccak(data), BlockTreeLookupOptions.None) : Rlp.Decode<BlockHeader>(data.AsRlpStream(), RlpBehaviors.AllowExtraData); ChainLevelInfo level = LoadLevel(headBlockHeader.Number); int? 
index = FindIndex(headBlockHeader.Hash, level); if (!index.HasValue) { throw new InvalidDataException("Head block data missing from chain info"); } headBlockHeader.TotalDifficulty = level.BlockInfos[index.Value].TotalDifficulty; Head = BestSuggestedHeader = headBlockHeader; BestSuggestedBody = FindBlock(headBlockHeader.Hash, BlockTreeLookupOptions.None); } } public bool IsKnownBlock(long number, Keccak blockHash) { if (number > BestKnownNumber) { return false; } // IsKnownBlock will be mainly called when new blocks are incoming // and these are very likely to be all at the head of the chain if (blockHash == Head?.Hash) { return true; } if (_headerCache.Get(blockHash) != null) { return true; } ChainLevelInfo level = LoadLevel(number); return level != null && FindIndex(blockHash, level).HasValue; } internal static Keccak HeadAddressInDb = Keccak.Zero; internal static Keccak DeletePointerAddressInDb = new Keccak(new BitArray(32 * 8, true).ToBytes()); private void UpdateDeletePointer(Keccak hash) { if (hash == null) { _blockInfoDb.Delete(DeletePointerAddressInDb); return; } if (_logger.IsInfo) _logger.Info($"Deleting an invalid block or its descendant {hash}"); _blockInfoDb.Set(DeletePointerAddressInDb, hash.Bytes); } private void UpdateHeadBlock(Block block) { if (block.IsGenesis) { Genesis = block.Header; } Head = block.Header; _blockInfoDb.Set(HeadAddressInDb, Head.Hash.Bytes); NewHeadBlock?.Invoke(this, new BlockEventArgs(block)); if (_dbBatchProcessed != null) { if (block.Number == _currentDbLoadBatchEnd) { TaskCompletionSource<object> completionSource = _dbBatchProcessed; _dbBatchProcessed = null; completionSource.SetResult(null); } } } private void UpdateOrCreateLevel(long number, BlockInfo blockInfo) { ChainLevelInfo level = LoadLevel(number, false); if (level != null) { BlockInfo[] blockInfos = new BlockInfo[level.BlockInfos.Length + 1]; for (int i = 0; i < level.BlockInfos.Length; i++) { blockInfos[i] = level.BlockInfos[i]; } blockInfos[blockInfos.Length - 1] = blockInfo; level.BlockInfos = blockInfos; } else { if (number > BestKnownNumber) { BestKnownNumber = number; } level = new ChainLevelInfo(false, new[] {blockInfo}); } PersistLevel(number, level); } /* error-prone: all methods that load a level, change it and then persist need to execute everything under a lock */ private void PersistLevel(long number, ChainLevelInfo level) { // _blockInfoCache.Set(number, level); _blockInfoDb.Set(number, Rlp.Encode(level).Bytes); } private (BlockInfo Info, ChainLevelInfo Level) LoadInfo(long number, Keccak blockHash) { ChainLevelInfo chainLevelInfo = LoadLevel(number); if (chainLevelInfo == null) { return (null, null); } int? index = FindIndex(blockHash, chainLevelInfo); return index.HasValue ? (chainLevelInfo.BlockInfos[index.Value], chainLevelInfo) : (null, chainLevelInfo); } private int? 
FindIndex(Keccak blockHash, ChainLevelInfo level) { for (int i = 0; i < level.BlockInfos.Length; i++) { if (level.BlockInfos[i].BlockHash.Equals(blockHash)) { return i; } } return null; } private ChainLevelInfo LoadLevel(long number, bool forceLoad = true) { if (number > BestKnownNumber && !forceLoad) { return null; } ChainLevelInfo chainLevelInfo = _blockInfoCache.Get(number); if (chainLevelInfo == null) { byte[] levelBytes = _blockInfoDb.Get(number); if (levelBytes == null) { return null; } chainLevelInfo = Rlp.Decode<ChainLevelInfo>(new Rlp(levelBytes)); } return chainLevelInfo; } private long LoadNumberOnly(Keccak blockHash) { BlockHeader header = FindHeader(blockHash, BlockTreeLookupOptions.TotalDifficultyNotNeeded); if (header == null) { throw new InvalidOperationException( $"Not able to retrieve block number for an unknown block {blockHash}"); } return header.Number; } /// <summary> /// To make cache useful even when we handle sync requests /// </summary> /// <param name="number"></param> /// <returns></returns> private bool ShouldCache(long number) { return number == 0L || Head == null || number > Head.Number - CacheSize && number <= Head.Number + 1; } public Block FindBlock(Keccak blockHash, BlockTreeLookupOptions options) { if (blockHash == null || blockHash == Keccak.Zero) { return null; } Block block = _blockCache.Get(blockHash); if (block == null) { byte[] data = _blockDb.Get(blockHash); if (data == null) { return null; } block = _blockDecoder.Decode(data.AsRlpStream(), RlpBehaviors.AllowExtraData); } bool totalDifficultyNeeded = (options & BlockTreeLookupOptions.TotalDifficultyNotNeeded) == BlockTreeLookupOptions.None; bool requiresCanonical = (options & BlockTreeLookupOptions.RequireCanonical) == BlockTreeLookupOptions.RequireCanonical; if ((totalDifficultyNeeded && block.TotalDifficulty == null) || requiresCanonical) { (BlockInfo blockInfo, ChainLevelInfo level) = LoadInfo(block.Number, block.Hash); if (level == null || blockInfo == null) { // TODO: this is here because storing block data is not transactional // TODO: would be great to remove it, he? SetTotalDifficulty(block.Header); blockInfo = new BlockInfo(block.Hash, block.TotalDifficulty.Value); try { _blockInfoLock.EnterWriteLock(); UpdateOrCreateLevel(block.Number, blockInfo); } finally { _blockInfoLock.ExitWriteLock(); } (_, level) = LoadInfo(block.Number, block.Hash); } else { block.Header.TotalDifficulty = blockInfo.TotalDifficulty; } if (requiresCanonical) { bool isMain = level.HasBlockOnMainChain && level.BlockInfos[0].BlockHash.Equals(blockHash); block = isMain ? 
block : null; } } if (block != null && ShouldCache(block.Number)) { _blockCache.Set(blockHash, block); _headerCache.Set(blockHash, block.Header); } return block; } private void SetTotalDifficulty(BlockHeader header) { if (_logger.IsTrace) { _logger.Trace($"Calculating total difficulty for {header}"); } if (header.Number == 0) { header.TotalDifficulty = header.Difficulty; } else { BlockHeader parentHeader = this.FindParentHeader(header, BlockTreeLookupOptions.None); if (parentHeader == null) { throw new InvalidOperationException($"An orphaned block on the chain {header}"); } if (parentHeader.TotalDifficulty == null) { throw new InvalidOperationException( $"Parent's {nameof(parentHeader.TotalDifficulty)} unknown when calculating for {header}"); } header.TotalDifficulty = parentHeader.TotalDifficulty + header.Difficulty; } if (_logger.IsTrace) { _logger.Trace($"Calculated total difficulty for {header} is {header.TotalDifficulty}"); } } public event EventHandler<BlockEventArgs> BlockAddedToMain; public event EventHandler<BlockEventArgs> NewBestSuggestedBlock; public event EventHandler<BlockEventArgs> NewHeadBlock; public async Task FixFastSyncGaps(CancellationToken cancellationToken) { try { CanAcceptNewBlocks = false; long startNumber = Head?.Number ?? 0; if (startNumber == 0) { return; } long blocksToLoad = CountKnownAheadOfHead(); if (blocksToLoad == 0) { return; } long? gapStart = null; long? gapEnd = null; Keccak firstInvalidHash = null; bool shouldDelete = false; Task<bool> NoneFound(long number) => Task.FromResult(false); Task<bool> HeaderFound(BlockHeader header) { if (firstInvalidHash == null) { gapStart = header.Number; firstInvalidHash = header.Hash; } return Task.FromResult(true); } Task<bool> BlockFound(Block block) { if (firstInvalidHash != null && !shouldDelete) { gapEnd = block.Number; shouldDelete = true; } return Task.FromResult(true); } await VisitBlocks(startNumber + 1, blocksToLoad, BlockFound, HeaderFound, NoneFound, cancellationToken); if (shouldDelete) { if (_logger.IsWarn) _logger.Warn($"Deleting blocks starting with {firstInvalidHash} due to the gap found between {gapStart} and {gapEnd}"); DeleteBlocks(firstInvalidHash); BestSuggestedHeader = Head; BestSuggestedBody = Head == null ? null : FindBlock(Head.Hash, BlockTreeLookupOptions.None); } } finally { CanAcceptNewBlocks = true; } } } }
1
22,573
Maybe make it configurable? Or store the actual level index in DB, making this binary search obsolete?
NethermindEth-nethermind
.cs
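One way the reviewer's suggestion above could be realized, sketched very roughly (this is not Nethermind's actual implementation; the key derivation, the `Keccak.Compute` call and the method names are assumptions, while the `_blockInfoDb` Get/Set usage mirrors what the file already does for `HeadAddressInDb`): persist the best-known block number under a fixed key whenever it advances, and read it back at startup, falling back to the existing binary search only when the key is missing.

// Hypothetical key; the only requirement is that it cannot collide with a level-number or hash key.
internal static readonly Keccak BestKnownAddressInDb = Keccak.Compute("BestKnownNumber");

private void PersistBestKnown(long bestKnown)
{
    // Called whenever BestKnownNumber moves forward.
    _blockInfoDb.Set(BestKnownAddressInDb, BitConverter.GetBytes(bestKnown));
}

private bool TryLoadPersistedBestKnown(out long bestKnown)
{
    byte[] data = _blockInfoDb.Get(BestKnownAddressInDb);
    if (data == null || data.Length != sizeof(long))
    {
        bestKnown = 0;
        return false; // fall back to the existing LoadBestKnown() binary search
    }

    bestKnown = BitConverter.ToInt64(data, 0);
    return true;
}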
@@ -66,6 +66,7 @@ public class HiveTableBaseTest extends HiveMetastoreTest { tableLocation.getFileSystem(hiveConf).delete(tableLocation, true); catalog.dropTable(TABLE_IDENTIFIER, false /* metadata only, location was already deleted */); } + private static String getTableBasePath(String tableName) { String databasePath = metastore.getDatabasePath(DB_NAME); return Paths.get(databasePath, tableName).toAbsolutePath().toString();
1
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.iceberg.hive; import java.io.File; import java.nio.file.Paths; import java.util.Arrays; import java.util.List; import java.util.stream.Collectors; import org.apache.hadoop.fs.Path; import org.apache.iceberg.PartitionSpec; import org.apache.iceberg.Schema; import org.apache.iceberg.TableMetadataParser; import org.apache.iceberg.catalog.TableIdentifier; import org.apache.iceberg.types.Types; import org.junit.After; import org.junit.Before; import static org.apache.iceberg.PartitionSpec.builderFor; import static org.apache.iceberg.TableMetadataParser.getFileExtension; import static org.apache.iceberg.types.Types.NestedField.optional; import static org.apache.iceberg.types.Types.NestedField.required; public class HiveTableBaseTest extends HiveMetastoreTest { static final String TABLE_NAME = "tbl"; static final TableIdentifier TABLE_IDENTIFIER = TableIdentifier.of(DB_NAME, TABLE_NAME); static final Schema schema = new Schema(Types.StructType.of( required(1, "id", Types.LongType.get())).fields()); static final Schema altered = new Schema(Types.StructType.of( required(1, "id", Types.LongType.get()), optional(2, "data", Types.LongType.get())).fields()); private static final PartitionSpec partitionSpec = builderFor(schema).identity("id").build(); private Path tableLocation; @Before public void createTestTable() { this.tableLocation = new Path(catalog.createTable(TABLE_IDENTIFIER, schema, partitionSpec).location()); } @After public void dropTestTable() throws Exception { // drop the table data tableLocation.getFileSystem(hiveConf).delete(tableLocation, true); catalog.dropTable(TABLE_IDENTIFIER, false /* metadata only, location was already deleted */); } private static String getTableBasePath(String tableName) { String databasePath = metastore.getDatabasePath(DB_NAME); return Paths.get(databasePath, tableName).toAbsolutePath().toString(); } protected static Path getTableLocationPath(String tableName) { return new Path("file", null, Paths.get(getTableBasePath(tableName)).toString()); } protected static String getTableLocation(String tableName) { return getTableLocationPath(tableName).toString(); } private static String metadataLocation(String tableName) { return Paths.get(getTableBasePath(tableName), "metadata").toString(); } private static List<String> metadataFiles(String tableName) { return Arrays.stream(new File(metadataLocation(tableName)).listFiles()) .map(File::getAbsolutePath) .collect(Collectors.toList()); } protected static List<String> metadataVersionFiles(String tableName) { return filterByExtension(tableName, getFileExtension(TableMetadataParser.Codec.NONE)); } protected static List<String> manifestFiles(String tableName) { return filterByExtension(tableName, ".avro"); } private static List<String> 
filterByExtension(String tableName, String extension) { return metadataFiles(tableName) .stream() .filter(f -> f.endsWith(extension)) .collect(Collectors.toList()); } }
1
22,085
Nit: this file doesn't need to change. Can you revert this to avoid git conflicts?
apache-iceberg
java
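For the revert the reviewer asks for above, a single file can usually be restored from the PR's base branch without touching the rest of the change (the branch name and file path below are assumptions; substitute the actual base branch and path):

git checkout master -- hive/src/test/java/org/apache/iceberg/hive/HiveTableBaseTest.java
git commit -m "Revert unrelated change to HiveTableBaseTest"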
@@ -66,7 +66,7 @@ function HelpMenu( { children } ) { const handleMenuSelected = useCallback( () => { toggleMenu( false ); - } ); + }, [] ); return ( <div className="googlesitekit-dropdown-menu googlesitekit-dropdown-menu__icon-menu googlesitekit-help-menu mdc-menu-surface--anchor">
1
/** * HelpMenu component. * * Site Kit by Google, Copyright 2021 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * WordPress dependencies */ import { useState, useRef, useEffect, useCallback } from '@wordpress/element'; import { ESCAPE } from '@wordpress/keycodes'; import { __ } from '@wordpress/i18n'; /** * Internal dependencies */ import Button from '../Button'; import HelpIcon from '../../../svg/help.svg'; import HelpMenuLink from './HelpMenuLink'; import Menu from '../Menu'; function HelpMenu( { children } ) { const [ menuOpen, toggleMenu ] = useState( false ); const menuButtonRef = useRef(); const menuRef = useRef(); useEffect( () => { const handleMenuClose = ( event ) => { if ( menuButtonRef?.current && menuRef?.current ) { // Close the menu if the user presses the Escape key // or if they click outside of the menu. if ( ( ( 'keyup' === event.type && ESCAPE === event.keyCode ) || 'mouseup' === event.type ) && ! menuButtonRef.current.contains( event.target ) && ! menuRef.current.contains( event.target ) ) { toggleMenu( false ); } } }; global.addEventListener( 'mouseup', handleMenuClose ); global.addEventListener( 'keyup', handleMenuClose ); return () => { global.removeEventListener( 'mouseup', handleMenuClose ); global.removeEventListener( 'keyup', handleMenuClose ); }; }, [] ); const handleMenu = useCallback( () => { toggleMenu( ! menuOpen ); }, [ menuOpen ] ); const handleMenuSelected = useCallback( () => { toggleMenu( false ); } ); return ( <div className="googlesitekit-dropdown-menu googlesitekit-dropdown-menu__icon-menu googlesitekit-help-menu mdc-menu-surface--anchor"> <Button aria-controls="googlesitekit-help-menu" aria-expanded={ menuOpen } aria-label={ __( 'Open Help menu', 'google-site-kit' ) } aria-haspopup="menu" className="googlesitekit-header__dropdown googlesitekit-help-menu__button googlesitekit-margin-right-0 mdc-button--dropdown" icon={ <HelpIcon width="20" height="20" /> } onClick={ handleMenu } ref={ menuButtonRef } text /> <Menu className="googlesitekit-width-auto" ref={ menuRef } menuOpen={ menuOpen } id="googlesitekit-help-menu" onSelected={ handleMenuSelected } > { children } <HelpMenuLink gaEventLabel="fix_common_issues" href="https://sitekit.withgoogle.com/documentation/fix-common-issues/"> { __( 'Fix common issues', 'google-site-kit' ) } </HelpMenuLink> <HelpMenuLink gaEventLabel="documentation" href="https://sitekit.withgoogle.com/documentation/"> { __( 'Read help docs', 'google-site-kit' ) } </HelpMenuLink> <HelpMenuLink gaEventLabel="support_forum" href="https://wordpress.org/support/plugin/google-site-kit/"> { __( 'Get support', 'google-site-kit' ) } </HelpMenuLink> </Menu> </div> ); } export default HelpMenu;
1
37,754
Technically there's no reason for this to be a callback anymore, but as per our tech decision we want all handlers to use `useCallback` now
google-site-kit-wp
js
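The fix in the diff above works because `toggleMenu` is the setter returned by `useState`, which React guarantees to be referentially stable across renders, so an empty dependency array is safe. The resulting handler (names taken from the file above) looks like this:

// toggleMenu comes from useState, so it never changes identity and
// does not need to be listed as a dependency.
const handleMenuSelected = useCallback( () => {
	toggleMenu( false );
}, [] );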
@@ -33,6 +33,7 @@ const ( BalanceRecord HashID = "BR" Credential HashID = "CR" Genesis HashID = "GE" + Logic HashID = "LO" Message HashID = "MX" NetPrioResponse HashID = "NPR" OneTimeSigKey1 HashID = "OT1"
1
// Copyright (C) 2019 Algorand, Inc. // This file is part of go-algorand // // go-algorand is free software: you can redistribute it and/or modify // it under the terms of the GNU Affero General Public License as // published by the Free Software Foundation, either version 3 of the // License, or (at your option) any later version. // // go-algorand is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU Affero General Public License for more details. // // You should have received a copy of the GNU Affero General Public License // along with go-algorand. If not, see <https://www.gnu.org/licenses/>. package protocol // HashID is a domain separation prefix for an object type that might be hashed // This ensures, for example, the hash of a transaction will never collide with the hash of a vote type HashID string // Hash IDs for specific object types, in lexicographic order to avoid dups. const ( AuctionBid HashID = "aB" AuctionDeposit HashID = "aD" AuctionOutcomes HashID = "aO" AuctionParams HashID = "aP" AuctionSettlement HashID = "aS" AgreementSelector HashID = "AS" BlockHeader HashID = "BH" BalanceRecord HashID = "BR" Credential HashID = "CR" Genesis HashID = "GE" Message HashID = "MX" NetPrioResponse HashID = "NPR" OneTimeSigKey1 HashID = "OT1" OneTimeSigKey2 HashID = "OT2" PaysetFlat HashID = "PF" Payload HashID = "PL" ProposerSeed HashID = "PS" Seed HashID = "SD" TestHashable HashID = "TE" Transaction HashID = "TX" Vote HashID = "VO" )
1
36,072
This isn't strictly part of this PR, but could you move `multiSigString` from `crypto/multisig.go` into this list of `HashID` values? Now that we have other things being hashed into addresses (specifically, these new logic addresses), it's important that the hash input for multisig addrs is domain-separated from logic addresses.
algorand-go-algorand
go
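To illustrate the domain separation the reviewer describes above, here is a small self-contained Go sketch of the prefix-then-hash pattern; `MultisigAddress` is an assumed name, and its value would have to equal whatever string `crypto/multisig.go` currently prepends, otherwise existing multisig addresses would change.

package main

import "fmt"

// HashID mirrors protocol.HashID: a prefix prepended to the data before
// hashing so that different object types can never produce colliding hashes.
type HashID string

const (
	Logic           HashID = "LO"           // the new logic-address prefix from this PR
	MultisigAddress HashID = "MultisigAddr" // assumed to match the existing multiSigString constant
)

// hashRep shows the usual pattern: the bytes that get hashed are prefix || encoded object.
func hashRep(id HashID, data []byte) []byte {
	return append([]byte(id), data...)
}

func main() {
	fmt.Printf("%q\n", hashRep(MultisigAddress, []byte("version|threshold|pubkeys")))
	fmt.Printf("%q\n", hashRep(Logic, []byte("program bytes")))
}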
@@ -0,0 +1,8 @@ +from django.test import TestCase +from graphite.worker_pool.pool import stop_pool + + +class BaseTestCase(TestCase): + + def tearDown(self): + stop_pool()
1
1
11,375
nit: you could have named it just "TestCase" (if django's TestCase was imported differently)
graphite-project-graphite-web
py
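What the reviewer above has in mind is roughly the following (a sketch; same imports as the new file in the diff, with Django's class aliased so the project-wide base class can simply be named `TestCase`):

from django.test import TestCase as DjangoTestCase

from graphite.worker_pool.pool import stop_pool


class TestCase(DjangoTestCase):
    """Base test case for graphite-web tests; always stops the worker pool."""

    def tearDown(self):
        stop_pool()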
@@ -47,6 +47,12 @@ module Selenium @bridge.send_command(cmd: cmd, params: params) end + def print_page(**options) + options[:page_ranges] &&= Array(options[:page_ranges]) + + bridge.print_page(options) + end + private def debugger_address
1
# frozen_string_literal: true # Licensed to the Software Freedom Conservancy (SFC) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The SFC licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. module Selenium module WebDriver module Chrome # # Driver implementation for Chrome. # @api private # class Driver < WebDriver::Driver include DriverExtensions::HasNetworkConditions include DriverExtensions::HasWebStorage include DriverExtensions::HasLocation include DriverExtensions::DownloadsFiles include DriverExtensions::HasDevTools include DriverExtensions::HasAuthentication include DriverExtensions::HasLogEvents def browser :chrome end def bridge_class Bridge end def execute_cdp(cmd, **params) @bridge.send_command(cmd: cmd, params: params) end private def debugger_address capabilities['goog:chromeOptions']['debuggerAddress'] end end # Driver end # Chrome end # WebDriver end # Selenium
1
18,367
The bridge here isn't defined as an accessor/reader, in order to keep it better hidden. So you need to call the ivar `@bridge` directly here.
SeleniumHQ-selenium
py
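Concretely, the comment above just means swapping the bare `bridge` call in the diff for the instance variable, since `Driver` defines no `bridge` reader; the method would then read:

def print_page(**options)
  options[:page_ranges] &&= Array(options[:page_ranges])

  @bridge.print_page(options)
end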
@@ -42,7 +42,6 @@ std::string lldb_private::formatters::swift::SwiftOptionalSummaryProvider:: // retrieve the value of the Some case.. static PointerOrSP ExtractSomeIfAny(ValueObject *optional, - lldb::DynamicValueType dynamic_value = lldb::eNoDynamicValues, bool synthetic_value = false) { if (!optional) return nullptr;
1
//===-- SwiftOptional.cpp ---------------------------------------*- C++ -*-===// // // This source file is part of the Swift.org open source project // // Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors // Licensed under Apache License v2.0 with Runtime Library Exception // // See https://swift.org/LICENSE.txt for license information // See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors // //===----------------------------------------------------------------------===// #include "SwiftOptional.h" #include "lldb/DataFormatters/DataVisualization.h" #include "lldb/DataFormatters/TypeSummary.h" #include "lldb/DataFormatters/ValueObjectPrinter.h" #include "lldb/Symbol/SwiftASTContext.h" #include "lldb/Target/Process.h" #include "lldb/Target/SwiftLanguageRuntime.h" #include "lldb/Utility/DataBufferHeap.h" #include "lldb/Utility/DataExtractor.h" using namespace lldb; using namespace lldb_private; using namespace lldb_private::formatters; using namespace lldb_private::formatters::swift; std::string lldb_private::formatters::swift::SwiftOptionalSummaryProvider:: GetDescription() { StreamString sstr; sstr.Printf("`%s `%s%s%s%s%s%s%s", "Swift.Optional summary provider", Cascades() ? "" : " (not cascading)", " (may show children)", !DoesPrintValue(nullptr) ? " (hide value)" : "", IsOneLiner() ? " (one-line printout)" : "", SkipsPointers() ? " (skip pointers)" : "", SkipsReferences() ? " (skip references)" : "", HideNames(nullptr) ? " (hide member names)" : ""); return sstr.GetString(); } // if this ValueObject is an Optional<T> with the Some(T) case selected, // retrieve the value of the Some case.. static PointerOrSP ExtractSomeIfAny(ValueObject *optional, lldb::DynamicValueType dynamic_value = lldb::eNoDynamicValues, bool synthetic_value = false) { if (!optional) return nullptr; static ConstString g_Some("some"); static ConstString g_None("none"); ValueObjectSP non_synth_valobj = optional->GetNonSyntheticValue(); if (!non_synth_valobj) return nullptr; ConstString value(non_synth_valobj->GetValueAsCString()); if (!value || value == g_None) return nullptr; PointerOrSP value_sp( non_synth_valobj->GetChildMemberWithName(g_Some, true).get()); if (!value_sp) return nullptr; SwiftASTContext::NonTriviallyManagedReferenceStrategy strategy; if (SwiftASTContext::IsNonTriviallyManagedReferenceType( non_synth_valobj->GetCompilerType(), strategy) && strategy == SwiftASTContext::NonTriviallyManagedReferenceStrategy::eWeak) { if (auto process_sp = optional->GetProcessSP()) { if (auto swift_runtime = process_sp->GetSwiftLanguageRuntime()) { lldb::addr_t original_ptr = value_sp->GetValueAsUnsigned(LLDB_INVALID_ADDRESS); lldb::addr_t tweaked_ptr = swift_runtime->MaybeMaskNonTrivialReferencePointer(original_ptr, strategy); if (original_ptr != tweaked_ptr) { CompilerType value_type(value_sp->GetCompilerType()); DataBufferSP buffer_sp( new DataBufferHeap(&tweaked_ptr, sizeof(tweaked_ptr))); DataExtractor extractor(buffer_sp, process_sp->GetByteOrder(), process_sp->GetAddressByteSize()); ExecutionContext exe_ctx(process_sp); value_sp = PointerOrSP(ValueObject::CreateValueObjectFromData( value_sp->GetName().AsCString(), extractor, exe_ctx, value_type)); if (!value_sp) return nullptr; else value_sp->SetSyntheticChildrenGenerated(true); } } } } if (dynamic_value != lldb::eNoDynamicValues) { ValueObjectSP dyn_value_sp = value_sp->GetDynamicValue(dynamic_value); if (dyn_value_sp) value_sp = dyn_value_sp; } if (synthetic_value && value_sp->HasSyntheticValue()) value_sp = 
value_sp->GetSyntheticValue(); return value_sp; } static bool SwiftOptional_SummaryProvider_Impl(ValueObject &valobj, Stream &stream, const TypeSummaryOptions &options) { PointerOrSP some = ExtractSomeIfAny(&valobj, valobj.GetDynamicValueType(), true); if (!some) { stream.Printf("nil"); return true; } const char *value_summary = some->GetSummaryAsCString(); if (value_summary) stream.Printf("%s", value_summary); else if (lldb_private::DataVisualization::ShouldPrintAsOneLiner(*some)) { TypeSummaryImpl::Flags oneliner_flags; oneliner_flags.SetHideItemNames(false) .SetCascades(true) .SetDontShowChildren(false) .SetDontShowValue(false) .SetShowMembersOneLiner(true) .SetSkipPointers(false) .SetSkipReferences(false); StringSummaryFormat oneliner(oneliner_flags, ""); std::string buffer; oneliner.FormatObject(some, buffer, options); stream.Printf("%s", buffer.c_str()); } return true; } bool lldb_private::formatters::swift::SwiftOptionalSummaryProvider:: FormatObject(ValueObject *target_valobj_sp, std::string &dest, const TypeSummaryOptions &options) { if (!target_valobj_sp) return false; StreamString stream; bool is_ok = SwiftOptional_SummaryProvider_Impl(*target_valobj_sp, stream, options); dest.assign(stream.GetString()); return is_ok; } bool lldb_private::formatters::swift::SwiftOptionalSummaryProvider:: DoesPrintChildren(ValueObject *target_valobj) const { if (!target_valobj) return false; PointerOrSP some = ExtractSomeIfAny( target_valobj, target_valobj->GetDynamicValueType(), true); if (!some) return true; lldb_private::Flags some_flags(some->GetCompilerType().GetTypeInfo()); if (some_flags.AllSet(eTypeIsSwift)) { if (some_flags.AnySet(eTypeInstanceIsPointer | eTypeIsProtocol)) return true; } lldb::TypeSummaryImplSP summary_sp = some->GetSummaryFormat(); if (!summary_sp) { if (lldb_private::DataVisualization::ShouldPrintAsOneLiner(*some)) return false; else return (some->GetNumChildren() > 0); } else return (some->GetNumChildren() > 0) && (summary_sp->DoesPrintChildren(some)); } bool lldb_private::formatters::swift::SwiftOptionalSummaryProvider:: DoesPrintValue(ValueObject *valobj) const { return false; } lldb_private::formatters::swift::SwiftOptionalSyntheticFrontEnd:: SwiftOptionalSyntheticFrontEnd(lldb::ValueObjectSP valobj_sp) : SyntheticChildrenFrontEnd(*valobj_sp.get()), m_is_none(false), m_children(false), m_some(nullptr) {} bool lldb_private::formatters::swift::SwiftOptionalSyntheticFrontEnd::IsEmpty() const { return (m_is_none == true || m_children == false || m_some == nullptr); } size_t lldb_private::formatters::swift::SwiftOptionalSyntheticFrontEnd:: CalculateNumChildren() { if (IsEmpty()) return 0; return m_some->GetNumChildren(); } lldb::ValueObjectSP lldb_private::formatters::swift:: SwiftOptionalSyntheticFrontEnd::GetChildAtIndex(size_t idx) { if (IsEmpty()) return nullptr; auto child = m_some->GetChildAtIndex(idx, true); if (m_some->IsSyntheticChildrenGenerated()) child->SetSyntheticChildrenGenerated(true); return child; } bool lldb_private::formatters::swift::SwiftOptionalSyntheticFrontEnd::Update() { m_some = nullptr; m_is_none = true; m_children = false; m_some = ExtractSomeIfAny(&m_backend, m_backend.GetDynamicValueType(), true); if (!m_some) { m_is_none = true; m_children = false; return false; } m_is_none = false; m_children = (m_some->GetNumChildren() > 0); return false; } bool lldb_private::formatters::swift::SwiftOptionalSyntheticFrontEnd:: MightHaveChildren() { return IsEmpty() ? 
false : true; } size_t lldb_private::formatters::swift::SwiftOptionalSyntheticFrontEnd:: GetIndexOfChildWithName(const ConstString &name) { static ConstString g_Some("some"); if (IsEmpty()) return UINT32_MAX; return m_some->GetIndexOfChildWithName(name); } lldb::ValueObjectSP lldb_private::formatters::swift:: SwiftOptionalSyntheticFrontEnd::GetSyntheticValue() { if (m_some && m_some->CanProvideValue()) return m_some->GetSP(); return nullptr; } SyntheticChildrenFrontEnd * lldb_private::formatters::swift::SwiftOptionalSyntheticFrontEndCreator( CXXSyntheticChildren *, lldb::ValueObjectSP valobj_sp) { if (!valobj_sp) return nullptr; return (new SwiftOptionalSyntheticFrontEnd(valobj_sp)); } SyntheticChildrenFrontEnd * lldb_private::formatters::swift::SwiftUncheckedOptionalSyntheticFrontEndCreator( CXXSyntheticChildren *cxx_synth, lldb::ValueObjectSP valobj_sp) { if (!valobj_sp) return nullptr; return SwiftOptionalSyntheticFrontEndCreator(cxx_synth, valobj_sp); }
1
16,367
All the callers of `ExtractSomeIfAny` always pass `true` for `synthetic_value`. Can we get rid of the extra argument?
apple-swift-lldb
cpp
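A sketch of the further simplification the reviewer asks for above: since every call site passes `true`, the parameter can be dropped and the synthetic-value branch applied unconditionally (assuming no callers exist outside this file). Only the changed fragments are shown; the rest of the body stays as it is.

// New declaration: the always-true flag is gone.
static PointerOrSP ExtractSomeIfAny(ValueObject *optional);

// At the end of the body, the old guarded check
//   if (synthetic_value && value_sp->HasSyntheticValue())
// simply becomes unconditional:
if (value_sp->HasSyntheticValue())
  value_sp = value_sp->GetSyntheticValue();
return value_sp;

// Every call site then shrinks to:
PointerOrSP some = ExtractSomeIfAny(&valobj);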
@@ -109,7 +109,8 @@ def main(): logger.info('Environment info:\n' + dash_line + env_info + '\n' + dash_line) meta['env_info'] = env_info - + meta['config_dict'] = dict(cfg) + meta['config_file'] = args.config # log some basic info logger.info(f'Distributed training: {distributed}') logger.info(f'Config:\n{cfg.pretty_text}')
1
import argparse import copy import os import os.path as osp import time import mmcv import torch from mmcv import Config, DictAction from mmcv.runner import init_dist from mmdet import __version__ from mmdet.apis import set_random_seed, train_detector from mmdet.datasets import build_dataset from mmdet.models import build_detector from mmdet.utils import collect_env, get_root_logger def parse_args(): parser = argparse.ArgumentParser(description='Train a detector') parser.add_argument('config', help='train config file path') parser.add_argument('--work-dir', help='the dir to save logs and models') parser.add_argument( '--resume-from', help='the checkpoint file to resume from') parser.add_argument( '--no-validate', action='store_true', help='whether not to evaluate the checkpoint during training') group_gpus = parser.add_mutually_exclusive_group() group_gpus.add_argument( '--gpus', type=int, help='number of gpus to use ' '(only applicable to non-distributed training)') group_gpus.add_argument( '--gpu-ids', type=int, nargs='+', help='ids of gpus to use ' '(only applicable to non-distributed training)') parser.add_argument('--seed', type=int, default=None, help='random seed') parser.add_argument( '--deterministic', action='store_true', help='whether to set deterministic options for CUDNN backend.') parser.add_argument( '--options', nargs='+', action=DictAction, help='arguments in dict') parser.add_argument( '--launcher', choices=['none', 'pytorch', 'slurm', 'mpi'], default='none', help='job launcher') parser.add_argument('--local_rank', type=int, default=0) args = parser.parse_args() if 'LOCAL_RANK' not in os.environ: os.environ['LOCAL_RANK'] = str(args.local_rank) return args def main(): args = parse_args() cfg = Config.fromfile(args.config) if args.options is not None: cfg.merge_from_dict(args.options) # set cudnn_benchmark if cfg.get('cudnn_benchmark', False): torch.backends.cudnn.benchmark = True # work_dir is determined in this priority: CLI > segment in file > filename if args.work_dir is not None: # update configs according to CLI args if args.work_dir is not None cfg.work_dir = args.work_dir elif cfg.get('work_dir', None) is None: # use config filename as default work_dir if cfg.work_dir is None cfg.work_dir = osp.join('./work_dirs', osp.splitext(osp.basename(args.config))[0]) if args.resume_from is not None: cfg.resume_from = args.resume_from if args.gpu_ids is not None: cfg.gpu_ids = args.gpu_ids else: cfg.gpu_ids = range(1) if args.gpus is None else range(args.gpus) # init distributed env first, since logger depends on the dist info. 
if args.launcher == 'none': distributed = False else: distributed = True init_dist(args.launcher, **cfg.dist_params) # create work_dir mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir)) # dump config cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config))) # init the logger before other steps timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime()) log_file = osp.join(cfg.work_dir, f'{timestamp}.log') logger = get_root_logger(log_file=log_file, log_level=cfg.log_level) # init the meta dict to record some important information such as # environment info and seed, which will be logged meta = dict() # log env info env_info_dict = collect_env() env_info = '\n'.join([(f'{k}: {v}') for k, v in env_info_dict.items()]) dash_line = '-' * 60 + '\n' logger.info('Environment info:\n' + dash_line + env_info + '\n' + dash_line) meta['env_info'] = env_info # log some basic info logger.info(f'Distributed training: {distributed}') logger.info(f'Config:\n{cfg.pretty_text}') # set random seeds if args.seed is not None: logger.info(f'Set random seed to {args.seed}, ' f'deterministic: {args.deterministic}') set_random_seed(args.seed, deterministic=args.deterministic) cfg.seed = args.seed meta['seed'] = args.seed model = build_detector( cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg) datasets = [build_dataset(cfg.data.train)] if len(cfg.workflow) == 2: val_dataset = copy.deepcopy(cfg.data.val) val_dataset.pipeline = cfg.data.train.pipeline datasets.append(build_dataset(val_dataset)) if cfg.checkpoint_config is not None: # save mmdet version, config file content and class names in # checkpoints as meta data cfg.checkpoint_config.meta = dict( mmdet_version=__version__, config=cfg.pretty_text, CLASSES=datasets[0].CLASSES) # add an attribute for visualization convenience model.CLASSES = datasets[0].CLASSES train_detector( model, datasets, cfg, distributed=distributed, validate=(not args.no_validate), timestamp=timestamp, meta=meta) if __name__ == '__main__': main()
1
20,978
Better to use the absolute path.
open-mmlab-mmdetection
py
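Applying the review note above to the diff is a small change: `os.path` is already imported as `osp` in this script, so the added line can store the absolute form of the CLI argument. The two added lines would then be:

meta['config_dict'] = dict(cfg)
meta['config_file'] = osp.abspath(args.config)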
@@ -45,8 +45,8 @@ const ( // VoucherInterval defines how many block pass before creating a new voucher VoucherInterval = 1000 - // ChannelExpiryBuffer defines how long the channel remains open past the last voucher - ChannelExpiryBuffer = 2000 + // ChannelExpiryInterval defines how long the channel remains open past the last voucher + ChannelExpiryInterval = 2000 // CreateChannelGasPrice is the gas price of the message used to create the payment channel CreateChannelGasPrice = 0
1
package storage import ( "context" "fmt" "math/big" "sync" "time" "gx/ipfs/QmR8BauakNcBa3RbE4nbQu76PDiJgoQgz8AJdhJuiU4TAw/go-cid" cbor "gx/ipfs/QmRoARq3nkUb13HSKZGepCZSWe5GrVPwx7xURJGZ7KWv9V/go-ipld-cbor" "gx/ipfs/QmVmDhyTTUcQXFD1rRQ64fGLMSAoaQvNH3hwuaCFAPq2hy/errors" "gx/ipfs/QmY5Grm8pJdiSSVsYxx4uNRgweY72EmYwuSDbRnbFok3iY/go-libp2p-peer" "gx/ipfs/QmZNkThpqfVXs9GNbexPrfBbXSLNYeKrE7jwFM2oqHbyqN/go-libp2p-protocol" "gx/ipfs/QmabLh8TrJ3emfAoQk5AbqbLTbMyj7XqumMFmAFxa9epo8/go-multistream" "gx/ipfs/QmaoXrM4Z41PD48JY36YqQGKQpLGjyLA2cKcLsES7YddAq/go-libp2p-host" ipld "gx/ipfs/QmcKKBwfz6FyQdHR2jsXrrF6XeSBXYL86anmWNewpFpoF5/go-ipld-format" "gx/ipfs/Qmf4xQhNomPNhrtZc67qSnfJSjxjXs9LWvknJtSXwimPrM/go-datastore" "gx/ipfs/Qmf4xQhNomPNhrtZc67qSnfJSjxjXs9LWvknJtSXwimPrM/go-datastore/query" "github.com/filecoin-project/go-filecoin/actor/builtin/miner" "github.com/filecoin-project/go-filecoin/actor/builtin/paymentbroker" "github.com/filecoin-project/go-filecoin/address" cbu "github.com/filecoin-project/go-filecoin/cborutil" "github.com/filecoin-project/go-filecoin/porcelain" "github.com/filecoin-project/go-filecoin/repo" "github.com/filecoin-project/go-filecoin/types" "github.com/filecoin-project/go-filecoin/util/convert" ) const ( _ = iota // ErrDupicateDeal indicates that a deal being proposed is a duplicate of an existing deal ErrDupicateDeal ) const clientDatastorePrefix = "client" // Errors map error codes to messages var Errors = map[uint8]error{ ErrDupicateDeal: errors.New("proposal is a duplicate of existing deal; if you would like to create a duplicate, add the --allow-duplicates flag"), } const ( // VoucherInterval defines how many block pass before creating a new voucher VoucherInterval = 1000 // ChannelExpiryBuffer defines how long the channel remains open past the last voucher ChannelExpiryBuffer = 2000 // CreateChannelGasPrice is the gas price of the message used to create the payment channel CreateChannelGasPrice = 0 // CreateChannelGasLimit is the gas limit of the message used to create the payment channel CreateChannelGasLimit = 300 ) type clientNode interface { GetFileSize(context.Context, cid.Cid) (uint64, error) MakeProtocolRequest(ctx context.Context, protocol protocol.ID, peer peer.ID, request interface{}, response interface{}) error GetBlockTime() time.Duration } type clientPorcelainAPI interface { ConfigGet(dottedPath string) (interface{}, error) ChainBlockHeight(ctx context.Context) (*types.BlockHeight, error) CreatePayments(ctx context.Context, config porcelain.CreatePaymentsParams) (*porcelain.CreatePaymentsReturn, error) MinerGetAsk(ctx context.Context, minerAddr address.Address, askID uint64) (miner.Ask, error) MinerGetOwnerAddress(ctx context.Context, minerAddr address.Address) (address.Address, error) MinerGetPeerID(ctx context.Context, minerAddr address.Address) (peer.ID, error) } type clientDeal struct { Miner address.Address Proposal *DealProposal Response *DealResponse } // Client is used to make deals directly with storage miners. type Client struct { deals map[cid.Cid]*clientDeal dealsDs repo.Datastore dealsLk sync.Mutex node clientNode api clientPorcelainAPI } func init() { cbor.RegisterCborType(clientDeal{}) } // NewClient creates a new storage client. 
func NewClient(nd clientNode, api clientPorcelainAPI, dealsDs repo.Datastore) (*Client, error) { smc := &Client{ deals: make(map[cid.Cid]*clientDeal), node: nd, api: api, dealsDs: dealsDs, } if err := smc.loadDeals(); err != nil { return nil, errors.Wrap(err, "failed to load client deals") } return smc, nil } // ProposeDeal is func (smc *Client) ProposeDeal(ctx context.Context, miner address.Address, data cid.Cid, askID uint64, duration uint64, allowDuplicates bool) (*DealResponse, error) { ctx, cancel := context.WithTimeout(ctx, 4*smc.node.GetBlockTime()) defer cancel() size, err := smc.node.GetFileSize(ctx, data) if err != nil { return nil, errors.Wrap(err, "failed to determine the size of the data") } ask, err := smc.api.MinerGetAsk(ctx, miner, askID) if err != nil { return nil, errors.Wrap(err, "failed to get ask price") } price := ask.Price chainHeight, err := smc.api.ChainBlockHeight(ctx) if err != nil { return nil, err } from, err := smc.api.ConfigGet("wallet.defaultAddress") if err != nil { return nil, err } fromAddress, ok := from.(address.Address) if !ok || fromAddress.Empty() { return nil, errors.New("Default wallet address is not set correctly") } minerOwner, err := smc.api.MinerGetOwnerAddress(ctx, miner) if err != nil { return nil, err } totalPrice := price.MulBigInt(big.NewInt(int64(size * duration))) proposal := &DealProposal{ PieceRef: data, Size: types.NewBytesAmount(size), TotalPrice: totalPrice, Duration: duration, MinerAddress: miner, // TODO: Sign this proposal } if smc.isMaybeDupDeal(proposal) && !allowDuplicates { return nil, Errors[ErrDupicateDeal] } // create payment information cpResp, err := smc.api.CreatePayments(ctx, porcelain.CreatePaymentsParams{ From: fromAddress, To: minerOwner, Value: *price.MulBigInt(big.NewInt(int64(size * duration))), Duration: duration, PaymentInterval: VoucherInterval, ChannelExpiry: *chainHeight.Add(types.NewBlockHeight(duration + ChannelExpiryBuffer)), GasPrice: *types.NewAttoFIL(big.NewInt(CreateChannelGasPrice)), GasLimit: types.NewGasUnits(CreateChannelGasLimit), }) if err != nil { return nil, errors.Wrap(err, "error creating payment") } proposal.Payment.Channel = cpResp.Channel proposal.Payment.ChannelMsgCid = cpResp.ChannelMsgCid.String() proposal.Payment.Vouchers = cpResp.Vouchers // send proposal pid, err := smc.api.MinerGetPeerID(ctx, miner) if err != nil { return nil, err } var response DealResponse err = smc.node.MakeProtocolRequest(ctx, makeDealProtocol, pid, proposal, &response) if err != nil { return nil, errors.Wrap(err, "error sending proposal") } if err := smc.checkDealResponse(ctx, &response); err != nil { return nil, errors.Wrap(err, "response check failed") } // Note: currently the miner requests the data out of band if err := smc.recordResponse(&response, miner, proposal); err != nil { return nil, errors.Wrap(err, "failed to track response") } return &response, nil } func (smc *Client) recordResponse(resp *DealResponse, miner address.Address, p *DealProposal) error { proposalCid, err := convert.ToCid(p) if err != nil { return errors.New("failed to get cid of proposal") } if !proposalCid.Equals(resp.ProposalCid) { return fmt.Errorf("cids not equal %s %s", proposalCid, resp.ProposalCid) } smc.dealsLk.Lock() defer smc.dealsLk.Unlock() _, ok := smc.deals[proposalCid] if ok { return fmt.Errorf("deal [%s] is already in progress", proposalCid.String()) } smc.deals[proposalCid] = &clientDeal{ Miner: miner, Proposal: p, Response: resp, } return smc.saveDeal(proposalCid) } func (smc *Client) checkDealResponse(ctx 
context.Context, resp *DealResponse) error { switch resp.State { case Rejected: return fmt.Errorf("deal rejected: %s", resp.Message) case Failed: return fmt.Errorf("deal failed: %s", resp.Message) case Accepted: return nil default: return fmt.Errorf("invalid proposal response: %s", resp.State) } } func (smc *Client) minerForProposal(c cid.Cid) (address.Address, error) { smc.dealsLk.Lock() defer smc.dealsLk.Unlock() st, ok := smc.deals[c] if !ok { return address.Address{}, fmt.Errorf("no such proposal by cid: %s", c) } return st.Miner, nil } // QueryDeal queries an in-progress proposal. func (smc *Client) QueryDeal(ctx context.Context, proposalCid cid.Cid) (*DealResponse, error) { mineraddr, err := smc.minerForProposal(proposalCid) if err != nil { return nil, err } minerpid, err := smc.api.MinerGetPeerID(ctx, mineraddr) if err != nil { return nil, err } q := queryRequest{proposalCid} var resp DealResponse err = smc.node.MakeProtocolRequest(ctx, queryDealProtocol, minerpid, q, &resp) if err != nil { return nil, errors.Wrap(err, "error querying deal") } return &resp, nil } func (smc *Client) loadDeals() error { res, err := smc.dealsDs.Query(query.Query{ Prefix: "/" + clientDatastorePrefix, }) if err != nil { return errors.Wrap(err, "failed to query deals from datastore") } smc.deals = make(map[cid.Cid]*clientDeal) for entry := range res.Next() { var deal clientDeal if err := cbor.DecodeInto(entry.Value, &deal); err != nil { return errors.Wrap(err, "failed to unmarshal deals from datastore") } smc.deals[deal.Response.ProposalCid] = &deal } return nil } func (smc *Client) saveDeal(cid cid.Cid) error { deal, ok := smc.deals[cid] if !ok { return errors.Errorf("Could not find client deal with cid: %s", cid.String()) } datum, err := cbor.DumpObject(deal) if err != nil { return errors.Wrap(err, "could not marshal storageDeal") } key := datastore.KeyWithNamespaces([]string{clientDatastorePrefix, cid.String()}) err = smc.dealsDs.Put(key, datum) if err != nil { return errors.Wrap(err, "could not save client deal to disk, in-memory deals differ from persisted deals!") } return nil } func (smc *Client) isMaybeDupDeal(p *DealProposal) bool { smc.dealsLk.Lock() defer smc.dealsLk.Unlock() for _, d := range smc.deals { if d.Miner == p.MinerAddress && d.Proposal.PieceRef.Equals(p.PieceRef) { return true } } return false } // LoadVouchersForDeal loads vouchers from disk for a given deal func (smc *Client) LoadVouchersForDeal(dealCid cid.Cid) ([]*paymentbroker.PaymentVoucher, error) { queryResults, err := smc.dealsDs.Query(query.Query{Prefix: "/" + clientDatastorePrefix}) if err != nil { return []*paymentbroker.PaymentVoucher{}, errors.Wrap(err, "failed to query vouchers from datastore") } var results []*paymentbroker.PaymentVoucher for entry := range queryResults.Next() { var deal clientDeal if err := cbor.DecodeInto(entry.Value, &deal); err != nil { return results, errors.Wrap(err, "failed to unmarshal deals from datastore") } if deal.Response.ProposalCid == dealCid { results = append(results, deal.Proposal.Payment.Vouchers...) } } return results, nil } // ClientNodeImpl implements the client node interface type ClientNodeImpl struct { dserv ipld.DAGService host host.Host blockTime time.Duration } // NewClientNodeImpl constructs a ClientNodeImpl func NewClientNodeImpl(ds ipld.DAGService, host host.Host, bt time.Duration) *ClientNodeImpl { return &ClientNodeImpl{ dserv: ds, host: host, blockTime: bt, } } // GetBlockTime returns the blocktime this node is configured with. 
func (cni *ClientNodeImpl) GetBlockTime() time.Duration { return cni.blockTime } // GetFileSize returns the size of the file referenced by 'c' func (cni *ClientNodeImpl) GetFileSize(ctx context.Context, c cid.Cid) (uint64, error) { return getFileSize(ctx, c, cni.dserv) } // MakeProtocolRequest makes a request and expects a response from the host using the given protocol. func (cni *ClientNodeImpl) MakeProtocolRequest(ctx context.Context, protocol protocol.ID, peer peer.ID, request interface{}, response interface{}) error { s, err := cni.host.NewStream(ctx, peer, protocol) if err != nil { if err == multistream.ErrNotSupported { return errors.New("could not establish connection with peer. Peer does not support protocol") } return errors.Wrap(err, "failed to establish connection with the peer") } if err := cbu.NewMsgWriter(s).WriteMsg(request); err != nil { return errors.Wrap(err, "failed to write request") } if err := cbu.NewMsgReader(s).ReadMsg(response); err != nil { return errors.Wrap(err, "failed to read response") } return nil }
1
16,849
This is 16 hours; is that enough?
filecoin-project-venus
go
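A rough check of the "16 hours" figure in the review comment above: it appears to correspond to the ChannelExpiryBuffer of 2000 blocks defined in the file. The sketch below is a minimal back-of-the-envelope calculation assuming a ~30-second block time; the actual block time is configurable on the node and is not shown here, so this is an estimate rather than a value taken from the code under review.

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Assumptions for illustration only: the buffer is 2000 blocks (the
	// ChannelExpiryBuffer constant above) and each block takes roughly 30 seconds.
	const channelExpiryBufferBlocks = 2000
	assumedBlockTime := 30 * time.Second

	buffer := time.Duration(channelExpiryBufferBlocks) * assumedBlockTime
	fmt.Printf("channel stays open ~%.1f hours past the last voucher\n", buffer.Hours())
	// Prints ~16.7 hours, which seems to be the figure the reviewer is asking about.
}
```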
@@ -0,0 +1,16 @@
+package tracing
+
+import (
+	"context"
+
+	"go.opencensus.io/trace"
+)
+
+// AddErrorEndSpan will end `span` and adds `err` to `span` iff err is not nil.
+// This is a helper method to cut down on boiler plate.
+func AddErrorEndSpan(ctx context.Context, span *trace.Span, err *error) {
+	if *err != nil {
+		span.AddAttributes(trace.StringAttribute("error", (*err).Error()))
+	}
+	span.End()
+}
1
1
18,687
removing boilerplate. You could also add a `StartSpan` with varargs to inline string attributes.
filecoin-project-venus
go
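A minimal sketch of the reviewer's follow-up suggestion above: a `StartSpan` helper that accepts variadic key/value strings and inlines them as span attributes. The helper name and the alternating key/value convention are assumptions; only the idea of "a StartSpan with varargs" comes from the comment.

```go
package tracing

import (
	"context"

	"go.opencensus.io/trace"
)

// StartSpan starts a span and attaches string attributes passed as alternating
// key/value pairs, so callers don't have to build trace.Attribute slices by hand.
// A trailing key without a value is silently ignored.
func StartSpan(ctx context.Context, name string, keyValues ...string) (context.Context, *trace.Span) {
	ctx, span := trace.StartSpan(ctx, name)
	for i := 0; i+1 < len(keyValues); i += 2 {
		span.AddAttributes(trace.StringAttribute(keyValues[i], keyValues[i+1]))
	}
	return ctx, span
}
```

A call site might then read `ctx, span := StartSpan(ctx, "storage/ProposeDeal", "miner", miner.String())`; the span name and attribute shown are illustrative only.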
@@ -71,9 +71,10 @@ func (b *EthAPIBackend) GetEthContext() (uint64, uint64) {
 	return bn, ts
 }
 
-func (b *EthAPIBackend) GetRollupContext() (uint64, uint64) {
+func (b *EthAPIBackend) GetRollupContext() (uint64, uint64, uint64) {
 	i := uint64(0)
 	q := uint64(0)
+	v := uint64(0)
 	index := b.eth.syncService.GetLatestIndex()
 	if index != nil {
 		i = *index
1
// Copyright 2015 The go-ethereum Authors // This file is part of the go-ethereum library. // // The go-ethereum library is free software: you can redistribute it and/or modify // it under the terms of the GNU Lesser General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // // The go-ethereum library is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU Lesser General Public License for more details. // // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. package eth import ( "context" "errors" "fmt" "math/big" "github.com/ethereum/go-ethereum/accounts" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/bloombits" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/diffdb" "github.com/ethereum/go-ethereum/eth/downloader" "github.com/ethereum/go-ethereum/eth/gasprice" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rpc" ) // EthAPIBackend implements ethapi.Backend for full nodes type EthAPIBackend struct { extRPCEnabled bool eth *Ethereum gpo *gasprice.Oracle l1gpo *gasprice.L1Oracle verifier bool gasLimit uint64 UsingOVM bool MaxCallDataSize int } func (b *EthAPIBackend) IsVerifier() bool { return b.verifier } func (b *EthAPIBackend) IsSyncing() bool { return b.eth.syncService.IsSyncing() } func (b *EthAPIBackend) GasLimit() uint64 { return b.gasLimit } func (b *EthAPIBackend) GetEthContext() (uint64, uint64) { bn := b.eth.syncService.GetLatestL1BlockNumber() ts := b.eth.syncService.GetLatestL1Timestamp() return bn, ts } func (b *EthAPIBackend) GetRollupContext() (uint64, uint64) { i := uint64(0) q := uint64(0) index := b.eth.syncService.GetLatestIndex() if index != nil { i = *index } queueIndex := b.eth.syncService.GetLatestEnqueueIndex() if queueIndex != nil { q = *queueIndex } return i, q } // ChainConfig returns the active chain configuration. 
func (b *EthAPIBackend) ChainConfig() *params.ChainConfig { return b.eth.blockchain.Config() } func (b *EthAPIBackend) CurrentBlock() *types.Block { return b.eth.blockchain.CurrentBlock() } func (b *EthAPIBackend) GetDiff(block *big.Int) (diffdb.Diff, error) { return b.eth.blockchain.GetDiff(block) } func (b *EthAPIBackend) SetHead(number uint64) { if number == 0 { log.Info("Cannot reset to genesis") return } if !b.UsingOVM { b.eth.protocolManager.downloader.Cancel() } b.eth.blockchain.SetHead(number) // Make sure to reset the LatestL1{Timestamp,BlockNumber} block := b.eth.blockchain.CurrentBlock() txs := block.Transactions() if len(txs) == 0 { log.Error("No transactions found in block", "number", number) return } tx := txs[0] blockNumber := tx.L1BlockNumber() if blockNumber == nil { log.Error("No L1BlockNumber found in transaction", "number", number) return } b.eth.syncService.SetLatestL1Timestamp(tx.L1Timestamp()) b.eth.syncService.SetLatestL1BlockNumber(blockNumber.Uint64()) } func (b *EthAPIBackend) HeaderByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Header, error) { // Pending block is only known by the miner if number == rpc.PendingBlockNumber { block := b.eth.miner.PendingBlock() return block.Header(), nil } // Otherwise resolve and return the block if number == rpc.LatestBlockNumber { return b.eth.blockchain.CurrentBlock().Header(), nil } return b.eth.blockchain.GetHeaderByNumber(uint64(number)), nil } func (b *EthAPIBackend) HeaderByNumberOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (*types.Header, error) { if blockNr, ok := blockNrOrHash.Number(); ok { return b.HeaderByNumber(ctx, blockNr) } if hash, ok := blockNrOrHash.Hash(); ok { header := b.eth.blockchain.GetHeaderByHash(hash) if header == nil { return nil, errors.New("header for hash not found") } if blockNrOrHash.RequireCanonical && b.eth.blockchain.GetCanonicalHash(header.Number.Uint64()) != hash { return nil, errors.New("hash is not currently canonical") } return header, nil } return nil, errors.New("invalid arguments; neither block nor hash specified") } func (b *EthAPIBackend) HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error) { return b.eth.blockchain.GetHeaderByHash(hash), nil } func (b *EthAPIBackend) BlockByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Block, error) { // Pending block is only known by the miner if number == rpc.PendingBlockNumber { block := b.eth.miner.PendingBlock() return block, nil } // Otherwise resolve and return the block if number == rpc.LatestBlockNumber { return b.eth.blockchain.CurrentBlock(), nil } return b.eth.blockchain.GetBlockByNumber(uint64(number)), nil } func (b *EthAPIBackend) BlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) { return b.eth.blockchain.GetBlockByHash(hash), nil } func (b *EthAPIBackend) BlockByNumberOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (*types.Block, error) { if blockNr, ok := blockNrOrHash.Number(); ok { return b.BlockByNumber(ctx, blockNr) } if hash, ok := blockNrOrHash.Hash(); ok { header := b.eth.blockchain.GetHeaderByHash(hash) if header == nil { return nil, errors.New("header for hash not found") } if blockNrOrHash.RequireCanonical && b.eth.blockchain.GetCanonicalHash(header.Number.Uint64()) != hash { return nil, errors.New("hash is not currently canonical") } block := b.eth.blockchain.GetBlock(hash, header.Number.Uint64()) if block == nil { return nil, errors.New("header found, but block body is missing") } return block, nil } return 
nil, errors.New("invalid arguments; neither block nor hash specified") } func (b *EthAPIBackend) StateAndHeaderByNumber(ctx context.Context, number rpc.BlockNumber) (*state.StateDB, *types.Header, error) { // Pending state is only known by the miner if number == rpc.PendingBlockNumber { block, state := b.eth.miner.Pending() return state, block.Header(), nil } // Otherwise resolve the block number and return its state header, err := b.HeaderByNumber(ctx, number) if err != nil { return nil, nil, err } if header == nil { return nil, nil, errors.New("header not found") } stateDb, err := b.eth.BlockChain().StateAt(header.Root) return stateDb, header, err } func (b *EthAPIBackend) StateAndHeaderByNumberOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (*state.StateDB, *types.Header, error) { if blockNr, ok := blockNrOrHash.Number(); ok { return b.StateAndHeaderByNumber(ctx, blockNr) } if hash, ok := blockNrOrHash.Hash(); ok { header, err := b.HeaderByHash(ctx, hash) if err != nil { return nil, nil, err } if header == nil { return nil, nil, errors.New("header for hash not found") } if blockNrOrHash.RequireCanonical && b.eth.blockchain.GetCanonicalHash(header.Number.Uint64()) != hash { return nil, nil, errors.New("hash is not currently canonical") } stateDb, err := b.eth.BlockChain().StateAt(header.Root) return stateDb, header, err } return nil, nil, errors.New("invalid arguments; neither block nor hash specified") } func (b *EthAPIBackend) GetReceipts(ctx context.Context, hash common.Hash) (types.Receipts, error) { return b.eth.blockchain.GetReceiptsByHash(hash), nil } func (b *EthAPIBackend) GetLogs(ctx context.Context, hash common.Hash) ([][]*types.Log, error) { receipts := b.eth.blockchain.GetReceiptsByHash(hash) if receipts == nil { return nil, nil } logs := make([][]*types.Log, len(receipts)) for i, receipt := range receipts { logs[i] = receipt.Logs } return logs, nil } func (b *EthAPIBackend) GetTd(blockHash common.Hash) *big.Int { return b.eth.blockchain.GetTdByHash(blockHash) } func (b *EthAPIBackend) GetEVM(ctx context.Context, msg core.Message, state *state.StateDB, header *types.Header) (*vm.EVM, func() error, error) { state.SetBalance(msg.From(), math.MaxBig256) vmError := func() error { return nil } context := core.NewEVMContext(msg, header, b.eth.BlockChain(), nil) return vm.NewEVM(context, state, b.eth.blockchain.Config(), *b.eth.blockchain.GetVMConfig()), vmError, nil } func (b *EthAPIBackend) SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription { return b.eth.BlockChain().SubscribeRemovedLogsEvent(ch) } func (b *EthAPIBackend) SubscribePendingLogsEvent(ch chan<- []*types.Log) event.Subscription { return b.eth.miner.SubscribePendingLogs(ch) } func (b *EthAPIBackend) SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription { return b.eth.BlockChain().SubscribeChainEvent(ch) } func (b *EthAPIBackend) SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent) event.Subscription { return b.eth.BlockChain().SubscribeChainHeadEvent(ch) } func (b *EthAPIBackend) SubscribeChainSideEvent(ch chan<- core.ChainSideEvent) event.Subscription { return b.eth.BlockChain().SubscribeChainSideEvent(ch) } func (b *EthAPIBackend) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription { return b.eth.BlockChain().SubscribeLogsEvent(ch) } // Transactions originating from the RPC endpoints are added to remotes so that // a lock can be used around the remotes for when the sequencer is reorganizing. 
func (b *EthAPIBackend) SendTx(ctx context.Context, signedTx *types.Transaction) error { if b.UsingOVM { // The value field is not rolled up so it must be set to 0 if signedTx.Value().Cmp(new(big.Int)) != 0 { return fmt.Errorf("Cannot send transaction with non-zero value. Use WETH.transfer()") } to := signedTx.To() if to != nil { if *to == (common.Address{}) { return errors.New("Cannot send transaction to zero address") } // Prevent transactions from being submitted if the gas limit too high if signedTx.Gas() >= b.gasLimit { return fmt.Errorf("Transaction gasLimit (%d) is greater than max gasLimit (%d)", signedTx.Gas(), b.gasLimit) } // Prevent QueueOriginSequencer transactions that are too large to // be included in a batch. The `MaxCallDataSize` should be set to // the layer one consensus max transaction size in bytes minus the // constant sized overhead of a batch. This will prevent // a layer two transaction from not being able to be batch submitted // to layer one. if len(signedTx.Data()) > b.MaxCallDataSize { return fmt.Errorf("Calldata cannot be larger than %d, sent %d", b.MaxCallDataSize, len(signedTx.Data())) } } return b.eth.syncService.ApplyTransaction(signedTx) } // OVM Disabled return b.eth.txPool.AddLocal(signedTx) } func (b *EthAPIBackend) SetTimestamp(timestamp int64) { b.eth.blockchain.SetCurrentTimestamp(timestamp) } func (b *EthAPIBackend) GetPoolTransactions() (types.Transactions, error) { pending, err := b.eth.txPool.Pending() if err != nil { return nil, err } var txs types.Transactions for _, batch := range pending { txs = append(txs, batch...) } return txs, nil } func (b *EthAPIBackend) GetPoolTransaction(hash common.Hash) *types.Transaction { return b.eth.txPool.Get(hash) } func (b *EthAPIBackend) GetTransaction(ctx context.Context, txHash common.Hash) (*types.Transaction, common.Hash, uint64, uint64, error) { tx, blockHash, blockNumber, index := rawdb.ReadTransaction(b.eth.ChainDb(), txHash) return tx, blockHash, blockNumber, index, nil } func (b *EthAPIBackend) GetPoolNonce(ctx context.Context, addr common.Address) (uint64, error) { return b.eth.txPool.Nonce(addr), nil } func (b *EthAPIBackend) Stats() (pending int, queued int) { return b.eth.txPool.Stats() } func (b *EthAPIBackend) TxPoolContent() (map[common.Address]types.Transactions, map[common.Address]types.Transactions) { return b.eth.TxPool().Content() } func (b *EthAPIBackend) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription { return b.eth.TxPool().SubscribeNewTxsEvent(ch) } func (b *EthAPIBackend) Downloader() *downloader.Downloader { return b.eth.Downloader() } func (b *EthAPIBackend) ProtocolVersion() int { return b.eth.EthVersion() } func (b *EthAPIBackend) SuggestPrice(ctx context.Context) (*big.Int, error) { return b.gpo.SuggestPrice(ctx) } func (b *EthAPIBackend) SuggestDataPrice(ctx context.Context) (*big.Int, error) { return b.l1gpo.SuggestDataPrice(ctx) } func (b *EthAPIBackend) SetL1GasPrice(ctx context.Context, gasPrice *big.Int) { b.l1gpo.SetL1GasPrice(gasPrice) } func (b *EthAPIBackend) ChainDb() ethdb.Database { return b.eth.ChainDb() } func (b *EthAPIBackend) EventMux() *event.TypeMux { return b.eth.EventMux() } func (b *EthAPIBackend) AccountManager() *accounts.Manager { return b.eth.AccountManager() } func (b *EthAPIBackend) ExtRPCEnabled() bool { return b.extRPCEnabled } func (b *EthAPIBackend) RPCGasCap() *big.Int { return b.eth.config.RPCGasCap } func (b *EthAPIBackend) BloomStatus() (uint64, uint64) { sections, _, _ := b.eth.bloomIndexer.Sections() return 
params.BloomBitsBlocks, sections } func (b *EthAPIBackend) ServiceFilter(ctx context.Context, session *bloombits.MatcherSession) { for i := 0; i < bloomFilterThreads; i++ { go session.Multiplex(bloomRetrievalBatch, bloomRetrievalWait, b.eth.bloomRequests) } }
1
14,987
Can you replace these single letter variables with full names?
ethereum-optimism-optimism
go
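A hedged sketch of what the review comment above asks for: the same GetRollupContext logic with descriptive names in place of i, q, and v. The diff only shows a third uint64 being added, so the meaning given to it here (a verified index) and the GetLatestVerifiedIndex accessor are assumptions used to complete the example; the fragment is meant to slot into the existing EthAPIBackend type rather than compile on its own.

```go
func (b *EthAPIBackend) GetRollupContext() (uint64, uint64, uint64) {
	latestIndex := uint64(0)
	latestQueueIndex := uint64(0)
	latestVerifiedIndex := uint64(0) // assumed meaning of the new third value

	if index := b.eth.syncService.GetLatestIndex(); index != nil {
		latestIndex = *index
	}
	if queueIndex := b.eth.syncService.GetLatestEnqueueIndex(); queueIndex != nil {
		latestQueueIndex = *queueIndex
	}
	// GetLatestVerifiedIndex is hypothetical; the source of the third value is not shown in the diff.
	if verifiedIndex := b.eth.syncService.GetLatestVerifiedIndex(); verifiedIndex != nil {
		latestVerifiedIndex = *verifiedIndex
	}
	return latestIndex, latestQueueIndex, latestVerifiedIndex
}
```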
@@ -1,13 +1,16 @@
 // Copyright (c) .NET Foundation. All rights reserved.
 // Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.
 
+using System;
 using System.IO;
+using Microsoft.AspNet.Http.Features;
 
 namespace Microsoft.AspNet.Server.Kestrel.Filter
 {
     public class ConnectionFilterContext
     {
         public ServerAddress Address { get; set; }
-        public Stream Connection { get; set; }
+        public Stream Connection { get; set; }
+        public Action<IFeatureCollection> PrepareRequest { get; set; }
     }
 }
1
// Copyright (c) .NET Foundation. All rights reserved. // Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information. using System.IO; namespace Microsoft.AspNet.Server.Kestrel.Filter { public class ConnectionFilterContext { public ServerAddress Address { get; set; } public Stream Connection { get; set; } } }
1
6,820
At first I wasn't sure, but now I think I'm sold on making PrepareRequest an action over adding a state object to the context.
aspnet-KestrelHttpServer
.cs
@@ -679,9 +679,9 @@ class Upgrade
         // Eliminate obsolete config override settings:
         unset($newConfig['Extra_Config']);
 
-        // Update generator if it is default value:
+        // Update generator if it contains a version number:
         if (isset($newConfig['Site']['generator'])
-            && $newConfig['Site']['generator'] == 'VuFind ' . $this->from
+            && preg_match('/^VuFind (\d+\.?)+$/', $newConfig['Site']['generator'])
         ) {
             $newConfig['Site']['generator'] = 'VuFind ' . $this->to;
         }
1
<?php /** * VF Configuration Upgrade Tool * * PHP version 7 * * Copyright (C) Villanova University 2010. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2, * as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * * @category VuFind * @package Config * @author Demian Katz <[email protected]> * @license http://opensource.org/licenses/gpl-2.0.php GNU General Public License * @link https://vufind.org Main Site */ namespace VuFind\Config; use VuFind\Config\Writer as ConfigWriter; use VuFind\Exception\FileAccess as FileAccessException; /** * Class to upgrade previous VuFind configurations to the current version * * @category VuFind * @package Config * @author Demian Katz <[email protected]> * @license http://opensource.org/licenses/gpl-2.0.php GNU General Public License * @link https://vufind.org Main Site */ class Upgrade { /** * Version we're upgrading from * * @var string */ protected $from; /** * Version we're upgrading to * * @var string */ protected $to; /** * Directory containing configurations to upgrade * * @var string */ protected $oldDir; /** * Directory containing unmodified new configurations * * @var string */ protected $rawDir; /** * Directory where new configurations should be written (null for test mode) * * @var string */ protected $newDir; /** * Parsed old configurations * * @var array */ protected $oldConfigs = []; /** * Processed new configurations * * @var array */ protected $newConfigs = []; /** * Comments parsed from configuration files * * @var array */ protected $comments = []; /** * Warnings generated during upgrade process * * @var array */ protected $warnings = []; /** * Are we upgrading files in place rather than creating them? * * @var bool */ protected $inPlaceUpgrade; /** * Have we modified permissions.ini? * * @var bool */ protected $permissionsModified = false; /** * Constructor * * @param string $from Version we're upgrading from. * @param string $to Version we're upgrading to. * @param string $oldDir Directory containing old configurations. * @param string $rawDir Directory containing raw new configurations. * @param string $newDir Directory to write updated new configurations into * (leave null to disable writes -- used in test mode). */ public function __construct($from, $to, $oldDir, $rawDir, $newDir = null) { $this->from = $from; $this->to = $to; $this->oldDir = $oldDir; $this->rawDir = $rawDir; $this->newDir = $newDir; $this->inPlaceUpgrade = ($this->oldDir == $this->newDir); } /** * Run through all of the necessary upgrading. * * @return void */ public function run() { // Load all old configurations: $this->loadConfigs(); // Upgrade them one by one and write the results to disk; order is // important since in some cases, settings may migrate out of config.ini // and into other files. 
$this->upgradeConfig(); $this->upgradeAuthority(); $this->upgradeFacetsAndCollection(); $this->upgradeFulltext(); $this->upgradeReserves(); $this->upgradeSearches(); $this->upgradeSitemap(); $this->upgradeSms(); $this->upgradeSummon(); $this->upgradePrimo(); $this->upgradeWorldCat(); // The previous upgrade routines may have added values to permissions.ini, // so we should save it last. It doesn't have its own upgrade routine. $this->saveModifiedConfig('permissions.ini'); // The following routines load special configurations that were not // explicitly loaded by loadConfigs: if ($this->from < 2) { // some pieces only apply to 1.x upgrade! $this->upgradeSolrMarc(); $this->upgradeSearchSpecs(); } $this->upgradeILS(); } /** * Get processed configurations (used by test routines). * * @return array */ public function getNewConfigs() { return $this->newConfigs; } /** * Get warning strings generated during upgrade process. * * @return array */ public function getWarnings() { return $this->warnings; } /** * Add a warning message. * * @param string $msg Warning message. * * @return void */ protected function addWarning($msg) { $this->warnings[] = $msg; } /** * Support function -- merge the contents of two arrays parsed from ini files. * * @param string $config_ini The base config array. * @param string $custom_ini Overrides to apply on top of the base array. * * @return array The merged results. */ public static function iniMerge($config_ini, $custom_ini) { foreach ($custom_ini as $k => $v) { // Make a recursive call if we need to merge array values into an // existing key... otherwise just drop the value in place. if (is_array($v) && isset($config_ini[$k])) { $config_ini[$k] = self::iniMerge($config_ini[$k], $custom_ini[$k]); } else { $config_ini[$k] = $v; } } return $config_ini; } /** * Load the old config.ini settings. * * @return void */ protected function loadOldBaseConfig() { // Load the base settings: $oldIni = $this->oldDir . '/config.ini'; $mainArray = file_exists($oldIni) ? parse_ini_file($oldIni, true) : []; // Merge in local overrides as needed. VuFind 2 structures configurations // differently, so people who used this mechanism will need to refactor // their configurations to take advantage of the new "local directory" // feature. For now, we'll just merge everything to avoid losing settings. if (isset($mainArray['Extra_Config']) && isset($mainArray['Extra_Config']['local_overrides']) ) { $file = trim( $this->oldDir . '/' . $mainArray['Extra_Config']['local_overrides'] ); $localOverride = @parse_ini_file($file, true); if ($localOverride) { $mainArray = self::iniMerge($mainArray, $localOverride); } } // Save the configuration to the appropriate place: $this->oldConfigs['config.ini'] = $mainArray; } /** * Find the path to the old configuration file. * * @param string $filename Filename of configuration file. * * @return string */ protected function getOldConfigPath($filename) { // Check if the user has overridden the filename in the [Extra_Config] // section: $index = str_replace('.ini', '', $filename); if (isset($this->oldConfigs['config.ini']['Extra_Config'][$index])) { $path = $this->oldDir . '/' . $this->oldConfigs['config.ini']['Extra_Config'][$index]; if (file_exists($path) && is_file($path)) { return $path; } } return $this->oldDir . '/' . $filename; } /** * Load all of the user's existing configurations. * * @return void */ protected function loadConfigs() { // Configuration files to load. 
Note that config.ini must always be loaded // first so that getOldConfigPath can work properly! $configs = ['config.ini']; foreach (glob($this->rawDir . '/*.ini') as $ini) { $parts = explode('/', str_replace('\\', '/', $ini)); $filename = array_pop($parts); if ($filename !== 'config.ini') { $configs[] = $filename; } } foreach ($configs as $config) { // Special case for config.ini, since we may need to overlay extra // settings: if ($config == 'config.ini') { $this->loadOldBaseConfig(); } else { $path = $this->getOldConfigPath($config); $this->oldConfigs[$config] = file_exists($path) ? parse_ini_file($path, true) : []; } $this->newConfigs[$config] = parse_ini_file($this->rawDir . '/' . $config, true); $this->comments[$config] = $this->extractComments($this->rawDir . '/' . $config); } } /** * Apply settings from an old configuration to a new configuration. * * @param string $filename Name of the configuration being updated. * @param array $fullSections Array of section names that need to be fully * overridden (as opposed to overridden on a setting-by-setting basis). * * @return void */ protected function applyOldSettings($filename, $fullSections = []) { // First override all individual settings: foreach ($this->oldConfigs[$filename] as $section => $subsection) { foreach ($subsection as $key => $value) { $this->newConfigs[$filename][$section][$key] = $value; } } // Now override on a section-by-section basis where necessary: foreach ($fullSections as $section) { $this->newConfigs[$filename][$section] = $this->oldConfigs[$filename][$section] ?? []; } } /** * Save a modified configuration file. * * @param string $filename Name of config file to write (contents will be * pulled from current state of object properties). * * @throws FileAccessException * @return void */ protected function saveModifiedConfig($filename) { if (null === $this->newDir) { // skip write if no destination return; } // If we're doing an in-place upgrade, and the source file is empty, // there is no point in upgrading anything (the file doesn't exist). if (empty($this->oldConfigs[$filename]) && $this->inPlaceUpgrade) { // Special case: if we set up custom permissions, we need to // write the file even if it didn't previously exist. if (!$this->permissionsModified || $filename !== 'permissions.ini') { return; } } // If target file already exists, back it up: $outfile = $this->newDir . '/' . $filename; $bakfile = $outfile . '.bak.' . time(); if (file_exists($outfile) && !copy($outfile, $bakfile)) { throw new FileAccessException( "Error: Could not copy {$outfile} to {$bakfile}." ); } $writer = new ConfigWriter( $outfile, $this->newConfigs[$filename], $this->comments[$filename] ); if (!$writer->save()) { throw new FileAccessException( "Error: Problem writing to {$outfile}." ); } } /** * Save an unmodified configuration file -- copy the old version, unless it is * the same as the new version! * * @param string $filename Path to the old config file * * @throws FileAccessException * @return void */ protected function saveUnmodifiedConfig($filename) { if (null === $this->newDir) { // skip write if no destination return; } if ($this->inPlaceUpgrade) { // skip write if doing in-place upgrade return; } // Figure out directories for all versions of this config file: $src = $this->getOldConfigPath($filename); $raw = $this->rawDir . '/' . $filename; $dest = $this->newDir . '/' . $filename; // Compare the source file against the raw file; if they happen to be the // same, we don't need to copy anything! 
if (file_exists($src) && file_exists($raw) && md5(file_get_contents($src)) == md5(file_get_contents($raw)) ) { return; } // If we got this far, we need to copy the user's file into place: if (file_exists($src) && !copy($src, $dest)) { throw new FileAccessException( "Error: Could not copy {$src} to {$dest}." ); } } /** * Check for invalid theme setting. * * @param string $setting Name of setting in [Site] section to check. * @param string $default Default value to use if invalid option was found. * * @return void */ protected function checkTheme($setting, $default = null) { // If a setting is not set, there is nothing to check: $theme = $this->newConfigs['config.ini']['Site'][$setting] ?? null; if (empty($theme)) { return; } $parts = explode(',', $theme); $theme = trim($parts[0]); if (!file_exists(APPLICATION_PATH . '/themes/' . $theme) || !is_dir(APPLICATION_PATH . '/themes/' . $theme) ) { if ($default === null) { $this->addWarning( "WARNING: This version of VuFind does not support the {$theme} " . "theme. As such, we have disabled your {$setting} setting." ); unset($this->newConfigs['config.ini']['Site'][$setting]); } else { $this->addWarning( "WARNING: This version of VuFind does not support " . "the {$theme} theme. Your config.ini [Site] {$setting} setting" . " has been reset to the default: {$default}. You may need to " . "reimplement your custom theme." ); $this->newConfigs['config.ini']['Site'][$setting] = $default; } } } /** * Is this a default BulkExport options setting? * * @param string $eo Bulk export options * * @return bool */ protected function isDefaultBulkExportOptions($eo) { $from = (float)$this->from; if ($from >= 2.4) { $default = 'MARC:MARCXML:EndNote:EndNoteWeb:RefWorks:BibTeX:RIS'; } elseif ($from >= 2.0) { $default = 'MARC:MARCXML:EndNote:EndNoteWeb:RefWorks:BibTeX'; } elseif ($from >= 1.4) { $default = 'MARC:MARCXML:EndNote:RefWorks:BibTeX'; } elseif ($from >= 1.3) { $default = 'MARC:EndNote:RefWorks:BibTeX'; } elseif ($from >= 1.2) { $default = 'MARC:EndNote:BibTeX'; } else { $default = 'MARC:EndNote'; } return $eo == $default; } /** * Add warnings if Amazon problems were found. * * @param array $config Configuration to check * * @return void */ protected function checkAmazonConfig($config) { // Warn the user if they have Amazon enabled but do not have the appropriate // credentials set up. $hasAmazonReview = stristr($config['Content']['reviews'] ?? '', 'amazon'); $hasAmazonCover = stristr($config['Content']['coverimages'] ?? '', 'amazon'); if ($hasAmazonReview || $hasAmazonCover) { $this->addWarning( 'WARNING: You have Amazon content enabled, but VuFind no longer ' . 'supports it. You should remove Amazon references from config.ini.' ); } } /** * Upgrade config.ini. * * @throws FileAccessException * @return void */ protected function upgradeConfig() { // override new version's defaults with matching settings from old version: $this->applyOldSettings('config.ini'); // Set up reference for convenience (and shorter lines): $newConfig = & $this->newConfigs['config.ini']; // If the [BulkExport] options setting is present and non-default, warn // the user about its deprecation. if (isset($newConfig['BulkExport']['options'])) { $default = $this->isDefaultBulkExportOptions( $newConfig['BulkExport']['options'] ); if (!$default) { $this->addWarning( 'The [BulkExport] options setting is deprecated; please ' . 'customize the [Export] section instead.' ); } unset($newConfig['BulkExport']['options']); } // If [Statistics] is present, warn the user about its deprecation. 
if (isset($newConfig['Statistics'])) { $this->addWarning( 'The Statistics module has been removed from VuFind. ' . 'For usage tracking, please configure Google Analytics or Piwik.' ); unset($newConfig['Statistics']); } // Warn the user about Amazon configuration issues: $this->checkAmazonConfig($newConfig); // Warn the user if they have enabled a deprecated Google API: if (isset($newConfig['GoogleSearch'])) { unset($newConfig['GoogleSearch']); $this->addWarning( 'The [GoogleSearch] section of config.ini is no ' . 'longer supported due to changes in Google APIs.' ); } if (isset($newConfig['Content']['recordMap']) && 'google' == strtolower($newConfig['Content']['recordMap']) ) { unset($newConfig['Content']['recordMap']); unset($newConfig['Content']['googleMapApiKey']); $this->addWarning( 'Google Maps is no longer a supported Content/recordMap option;' . ' please review your config.ini.' ); } if (isset($newConfig['GoogleAnalytics']['apiKey'])) { if (!isset($newConfig['GoogleAnalytics']['universal']) || !$newConfig['GoogleAnalytics']['universal'] ) { $this->addWarning( 'The [GoogleAnalytics] universal setting is off. See config.ini ' . 'for important information on how to upgrade your Analytics.' ); } } // Upgrade CAPTCHA Options $legacySettingsMap = [ 'publicKey' => 'recaptcha_siteKey', 'siteKey' => 'recaptcha_siteKey', 'privateKey' => 'recaptcha_secretKey', 'secretKey' => 'recaptcha_secretKey', 'theme' => 'recaptcha_theme', ]; $foundRecaptcha = false; foreach ($legacySettingsMap as $old => $new) { if (isset($newConfig['Captcha'][$old])) { $newConfig['Captcha'][$new] = $newConfig['Captcha'][$old]; unset($newConfig['Captcha'][$old]); } if (isset($newConfig['Captcha'][$new])) { $foundRecaptcha = true; } } if ($foundRecaptcha && !isset($newConfig['Captcha']['types'])) { $newConfig['Captcha']['types'] = ['recaptcha']; } // Warn the user about deprecated WorldCat settings: if (isset($newConfig['WorldCat']['LimitCodes'])) { unset($newConfig['WorldCat']['LimitCodes']); $this->addWarning( 'The [WorldCat] LimitCodes setting never had any effect and has been' . ' removed.' ); } $badKeys = ['id', 'xISBN_token', 'xISBN_secret', 'xISSN_token', 'xISSN_secret']; foreach ($badKeys as $key) { if (isset($newConfig['WorldCat'][$key])) { unset($newConfig['WorldCat'][$key]); $this->addWarning( 'The [WorldCat] ' . $key . ' setting is no longer used and' . ' has been removed.' ); } } if (isset($newConfig['Record']['related']) && in_array('Editions', $newConfig['Record']['related']) ) { $newConfig['Record']['related'] = array_diff( $newConfig['Record']['related'], ['Editions'] ); $this->addWarning( 'The Editions related record module is no longer ' . 'supported due to OCLC\'s xID API shutdown.' . ' It has been removed from your settings.' 
); } // Upgrade Google Options: if (isset($newConfig['Content']['GoogleOptions']) && !is_array($newConfig['Content']['GoogleOptions']) ) { $newConfig['Content']['GoogleOptions'] = ['link' => $newConfig['Content']['GoogleOptions']]; } // Disable unused, obsolete setting: unset($newConfig['Index']['local']); // Warn the user if they are using an unsupported theme: $this->checkTheme('theme', 'bootprint3'); $this->checkTheme('mobile_theme', null); // Translate legacy auth settings: if (strtolower($newConfig['Authentication']['method']) == 'db') { $newConfig['Authentication']['method'] = 'Database'; } if (strtolower($newConfig['Authentication']['method']) == 'sip') { $newConfig['Authentication']['method'] = 'SIP2'; } // Translate legacy session settings: $newConfig['Session']['type'] = ucwords( str_replace('session', '', strtolower($newConfig['Session']['type'])) ); if ($newConfig['Session']['type'] == 'Mysql') { $newConfig['Session']['type'] = 'Database'; } // Eliminate obsolete database settings: $newConfig['Database'] = ['database' => $newConfig['Database']['database']]; // Eliminate obsolete config override settings: unset($newConfig['Extra_Config']); // Update generator if it is default value: if (isset($newConfig['Site']['generator']) && $newConfig['Site']['generator'] == 'VuFind ' . $this->from ) { $newConfig['Site']['generator'] = 'VuFind ' . $this->to; } // Update Syndetics config: if (isset($newConfig['Syndetics']['url'])) { $newConfig['Syndetics']['use_ssl'] = (strpos($newConfig['Syndetics']['url'], 'https://') === false) ? '' : 1; unset($newConfig['Syndetics']['url']); } // Translate obsolete permission settings: $this->upgradeAdminPermissions(); // Deal with shard settings (which may have to be moved to another file): $this->upgradeShardSettings(); // save the file $this->saveModifiedConfig('config.ini'); } /** * Translate obsolete permission settings. * * @return void */ protected function upgradeAdminPermissions() { $config = & $this->newConfigs['config.ini']; $permissions = & $this->newConfigs['permissions.ini']; if (isset($config['AdminAuth'])) { $permissions['access.AdminModule'] = []; if (isset($config['AdminAuth']['ipRegEx'])) { $permissions['access.AdminModule']['ipRegEx'] = $config['AdminAuth']['ipRegEx']; } if (isset($config['AdminAuth']['userWhitelist'])) { $permissions['access.AdminModule']['username'] = $config['AdminAuth']['userWhitelist']; } // If no settings exist in config.ini, we grant access to everyone // by allowing both logged-in and logged-out roles. if (empty($permissions['access.AdminModule'])) { $permissions['access.AdminModule']['role'] = ['guest', 'loggedin']; } $permissions['access.AdminModule']['permission'] = 'access.AdminModule'; $this->permissionsModified = true; // Remove any old settings remaining in config.ini: unset($config['AdminAuth']); } } /** * Change an array key. * * @param array $array Array to rewrite * @param string $old Old key name * @param string $new New key name * * @return array */ protected function changeArrayKey($array, $old, $new) { $newArr = []; foreach ($array as $k => $v) { if ($k === $old) { $k = $new; } $newArr[$k] = $v; } return $newArr; } /** * Support method for upgradeFacetsAndCollection() - change the name of * a facet field. 
* * @param string $old Old field name * @param string $new New field name * * @return void */ protected function renameFacet($old, $new) { $didWork = false; if (isset($this->newConfigs['facets.ini']['Results'][$old])) { $this->newConfigs['facets.ini']['Results'] = $this->changeArrayKey( $this->newConfigs['facets.ini']['Results'], $old, $new ); $didWork = true; } if (isset($this->newConfigs['Collection.ini']['Facets'][$old])) { $this->newConfigs['Collection.ini']['Facets'] = $this->changeArrayKey( $this->newConfigs['Collection.ini']['Facets'], $old, $new ); $didWork = true; } if ($didWork) { $this->newConfigs['facets.ini']['LegacyFields'][$old] = $new; } } /** * Upgrade facets.ini and Collection.ini (since these are tied together). * * @throws FileAccessException * @return void */ protected function upgradeFacetsAndCollection() { // we want to retain the old installation's various facet groups // exactly as-is $facetGroups = [ 'Results', 'ResultsTop', 'Advanced', 'Author', 'CheckboxFacets', 'HomePage' ]; $this->applyOldSettings('facets.ini', $facetGroups); $this->applyOldSettings('Collection.ini', ['Facets', 'Sort']); // fill in home page facets with advanced facets if missing: if (!isset($this->oldConfigs['facets.ini']['HomePage'])) { $this->newConfigs['facets.ini']['HomePage'] = $this->newConfigs['facets.ini']['Advanced']; } // rename changed facets $this->renameFacet('authorStr', 'author_facet'); // save the file $this->saveModifiedConfig('facets.ini'); $this->saveModifiedConfig('Collection.ini'); } /** * Update an old VuFind 1.x-style autocomplete handler name to the new style. * * @param string $name Name of module. * * @return string */ protected function upgradeAutocompleteName($name) { if ($name == 'NoAutocomplete') { return 'None'; } return str_replace('Autocomplete', '', $name); } /** * Upgrade searches.ini. * * @throws FileAccessException * @return void */ protected function upgradeSearches() { // we want to retain the old installation's Basic/Advanced search settings // and sort settings exactly as-is $groups = [ 'Basic_Searches', 'Advanced_Searches', 'Sorting', 'DefaultSortingByType' ]; $this->applyOldSettings('searches.ini', $groups); // Fix autocomplete settings in case they use the old style: $newConfig = & $this->newConfigs['searches.ini']; if (isset($newConfig['Autocomplete']['default_handler'])) { $newConfig['Autocomplete']['default_handler'] = $this->upgradeAutocompleteName( $newConfig['Autocomplete']['default_handler'] ); } if (isset($newConfig['Autocomplete_Types'])) { foreach ($newConfig['Autocomplete_Types'] as $k => $v) { $parts = explode(':', $v); $parts[0] = $this->upgradeAutocompleteName($parts[0]); $newConfig['Autocomplete_Types'][$k] = implode(':', $parts); } } // fix call number sort settings: if (isset($newConfig['Sorting']['callnumber'])) { $newConfig['Sorting']['callnumber-sort'] = $newConfig['Sorting']['callnumber']; unset($newConfig['Sorting']['callnumber']); } if (isset($newConfig['DefaultSortingByType'])) { foreach ($newConfig['DefaultSortingByType'] as & $v) { if ($v === 'callnumber') { $v = 'callnumber-sort'; } } } $this->upgradeSpellingSettings('searches.ini', ['CallNumber', 'WorkKeys']); // save the file $this->saveModifiedConfig('searches.ini'); } /** * Upgrade spelling settings to account for refactoring of spelling as a * recommendation module starting in release 2.4. 
* * @param string $ini .ini file to modify * @param array $skip Keys to skip within [TopRecommendations] * * @return void */ protected function upgradeSpellingSettings($ini, $skip = []) { // Turn on the spelling recommendations if we're upgrading from a version // prior to 2.4. if ((float)$this->from < 2.4) { // Fix defaults in general section: $cfg = & $this->newConfigs[$ini]['General']; $keys = ['default_top_recommend', 'default_noresults_recommend']; foreach ($keys as $key) { if (!isset($cfg[$key])) { $cfg[$key] = []; } if (!in_array('SpellingSuggestions', $cfg[$key])) { $cfg[$key][] = 'SpellingSuggestions'; } } // Fix settings in [TopRecommendations] $cfg = & $this->newConfigs[$ini]['TopRecommendations']; // Add SpellingSuggestions to all non-skipped handlers: foreach ($cfg as $key => & $value) { if (!in_array($key, $skip) && !in_array('SpellingSuggestions', $value) ) { $value[] = 'SpellingSuggestions'; } } // Define handlers with no spelling support as the default minus the // Spelling option: foreach ($skip as $key) { if (!isset($cfg[$key])) { $cfg[$key] = array_diff( $this->newConfigs[$ini]['General']['default_top_recommend'], ['SpellingSuggestions'] ); } } } } /** * Upgrade fulltext.ini. * * @throws FileAccessException * @return void */ protected function upgradeFulltext() { $this->saveUnmodifiedConfig('fulltext.ini'); } /** * Upgrade sitemap.ini. * * @throws FileAccessException * @return void */ protected function upgradeSitemap() { $this->saveUnmodifiedConfig('sitemap.ini'); } /** * Upgrade sms.ini. * * @throws FileAccessException * @return void */ protected function upgradeSms() { $this->applyOldSettings('sms.ini', ['Carriers']); $this->saveModifiedConfig('sms.ini'); } /** * Upgrade authority.ini. * * @throws FileAccessException * @return void */ protected function upgradeAuthority() { // we want to retain the old installation's search and facet settings // exactly as-is $groups = [ 'Facets', 'Basic_Searches', 'Advanced_Searches', 'Sorting' ]; $this->applyOldSettings('authority.ini', $groups); // save the file $this->saveModifiedConfig('authority.ini'); } /** * Upgrade reserves.ini. * * @throws FileAccessException * @return void */ protected function upgradeReserves() { // If Reserves module is disabled, don't bother updating config: if (!isset($this->newConfigs['config.ini']['Reserves']['search_enabled']) || !$this->newConfigs['config.ini']['Reserves']['search_enabled'] ) { return; } // we want to retain the old installation's search and facet settings // exactly as-is $groups = [ 'Facets', 'Basic_Searches', 'Advanced_Searches', 'Sorting' ]; $this->applyOldSettings('reserves.ini', $groups); // save the file $this->saveModifiedConfig('reserves.ini'); } /** * Upgrade Summon.ini. * * @throws FileAccessException * @return void */ protected function upgradeSummon() { // If Summon is disabled in our current configuration, we don't need to // load any Summon-specific settings: if (!isset($this->newConfigs['config.ini']['Summon']['apiKey'])) { return; } // we want to retain the old installation's search and facet settings // exactly as-is $groups = [ 'Facets', 'FacetsTop', 'Basic_Searches', 'Advanced_Searches', 'Sorting' ]; $this->applyOldSettings('Summon.ini', $groups); // Turn on advanced checkbox facets if we're upgrading from a version // prior to 2.3. if ((float)$this->from < 2.3) { $cfg = & $this->newConfigs['Summon.ini']['Advanced_Facet_Settings']; $specialFacets = $cfg['special_facets'] ?? 
null; if (empty($specialFacets)) { $cfg['special_facets'] = 'checkboxes:Summon'; } elseif (false === strpos('checkboxes', (string)$specialFacets)) { $cfg['special_facets'] .= ',checkboxes:Summon'; } } // update permission settings $this->upgradeSummonPermissions(); $this->upgradeSpellingSettings('Summon.ini'); // save the file $this->saveModifiedConfig('Summon.ini'); } /** * Translate obsolete permission settings. * * @return void */ protected function upgradeSummonPermissions() { $config = & $this->newConfigs['Summon.ini']; $permissions = & $this->newConfigs['permissions.ini']; if (isset($config['Auth'])) { $permissions['access.SummonExtendedResults'] = []; if (isset($config['Auth']['check_login']) && $config['Auth']['check_login'] ) { $permissions['access.SummonExtendedResults']['role'] = ['loggedin']; } if (isset($config['Auth']['ip_range'])) { $permissions['access.SummonExtendedResults']['ipRegEx'] = $config['Auth']['ip_range']; } if (!empty($permissions['access.SummonExtendedResults'])) { $permissions['access.SummonExtendedResults']['boolean'] = 'OR'; $permissions['access.SummonExtendedResults']['permission'] = 'access.SummonExtendedResults'; $this->permissionsModified = true; } else { unset($permissions['access.SummonExtendedResults']); } // Remove any old settings remaining in Summon.ini: unset($config['Auth']); } } /** * Upgrade Primo.ini. * * @throws FileAccessException * @return void */ protected function upgradePrimo() { // we want to retain the old installation's search and facet settings // exactly as-is $groups = [ 'Facets', 'FacetsTop', 'Basic_Searches', 'Advanced_Searches', 'Sorting' ]; $this->applyOldSettings('Primo.ini', $groups); // update permission settings $this->upgradePrimoPermissions(); // update server settings $this->upgradePrimoServerSettings(); // save the file $this->saveModifiedConfig('Primo.ini'); } /** * Translate obsolete permission settings. * * @return void */ protected function upgradePrimoPermissions() { $config = & $this->newConfigs['Primo.ini']; $permissions = & $this->newConfigs['permissions.ini']; if (isset($config['Institutions']['code']) && isset($config['Institutions']['regex']) ) { $codes = $config['Institutions']['code']; $regex = $config['Institutions']['regex']; if (count($regex) != count($codes)) { $this->addWarning( 'Mismatched code/regex counts in Primo.ini [Institutions].' ); } // Map parallel arrays into code => array of regexes and detect // wildcard regex to treat as default code. $map = []; $default = null; foreach ($codes as $i => $code) { if ($regex[$i] == '/.*/') { $default = $code; } else { $map[$code] = !isset($map[$code]) ? [$regex[$i]] : array_merge($map[$code], [$regex[$i]]); } } foreach ($map as $code => $regexes) { $perm = "access.PrimoInstitution.$code"; $config['Institutions']["onCampusRule['$code']"] = $perm; $permissions[$perm] = [ 'ipRegEx' => count($regexes) == 1 ? $regexes[0] : $regexes, 'permission' => $perm, ]; $this->permissionsModified = true; } if (null !== $default) { $config['Institutions']['defaultCode'] = $default; } // Remove any old settings remaining in Primo.ini: unset($config['Institutions']['code']); unset($config['Institutions']['regex']); } } /** * Translate obsolete server settings. * * @return void */ protected function upgradePrimoServerSettings() { $config = & $this->newConfigs['Primo.ini']; // Convert apiId to url if (isset($config['General']['apiId'])) { $url = 'http://' . $config['General']['apiId'] . '.hosted.exlibrisgroup.com'; if (isset($config['General']['port'])) { $url .= ':' . 
$config['General']['port']; } else { $url .= ':1701'; } $config['General']['url'] = $url; // Remove any old settings remaining in Primo.ini: unset($config['General']['apiId']); unset($config['General']['port']); } } /** * Upgrade WorldCat.ini. * * @throws FileAccessException * @return void */ protected function upgradeWorldCat() { // If WorldCat is disabled in our current configuration, we don't need to // load any WorldCat-specific settings: if (!isset($this->newConfigs['config.ini']['WorldCat']['apiKey'])) { return; } // we want to retain the old installation's search settings exactly as-is $groups = [ 'Basic_Searches', 'Advanced_Searches', 'Sorting' ]; $this->applyOldSettings('WorldCat.ini', $groups); // we need to fix an obsolete search setting for authors foreach (['Basic_Searches', 'Advanced_Searches'] as $section) { $new = []; foreach ($this->newConfigs['WorldCat.ini'][$section] as $k => $v) { if ($k == 'srw.au:srw.pn:srw.cn') { $k = 'srw.au'; } $new[$k] = $v; } $this->newConfigs['WorldCat.ini'][$section] = $new; } // Deal with deprecated related record module. $newConfig = & $this->newConfigs['WorldCat.ini']; if (isset($newConfig['Record']['related']) && in_array('WorldCatEditions', $newConfig['Record']['related']) ) { $newConfig['Record']['related'] = array_diff( $newConfig['Record']['related'], ['WorldCatEditions'] ); $this->addWarning( 'The WorldCatEditions related record module is no longer ' . 'supported due to OCLC\'s xID API shutdown.' . ' It has been removed from your settings.' ); } // save the file $this->saveModifiedConfig('WorldCat.ini'); } /** * Does the specified properties file contain any meaningful * (non-empty/non-comment) lines? * * @param string $src File to check * * @return bool */ protected function fileContainsMeaningfulLines($src) { // Does the file contain any meaningful lines? foreach (file($src) as $line) { $line = trim($line); if (!empty($line) && substr($line, 0, 1) != '#') { return true; } } return false; } /** * Upgrade SolrMarc configurations. * * @throws FileAccessException * @return void */ protected function upgradeSolrMarc() { if (null === $this->newDir) { // skip this step if no write destination return; } // Is there a marc_local.properties file? $src = realpath($this->oldDir . '/../../import/marc_local.properties'); if (empty($src) || !file_exists($src)) { return; } // Copy the file if it contains customizations: if ($this->fileContainsMeaningfulLines($src)) { $dest = realpath($this->newDir . '/../../import') . '/marc_local.properties'; if (!copy($src, $dest) || !file_exists($dest)) { throw new FileAccessException( "Cannot copy {$src} to {$dest}." ); } } } /** * Upgrade .yaml configurations. * * @throws FileAccessException * @return void */ protected function upgradeSearchSpecs() { if (null === $this->newDir) { // skip this step if no write destination return; } // VuFind 1.x uses *_local.yaml files as overrides; VuFind 2.x uses files // with the same filename in the local directory. Copy any old override // files into the new expected location: $files = ['searchspecs', 'authsearchspecs', 'reservessearchspecs']; foreach ($files as $file) { $old = $this->oldDir . '/' . $file . '_local.yaml'; $new = $this->newDir . '/' . $file . '.yaml'; if (file_exists($old)) { if (!copy($old, $new)) { throw new FileAccessException( "Cannot copy {$old} to {$new}." ); } } } } /** * Upgrade ILS driver configuration. 
* * @throws FileAccessException * @return void */ protected function upgradeILS() { $driver = $this->newConfigs['config.ini']['Catalog']['driver'] ?? ''; if (empty($driver)) { $this->addWarning("WARNING: Could not find ILS driver setting."); } elseif ('Sample' == $driver) { // No configuration file for Sample driver } elseif (!file_exists($this->oldDir . '/' . $driver . '.ini')) { $this->addWarning( "WARNING: Could not find {$driver}.ini file; " . "check your ILS driver configuration." ); } else { $this->saveUnmodifiedConfig($driver . '.ini'); } // If we're set to load NoILS.ini on failure, copy that over as well: if (isset($this->newConfigs['config.ini']['Catalog']['loadNoILSOnFailure']) && $this->newConfigs['config.ini']['Catalog']['loadNoILSOnFailure'] ) { // If NoILS is also the main driver, we don't need to copy it twice: if ($driver != 'NoILS') { $this->saveUnmodifiedConfig('NoILS.ini'); } } } /** * Upgrade shard settings (they have moved to a different config file, so * this is handled as a separate method so that all affected settings are * addressed in one place. * * This gets called from updateConfig(), which gets called before other * configuration upgrade routines. This means that we need to modify the * config.ini settings in the newConfigs property (since it is currently * being worked on and will be written to disk shortly), but we need to * modify the searches.ini/facets.ini settings in the oldConfigs property * (because they have not been processed yet). * * @return void */ protected function upgradeShardSettings() { // move settings from config.ini to searches.ini: if (isset($this->newConfigs['config.ini']['IndexShards'])) { $this->oldConfigs['searches.ini']['IndexShards'] = $this->newConfigs['config.ini']['IndexShards']; unset($this->newConfigs['config.ini']['IndexShards']); } if (isset($this->newConfigs['config.ini']['ShardPreferences'])) { $this->oldConfigs['searches.ini']['ShardPreferences'] = $this->newConfigs['config.ini']['ShardPreferences']; unset($this->newConfigs['config.ini']['ShardPreferences']); } // move settings from facets.ini to searches.ini (merging StripFacets // setting with StripFields setting): if (isset($this->oldConfigs['facets.ini']['StripFacets'])) { if (!isset($this->oldConfigs['searches.ini']['StripFields'])) { $this->oldConfigs['searches.ini']['StripFields'] = []; } foreach ($this->oldConfigs['facets.ini']['StripFacets'] as $k => $v) { // If we already have values for the current key, merge and dedupe: if (isset($this->oldConfigs['searches.ini']['StripFields'][$k])) { $v .= ',' . $this->oldConfigs['searches.ini']['StripFields'][$k]; $parts = explode(',', $v); foreach ($parts as $i => $part) { $parts[$i] = trim($part); } $v = implode(',', array_unique($parts)); } $this->oldConfigs['searches.ini']['StripFields'][$k] = $v; } unset($this->oldConfigs['facets.ini']['StripFacets']); } } /** * Read the specified file and return an associative array of this format * containing all comments extracted from the file: * * [ * 'sections' => array * 'section_name_1' => array * 'before' => string ("Comments found at the beginning of this section") * 'inline' => string ("Comments found at the end of the section's line") * 'settings' => array * 'setting_name_1' => array * 'before' => string ("Comments found before this setting") * 'inline' => string ("Comments found at the end of setting's line") * ... * 'setting_name_n' => array (same keys as setting_name_1) * ... 
* 'section_name_n' => array (same keys as section_name_1) * 'after' => string ("Comments found at the very end of the file") * ] * * @param string $filename Name of ini file to read. * * @return array Associative array as described above. */ protected function extractComments($filename) { $lines = file($filename); // Initialize our return value: $retVal = ['sections' => [], 'after' => '']; // Initialize variables for tracking status during parsing: $section = $comments = ''; foreach ($lines as $line) { // To avoid redundant processing, create a trimmed version of the current // line: $trimmed = trim($line); // Is the current line a comment? If so, add to the currentComments // string. Note that we treat blank lines as comments. if (substr($trimmed, 0, 1) == ';' || empty($trimmed)) { $comments .= $line; } elseif (substr($trimmed, 0, 1) == '[' && ($closeBracket = strpos($trimmed, ']')) > 1 ) { // Is the current line the start of a section? If so, create the // appropriate section of the return value: $section = substr($trimmed, 1, $closeBracket - 1); if (!empty($section)) { // Grab comments at the end of the line, if any: if (($semicolon = strpos($trimmed, ';')) !== false) { $inline = trim(substr($trimmed, $semicolon)); } else { $inline = ''; } $retVal['sections'][$section] = [ 'before' => $comments, 'inline' => $inline, 'settings' => []]; $comments = ''; } } elseif (($equals = strpos($trimmed, '=')) !== false) { // Is the current line a setting? If so, add to the return value: $set = trim(substr($trimmed, 0, $equals)); $set = trim(str_replace('[]', '', $set)); if (!empty($section) && !empty($set)) { // Grab comments at the end of the line, if any: if (($semicolon = strpos($trimmed, ';')) !== false) { $inline = trim(substr($trimmed, $semicolon)); } else { $inline = ''; } // Currently, this data structure doesn't support arrays very // well, since it can't distinguish which line of the array // corresponds with which comments. For now, we just append all // the preceding and inline comments together for arrays. Since // we rarely use arrays in the config.ini file, this isn't a big // concern, but we should improve it if we ever need to. if (!isset($retVal['sections'][$section]['settings'][$set])) { $retVal['sections'][$section]['settings'][$set] = ['before' => $comments, 'inline' => $inline]; } else { $retVal['sections'][$section]['settings'][$set]['before'] .= $comments; $retVal['sections'][$section]['settings'][$set]['inline'] .= "\n" . $inline; } $comments = ''; } } } // Store any leftover comments following the last setting: $retVal['after'] = $comments; return $retVal; } }
1
32,762
Would it be too greedy to preg_replace `VuFind (\d+\.?)+` with `'VuFind ' . $this->to` anywhere in the string? This would update something like 'Finna (VuFind 7.1.0)' as well. Just a thought; please disregard it if you'd like to keep it as is.
vufind-org-vufind
php
@@ -91,7 +91,7 @@ public abstract class ManagedResourceStorage { zkClient = ((ZkSolrResourceLoader)resourceLoader).getZkController().getZkClient(); try { zkConfigName = ((ZkSolrResourceLoader)resourceLoader).getZkController(). - getZkStateReader().readConfigName(collection); + getZkStateReader().getClusterState().getCollection(collection).getConfigName(); } catch (Exception e) { log.error("Failed to get config name due to", e); throw new SolrException(ErrorCode.SERVER_ERROR,
1
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.solr.rest; import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.File; import java.io.FileInputStream; import java.io.FileNotFoundException; import java.io.FileOutputStream; import java.io.IOException; import java.io.InputStream; import java.io.InputStreamReader; import java.io.OutputStream; import java.io.OutputStreamWriter; import java.io.Reader; import java.lang.invoke.MethodHandles; import java.nio.charset.Charset; import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.util.HashMap; import java.util.Locale; import java.util.Map; import org.apache.lucene.util.BytesRef; import org.apache.solr.cloud.ZkSolrResourceLoader; import org.apache.solr.common.SolrException; import org.apache.solr.common.SolrException.ErrorCode; import org.apache.solr.common.cloud.SolrZkClient; import org.apache.solr.common.util.NamedList; import org.apache.solr.common.util.Utils; import org.apache.solr.core.SolrResourceLoader; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import static org.apache.solr.common.util.Utils.toJSONString; /** * Abstract base class that provides most of the functionality needed * to store arbitrary data for managed resources. Concrete implementations * need to decide the underlying format that data is stored in, such as JSON. * * The underlying storage I/O layer will be determined by the environment * Solr is running in, e.g. in cloud mode, data will be stored and loaded * from ZooKeeper. */ public abstract class ManagedResourceStorage { /** * Hides the underlying storage implementation for data being managed * by a ManagedResource. For instance, a ManagedResource may use JSON as * the data format and an instance of this class to persist and load * the JSON bytes to/from some backing store, such as ZooKeeper. */ public static interface StorageIO { String getInfo(); void configure(SolrResourceLoader loader, NamedList<String> initArgs) throws SolrException; boolean exists(String storedResourceId) throws IOException; InputStream openInputStream(String storedResourceId) throws IOException; OutputStream openOutputStream(String storedResourceId) throws IOException; boolean delete(String storedResourceId) throws IOException; } public static final String STORAGE_IO_CLASS_INIT_ARG = "storageIO"; public static final String STORAGE_DIR_INIT_ARG = "storageDir"; /** * Creates a new StorageIO instance for a Solr core, taking into account * whether the core is running in cloud mode as well as initArgs. 
*/ public static StorageIO newStorageIO(String collection, SolrResourceLoader resourceLoader, NamedList<String> initArgs) { StorageIO storageIO; SolrZkClient zkClient = null; String zkConfigName = null; if (resourceLoader instanceof ZkSolrResourceLoader) { zkClient = ((ZkSolrResourceLoader)resourceLoader).getZkController().getZkClient(); try { zkConfigName = ((ZkSolrResourceLoader)resourceLoader).getZkController(). getZkStateReader().readConfigName(collection); } catch (Exception e) { log.error("Failed to get config name due to", e); throw new SolrException(ErrorCode.SERVER_ERROR, "Failed to load config name for collection:" + collection + " due to: ", e); } if (zkConfigName == null) { throw new SolrException(ErrorCode.SERVER_ERROR, "Could not find config name for collection:" + collection); } } if (initArgs.get(STORAGE_IO_CLASS_INIT_ARG) != null) { storageIO = resourceLoader.newInstance(initArgs.get(STORAGE_IO_CLASS_INIT_ARG), StorageIO.class); } else { if (zkClient != null) { String znodeBase = "/configs/"+zkConfigName; log.debug("Setting up ZooKeeper-based storage for the RestManager with znodeBase: {}", znodeBase); storageIO = new ManagedResourceStorage.ZooKeeperStorageIO(zkClient, znodeBase); } else { storageIO = new FileStorageIO(); } } if (storageIO instanceof FileStorageIO) { // using local fs, if storageDir is not set in the solrconfig.xml, assume the configDir for the core if (initArgs.get(STORAGE_DIR_INIT_ARG) == null) { File configDir = new File(resourceLoader.getConfigDir()); boolean hasAccess = false; try { hasAccess = configDir.isDirectory() && configDir.canWrite(); } catch (java.security.AccessControlException ace) {} if (hasAccess) { initArgs.add(STORAGE_DIR_INIT_ARG, configDir.getAbsolutePath()); } else { // most likely this is because of a unit test // that doesn't have write-access to the config dir // while this failover approach is not ideal, it's better // than causing the core to fail esp. if managed resources aren't being used log.warn("Cannot write to config directory {} ; switching to use InMemory storage instead.", configDir.getAbsolutePath()); storageIO = new ManagedResourceStorage.InMemoryStorageIO(); } } } storageIO.configure(resourceLoader, initArgs); return storageIO; } /** * Local file-based storage implementation. 
*/ public static class FileStorageIO implements StorageIO { private String storageDir; @Override public void configure(SolrResourceLoader loader, NamedList<String> initArgs) throws SolrException { String storageDirArg = initArgs.get(STORAGE_DIR_INIT_ARG); if (storageDirArg == null || storageDirArg.trim().length() == 0) throw new IllegalArgumentException("Required configuration parameter '"+ STORAGE_DIR_INIT_ARG+"' not provided!"); File dir = new File(storageDirArg); if (!dir.isDirectory()) dir.mkdirs(); storageDir = dir.getAbsolutePath(); log.info("File-based storage initialized to use dir: {}", storageDir); } @Override public boolean exists(String storedResourceId) throws IOException { return (new File(storageDir, storedResourceId)).exists(); } @Override public InputStream openInputStream(String storedResourceId) throws IOException { return new FileInputStream(storageDir+"/"+storedResourceId); } @Override public OutputStream openOutputStream(String storedResourceId) throws IOException { return new FileOutputStream(storageDir+"/"+storedResourceId); } @Override public boolean delete(String storedResourceId) throws IOException { File storedFile = new File(storageDir, storedResourceId); return deleteIfFile(storedFile); } // TODO: this interface should probably be changed, this simulates the old behavior, // only throw security exception, just return false otherwise private boolean deleteIfFile(File f) { if (!f.isFile()) { return false; } try { Files.delete(f.toPath()); return true; } catch (IOException cause) { return false; } } @Override public String getInfo() { return "file:dir="+storageDir; } } // end FileStorageIO /** * ZooKeeper based storage implementation that uses the SolrZkClient provided * by the CoreContainer. */ public static class ZooKeeperStorageIO implements StorageIO { protected SolrZkClient zkClient; protected String znodeBase; protected boolean retryOnConnLoss = true; public ZooKeeperStorageIO(SolrZkClient zkClient, String znodeBase) { this.zkClient = zkClient; this.znodeBase = znodeBase; } @Override public void configure(SolrResourceLoader loader, NamedList<String> initArgs) throws SolrException { // validate connectivity and the configured znode base try { if (!zkClient.exists(znodeBase, retryOnConnLoss)) { zkClient.makePath(znodeBase, retryOnConnLoss); } } catch (Exception exc) { String errMsg = String.format (Locale.ROOT, "Failed to verify znode at %s due to: %s", znodeBase, exc.toString()); log.error(errMsg, exc); throw new SolrException(ErrorCode.SERVER_ERROR, errMsg, exc); } log.info("Configured ZooKeeperStorageIO with znodeBase: {}", znodeBase); } @Override public boolean exists(String storedResourceId) throws IOException { final String znodePath = getZnodeForResource(storedResourceId); try { return zkClient.exists(znodePath, retryOnConnLoss); } catch (Exception e) { if (e instanceof IOException) { throw (IOException)e; } else { throw new IOException("Failed to read data at "+znodePath, e); } } } @Override public InputStream openInputStream(String storedResourceId) throws IOException { final String znodePath = getZnodeForResource(storedResourceId); byte[] znodeData = null; try { if (zkClient.exists(znodePath, retryOnConnLoss)) { znodeData = zkClient.getData(znodePath, null, null, retryOnConnLoss); } } catch (Exception e) { if (e instanceof IOException) { throw (IOException)e; } else { throw new IOException("Failed to read data at "+znodePath, e); } } if (znodeData != null) { log.debug("Read {} bytes from znode {}", znodeData.length, znodePath); } else { znodeData = 
new byte[0]; log.debug("No data found for znode {}", znodePath); } return new ByteArrayInputStream(znodeData); } @Override public OutputStream openOutputStream(String storedResourceId) throws IOException { final String znodePath = getZnodeForResource(storedResourceId); final boolean retryOnConnLoss = this.retryOnConnLoss; ByteArrayOutputStream baos = new ByteArrayOutputStream() { @Override public void close() { byte[] znodeData = toByteArray(); try { if (zkClient.exists(znodePath, retryOnConnLoss)) { zkClient.setData(znodePath, znodeData, retryOnConnLoss); log.info("Wrote {} bytes to existing znode {}", znodeData.length, znodePath); } else { zkClient.makePath(znodePath, znodeData, retryOnConnLoss); log.info("Wrote {} bytes to new znode {}", znodeData.length, znodePath); } } catch (Exception e) { // have to throw a runtimer here as we're in close, // which doesn't throw IOException if (e instanceof RuntimeException) { throw (RuntimeException)e; } else { throw new SolrException(ErrorCode.SERVER_ERROR, "Failed to save data to ZooKeeper znode: "+znodePath+" due to: "+e, e); } } } }; return baos; } /** * Returns the Znode for the given storedResourceId by combining it * with the znode base. */ protected String getZnodeForResource(String storedResourceId) { return String.format(Locale.ROOT, "%s/%s", znodeBase, storedResourceId); } @Override public boolean delete(String storedResourceId) throws IOException { boolean wasDeleted = false; final String znodePath = getZnodeForResource(storedResourceId); // this might be overkill for a delete operation try { if (zkClient.exists(znodePath, retryOnConnLoss)) { log.debug("Attempting to delete znode {}", znodePath); zkClient.delete(znodePath, -1, retryOnConnLoss); wasDeleted = zkClient.exists(znodePath, retryOnConnLoss); if (wasDeleted) { log.info("Deleted znode {}", znodePath); } else { log.warn("Failed to delete znode {}", znodePath); } } else { log.warn("Znode {} does not exist; delete operation ignored.", znodePath); } } catch (Exception e) { if (e instanceof IOException) { throw (IOException)e; } else { throw new IOException("Failed to read data at "+znodePath, e); } } return wasDeleted; } @Override public String getInfo() { return "ZooKeeperStorageIO:path="+znodeBase; } } // end ZooKeeperStorageIO /** * Memory-backed storage IO; not really intended for storage large amounts * of data in production, but useful for testing and other transient workloads. 
*/ public static class InMemoryStorageIO implements StorageIO { Map<String,BytesRef> storage = new HashMap<>(); @Override public void configure(SolrResourceLoader loader, NamedList<String> initArgs) throws SolrException {} @Override public boolean exists(String storedResourceId) throws IOException { return storage.containsKey(storedResourceId); } @Override public InputStream openInputStream(String storedResourceId) throws IOException { BytesRef storedVal = storage.get(storedResourceId); if (storedVal == null) throw new FileNotFoundException(storedResourceId); return new ByteArrayInputStream(storedVal.bytes, storedVal.offset, storedVal.length); } @Override public OutputStream openOutputStream(final String storedResourceId) throws IOException { ByteArrayOutputStream boas = new ByteArrayOutputStream() { @Override public void close() { storage.put(storedResourceId, new BytesRef(toByteArray())); } }; return boas; } @Override public boolean delete(String storedResourceId) throws IOException { return (storage.remove(storedResourceId) != null); } @Override public String getInfo() { return "InMemoryStorage"; } } // end InMemoryStorageIO class /** * Default storage implementation that uses JSON as the storage format for managed data. */ public static class JsonStorage extends ManagedResourceStorage { public JsonStorage(StorageIO storageIO, SolrResourceLoader loader) { super(storageIO, loader); } /** * Determines the relative path (from the storage root) for the given resource. * In this case, it returns a file named with the .json extension. */ @Override public String getStoredResourceId(String resourceId) { return resourceId.replace('/','_')+".json"; } @Override protected Object parseText(Reader reader, String resourceId) throws IOException { return Utils.fromJSON(reader); } @Override public void store(String resourceId, Object toStore) throws IOException { String json = toJSONString(toStore); String storedResourceId = getStoredResourceId(resourceId); OutputStreamWriter writer = null; try { writer = new OutputStreamWriter(storageIO.openOutputStream(storedResourceId), UTF_8); writer.write(json); writer.flush(); } finally { if (writer != null) { try { writer.close(); } catch (Exception ignore){} } } if (log.isInfoEnabled()) { log.info("Saved JSON object to path {} using {}", storedResourceId, storageIO.getInfo()); } } } // end JsonStorage private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass()); public static final Charset UTF_8 = StandardCharsets.UTF_8; protected StorageIO storageIO; protected SolrResourceLoader loader; protected ManagedResourceStorage(StorageIO storageIO, SolrResourceLoader loader) { this.storageIO = storageIO; this.loader = loader; } /** Returns the resource loader used by this storage instance */ public SolrResourceLoader getResourceLoader() { return loader; } /** Returns the storageIO instance used by this storage instance */ public StorageIO getStorageIO() { return storageIO; } /** * Gets the unique identifier for a stored resource, typically based * on the resourceId and some storage-specific information, such as * file extension and storage root directory. */ public abstract String getStoredResourceId(String resourceId); /** * Loads a resource from storage; the default implementation makes * the assumption that the data is stored as UTF-8 encoded text, * such as JSON. This method should be overridden if that assumption * is invalid. 
*/ public Object load(String resourceId) throws IOException { String storedResourceId = getStoredResourceId(resourceId); if (log.isDebugEnabled()) { log.debug("Reading {} using {}", storedResourceId, storageIO.getInfo()); } InputStream inputStream = storageIO.openInputStream(storedResourceId); if (inputStream == null) { return null; } Object parsed; try (InputStreamReader reader = new InputStreamReader(inputStream, UTF_8)) { parsed = parseText(reader, resourceId); } String objectType = (parsed != null) ? parsed.getClass().getSimpleName() : "null"; if (log.isInfoEnabled()) { log.info(String.format(Locale.ROOT, "Loaded %s at path %s using %s", objectType, storedResourceId, storageIO.getInfo())); } return parsed; } /** * Called by {@link ManagedResourceStorage#load(String)} to convert the * serialized resource into its in-memory representation. */ protected Object parseText(Reader reader, String resourceId) throws IOException { // no-op: base classes should override this if they deal with text. return null; } /** Persists the given toStore object with the given resourceId. */ public abstract void store(String resourceId, Object toStore) throws IOException; /** Removes the given resourceId's persisted representation. */ public boolean delete(String resourceId) throws IOException { return storageIO.delete(getStoredResourceId(resourceId)); } }
1
40,392
BTW for brevity, you can remove `getZkStateReader().` here and elsewhere since ZkController has a convenience method for the cluster state.
apache-lucene-solr
java
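A minimal sketch of the shorthand suggested in the review comment above, assuming ZkController exposes a getClusterState() convenience method that delegates to getZkStateReader().getClusterState():

// Hypothetical shortened form of the patched lookup in newStorageIO();
// getClusterState() here is the ZkController convenience method the
// reviewer refers to (an assumption, not verified against this branch).
zkConfigName = ((ZkSolrResourceLoader) resourceLoader).getZkController()
    .getClusterState().getCollection(collection).getConfigName();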
@@ -191,7 +191,10 @@ public class FindFiles { Snapshot snapshot = snapshotId != null ? ops.current().snapshot(snapshotId) : ops.current().currentSnapshot(); - CloseableIterable<ManifestEntry> entries = new ManifestGroup(ops, snapshot.manifests()) + // snapshot could be null when the table just gets created + Iterable<ManifestFile> manifests = (snapshot != null) ? snapshot.manifests() : CloseableIterable.empty(); + + CloseableIterable<ManifestEntry> entries = new ManifestGroup(ops, manifests) .filterData(rowFilter) .filterFiles(fileFilter) .filterPartitions(partitionFilter)
1
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.iceberg; import com.google.common.base.Preconditions; import java.time.Instant; import java.time.LocalDateTime; import java.time.ZoneId; import java.time.format.DateTimeFormatter; import java.util.Arrays; import java.util.List; import org.apache.iceberg.expressions.Expression; import org.apache.iceberg.expressions.Expressions; import org.apache.iceberg.io.CloseableIterable; public class FindFiles { private FindFiles() { } private static final DateTimeFormatter DATE_FORMAT = DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss.SSS"); public static Builder in(Table table) { return new Builder(table); } public static class Builder { private final Table table; private final TableOperations ops; private boolean caseSensitive = true; private Long snapshotId = null; private Expression rowFilter = Expressions.alwaysTrue(); private Expression fileFilter = Expressions.alwaysTrue(); private Expression partitionFilter = Expressions.alwaysTrue(); public Builder(Table table) { this.table = table; this.ops = ((HasTableOperations) table).operations(); } public Builder caseInsensitive() { this.caseSensitive = false; return this; } public Builder caseSensitive(boolean findCaseSensitive) { this.caseSensitive = findCaseSensitive; return this; } /** * Base results on the given snapshot. * * @param findSnapshotId a snapshot ID * @return this for method chaining */ public Builder inSnapshot(long findSnapshotId) { Preconditions.checkArgument(this.snapshotId == null, "Cannot set snapshot multiple times, already set to id=%s", findSnapshotId); Preconditions.checkArgument(table.snapshot(findSnapshotId) != null, "Cannot find snapshot for id=%s", findSnapshotId); this.snapshotId = findSnapshotId; return this; } /** * Base results on files in the snapshot that was current as of a timestamp. * * @param timestampMillis a timestamp in milliseconds * @return this for method chaining */ public Builder asOfTime(long timestampMillis) { Preconditions.checkArgument(this.snapshotId == null, "Cannot set snapshot multiple times, already set to id=%s", snapshotId); Long lastSnapshotId = null; for (HistoryEntry logEntry : ops.current().snapshotLog()) { if (logEntry.timestampMillis() <= timestampMillis) { lastSnapshotId = logEntry.snapshotId(); } else { // the last snapshot ID was the last one older than the timestamp break; } } // the snapshot ID could be null if no entries were older than the requested time. in that // case, there is no valid snapshot to read. Preconditions.checkArgument(lastSnapshotId != null, "Cannot find a snapshot older than %s", DATE_FORMAT.format(LocalDateTime.ofInstant(Instant.ofEpochMilli(timestampMillis), ZoneId.systemDefault()))); return inSnapshot(lastSnapshotId); } /** * Filter results using a record filter. 
Files that may contain at least one matching record * will be returned by {@link #collect()}. * * @param expr a record filter * @return this for method chaining */ public Builder withRecordsMatching(Expression expr) { this.rowFilter = Expressions.and(rowFilter, expr); return this; } /** * Filter results using a metadata filter for the data in a {@link DataFile}. * * @param expr a filter for {@link DataFile} metadata columns * @return this for method chaining */ public Builder withMetadataMatching(Expression expr) { this.fileFilter = Expressions.and(fileFilter, expr); return this; } /** * Filter results to files in any one of the given partitions. * * @param spec a spec for the partitions * @param partition a StructLike that stores a partition tuple * @return this for method chaining */ public Builder inPartition(PartitionSpec spec, StructLike partition) { return inPartitions(spec, partition); } /** * Filter results to files in any one of the given partitions. * * @param spec a spec for the partitions * @param partitions one or more StructLike that stores a partition tuple * @return this for method chaining */ public Builder inPartitions(PartitionSpec spec, StructLike... partitions) { return inPartitions(spec, Arrays.asList(partitions)); } /** * Filter results to files in any one of the given partitions. * * @param spec a spec for the partitions * @param partitions a list of StructLike that stores a partition tuple * @return this for method chaining */ public Builder inPartitions(PartitionSpec spec, List<StructLike> partitions) { Preconditions.checkArgument(spec.equals(ops.current().spec(spec.specId())), "Partition spec does not belong to table: %s", table); Expression partitionSetFilter = Expressions.alwaysFalse(); for (StructLike partitionData : partitions) { Expression partFilter = Expressions.alwaysTrue(); for (int i = 0; i < spec.fields().size(); i += 1) { PartitionField field = spec.fields().get(i); partFilter = Expressions.and( partFilter, Expressions.equal(field.name(), partitionData.get(i, Object.class))); } partitionSetFilter = Expressions.or(partitionSetFilter, partFilter); } if (partitionFilter != Expressions.alwaysTrue()) { this.partitionFilter = Expressions.or(partitionFilter, partitionSetFilter); } else { this.partitionFilter = partitionSetFilter; } return this; } /** * @return all files in the table that match all of the filters */ public CloseableIterable<DataFile> collect() { Snapshot snapshot = snapshotId != null ? ops.current().snapshot(snapshotId) : ops.current().currentSnapshot(); CloseableIterable<ManifestEntry> entries = new ManifestGroup(ops, snapshot.manifests()) .filterData(rowFilter) .filterFiles(fileFilter) .filterPartitions(partitionFilter) .ignoreDeleted() .caseSensitive(caseSensitive) .entries(); return CloseableIterable.transform(entries, entry -> entry.file().copyWithoutStats()); } } }
1
16,063
If there are no manifests, then entries should be `CloseableIterable.empty()`, not the manifest iterable. That doesn't need to be closeable.
apache-iceberg
java
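A rough sketch of the shape the review comment above suggests for collect(): short-circuit to an empty result when the table has no snapshot yet, rather than handing an empty closeable iterable of manifests to ManifestGroup. Names mirror the surrounding file; treat this as an illustration, not the final patch.

public CloseableIterable<DataFile> collect() {
  Snapshot snapshot = snapshotId != null ?
      ops.current().snapshot(snapshotId) : ops.current().currentSnapshot();

  // a freshly created table has no snapshot yet, so there is nothing to scan
  if (snapshot == null) {
    return CloseableIterable.empty();
  }

  CloseableIterable<ManifestEntry> entries = new ManifestGroup(ops, snapshot.manifests())
      .filterData(rowFilter)
      .filterFiles(fileFilter)
      .filterPartitions(partitionFilter)
      .ignoreDeleted()
      .caseSensitive(caseSensitive)
      .entries();

  return CloseableIterable.transform(entries, entry -> entry.file().copyWithoutStats());
}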
@@ -0,0 +1,11 @@ +/** + * BSD-style license; for more info see http://pmd.sourceforge.net/license.html + */ + +package net.sourceforge.pmd.lang.java.rule.codestyle; + +import net.sourceforge.pmd.testframework.PmdRuleTst; + +public class ArrayInitializationVerbosenessRuleTest extends PmdRuleTst { + // no additional unit tests +}
1
1
15,398
This class should be named `ArrayInitializationVerbosenessTest` to work.
pmd-pmd
java
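A sketch of the rename asked for in the review comment above, on the assumption that PmdRuleTst locates the rule under test by stripping the "Test" suffix from the test class name:

package net.sourceforge.pmd.lang.java.rule.codestyle;

import net.sourceforge.pmd.testframework.PmdRuleTst;

// Named ArrayInitializationVerbosenessTest (rule name + "Test") so the
// framework can resolve the ArrayInitializationVerboseness rule.
public class ArrayInitializationVerbosenessTest extends PmdRuleTst {
    // no additional unit tests
}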
@@ -338,9 +338,17 @@ module Bolt private def update_logs(logs) logs.each_with_object({}) do |(key, val), acc| - next unless val.is_a?(Hash) + # Remove any disabled logs + next if val == 'disable' name = normalize_log(key) + + # But otherwise it has to be a Hash + unless val.is_a?(Hash) + raise Bolt::ValidationError, + "config of log #{name} must be a Hash, received #{val.class} #{val.inspect}" + end + acc[name] = val.slice('append', 'level') .transform_keys(&:to_sym)
1
# frozen_string_literal: true require 'etc' require 'logging' require 'pathname' require 'bolt/project' require 'bolt/logger' require 'bolt/util' require 'bolt/config/options' module Bolt class UnknownTransportError < Bolt::Error def initialize(transport, uri = nil) msg = uri.nil? ? "Unknown transport #{transport}" : "Unknown transport #{transport} found for #{uri}" super(msg, 'bolt/unknown-transport') end end class Config include Bolt::Config::Options attr_reader :config_files, :warnings, :data, :transports, :project, :modified_concurrency, :deprecations BOLT_CONFIG_NAME = 'bolt.yaml' BOLT_DEFAULTS_NAME = 'bolt-defaults.yaml' # The default concurrency value that is used when the ulimit is not low (i.e. < 700) DEFAULT_DEFAULT_CONCURRENCY = 100 def self.default new(Bolt::Project.default_project, {}) end def self.from_project(project, overrides = {}) conf = if project.project_file == project.config_file project.data else Bolt::Util.read_optional_yaml_hash(project.config_file, 'config') end data = load_defaults(project).push( filepath: project.config_file, data: conf, warnings: [], deprecations: [] ) new(project, data, overrides) end def self.from_file(configfile, overrides = {}) project = Bolt::Project.create_project(Pathname.new(configfile).expand_path.dirname) conf = if project.project_file == project.config_file project.data else Bolt::Util.read_yaml_hash(configfile, 'config') end data = load_defaults(project).push( filepath: project.config_file, data: conf, warnings: [], deprecations: [] ) new(project, data, overrides) end def self.system_path # Lazy-load expensive gem code require 'win32/dir' if Bolt::Util.windows? if Bolt::Util.windows? Pathname.new(File.join(Dir::COMMON_APPDATA, 'PuppetLabs', 'bolt', 'etc')) else Pathname.new(File.join('/etc', 'puppetlabs', 'bolt')) end end def self.user_path Pathname.new(File.expand_path(File.join('~', '.puppetlabs', 'etc', 'bolt'))) rescue StandardError nil end # Loads a 'bolt-defaults.yaml' file, which contains default configuration that applies to all # projects. This file does not allow project-specific configuration such as 'hiera-config' and # 'inventoryfile', and nests all default inventory configuration under an 'inventory-config' key. def self.load_bolt_defaults_yaml(dir) filepath = dir + BOLT_DEFAULTS_NAME data = Bolt::Util.read_yaml_hash(filepath, 'config') warnings = [] # Warn if 'bolt.yaml' detected in same directory. if File.exist?(bolt_yaml = dir + BOLT_CONFIG_NAME) warnings.push( msg: "Detected multiple configuration files: ['#{bolt_yaml}', '#{filepath}']. '#{bolt_yaml}' "\ "will be ignored." ) end # Remove project-specific config such as hiera-config, etc. project_config = data.slice(*(BOLT_PROJECT_OPTIONS - BOLT_DEFAULTS_OPTIONS)) if project_config.any? data.reject! { |key, _| project_config.include?(key) } warnings.push( msg: "Unsupported project configuration detected in '#{filepath}': #{project_config.keys}. "\ "Project configuration should be set in 'bolt-project.yaml'." ) end # Remove top-level transport config such as transport, ssh, etc. transport_config = data.slice(*INVENTORY_OPTIONS.keys) if transport_config.any? data.reject! { |key, _| transport_config.include?(key) } warnings.push( msg: "Unsupported inventory configuration detected in '#{filepath}': #{transport_config.keys}. "\ "Transport configuration should be set under the 'inventory-config' option or "\ "in 'inventory.yaml'." ) end # Move data under inventory-config to top-level so it can be easily merged with # config from other sources. 
Error early if inventory-config is not a hash or # has a plugin reference. if data.key?('inventory-config') unless data['inventory-config'].is_a?(Hash) raise Bolt::ValidationError, "Option 'inventory-config' must be of type Hash, received #{data['inventory-config']} "\ "#{data['inventory-config']} (file: #{filepath})" end if data['inventory-config'].key?('_plugin') raise Bolt::ValidationError, "Found unsupported key '_plugin' for option 'inventory-config'; supported keys are "\ "'#{INVENTORY_OPTIONS.keys.join("', '")}' (file: #{filepath})" end data = data.merge(data.delete('inventory-config')) end { filepath: filepath, data: data, warnings: warnings, deprecations: [] } end # Loads a 'bolt.yaml' file, the legacy configuration file. There's no special munging needed # here since Bolt::Config will just ignore any invalid keys. def self.load_bolt_yaml(dir) filepath = dir + BOLT_CONFIG_NAME data = Bolt::Util.read_yaml_hash(filepath, 'config') deprecations = [{ type: 'Using bolt.yaml for system configuration', msg: "Configuration file #{filepath} is deprecated and will be removed in a future version "\ "of Bolt. Use '#{dir + BOLT_DEFAULTS_NAME}' instead." }] { filepath: filepath, data: data, warnings: [], deprecations: deprecations } end def self.load_defaults(project) confs = [] # Load system-level config. Prefer a 'bolt-defaults.yaml' file, but fall back to the # legacy 'bolt.yaml' file. If the project-level config file is also the system-level # config file, don't load it a second time. if File.exist?(system_path + BOLT_DEFAULTS_NAME) confs << load_bolt_defaults_yaml(system_path) elsif File.exist?(system_path + BOLT_CONFIG_NAME) && (system_path + BOLT_CONFIG_NAME) != project.config_file confs << load_bolt_yaml(system_path) end # Load user-level config if there is a homedir. Prefer a 'bolt-defaults.yaml' file, but # fall back to the legacy 'bolt.yaml' file. if user_path if File.exist?(user_path + BOLT_DEFAULTS_NAME) confs << load_bolt_defaults_yaml(user_path) elsif File.exist?(user_path + BOLT_CONFIG_NAME) confs << load_bolt_yaml(user_path) end end confs end def initialize(project, config_data, overrides = {}) unless config_data.is_a?(Array) config_data = [{ filepath: project.config_file, data: config_data, warnings: [], deprecations: [] }] end @logger = Logging.logger[self] @project = project @warnings = @project.warnings.dup @deprecations = @project.deprecations.dup @transports = {} @config_files = [] default_data = { 'apply_settings' => {}, 'color' => true, 'compile-concurrency' => Etc.nprocessors, 'concurrency' => default_concurrency, 'format' => 'human', 'log' => { 'console' => {} }, 'plugin_hooks' => {}, 'plugins' => {}, 'puppetdb' => {}, 'puppetfile' => {}, 'save-rerun' => true, 'transport' => 'ssh' } if project.path.directory? default_data['log']['bolt-debug.log'] = { 'level' => 'debug', 'append' => false } end loaded_data = config_data.each_with_object([]) do |data, acc| @warnings.concat(data[:warnings]) if data[:warnings].any? @deprecations.concat(data[:deprecations]) if data[:deprecations].any? if data[:data].any? 
@config_files.push(data[:filepath]) acc.push(data[:data]) end end override_data = normalize_overrides(overrides) # If we need to lower concurrency and concurrency is not configured ld_concurrency = loaded_data.map(&:keys).flatten.include?('concurrency') @modified_concurrency = default_concurrency != DEFAULT_DEFAULT_CONCURRENCY && !ld_concurrency && !override_data.key?('concurrency') @data = merge_config_layers(default_data, *loaded_data, override_data) TRANSPORT_CONFIG.each do |transport, config| @transports[transport] = config.new(@data.delete(transport), @project.path) end finalize_data validate end # Transforms CLI options into a config hash that can be merged with # default and loaded config. def normalize_overrides(options) opts = options.transform_keys(&:to_s) # Pull out config options. We need to add 'transport' as it's not part of the # OPTIONS hash but is a valid option that can be set with the --transport CLI option overrides = opts.slice(*OPTIONS.keys, 'transport') # Pull out transport config options TRANSPORT_CONFIG.each do |transport, config| overrides[transport] = opts.slice(*config.options) end # Set console log to debug if in debug mode if options[:debug] overrides['log'] = { 'console' => { 'level' => :debug } } end if options[:puppetfile_path] @puppetfile = options[:puppetfile_path] end overrides['trace'] = opts['trace'] if opts.key?('trace') overrides end # Merge configuration from all sources into a single hash. Precedence from lowest to highest: # defaults, system-wide, user-level, project-level, CLI overrides def merge_config_layers(*config_data) config_data.inject({}) do |acc, config| acc.merge(config) do |key, val1, val2| case key # Plugin config is shallow merged for each plugin when 'plugins' val1.merge(val2) { |_, v1, v2| v1.merge(v2) } # Transports are deep merged when *TRANSPORT_CONFIG.keys Bolt::Util.deep_merge(val1, val2) # Hash values are shallow merged when 'puppetdb', 'plugin_hooks', 'apply_settings', 'log' val1.merge(val2) # All other values are overwritten else val2 end end end end def deep_clone Bolt::Util.deep_clone(self) end private def finalize_data if @data['log'].is_a?(Hash) @data['log'] = update_logs(@data['log']) end # Expand paths relative to the project. Any settings that came from the # CLI will already be absolute, so the expand will be skipped. 
if @data.key?('modulepath') moduledirs = if data['modulepath'].is_a?(String) data['modulepath'].split(File::PATH_SEPARATOR) else data['modulepath'] end @data['modulepath'] = moduledirs.map do |moduledir| File.expand_path(moduledir, @project.path) end end %w[hiera-config inventoryfile trusted-external-command].each do |opt| @data[opt] = File.expand_path(@data[opt], @project.path) if @data.key?(opt) end # Filter hashes to only include valid options @data['apply_settings'] = @data['apply_settings'].slice(*OPTIONS['apply_settings'][:properties].keys) @data['puppetfile'] = @data['puppetfile'].slice(*OPTIONS['puppetfile'][:properties].keys) end private def normalize_log(target) return target if target == 'console' target = target[5..-1] if target.start_with?('file:') 'file:' + File.expand_path(target, @project.path) end private def update_logs(logs) logs.each_with_object({}) do |(key, val), acc| next unless val.is_a?(Hash) name = normalize_log(key) acc[name] = val.slice('append', 'level') .transform_keys(&:to_sym) if (v = acc[name][:level]) unless v.is_a?(String) || v.is_a?(Symbol) raise Bolt::ValidationError, "level of log #{name} must be a String or Symbol, received #{v.class} #{v.inspect}" end unless Bolt::Logger.valid_level?(v) raise Bolt::ValidationError, "level of log #{name} must be one of #{Bolt::Logger.levels.join(', ')}; received #{v}" end end if (v = acc[name][:append]) && v != true && v != false raise Bolt::ValidationError, "append flag of log #{name} must be a Boolean, received #{v.class} #{v.inspect}" end end end def validate if @data['future'] msg = "Configuration option 'future' no longer exposes future behavior." @warnings << { option: 'future', msg: msg } end keys = OPTIONS.keys - %w[plugins plugin_hooks puppetdb] keys.each do |key| next unless Bolt::Util.references?(@data[key]) valid_keys = TRANSPORT_CONFIG.keys + %w[plugins plugin_hooks puppetdb] raise Bolt::ValidationError, "Found unsupported key _plugin in config setting #{key}. Plugins are only available in "\ "#{valid_keys.join(', ')}." end unless concurrency.is_a?(Integer) && concurrency > 0 raise Bolt::ValidationError, "Concurrency must be a positive Integer, received #{concurrency.class} #{concurrency}" end unless compile_concurrency.is_a?(Integer) && compile_concurrency > 0 raise Bolt::ValidationError, "Compile concurrency must be a positive Integer, received #{compile_concurrency.class} "\ "#{compile_concurrency}" end compile_limit = 2 * Etc.nprocessors unless compile_concurrency < compile_limit raise Bolt::ValidationError, "Compilation is CPU-intensive, set concurrency less than #{compile_limit}" end unless %w[human json rainbow].include? 
format raise Bolt::ValidationError, "Unsupported format: '#{format}'" end Bolt::Util.validate_file('hiera-config', @data['hiera-config']) if @data['hiera-config'] Bolt::Util.validate_file('trusted-external-command', trusted_external) if trusted_external unless TRANSPORT_CONFIG.include?(transport) raise UnknownTransportError, transport end end def default_inventoryfile @project.inventory_file end def rerunfile @project.rerunfile end def hiera_config @data['hiera-config'] || @project.hiera_config end def puppetfile @puppetfile || @project.puppetfile end def modulepath @data['modulepath'] || @project.modulepath end def modulepath=(value) @data['modulepath'] = value end def concurrency @data['concurrency'] end def format @data['format'] end def format=(value) @data['format'] = value end def trace @data['trace'] end def log @data['log'] end def puppetdb @data['puppetdb'] end def color @data['color'] end def save_rerun @data['save-rerun'] end def inventoryfile @data['inventoryfile'] end def compile_concurrency @data['compile-concurrency'] end def puppetfile_config @data['puppetfile'] end def plugins @data['plugins'] end def plugin_hooks @data['plugin_hooks'] end def trusted_external @data['trusted-external-command'] end def apply_settings @data['apply_settings'] end def transport @data['transport'] end # Check if there is a case-insensitive match to the path def check_path_case(type, paths) return if paths.nil? matches = matching_paths(paths) if matches.any? msg = "WARNING: Bolt is case sensitive when specifying a #{type}. Did you mean:\n" matches.each { |path| msg += " #{path}\n" } @logger.warn msg end end def matching_paths(paths) Array(paths).map { |p| Dir.glob([p, casefold(p)]) }.flatten.uniq.reject { |p| Array(paths).include?(p) } end private def casefold(path) path.chars.map do |l| l =~ /[A-Za-z]/ ? "[#{l.upcase}#{l.downcase}]" : l end.join end # Etc::SC_OPEN_MAX is meaningless on windows, not defined in PE Jruby and not available # on some platforms. This method holds the logic to decide whether or not to even consider it. def sc_open_max_available? !Bolt::Util.windows? && defined?(Etc::SC_OPEN_MAX) && Etc.sysconf(Etc::SC_OPEN_MAX) end def default_concurrency @default_concurrency ||= if !sc_open_max_available? || Etc.sysconf(Etc::SC_OPEN_MAX) >= 300 DEFAULT_DEFAULT_CONCURRENCY else Etc.sysconf(Etc::SC_OPEN_MAX) / 7 end end end end
1
15,817
Do we want to allow users to disable `console` as well? The schema currently says that it only permits a hash for `console`.
puppetlabs-bolt
rb
@@ -44,6 +44,7 @@ func TestAddrsNewAndList(t *testing.T) { func TestWalletBalance(t *testing.T) { tf.IntegrationTest(t) + t.Skip("not working") ctx := context.Background() builder := test.NewNodeBuilder(t)
1
package commands_test import ( "context" "os" "strings" "testing" "github.com/ipfs/go-cid" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/filecoin-project/go-filecoin/fixtures" "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/node" "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/node/test" th "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers" tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" "github.com/filecoin-project/go-filecoin/internal/pkg/types" "github.com/filecoin-project/go-filecoin/internal/pkg/vm/address" ) func TestAddrsNewAndList(t *testing.T) { tf.IntegrationTest(t) ctx := context.Background() builder := test.NewNodeBuilder(t) n, cmdClient, done := builder.BuildAndStartAPI(ctx) defer done() addrs := make([]address.Address, 10) var err error for i := 0; i < 10; i++ { addrs[i], err = n.PorcelainAPI.WalletNewAddress(types.SECP256K1) require.NoError(t, err) } list := cmdClient.RunSuccess(ctx, "address", "ls").ReadStdout() for _, addr := range addrs { assert.Contains(t, list, addr.String()) } } func TestWalletBalance(t *testing.T) { tf.IntegrationTest(t) ctx := context.Background() builder := test.NewNodeBuilder(t) cs := node.FixtureChainSeed(t) builder.WithGenesisInit(cs.GenesisInitFunc) n, cmdClient, done := builder.BuildAndStartAPI(ctx) defer done() addr, err := n.PorcelainAPI.WalletNewAddress(types.SECP256K1) require.NoError(t, err) t.Log("[success] not found, zero") balance := cmdClient.RunSuccess(ctx, "wallet", "balance", addr.String()) assert.Equal(t, "0", balance.ReadStdoutTrimNewlines()) t.Log("[success] balance 9999900000") balance = cmdClient.RunSuccess(ctx, "wallet", "balance", address.LegacyNetworkAddress.String()) assert.Equal(t, "949999900000", balance.ReadStdoutTrimNewlines()) t.Log("[success] newly generated one") addrNew := cmdClient.RunSuccess(ctx, "address", "new") balance = cmdClient.RunSuccess(ctx, "wallet", "balance", addrNew.ReadStdoutTrimNewlines()) assert.Equal(t, "0", balance.ReadStdoutTrimNewlines()) } func TestAddrLookupAndUpdate(t *testing.T) { t.Skip("Long term solution: #3642") tf.IntegrationTest(t) ctx := context.Background() builder := test.NewNodeBuilder(t) cs := node.FixtureChainSeed(t) builder.WithGenesisInit(cs.GenesisInitFunc) n1, cmdClient, done := builder.BuildAndStartAPI(ctx) defer done() builder2 := test.NewNodeBuilder(t) builder2.WithConfig(cs.MinerConfigOpt(0)) builder2.WithInitOpt(cs.KeyInitOpt(0)) builder2.WithInitOpt(cs.KeyInitOpt(1)) n2 := builder2.BuildAndStart(ctx) defer n2.Stop(ctx) node.ConnectNodes(t, n1, n2) addr := fixtures.TestAddresses[0] minerAddr := fixtures.TestMiners[0] minerPidForUpdate := th.RequireRandomPeerID(t) // capture original, pre-update miner pid lookupOutA := cmdClient.RunSuccessFirstLine(ctx, "address", "lookup", minerAddr) // Not a miner address, should fail. 
cmdClient.RunFail(ctx, "failed to find", "address", "lookup", addr) // update the miner's peer ID updateMsg := cmdClient.RunSuccessFirstLine(ctx, "miner", "update-peerid", "--from", addr, "--gas-price", "1", "--gas-limit", "300", minerAddr, minerPidForUpdate.Pretty(), ) // ensure mining happens after update message gets included in mempool _, err := n2.BlockMining.BlockMiningAPI.MiningOnce(ctx) require.NoError(t, err) // wait for message to be included in a block _, err = n1.PorcelainAPI.MessageWaitDone(ctx, mustDecodeCid(updateMsg)) require.NoError(t, err) // use the address lookup command to ensure update happened lookupOutB := cmdClient.RunSuccessFirstLine(ctx, "address", "lookup", minerAddr) assert.Equal(t, minerPidForUpdate.Pretty(), lookupOutB) assert.NotEqual(t, lookupOutA, lookupOutB) } func TestWalletLoadFromFile(t *testing.T) { tf.IntegrationTest(t) ctx := context.Background() builder := test.NewNodeBuilder(t) buildWithMiner(t, builder) _, cmdClient, done := builder.BuildAndStartAPI(ctx) defer done() for _, p := range fixtures.KeyFilePaths() { cmdClient.RunSuccess(ctx, "wallet", "import", p) } dw := cmdClient.RunSuccess(ctx, "address", "ls").ReadStdoutTrimNewlines() for _, addr := range fixtures.TestAddresses { // assert we loaded the test address from the file assert.Contains(t, dw, addr) } // assert default amount of funds were allocated to address during genesis wb := cmdClient.RunSuccess(ctx, "wallet", "balance", fixtures.TestAddresses[0]).ReadStdoutTrimNewlines() assert.Contains(t, wb, "10000") } func TestWalletExportImportRoundTrip(t *testing.T) { tf.IntegrationTest(t) ctx := context.Background() builder := test.NewNodeBuilder(t) _, cmdClient, done := builder.BuildAndStartAPI(ctx) defer done() dw := cmdClient.RunSuccess(ctx, "address", "ls").ReadStdoutTrimNewlines() ki := cmdClient.RunSuccess(ctx, "wallet", "export", dw, "--enc=json").ReadStdoutTrimNewlines() wf, err := os.Create("walletFileTest") require.NoError(t, err) defer func() { require.NoError(t, os.Remove("walletFileTest")) }() _, err = wf.WriteString(ki) require.NoError(t, err) require.NoError(t, wf.Close()) maybeAddr := cmdClient.RunSuccess(ctx, "wallet", "import", wf.Name()).ReadStdoutTrimNewlines() assert.Equal(t, dw, maybeAddr) } func TestWalletExportPrivateKeyConsistentDisplay(t *testing.T) { tf.IntegrationTest(t) ctx := context.Background() builder := test.NewNodeBuilder(t) _, cmdClient, done := builder.BuildAndStartAPI(ctx) defer done() dw := cmdClient.RunSuccess(ctx, "address", "ls").ReadStdoutTrimNewlines() exportLines := cmdClient.RunSuccessLines(ctx, "wallet", "export", dw) exportTextPrivateKeyLine := exportLines[1] exportTextPrivateKey := strings.Split(exportTextPrivateKeyLine, "\t")[1] exportJSON := cmdClient.RunSuccess(ctx, "wallet", "export", dw, "--enc=json").ReadStdoutTrimNewlines() assert.Contains(t, exportJSON, exportTextPrivateKey) } // MustDecodeCid decodes a string to a Cid pointer, panicking on error func mustDecodeCid(cidStr string) cid.Cid { decode, err := cid.Decode(cidStr) if err != nil { panic(err) } return decode }
1
22,630
It would be very helpful to describe succinctly, either inline or by linking to an issue that goes into depth, why each test is not working. If we merge it like this, your knowledge of what is going on is lost, and other people working in this code will need to do a lot of reading before understanding when/how/if we should unskip these tests.
filecoin-project-venus
go
@@ -85,9 +85,13 @@ export default function LegacyAdSenseDashboardWidgetOverview( props ) { } } }, [ - currentDataLoaded && previousDataLoaded, // All reports are fetched. - !! currentError || !! previousError, // Whether there is an error or not. - JSON.stringify( currentRangeData ), + currentDataLoaded, + currentError, + currentRangeData, + handleDataError, + handleDataSuccess, + previousDataLoaded, + previousError, ] ); if ( ! currentDataLoaded || ! previousDataLoaded ) {
1
/** * LegacyAdSenseDashboardWidgetOverview component. * * Site Kit by Google, Copyright 2021 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * External dependencies */ import PropTypes from 'prop-types'; /** * WordPress dependencies */ import { useEffect } from '@wordpress/element'; /** * Internal dependencies */ import Data from 'googlesitekit-data'; import { STORE_NAME } from '../../datastore/constants'; import PreviewBlock from '../../../../components/PreviewBlock'; import DataBlock from '../../../../components/DataBlock'; import ReportError from '../../../../components/ReportError'; import ReportZero from '../../../../components/ReportZero'; import { calculateChange } from '../../../../util'; import { isZeroReport } from '../../util'; const { useSelect } = Data; export default function LegacyAdSenseDashboardWidgetOverview( props ) { const { startDate, endDate, compareStartDate, compareEndDate, metrics, selectedStats, handleStatSelection, handleDataError, handleDataSuccess, } = props; const currentRangeArgs = { metrics: Object.keys( metrics ), startDate, endDate, }; const previousRangeArgs = { metrics: Object.keys( metrics ), startDate: compareStartDate, endDate: compareEndDate, }; const currentRangeData = useSelect( ( select ) => select( STORE_NAME ).getReport( currentRangeArgs ) ); const previousRangeData = useSelect( ( select ) => select( STORE_NAME ).getReport( previousRangeArgs ) ); const currentDataLoaded = useSelect( ( select ) => select( STORE_NAME ).hasFinishedResolution( 'getReport', [ currentRangeArgs ] ) ); const previousDataLoaded = useSelect( ( select ) => select( STORE_NAME ).hasFinishedResolution( 'getReport', [ previousRangeArgs ] ) ); const currentError = useSelect( ( select ) => select( STORE_NAME ).getErrorForSelector( 'getReport', [ currentRangeArgs ] ) ); const previousError = useSelect( ( select ) => select( STORE_NAME ).getErrorForSelector( 'getReport', [ previousRangeArgs ] ) ); // TODO: remove the following logic when AdSenseDashboardWidget is refactored. useEffect( () => { if ( currentDataLoaded && previousDataLoaded ) { if ( currentError || previousError ) { handleDataError( currentError || previousError ); } else if ( isZeroReport( currentRangeData ) ) { handleDataError(); } else { handleDataSuccess(); } } }, [ currentDataLoaded && previousDataLoaded, // All reports are fetched. !! currentError || !! previousError, // Whether there is an error or not. JSON.stringify( currentRangeData ), ] ); if ( ! currentDataLoaded || ! 
previousDataLoaded ) { return <PreviewBlock width="100%" height="250px" />; } if ( currentError || previousError ) { const error = currentError || previousError; return <ReportError moduleSlug="adsense" error={ error } />; } if ( isZeroReport( currentRangeData ) ) { return <ReportZero moduleSlug="adsense" />; } const { totals, headers } = currentRangeData; const { totals: previousTotals } = previousRangeData; return ( <div className="googlesitekit-adsense-performance-overview"> <DataBlock stat={ 0 } className="googlesitekit-data-block--page-rpm googlesitekit-data-block--button-1" title={ metrics[ headers[ 0 ].name ] } datapoint={ totals[ 0 ] } datapointUnit={ headers[ 0 ]?.currency } change={ calculateChange( previousTotals[ 0 ], totals[ 0 ] ) } changeDataUnit="%" context="button" selected={ selectedStats === 0 } handleStatSelection={ () => handleStatSelection( 0 ) } /> <DataBlock stat={ 1 } className="googlesitekit-data-block--page-rpm googlesitekit-data-block--button-2" title={ metrics[ headers[ 1 ].name ] } datapoint={ totals[ 1 ] } datapointUnit={ headers[ 1 ]?.currency } change={ calculateChange( previousTotals[ 1 ], totals[ 1 ] ) } changeDataUnit="%" context="button" selected={ selectedStats === 1 } handleStatSelection={ () => handleStatSelection( 1 ) } /> <DataBlock stat={ 2 } className="googlesitekit-data-block--impression googlesitekit-data-block--button-3" title={ metrics[ headers[ 2 ].name ] } datapoint={ totals[ 2 ] } change={ calculateChange( previousTotals[ 2 ], totals[ 2 ] ) } changeDataUnit="%" context="button" selected={ selectedStats === 2 } handleStatSelection={ () => handleStatSelection( 2 ) } /> <DataBlock stat={ 3 } className="googlesitekit-data-block--impression googlesitekit-data-block--button-4" title={ metrics[ headers[ 3 ].name ] } datapoint={ totals[ 3 ] } datapointUnit={ '%' } change={ calculateChange( previousTotals[ 3 ], totals[ 3 ] ) } changeDataUnit="%" context="button" selected={ selectedStats === 3 } handleStatSelection={ () => handleStatSelection( 3 ) } /> </div> ); } LegacyAdSenseDashboardWidgetOverview.propTypes = { startDate: PropTypes.string.isRequired, endDate: PropTypes.string.isRequired, compareStartDate: PropTypes.string.isRequired, compareEndDate: PropTypes.string.isRequired, metrics: PropTypes.shape( {} ).isRequired, selectedStats: PropTypes.number.isRequired, handleStatSelection: PropTypes.func.isRequired, handleDataError: PropTypes.func.isRequired, handleDataSuccess: PropTypes.func.isRequired, };
1
37,760
As this is a `useEffect`, this could be a cause for concern! Note that `useEffect` re-runs when a dependency changes, **not** when a dependency is truthy (the previous code does look a bit like it's expecting that).
google-site-kit-wp
js
@@ -47,12 +47,13 @@ namespace lbann { -lbann_comm* initialize(int& argc, char**& argv, int seed) { +world_comm_ptr initialize(int& argc, char**& argv, int seed) { // Initialize Elemental. El::Initialize(argc, argv); // Create a new comm object. // Initial creation with every process in one model. - auto* comm = new lbann_comm(0); + auto comm = world_comm_ptr{new lbann_comm(0), &lbann::finalize }; + #if defined(LBANN_TOPO_AWARE) // Determine the number of NUMA nodes present. hwloc_topology_t topo;
1
////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2014-2016, Lawrence Livermore National Security, LLC.
// Produced at the Lawrence Livermore National Laboratory.
// Written by the LBANN Research Team (B. Van Essen, et al.) listed in
// the CONTRIBUTORS file. <[email protected]>
//
// LLNL-CODE-697807.
// All rights reserved.
//
// This file is part of LBANN: Livermore Big Artificial Neural Network
// Toolkit. For details, see http://software.llnl.gov/LBANN or
// https://github.com/LLNL/LBANN.
//
// Licensed under the Apache License, Version 2.0 (the "Licensee"); you
// may not use this file except in compliance with the License. You may
// obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the license.
//
// lbann_base .cpp - Basic definitions, functions
////////////////////////////////////////////////////////////////////////////////

#include "lbann/base.hpp"

#include <omp.h>
#if defined(LBANN_TOPO_AWARE)
#include <hwloc.h>
#if defined(HWLOC_API_VERSION) && (HWLOC_API_VERSION < 0x00010b00)
#define HWLOC_OBJ_NUMANODE HWLOC_OBJ_NODE
#endif
#endif

#include "lbann/comm.hpp"
#include "lbann/utils/random.hpp"
#include "lbann/utils/omp_diagnostics.hpp"
#include "lbann/utils/stack_trace.hpp"

#ifdef LBANN_HAS_CUDNN
#include "lbann/utils/cudnn.hpp"
#endif

namespace lbann {

lbann_comm* initialize(int& argc, char**& argv, int seed) {
  // Initialize Elemental.
  El::Initialize(argc, argv);
  // Create a new comm object.
  // Initial creation with every process in one model.
  auto* comm = new lbann_comm(0);
#if defined(LBANN_TOPO_AWARE)
  // Determine the number of NUMA nodes present.
  hwloc_topology_t topo;
  hwloc_topology_init(&topo);
  hwloc_topology_load(topo);
  int numa_depth = hwloc_get_type_depth(topo, HWLOC_OBJ_NUMANODE);
  if (numa_depth == HWLOC_TYPE_DEPTH_UNKNOWN) {
    std::cout << comm->get_rank_in_world() << ": cannot determine hwloc NUMA-node depth" << std::endl;
  }
  int num_numa_nodes = hwloc_get_nbobjs_by_depth(topo, numa_depth);
  // Warn if there are more NUMA nodes than processes per node.
  // It's probably fine if there are more processes than NUMA nodes for now.
  // We can adjust that later when we better understand the threaded perf.
  int ppn = comm->get_procs_per_node();
  if (num_numa_nodes > ppn) {
    if (comm->get_rank_in_node() == 0) {
      std::cout << comm->get_rank_in_world() << ": WARNING: node has " << num_numa_nodes << " NUMA nodes but you have " << ppn << " processes per node" << std::endl;
    }
  }
  hwloc_topology_destroy(topo);
#endif
  // Initialize local random number generators.
  init_random(seed);
  init_data_seq_random(seed);
  return comm;
}

void finalize(lbann_comm* comm) {
#ifdef LBANN_HAS_CUDNN
  cudnn::destroy();
#endif
  if (comm != nullptr) {
    delete comm;
  }
  El::Finalize();
}

/** hack to avoid long switch/case statement; users should ignore; of interest to developers */
static std::vector<std::string> pool_mode_names = { "invalid", "max", "average", "average_no_pad" };

/** returns a string representation of the pool_mode */
std::string get_pool_mode_name(pool_mode m) {
  if ((int)m < 1 or (int)m >= (int)pool_mode_names.size()) {
    throw(std::string{} + __FILE__ + " " + std::to_string(__LINE__) + " :: " + " Invalid pool_mode");
  }
  return pool_mode_names[(int)m];
}

} // namespace lbann
1
13,636
Should we be doing this with `make_unique` or something? (See the sketch below.)
LLNL-lbann
cpp
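One reason `make_unique` may not drop in directly here, sketched under the assumption that `world_comm_ptr` is a `std::unique_ptr` with `lbann::finalize` as a custom deleter (the alias and the `make_world_comm` helper below are illustrative, not the project's actual definitions): `std::make_unique` always uses `std::default_delete`, so a pointer with a custom deleter still has to be constructed explicitly, for example behind a small factory.

```cpp
#include <memory>

#include "lbann/base.hpp"  // assumed to declare lbann_comm and lbann::finalize

namespace lbann {

// Assumed alias; the real world_comm_ptr may be defined differently.
using world_comm_ptr = std::unique_ptr<lbann_comm, void (*)(lbann_comm*)>;

// std::make_unique cannot attach &lbann::finalize as the deleter, so the
// explicit construction is kept in one place behind a factory.
inline world_comm_ptr make_world_comm() {
  return world_comm_ptr{new lbann_comm(0), &lbann::finalize};
}

} // namespace lbann
```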
@@ -35,9 +35,11 @@ class ProposalMailer < ApplicationMailer
     )
   end

-  def proposal_updated_no_action_required(user, proposal, modifier = nil)
+  def proposal_updated_no_action_required(user = User.last, proposal = Proposal.last, modifier = nil)
     @proposal = proposal.decorate
     @modifier = modifier || NullUser.new
+    # Example ideal data to consume
+    @changes = [{verb: 'modified', result: 'this to that'},{verb: 'added', result: 'that'},{verb: 'modified', result: 'this to that'}]
     assign_threading_headers(@proposal)
     mail(
1
class ProposalMailer < ApplicationMailer
  def proposal_created_confirmation(proposal)
    @proposal = proposal.decorate
    assign_threading_headers(@proposal)
    mail(
      to: @proposal.requester.email_address,
      subject: subject(@proposal),
      from: default_sender_email,
      reply_to: reply_email(@proposal)
    )
  end

  def emergency_proposal_created_confirmation(proposal)
    @proposal = proposal.decorate
    assign_threading_headers(@proposal)
    mail(
      to: @proposal.requester.email_address,
      subject: subject(@proposal),
      from: default_sender_email,
      reply_to: reply_email(@proposal)
    )
  end

  def proposal_complete(proposal)
    @proposal = proposal.decorate
    assign_threading_headers(@proposal)
    mail(
      to: @proposal.requester.email_address,
      subject: subject(@proposal),
      from: default_sender_email,
      reply_to: reply_email(@proposal)
    )
  end

  def proposal_updated_no_action_required(user, proposal, modifier = nil)
    @proposal = proposal.decorate
    @modifier = modifier || NullUser.new
    assign_threading_headers(@proposal)
    mail(
      to: user.email_address,
      subject: subject(@proposal),
      from: default_sender_email,
      reply_to: reply_email(@proposal)
    )
  end

  def proposal_updated_needs_re_review(user, proposal, modifier = nil)
    @proposal = proposal.decorate
    @modifier = modifier || NullUser.new
    @step = proposal.currently_awaiting_steps
    # Example ideal data to consume
    @changes = [{verb: 'modified', result: 'this to that'},{verb: 'added', result: 'that'},{verb: 'modified', result: 'this to that'}]
    assign_threading_headers(@proposal)
    mail(
      to: user.email_address,
      subject: subject(@proposal),
      from: default_sender_email,
      reply_to: reply_email(@proposal)
    )
  end

  def proposal_updated_while_step_pending(step)
    @step = step.decorate
    @proposal = step.proposal.decorate
    assign_threading_headers(@proposal)
    unless @step.api_token
      @step.create_api_token
    end
    mail(
      to: step.user.email_address,
      subject: subject(@proposal),
      from: default_sender_email,
      reply_to: reply_email(@proposal)
    )
  end
end
1
16,828
We don't want to set default values for these. Since this is the actual email, we want to make sure we are always passing in the `user` and `proposal`. We do have a default value of `nil` for `modifier` because sometimes we will update a proposal via the `rails console`, in which case there will be no recorded modifier. (See the sketch below.)
18F-C2
rb
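A brief sketch of that design choice, using the existing mailer API (the caller line and the `user`/`proposal` variables are illustrative): keeping `user` and `proposal` as required positional arguments makes a forgotten argument fail loudly, while `modifier = nil` still covers updates made from the `rails console`.

```rb
# Preferred shape: `user` and `proposal` stay required, only `modifier` has a default.
def proposal_updated_no_action_required(user, proposal, modifier = nil)
  @proposal = proposal.decorate
  @modifier = modifier || NullUser.new # console-driven updates record no modifier
  # ...
end

# A caller that forgets an argument raises ArgumentError instead of silently
# emailing whoever `User.last` happens to be.
ProposalMailer.proposal_updated_no_action_required(user, proposal).deliver_later
```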
@@ -14,9 +14,10 @@ import (
 	"strconv"
 	"strings"

+	"go/parser"
+
 	"github.com/aykevl/go-llvm"
 	"github.com/aykevl/tinygo/ir"
-	"go/parser"
 	"golang.org/x/tools/go/loader"
 	"golang.org/x/tools/go/ssa"
 )
1
package compiler import ( "errors" "fmt" "go/build" "go/constant" "go/token" "go/types" "os" "path/filepath" "regexp" "runtime" "strconv" "strings" "github.com/aykevl/go-llvm" "github.com/aykevl/tinygo/ir" "go/parser" "golang.org/x/tools/go/loader" "golang.org/x/tools/go/ssa" ) func init() { llvm.InitializeAllTargets() llvm.InitializeAllTargetMCs() llvm.InitializeAllTargetInfos() llvm.InitializeAllAsmParsers() llvm.InitializeAllAsmPrinters() } // Configure the compiler. type Config struct { Triple string // LLVM target triple, e.g. x86_64-unknown-linux-gnu (empty string means default) DumpSSA bool // dump Go SSA, for compiler debugging Debug bool // add debug symbols for gdb RootDir string // GOROOT for TinyGo GOPATH string // GOPATH, like `go env GOPATH` BuildTags []string // build tags for TinyGo (empty means {runtime.GOOS/runtime.GOARCH}) } type Compiler struct { Config mod llvm.Module ctx llvm.Context builder llvm.Builder dibuilder *llvm.DIBuilder cu llvm.Metadata difiles map[string]llvm.Metadata ditypes map[string]llvm.Metadata machine llvm.TargetMachine targetData llvm.TargetData intType llvm.Type i8ptrType llvm.Type // for convenience uintptrType llvm.Type lenType llvm.Type coroIdFunc llvm.Value coroSizeFunc llvm.Value coroBeginFunc llvm.Value coroSuspendFunc llvm.Value coroEndFunc llvm.Value coroFreeFunc llvm.Value initFuncs []llvm.Value deferFuncs []*ir.Function ctxDeferFuncs []ContextDeferFunction ir *ir.Program } type Frame struct { fn *ir.Function locals map[ssa.Value]llvm.Value // local variables blockEntries map[*ssa.BasicBlock]llvm.BasicBlock // a *ssa.BasicBlock may be split up blockExits map[*ssa.BasicBlock]llvm.BasicBlock // these are the exit blocks currentBlock *ssa.BasicBlock phis []Phi blocking bool taskHandle llvm.Value cleanupBlock llvm.BasicBlock suspendBlock llvm.BasicBlock deferPtr llvm.Value difunc llvm.Metadata } type Phi struct { ssa *ssa.Phi llvm llvm.Value } // A thunk for a defer that defers calling a function pointer with context. type ContextDeferFunction struct { fn llvm.Value deferStruct []llvm.Type signature *types.Signature } func NewCompiler(pkgName string, config Config) (*Compiler, error) { if config.Triple == "" { config.Triple = llvm.DefaultTargetTriple() } if len(config.BuildTags) == 0 { config.BuildTags = []string{runtime.GOOS, runtime.GOARCH} } c := &Compiler{ Config: config, difiles: make(map[string]llvm.Metadata), ditypes: make(map[string]llvm.Metadata), } target, err := llvm.GetTargetFromTriple(config.Triple) if err != nil { return nil, err } c.machine = target.CreateTargetMachine(config.Triple, "", "", llvm.CodeGenLevelDefault, llvm.RelocStatic, llvm.CodeModelDefault) c.targetData = c.machine.CreateTargetData() c.ctx = llvm.NewContext() c.mod = c.ctx.NewModule(pkgName) c.mod.SetTarget(config.Triple) c.mod.SetDataLayout(c.targetData.String()) c.builder = c.ctx.NewBuilder() c.dibuilder = llvm.NewDIBuilder(c.mod) // Depends on platform (32bit or 64bit), but fix it here for now. 
c.intType = c.ctx.Int32Type() c.uintptrType = c.ctx.IntType(c.targetData.PointerSize() * 8) if c.targetData.PointerSize() < 4 { // 16 or 8 bits target with smaller length type c.lenType = c.uintptrType } else { c.lenType = c.ctx.Int32Type() // also defined as runtime.lenType } c.i8ptrType = llvm.PointerType(c.ctx.Int8Type(), 0) coroIdType := llvm.FunctionType(c.ctx.TokenType(), []llvm.Type{c.ctx.Int32Type(), c.i8ptrType, c.i8ptrType, c.i8ptrType}, false) c.coroIdFunc = llvm.AddFunction(c.mod, "llvm.coro.id", coroIdType) coroSizeType := llvm.FunctionType(c.ctx.Int32Type(), nil, false) c.coroSizeFunc = llvm.AddFunction(c.mod, "llvm.coro.size.i32", coroSizeType) coroBeginType := llvm.FunctionType(c.i8ptrType, []llvm.Type{c.ctx.TokenType(), c.i8ptrType}, false) c.coroBeginFunc = llvm.AddFunction(c.mod, "llvm.coro.begin", coroBeginType) coroSuspendType := llvm.FunctionType(c.ctx.Int8Type(), []llvm.Type{c.ctx.TokenType(), c.ctx.Int1Type()}, false) c.coroSuspendFunc = llvm.AddFunction(c.mod, "llvm.coro.suspend", coroSuspendType) coroEndType := llvm.FunctionType(c.ctx.Int1Type(), []llvm.Type{c.i8ptrType, c.ctx.Int1Type()}, false) c.coroEndFunc = llvm.AddFunction(c.mod, "llvm.coro.end", coroEndType) coroFreeType := llvm.FunctionType(c.i8ptrType, []llvm.Type{c.ctx.TokenType(), c.i8ptrType}, false) c.coroFreeFunc = llvm.AddFunction(c.mod, "llvm.coro.free", coroFreeType) return c, nil } // Return the LLVM module. Only valid after a successful compile. func (c *Compiler) Module() llvm.Module { return c.mod } // Compile the given package path or .go file path. Return an error when this // fails (in any stage). func (c *Compiler) Compile(mainPath string) error { tripleSplit := strings.Split(c.Triple, "-") // Prefix the GOPATH with the system GOROOT, as GOROOT is already set to // the TinyGo root. gopath := c.GOPATH if gopath == "" { gopath = runtime.GOROOT() } else { gopath = runtime.GOROOT() + string(filepath.ListSeparator) + gopath } config := loader.Config{ TypeChecker: types.Config{ Sizes: &StdSizes{ IntSize: int64(c.targetData.TypeAllocSize(c.intType)), PtrSize: int64(c.targetData.PointerSize()), MaxAlign: int64(c.targetData.PrefTypeAlignment(c.i8ptrType)), }, }, Build: &build.Context{ GOARCH: tripleSplit[0], GOOS: tripleSplit[2], GOROOT: c.RootDir, GOPATH: gopath, CgoEnabled: true, UseAllFiles: false, Compiler: "gc", // must be one of the recognized compilers BuildTags: append([]string{"tgo"}, c.BuildTags...), }, ParserMode: parser.ParseComments, } config.Import("runtime") if strings.HasSuffix(mainPath, ".go") { config.CreateFromFilenames("main", mainPath) } else { config.Import(mainPath) } lprogram, err := config.Load() if err != nil { return err } c.ir = ir.NewProgram(lprogram, mainPath) // Run some DCE and analysis passes. The results are later used by the // compiler. c.ir.SimpleDCE() // remove most dead code c.ir.AnalyseCallgraph() // set up callgraph c.ir.AnalyseInterfaceConversions() // determine which types are converted to an interface c.ir.AnalyseFunctionPointers() // determine which function pointer signatures need context c.ir.AnalyseBlockingRecursive() // make all parents of blocking calls blocking (transitively) c.ir.AnalyseGoCalls() // check whether we need a scheduler // Initialize debug information. c.cu = c.dibuilder.CreateCompileUnit(llvm.DICompileUnit{ Language: llvm.DW_LANG_Go, File: mainPath, Dir: "", Producer: "TinyGo", Optimized: true, }) var frames []*Frame // Declare all named struct types. 
for _, t := range c.ir.NamedTypes { if named, ok := t.Type.Type().(*types.Named); ok { if _, ok := named.Underlying().(*types.Struct); ok { t.LLVMType = c.ctx.StructCreateNamed(named.Obj().Pkg().Path() + "." + named.Obj().Name()) } } } // Define all named struct types. for _, t := range c.ir.NamedTypes { if named, ok := t.Type.Type().(*types.Named); ok { if st, ok := named.Underlying().(*types.Struct); ok { llvmType, err := c.getLLVMType(st) if err != nil { return err } t.LLVMType.StructSetBody(llvmType.StructElementTypes(), false) } } } // Declare all globals. These will get an initializer when parsing "package // initializer" functions. for _, g := range c.ir.Globals { typ := g.Type().(*types.Pointer).Elem() llvmType, err := c.getLLVMType(typ) if err != nil { return err } global := c.mod.NamedGlobal(g.LinkName()) if global.IsNil() { global = llvm.AddGlobal(c.mod, llvmType, g.LinkName()) } g.LLVMGlobal = global if !g.IsExtern() { global.SetLinkage(llvm.InternalLinkage) initializer, err := c.getZeroValue(llvmType) if err != nil { return err } global.SetInitializer(initializer) } } // Declare all functions. for _, f := range c.ir.Functions { frame, err := c.parseFuncDecl(f) if err != nil { return err } frames = append(frames, frame) } // Find and interpret package initializers. for _, frame := range frames { if frame.fn.Synthetic == "package initializer" { c.initFuncs = append(c.initFuncs, frame.fn.LLVMFn) if len(frame.fn.Blocks) != 1 { panic("unexpected number of basic blocks in package initializer") } // Try to interpret as much as possible of the init() function. // Whenever it hits an instruction that it doesn't understand, it // bails out and leaves the rest to the compiler (so initialization // continues at runtime). // This should only happen when it hits a function call or the end // of the block, ideally. err := c.ir.Interpret(frame.fn.Blocks[0], c.DumpSSA) if err != nil { return err } err = c.parseFunc(frame) if err != nil { return err } } } // Set values for globals (after package initializer has been interpreted). for _, g := range c.ir.Globals { if g.Initializer() == nil { continue } err := c.parseGlobalInitializer(g) if err != nil { return err } } // Add definitions to declarations. for _, frame := range frames { if frame.fn.CName() != "" { continue } if frame.fn.Blocks == nil { continue // external function } var err error if frame.fn.Synthetic == "package initializer" { continue // already done } else { err = c.parseFunc(frame) } if err != nil { return err } } // Create deferred function wrappers. for _, fn := range c.deferFuncs { // This function gets a single parameter which is a pointer to a struct // (the defer frame). // This struct starts with the values of runtime._defer, but after that // follow the real function parameters. // The job of this wrapper is to extract these parameters and to call // the real function with them. llvmFn := c.mod.NamedFunction(fn.LinkName() + "$defer") llvmFn.SetLinkage(llvm.InternalLinkage) entry := c.ctx.AddBasicBlock(llvmFn, "entry") c.builder.SetInsertPointAtEnd(entry) deferRawPtr := llvmFn.Param(0) // Get the real param type and cast to it. 
valueTypes := []llvm.Type{llvmFn.Type(), llvm.PointerType(c.mod.GetTypeByName("runtime._defer"), 0)} for _, param := range fn.Params { llvmType, err := c.getLLVMType(param.Type()) if err != nil { return err } valueTypes = append(valueTypes, llvmType) } deferFrameType := c.ctx.StructType(valueTypes, false) deferFramePtr := c.builder.CreateBitCast(deferRawPtr, llvm.PointerType(deferFrameType, 0), "deferFrame") // Extract the params from the struct. forwardParams := []llvm.Value{} zero := llvm.ConstInt(c.ctx.Int32Type(), 0, false) for i := range fn.Params { gep := c.builder.CreateGEP(deferFramePtr, []llvm.Value{zero, llvm.ConstInt(c.ctx.Int32Type(), uint64(i+2), false)}, "gep") forwardParam := c.builder.CreateLoad(gep, "param") forwardParams = append(forwardParams, forwardParam) } // Call real function (of which this is a wrapper). c.createCall(fn.LLVMFn, forwardParams, "") c.builder.CreateRetVoid() } // Create wrapper for deferred function pointer call. for _, thunk := range c.ctxDeferFuncs { // This function gets a single parameter which is a pointer to a struct // (the defer frame). // This struct starts with the values of runtime._defer, but after that // follows the closure and then the real parameters. // The job of this wrapper is to extract this closure and these // parameters and to call the function pointer with them. llvmFn := thunk.fn llvmFn.SetLinkage(llvm.InternalLinkage) entry := c.ctx.AddBasicBlock(llvmFn, "entry") // TODO: set the debug location - perhaps the location of the rundefers // call? c.builder.SetInsertPointAtEnd(entry) deferRawPtr := llvmFn.Param(0) // Get the real param type and cast to it. deferFrameType := c.ctx.StructType(thunk.deferStruct, false) deferFramePtr := c.builder.CreateBitCast(deferRawPtr, llvm.PointerType(deferFrameType, 0), "defer.frame") // Extract the params from the struct. forwardParams := []llvm.Value{} zero := llvm.ConstInt(c.ctx.Int32Type(), 0, false) for i := 3; i < len(thunk.deferStruct); i++ { gep := c.builder.CreateGEP(deferFramePtr, []llvm.Value{zero, llvm.ConstInt(c.ctx.Int32Type(), uint64(i), false)}, "") forwardParam := c.builder.CreateLoad(gep, "param") forwardParams = append(forwardParams, forwardParam) } // Extract the closure from the struct. fpGEP := c.builder.CreateGEP(deferFramePtr, []llvm.Value{ zero, llvm.ConstInt(c.ctx.Int32Type(), 2, false), llvm.ConstInt(c.ctx.Int32Type(), 1, false), }, "closure.fp.ptr") fp := c.builder.CreateLoad(fpGEP, "closure.fp") contextGEP := c.builder.CreateGEP(deferFramePtr, []llvm.Value{ zero, llvm.ConstInt(c.ctx.Int32Type(), 2, false), llvm.ConstInt(c.ctx.Int32Type(), 0, false), }, "closure.context.ptr") context := c.builder.CreateLoad(contextGEP, "closure.context") forwardParams = append(forwardParams, context) // Cast the function pointer in the closure to the correct function // pointer type. closureType, err := c.getLLVMType(thunk.signature) if err != nil { return err } fpType := closureType.StructElementTypes()[1] fpCast := c.builder.CreateBitCast(fp, fpType, "closure.fp.cast") // Call real function (of which this is a wrapper). c.createCall(fpCast, forwardParams, "") c.builder.CreateRetVoid() } // After all packages are imported, add a synthetic initializer function // that calls the initializer of each package. 
initFn := c.ir.GetFunction(c.ir.Program.ImportedPackage("runtime").Members["initAll"].(*ssa.Function)) initFn.LLVMFn.SetLinkage(llvm.InternalLinkage) difunc, err := c.attachDebugInfo(initFn) if err != nil { return err } pos := c.ir.Program.Fset.Position(initFn.Pos()) c.builder.SetCurrentDebugLocation(uint(pos.Line), uint(pos.Column), difunc, llvm.Metadata{}) block := c.ctx.AddBasicBlock(initFn.LLVMFn, "entry") c.builder.SetInsertPointAtEnd(block) for _, fn := range c.initFuncs { c.builder.CreateCall(fn, nil, "") } c.builder.CreateRetVoid() mainWrapper := c.ir.GetFunction(c.ir.Program.ImportedPackage("runtime").Members["mainWrapper"].(*ssa.Function)) mainWrapper.LLVMFn.SetLinkage(llvm.InternalLinkage) difunc, err = c.attachDebugInfo(mainWrapper) if err != nil { return err } pos = c.ir.Program.Fset.Position(mainWrapper.Pos()) c.builder.SetCurrentDebugLocation(uint(pos.Line), uint(pos.Column), difunc, llvm.Metadata{}) block = c.ctx.AddBasicBlock(mainWrapper.LLVMFn, "entry") c.builder.SetInsertPointAtEnd(block) realMain := c.mod.NamedFunction(c.ir.MainPkg().Pkg.Path() + ".main") if c.ir.NeedsScheduler() { coroutine := c.builder.CreateCall(realMain, []llvm.Value{llvm.ConstPointerNull(c.i8ptrType)}, "") scheduler := c.mod.NamedFunction("runtime.scheduler") c.builder.CreateCall(scheduler, []llvm.Value{coroutine}, "") } else { c.builder.CreateCall(realMain, nil, "") } c.builder.CreateRetVoid() // Initialize runtime type information, for interfaces. // See src/runtime/interface.go for more details. dynamicTypes := c.ir.AllDynamicTypes() numDynamicTypes := 0 for _, meta := range dynamicTypes { numDynamicTypes += len(meta.Methods) } ranges := make([]llvm.Value, 0, len(dynamicTypes)) funcPointers := make([]llvm.Value, 0, numDynamicTypes) signatures := make([]llvm.Value, 0, numDynamicTypes) startIndex := 0 rangeType := c.mod.GetTypeByName("runtime.methodSetRange") for _, meta := range dynamicTypes { rangeValues := []llvm.Value{ llvm.ConstInt(c.ctx.Int16Type(), uint64(startIndex), false), llvm.ConstInt(c.ctx.Int16Type(), uint64(len(meta.Methods)), false), } rangeValue := llvm.ConstNamedStruct(rangeType, rangeValues) ranges = append(ranges, rangeValue) methods := make([]*types.Selection, 0, len(meta.Methods)) for _, method := range meta.Methods { methods = append(methods, method) } c.ir.SortMethods(methods) for _, method := range methods { f := c.ir.GetFunction(c.ir.Program.MethodValue(method)) if f.LLVMFn.IsNil() { return errors.New("cannot find function: " + f.LinkName()) } fn, err := c.wrapInterfaceInvoke(f) if err != nil { return err } fnPtr := llvm.ConstBitCast(fn, c.i8ptrType) funcPointers = append(funcPointers, fnPtr) signatureNum := c.ir.MethodNum(method.Obj().(*types.Func)) signature := llvm.ConstInt(c.ctx.Int16Type(), uint64(signatureNum), false) signatures = append(signatures, signature) } startIndex += len(meta.Methods) } interfaceTypes := c.ir.AllInterfaces() interfaceIndex := make([]llvm.Value, len(interfaceTypes)) interfaceLengths := make([]llvm.Value, len(interfaceTypes)) interfaceMethods := make([]llvm.Value, 0) for i, itfType := range interfaceTypes { if itfType.Type.NumMethods() > 0xff { return errors.New("too many methods for interface " + itfType.Type.String()) } interfaceIndex[i] = llvm.ConstInt(c.ctx.Int16Type(), uint64(i), false) interfaceLengths[i] = llvm.ConstInt(c.ctx.Int8Type(), uint64(itfType.Type.NumMethods()), false) funcs := make([]*types.Func, itfType.Type.NumMethods()) for i := range funcs { funcs[i] = itfType.Type.Method(i) } c.ir.SortFuncs(funcs) for _, f := range 
funcs { id := llvm.ConstInt(c.ctx.Int16Type(), uint64(c.ir.MethodNum(f)), false) interfaceMethods = append(interfaceMethods, id) } } if len(ranges) >= 1<<16 { return errors.New("method call numbers do not fit in a 16-bit integer") } // Replace the pre-created arrays with the generated arrays. rangeArray := llvm.ConstArray(rangeType, ranges) rangeArrayNewGlobal := llvm.AddGlobal(c.mod, rangeArray.Type(), "runtime.methodSetRanges.tmp") rangeArrayNewGlobal.SetInitializer(rangeArray) rangeArrayNewGlobal.SetLinkage(llvm.InternalLinkage) rangeArrayOldGlobal := c.mod.NamedGlobal("runtime.methodSetRanges") rangeArrayOldGlobal.ReplaceAllUsesWith(llvm.ConstBitCast(rangeArrayNewGlobal, rangeArrayOldGlobal.Type())) rangeArrayOldGlobal.EraseFromParentAsGlobal() rangeArrayNewGlobal.SetName("runtime.methodSetRanges") funcArray := llvm.ConstArray(c.i8ptrType, funcPointers) funcArrayNewGlobal := llvm.AddGlobal(c.mod, funcArray.Type(), "runtime.methodSetFunctions.tmp") funcArrayNewGlobal.SetInitializer(funcArray) funcArrayNewGlobal.SetLinkage(llvm.InternalLinkage) funcArrayOldGlobal := c.mod.NamedGlobal("runtime.methodSetFunctions") funcArrayOldGlobal.ReplaceAllUsesWith(llvm.ConstBitCast(funcArrayNewGlobal, funcArrayOldGlobal.Type())) funcArrayOldGlobal.EraseFromParentAsGlobal() funcArrayNewGlobal.SetName("runtime.methodSetFunctions") signatureArray := llvm.ConstArray(c.ctx.Int16Type(), signatures) signatureArrayNewGlobal := llvm.AddGlobal(c.mod, signatureArray.Type(), "runtime.methodSetSignatures.tmp") signatureArrayNewGlobal.SetInitializer(signatureArray) signatureArrayNewGlobal.SetLinkage(llvm.InternalLinkage) signatureArrayOldGlobal := c.mod.NamedGlobal("runtime.methodSetSignatures") signatureArrayOldGlobal.ReplaceAllUsesWith(llvm.ConstBitCast(signatureArrayNewGlobal, signatureArrayOldGlobal.Type())) signatureArrayOldGlobal.EraseFromParentAsGlobal() signatureArrayNewGlobal.SetName("runtime.methodSetSignatures") interfaceIndexArray := llvm.ConstArray(c.ctx.Int16Type(), interfaceIndex) interfaceIndexArrayNewGlobal := llvm.AddGlobal(c.mod, interfaceIndexArray.Type(), "runtime.interfaceIndex.tmp") interfaceIndexArrayNewGlobal.SetInitializer(interfaceIndexArray) interfaceIndexArrayNewGlobal.SetLinkage(llvm.InternalLinkage) interfaceIndexArrayOldGlobal := c.mod.NamedGlobal("runtime.interfaceIndex") interfaceIndexArrayOldGlobal.ReplaceAllUsesWith(llvm.ConstBitCast(interfaceIndexArrayNewGlobal, interfaceIndexArrayOldGlobal.Type())) interfaceIndexArrayOldGlobal.EraseFromParentAsGlobal() interfaceIndexArrayNewGlobal.SetName("runtime.interfaceIndex") interfaceLengthsArray := llvm.ConstArray(c.ctx.Int8Type(), interfaceLengths) interfaceLengthsArrayNewGlobal := llvm.AddGlobal(c.mod, interfaceLengthsArray.Type(), "runtime.interfaceLengths.tmp") interfaceLengthsArrayNewGlobal.SetInitializer(interfaceLengthsArray) interfaceLengthsArrayNewGlobal.SetLinkage(llvm.InternalLinkage) interfaceLengthsArrayOldGlobal := c.mod.NamedGlobal("runtime.interfaceLengths") interfaceLengthsArrayOldGlobal.ReplaceAllUsesWith(llvm.ConstBitCast(interfaceLengthsArrayNewGlobal, interfaceLengthsArrayOldGlobal.Type())) interfaceLengthsArrayOldGlobal.EraseFromParentAsGlobal() interfaceLengthsArrayNewGlobal.SetName("runtime.interfaceLengths") interfaceMethodsArray := llvm.ConstArray(c.ctx.Int16Type(), interfaceMethods) interfaceMethodsArrayNewGlobal := llvm.AddGlobal(c.mod, interfaceMethodsArray.Type(), "runtime.interfaceMethods.tmp") interfaceMethodsArrayNewGlobal.SetInitializer(interfaceMethodsArray) 
interfaceMethodsArrayNewGlobal.SetLinkage(llvm.InternalLinkage) interfaceMethodsArrayOldGlobal := c.mod.NamedGlobal("runtime.interfaceMethods") interfaceMethodsArrayOldGlobal.ReplaceAllUsesWith(llvm.ConstBitCast(interfaceMethodsArrayNewGlobal, interfaceMethodsArrayOldGlobal.Type())) interfaceMethodsArrayOldGlobal.EraseFromParentAsGlobal() interfaceMethodsArrayNewGlobal.SetName("runtime.interfaceMethods") c.mod.NamedGlobal("runtime.firstTypeWithMethods").SetInitializer(llvm.ConstInt(c.ctx.Int16Type(), uint64(c.ir.FirstDynamicType()), false)) // see: https://reviews.llvm.org/D18355 c.mod.AddNamedMetadataOperand("llvm.module.flags", c.ctx.MDNode([]llvm.Metadata{ llvm.ConstInt(c.ctx.Int32Type(), 1, false).ConstantAsMetadata(), // Error on mismatch llvm.GlobalContext().MDString("Debug Info Version"), llvm.ConstInt(c.ctx.Int32Type(), 3, false).ConstantAsMetadata(), // DWARF version }), ) c.dibuilder.Finalize() return nil } func (c *Compiler) getLLVMType(goType types.Type) (llvm.Type, error) { switch typ := goType.(type) { case *types.Array: elemType, err := c.getLLVMType(typ.Elem()) if err != nil { return llvm.Type{}, err } return llvm.ArrayType(elemType, int(typ.Len())), nil case *types.Basic: switch typ.Kind() { case types.Bool, types.UntypedBool: return c.ctx.Int1Type(), nil case types.Int8, types.Uint8: return c.ctx.Int8Type(), nil case types.Int16, types.Uint16: return c.ctx.Int16Type(), nil case types.Int32, types.Uint32: return c.ctx.Int32Type(), nil case types.Int, types.Uint: return c.intType, nil case types.Int64, types.Uint64: return c.ctx.Int64Type(), nil case types.Float32: return c.ctx.FloatType(), nil case types.Float64: return c.ctx.DoubleType(), nil case types.Complex64: return llvm.VectorType(c.ctx.FloatType(), 2), nil case types.Complex128: return llvm.VectorType(c.ctx.DoubleType(), 2), nil case types.String, types.UntypedString: return c.mod.GetTypeByName("runtime._string"), nil case types.Uintptr: return c.uintptrType, nil case types.UnsafePointer: return c.i8ptrType, nil default: return llvm.Type{}, errors.New("todo: unknown basic type: " + typ.String()) } case *types.Chan: return llvm.PointerType(c.mod.GetTypeByName("runtime.channel"), 0), nil case *types.Interface: return c.mod.GetTypeByName("runtime._interface"), nil case *types.Map: return llvm.PointerType(c.mod.GetTypeByName("runtime.hashmap"), 0), nil case *types.Named: if _, ok := typ.Underlying().(*types.Struct); ok { llvmType := c.mod.GetTypeByName(typ.Obj().Pkg().Path() + "." + typ.Obj().Name()) if llvmType.IsNil() { return llvm.Type{}, errors.New("type not found: " + typ.Obj().Pkg().Path() + "." + typ.Obj().Name()) } return llvmType, nil } return c.getLLVMType(typ.Underlying()) case *types.Pointer: ptrTo, err := c.getLLVMType(typ.Elem()) if err != nil { return llvm.Type{}, err } return llvm.PointerType(ptrTo, 0), nil case *types.Signature: // function pointer // return value var err error var returnType llvm.Type if typ.Results().Len() == 0 { returnType = c.ctx.VoidType() } else if typ.Results().Len() == 1 { returnType, err = c.getLLVMType(typ.Results().At(0).Type()) if err != nil { return llvm.Type{}, err } } else { // Multiple return values. Put them together in a struct. 
members := make([]llvm.Type, typ.Results().Len()) for i := 0; i < typ.Results().Len(); i++ { returnType, err := c.getLLVMType(typ.Results().At(i).Type()) if err != nil { return llvm.Type{}, err } members[i] = returnType } returnType = c.ctx.StructType(members, false) } // param values var paramTypes []llvm.Type if typ.Recv() != nil { recv, err := c.getLLVMType(typ.Recv().Type()) if err != nil { return llvm.Type{}, err } if recv.StructName() == "runtime._interface" { recv = c.i8ptrType } paramTypes = append(paramTypes, c.expandFormalParamType(recv)...) } params := typ.Params() for i := 0; i < params.Len(); i++ { subType, err := c.getLLVMType(params.At(i).Type()) if err != nil { return llvm.Type{}, err } paramTypes = append(paramTypes, c.expandFormalParamType(subType)...) } var ptr llvm.Type if c.ir.SignatureNeedsContext(typ) { // make a closure type (with a function pointer type inside): // {context, funcptr} paramTypes = append(paramTypes, c.i8ptrType) ptr = llvm.PointerType(llvm.FunctionType(returnType, paramTypes, false), 0) ptr = c.ctx.StructType([]llvm.Type{c.i8ptrType, ptr}, false) } else { // make a simple function pointer ptr = llvm.PointerType(llvm.FunctionType(returnType, paramTypes, false), 0) } return ptr, nil case *types.Slice: elemType, err := c.getLLVMType(typ.Elem()) if err != nil { return llvm.Type{}, err } members := []llvm.Type{ llvm.PointerType(elemType, 0), c.lenType, // len c.lenType, // cap } return c.ctx.StructType(members, false), nil case *types.Struct: members := make([]llvm.Type, typ.NumFields()) for i := 0; i < typ.NumFields(); i++ { member, err := c.getLLVMType(typ.Field(i).Type()) if err != nil { return llvm.Type{}, err } members[i] = member } return c.ctx.StructType(members, false), nil default: return llvm.Type{}, errors.New("todo: unknown type: " + goType.String()) } } // Return a zero LLVM value for any LLVM type. Setting this value as an // initializer has the same effect as setting 'zeroinitializer' on a value. // Sadly, I haven't found a way to do it directly with the Go API but this works // just fine. func (c *Compiler) getZeroValue(typ llvm.Type) (llvm.Value, error) { switch typ.TypeKind() { case llvm.ArrayTypeKind: subTyp := typ.ElementType() subVal, err := c.getZeroValue(subTyp) if err != nil { return llvm.Value{}, err } vals := make([]llvm.Value, typ.ArrayLength()) for i := range vals { vals[i] = subVal } return llvm.ConstArray(subTyp, vals), nil case llvm.FloatTypeKind, llvm.DoubleTypeKind: return llvm.ConstFloat(typ, 0.0), nil case llvm.IntegerTypeKind: return llvm.ConstInt(typ, 0, false), nil case llvm.PointerTypeKind: return llvm.ConstPointerNull(typ), nil case llvm.StructTypeKind: types := typ.StructElementTypes() vals := make([]llvm.Value, len(types)) for i, subTyp := range types { val, err := c.getZeroValue(subTyp) if err != nil { return llvm.Value{}, err } vals[i] = val } if typ.StructName() != "" { return llvm.ConstNamedStruct(typ, vals), nil } else { return c.ctx.ConstStruct(vals, false), nil } case llvm.VectorTypeKind: zero, err := c.getZeroValue(typ.ElementType()) if err != nil { return llvm.Value{}, err } vals := make([]llvm.Value, typ.VectorSize()) for i := range vals { vals[i] = zero } return llvm.ConstVector(vals, false), nil default: return llvm.Value{}, errors.New("todo: LLVM zero initializer: " + typ.String()) } } // Is this a pointer type of some sort? Can be unsafe.Pointer or any *T pointer. 
func isPointer(typ types.Type) bool { if _, ok := typ.(*types.Pointer); ok { return true } else if typ, ok := typ.(*types.Basic); ok && typ.Kind() == types.UnsafePointer { return true } else { return false } } // Get the DWARF type for this Go type. func (c *Compiler) getDIType(typ types.Type) (llvm.Metadata, error) { name := typ.String() if dityp, ok := c.ditypes[name]; ok { return dityp, nil } else { llvmType, err := c.getLLVMType(typ) if err != nil { return llvm.Metadata{}, err } sizeInBytes := c.targetData.TypeAllocSize(llvmType) var encoding llvm.DwarfTypeEncoding switch typ := typ.(type) { case *types.Basic: if typ.Info()&types.IsBoolean != 0 { encoding = llvm.DW_ATE_boolean } else if typ.Info()&types.IsFloat != 0 { encoding = llvm.DW_ATE_float } else if typ.Info()&types.IsComplex != 0 { encoding = llvm.DW_ATE_complex_float } else if typ.Info()&types.IsUnsigned != 0 { encoding = llvm.DW_ATE_unsigned } else if typ.Info()&types.IsInteger != 0 { encoding = llvm.DW_ATE_signed } else if typ.Kind() == types.UnsafePointer { encoding = llvm.DW_ATE_address } case *types.Pointer: encoding = llvm.DW_ATE_address } // TODO: other types dityp = c.dibuilder.CreateBasicType(llvm.DIBasicType{ Name: name, SizeInBits: sizeInBytes * 8, Encoding: encoding, }) c.ditypes[name] = dityp return dityp, nil } } // Wrap an interface method function pointer. The wrapper takes in a pointer to // the underlying value, dereferences it, and calls the real method. This // wrapper is only needed when the interface value actually doesn't fit in a // pointer and a pointer to the value must be created. func (c *Compiler) wrapInterfaceInvoke(f *ir.Function) (llvm.Value, error) { receiverType, err := c.getLLVMType(f.Params[0].Type()) if err != nil { return llvm.Value{}, err } expandedReceiverType := c.expandFormalParamType(receiverType) if c.targetData.TypeAllocSize(receiverType) <= c.targetData.TypeAllocSize(c.i8ptrType) && len(expandedReceiverType) == 1 { // nothing to wrap return f.LLVMFn, nil } // create wrapper function fnType := f.LLVMFn.Type().ElementType() paramTypes := append([]llvm.Type{c.i8ptrType}, fnType.ParamTypes()[len(expandedReceiverType):]...) wrapFnType := llvm.FunctionType(fnType.ReturnType(), paramTypes, false) wrapper := llvm.AddFunction(c.mod, f.LinkName()+"$invoke", wrapFnType) wrapper.SetLinkage(llvm.InternalLinkage) // add debug info pos := c.ir.Program.Fset.Position(f.Pos()) difunc, err := c.attachDebugInfoRaw(f, wrapper, "$invoke", pos.Filename, pos.Line) if err != nil { return llvm.Value{}, err } c.builder.SetCurrentDebugLocation(uint(pos.Line), uint(pos.Column), difunc, llvm.Metadata{}) // set up IR builder block := c.ctx.AddBasicBlock(wrapper, "entry") c.builder.SetInsertPointAtEnd(block) var receiverPtr llvm.Value if c.targetData.TypeAllocSize(receiverType) > c.targetData.TypeAllocSize(c.i8ptrType) { // The receiver is passed in using a pointer. We have to load it here // and pass it by value to the real function. // Load the underlying value. receiverPtrType := llvm.PointerType(receiverType, 0) receiverPtr = c.builder.CreateBitCast(wrapper.Param(0), receiverPtrType, "receiver.ptr") } else if len(expandedReceiverType) != 1 { // The value is stored in the interface, but it is of type struct which // is expanded to multiple parameters (e.g. {i8, i8}). So we have to // receive the struct as parameter, expand it, and pass it on to the // real function. // Cast the passed-in i8* to the struct value (using an alloca) and // extract its values. 
alloca := c.builder.CreateAlloca(c.i8ptrType, "receiver.alloca") c.builder.CreateStore(wrapper.Param(0), alloca) receiverPtr = c.builder.CreateBitCast(alloca, llvm.PointerType(receiverType, 0), "receiver.ptr") } else { panic("unreachable") } receiverValue := c.builder.CreateLoad(receiverPtr, "receiver") params := append(c.expandFormalParam(receiverValue), wrapper.Params()[1:]...) if fnType.ReturnType().TypeKind() == llvm.VoidTypeKind { c.builder.CreateCall(f.LLVMFn, params, "") c.builder.CreateRetVoid() } else { ret := c.builder.CreateCall(f.LLVMFn, params, "ret") c.builder.CreateRet(ret) } return wrapper, nil } func (c *Compiler) parseFuncDecl(f *ir.Function) (*Frame, error) { frame := &Frame{ fn: f, locals: make(map[ssa.Value]llvm.Value), blockEntries: make(map[*ssa.BasicBlock]llvm.BasicBlock), blockExits: make(map[*ssa.BasicBlock]llvm.BasicBlock), blocking: c.ir.IsBlocking(f), } var retType llvm.Type if frame.blocking { if f.Signature.Results() != nil { return nil, errors.New("todo: return values in blocking function") } retType = c.i8ptrType } else if f.Signature.Results() == nil { retType = c.ctx.VoidType() } else if f.Signature.Results().Len() == 1 { var err error retType, err = c.getLLVMType(f.Signature.Results().At(0).Type()) if err != nil { return nil, err } } else { results := make([]llvm.Type, 0, f.Signature.Results().Len()) for i := 0; i < f.Signature.Results().Len(); i++ { typ, err := c.getLLVMType(f.Signature.Results().At(i).Type()) if err != nil { return nil, err } results = append(results, typ) } retType = c.ctx.StructType(results, false) } var paramTypes []llvm.Type if frame.blocking { paramTypes = append(paramTypes, c.i8ptrType) // parent coroutine } for _, param := range f.Params { paramType, err := c.getLLVMType(param.Type()) if err != nil { return nil, err } paramTypeFragments := c.expandFormalParamType(paramType) paramTypes = append(paramTypes, paramTypeFragments...) } if c.ir.FunctionNeedsContext(f) { // This function gets an extra parameter: the context pointer (for // closures and bound methods). Add it as an extra paramter here. paramTypes = append(paramTypes, c.i8ptrType) } fnType := llvm.FunctionType(retType, paramTypes, false) name := f.LinkName() frame.fn.LLVMFn = c.mod.NamedFunction(name) if frame.fn.LLVMFn.IsNil() { frame.fn.LLVMFn = llvm.AddFunction(c.mod, name, fnType) } if c.Debug && f.Synthetic == "package initializer" { difunc, err := c.attachDebugInfoRaw(f, f.LLVMFn, "", "", 0) if err != nil { return nil, err } frame.difunc = difunc } else if c.Debug && f.Syntax() != nil && len(f.Blocks) != 0 { // Create debug info file if needed. difunc, err := c.attachDebugInfo(f) if err != nil { return nil, err } frame.difunc = difunc } return frame, nil } func (c *Compiler) attachDebugInfo(f *ir.Function) (llvm.Metadata, error) { pos := c.ir.Program.Fset.Position(f.Syntax().Pos()) return c.attachDebugInfoRaw(f, f.LLVMFn, "", pos.Filename, pos.Line) } func (c *Compiler) attachDebugInfoRaw(f *ir.Function, llvmFn llvm.Value, suffix, filename string, line int) (llvm.Metadata, error) { if _, ok := c.difiles[filename]; !ok { dir, file := filepath.Split(filename) if dir != "" { dir = dir[:len(dir)-1] } c.difiles[filename] = c.dibuilder.CreateFile(file, dir) } // Debug info for this function. 
diparams := make([]llvm.Metadata, 0, len(f.Params)) for _, param := range f.Params { ditype, err := c.getDIType(param.Type()) if err != nil { return llvm.Metadata{}, err } diparams = append(diparams, ditype) } diFuncType := c.dibuilder.CreateSubroutineType(llvm.DISubroutineType{ File: c.difiles[filename], Parameters: diparams, Flags: 0, // ? }) difunc := c.dibuilder.CreateFunction(c.difiles[filename], llvm.DIFunction{ Name: f.RelString(nil) + suffix, LinkageName: f.LinkName() + suffix, File: c.difiles[filename], Line: line, Type: diFuncType, LocalToUnit: true, IsDefinition: true, ScopeLine: 0, Flags: llvm.FlagPrototyped, Optimized: true, }) llvmFn.SetSubprogram(difunc) return difunc, nil } // Create a new global hashmap bucket, for map initialization. func (c *Compiler) initMapNewBucket(prefix string, mapType *types.Map) (llvm.Value, uint64, uint64, error) { llvmKeyType, err := c.getLLVMType(mapType.Key().Underlying()) if err != nil { return llvm.Value{}, 0, 0, err } llvmValueType, err := c.getLLVMType(mapType.Elem().Underlying()) if err != nil { return llvm.Value{}, 0, 0, err } keySize := c.targetData.TypeAllocSize(llvmKeyType) valueSize := c.targetData.TypeAllocSize(llvmValueType) bucketType := c.ctx.StructType([]llvm.Type{ llvm.ArrayType(c.ctx.Int8Type(), 8), // tophash c.i8ptrType, // next bucket llvm.ArrayType(llvmKeyType, 8), // key type llvm.ArrayType(llvmValueType, 8), // value type }, false) bucketValue, err := c.getZeroValue(bucketType) if err != nil { return llvm.Value{}, 0, 0, err } bucket := llvm.AddGlobal(c.mod, bucketType, prefix+"$hashmap$bucket") bucket.SetInitializer(bucketValue) bucket.SetLinkage(llvm.InternalLinkage) return bucket, keySize, valueSize, nil } func (c *Compiler) parseGlobalInitializer(g *ir.Global) error { if g.IsExtern() { return nil } llvmValue, err := c.getInterpretedValue(g.LinkName(), g.Initializer()) if err != nil { return err } g.LLVMGlobal.SetInitializer(llvmValue) return nil } // Turn a computed Value type (ConstValue, ArrayValue, etc.) into a LLVM value. // This is used to set the initializer of globals after they have been // calculated by the package initializer interpreter. 
func (c *Compiler) getInterpretedValue(prefix string, value ir.Value) (llvm.Value, error) { switch value := value.(type) { case *ir.ArrayValue: vals := make([]llvm.Value, len(value.Elems)) for i, elem := range value.Elems { val, err := c.getInterpretedValue(prefix+"$arrayval", elem) if err != nil { return llvm.Value{}, err } vals[i] = val } subTyp, err := c.getLLVMType(value.ElemType) if err != nil { return llvm.Value{}, err } return llvm.ConstArray(subTyp, vals), nil case *ir.ConstValue: return c.parseConst(prefix, value.Expr) case *ir.FunctionValue: if value.Elem == nil { llvmType, err := c.getLLVMType(value.Type) if err != nil { return llvm.Value{}, err } return c.getZeroValue(llvmType) } fn := c.ir.GetFunction(value.Elem) ptr := fn.LLVMFn if c.ir.SignatureNeedsContext(fn.Signature) { // Create closure value: {context, function pointer} ptr = c.ctx.ConstStruct([]llvm.Value{llvm.ConstPointerNull(c.i8ptrType), ptr}, false) } return ptr, nil case *ir.GlobalValue: zero := llvm.ConstInt(c.ctx.Int32Type(), 0, false) ptr := llvm.ConstInBoundsGEP(value.Global.LLVMGlobal, []llvm.Value{zero}) return ptr, nil case *ir.InterfaceValue: underlying := llvm.ConstPointerNull(c.i8ptrType) // could be any 0 value if value.Elem != nil { elem, err := c.getInterpretedValue(prefix, value.Elem) if err != nil { return llvm.Value{}, err } underlying = elem } return c.parseMakeInterface(underlying, value.Type, prefix) case *ir.MapValue: // Create initial bucket. firstBucketGlobal, keySize, valueSize, err := c.initMapNewBucket(prefix, value.Type) if err != nil { return llvm.Value{}, err } // Insert each key/value pair in the hashmap. bucketGlobal := firstBucketGlobal for i, key := range value.Keys { llvmKey, err := c.getInterpretedValue(prefix, key) if err != nil { return llvm.Value{}, nil } llvmValue, err := c.getInterpretedValue(prefix, value.Values[i]) if err != nil { return llvm.Value{}, nil } constVal := key.(*ir.ConstValue).Expr var keyBuf []byte switch constVal.Type().Underlying().(*types.Basic).Kind() { case types.String, types.UntypedString: keyBuf = []byte(constant.StringVal(constVal.Value)) case types.Int: keyBuf = make([]byte, c.targetData.TypeAllocSize(c.intType)) n, _ := constant.Uint64Val(constVal.Value) for i := range keyBuf { keyBuf[i] = byte(n) n >>= 8 } default: return llvm.Value{}, errors.New("todo: init: map key not implemented: " + constVal.Type().Underlying().String()) } hash := hashmapHash(keyBuf) if i%8 == 0 && i != 0 { // Bucket is full, create a new one. newBucketGlobal, _, _, err := c.initMapNewBucket(prefix, value.Type) if err != nil { return llvm.Value{}, err } zero := llvm.ConstInt(c.ctx.Int32Type(), 0, false) newBucketPtr := llvm.ConstInBoundsGEP(newBucketGlobal, []llvm.Value{zero}) newBucketPtrCast := llvm.ConstBitCast(newBucketPtr, c.i8ptrType) // insert pointer into old bucket bucket := bucketGlobal.Initializer() bucket = llvm.ConstInsertValue(bucket, newBucketPtrCast, []uint32{1}) bucketGlobal.SetInitializer(bucket) // switch to next bucket bucketGlobal = newBucketGlobal } tophashValue := llvm.ConstInt(c.ctx.Int8Type(), uint64(hashmapTopHash(hash)), false) bucket := bucketGlobal.Initializer() bucket = llvm.ConstInsertValue(bucket, tophashValue, []uint32{0, uint32(i % 8)}) bucket = llvm.ConstInsertValue(bucket, llvmKey, []uint32{2, uint32(i % 8)}) bucket = llvm.ConstInsertValue(bucket, llvmValue, []uint32{3, uint32(i % 8)}) bucketGlobal.SetInitializer(bucket) } // Create the hashmap itself. 
zero := llvm.ConstInt(c.ctx.Int32Type(), 0, false) bucketPtr := llvm.ConstInBoundsGEP(firstBucketGlobal, []llvm.Value{zero}) hashmapType := c.mod.GetTypeByName("runtime.hashmap") hashmap := llvm.ConstNamedStruct(hashmapType, []llvm.Value{ llvm.ConstPointerNull(llvm.PointerType(hashmapType, 0)), // next llvm.ConstBitCast(bucketPtr, c.i8ptrType), // buckets llvm.ConstInt(c.lenType, uint64(len(value.Keys)), false), // count llvm.ConstInt(c.ctx.Int8Type(), keySize, false), // keySize llvm.ConstInt(c.ctx.Int8Type(), valueSize, false), // valueSize llvm.ConstInt(c.ctx.Int8Type(), 0, false), // bucketBits }) // Create a pointer to this hashmap. hashmapPtr := llvm.AddGlobal(c.mod, hashmap.Type(), prefix+"$hashmap") hashmapPtr.SetInitializer(hashmap) hashmapPtr.SetLinkage(llvm.InternalLinkage) return llvm.ConstInBoundsGEP(hashmapPtr, []llvm.Value{zero}), nil case *ir.PointerBitCastValue: elem, err := c.getInterpretedValue(prefix, value.Elem) if err != nil { return llvm.Value{}, err } llvmType, err := c.getLLVMType(value.Type) if err != nil { return llvm.Value{}, err } return llvm.ConstBitCast(elem, llvmType), nil case *ir.PointerToUintptrValue: elem, err := c.getInterpretedValue(prefix, value.Elem) if err != nil { return llvm.Value{}, err } return llvm.ConstPtrToInt(elem, c.uintptrType), nil case *ir.PointerValue: if value.Elem == nil { typ, err := c.getLLVMType(value.Type) if err != nil { return llvm.Value{}, err } return llvm.ConstPointerNull(typ), nil } elem, err := c.getInterpretedValue(prefix, *value.Elem) if err != nil { return llvm.Value{}, err } obj := llvm.AddGlobal(c.mod, elem.Type(), prefix+"$ptrvalue") obj.SetInitializer(elem) obj.SetLinkage(llvm.InternalLinkage) elem = obj zero := llvm.ConstInt(c.ctx.Int32Type(), 0, false) ptr := llvm.ConstInBoundsGEP(elem, []llvm.Value{zero}) return ptr, nil case *ir.SliceValue: var globalPtr llvm.Value var arrayLength uint64 if value.Array == nil { arrayType, err := c.getLLVMType(value.Type.Elem()) if err != nil { return llvm.Value{}, err } globalPtr = llvm.ConstPointerNull(llvm.PointerType(arrayType, 0)) } else { // make array array, err := c.getInterpretedValue(prefix, value.Array) if err != nil { return llvm.Value{}, err } // make global from array global := llvm.AddGlobal(c.mod, array.Type(), prefix+"$array") global.SetInitializer(array) global.SetLinkage(llvm.InternalLinkage) // get pointer to global zero := llvm.ConstInt(c.ctx.Int32Type(), 0, false) globalPtr = c.builder.CreateInBoundsGEP(global, []llvm.Value{zero, zero}, "") arrayLength = uint64(len(value.Array.Elems)) } // make slice sliceTyp, err := c.getLLVMType(value.Type) if err != nil { return llvm.Value{}, err } llvmLen := llvm.ConstInt(c.lenType, arrayLength, false) slice := llvm.ConstNamedStruct(sliceTyp, []llvm.Value{ globalPtr, // ptr llvmLen, // len llvmLen, // cap }) return slice, nil case *ir.StructValue: fields := make([]llvm.Value, len(value.Fields)) for i, elem := range value.Fields { field, err := c.getInterpretedValue(prefix, elem) if err != nil { return llvm.Value{}, err } fields[i] = field } switch value.Type.(type) { case *types.Named: llvmType, err := c.getLLVMType(value.Type) if err != nil { return llvm.Value{}, err } return llvm.ConstNamedStruct(llvmType, fields), nil case *types.Struct: return c.ctx.ConstStruct(fields, false), nil default: return llvm.Value{}, errors.New("init: unknown struct type: " + value.Type.String()) } case *ir.ZeroBasicValue: llvmType, err := c.getLLVMType(value.Type) if err != nil { return llvm.Value{}, err } return c.getZeroValue(llvmType) 
default: return llvm.Value{}, errors.New("init: unknown initializer type: " + fmt.Sprintf("%#v", value)) } } func (c *Compiler) parseFunc(frame *Frame) error { if c.DumpSSA { fmt.Printf("\nfunc %s:\n", frame.fn.Function) } if !frame.fn.IsExported() { frame.fn.LLVMFn.SetLinkage(llvm.InternalLinkage) } if frame.fn.IsInterrupt() && strings.HasPrefix(c.Triple, "avr") { frame.fn.LLVMFn.SetFunctionCallConv(85) // CallingConv::AVR_SIGNAL } if c.Debug { pos := c.ir.Program.Fset.Position(frame.fn.Pos()) c.builder.SetCurrentDebugLocation(uint(pos.Line), uint(pos.Column), frame.difunc, llvm.Metadata{}) } // Pre-create all basic blocks in the function. for _, block := range frame.fn.DomPreorder() { llvmBlock := c.ctx.AddBasicBlock(frame.fn.LLVMFn, block.Comment) frame.blockEntries[block] = llvmBlock frame.blockExits[block] = llvmBlock } if frame.blocking { frame.cleanupBlock = c.ctx.AddBasicBlock(frame.fn.LLVMFn, "task.cleanup") frame.suspendBlock = c.ctx.AddBasicBlock(frame.fn.LLVMFn, "task.suspend") } entryBlock := frame.blockEntries[frame.fn.Blocks[0]] c.builder.SetInsertPointAtEnd(entryBlock) // Load function parameters llvmParamIndex := 0 for i, param := range frame.fn.Params { llvmType, err := c.getLLVMType(param.Type()) if err != nil { return err } fields := make([]llvm.Value, 0, 1) for range c.expandFormalParamType(llvmType) { fields = append(fields, frame.fn.LLVMFn.Param(llvmParamIndex)) llvmParamIndex++ } frame.locals[param] = c.collapseFormalParam(llvmType, fields) // Add debug information to this parameter (if available) if c.Debug && frame.fn.Syntax() != nil { pos := c.ir.Program.Fset.Position(frame.fn.Syntax().Pos()) dityp, err := c.getDIType(param.Type()) if err != nil { return err } c.dibuilder.CreateParameterVariable(frame.difunc, llvm.DIParameterVariable{ Name: param.Name(), File: c.difiles[pos.Filename], Line: pos.Line, Type: dityp, AlwaysPreserve: true, ArgNo: i + 1, }) // TODO: set the value of this parameter. } } // Load free variables from the context. This is a closure (or bound // method). if len(frame.fn.FreeVars) != 0 { if !c.ir.FunctionNeedsContext(frame.fn) { panic("free variables on function without context") } context := frame.fn.LLVMFn.LastParam() context.SetName("context") // Determine the context type. It's a struct containing all variables. freeVarTypes := make([]llvm.Type, 0, len(frame.fn.FreeVars)) for _, freeVar := range frame.fn.FreeVars { typ, err := c.getLLVMType(freeVar.Type()) if err != nil { return err } freeVarTypes = append(freeVarTypes, typ) } contextType := c.ctx.StructType(freeVarTypes, false) // Get a correctly-typed pointer to the context. contextAlloc := llvm.Value{} if c.targetData.TypeAllocSize(contextType) <= c.targetData.TypeAllocSize(c.i8ptrType) { // Context stored directly in pointer. Load it using an alloca. contextRawAlloc := c.builder.CreateAlloca(llvm.PointerType(c.i8ptrType, 0), "") contextRawValue := c.builder.CreateBitCast(context, llvm.PointerType(c.i8ptrType, 0), "") c.builder.CreateStore(contextRawValue, contextRawAlloc) contextAlloc = c.builder.CreateBitCast(contextRawAlloc, llvm.PointerType(contextType, 0), "") } else { // Context stored in the heap. Bitcast the passed-in pointer to the // correct pointer type. contextAlloc = c.builder.CreateBitCast(context, llvm.PointerType(contextType, 0), "") } // Load each free variable from the context. // A free variable is always a pointer when this is a closure, but it // can be another type when it is a wrapper for a bound method (these // wrappers are generated by the ssa package). 
for i, freeVar := range frame.fn.FreeVars { indices := []llvm.Value{ llvm.ConstInt(c.ctx.Int32Type(), 0, false), llvm.ConstInt(c.ctx.Int32Type(), uint64(i), false), } gep := c.builder.CreateInBoundsGEP(contextAlloc, indices, "") frame.locals[freeVar] = c.builder.CreateLoad(gep, "") } } if frame.fn.Recover != nil { // Create defer list pointer. deferType := llvm.PointerType(c.mod.GetTypeByName("runtime._defer"), 0) frame.deferPtr = c.builder.CreateAlloca(deferType, "deferPtr") c.builder.CreateStore(llvm.ConstPointerNull(deferType), frame.deferPtr) } if frame.blocking { // Coroutine initialization. taskState := c.builder.CreateAlloca(c.mod.GetTypeByName("runtime.taskState"), "task.state") stateI8 := c.builder.CreateBitCast(taskState, c.i8ptrType, "task.state.i8") id := c.builder.CreateCall(c.coroIdFunc, []llvm.Value{ llvm.ConstInt(c.ctx.Int32Type(), 0, false), stateI8, llvm.ConstNull(c.i8ptrType), llvm.ConstNull(c.i8ptrType), }, "task.token") size := c.builder.CreateCall(c.coroSizeFunc, nil, "task.size") if c.targetData.TypeAllocSize(size.Type()) > c.targetData.TypeAllocSize(c.uintptrType) { size = c.builder.CreateTrunc(size, c.uintptrType, "task.size.uintptr") } else if c.targetData.TypeAllocSize(size.Type()) < c.targetData.TypeAllocSize(c.uintptrType) { size = c.builder.CreateZExt(size, c.uintptrType, "task.size.uintptr") } data := c.createRuntimeCall("alloc", []llvm.Value{size}, "task.data") frame.taskHandle = c.builder.CreateCall(c.coroBeginFunc, []llvm.Value{id, data}, "task.handle") // Coroutine cleanup. Free resources associated with this coroutine. c.builder.SetInsertPointAtEnd(frame.cleanupBlock) mem := c.builder.CreateCall(c.coroFreeFunc, []llvm.Value{id, frame.taskHandle}, "task.data.free") c.createRuntimeCall("free", []llvm.Value{mem}, "") // re-insert parent coroutine c.createRuntimeCall("yieldToScheduler", []llvm.Value{frame.fn.LLVMFn.FirstParam()}, "") c.builder.CreateBr(frame.suspendBlock) // Coroutine suspend. A call to llvm.coro.suspend() will branch here. c.builder.SetInsertPointAtEnd(frame.suspendBlock) c.builder.CreateCall(c.coroEndFunc, []llvm.Value{frame.taskHandle, llvm.ConstInt(c.ctx.Int1Type(), 0, false)}, "unused") c.builder.CreateRet(frame.taskHandle) } // Fill blocks with instructions. 
for _, block := range frame.fn.DomPreorder() { if c.DumpSSA { fmt.Printf("%d: %s:\n", block.Index, block.Comment) } c.builder.SetInsertPointAtEnd(frame.blockEntries[block]) frame.currentBlock = block for _, instr := range block.Instrs { if _, ok := instr.(*ssa.DebugRef); ok { continue } if c.DumpSSA { if val, ok := instr.(ssa.Value); ok && val.Name() != "" { fmt.Printf("\t%s = %s\n", val.Name(), val.String()) } else { fmt.Printf("\t%s\n", instr.String()) } } err := c.parseInstr(frame, instr) if err != nil { return err } } if frame.fn.Name() == "init" && len(block.Instrs) == 0 { c.builder.CreateRetVoid() } } // Resolve phi nodes for _, phi := range frame.phis { block := phi.ssa.Block() for i, edge := range phi.ssa.Edges { llvmVal, err := c.parseExpr(frame, edge) if err != nil { return err } llvmBlock := frame.blockExits[block.Preds[i]] phi.llvm.AddIncoming([]llvm.Value{llvmVal}, []llvm.BasicBlock{llvmBlock}) } } return nil } func (c *Compiler) parseInstr(frame *Frame, instr ssa.Instruction) error { if c.Debug { pos := c.ir.Program.Fset.Position(instr.Pos()) c.builder.SetCurrentDebugLocation(uint(pos.Line), uint(pos.Column), frame.difunc, llvm.Metadata{}) } switch instr := instr.(type) { case ssa.Value: value, err := c.parseExpr(frame, instr) if err == ir.ErrCGoWrapper { // Ignore CGo global variables which we don't use. return nil } frame.locals[instr] = value return err case *ssa.DebugRef: return nil // ignore case *ssa.Defer: // The pointer to the previous defer struct, which we will replace to // make a linked list. next := c.builder.CreateLoad(frame.deferPtr, "defer.next") deferFuncType := llvm.FunctionType(c.ctx.VoidType(), []llvm.Type{next.Type()}, false) var values []llvm.Value var valueTypes []llvm.Type if callee, ok := instr.Call.Value.(*ssa.Function); ok && !instr.Call.IsInvoke() { // Regular function call. fn := c.ir.GetFunction(callee) // Try to find the wrapper $defer function. deferName := fn.LinkName() + "$defer" callback := c.mod.NamedFunction(deferName) if callback.IsNil() { // Not found, have to add it. callback = llvm.AddFunction(c.mod, deferName, deferFuncType) c.deferFuncs = append(c.deferFuncs, fn) } // Collect all values to be put in the struct (starting with // runtime._defer fields). values = []llvm.Value{callback, next} valueTypes = []llvm.Type{callback.Type(), next.Type()} for _, param := range instr.Call.Args { llvmParam, err := c.parseExpr(frame, param) if err != nil { return err } values = append(values, llvmParam) valueTypes = append(valueTypes, llvmParam.Type()) } } else if makeClosure, ok := instr.Call.Value.(*ssa.MakeClosure); ok { // Immediately applied function literal with free variables. closure, err := c.parseExpr(frame, instr.Call.Value) if err != nil { return err } // Hopefully, LLVM will merge equivalent functions. deferName := frame.fn.LinkName() + "$fpdefer" callback := llvm.AddFunction(c.mod, deferName, deferFuncType) // Collect all values to be put in the struct (starting with // runtime._defer fields, followed by the closure). 
values = []llvm.Value{callback, next, closure} valueTypes = []llvm.Type{callback.Type(), next.Type(), closure.Type()} for _, param := range instr.Call.Args { llvmParam, err := c.parseExpr(frame, param) if err != nil { return err } values = append(values, llvmParam) valueTypes = append(valueTypes, llvmParam.Type()) } thunk := ContextDeferFunction{ callback, valueTypes, makeClosure.Fn.(*ssa.Function).Signature, } c.ctxDeferFuncs = append(c.ctxDeferFuncs, thunk) } else { return errors.New("todo: defer on uncommon function call type") } // Make a struct out of the collected values to put in the defer frame. deferFrameType := c.ctx.StructType(valueTypes, false) deferFrame, err := c.getZeroValue(deferFrameType) if err != nil { return err } for i, value := range values { deferFrame = c.builder.CreateInsertValue(deferFrame, value, i, "") } // Put this struct in an alloca. alloca := c.builder.CreateAlloca(deferFrameType, "defer.alloca") c.builder.CreateStore(deferFrame, alloca) // Push it on top of the linked list by replacing deferPtr. allocaCast := c.builder.CreateBitCast(alloca, next.Type(), "defer.alloca.cast") c.builder.CreateStore(allocaCast, frame.deferPtr) return nil case *ssa.Go: if instr.Common().Method != nil { return errors.New("todo: go on method receiver") } // Execute non-blocking calls (including builtins) directly. // parentHandle param is ignored. if !c.ir.IsBlocking(c.ir.GetFunction(instr.Common().Value.(*ssa.Function))) { _, err := c.parseCall(frame, instr.Common(), llvm.Value{}) return err // probably nil } // Start this goroutine. // parentHandle is nil, as the goroutine has no parent frame (it's a new // stack). handle, err := c.parseCall(frame, instr.Common(), llvm.Value{}) if err != nil { return err } c.createRuntimeCall("yieldToScheduler", []llvm.Value{handle}, "") return nil case *ssa.If: cond, err := c.parseExpr(frame, instr.Cond) if err != nil { return err } block := instr.Block() blockThen := frame.blockEntries[block.Succs[0]] blockElse := frame.blockEntries[block.Succs[1]] c.builder.CreateCondBr(cond, blockThen, blockElse) return nil case *ssa.Jump: blockJump := frame.blockEntries[instr.Block().Succs[0]] c.builder.CreateBr(blockJump) return nil case *ssa.MapUpdate: m, err := c.parseExpr(frame, instr.Map) if err != nil { return err } key, err := c.parseExpr(frame, instr.Key) if err != nil { return err } value, err := c.parseExpr(frame, instr.Value) if err != nil { return err } mapType := instr.Map.Type().Underlying().(*types.Map) return c.emitMapUpdate(mapType.Key(), m, key, value) case *ssa.Panic: value, err := c.parseExpr(frame, instr.X) if err != nil { return err } c.createRuntimeCall("_panic", []llvm.Value{value}, "") c.builder.CreateUnreachable() return nil case *ssa.Return: if frame.blocking { if len(instr.Results) != 0 { return errors.New("todo: return values from blocking function") } // Final suspend. continuePoint := c.builder.CreateCall(c.coroSuspendFunc, []llvm.Value{ llvm.ConstNull(c.ctx.TokenType()), llvm.ConstInt(c.ctx.Int1Type(), 1, false), // final=true }, "") sw := c.builder.CreateSwitch(continuePoint, frame.suspendBlock, 2) sw.AddCase(llvm.ConstInt(c.ctx.Int8Type(), 1, false), frame.cleanupBlock) return nil } else { if len(instr.Results) == 0 { c.builder.CreateRetVoid() return nil } else if len(instr.Results) == 1 { val, err := c.parseExpr(frame, instr.Results[0]) if err != nil { return err } c.builder.CreateRet(val) return nil } else { // Multiple return values. Put them all in a struct. 
retVal, err := c.getZeroValue(frame.fn.LLVMFn.Type().ElementType().ReturnType()) if err != nil { return err } for i, result := range instr.Results { val, err := c.parseExpr(frame, result) if err != nil { return err } retVal = c.builder.CreateInsertValue(retVal, val, i, "") } c.builder.CreateRet(retVal) return nil } } case *ssa.RunDefers: deferData := c.builder.CreateLoad(frame.deferPtr, "") c.createRuntimeCall("rundefers", []llvm.Value{deferData}, "") return nil case *ssa.Store: llvmAddr, err := c.parseExpr(frame, instr.Addr) if err == ir.ErrCGoWrapper { // Ignore CGo global variables which we don't use. return nil } if err != nil { return err } llvmVal, err := c.parseExpr(frame, instr.Val) if err != nil { return err } store := c.builder.CreateStore(llvmVal, llvmAddr) valType := instr.Addr.Type().(*types.Pointer).Elem() if c.ir.IsVolatile(valType) { // Volatile store, for memory-mapped registers. store.SetVolatile(true) } return nil default: return errors.New("unknown instruction: " + instr.String()) } } func (c *Compiler) parseBuiltin(frame *Frame, args []ssa.Value, callName string) (llvm.Value, error) { switch callName { case "append": src, err := c.parseExpr(frame, args[0]) if err != nil { return llvm.Value{}, err } elems, err := c.parseExpr(frame, args[1]) if err != nil { return llvm.Value{}, err } srcBuf := c.builder.CreateExtractValue(src, 0, "append.srcBuf") srcPtr := c.builder.CreateBitCast(srcBuf, c.i8ptrType, "append.srcPtr") srcLen := c.builder.CreateExtractValue(src, 1, "append.srcLen") srcCap := c.builder.CreateExtractValue(src, 2, "append.srcCap") elemsBuf := c.builder.CreateExtractValue(elems, 0, "append.elemsBuf") elemsPtr := c.builder.CreateBitCast(elemsBuf, c.i8ptrType, "append.srcPtr") elemsLen := c.builder.CreateExtractValue(elems, 1, "append.elemsLen") elemType := srcBuf.Type().ElementType() elemSize := llvm.ConstInt(c.uintptrType, c.targetData.TypeAllocSize(elemType), false) result := c.createRuntimeCall("sliceAppend", []llvm.Value{srcPtr, elemsPtr, srcLen, srcCap, elemsLen, elemSize}, "append.new") newPtr := c.builder.CreateExtractValue(result, 0, "append.newPtr") newBuf := c.builder.CreateBitCast(newPtr, srcBuf.Type(), "append.newBuf") newLen := c.builder.CreateExtractValue(result, 1, "append.newLen") newCap := c.builder.CreateExtractValue(result, 2, "append.newCap") newSlice := llvm.Undef(src.Type()) newSlice = c.builder.CreateInsertValue(newSlice, newBuf, 0, "") newSlice = c.builder.CreateInsertValue(newSlice, newLen, 1, "") newSlice = c.builder.CreateInsertValue(newSlice, newCap, 2, "") return newSlice, nil case "cap": value, err := c.parseExpr(frame, args[0]) if err != nil { return llvm.Value{}, err } switch args[0].Type().(type) { case *types.Slice: return c.builder.CreateExtractValue(value, 2, "cap"), nil default: return llvm.Value{}, errors.New("todo: cap: unknown type") } case "complex": r, err := c.parseExpr(frame, args[0]) if err != nil { return llvm.Value{}, err } i, err := c.parseExpr(frame, args[1]) if err != nil { return llvm.Value{}, err } t := args[0].Type().Underlying().(*types.Basic) var cplx llvm.Value switch t.Kind() { case types.Float32: cplx = llvm.Undef(llvm.VectorType(c.ctx.FloatType(), 2)) case types.Float64: cplx = llvm.Undef(llvm.VectorType(c.ctx.DoubleType(), 2)) default: return llvm.Value{}, errors.New("unsupported type in complex builtin: " + t.String()) } cplx = c.builder.CreateInsertElement(cplx, r, llvm.ConstInt(c.ctx.Int8Type(), 0, false), "") cplx = c.builder.CreateInsertElement(cplx, i, llvm.ConstInt(c.ctx.Int8Type(), 1, false), 
"") return cplx, nil case "copy": dst, err := c.parseExpr(frame, args[0]) if err != nil { return llvm.Value{}, err } src, err := c.parseExpr(frame, args[1]) if err != nil { return llvm.Value{}, err } dstLen := c.builder.CreateExtractValue(dst, 1, "copy.dstLen") srcLen := c.builder.CreateExtractValue(src, 1, "copy.srcLen") dstBuf := c.builder.CreateExtractValue(dst, 0, "copy.dstArray") srcBuf := c.builder.CreateExtractValue(src, 0, "copy.srcArray") elemType := dstBuf.Type().ElementType() dstBuf = c.builder.CreateBitCast(dstBuf, c.i8ptrType, "copy.dstPtr") srcBuf = c.builder.CreateBitCast(srcBuf, c.i8ptrType, "copy.srcPtr") elemSize := llvm.ConstInt(c.uintptrType, c.targetData.TypeAllocSize(elemType), false) return c.createRuntimeCall("sliceCopy", []llvm.Value{dstBuf, srcBuf, dstLen, srcLen, elemSize}, "copy.n"), nil case "delete": m, err := c.parseExpr(frame, args[0]) if err != nil { return llvm.Value{}, err } key, err := c.parseExpr(frame, args[1]) if err != nil { return llvm.Value{}, err } return llvm.Value{}, c.emitMapDelete(args[1].Type(), m, key) case "imag": cplx, err := c.parseExpr(frame, args[0]) if err != nil { return llvm.Value{}, err } index := llvm.ConstInt(c.ctx.Int32Type(), 1, false) return c.builder.CreateExtractElement(cplx, index, "imag"), nil case "len": value, err := c.parseExpr(frame, args[0]) if err != nil { return llvm.Value{}, err } var llvmLen llvm.Value switch args[0].Type().Underlying().(type) { case *types.Basic, *types.Slice: // string or slice llvmLen = c.builder.CreateExtractValue(value, 1, "len") case *types.Map: indices := []llvm.Value{ llvm.ConstInt(c.ctx.Int32Type(), 0, false), llvm.ConstInt(c.ctx.Int32Type(), 2, false), // hashmap.count } ptr := c.builder.CreateGEP(value, indices, "lenptr") llvmLen = c.builder.CreateLoad(ptr, "len") default: return llvm.Value{}, errors.New("todo: len: unknown type") } if c.targetData.TypeAllocSize(llvmLen.Type()) < c.targetData.TypeAllocSize(c.intType) { llvmLen = c.builder.CreateZExt(llvmLen, c.intType, "len.int") } return llvmLen, nil case "print", "println": for i, arg := range args { if i >= 1 && callName == "println" { c.createRuntimeCall("printspace", nil, "") } value, err := c.parseExpr(frame, arg) if err != nil { return llvm.Value{}, err } typ := arg.Type().Underlying() switch typ := typ.(type) { case *types.Basic: switch typ.Kind() { case types.String, types.UntypedString: c.createRuntimeCall("printstring", []llvm.Value{value}, "") case types.Uintptr: c.createRuntimeCall("printptr", []llvm.Value{value}, "") case types.UnsafePointer: ptrValue := c.builder.CreatePtrToInt(value, c.uintptrType, "") c.createRuntimeCall("printptr", []llvm.Value{ptrValue}, "") default: // runtime.print{int,uint}{8,16,32,64} if typ.Info()&types.IsInteger != 0 { name := "print" if typ.Info()&types.IsUnsigned != 0 { name += "uint" } else { name += "int" } name += strconv.FormatUint(c.targetData.TypeAllocSize(value.Type())*8, 10) c.createRuntimeCall(name, []llvm.Value{value}, "") } else if typ.Kind() == types.Bool { c.createRuntimeCall("printbool", []llvm.Value{value}, "") } else if typ.Kind() == types.Float32 { c.createRuntimeCall("printfloat32", []llvm.Value{value}, "") } else if typ.Kind() == types.Float64 { c.createRuntimeCall("printfloat64", []llvm.Value{value}, "") } else if typ.Kind() == types.Complex64 { c.createRuntimeCall("printcomplex64", []llvm.Value{value}, "") } else if typ.Kind() == types.Complex128 { c.createRuntimeCall("printcomplex128", []llvm.Value{value}, "") } else { return llvm.Value{}, errors.New("unknown basic arg 
type: " + typ.String()) } } case *types.Interface: c.createRuntimeCall("printitf", []llvm.Value{value}, "") case *types.Map: c.createRuntimeCall("printmap", []llvm.Value{value}, "") case *types.Pointer: ptrValue := c.builder.CreatePtrToInt(value, c.uintptrType, "") c.createRuntimeCall("printptr", []llvm.Value{ptrValue}, "") default: return llvm.Value{}, errors.New("unknown arg type: " + typ.String()) } } if callName == "println" { c.createRuntimeCall("printnl", nil, "") } return llvm.Value{}, nil // print() or println() returns void case "real": cplx, err := c.parseExpr(frame, args[0]) if err != nil { return llvm.Value{}, err } index := llvm.ConstInt(c.ctx.Int32Type(), 0, false) return c.builder.CreateExtractElement(cplx, index, "real"), nil case "recover": return c.createRuntimeCall("_recover", nil, ""), nil case "ssa:wrapnilchk": // TODO: do an actual nil check? return c.parseExpr(frame, args[0]) default: return llvm.Value{}, errors.New("todo: builtin: " + callName) } } func (c *Compiler) parseFunctionCall(frame *Frame, args []ssa.Value, llvmFn, context llvm.Value, blocking bool, parentHandle llvm.Value) (llvm.Value, error) { var params []llvm.Value if blocking { if parentHandle.IsNil() { // Started from 'go' statement. params = append(params, llvm.ConstNull(c.i8ptrType)) } else { // Blocking function calls another blocking function. params = append(params, parentHandle) } } for _, param := range args { val, err := c.parseExpr(frame, param) if err != nil { return llvm.Value{}, err } params = append(params, val) } if !context.IsNil() { // This function takes a context parameter. // Add it to the end of the parameter list. params = append(params, context) } if frame.blocking && llvmFn.Name() == "time.Sleep" { // Set task state to TASK_STATE_SLEEP and set the duration. c.createRuntimeCall("sleepTask", []llvm.Value{frame.taskHandle, params[0]}, "") // Yield to scheduler. continuePoint := c.builder.CreateCall(c.coroSuspendFunc, []llvm.Value{ llvm.ConstNull(c.ctx.TokenType()), llvm.ConstInt(c.ctx.Int1Type(), 0, false), }, "") wakeup := c.ctx.InsertBasicBlock(llvm.NextBasicBlock(c.builder.GetInsertBlock()), "task.wakeup") sw := c.builder.CreateSwitch(continuePoint, frame.suspendBlock, 2) sw.AddCase(llvm.ConstInt(c.ctx.Int8Type(), 0, false), wakeup) sw.AddCase(llvm.ConstInt(c.ctx.Int8Type(), 1, false), frame.cleanupBlock) c.builder.SetInsertPointAtEnd(wakeup) return llvm.Value{}, nil } result := c.createCall(llvmFn, params, "") if blocking && !parentHandle.IsNil() { // Calling a blocking function as a regular function call. // This is done by passing the current coroutine as a parameter to the // new coroutine and dropping the current coroutine from the scheduler // (with the TASK_STATE_CALL state). When the subroutine is finished, it // will reactivate the parent (this frame) in it's destroy function. c.createRuntimeCall("yieldToScheduler", []llvm.Value{result}, "") // Set task state to TASK_STATE_CALL. c.createRuntimeCall("waitForAsyncCall", []llvm.Value{frame.taskHandle}, "") // Yield to the scheduler. 
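	// llvm.coro.suspend returns an i8: 0 means resume, 1 means destroy, and the
	// default (-1) suspends, which is why the switch below uses
	// frame.suspendBlock as its default destination.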
continuePoint := c.builder.CreateCall(c.coroSuspendFunc, []llvm.Value{ llvm.ConstNull(c.ctx.TokenType()), llvm.ConstInt(c.ctx.Int1Type(), 0, false), }, "") resume := c.ctx.InsertBasicBlock(llvm.NextBasicBlock(c.builder.GetInsertBlock()), "task.callComplete") sw := c.builder.CreateSwitch(continuePoint, frame.suspendBlock, 2) sw.AddCase(llvm.ConstInt(c.ctx.Int8Type(), 0, false), resume) sw.AddCase(llvm.ConstInt(c.ctx.Int8Type(), 1, false), frame.cleanupBlock) c.builder.SetInsertPointAtEnd(resume) } return result, nil } func (c *Compiler) parseCall(frame *Frame, instr *ssa.CallCommon, parentHandle llvm.Value) (llvm.Value, error) { if instr.IsInvoke() { // Call an interface method with dynamic dispatch. itf, err := c.parseExpr(frame, instr.Value) // interface if err != nil { return llvm.Value{}, err } llvmFnType, err := c.getLLVMType(instr.Method.Type()) if err != nil { return llvm.Value{}, err } if c.ir.SignatureNeedsContext(instr.Method.Type().(*types.Signature)) { // This is somewhat of a hack. // getLLVMType() has created a closure type for us, but we don't // actually want a closure type as an interface call can never be a // closure call. So extract the function pointer type from the // closure. // This happens because somewhere the same function signature is // used in a closure or bound method. llvmFnType = llvmFnType.Subtypes()[1] } values := []llvm.Value{ itf, llvm.ConstInt(c.ctx.Int16Type(), uint64(c.ir.MethodNum(instr.Method)), false), } fn := c.createRuntimeCall("interfaceMethod", values, "invoke.func") fnCast := c.builder.CreateBitCast(fn, llvmFnType, "invoke.func.cast") receiverValue := c.builder.CreateExtractValue(itf, 1, "invoke.func.receiver") args := []llvm.Value{receiverValue} for _, arg := range instr.Args { val, err := c.parseExpr(frame, arg) if err != nil { return llvm.Value{}, err } args = append(args, val) } if c.ir.SignatureNeedsContext(instr.Method.Type().(*types.Signature)) { // This function takes an extra context parameter. An interface call // cannot also be a closure but we have to supply the nil pointer // anyway. args = append(args, llvm.ConstPointerNull(c.i8ptrType)) } // TODO: blocking methods (needs analysis) return c.createCall(fnCast, args, ""), nil } // Try to call the function directly for trivially static calls. if fn := instr.StaticCallee(); fn != nil { if fn.RelString(nil) == "device/arm.Asm" || fn.RelString(nil) == "device/avr.Asm" { // Magic function: insert inline assembly instead of calling it. 
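		// The argument must be a constant string; it is emitted verbatim as a
		// void inline assembly call with an empty constraint string and no
		// operands.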
fnType := llvm.FunctionType(c.ctx.VoidType(), []llvm.Type{}, false) asm := constant.StringVal(instr.Args[0].(*ssa.Const).Value) target := llvm.InlineAsm(fnType, asm, "", true, false, 0) return c.builder.CreateCall(target, nil, ""), nil } if fn.RelString(nil) == "device/arm.AsmFull" || fn.RelString(nil) == "device/avr.AsmFull" { asmString := constant.StringVal(instr.Args[0].(*ssa.Const).Value) registers := map[string]llvm.Value{} registerMap := instr.Args[1].(*ssa.MakeMap) for _, r := range *registerMap.Referrers() { switch r := r.(type) { case *ssa.DebugRef: // ignore case *ssa.MapUpdate: if r.Block() != registerMap.Block() { return llvm.Value{}, errors.New("register value map must be created in the same basic block") } key := constant.StringVal(r.Key.(*ssa.Const).Value) //println("value:", r.Value.(*ssa.MakeInterface).X.String()) value, err := c.parseExpr(frame, r.Value.(*ssa.MakeInterface).X) if err != nil { return llvm.Value{}, err } registers[key] = value case *ssa.Call: if r.Common() == instr { break } default: return llvm.Value{}, errors.New("don't know how to handle argument to inline assembly: " + r.String()) } } // TODO: handle dollar signs in asm string registerNumbers := map[string]int{} var err error argTypes := []llvm.Type{} args := []llvm.Value{} constraints := []string{} asmString = regexp.MustCompile("\\{[a-zA-Z]+\\}").ReplaceAllStringFunc(asmString, func(s string) string { // TODO: skip strings like {r4} etc. that look like ARM push/pop // instructions. name := s[1 : len(s)-1] if _, ok := registers[name]; !ok { if err == nil { err = errors.New("unknown register name: " + name) } return s } if _, ok := registerNumbers[name]; !ok { registerNumbers[name] = len(registerNumbers) argTypes = append(argTypes, registers[name].Type()) args = append(args, registers[name]) switch registers[name].Type().TypeKind() { case llvm.IntegerTypeKind: constraints = append(constraints, "r") case llvm.PointerTypeKind: constraints = append(constraints, "*m") default: err = errors.New("unknown type in inline assembly for value: " + name) return s } } return fmt.Sprintf("${%v}", registerNumbers[name]) }) if err != nil { return llvm.Value{}, err } fnType := llvm.FunctionType(c.ctx.VoidType(), argTypes, false) target := llvm.InlineAsm(fnType, asmString, strings.Join(constraints, ","), true, false, 0) return c.builder.CreateCall(target, args, ""), nil } targetFunc := c.ir.GetFunction(fn) if targetFunc.LLVMFn.IsNil() { return llvm.Value{}, errors.New("undefined function: " + targetFunc.LinkName()) } var context llvm.Value if c.ir.FunctionNeedsContext(targetFunc) { // This function call is to a (potential) closure, not a regular // function. See whether it is a closure and if so, call it as such. // Else, supply a dummy nil pointer as the last parameter. var err error if mkClosure, ok := instr.Value.(*ssa.MakeClosure); ok { // closure is {context, function pointer} closure, err := c.parseExpr(frame, mkClosure) if err != nil { return llvm.Value{}, err } context = c.builder.CreateExtractValue(closure, 0, "") } else { context, err = c.getZeroValue(c.i8ptrType) if err != nil { return llvm.Value{}, err } } } return c.parseFunctionCall(frame, instr.Args, targetFunc.LLVMFn, context, c.ir.IsBlocking(targetFunc), parentHandle) } // Builtin or function pointer. 
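	// What remains is either a call to a builtin (append, len, print, ...) or an
	// indirect call through a function pointer or closure value.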
	switch call := instr.Value.(type) {
	case *ssa.Builtin:
		return c.parseBuiltin(frame, instr.Args, call.Name())
	default: // function pointer
		value, err := c.parseExpr(frame, instr.Value)
		if err != nil {
			return llvm.Value{}, err
		}
		// TODO: blocking function pointers (needs analysis)
		var context llvm.Value
		if c.ir.SignatureNeedsContext(instr.Signature()) {
			// 'value' is a closure, not a raw function pointer.
			// Extract the function pointer and the context pointer.
			// closure: {context, function pointer}
			context = c.builder.CreateExtractValue(value, 0, "")
			value = c.builder.CreateExtractValue(value, 1, "")
		}
		return c.parseFunctionCall(frame, instr.Args, value, context, false, parentHandle)
	}
}

func (c *Compiler) emitBoundsCheck(frame *Frame, arrayLen, index llvm.Value, indexType types.Type) {
	if frame.fn.IsNoBounds() {
		// The //go:nobounds pragma was added to the function to avoid bounds
		// checking.
		return
	}
	// Sometimes, the index can be e.g. an uint8 or int8, and we have to
	// correctly extend that type.
	if index.Type().IntTypeWidth() < arrayLen.Type().IntTypeWidth() {
		if indexType.(*types.Basic).Info()&types.IsUnsigned == 0 {
			// Sign extend a signed index.
			index = c.builder.CreateSExt(index, arrayLen.Type(), "")
		} else {
			// Zero extend an unsigned index.
			index = c.builder.CreateZExt(index, arrayLen.Type(), "")
		}
	}
	// Optimize away trivial cases.
	// LLVM would do this anyway with interprocedural optimizations, but it
	// helps to see cases where bounds check elimination would really help.
	if index.IsConstant() && arrayLen.IsConstant() && !arrayLen.IsUndef() {
		index := index.SExtValue()
		arrayLen := arrayLen.SExtValue()
		if index >= 0 && index < arrayLen {
			return
		}
	}
	if index.Type().IntTypeWidth() > c.intType.IntTypeWidth() {
		// Index is too big for the regular bounds check. Use the one for int64.
		c.createRuntimeCall("lookupBoundsCheckLong", []llvm.Value{arrayLen, index}, "")
	} else {
		c.createRuntimeCall("lookupBoundsCheck", []llvm.Value{arrayLen, index}, "")
	}
}

func (c *Compiler) emitSliceBoundsCheck(frame *Frame, length, low, high llvm.Value) {
	if frame.fn.IsNoBounds() {
		// The //go:nobounds pragma was added to the function to avoid bounds
		// checking.
		return
	}
	if low.Type().IntTypeWidth() > 32 || high.Type().IntTypeWidth() > 32 {
		if low.Type().IntTypeWidth() < 64 {
			low = c.builder.CreateSExt(low, c.ctx.Int64Type(), "")
		}
		if high.Type().IntTypeWidth() < 64 {
			high = c.builder.CreateSExt(high, c.ctx.Int64Type(), "")
		}
		c.createRuntimeCall("sliceBoundsCheckLong", []llvm.Value{length, low, high}, "")
	} else {
		c.createRuntimeCall("sliceBoundsCheck", []llvm.Value{length, low, high}, "")
	}
}

func (c *Compiler) parseExpr(frame *Frame, expr ssa.Value) (llvm.Value, error) {
	if value, ok := frame.locals[expr]; ok {
		// Value is a local variable that has already been computed.
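		// An empty llvm.Value here usually means the value came from a CGo
		// wrapper that was intentionally skipped, so report it instead of
		// handing an invalid value to the caller.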
if value.IsNil() { return llvm.Value{}, errors.New("undefined local var (from cgo?)") } return value, nil } switch expr := expr.(type) { case *ssa.Alloc: typ, err := c.getLLVMType(expr.Type().Underlying().(*types.Pointer).Elem()) if err != nil { return llvm.Value{}, err } var buf llvm.Value if expr.Heap { // TODO: escape analysis size := llvm.ConstInt(c.uintptrType, c.targetData.TypeAllocSize(typ), false) buf = c.createRuntimeCall("alloc", []llvm.Value{size}, expr.Comment) buf = c.builder.CreateBitCast(buf, llvm.PointerType(typ, 0), "") } else { buf = c.builder.CreateAlloca(typ, expr.Comment) zero, err := c.getZeroValue(typ) if err != nil { return llvm.Value{}, err } c.builder.CreateStore(zero, buf) // zero-initialize var } return buf, nil case *ssa.BinOp: x, err := c.parseExpr(frame, expr.X) if err != nil { return llvm.Value{}, err } y, err := c.parseExpr(frame, expr.Y) if err != nil { return llvm.Value{}, err } return c.parseBinOp(expr.Op, expr.X.Type().Underlying(), x, y) case *ssa.Call: // Passing the current task here to the subroutine. It is only used when // the subroutine is blocking. return c.parseCall(frame, expr.Common(), frame.taskHandle) case *ssa.ChangeInterface: // Do not change between interface types: always use the underlying // (concrete) type in the type number of the interface. Every method // call on an interface will do a lookup which method to call. // This is different from how the official Go compiler works, because of // heap allocation and because it's easier to implement, see: // https://research.swtch.com/interfaces return c.parseExpr(frame, expr.X) case *ssa.ChangeType: x, err := c.parseExpr(frame, expr.X) if err != nil { return llvm.Value{}, err } // The only case when we need to bitcast is when casting between named // struct types, as those are actually different in LLVM. Let's just // bitcast all struct types for ease of use. if _, ok := expr.Type().Underlying().(*types.Struct); ok { llvmType, err := c.getLLVMType(expr.X.Type()) if err != nil { return llvm.Value{}, err } return c.builder.CreateBitCast(x, llvmType, "changetype"), nil } return x, nil case *ssa.Const: return c.parseConst(frame.fn.LinkName(), expr) case *ssa.Convert: x, err := c.parseExpr(frame, expr.X) if err != nil { return llvm.Value{}, err } return c.parseConvert(expr.X.Type(), expr.Type(), x) case *ssa.Extract: value, err := c.parseExpr(frame, expr.Tuple) if err != nil { return llvm.Value{}, err } result := c.builder.CreateExtractValue(value, expr.Index, "") return result, nil case *ssa.Field: value, err := c.parseExpr(frame, expr.X) if err != nil { return llvm.Value{}, err } result := c.builder.CreateExtractValue(value, expr.Field, "") return result, nil case *ssa.FieldAddr: val, err := c.parseExpr(frame, expr.X) if err != nil { return llvm.Value{}, err } indices := []llvm.Value{ llvm.ConstInt(c.ctx.Int32Type(), 0, false), llvm.ConstInt(c.ctx.Int32Type(), uint64(expr.Field), false), } return c.builder.CreateGEP(val, indices, ""), nil case *ssa.Function: fn := c.ir.GetFunction(expr) ptr := fn.LLVMFn if c.ir.FunctionNeedsContext(fn) { // Create closure for function pointer. // Closure is: {context, function pointer} ptr = c.ctx.ConstStruct([]llvm.Value{ llvm.ConstPointerNull(c.i8ptrType), ptr, }, false) } return ptr, nil case *ssa.Global: if strings.HasPrefix(expr.Name(), "__cgofn__cgo_") || strings.HasPrefix(expr.Name(), "_cgo_") { // Ignore CGo global variables which we don't use. 
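		// ir.ErrCGoWrapper is a soft error: callers such as parseInstr and the
		// *ssa.Store case check for it and simply skip the instruction.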
return llvm.Value{}, ir.ErrCGoWrapper } value := c.ir.GetGlobal(expr).LLVMGlobal if value.IsNil() { return llvm.Value{}, errors.New("global not found: " + c.ir.GetGlobal(expr).LinkName()) } return value, nil case *ssa.Index: array, err := c.parseExpr(frame, expr.X) if err != nil { return llvm.Value{}, err } index, err := c.parseExpr(frame, expr.Index) if err != nil { return llvm.Value{}, err } // Check bounds. arrayLen := expr.X.Type().(*types.Array).Len() arrayLenLLVM := llvm.ConstInt(c.lenType, uint64(arrayLen), false) c.emitBoundsCheck(frame, arrayLenLLVM, index, expr.Index.Type()) // Can't load directly from array (as index is non-constant), so have to // do it using an alloca+gep+load. alloca := c.builder.CreateAlloca(array.Type(), "") c.builder.CreateStore(array, alloca) zero := llvm.ConstInt(c.ctx.Int32Type(), 0, false) ptr := c.builder.CreateGEP(alloca, []llvm.Value{zero, index}, "") return c.builder.CreateLoad(ptr, ""), nil case *ssa.IndexAddr: val, err := c.parseExpr(frame, expr.X) if err != nil { return llvm.Value{}, err } index, err := c.parseExpr(frame, expr.Index) if err != nil { return llvm.Value{}, err } // Get buffer pointer and length var bufptr, buflen llvm.Value switch ptrTyp := expr.X.Type().Underlying().(type) { case *types.Pointer: typ := expr.X.Type().(*types.Pointer).Elem().Underlying() switch typ := typ.(type) { case *types.Array: bufptr = val buflen = llvm.ConstInt(c.lenType, uint64(typ.Len()), false) default: return llvm.Value{}, errors.New("todo: indexaddr: " + typ.String()) } case *types.Slice: bufptr = c.builder.CreateExtractValue(val, 0, "indexaddr.ptr") buflen = c.builder.CreateExtractValue(val, 1, "indexaddr.len") default: return llvm.Value{}, errors.New("todo: indexaddr: " + ptrTyp.String()) } // Bounds check. // LLVM optimizes this away in most cases. c.emitBoundsCheck(frame, buflen, index, expr.Index.Type()) switch expr.X.Type().Underlying().(type) { case *types.Pointer: indices := []llvm.Value{ llvm.ConstInt(c.ctx.Int32Type(), 0, false), index, } return c.builder.CreateGEP(bufptr, indices, ""), nil case *types.Slice: return c.builder.CreateGEP(bufptr, []llvm.Value{index}, ""), nil default: panic("unreachable") } case *ssa.Lookup: value, err := c.parseExpr(frame, expr.X) if err != nil { return llvm.Value{}, nil } index, err := c.parseExpr(frame, expr.Index) if err != nil { return llvm.Value{}, nil } switch xType := expr.X.Type().Underlying().(type) { case *types.Basic: // Value type must be a string, which is a basic type. if xType.Info()&types.IsString == 0 { panic("lookup on non-string?") } // Bounds check. // LLVM optimizes this away in most cases. 
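			// For a string, this loads a single byte from the backing array; the
			// *types.Map case below goes through the runtime hashmap lookup
			// instead.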
length, err := c.parseBuiltin(frame, []ssa.Value{expr.X}, "len") if err != nil { return llvm.Value{}, err // shouldn't happen } c.emitBoundsCheck(frame, length, index, expr.Index.Type()) // Lookup byte buf := c.builder.CreateExtractValue(value, 0, "") bufPtr := c.builder.CreateGEP(buf, []llvm.Value{index}, "") return c.builder.CreateLoad(bufPtr, ""), nil case *types.Map: valueType := expr.Type() if expr.CommaOk { valueType = valueType.(*types.Tuple).At(0).Type() } return c.emitMapLookup(xType.Key(), valueType, value, index, expr.CommaOk) default: panic("unknown lookup type: " + expr.String()) } case *ssa.MakeClosure: // A closure returns a function pointer with context: // {context, fp} return c.parseMakeClosure(frame, expr) case *ssa.MakeInterface: val, err := c.parseExpr(frame, expr.X) if err != nil { return llvm.Value{}, err } return c.parseMakeInterface(val, expr.X.Type(), "") case *ssa.MakeMap: mapType := expr.Type().Underlying().(*types.Map) llvmKeyType, err := c.getLLVMType(mapType.Key().Underlying()) if err != nil { return llvm.Value{}, err } llvmValueType, err := c.getLLVMType(mapType.Elem().Underlying()) if err != nil { return llvm.Value{}, err } keySize := c.targetData.TypeAllocSize(llvmKeyType) valueSize := c.targetData.TypeAllocSize(llvmValueType) llvmKeySize := llvm.ConstInt(c.ctx.Int8Type(), keySize, false) llvmValueSize := llvm.ConstInt(c.ctx.Int8Type(), valueSize, false) hashmap := c.createRuntimeCall("hashmapMake", []llvm.Value{llvmKeySize, llvmValueSize}, "") return hashmap, nil case *ssa.MakeSlice: sliceLen, err := c.parseExpr(frame, expr.Len) if err != nil { return llvm.Value{}, nil } sliceCap, err := c.parseExpr(frame, expr.Cap) if err != nil { return llvm.Value{}, nil } sliceType := expr.Type().Underlying().(*types.Slice) llvmElemType, err := c.getLLVMType(sliceType.Elem()) if err != nil { return llvm.Value{}, nil } elemSize := c.targetData.TypeAllocSize(llvmElemType) // Bounds checking. if !frame.fn.IsNoBounds() { c.createRuntimeCall("sliceBoundsCheckMake", []llvm.Value{sliceLen, sliceCap}, "") } // Allocate the backing array. // TODO: escape analysis elemSizeValue := llvm.ConstInt(c.uintptrType, elemSize, false) sliceCapCast, err := c.parseConvert(expr.Cap.Type(), types.Typ[types.Uintptr], sliceCap) if err != nil { return llvm.Value{}, err } sliceSize := c.builder.CreateBinOp(llvm.Mul, elemSizeValue, sliceCapCast, "makeslice.cap") slicePtr := c.createRuntimeCall("alloc", []llvm.Value{sliceSize}, "makeslice.buf") slicePtr = c.builder.CreateBitCast(slicePtr, llvm.PointerType(llvmElemType, 0), "makeslice.array") if c.targetData.TypeAllocSize(sliceLen.Type()) > c.targetData.TypeAllocSize(c.lenType) { sliceLen = c.builder.CreateTrunc(sliceLen, c.lenType, "") sliceCap = c.builder.CreateTrunc(sliceCap, c.lenType, "") } // Create the slice. 
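		// A slice value is the struct {ptr, len, cap}; fill those three fields in
		// an undef struct of that shape.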
slice := c.ctx.ConstStruct([]llvm.Value{ llvm.Undef(slicePtr.Type()), llvm.Undef(c.lenType), llvm.Undef(c.lenType), }, false) slice = c.builder.CreateInsertValue(slice, slicePtr, 0, "") slice = c.builder.CreateInsertValue(slice, sliceLen, 1, "") slice = c.builder.CreateInsertValue(slice, sliceCap, 2, "") return slice, nil case *ssa.Next: rangeVal := expr.Iter.(*ssa.Range).X llvmRangeVal, err := c.parseExpr(frame, rangeVal) if err != nil { return llvm.Value{}, err } it, err := c.parseExpr(frame, expr.Iter) if err != nil { return llvm.Value{}, err } if expr.IsString { return c.createRuntimeCall("stringNext", []llvm.Value{llvmRangeVal, it}, "range.next"), nil } else { // map llvmKeyType, err := c.getLLVMType(rangeVal.Type().(*types.Map).Key()) if err != nil { return llvm.Value{}, err } llvmValueType, err := c.getLLVMType(rangeVal.Type().(*types.Map).Elem()) if err != nil { return llvm.Value{}, err } mapKeyAlloca := c.builder.CreateAlloca(llvmKeyType, "range.key") mapKeyPtr := c.builder.CreateBitCast(mapKeyAlloca, c.i8ptrType, "range.keyptr") mapValueAlloca := c.builder.CreateAlloca(llvmValueType, "range.value") mapValuePtr := c.builder.CreateBitCast(mapValueAlloca, c.i8ptrType, "range.valueptr") ok := c.createRuntimeCall("hashmapNext", []llvm.Value{llvmRangeVal, it, mapKeyPtr, mapValuePtr}, "range.next") tuple := llvm.Undef(c.ctx.StructType([]llvm.Type{c.ctx.Int1Type(), llvmKeyType, llvmValueType}, false)) tuple = c.builder.CreateInsertValue(tuple, ok, 0, "") tuple = c.builder.CreateInsertValue(tuple, c.builder.CreateLoad(mapKeyAlloca, ""), 1, "") tuple = c.builder.CreateInsertValue(tuple, c.builder.CreateLoad(mapValueAlloca, ""), 2, "") return tuple, nil } case *ssa.Phi: t, err := c.getLLVMType(expr.Type()) if err != nil { return llvm.Value{}, err } phi := c.builder.CreatePHI(t, "") frame.phis = append(frame.phis, Phi{expr, phi}) return phi, nil case *ssa.Range: var iteratorType llvm.Type switch typ := expr.X.Type().Underlying().(type) { case *types.Basic: // string iteratorType = c.mod.GetTypeByName("runtime.stringIterator") case *types.Map: iteratorType = c.mod.GetTypeByName("runtime.hashmapIterator") default: panic("unknown type in range: " + typ.String()) } it := c.builder.CreateAlloca(iteratorType, "range.it") zero, err := c.getZeroValue(iteratorType) if err != nil { return llvm.Value{}, nil } c.builder.CreateStore(zero, it) return it, nil case *ssa.Slice: if expr.Max != nil { return llvm.Value{}, errors.New("todo: full slice expressions (with max): " + expr.Type().String()) } value, err := c.parseExpr(frame, expr.X) if err != nil { return llvm.Value{}, err } var low, high llvm.Value if expr.Low == nil { low = llvm.ConstInt(c.intType, 0, false) } else { low, err = c.parseExpr(frame, expr.Low) if err != nil { return llvm.Value{}, nil } } if expr.High != nil { high, err = c.parseExpr(frame, expr.High) if err != nil { return llvm.Value{}, nil } } switch typ := expr.X.Type().Underlying().(type) { case *types.Pointer: // pointer to array // slice an array length := typ.Elem().(*types.Array).Len() llvmLen := llvm.ConstInt(c.lenType, uint64(length), false) llvmLenInt := llvm.ConstInt(c.intType, uint64(length), false) if high.IsNil() { high = llvmLenInt } indices := []llvm.Value{ llvm.ConstInt(c.ctx.Int32Type(), 0, false), low, } slicePtr := c.builder.CreateGEP(value, indices, "slice.ptr") sliceLen := c.builder.CreateSub(high, low, "slice.len") sliceCap := c.builder.CreateSub(llvmLenInt, low, "slice.cap") // This check is optimized away in most cases. 
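			// low defaults to 0 and high to the array length before the bounds
			// check below runs.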
c.emitSliceBoundsCheck(frame, llvmLen, low, high) if c.targetData.TypeAllocSize(sliceLen.Type()) > c.targetData.TypeAllocSize(c.lenType) { sliceLen = c.builder.CreateTrunc(sliceLen, c.lenType, "") sliceCap = c.builder.CreateTrunc(sliceCap, c.lenType, "") } slice := c.ctx.ConstStruct([]llvm.Value{ llvm.Undef(slicePtr.Type()), llvm.Undef(c.lenType), llvm.Undef(c.lenType), }, false) slice = c.builder.CreateInsertValue(slice, slicePtr, 0, "") slice = c.builder.CreateInsertValue(slice, sliceLen, 1, "") slice = c.builder.CreateInsertValue(slice, sliceCap, 2, "") return slice, nil case *types.Slice: // slice a slice oldPtr := c.builder.CreateExtractValue(value, 0, "") oldLen := c.builder.CreateExtractValue(value, 1, "") oldCap := c.builder.CreateExtractValue(value, 2, "") if high.IsNil() { high = oldLen } c.emitSliceBoundsCheck(frame, oldLen, low, high) if c.targetData.TypeAllocSize(low.Type()) > c.targetData.TypeAllocSize(c.lenType) { low = c.builder.CreateTrunc(low, c.lenType, "") } if c.targetData.TypeAllocSize(high.Type()) > c.targetData.TypeAllocSize(c.lenType) { high = c.builder.CreateTrunc(high, c.lenType, "") } newPtr := c.builder.CreateGEP(oldPtr, []llvm.Value{low}, "") newLen := c.builder.CreateSub(high, low, "") newCap := c.builder.CreateSub(oldCap, low, "") slice := c.ctx.ConstStruct([]llvm.Value{ llvm.Undef(newPtr.Type()), llvm.Undef(c.lenType), llvm.Undef(c.lenType), }, false) slice = c.builder.CreateInsertValue(slice, newPtr, 0, "") slice = c.builder.CreateInsertValue(slice, newLen, 1, "") slice = c.builder.CreateInsertValue(slice, newCap, 2, "") return slice, nil case *types.Basic: if typ.Info()&types.IsString == 0 { return llvm.Value{}, errors.New("unknown slice type: " + typ.String()) } // slice a string oldPtr := c.builder.CreateExtractValue(value, 0, "") oldLen := c.builder.CreateExtractValue(value, 1, "") if high.IsNil() { high = oldLen } c.emitSliceBoundsCheck(frame, oldLen, low, high) newPtr := c.builder.CreateGEP(oldPtr, []llvm.Value{low}, "") newLen := c.builder.CreateSub(high, low, "") str, err := c.getZeroValue(c.mod.GetTypeByName("runtime._string")) if err != nil { return llvm.Value{}, err } str = c.builder.CreateInsertValue(str, newPtr, 0, "") str = c.builder.CreateInsertValue(str, newLen, 1, "") return str, nil default: return llvm.Value{}, errors.New("unknown slice type: " + typ.String()) } case *ssa.TypeAssert: itf, err := c.parseExpr(frame, expr.X) if err != nil { return llvm.Value{}, err } assertedType, err := c.getLLVMType(expr.AssertedType) if err != nil { return llvm.Value{}, err } valueNil, err := c.getZeroValue(assertedType) if err != nil { return llvm.Value{}, err } actualTypeNum := c.builder.CreateExtractValue(itf, 0, "interface.type") commaOk := llvm.Value{} if itf, ok := expr.AssertedType.Underlying().(*types.Interface); ok { // Type assert on interface type. // This is slightly non-trivial: at runtime the list of methods // needs to be checked to see whether it implements the interface. // At the same time, the interface value itself is unchanged. itfTypeNum := c.ir.InterfaceNum(itf) itfTypeNumValue := llvm.ConstInt(c.ctx.Int16Type(), uint64(itfTypeNum), false) commaOk = c.createRuntimeCall("interfaceImplements", []llvm.Value{actualTypeNum, itfTypeNumValue}, "") } else { // Type assert on concrete type. // This is easy: just compare the type number. assertedTypeNum, typeExists := c.ir.TypeNum(expr.AssertedType) if !typeExists { // Static analysis has determined this type assert will never apply. 
// Using undef here so that LLVM knows we'll never get here and // can optimize accordingly. undef := llvm.Undef(assertedType) commaOk := llvm.ConstInt(c.ctx.Int1Type(), 0, false) if expr.CommaOk { return c.ctx.ConstStruct([]llvm.Value{undef, commaOk}, false), nil } else { c.createRuntimeCall("interfaceTypeAssert", []llvm.Value{commaOk}, "") return undef, nil } } if assertedTypeNum >= 1<<16 { return llvm.Value{}, errors.New("interface typecodes do not fit in a 16-bit integer") } assertedTypeNumValue := llvm.ConstInt(c.ctx.Int16Type(), uint64(assertedTypeNum), false) commaOk = c.builder.CreateICmp(llvm.IntEQ, assertedTypeNumValue, actualTypeNum, "") } // Add 2 new basic blocks (that should get optimized away): one for the // 'ok' case and one for all instructions following this type assert. // This is necessary because we need to insert the casted value or the // nil value based on whether the assert was successful. Casting before // this check tells LLVM that it can use this value and may // speculatively dereference pointers before the check. This can lead to // a miscompilation resulting in a segfault at runtime. // Additionally, this is even required by the Go spec: a failed // typeassert should return a zero value, not an incorrectly casted // value. prevBlock := c.builder.GetInsertBlock() okBlock := c.ctx.AddBasicBlock(frame.fn.LLVMFn, "typeassert.ok") nextBlock := c.ctx.AddBasicBlock(frame.fn.LLVMFn, "typeassert.next") frame.blockExits[frame.currentBlock] = nextBlock // adjust outgoing block for phi nodes c.builder.CreateCondBr(commaOk, okBlock, nextBlock) // Retrieve the value from the interface if the type assert was // successful. c.builder.SetInsertPointAtEnd(okBlock) var valueOk llvm.Value if _, ok := expr.AssertedType.Underlying().(*types.Interface); ok { // Type assert on interface type. Easy: just return the same // interface value. valueOk = itf } else { // Type assert on concrete type. Extract the underlying type from // the interface (but only after checking it matches). valuePtr := c.builder.CreateExtractValue(itf, 1, "typeassert.value.ptr") if c.targetData.TypeAllocSize(assertedType) > c.targetData.TypeAllocSize(c.i8ptrType) { // Value was stored in an allocated buffer, load it from there. valuePtrCast := c.builder.CreateBitCast(valuePtr, llvm.PointerType(assertedType, 0), "") valueOk = c.builder.CreateLoad(valuePtrCast, "typeassert.value.ok") } else { // Value was stored directly in the interface. switch assertedType.TypeKind() { case llvm.IntegerTypeKind: valueOk = c.builder.CreatePtrToInt(valuePtr, assertedType, "typeassert.value.ok") case llvm.PointerTypeKind: valueOk = c.builder.CreateBitCast(valuePtr, assertedType, "typeassert.value.ok") case llvm.StructTypeKind: // A bitcast would be useful here, but bitcast doesn't allow // aggregate types. So we'll bitcast it using an alloca. // Hopefully this will get optimized away. mem := c.builder.CreateAlloca(c.i8ptrType, "") c.builder.CreateStore(valuePtr, mem) memStructPtr := c.builder.CreateBitCast(mem, llvm.PointerType(assertedType, 0), "") valueOk = c.builder.CreateLoad(memStructPtr, "typeassert.value.ok") default: return llvm.Value{}, errors.New("todo: typeassert: bitcast small types") } } } c.builder.CreateBr(nextBlock) // Continue after the if statement. 
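	// nextBlock is reached from both paths; the phi below picks the zero value
	// when the assert failed (coming from prevBlock) and the extracted value
	// when it succeeded (coming from okBlock).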
c.builder.SetInsertPointAtEnd(nextBlock) phi := c.builder.CreatePHI(assertedType, "typeassert.value") phi.AddIncoming([]llvm.Value{valueNil, valueOk}, []llvm.BasicBlock{prevBlock, okBlock}) if expr.CommaOk { tuple := c.ctx.ConstStruct([]llvm.Value{llvm.Undef(assertedType), llvm.Undef(c.ctx.Int1Type())}, false) // create empty tuple tuple = c.builder.CreateInsertValue(tuple, phi, 0, "") // insert value tuple = c.builder.CreateInsertValue(tuple, commaOk, 1, "") // insert 'comma ok' boolean return tuple, nil } else { // This is kind of dirty as the branch above becomes mostly useless, // but hopefully this gets optimized away. c.createRuntimeCall("interfaceTypeAssert", []llvm.Value{commaOk}, "") return phi, nil } case *ssa.UnOp: return c.parseUnOp(frame, expr) default: return llvm.Value{}, errors.New("todo: unknown expression: " + expr.String()) } } func (c *Compiler) parseBinOp(op token.Token, typ types.Type, x, y llvm.Value) (llvm.Value, error) { switch typ := typ.(type) { case *types.Basic: if typ.Info()&types.IsInteger != 0 { // Operations on integers signed := typ.Info()&types.IsUnsigned == 0 switch op { case token.ADD: // + return c.builder.CreateAdd(x, y, ""), nil case token.SUB: // - return c.builder.CreateSub(x, y, ""), nil case token.MUL: // * return c.builder.CreateMul(x, y, ""), nil case token.QUO: // / if signed { return c.builder.CreateSDiv(x, y, ""), nil } else { return c.builder.CreateUDiv(x, y, ""), nil } case token.REM: // % if signed { return c.builder.CreateSRem(x, y, ""), nil } else { return c.builder.CreateURem(x, y, ""), nil } case token.AND: // & return c.builder.CreateAnd(x, y, ""), nil case token.OR: // | return c.builder.CreateOr(x, y, ""), nil case token.XOR: // ^ return c.builder.CreateXor(x, y, ""), nil case token.SHL, token.SHR: sizeX := c.targetData.TypeAllocSize(x.Type()) sizeY := c.targetData.TypeAllocSize(y.Type()) if sizeX > sizeY { // x and y must have equal sizes, make Y bigger in this case. // y is unsigned, this has been checked by the Go type checker. y = c.builder.CreateZExt(y, x.Type(), "") } else if sizeX < sizeY { // What about shifting more than the integer width? // I'm not entirely sure what the Go spec is on that, but as // Intel CPUs have undefined behavior when shifting more // than the integer width I'm assuming it is also undefined // in Go. y = c.builder.CreateTrunc(y, x.Type(), "") } switch op { case token.SHL: // << return c.builder.CreateShl(x, y, ""), nil case token.SHR: // >> if signed { return c.builder.CreateAShr(x, y, ""), nil } else { return c.builder.CreateLShr(x, y, ""), nil } default: panic("unreachable") } case token.EQL: // == return c.builder.CreateICmp(llvm.IntEQ, x, y, ""), nil case token.NEQ: // != return c.builder.CreateICmp(llvm.IntNE, x, y, ""), nil case token.AND_NOT: // &^ // Go specific. 
Calculate "and not" with x & (~y) inv := c.builder.CreateNot(y, "") // ~y return c.builder.CreateAnd(x, inv, ""), nil case token.LSS: // < if signed { return c.builder.CreateICmp(llvm.IntSLT, x, y, ""), nil } else { return c.builder.CreateICmp(llvm.IntULT, x, y, ""), nil } case token.LEQ: // <= if signed { return c.builder.CreateICmp(llvm.IntSLE, x, y, ""), nil } else { return c.builder.CreateICmp(llvm.IntULE, x, y, ""), nil } case token.GTR: // > if signed { return c.builder.CreateICmp(llvm.IntSGT, x, y, ""), nil } else { return c.builder.CreateICmp(llvm.IntUGT, x, y, ""), nil } case token.GEQ: // >= if signed { return c.builder.CreateICmp(llvm.IntSGE, x, y, ""), nil } else { return c.builder.CreateICmp(llvm.IntUGE, x, y, ""), nil } default: return llvm.Value{}, errors.New("todo: binop on integer: " + op.String()) } } else if typ.Info()&types.IsFloat != 0 { // Operations on floats switch op { case token.ADD: return c.builder.CreateFAdd(x, y, ""), nil case token.SUB: // - return c.builder.CreateFSub(x, y, ""), nil case token.MUL: // * return c.builder.CreateFMul(x, y, ""), nil case token.QUO: // / return c.builder.CreateFDiv(x, y, ""), nil case token.REM: // % return c.builder.CreateFRem(x, y, ""), nil case token.EQL: // == return c.builder.CreateFCmp(llvm.FloatOEQ, x, y, ""), nil case token.NEQ: // != return c.builder.CreateFCmp(llvm.FloatONE, x, y, ""), nil case token.LSS: // < return c.builder.CreateFCmp(llvm.FloatOLT, x, y, ""), nil case token.LEQ: // <= return c.builder.CreateFCmp(llvm.FloatOLE, x, y, ""), nil case token.GTR: // > return c.builder.CreateFCmp(llvm.FloatOGT, x, y, ""), nil case token.GEQ: // >= return c.builder.CreateFCmp(llvm.FloatOGE, x, y, ""), nil default: return llvm.Value{}, errors.New("todo: binop on float: " + op.String()) } } else if typ.Info()&types.IsBoolean != 0 { // Operations on booleans switch op { case token.EQL: // == return c.builder.CreateICmp(llvm.IntEQ, x, y, ""), nil case token.NEQ: // != return c.builder.CreateICmp(llvm.IntNE, x, y, ""), nil default: return llvm.Value{}, errors.New("todo: binop on boolean: " + op.String()) } } else if typ.Kind() == types.UnsafePointer { // Operations on pointers switch op { case token.EQL: // == return c.builder.CreateICmp(llvm.IntEQ, x, y, ""), nil case token.NEQ: // != return c.builder.CreateICmp(llvm.IntNE, x, y, ""), nil default: return llvm.Value{}, errors.New("todo: binop on pointer: " + op.String()) } } else if typ.Info()&types.IsString != 0 { // Operations on strings switch op { case token.ADD: return c.createRuntimeCall("stringConcat", []llvm.Value{x, y}, ""), nil case token.EQL, token.NEQ: // ==, != result := c.createRuntimeCall("stringEqual", []llvm.Value{x, y}, "") if op == token.NEQ { result = c.builder.CreateNot(result, "") } return result, nil default: return llvm.Value{}, errors.New("todo: binop on string: " + op.String()) } } else { return llvm.Value{}, errors.New("todo: unknown basic type in binop: " + typ.String()) } case *types.Signature: if c.ir.SignatureNeedsContext(typ) { // This is a closure, not a function pointer. Get the underlying // function pointer. // This is safe: function pointers are generally not comparable // against each other, only against nil. So one or both has to be // nil, so we can ignore the contents of the closure. 
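			// Compare only the function pointer halves of the two closure values.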
x = c.builder.CreateExtractValue(x, 1, "") y = c.builder.CreateExtractValue(y, 1, "") } switch op { case token.EQL: // == return c.builder.CreateICmp(llvm.IntEQ, x, y, ""), nil case token.NEQ: // != return c.builder.CreateICmp(llvm.IntNE, x, y, ""), nil default: return llvm.Value{}, errors.New("binop on signature: " + op.String()) } case *types.Interface: switch op { case token.EQL, token.NEQ: // ==, != result := c.createRuntimeCall("interfaceEqual", []llvm.Value{x, y}, "") if op == token.NEQ { result = c.builder.CreateNot(result, "") } return result, nil default: return llvm.Value{}, errors.New("binop on interface: " + op.String()) } case *types.Pointer: switch op { case token.EQL: // == return c.builder.CreateICmp(llvm.IntEQ, x, y, ""), nil case token.NEQ: // != return c.builder.CreateICmp(llvm.IntNE, x, y, ""), nil default: return llvm.Value{}, errors.New("todo: binop on pointer: " + op.String()) } case *types.Slice: // Slices are in general not comparable, but can be compared against // nil. Assume at least one of them is nil to make the code easier. xPtr := c.builder.CreateExtractValue(x, 0, "") yPtr := c.builder.CreateExtractValue(y, 0, "") switch op { case token.EQL: // == return c.builder.CreateICmp(llvm.IntEQ, xPtr, yPtr, ""), nil case token.NEQ: // != return c.builder.CreateICmp(llvm.IntNE, xPtr, yPtr, ""), nil default: return llvm.Value{}, errors.New("todo: binop on slice: " + op.String()) } case *types.Struct: // Compare each struct field and combine the result. From the spec: // Struct values are comparable if all their fields are comparable. // Two struct values are equal if their corresponding non-blank // fields are equal. result := llvm.ConstInt(c.ctx.Int1Type(), 1, true) for i := 0; i < typ.NumFields(); i++ { if typ.Field(i).Name() == "_" { // skip blank fields continue } fieldType := typ.Field(i).Type() xField := c.builder.CreateExtractValue(x, i, "") yField := c.builder.CreateExtractValue(y, i, "") fieldEqual, err := c.parseBinOp(token.EQL, fieldType, xField, yField) if err != nil { return llvm.Value{}, err } result = c.builder.CreateAnd(result, fieldEqual, "") } switch op { case token.EQL: // == return result, nil case token.NEQ: // != return c.builder.CreateNot(result, ""), nil default: return llvm.Value{}, errors.New("unknown: binop on struct: " + op.String()) } return result, nil default: return llvm.Value{}, errors.New("todo: binop type: " + typ.String()) } } func (c *Compiler) parseConst(prefix string, expr *ssa.Const) (llvm.Value, error) { switch typ := expr.Type().Underlying().(type) { case *types.Basic: llvmType, err := c.getLLVMType(typ) if err != nil { return llvm.Value{}, err } if typ.Info()&types.IsBoolean != 0 { b := constant.BoolVal(expr.Value) n := uint64(0) if b { n = 1 } return llvm.ConstInt(llvmType, n, false), nil } else if typ.Info()&types.IsString != 0 { str := constant.StringVal(expr.Value) strLen := llvm.ConstInt(c.lenType, uint64(len(str)), false) objname := prefix + "$string" global := llvm.AddGlobal(c.mod, llvm.ArrayType(c.ctx.Int8Type(), len(str)), objname) global.SetInitializer(c.ctx.ConstString(str, false)) global.SetLinkage(llvm.InternalLinkage) global.SetGlobalConstant(true) global.SetUnnamedAddr(true) zero := llvm.ConstInt(c.ctx.Int32Type(), 0, false) strPtr := c.builder.CreateInBoundsGEP(global, []llvm.Value{zero, zero}, "") strObj := llvm.ConstNamedStruct(c.mod.GetTypeByName("runtime._string"), []llvm.Value{strPtr, strLen}) return strObj, nil } else if typ.Kind() == types.UnsafePointer { if !expr.IsNil() { value, _ := 
constant.Uint64Val(expr.Value) return llvm.ConstIntToPtr(llvm.ConstInt(c.uintptrType, value, false), c.i8ptrType), nil } return llvm.ConstNull(c.i8ptrType), nil } else if typ.Info()&types.IsUnsigned != 0 { n, _ := constant.Uint64Val(expr.Value) return llvm.ConstInt(llvmType, n, false), nil } else if typ.Info()&types.IsInteger != 0 { // signed n, _ := constant.Int64Val(expr.Value) return llvm.ConstInt(llvmType, uint64(n), true), nil } else if typ.Info()&types.IsFloat != 0 { n, _ := constant.Float64Val(expr.Value) return llvm.ConstFloat(llvmType, n), nil } else if typ.Kind() == types.Complex128 { r, err := c.parseConst(prefix, ssa.NewConst(constant.Real(expr.Value), types.Typ[types.Float64])) if err != nil { return llvm.Value{}, err } i, err := c.parseConst(prefix, ssa.NewConst(constant.Imag(expr.Value), types.Typ[types.Float64])) if err != nil { return llvm.Value{}, err } cplx := llvm.Undef(llvm.VectorType(c.ctx.DoubleType(), 2)) cplx = c.builder.CreateInsertValue(cplx, r, 0, "") cplx = c.builder.CreateInsertValue(cplx, i, 1, "") return cplx, nil } else { return llvm.Value{}, errors.New("todo: unknown constant: " + expr.String()) } case *types.Signature: if expr.Value != nil { return llvm.Value{}, errors.New("non-nil signature constant") } sig, err := c.getLLVMType(expr.Type()) if err != nil { return llvm.Value{}, err } return c.getZeroValue(sig) case *types.Interface: if expr.Value != nil { return llvm.Value{}, errors.New("non-nil interface constant") } // Create a generic nil interface with no dynamic type (typecode=0). fields := []llvm.Value{ llvm.ConstInt(c.ctx.Int16Type(), 0, false), llvm.ConstPointerNull(c.i8ptrType), } itf := llvm.ConstNamedStruct(c.mod.GetTypeByName("runtime._interface"), fields) return itf, nil case *types.Pointer: if expr.Value != nil { return llvm.Value{}, errors.New("non-nil pointer constant") } llvmType, err := c.getLLVMType(typ) if err != nil { return llvm.Value{}, err } return llvm.ConstPointerNull(llvmType), nil case *types.Slice: if expr.Value != nil { return llvm.Value{}, errors.New("non-nil slice constant") } elemType, err := c.getLLVMType(typ.Elem()) if err != nil { return llvm.Value{}, err } llvmPtr := llvm.ConstPointerNull(llvm.PointerType(elemType, 0)) llvmLen := llvm.ConstInt(c.lenType, 0, false) slice := c.ctx.ConstStruct([]llvm.Value{ llvmPtr, // backing array llvmLen, // len llvmLen, // cap }, false) return slice, nil default: return llvm.Value{}, errors.New("todo: unknown constant: " + expr.String()) } } func (c *Compiler) parseConvert(typeFrom, typeTo types.Type, value llvm.Value) (llvm.Value, error) { llvmTypeFrom := value.Type() llvmTypeTo, err := c.getLLVMType(typeTo) if err != nil { return llvm.Value{}, err } // Conversion between unsafe.Pointer and uintptr. isPtrFrom := isPointer(typeFrom.Underlying()) isPtrTo := isPointer(typeTo.Underlying()) if isPtrFrom && !isPtrTo { return c.builder.CreatePtrToInt(value, llvmTypeTo, ""), nil } else if !isPtrFrom && isPtrTo { return c.builder.CreateIntToPtr(value, llvmTypeTo, ""), nil } // Conversion between pointers and unsafe.Pointer. if isPtrFrom && isPtrTo { return c.builder.CreateBitCast(value, llvmTypeTo, ""), nil } switch typeTo := typeTo.Underlying().(type) { case *types.Basic: sizeFrom := c.targetData.TypeAllocSize(llvmTypeFrom) if typeTo.Info()&types.IsString != 0 { switch typeFrom := typeFrom.Underlying().(type) { case *types.Basic: // Assume a Unicode code point, as that is the only possible // value here. // Cast to an i32 value as expected by // runtime.stringFromUnicode. 
				if sizeFrom > 4 {
					value = c.builder.CreateTrunc(value, c.ctx.Int32Type(), "")
				} else if sizeFrom < 4 && typeTo.Info()&types.IsUnsigned != 0 {
					value = c.builder.CreateZExt(value, c.ctx.Int32Type(), "")
				} else if sizeFrom < 4 {
					value = c.builder.CreateSExt(value, c.ctx.Int32Type(), "")
				}
				return c.createRuntimeCall("stringFromUnicode", []llvm.Value{value}, ""), nil
			case *types.Slice:
				switch typeFrom.Elem().(*types.Basic).Kind() {
				case types.Byte:
					return c.createRuntimeCall("stringFromBytes", []llvm.Value{value}, ""), nil
				default:
					return llvm.Value{}, errors.New("todo: convert to string: " + typeFrom.String())
				}
			default:
				return llvm.Value{}, errors.New("todo: convert to string: " + typeFrom.String())
			}
		}

		typeFrom := typeFrom.Underlying().(*types.Basic)
		sizeTo := c.targetData.TypeAllocSize(llvmTypeTo)

		if typeFrom.Info()&types.IsInteger != 0 && typeTo.Info()&types.IsInteger != 0 {
			// Conversion between two integers.
			if sizeFrom > sizeTo {
				return c.builder.CreateTrunc(value, llvmTypeTo, ""), nil
			} else if typeTo.Info()&types.IsUnsigned != 0 { // if unsigned
				return c.builder.CreateZExt(value, llvmTypeTo, ""), nil
			} else { // if signed
				return c.builder.CreateSExt(value, llvmTypeTo, ""), nil
			}
		}

		if typeFrom.Info()&types.IsFloat != 0 && typeTo.Info()&types.IsFloat != 0 {
			// Conversion between two floats.
			if sizeFrom > sizeTo {
				return c.builder.CreateFPTrunc(value, llvmTypeTo, ""), nil
			} else if sizeFrom < sizeTo {
				return c.builder.CreateFPExt(value, llvmTypeTo, ""), nil
			} else {
				return value, nil
			}
		}

		if typeFrom.Info()&types.IsFloat != 0 && typeTo.Info()&types.IsInteger != 0 {
			// Conversion from float to int.
			if typeTo.Info()&types.IsUnsigned != 0 { // to unsigned int
				return c.builder.CreateFPToUI(value, llvmTypeTo, ""), nil
			} else { // to signed int
				return c.builder.CreateFPToSI(value, llvmTypeTo, ""), nil
			}
		}

		if typeFrom.Info()&types.IsInteger != 0 && typeTo.Info()&types.IsFloat != 0 {
			// Conversion from int to float.
			if typeFrom.Info()&types.IsUnsigned != 0 { // from unsigned int
				return c.builder.CreateUIToFP(value, llvmTypeTo, ""), nil
			} else { // from signed int
				return c.builder.CreateSIToFP(value, llvmTypeTo, ""), nil
			}
		}

		if typeFrom.Kind() == types.Complex128 && typeTo.Kind() == types.Complex64 {
			// Conversion from complex128 to complex64.
			r := c.builder.CreateExtractElement(value, llvm.ConstInt(c.ctx.Int32Type(), 0, false), "real.f64")
			i := c.builder.CreateExtractElement(value, llvm.ConstInt(c.ctx.Int32Type(), 1, false), "imag.f64")
			r = c.builder.CreateFPTrunc(r, c.ctx.FloatType(), "real.f32")
			i = c.builder.CreateFPTrunc(i, c.ctx.FloatType(), "imag.f32")
			cplx := llvm.Undef(llvm.VectorType(c.ctx.FloatType(), 2))
			cplx = c.builder.CreateInsertElement(cplx, r, llvm.ConstInt(c.ctx.Int8Type(), 0, false), "")
			cplx = c.builder.CreateInsertElement(cplx, i, llvm.ConstInt(c.ctx.Int8Type(), 1, false), "")
			return cplx, nil
		}

		if typeFrom.Kind() == types.Complex64 && typeTo.Kind() == types.Complex128 {
			// Conversion from complex64 to complex128.
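			// Pull the two float32 lanes out of the vector, extend them to
			// float64, and rebuild a <2 x double> vector.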
r := c.builder.CreateExtractElement(value, llvm.ConstInt(c.ctx.Int32Type(), 0, false), "real.f32") i := c.builder.CreateExtractElement(value, llvm.ConstInt(c.ctx.Int32Type(), 1, false), "imag.f32") r = c.builder.CreateFPExt(r, c.ctx.DoubleType(), "real.f64") i = c.builder.CreateFPExt(i, c.ctx.DoubleType(), "imag.f64") cplx := llvm.Undef(llvm.VectorType(c.ctx.DoubleType(), 2)) cplx = c.builder.CreateInsertElement(cplx, r, llvm.ConstInt(c.ctx.Int8Type(), 0, false), "") cplx = c.builder.CreateInsertElement(cplx, i, llvm.ConstInt(c.ctx.Int8Type(), 1, false), "") return cplx, nil } return llvm.Value{}, errors.New("todo: convert: basic non-integer type: " + typeFrom.String() + " -> " + typeTo.String()) case *types.Slice: if basic, ok := typeFrom.(*types.Basic); !ok || basic.Info()&types.IsString == 0 { panic("can only convert from a string to a slice") } elemType := typeTo.Elem().Underlying().(*types.Basic) // must be byte or rune switch elemType.Kind() { case types.Byte: return c.createRuntimeCall("stringToBytes", []llvm.Value{value}, ""), nil default: return llvm.Value{}, errors.New("todo: convert from string: " + elemType.String()) } default: return llvm.Value{}, errors.New("todo: convert " + typeTo.String() + " <- " + typeFrom.String()) } } func (c *Compiler) parseMakeClosure(frame *Frame, expr *ssa.MakeClosure) (llvm.Value, error) { if len(expr.Bindings) == 0 { panic("unexpected: MakeClosure without bound variables") } f := c.ir.GetFunction(expr.Fn.(*ssa.Function)) if !c.ir.FunctionNeedsContext(f) { // Maybe AnalyseFunctionPointers didn't run? panic("MakeClosure on function signature without context") } // Collect all bound variables. boundVars := make([]llvm.Value, 0, len(expr.Bindings)) boundVarTypes := make([]llvm.Type, 0, len(expr.Bindings)) for _, binding := range expr.Bindings { // The context stores the bound variables. llvmBoundVar, err := c.parseExpr(frame, binding) if err != nil { return llvm.Value{}, err } boundVars = append(boundVars, llvmBoundVar) boundVarTypes = append(boundVarTypes, llvmBoundVar.Type()) } contextType := c.ctx.StructType(boundVarTypes, false) // Allocate memory for the context. contextAlloc := llvm.Value{} contextHeapAlloc := llvm.Value{} if c.targetData.TypeAllocSize(contextType) <= c.targetData.TypeAllocSize(c.i8ptrType) { // Context fits in a pointer - e.g. when it is a pointer. Store it // directly in the stack after a convert. // Because contextType is a struct and we have to cast it to a *i8, // store it in an alloca first for bitcasting (store+bitcast+load). contextAlloc = c.builder.CreateAlloca(contextType, "") } else { // Context is bigger than a pointer, so allocate it on the heap. size := c.targetData.TypeAllocSize(contextType) sizeValue := llvm.ConstInt(c.uintptrType, size, false) contextHeapAlloc = c.createRuntimeCall("alloc", []llvm.Value{sizeValue}, "") contextAlloc = c.builder.CreateBitCast(contextHeapAlloc, llvm.PointerType(contextType, 0), "") } // Store all bound variables in the alloca or heap pointer. for i, boundVar := range boundVars { indices := []llvm.Value{ llvm.ConstInt(c.ctx.Int32Type(), 0, false), llvm.ConstInt(c.ctx.Int32Type(), uint64(i), false), } gep := c.builder.CreateInBoundsGEP(contextAlloc, indices, "") c.builder.CreateStore(boundVar, gep) } context := llvm.Value{} if c.targetData.TypeAllocSize(contextType) <= c.targetData.TypeAllocSize(c.i8ptrType) { // Load value (as *i8) from the alloca. 
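	// The context fits in a pointer, so load it back out of the alloca as if it
	// held an i8* and pass that value directly.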
contextAlloc = c.builder.CreateBitCast(contextAlloc, llvm.PointerType(c.i8ptrType, 0), "") context = c.builder.CreateLoad(contextAlloc, "") } else { // Get the original heap allocation pointer, which already is an // *i8. context = contextHeapAlloc } // Get the function signature type, which is a closure type. // A closure is a tuple of {context, function pointer}. typ, err := c.getLLVMType(f.Signature) if err != nil { return llvm.Value{}, err } // Create the closure, which is a struct: {context, function pointer}. closure, err := c.getZeroValue(typ) if err != nil { return llvm.Value{}, err } closure = c.builder.CreateInsertValue(closure, f.LLVMFn, 1, "") closure = c.builder.CreateInsertValue(closure, context, 0, "") return closure, nil } func (c *Compiler) parseMakeInterface(val llvm.Value, typ types.Type, global string) (llvm.Value, error) { var itfValue llvm.Value size := c.targetData.TypeAllocSize(val.Type()) if size > c.targetData.TypeAllocSize(c.i8ptrType) { if global != "" { // Allocate in a global variable. global := llvm.AddGlobal(c.mod, val.Type(), global+"$itfvalue") global.SetInitializer(val) global.SetLinkage(llvm.InternalLinkage) global.SetGlobalConstant(true) zero := llvm.ConstInt(c.ctx.Int32Type(), 0, false) itfValueRaw := llvm.ConstInBoundsGEP(global, []llvm.Value{zero, zero}) itfValue = llvm.ConstBitCast(itfValueRaw, c.i8ptrType) } else { // Allocate on the heap and put a pointer in the interface. // TODO: escape analysis. sizeValue := llvm.ConstInt(c.uintptrType, size, false) itfValue = c.createRuntimeCall("alloc", []llvm.Value{sizeValue}, "") itfValueCast := c.builder.CreateBitCast(itfValue, llvm.PointerType(val.Type(), 0), "") c.builder.CreateStore(val, itfValueCast) } } else { // Directly place the value in the interface. switch val.Type().TypeKind() { case llvm.IntegerTypeKind: itfValue = c.builder.CreateIntToPtr(val, c.i8ptrType, "") case llvm.PointerTypeKind: itfValue = c.builder.CreateBitCast(val, c.i8ptrType, "") case llvm.StructTypeKind: // A bitcast would be useful here, but bitcast doesn't allow // aggregate types. So we'll bitcast it using an alloca. // Hopefully this will get optimized away. 
mem := c.builder.CreateAlloca(c.i8ptrType, "") memStructPtr := c.builder.CreateBitCast(mem, llvm.PointerType(val.Type(), 0), "") c.builder.CreateStore(val, memStructPtr) itfValue = c.builder.CreateLoad(mem, "") default: return llvm.Value{}, errors.New("todo: makeinterface: cast small type to i8*") } } itfTypeNum, _ := c.ir.TypeNum(typ) if itfTypeNum >= 1<<16 { return llvm.Value{}, errors.New("interface typecodes do not fit in a 16-bit integer") } itf := llvm.ConstNamedStruct(c.mod.GetTypeByName("runtime._interface"), []llvm.Value{llvm.ConstInt(c.ctx.Int16Type(), uint64(itfTypeNum), false), llvm.Undef(c.i8ptrType)}) itf = c.builder.CreateInsertValue(itf, itfValue, 1, "") return itf, nil } func (c *Compiler) parseUnOp(frame *Frame, unop *ssa.UnOp) (llvm.Value, error) { x, err := c.parseExpr(frame, unop.X) if err != nil { return llvm.Value{}, err } switch unop.Op { case token.NOT: // !x return c.builder.CreateNot(x, ""), nil case token.SUB: // -x if typ, ok := unop.X.Type().Underlying().(*types.Basic); ok { if typ.Info()&types.IsInteger != 0 { return c.builder.CreateSub(llvm.ConstInt(x.Type(), 0, false), x, ""), nil } else if typ.Info()&types.IsFloat != 0 { return c.builder.CreateFSub(llvm.ConstFloat(x.Type(), 0.0), x, ""), nil } else { return llvm.Value{}, errors.New("todo: unknown basic type for negate: " + typ.String()) } } else { return llvm.Value{}, errors.New("todo: unknown type for negate: " + unop.X.Type().Underlying().String()) } case token.MUL: // *x, dereference pointer valType := unop.X.Type().(*types.Pointer).Elem() load := c.builder.CreateLoad(x, "") if c.ir.IsVolatile(valType) { // Volatile load, for memory-mapped registers. load.SetVolatile(true) } return load, nil case token.XOR: // ^x, toggle all bits in integer return c.builder.CreateXor(x, llvm.ConstInt(x.Type(), ^uint64(0), false), ""), nil default: return llvm.Value{}, errors.New("todo: unknown unop") } } // IR returns the whole IR as a human-readable string. func (c *Compiler) IR() string { return c.mod.String() } func (c *Compiler) Verify() error { return llvm.VerifyModule(c.mod, 0) } func (c *Compiler) ApplyFunctionSections() { // Put every function in a separate section. This makes it possible for the // linker to remove dead code (-ffunction-sections). llvmFn := c.mod.FirstFunction() for !llvmFn.IsNil() { if !llvmFn.IsDeclaration() { name := llvmFn.Name() llvmFn.SetSection(".text." + name) } llvmFn = llvm.NextFunction(llvmFn) } } // Turn all global constants into global variables. This works around a // limitation on Harvard architectures (e.g. AVR), where constant and // non-constant pointers point to a different address space. func (c *Compiler) NonConstGlobals() { global := c.mod.FirstGlobal() for !global.IsNil() { global.SetGlobalConstant(false) global = llvm.NextGlobal(global) } } // Emit object file (.o). func (c *Compiler) EmitObject(path string) error { llvmBuf, err := c.machine.EmitToMemoryBuffer(c.mod, llvm.ObjectFile) if err != nil { return err } return c.writeFile(llvmBuf.Bytes(), path) } // Emit LLVM bitcode file (.bc). func (c *Compiler) EmitBitcode(path string) error { data := llvm.WriteBitcodeToMemoryBuffer(c.mod).Bytes() return c.writeFile(data, path) } // Emit LLVM IR source file (.ll). func (c *Compiler) EmitText(path string) error { data := []byte(c.mod.String()) return c.writeFile(data, path) } // Write the data to the file specified by path. 
func (c *Compiler) writeFile(data []byte, path string) error { // Write output to file f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) if err != nil { return err } _, err = f.Write(data) if err != nil { return err } return f.Close() }
1
6,016
Yes, this import was in the wrong place, but it should ideally be in the first list of imports (among `go/build`, `go/token`, etc.). You may move it there, or just revert this change, as it's actually unrelated. (See the import-grouping sketch after this record.)
tinygo-org-tinygo
go
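The review message in the record above refers to Go's import-grouping convention: standard-library packages (such as `go/build` and `go/token`) sit together in the first group, while third-party modules form a separate group below. The actual misplaced import is not visible in this excerpt, so the sketch below is a hypothetical illustration only, using `go/scanner` as a stand-in and assuming the `golang.org/x/tools` dependency is available; it is not the TinyGo patch itself.

```go
// Hypothetical sketch of the import grouping the reviewer suggests.
// `go/scanner` stands in for the import under discussion, which is not
// shown in this excerpt.
package main

import (
	// First group: standard-library imports, kept together.
	_ "go/build"
	_ "go/scanner" // the misplaced import would ideally be moved up into this group
	_ "go/token"

	// Second group: third-party imports stay in their own block.
	_ "golang.org/x/tools/go/ssa"
)

func main() {}
```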
@@ -106,7 +106,7 @@ import javax.lang.model.element.Name; @AutoService(BugChecker.class) @BugPattern( name = "StrictUnusedVariable", - altNames = {"unused", "StrictUnusedVariable"}, + altNames = {"unused", "UnusedVariable"}, link = "https://github.com/palantir/gradle-baseline#baseline-error-prone-checks", linkType = BugPattern.LinkType.CUSTOM, summary = "Unused.",
1
/* * Copyright 2018 The Error Prone Authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.palantir.baseline.errorprone; import static com.google.common.base.MoreObjects.firstNonNull; import static com.google.common.base.Preconditions.checkArgument; import static com.google.common.base.Strings.nullToEmpty; import static com.google.common.collect.ImmutableList.toImmutableList; import static com.google.common.collect.Iterables.getLast; import static com.google.common.collect.Iterables.getOnlyElement; import static com.google.errorprone.BugPattern.SeverityLevel.ERROR; import static com.google.errorprone.util.ASTHelpers.getSymbol; import static com.google.errorprone.util.ASTHelpers.getType; import static com.google.errorprone.util.ASTHelpers.isSubtype; import static com.google.errorprone.util.SideEffectAnalysis.hasSideEffect; import static com.sun.source.tree.Tree.Kind.POSTFIX_DECREMENT; import static com.sun.source.tree.Tree.Kind.POSTFIX_INCREMENT; import static com.sun.source.tree.Tree.Kind.PREFIX_DECREMENT; import static com.sun.source.tree.Tree.Kind.PREFIX_INCREMENT; import com.google.auto.service.AutoService; import com.google.common.base.Ascii; import com.google.common.base.CaseFormat; import com.google.common.collect.ArrayListMultimap; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableListMultimap; import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSet; import com.google.common.collect.Iterables; import com.google.common.collect.ListMultimap; import com.google.common.collect.Multimaps; import com.google.errorprone.BugPattern; import com.google.errorprone.VisitorState; import com.google.errorprone.bugpatterns.BugChecker; import com.google.errorprone.fixes.SuggestedFix; import com.google.errorprone.fixes.SuggestedFixes; import com.google.errorprone.matchers.Description; import com.google.errorprone.suppliers.Suppliers; import com.google.errorprone.util.ASTHelpers; import com.sun.source.tree.AnnotationTree; import com.sun.source.tree.ArrayAccessTree; import com.sun.source.tree.AssignmentTree; import com.sun.source.tree.ClassTree; import com.sun.source.tree.CompilationUnitTree; import com.sun.source.tree.CompoundAssignmentTree; import com.sun.source.tree.DoWhileLoopTree; import com.sun.source.tree.EnhancedForLoopTree; import com.sun.source.tree.ErroneousTree; import com.sun.source.tree.ExpressionStatementTree; import com.sun.source.tree.ExpressionTree; import com.sun.source.tree.ForLoopTree; import com.sun.source.tree.IdentifierTree; import com.sun.source.tree.IfTree; import com.sun.source.tree.LambdaExpressionTree; import com.sun.source.tree.MemberReferenceTree; import com.sun.source.tree.MemberSelectTree; import com.sun.source.tree.MethodInvocationTree; import com.sun.source.tree.MethodTree; import com.sun.source.tree.ReturnTree; import com.sun.source.tree.StatementTree; import com.sun.source.tree.Tree; import com.sun.source.tree.TryTree; import com.sun.source.tree.UnaryTree; import 
com.sun.source.tree.VariableTree; import com.sun.source.tree.WhileLoopTree; import com.sun.source.util.SimpleTreeVisitor; import com.sun.source.util.TreePath; import com.sun.source.util.TreePathScanner; import com.sun.source.util.TreeScanner; import com.sun.tools.javac.code.Symbol; import com.sun.tools.javac.code.Type; import com.sun.tools.javac.tree.JCTree; import com.sun.tools.javac.util.Position; import java.util.ArrayList; import java.util.Collection; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.Set; import java.util.concurrent.atomic.AtomicBoolean; import javax.annotation.Nullable; import javax.lang.model.element.ElementKind; import javax.lang.model.element.Modifier; import javax.lang.model.element.Name; /** * Copy from {@Link UnusedVariable } except we consider all parameter for unused analysis. We modified the * `onlyCheckForReassignments` filter to exclude abstract methods, to check loggers */ @AutoService(BugChecker.class) @BugPattern( name = "StrictUnusedVariable", altNames = {"unused", "StrictUnusedVariable"}, link = "https://github.com/palantir/gradle-baseline#baseline-error-prone-checks", linkType = BugPattern.LinkType.CUSTOM, summary = "Unused.", providesFix = BugPattern.ProvidesFix.REQUIRES_HUMAN_ATTENTION, severity = ERROR, documentSuppression = false) public final class StrictUnusedVariable extends BugChecker implements BugChecker.CompilationUnitTreeMatcher { private static final ImmutableSet<String> EXEMPT_PREFIXES = ImmutableSet.of("_"); /** The set of annotation full names which exempt annotated element from being reported as unused. */ private static final ImmutableSet<String> EXEMPTING_VARIABLE_ANNOTATIONS = ImmutableSet.of( "javax.persistence.Basic", "javax.persistence.Column", "javax.persistence.Id", "javax.persistence.Version", "javax.xml.bind.annotation.XmlElement", "org.junit.Rule", "org.mockito.Mock", "org.openqa.selenium.support.FindBy", "org.openqa.selenium.support.FindBys"); /** The set of types exempting a type that is extending or implementing them. */ private static final ImmutableSet<String> EXEMPTING_SUPER_TYPES = ImmutableSet.of(); /** The set of types exempting a field of type extending them. */ private static final ImmutableSet<String> EXEMPTING_FIELD_SUPER_TYPES = ImmutableSet.of("org.junit.rules.TestRule", "org.slf4j.Logger"); private static final ImmutableList<String> SPECIAL_FIELDS = ImmutableList.of( "serialVersionUID", // TAG fields are used by convention in Android apps. "TAG"); private static final String UNUSED = "unused"; @Override public Description matchCompilationUnit(CompilationUnitTree tree, VisitorState state) { // We will skip reporting on the whole compilation if there are any native methods found. // Use a TreeScanner to find all local variables and fields. if (hasNativeMethods(tree)) { return Description.NO_MATCH; } VariableFinder variableFinder = new VariableFinder(state); variableFinder.scan(state.getPath(), null); checkUsedVariables(state, variableFinder); // Map of symbols to variable declarations. Initially this is a map of all of the local variable // and fields. As we go we remove those variables which are used. Map<Symbol, TreePath> unusedElements = variableFinder.unusedElements; // Whether a symbol should only be checked for reassignments (e.g. public methods' parameters). Set<Symbol> onlyCheckForReassignments = variableFinder.onlyCheckForReassignments; // Map of symbols to their usage sites. 
In this map we also include the definition site in // addition to all the trees where symbol is used. This map is designed to keep the usage sites // of variables (parameters, fields, locals). // // We populate this map when analyzing the unused variables and then use it to generate // appropriate fixes for them. ListMultimap<Symbol, TreePath> usageSites = variableFinder.usageSites; FilterUsedVariables filterUsedVariables = new FilterUsedVariables(unusedElements, usageSites); filterUsedVariables.scan(state.getPath(), null); // Keeps track of whether a symbol was _ever_ used (between reassignments). Set<Symbol> isEverUsed = filterUsedVariables.isEverUsed; List<UnusedSpec> unusedSpecs = filterUsedVariables.unusedSpecs; // Add the left-over unused variables... for (Map.Entry<Symbol, TreePath> entry : unusedElements.entrySet()) { unusedSpecs.add(UnusedSpec.of(entry.getKey(), entry.getValue(), usageSites.get(entry.getKey()), null)); } ImmutableListMultimap<Symbol, UnusedSpec> unusedSpecsBySymbol = Multimaps.index(unusedSpecs, UnusedSpec::symbol); for (Map.Entry<Symbol, Collection<UnusedSpec>> entry : unusedSpecsBySymbol.asMap().entrySet()) { Symbol unusedSymbol = entry.getKey(); Collection<UnusedSpec> specs = entry.getValue(); ImmutableList<TreePath> allUsageSites = specs.stream().flatMap(u -> u.usageSites().stream()).collect(toImmutableList()); if (!unusedElements.containsKey(unusedSymbol)) { isEverUsed.add(unusedSymbol); } SuggestedFix makeFirstAssignmentDeclaration = makeAssignmentDeclaration(unusedSymbol, specs, allUsageSites, state); // Don't complain if this is a public method and we only overwrote it once. if (onlyCheckForReassignments.contains(unusedSymbol) && specs.size() <= 1) { continue; } Tree unused = specs.iterator().next().variableTree().getLeaf(); Symbol.VarSymbol symbol = (Symbol.VarSymbol) unusedSymbol; ImmutableList<SuggestedFix> fixes; if (symbol.getKind() == ElementKind.PARAMETER && !isEverUsed.contains(unusedSymbol)) { fixes = buildUnusedParameterFixes(symbol, allUsageSites, state); } else { fixes = buildUnusedVarFixes(symbol, allUsageSites, state); } state.reportMatch(buildDescription(unused) .setMessage(String.format( "%s %s '%s' is never read.", unused instanceof VariableTree ? 
"The" : "The assignment to this", describeVariable(symbol), symbol.name)) .addAllFixes(fixes.stream() .map(f -> SuggestedFix.builder().merge(makeFirstAssignmentDeclaration).merge(f).build()) .collect(toImmutableList())) .build()); } return Description.NO_MATCH; } private void checkUsedVariables(VisitorState state, VariableFinder variableFinder) { VariableUsage variableUsage = new VariableUsage(); variableUsage.scan(state.getPath(), null); variableFinder.exemptedVariables.entrySet().forEach(entry -> { List<TreePath> usageSites = variableUsage.usageSites.get(entry.getKey()); if (usageSites.size() <= 1) { return; } state.reportMatch(buildDescription(entry.getValue()) .setMessage(String.format( "The %s '%s' is read but has 'StrictUnusedVariable' " + "suppressed because of its name.", describeVariable((Symbol.VarSymbol) entry.getKey()), entry.getKey().name)) .addFix(constructUsedVariableSuggestedFix(usageSites, state)) .build()); }); } private static SuggestedFix constructUsedVariableSuggestedFix(List<TreePath> usagePaths, VisitorState state) { SuggestedFix.Builder fix = SuggestedFix.builder(); for (TreePath usagePath : usagePaths) { if (usagePath.getLeaf() instanceof VariableTree) { VariableTree variableTree = (VariableTree) usagePath.getLeaf(); int startPos = state.getEndPosition(variableTree.getType()) + 1; int endPos = state.getEndPosition(variableTree); // Ignore the initializer if there is one if (variableTree.getInitializer() != null) { endPos = startPos + variableTree.getName().toString().length(); } if (startPos == Position.NOPOS || endPos == Position.NOPOS) { // TODO(b/118437729): handle bogus source positions in enum declarations continue; } renameVariable(startPos, endPos, variableTree.getName().toString(), fix); } else if (usagePath.getLeaf() instanceof IdentifierTree) { JCTree.JCIdent identifierTree = (JCTree.JCIdent) usagePath.getLeaf(); int startPos = identifierTree.getStartPosition(); int endPos = state.getEndPosition(identifierTree); if (startPos == Position.NOPOS || endPos == Position.NOPOS) { // TODO(b/118437729): handle bogus source positions in enum declarations continue; } renameVariable(startPos, endPos, identifierTree.getName().toString(), fix); } } return fix.build(); } private static void renameVariable(int startPos, int endPos, String name, SuggestedFix.Builder fix) { EXEMPT_PREFIXES.stream().filter(name::startsWith).findFirst().ifPresent(prefix -> fix.replace( startPos, endPos, prefix.length() == name.length() // Fall back to a generic variable name if the prefix is the entire variable name ? 
"value" : CaseFormat.UPPER_CAMEL.to(CaseFormat.LOWER_CAMEL, name.substring(prefix.length())))); } private static SuggestedFix makeAssignmentDeclaration( Symbol unusedSymbol, Collection<UnusedSpec> specs, ImmutableList<TreePath> allUsageSites, VisitorState state) { if (unusedSymbol.getKind() != ElementKind.LOCAL_VARIABLE) { return SuggestedFix.builder().build(); } Optional<VariableTree> removedVariableTree = allUsageSites.stream() .filter(tp -> tp.getLeaf() instanceof VariableTree) .findFirst() .map(tp -> (VariableTree) tp.getLeaf()); Optional<AssignmentTree> reassignment = specs.stream() .map(UnusedSpec::terminatingAssignment) .filter(Optional::isPresent) .map(Optional::get) .filter(a -> allUsageSites.stream().noneMatch(tp -> tp.getLeaf().equals(a))) .findFirst(); if (!removedVariableTree.isPresent() || !reassignment.isPresent()) { return SuggestedFix.builder().build(); } return SuggestedFix.prefixWith( reassignment.get(), state.getSourceForNode(removedVariableTree.get().getType()) + " "); } @SuppressWarnings("SwitchStatementDefaultCase") private static String describeVariable(Symbol.VarSymbol symbol) { switch (symbol.getKind()) { case FIELD: return "field"; case LOCAL_VARIABLE: return "local variable"; case PARAMETER: return "parameter"; default: return "variable"; } } private static boolean hasNativeMethods(CompilationUnitTree tree) { AtomicBoolean hasAnyNativeMethods = new AtomicBoolean(false); new TreeScanner<Void, Void>() { @Override public Void visitMethod(MethodTree tree, Void unused) { if (tree.getModifiers().getFlags().contains(Modifier.NATIVE)) { hasAnyNativeMethods.set(true); } return null; } }.scan(tree, null); return hasAnyNativeMethods.get(); } // https://docs.oracle.com/javase/specs/jls/se11/html/jls-14.html#jls-ExpressionStatement private static final ImmutableSet<Tree.Kind> TOP_LEVEL_EXPRESSIONS = ImmutableSet.of( Tree.Kind.ASSIGNMENT, Tree.Kind.PREFIX_INCREMENT, Tree.Kind.PREFIX_DECREMENT, Tree.Kind.POSTFIX_INCREMENT, Tree.Kind.POSTFIX_DECREMENT, Tree.Kind.METHOD_INVOCATION, Tree.Kind.NEW_CLASS); private static boolean needsBlock(TreePath path) { Tree leaf = path.getLeaf(); class Visitor extends SimpleTreeVisitor<Boolean, Void> { @Override public Boolean visitIf(IfTree tree, Void unused) { return tree.getThenStatement() == leaf || tree.getElseStatement() == leaf; } @Override public Boolean visitDoWhileLoop(DoWhileLoopTree tree, Void unused) { return tree.getStatement() == leaf; } @Override public Boolean visitWhileLoop(WhileLoopTree tree, Void unused) { return tree.getStatement() == leaf; } @Override public Boolean visitForLoop(ForLoopTree tree, Void unused) { return tree.getStatement() == leaf; } @Override public Boolean visitEnhancedForLoop(EnhancedForLoopTree tree, Void unused) { return tree.getStatement() == leaf; } } return firstNonNull(path.getParentPath().getLeaf().accept(new Visitor(), null), false); } private static ImmutableList<SuggestedFix> buildUnusedVarFixes( Symbol varSymbol, List<TreePath> usagePaths, VisitorState state) { // Don't suggest a fix for fields annotated @Inject: we can warn on them, but they *could* be // used outside the class. 
if (ASTHelpers.hasDirectAnnotationWithSimpleName(varSymbol, "Inject")) { return ImmutableList.of(); } ElementKind varKind = varSymbol.getKind(); SuggestedFix.Builder fix = SuggestedFix.builder().setShortDescription("remove unused variable"); for (TreePath usagePath : usagePaths) { StatementTree statement = (StatementTree) usagePath.getLeaf(); if (statement.getKind() == Tree.Kind.VARIABLE) { if (getSymbol(statement).getKind() == ElementKind.PARAMETER) { continue; } VariableTree variableTree = (VariableTree) statement; ExpressionTree initializer = variableTree.getInitializer(); if (hasSideEffect(initializer) && TOP_LEVEL_EXPRESSIONS.contains(initializer.getKind())) { if (varKind == ElementKind.FIELD) { String newContent = String.format( "%s{ %s; }", varSymbol.isStatic() ? "static " : "", state.getSourceForNode(initializer)); fix.merge(SuggestedFixes.replaceIncludingComments(usagePath, newContent, state)); } else { fix.replace(statement, String.format("%s;", state.getSourceForNode(initializer))); } } else if (isEnhancedForLoopVar(usagePath)) { String modifiers = nullToEmpty( variableTree.getModifiers() == null ? null : state.getSourceForNode(variableTree.getModifiers())); String newContent = String.format( "%s%s unused", modifiers.isEmpty() ? "" : (modifiers + " "), state.getSourceForNode(variableTree.getType())); // The new content for the second fix should be identical to the content for the first // fix in this case because we can't just remove the enhanced for loop variable. fix.replace(variableTree, newContent); } else { String replacement = needsBlock(usagePath) ? "{}" : ""; fix.merge(SuggestedFixes.replaceIncludingComments(usagePath, replacement, state)); } continue; } else if (statement.getKind() == Tree.Kind.EXPRESSION_STATEMENT) { JCTree tree = (JCTree) ((ExpressionStatementTree) statement).getExpression(); if (tree instanceof CompoundAssignmentTree) { if (hasSideEffect(((CompoundAssignmentTree) tree).getExpression())) { // If it's a compound assignment, there's no reason we'd want to remove the expression, // so don't set `encounteredSideEffects` based on this usage. SuggestedFix replacement = SuggestedFix.replace( tree.getStartPosition(), ((JCTree.JCAssignOp) tree).getExpression().getStartPosition(), ""); fix.merge(replacement); continue; } } else if (tree instanceof AssignmentTree) { if (hasSideEffect(((AssignmentTree) tree).getExpression())) { fix.replace( tree.getStartPosition(), ((JCTree.JCAssign) tree).getExpression().getStartPosition(), ""); continue; } } } String replacement = needsBlock(usagePath) ? 
"{}" : ""; fix.replace(statement, replacement); } return ImmutableList.of(fix.build()); } private static ImmutableList<SuggestedFix> buildUnusedParameterFixes( Symbol varSymbol, List<TreePath> usagePaths, VisitorState state) { Symbol.MethodSymbol methodSymbol = (Symbol.MethodSymbol) varSymbol.owner; boolean isPrivateMethod = methodSymbol.getModifiers().contains(Modifier.PRIVATE); int index = methodSymbol.params.indexOf(varSymbol); SuggestedFix.Builder fix = SuggestedFix.builder(); for (TreePath path : usagePaths) { fix.delete(path.getLeaf()); } // Remove parameter if the method is private since we can automatically fix all invocation sites // Otherwise add `_` prefix to the variable name if (isPrivateMethod) { new TreePathScanner<Void, Void>() { @Override public Void visitMethodInvocation(MethodInvocationTree tree, Void unused) { if (getSymbol(tree).equals(methodSymbol)) { removeByIndex(tree.getArguments()); } return super.visitMethodInvocation(tree, null); } @Override public Void visitMethod(MethodTree tree, Void unused) { if (getSymbol(tree).equals(methodSymbol)) { removeByIndex(tree.getParameters()); } return super.visitMethod(tree, null); } private void removeByIndex(List<? extends Tree> trees) { if (index >= trees.size()) { // possible when removing a varargs parameter with no corresponding formal parameters return; } if (trees.size() == 1) { Tree tree = getOnlyElement(trees); if (((JCTree) tree).getStartPosition() == -1 || state.getEndPosition(tree) == -1) { // TODO(b/118437729): handle bogus source positions in enum declarations return; } fix.delete(tree); return; } int startPos; int endPos; if (index >= 1) { startPos = state.getEndPosition(trees.get(index - 1)); endPos = state.getEndPosition(trees.get(index)); } else { startPos = ((JCTree) trees.get(index)).getStartPosition(); endPos = ((JCTree) trees.get(index + 1)).getStartPosition(); } if (index == methodSymbol.params().size() - 1 && methodSymbol.isVarArgs()) { endPos = state.getEndPosition(getLast(trees)); } if (startPos == Position.NOPOS || endPos == Position.NOPOS) { // TODO(b/118437729): handle bogus source positions in enum declarations return; } fix.replace(startPos, endPos, ""); } }.scan(state.getPath().getCompilationUnit(), null); } else { new TreePathScanner<Void, Void>() { @Override public Void visitMethod(MethodTree methodTree, Void unused) { if (getSymbol(methodTree).equals(methodSymbol)) { renameByIndex(methodTree.getParameters()); } return super.visitMethod(methodTree, null); } private void renameByIndex(List<? extends VariableTree> trees) { if (index >= trees.size()) { // possible when removing a varargs parameter with no corresponding formal parameters return; } VariableTree tree = trees.get(index); int startPos = state.getEndPosition(tree.getType()) + 1; int endPos = state.getEndPosition(trees.get(index)); if (index == methodSymbol.params().size() - 1 && methodSymbol.isVarArgs()) { endPos = state.getEndPosition(getLast(trees)); } if (startPos == Position.NOPOS || endPos == Position.NOPOS) { // TODO(b/118437729): handle bogus source positions in enum declarations return; } String name = tree.getName().toString(); if (name.startsWith(UNUSED)) { fix.replace( startPos, endPos, "_" + (name.equals(UNUSED) ? 
"value" : CaseFormat.UPPER_CAMEL.to( CaseFormat.LOWER_CAMEL, name.substring(UNUSED.length())))); } else { fix.replace(startPos, endPos, "_" + tree.getName()); } } }.scan(state.getPath().getCompilationUnit(), null); } return ImmutableList.of(fix.build()); } private static boolean isEnhancedForLoopVar(TreePath variablePath) { Tree tree = variablePath.getLeaf(); Tree parent = variablePath.getParentPath().getLeaf(); return parent instanceof EnhancedForLoopTree && ((EnhancedForLoopTree) parent).getVariable() == tree; } /** * Looks at the list of {@code annotations} and see if there is any annotation which exists {@code * exemptingAnnotations}. */ private static boolean exemptedByAnnotation(List<? extends AnnotationTree> annotations, VisitorState unused) { for (AnnotationTree annotation : annotations) { if (((JCTree.JCAnnotation) annotation).type != null) { Symbol.TypeSymbol tsym = ((JCTree.JCAnnotation) annotation).type.tsym; if (EXEMPTING_VARIABLE_ANNOTATIONS.contains(tsym.getQualifiedName().toString())) { return true; } } } return false; } private static boolean exemptedByName(Name name) { return EXEMPT_PREFIXES.stream().anyMatch(prefix -> Ascii.toLowerCase(name.toString()).startsWith(prefix)); } private final class VariableFinder extends TreePathScanner<Void, Void> { private final Map<Symbol, TreePath> unusedElements = new HashMap<>(); private final Set<Symbol> onlyCheckForReassignments = new HashSet<>(); private final ListMultimap<Symbol, TreePath> usageSites = ArrayListMultimap.create(); private final Map<Symbol, VariableTree> exemptedVariables = new HashMap<>(); private final VisitorState state; private VariableFinder(VisitorState state) { this.state = state; } @Override @SuppressWarnings("SwitchStatementDefaultCase") public Void visitVariable(VariableTree variableTree, Void unused) { if (isSuppressed(variableTree)) { return null; } Symbol.VarSymbol symbol = getSymbol(variableTree); if (symbol == null) { return null; } if (exemptedByName(variableTree.getName())) { exemptedVariables.put(symbol, variableTree); return null; } if (symbol.getKind() == ElementKind.FIELD && exemptedFieldBySuperType(getType(variableTree), state)) { return null; } super.visitVariable(variableTree, null); // Return if the element is exempted by an annotation. if (exemptedByAnnotation(variableTree.getModifiers().getAnnotations(), state)) { return null; } switch (symbol.getKind()) { case FIELD: // We are only interested in private fields and those which are not special. 
if (isFieldEligibleForChecking(variableTree, symbol)) { unusedElements.put(symbol, getCurrentPath()); usageSites.put(symbol, getCurrentPath()); } break; case LOCAL_VARIABLE: unusedElements.put(symbol, getCurrentPath()); usageSites.put(symbol, getCurrentPath()); break; case PARAMETER: // ignore the receiver parameter if (variableTree.getName().contentEquals("this")) { return null; } unusedElements.put(symbol, getCurrentPath()); if (!isParameterSubjectToAnalysis(symbol)) { onlyCheckForReassignments.add(symbol); } break; default: break; } return null; } private boolean exemptedFieldBySuperType(Type type, VisitorState state) { return EXEMPTING_FIELD_SUPER_TYPES.stream() .anyMatch(t -> isSubtype(type, state.getTypeFromString(t), state)); } private boolean isFieldEligibleForChecking(VariableTree variableTree, Symbol.VarSymbol symbol) { return variableTree.getModifiers().getFlags().contains(Modifier.PRIVATE) && !SPECIAL_FIELDS.contains(symbol.getSimpleName().toString()); } /** Returns whether {@code sym} can be removed without updating call sites in other files. */ @SuppressWarnings("PreferSafeLoggingPreconditions") private boolean isParameterSubjectToAnalysis(Symbol sym) { checkArgument(sym.getKind() == ElementKind.PARAMETER); Symbol enclosingMethod = sym.owner; return !enclosingMethod.getModifiers().contains(Modifier.ABSTRACT); } @Override public Void visitTry(TryTree node, Void unused) { // Skip resources, as while these may not be referenced, they are used. scan(node.getBlock(), null); scan(node.getCatches(), null); scan(node.getFinallyBlock(), null); return null; } @Override public Void visitClass(ClassTree tree, Void unused) { if (isSuppressed(tree)) { return null; } if (EXEMPTING_SUPER_TYPES.stream() .anyMatch(t -> isSubtype(getType(tree), Suppliers.typeFromString(t).get(state), state))) { return null; } return super.visitClass(tree, null); } @Override public Void visitLambdaExpression(LambdaExpressionTree node, Void unused) { // skip lambda parameters return scan(node.getBody(), null); } @Override public Void visitMethod(MethodTree tree, Void unused) { return isSuppressed(tree) ? null : super.visitMethod(tree, unused); } } private static final class FilterUsedVariables extends TreePathScanner<Void, Void> { private boolean leftHandSideAssignment = false; // When this greater than zero, the usage of identifiers are real. private int inArrayAccess = 0; // This is true when we are processing a `return` statement. Elements used in return statement // must not be considered unused. private boolean inReturnStatement = false; // When this greater than zero, the usage of identifiers are real because they are in a method // call. private int inMethodCall = 0; private final Set<Symbol> hasBeenAssigned = new HashSet<>(); private TreePath currentExpressionStatement = null; private final Map<Symbol, TreePath> unusedElements; private final ListMultimap<Symbol, TreePath> usageSites; // Keeps track of whether a symbol was _ever_ used (between reassignments). 
private final Set<Symbol> isEverUsed = new HashSet<>(); private final List<UnusedSpec> unusedSpecs = new ArrayList<>(); private final ImmutableMap<Symbol, TreePath> declarationSites; private FilterUsedVariables(Map<Symbol, TreePath> unusedElements, ListMultimap<Symbol, TreePath> usageSites) { this.unusedElements = unusedElements; this.usageSites = usageSites; this.declarationSites = ImmutableMap.copyOf(unusedElements); } private boolean isInExpressionStatementTree() { Tree parent = getCurrentPath().getParentPath().getLeaf(); return parent != null && parent.getKind() == Tree.Kind.EXPRESSION_STATEMENT; } private boolean isUsed(@Nullable Symbol symbol) { return symbol != null && (!leftHandSideAssignment || inReturnStatement || inArrayAccess > 0 || inMethodCall > 0) && unusedElements.containsKey(symbol); } @Override public Void visitVariable(VariableTree tree, Void unused) { Symbol.VarSymbol symbol = getSymbol(tree); if (hasBeenAssigned(tree, symbol)) { hasBeenAssigned.add(symbol); } return super.visitVariable(tree, null); } private boolean hasBeenAssigned(VariableTree tree, Symbol.VarSymbol symbol) { if (symbol == null) { return false; } // Parameters and enhanced for loop variables are always considered assigned. if (symbol.getKind() == ElementKind.PARAMETER) { return true; } if (getCurrentPath().getParentPath().getLeaf() instanceof EnhancedForLoopTree) { return true; } // Otherwise it's assigned if the VariableTree has an initializer. if (unusedElements.containsKey(symbol) && tree.getInitializer() != null) { return true; } return false; } @Override public Void visitExpressionStatement(ExpressionStatementTree tree, Void unused) { currentExpressionStatement = getCurrentPath(); super.visitExpressionStatement(tree, null); currentExpressionStatement = null; return null; } @Override public Void visitIdentifier(IdentifierTree tree, Void unused) { Symbol symbol = getSymbol(tree); // Filtering out identifier symbol from vars map. These are real usages of identifiers. if (isUsed(symbol)) { unusedElements.remove(symbol); } if (currentExpressionStatement != null && unusedElements.containsKey(symbol)) { usageSites.put(symbol, currentExpressionStatement); } return null; } @Override public Void visitAssignment(AssignmentTree tree, Void unused) { scan(tree.getExpression(), null); // If a variable is used in the left hand side of an assignment that does not count as a // usage. if (isInExpressionStatementTree()) { handleReassignment(tree); leftHandSideAssignment = true; scan(tree.getVariable(), null); leftHandSideAssignment = false; } else { super.visitAssignment(tree, null); } return null; } /** * Deals with assignment trees; works out if the assignment definitely overwrites the variable in all ways that * could be observed as we scan forwards. */ private void handleReassignment(AssignmentTree tree) { Tree parent = getCurrentPath().getParentPath().getLeaf(); if (!(parent instanceof StatementTree)) { return; } if (tree.getVariable().getKind() != Tree.Kind.IDENTIFIER) { return; } if (ASTHelpers.findEnclosingNode(getCurrentPath(), ForLoopTree.class) != null) { return; } Symbol symbol = getSymbol(tree.getVariable()); // Check if it was actually assigned to at this depth (or is a parameter). 
if (!((hasBeenAssigned.contains(symbol) && symbol.getKind() == ElementKind.LOCAL_VARIABLE) || symbol.getKind() == ElementKind.PARAMETER)) { return; } if (!declarationSites.containsKey(symbol)) { return; } hasBeenAssigned.add(symbol); TreePath assignmentSite = declarationSites.get(symbol); if (scopeDepth(assignmentSite) != Iterables.size(getCurrentPath().getParentPath())) { return; } if (unusedElements.containsKey(symbol)) { unusedSpecs.add(UnusedSpec.of(symbol, assignmentSite, usageSites.get(symbol), tree)); } else { isEverUsed.add(symbol); } unusedElements.put(symbol, getCurrentPath()); usageSites.removeAll(symbol); usageSites.put(symbol, getCurrentPath().getParentPath()); } // This is a crude proxy for when a variable is unconditionally overwritten. It doesn't match // all cases, but it catches a reassignment at the same depth. private static int scopeDepth(TreePath assignmentSite) { if (assignmentSite.getParentPath().getLeaf() instanceof EnhancedForLoopTree) { return Iterables.size(assignmentSite) + 1; } if (assignmentSite.getLeaf() instanceof VariableTree) { Symbol.VarSymbol symbol = getSymbol((VariableTree) assignmentSite.getLeaf()); if (symbol.getKind() == ElementKind.PARAMETER) { return Iterables.size(assignmentSite) + 1; } } return Iterables.size(assignmentSite); } @Override public Void visitMemberSelect(MemberSelectTree memberSelectTree, Void unused) { Symbol symbol = getSymbol(memberSelectTree); if (isUsed(symbol)) { unusedElements.remove(symbol); } else if (currentExpressionStatement != null && unusedElements.containsKey(symbol)) { usageSites.put(symbol, currentExpressionStatement); } // Clear leftHandSideAssignment and descend down the tree to catch any variables in the // receiver of this member select, which _are_ considered used. boolean wasLeftHandAssignment = leftHandSideAssignment; leftHandSideAssignment = false; super.visitMemberSelect(memberSelectTree, null); leftHandSideAssignment = wasLeftHandAssignment; return null; } @Override public Void visitMemberReference(MemberReferenceTree tree, Void unused) { super.visitMemberReference(tree, null); Symbol.MethodSymbol symbol = getSymbol(tree); if (symbol != null) { symbol.getParameters().forEach(unusedElements::remove); } return null; } @Override public Void visitCompoundAssignment(CompoundAssignmentTree tree, Void unused) { if (isInExpressionStatementTree()) { leftHandSideAssignment = true; scan(tree.getVariable(), null); leftHandSideAssignment = false; scan(tree.getExpression(), null); } else { super.visitCompoundAssignment(tree, null); } return null; } @Override public Void visitArrayAccess(ArrayAccessTree node, Void unused) { inArrayAccess++; super.visitArrayAccess(node, null); inArrayAccess--; return null; } @Override public Void visitReturn(ReturnTree node, Void unused) { inReturnStatement = true; scan(node.getExpression(), null); inReturnStatement = false; return null; } @Override public Void visitUnary(UnaryTree tree, Void unused) { // If unary expression is inside another expression, then this is a real usage of unary // operand. // Example: // array[i++] = 0; // 'i' has a real usage here. 'array' might not have. // list.get(i++); // But if it is like this: // i++; // Then it is possible that this is not a real usage of 'i'. 
if (isInExpressionStatementTree() && (tree.getKind() == POSTFIX_DECREMENT || tree.getKind() == POSTFIX_INCREMENT || tree.getKind() == PREFIX_DECREMENT || tree.getKind() == PREFIX_INCREMENT)) { leftHandSideAssignment = true; scan(tree.getExpression(), null); leftHandSideAssignment = false; } else { super.visitUnary(tree, null); } return null; } @Override public Void visitErroneous(ErroneousTree tree, Void unused) { return scan(tree.getErrorTrees(), null); } /** Looks at method invocations and removes the invoked private methods from {@code #unusedElements}. */ @Override public Void visitMethodInvocation(MethodInvocationTree tree, Void unused) { inMethodCall++; super.visitMethodInvocation(tree, null); inMethodCall--; return null; } } static class VariableUsage extends TreePathScanner<Void, Void> { public final ListMultimap<Symbol, TreePath> usageSites = ArrayListMultimap.create(); @Override public Void visitVariable(VariableTree tree, Void unused) { usageSites.put(getSymbol(tree), getCurrentPath()); return super.visitVariable(tree, null); } @Override public Void visitIdentifier(IdentifierTree tree, Void unused) { usageSites.put(getSymbol(tree), getCurrentPath()); return super.visitIdentifier(tree, null); } @Override public Void visitMemberSelect(MemberSelectTree memberSelectTree, Void unused) { usageSites.put(getSymbol(memberSelectTree), getCurrentPath()); return super.visitMemberSelect(memberSelectTree, null); } } interface UnusedSpec { /** {@link Symbol} of the unsued element. */ Symbol symbol(); /** {@link VariableTree} for the original declaration site. */ TreePath variableTree(); /** * All the usage sites of this variable that we claim are unused (including the initial declaration/assignment). */ ImmutableList<TreePath> usageSites(); /** * If this usage chain was terminated by an unconditional reassignment, the corresponding {@link * AssignmentTree}. */ Optional<AssignmentTree> terminatingAssignment(); static UnusedSpec of( Symbol symbol, TreePath variableTree, Iterable<TreePath> treePaths, @Nullable AssignmentTree assignmentTree) { final ImmutableList<TreePath> treePaths1 = ImmutableList.copyOf(treePaths); return new UnusedSpec() { @Override public Symbol symbol() { return symbol; } @Override public TreePath variableTree() { return variableTree; } @Override public ImmutableList<TreePath> usageSites() { return treePaths1; } @Override public Optional<AssignmentTree> terminatingAssignment() { return Optional.ofNullable(assignmentTree); } }; } } }
1
8,127
It's unnecessary to duplicate the `name`.
palantir-gradle-baseline
java
@@ -68,7 +68,7 @@ final class MediaType extends AbstractType implements LoggerAwareInterface $builder->addModelTransformer($dataTransformer); $builder->addEventListener(FormEvents::SUBMIT, static function (FormEvent $event): void { - if ($event->getForm()->has('unlink') && null !== $event->getForm()->get('unlink')->getData()) { + if ($event->getForm()->has('unlink') && true === $event->getForm()->get('unlink')->getData()) { $event->setData(null); } });
1
<?php declare(strict_types=1); /* * This file is part of the Sonata Project package. * * (c) Thomas Rabaix <[email protected]> * * For the full copyright and license information, please view the LICENSE * file that was distributed with this source code. */ namespace Sonata\MediaBundle\Form\Type; use Psr\Log\LoggerAwareInterface; use Psr\Log\LoggerAwareTrait; use Psr\Log\NullLogger; use Sonata\MediaBundle\Form\DataTransformer\ProviderDataTransformer; use Sonata\MediaBundle\Provider\Pool; use Symfony\Component\Form\AbstractType; use Symfony\Component\Form\Extension\Core\Type\CheckboxType; use Symfony\Component\Form\Extension\Core\Type\FormType; use Symfony\Component\Form\FormBuilderInterface; use Symfony\Component\Form\FormEvent; use Symfony\Component\Form\FormEvents; use Symfony\Component\Form\FormInterface; use Symfony\Component\Form\FormView; use Symfony\Component\OptionsResolver\OptionsResolver; final class MediaType extends AbstractType implements LoggerAwareInterface { use LoggerAwareTrait; /** * @var Pool */ private $pool; /** * @var string * * @phpstan-var class-string<\Sonata\MediaBundle\Model\MediaInterface> */ private $class; /** * @param string $class * * @phpstan-param class-string<\Sonata\MediaBundle\Model\MediaInterface> $class */ public function __construct(Pool $pool, $class) { $this->pool = $pool; $this->class = $class; } public function buildForm(FormBuilderInterface $builder, array $options): void { $dataTransformer = new ProviderDataTransformer($this->pool, $this->class, [ 'provider' => $options['provider'], 'context' => $options['context'], 'empty_on_new' => $options['empty_on_new'], 'new_on_update' => $options['new_on_update'], ]); $dataTransformer->setLogger($this->logger ?? new NullLogger()); $builder->addModelTransformer($dataTransformer); $builder->addEventListener(FormEvents::SUBMIT, static function (FormEvent $event): void { if ($event->getForm()->has('unlink') && null !== $event->getForm()->get('unlink')->getData()) { $event->setData(null); } }); $this->pool->getProvider($options['provider'])->buildMediaType($builder); $builder->add('unlink', CheckboxType::class, [ 'label' => 'widget_label_unlink', 'mapped' => false, 'data' => false, 'required' => false, ]); } public function buildView(FormView $view, FormInterface $form, array $options): void { $view->vars['provider'] = $options['provider']; $view->vars['context'] = $options['context']; } public function configureOptions(OptionsResolver $resolver): void { $resolver ->setDefaults([ 'data_class' => $this->class, 'empty_on_new' => true, 'new_on_update' => true, 'translation_domain' => 'SonataMediaBundle', ]) ->setRequired(['provider', 'context']) ->setAllowedTypes('provider', 'string') ->setAllowedTypes('context', 'string') ->setAllowedValues('provider', $this->pool->getProviderList()) ->setAllowedValues('context', array_keys($this->pool->getContexts())); } public function getParent() { return FormType::class; } public function getBlockPrefix() { return 'sonata_media_type'; } }
1
12,518
This was a mistake introduced when adding the PHPStan strict plugin.
sonata-project-SonataMediaBundle
php
@@ -197,6 +197,14 @@ export function diffChildren( newParentVNode._nextDom = oldDom; } + } else if ( + oldDom && + oldVNode._dom == oldDom && + oldDom.parentNode != parentDom + ) { + // The above condition is handle null placeholders. See test in placeholder.test.js: + // `efficiently replace null placeholders in parent rerenders` + oldDom = getDomSibling(oldVNode); } }
1
import { diff, unmount, applyRef } from './index'; import { createVNode } from '../create-element'; import { EMPTY_OBJ, EMPTY_ARR } from '../constants'; import { removeNode } from '../util'; import { getDomSibling } from '../component'; /** * Diff the children of a virtual node * @param {import('../internal').PreactElement} parentDom The DOM element whose * children are being diffed * @param {import('../internal').VNode} newParentVNode The new virtual * node whose children should be diff'ed against oldParentVNode * @param {import('../internal').VNode} oldParentVNode The old virtual * node whose children should be diff'ed against newParentVNode * @param {object} context The current context object * @param {boolean} isSvg Whether or not this DOM node is an SVG node * @param {Array<import('../internal').PreactElement>} excessDomChildren * @param {Array<import('../internal').Component>} commitQueue List of components * which have callbacks to invoke in commitRoot * @param {Node | Text} oldDom The current attached DOM * element any new dom elements should be placed around. Likely `null` on first * render (except when hydrating). Can be a sibling DOM element when diffing * Fragments that have siblings. In most cases, it starts out as `oldChildren[0]._dom`. * @param {boolean} isHydrating Whether or not we are in hydration */ export function diffChildren( parentDom, newParentVNode, oldParentVNode, context, isSvg, excessDomChildren, commitQueue, oldDom, isHydrating ) { let i, j, oldVNode, newDom, sibDom, firstChildDom, refs; // This is a compression of oldParentVNode!=null && oldParentVNode != EMPTY_OBJ && oldParentVNode._children || EMPTY_ARR // as EMPTY_OBJ._children should be `undefined`. let oldChildren = (oldParentVNode && oldParentVNode._children) || EMPTY_ARR; let oldChildrenLength = oldChildren.length; // Only in very specific places should this logic be invoked (top level `render` and `diffElementNodes`). // I'm using `EMPTY_OBJ` to signal when `diffChildren` is invoked in these situations. I can't use `null` // for this purpose, because `null` is a valid value for `oldDom` which can mean to skip to this logic // (e.g. if mounting a new tree in which the old DOM should be ignored (usually for Fragments). if (oldDom == EMPTY_OBJ) { if (excessDomChildren != null) { oldDom = excessDomChildren[0]; } else if (oldChildrenLength) { oldDom = getDomSibling(oldParentVNode, 0); } else { oldDom = null; } } i = 0; newParentVNode._children = toChildArray( newParentVNode._children, childVNode => { if (childVNode != null) { childVNode._parent = newParentVNode; childVNode._depth = newParentVNode._depth + 1; // Check if we find a corresponding element in oldChildren. // If found, delete the array item by setting to `undefined`. // We use `undefined`, as `null` is reserved for empty placeholders // (holes). oldVNode = oldChildren[i]; if ( oldVNode === null || (oldVNode && childVNode.key == oldVNode.key && childVNode.type === oldVNode.type) ) { oldChildren[i] = undefined; } else { // Either oldVNode === undefined or oldChildrenLength > 0, // so after this loop oldVNode == null or oldVNode is a valid value. for (j = 0; j < oldChildrenLength; j++) { oldVNode = oldChildren[j]; // If childVNode is unkeyed, we only match similarly unkeyed nodes, otherwise we match by key. // We always match by type (in either case). 
if ( oldVNode && childVNode.key == oldVNode.key && childVNode.type === oldVNode.type ) { oldChildren[j] = undefined; break; } oldVNode = null; } } oldVNode = oldVNode || EMPTY_OBJ; // Morph the old element into the new one, but don't append it to the dom yet newDom = diff( parentDom, childVNode, oldVNode, context, isSvg, excessDomChildren, commitQueue, oldDom, isHydrating ); if ((j = childVNode.ref) && oldVNode.ref != j) { if (!refs) refs = []; if (oldVNode.ref) refs.push(oldVNode.ref, null, childVNode); refs.push(j, childVNode._component || newDom, childVNode); } // Only proceed if the vnode has not been unmounted by `diff()` above. if (newDom != null) { if (firstChildDom == null) { firstChildDom = newDom; } let nextDom; if (childVNode._nextDom !== undefined) { // Only Fragments or components that return Fragment like VNodes will // have a non-undefined _nextDom. Continue the diff from the sibling // of last DOM child of this child VNode nextDom = childVNode._nextDom; // Eagerly cleanup _nextDom. We don't need to persist the value because // it is only used by `diffChildren` to determine where to resume the diff after // diffing Components and Fragments. Once we store it the nextDOM local var, we // can clean up the property childVNode._nextDom = undefined; } else if ( excessDomChildren == oldVNode || newDom != oldDom || newDom.parentNode == null ) { // NOTE: excessDomChildren==oldVNode above: // This is a compression of excessDomChildren==null && oldVNode==null! // The values only have the same type when `null`. outer: if (oldDom == null || oldDom.parentNode !== parentDom) { parentDom.appendChild(newDom); nextDom = null; } else { // `j<oldChildrenLength; j+=2` is an alternative to `j++<oldChildrenLength/2` for ( sibDom = oldDom, j = 0; (sibDom = sibDom.nextSibling) && j < oldChildrenLength; j += 2 ) { if (sibDom == newDom) { break outer; } } parentDom.insertBefore(newDom, oldDom); nextDom = oldDom; } // Browsers will infer an option's `value` from `textContent` when // no value is present. This essentially bypasses our code to set it // later in `diff()`. It works fine in all browsers except for IE11 // where it breaks setting `select.value`. There it will be always set // to an empty string. Re-applying an options value will fix that, so // there are probably some internal data structures that aren't // updated properly. // // To fix it we make sure to reset the inferred value, so that our own // value check in `diff()` won't be skipped. if (newParentVNode.type == 'option') { parentDom.value = ''; } } // If we have pre-calculated the nextDOM node, use it. Else calculate it now // Strictly check for `undefined` here cuz `null` is a valid value of `nextDom`. // See more detail in create-element.js:createVNode if (nextDom !== undefined) { oldDom = nextDom; } else { oldDom = newDom.nextSibling; } if (typeof newParentVNode.type == 'function') { // Because the newParentVNode is Fragment-like, we need to set it's // _nextDom property to the nextSibling of its last child DOM node. // // `oldDom` contains the correct value here because if the last child // is a Fragment-like, then oldDom has already been set to that child's _nextDom. // If the last child is a DOM VNode, then oldDom will be set to that DOM // node's nextSibling. newParentVNode._nextDom = oldDom; } } } i++; return childVNode; } ); newParentVNode._dom = firstChildDom; // Remove children that are not part of any vnode. 
if (excessDomChildren != null && typeof newParentVNode.type !== 'function') { for (i = excessDomChildren.length; i--; ) { if (excessDomChildren[i] != null) removeNode(excessDomChildren[i]); } } // Remove remaining oldChildren if there are any. for (i = oldChildrenLength; i--; ) { if (oldChildren[i] != null) unmount(oldChildren[i], oldChildren[i]); } // Set refs only after unmount if (refs) { for (i = 0; i < refs.length; i++) { applyRef(refs[i], refs[++i], refs[++i]); } } } /** * Flatten and loop through the children of a virtual node * @param {import('../index').ComponentChildren} children The unflattened * children of a virtual node * @param {(vnode: import('../internal').VNode) => import('../internal').VNode} [callback] * A function to invoke for each child before it is added to the flattened list. * @param {Array<import('../internal').VNode | string | number>} [flattened] An flat array of children to modify * @returns {import('../internal').VNode[]} */ export function toChildArray(children, callback, flattened) { if (flattened == null) flattened = []; if (children == null || typeof children === 'boolean') { if (callback) flattened.push(callback(null)); } else if (Array.isArray(children)) { for (let i = 0; i < children.length; i++) { toChildArray(children[i], callback, flattened); } } else if (!callback) { flattened.push(children); } else if (typeof children === 'string' || typeof children === 'number') { flattened.push(callback(createVNode(null, children, null, null))); } else if (children._dom != null || children._component != null) { flattened.push( callback(createVNode(children.type, children.props, children.key, null)) ); } else { flattened.push(callback(children)); } return flattened; }
1
15,313
Nit: I think it should be `to handle` here.
preactjs-preact
js
@@ -2810,7 +2810,7 @@ public class MessagingController implements Runnable { LocalMessage message = localFolder.getMessage(uid); if (message == null - || message.getId() == 0) { + || message.getId() == 0) { throw new IllegalArgumentException("Message not found: folder=" + folder + ", uid=" + uid); } // IMAP search results will usually need to be downloaded before viewing.
1
package com.fsck.k9.controller; import java.io.CharArrayWriter; import java.io.IOException; import java.io.PrintWriter; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.Date; import java.util.EnumSet; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.Set; import java.util.concurrent.*; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import android.content.ContentResolver; import android.content.Context; import android.content.Intent; import android.content.pm.PackageInfo; import android.database.Cursor; import android.net.Uri; import android.os.Build; import android.os.PowerManager; import android.os.Process; import android.util.Log; import com.fsck.k9.Account; import com.fsck.k9.Account.DeletePolicy; import com.fsck.k9.Account.Expunge; import com.fsck.k9.AccountStats; import com.fsck.k9.K9; import com.fsck.k9.K9.Intents; import com.fsck.k9.Preferences; import com.fsck.k9.R; import com.fsck.k9.activity.MessageReference; import com.fsck.k9.activity.setup.AccountSetupCheckSettings.CheckDirection; import com.fsck.k9.cache.EmailProviderCache; import com.fsck.k9.mail.CertificateValidationException; import com.fsck.k9.mail.power.TracingPowerManager; import com.fsck.k9.mail.power.TracingPowerManager.TracingWakeLock; import com.fsck.k9.mail.Address; import com.fsck.k9.mail.FetchProfile; import com.fsck.k9.mail.Flag; import com.fsck.k9.mail.Folder; import com.fsck.k9.mail.Folder.FolderType; import com.fsck.k9.mail.Message; import com.fsck.k9.mail.Message.RecipientType; import com.fsck.k9.mail.MessagingException; import com.fsck.k9.mail.Part; import com.fsck.k9.mail.PushReceiver; import com.fsck.k9.mail.Pusher; import com.fsck.k9.mail.Store; import com.fsck.k9.mail.Transport; import com.fsck.k9.mail.internet.MessageExtractor; import com.fsck.k9.mail.internet.MimeMessage; import com.fsck.k9.mail.internet.MimeMessageHelper; import com.fsck.k9.mail.internet.MimeUtility; import com.fsck.k9.mail.internet.TextBody; import com.fsck.k9.mailstore.LocalFolder.MoreMessages; import com.fsck.k9.mailstore.MessageRemovalListener; import com.fsck.k9.mail.MessageRetrievalListener; import com.fsck.k9.mailstore.LocalFolder; import com.fsck.k9.mailstore.LocalMessage; import com.fsck.k9.mailstore.LocalStore; import com.fsck.k9.mailstore.LocalStore.PendingCommand; import com.fsck.k9.mail.store.pop3.Pop3Store; import com.fsck.k9.mailstore.UnavailableStorageException; import com.fsck.k9.notification.NotificationController; import com.fsck.k9.provider.EmailProvider; import com.fsck.k9.provider.EmailProvider.StatsColumns; import com.fsck.k9.search.ConditionsTreeNode; import com.fsck.k9.search.LocalSearch; import com.fsck.k9.search.SearchAccount; import com.fsck.k9.search.SearchSpecification; import com.fsck.k9.search.SqlQueryBuilder; /** * Starts a long running (application) Thread that will run through commands * that require remote mailbox access. This class is used to serialize and * prioritize these commands. Each method that will submit a command requires a * MessagingListener instance to be provided. It is expected that that listener * has also been added as a registered listener using addListener(). When a * command is to be executed, if the listener that was provided with the command * is no longer registered the command is skipped. 
The design idea for the above * is that when an Activity starts it registers as a listener. When it is paused * it removes itself. Thus, any commands that that activity submitted are * removed from the queue once the activity is no longer active. */ public class MessagingController implements Runnable { public static final long INVALID_MESSAGE_ID = -1; /** * Immutable empty {@link String} array */ private static final String[] EMPTY_STRING_ARRAY = new String[0]; /** * The maximum message size that we'll consider to be "small". A small message is downloaded * in full immediately instead of in pieces. Anything over this size will be downloaded in * pieces with attachments being left off completely and downloaded on demand. * * * 25k for a "small" message was picked by educated trial and error. * http://answers.google.com/answers/threadview?id=312463 claims that the * average size of an email is 59k, which I feel is too large for our * blind download. The following tests were performed on a download of * 25 random messages. * <pre> * 5k - 61 seconds, * 25k - 51 seconds, * 55k - 53 seconds, * </pre> * So 25k gives good performance and a reasonable data footprint. Sounds good to me. */ private static final String PENDING_COMMAND_MOVE_OR_COPY = "com.fsck.k9.MessagingController.moveOrCopy"; private static final String PENDING_COMMAND_MOVE_OR_COPY_BULK = "com.fsck.k9.MessagingController.moveOrCopyBulk"; private static final String PENDING_COMMAND_MOVE_OR_COPY_BULK_NEW = "com.fsck.k9.MessagingController.moveOrCopyBulkNew"; private static final String PENDING_COMMAND_EMPTY_TRASH = "com.fsck.k9.MessagingController.emptyTrash"; private static final String PENDING_COMMAND_SET_FLAG_BULK = "com.fsck.k9.MessagingController.setFlagBulk"; private static final String PENDING_COMMAND_SET_FLAG = "com.fsck.k9.MessagingController.setFlag"; private static final String PENDING_COMMAND_APPEND = "com.fsck.k9.MessagingController.append"; private static final String PENDING_COMMAND_MARK_ALL_AS_READ = "com.fsck.k9.MessagingController.markAllAsRead"; private static final String PENDING_COMMAND_EXPUNGE = "com.fsck.k9.MessagingController.expunge"; /** * Maximum number of unsynced messages to store at once */ private static final int UNSYNC_CHUNK_SIZE = 5; private static MessagingController inst = null; private BlockingQueue<Command> mCommands = new PriorityBlockingQueue<Command>(); private Thread mThread; private Set<MessagingListener> mListeners = new CopyOnWriteArraySet<MessagingListener>(); private final ConcurrentHashMap<String, AtomicInteger> sendCount = new ConcurrentHashMap<String, AtomicInteger>(); ConcurrentHashMap<Account, Pusher> pushers = new ConcurrentHashMap<Account, Pusher>(); private final ExecutorService threadPool = Executors.newCachedThreadPool(); private MessagingListener checkMailListener = null; private MemorizingListener memorizingListener = new MemorizingListener(); private boolean mBusy; private final Context context; private final NotificationController notificationController; private static final Set<Flag> SYNC_FLAGS = EnumSet.of(Flag.SEEN, Flag.FLAGGED, Flag.ANSWERED, Flag.FORWARDED); private void suppressMessages(Account account, List<LocalMessage> messages) { EmailProviderCache cache = EmailProviderCache.getCache(account.getUuid(), context); cache.hideMessages(messages); } private void unsuppressMessages(Account account, List<? 
extends Message> messages) { EmailProviderCache cache = EmailProviderCache.getCache(account.getUuid(), context); cache.unhideMessages(messages); } private boolean isMessageSuppressed(LocalMessage message) { long messageId = message.getId(); long folderId = message.getFolder().getId(); EmailProviderCache cache = EmailProviderCache.getCache(message.getFolder().getAccountUuid(), context); return cache.isMessageHidden(messageId, folderId); } private void setFlagInCache(final Account account, final List<Long> messageIds, final Flag flag, final boolean newState) { EmailProviderCache cache = EmailProviderCache.getCache(account.getUuid(), context); String columnName = LocalStore.getColumnNameForFlag(flag); String value = Integer.toString((newState) ? 1 : 0); cache.setValueForMessages(messageIds, columnName, value); } private void removeFlagFromCache(final Account account, final List<Long> messageIds, final Flag flag) { EmailProviderCache cache = EmailProviderCache.getCache(account.getUuid(), context); String columnName = LocalStore.getColumnNameForFlag(flag); cache.removeValueForMessages(messageIds, columnName); } private void setFlagForThreadsInCache(final Account account, final List<Long> threadRootIds, final Flag flag, final boolean newState) { EmailProviderCache cache = EmailProviderCache.getCache(account.getUuid(), context); String columnName = LocalStore.getColumnNameForFlag(flag); String value = Integer.toString((newState) ? 1 : 0); cache.setValueForThreads(threadRootIds, columnName, value); } private void removeFlagForThreadsFromCache(final Account account, final List<Long> messageIds, final Flag flag) { EmailProviderCache cache = EmailProviderCache.getCache(account.getUuid(), context); String columnName = LocalStore.getColumnNameForFlag(flag); cache.removeValueForThreads(messageIds, columnName); } private MessagingController(Context context, NotificationController notificationController) { this.context = context; this.notificationController = notificationController; mThread = new Thread(this); mThread.setName("MessagingController"); mThread.start(); if (memorizingListener != null) { addListener(memorizingListener); } } public synchronized static MessagingController getInstance(Context context) { if (inst == null) { Context appContext = context.getApplicationContext(); NotificationController notificationController = NotificationController.newInstance(appContext); inst = new MessagingController(appContext, notificationController); } return inst; } public boolean isBusy() { return mBusy; } @Override public void run() { Process.setThreadPriority(Process.THREAD_PRIORITY_BACKGROUND); while (true) { String commandDescription = null; try { final Command command = mCommands.take(); if (command != null) { commandDescription = command.description; if (K9.DEBUG) Log.i(K9.LOG_TAG, "Running " + (command.isForeground ? "Foreground" : "Background") + " command '" + command.description + "', seq = " + command.sequence); mBusy = true; try { command.runnable.run(); } catch (UnavailableAccountException e) { // retry later new Thread() { @Override public void run() { try { sleep(30 * 1000); mCommands.put(command); } catch (InterruptedException e) { Log.e(K9.LOG_TAG, "interrupted while putting a pending command for" + " an unavailable account back into the queue." + " THIS SHOULD NEVER HAPPEN."); } } } .start(); } if (K9.DEBUG) Log.i(K9.LOG_TAG, (command.isForeground ? 
"Foreground" : "Background") + " Command '" + command.description + "' completed"); for (MessagingListener l : getListeners(command.listener)) { l.controllerCommandCompleted(!mCommands.isEmpty()); } } } catch (Exception e) { Log.e(K9.LOG_TAG, "Error running command '" + commandDescription + "'", e); } mBusy = false; } } private void put(String description, MessagingListener listener, Runnable runnable) { putCommand(mCommands, description, listener, runnable, true); } private void putBackground(String description, MessagingListener listener, Runnable runnable) { putCommand(mCommands, description, listener, runnable, false); } private void putCommand(BlockingQueue<Command> queue, String description, MessagingListener listener, Runnable runnable, boolean isForeground) { int retries = 10; Exception e = null; while (retries-- > 0) { try { Command command = new Command(); command.listener = listener; command.runnable = runnable; command.description = description; command.isForeground = isForeground; queue.put(command); return; } catch (InterruptedException ie) { try { Thread.sleep(200); } catch (InterruptedException ne) { } e = ie; } } throw new Error(e); } public void addListener(MessagingListener listener) { mListeners.add(listener); refreshListener(listener); } public void refreshListener(MessagingListener listener) { if (memorizingListener != null && listener != null) { memorizingListener.refreshOther(listener); } } public void removeListener(MessagingListener listener) { mListeners.remove(listener); } public Set<MessagingListener> getListeners() { return mListeners; } public Set<MessagingListener> getListeners(MessagingListener listener) { if (listener == null) { return mListeners; } Set<MessagingListener> listeners = new HashSet<MessagingListener>(mListeners); listeners.add(listener); return listeners; } /** * Lists folders that are available locally and remotely. This method calls * listFoldersCallback for local folders before it returns, and then for * remote folders at some later point. If there are no local folders * includeRemote is forced by this method. This method should be called from * a Thread as it may take several seconds to list the local folders. * TODO this needs to cache the remote folder list * * @param account * @param listener * @throws MessagingException */ public void listFolders(final Account account, final boolean refreshRemote, final MessagingListener listener) { threadPool.execute(new Runnable() { @Override public void run() { listFoldersSynchronous(account, refreshRemote, listener); } }); } /** * Lists folders that are available locally and remotely. This method calls * listFoldersCallback for local folders before it returns, and then for * remote folders at some later point. If there are no local folders * includeRemote is forced by this method. This method is called in the * foreground. 
* TODO this needs to cache the remote folder list * * @param account * @param listener * @throws MessagingException */ public void listFoldersSynchronous(final Account account, final boolean refreshRemote, final MessagingListener listener) { for (MessagingListener l : getListeners(listener)) { l.listFoldersStarted(account); } List<LocalFolder> localFolders = null; if (!account.isAvailable(context)) { Log.i(K9.LOG_TAG, "not listing folders of unavailable account"); } else { try { LocalStore localStore = account.getLocalStore(); localFolders = localStore.getPersonalNamespaces(false); if (refreshRemote || localFolders.isEmpty()) { doRefreshRemote(account, listener); return; } for (MessagingListener l : getListeners(listener)) { l.listFolders(account, localFolders); } } catch (Exception e) { for (MessagingListener l : getListeners(listener)) { l.listFoldersFailed(account, e.getMessage()); } addErrorMessage(account, null, e); return; } finally { if (localFolders != null) { for (Folder localFolder : localFolders) { closeFolder(localFolder); } } } } for (MessagingListener l : getListeners(listener)) { l.listFoldersFinished(account); } } private void doRefreshRemote(final Account account, final MessagingListener listener) { put("doRefreshRemote", listener, new Runnable() { @Override public void run() { List<LocalFolder> localFolders = null; try { Store store = account.getRemoteStore(); List <? extends Folder > remoteFolders = store.getPersonalNamespaces(false); LocalStore localStore = account.getLocalStore(); Set<String> remoteFolderNames = new HashSet<String>(); List<LocalFolder> foldersToCreate = new LinkedList<LocalFolder>(); localFolders = localStore.getPersonalNamespaces(false); Set<String> localFolderNames = new HashSet<String>(); for (Folder localFolder : localFolders) { localFolderNames.add(localFolder.getName()); } for (Folder remoteFolder : remoteFolders) { if (localFolderNames.contains(remoteFolder.getName()) == false) { LocalFolder localFolder = localStore.getFolder(remoteFolder.getName()); foldersToCreate.add(localFolder); } remoteFolderNames.add(remoteFolder.getName()); } localStore.createFolders(foldersToCreate, account.getDisplayCount()); localFolders = localStore.getPersonalNamespaces(false); /* * Clear out any folders that are no longer on the remote store. */ for (Folder localFolder : localFolders) { String localFolderName = localFolder.getName(); // FIXME: This is a hack used to clean up when we accidentally created the // special placeholder folder "-NONE-". 
if (K9.FOLDER_NONE.equals(localFolderName)) { localFolder.delete(false); } if (!account.isSpecialFolder(localFolderName) && !remoteFolderNames.contains(localFolderName)) { localFolder.delete(false); } } localFolders = localStore.getPersonalNamespaces(false); for (MessagingListener l : getListeners(listener)) { l.listFolders(account, localFolders); } for (MessagingListener l : getListeners(listener)) { l.listFoldersFinished(account); } } catch (Exception e) { for (MessagingListener l : getListeners(listener)) { l.listFoldersFailed(account, ""); } addErrorMessage(account, null, e); } finally { if (localFolders != null) { for (Folder localFolder : localFolders) { closeFolder(localFolder); } } } } }); } /** * Find all messages in any local account which match the query 'query' * @throws MessagingException */ public void searchLocalMessages(final LocalSearch search, final MessagingListener listener) { threadPool.execute(new Runnable() { @Override public void run() { searchLocalMessagesSynchronous(search, listener); } }); } public void searchLocalMessagesSynchronous(final LocalSearch search, final MessagingListener listener) { final AccountStats stats = new AccountStats(); final Set<String> uuidSet = new HashSet<String>(Arrays.asList(search.getAccountUuids())); List<Account> accounts = Preferences.getPreferences(context).getAccounts(); boolean allAccounts = uuidSet.contains(SearchSpecification.ALL_ACCOUNTS); // for every account we want to search do the query in the localstore for (final Account account : accounts) { if (!allAccounts && !uuidSet.contains(account.getUuid())) { continue; } // Collecting statistics of the search result MessageRetrievalListener retrievalListener = new MessageRetrievalListener<LocalMessage>() { @Override public void messageStarted(String message, int number, int ofTotal) {} @Override public void messagesFinished(int number) {} @Override public void messageFinished(LocalMessage message, int number, int ofTotal) { if (!isMessageSuppressed(message)) { List<LocalMessage> messages = new ArrayList<LocalMessage>(); messages.add(message); stats.unreadMessageCount += (!message.isSet(Flag.SEEN)) ? 1 : 0; stats.flaggedMessageCount += (message.isSet(Flag.FLAGGED)) ? 
1 : 0; if (listener != null) { listener.listLocalMessagesAddMessages(account, null, messages); } } } }; // alert everyone the search has started if (listener != null) { listener.listLocalMessagesStarted(account, null); } // build and do the query in the localstore try { LocalStore localStore = account.getLocalStore(); localStore.searchForMessages(retrievalListener, search); } catch (Exception e) { if (listener != null) { listener.listLocalMessagesFailed(account, null, e.getMessage()); } addErrorMessage(account, null, e); } finally { if (listener != null) { listener.listLocalMessagesFinished(account, null); } } } // publish the total search statistics if (listener != null) { listener.searchStats(stats); } } public Future<?> searchRemoteMessages(final String acctUuid, final String folderName, final String query, final Set<Flag> requiredFlags, final Set<Flag> forbiddenFlags, final MessagingListener listener) { if (K9.DEBUG) { String msg = "searchRemoteMessages (" + "acct=" + acctUuid + ", folderName = " + folderName + ", query = " + query + ")"; Log.i(K9.LOG_TAG, msg); } return threadPool.submit(new Runnable() { @Override public void run() { searchRemoteMessagesSynchronous(acctUuid, folderName, query, requiredFlags, forbiddenFlags, listener); } }); } public void searchRemoteMessagesSynchronous(final String acctUuid, final String folderName, final String query, final Set<Flag> requiredFlags, final Set<Flag> forbiddenFlags, final MessagingListener listener) { final Account acct = Preferences.getPreferences(context).getAccount(acctUuid); if (listener != null) { listener.remoteSearchStarted(folderName); } List<Message> extraResults = new ArrayList<Message>(); try { Store remoteStore = acct.getRemoteStore(); LocalStore localStore = acct.getLocalStore(); if (remoteStore == null || localStore == null) { throw new MessagingException("Could not get store"); } Folder remoteFolder = remoteStore.getFolder(folderName); LocalFolder localFolder = localStore.getFolder(folderName); if (remoteFolder == null || localFolder == null) { throw new MessagingException("Folder not found"); } List<Message> messages = remoteFolder.search(query, requiredFlags, forbiddenFlags); if (K9.DEBUG) { Log.i("Remote Search", "Remote search got " + messages.size() + " results"); } // There's no need to fetch messages already completely downloaded List<Message> remoteMessages = localFolder.extractNewMessages(messages); messages.clear(); if (listener != null) { listener.remoteSearchServerQueryComplete(folderName, remoteMessages.size(), acct.getRemoteSearchNumResults()); } Collections.sort(remoteMessages, new UidReverseComparator()); int resultLimit = acct.getRemoteSearchNumResults(); if (resultLimit > 0 && remoteMessages.size() > resultLimit) { extraResults = remoteMessages.subList(resultLimit, remoteMessages.size()); remoteMessages = remoteMessages.subList(0, resultLimit); } loadSearchResultsSynchronous(remoteMessages, localFolder, remoteFolder, listener); } catch (Exception e) { if (Thread.currentThread().isInterrupted()) { Log.i(K9.LOG_TAG, "Caught exception on aborted remote search; safe to ignore.", e); } else { Log.e(K9.LOG_TAG, "Could not complete remote search", e); if (listener != null) { listener.remoteSearchFailed(null, e.getMessage()); } addErrorMessage(acct, null, e); } } finally { if (listener != null) { listener.remoteSearchFinished(folderName, 0, acct.getRemoteSearchNumResults(), extraResults); } } } public void loadSearchResults(final Account account, final String folderName, final List<Message> messages, final 
MessagingListener listener) { threadPool.execute(new Runnable() { @Override public void run() { if (listener != null) { listener.enableProgressIndicator(true); } try { Store remoteStore = account.getRemoteStore(); LocalStore localStore = account.getLocalStore(); if (remoteStore == null || localStore == null) { throw new MessagingException("Could not get store"); } Folder remoteFolder = remoteStore.getFolder(folderName); LocalFolder localFolder = localStore.getFolder(folderName); if (remoteFolder == null || localFolder == null) { throw new MessagingException("Folder not found"); } loadSearchResultsSynchronous(messages, localFolder, remoteFolder, listener); } catch (MessagingException e) { Log.e(K9.LOG_TAG, "Exception in loadSearchResults: " + e); addErrorMessage(account, null, e); } finally { if (listener != null) { listener.enableProgressIndicator(false); } } } }); } public void loadSearchResultsSynchronous(List<Message> messages, LocalFolder localFolder, Folder remoteFolder, MessagingListener listener) throws MessagingException { final FetchProfile header = new FetchProfile(); header.add(FetchProfile.Item.FLAGS); header.add(FetchProfile.Item.ENVELOPE); final FetchProfile structure = new FetchProfile(); structure.add(FetchProfile.Item.STRUCTURE); int i = 0; for (Message message : messages) { i++; LocalMessage localMsg = localFolder.getMessage(message.getUid()); if (localMsg == null) { remoteFolder.fetch(Collections.singletonList(message), header, null); //fun fact: ImapFolder.fetch can't handle getting STRUCTURE at same time as headers remoteFolder.fetch(Collections.singletonList(message), structure, null); localFolder.appendMessages(Collections.singletonList(message)); localMsg = localFolder.getMessage(message.getUid()); } if (listener != null) { listener.remoteSearchAddMessage(remoteFolder.getName(), localMsg, i, messages.size()); } } } public void loadMoreMessages(Account account, String folder, MessagingListener listener) { try { LocalStore localStore = account.getLocalStore(); LocalFolder localFolder = localStore.getFolder(folder); if (localFolder.getVisibleLimit() > 0) { localFolder.setVisibleLimit(localFolder.getVisibleLimit() + account.getDisplayCount()); } synchronizeMailbox(account, folder, listener, null); } catch (MessagingException me) { addErrorMessage(account, null, me); throw new RuntimeException("Unable to set visible limit on folder", me); } } public void resetVisibleLimits(Collection<Account> accounts) { for (Account account : accounts) { account.resetVisibleLimits(); } } /** * Start background synchronization of the specified folder. * @param account * @param folder * @param listener * @param providedRemoteFolder TODO */ public void synchronizeMailbox(final Account account, final String folder, final MessagingListener listener, final Folder providedRemoteFolder) { putBackground("synchronizeMailbox", listener, new Runnable() { @Override public void run() { synchronizeMailboxSynchronous(account, folder, listener, providedRemoteFolder); } }); } /** * Start foreground synchronization of the specified folder. This is generally only called * by synchronizeMailbox. * @param account * @param folder * * TODO Break this method up into smaller chunks. 
* @param providedRemoteFolder TODO */ private void synchronizeMailboxSynchronous(final Account account, final String folder, final MessagingListener listener, Folder providedRemoteFolder) { Folder remoteFolder = null; LocalFolder tLocalFolder = null; if (K9.DEBUG) Log.i(K9.LOG_TAG, "Synchronizing folder " + account.getDescription() + ":" + folder); for (MessagingListener l : getListeners(listener)) { l.synchronizeMailboxStarted(account, folder); } /* * We don't ever sync the Outbox or errors folder */ if (folder.equals(account.getOutboxFolderName()) || folder.equals(account.getErrorFolderName())) { for (MessagingListener l : getListeners(listener)) { l.synchronizeMailboxFinished(account, folder, 0, 0); } return; } Exception commandException = null; try { if (K9.DEBUG) Log.d(K9.LOG_TAG, "SYNC: About to process pending commands for account " + account.getDescription()); try { processPendingCommandsSynchronous(account); } catch (Exception e) { addErrorMessage(account, null, e); Log.e(K9.LOG_TAG, "Failure processing command, but allow message sync attempt", e); commandException = e; } /* * Get the message list from the local store and create an index of * the uids within the list. */ if (K9.DEBUG) Log.v(K9.LOG_TAG, "SYNC: About to get local folder " + folder); final LocalStore localStore = account.getLocalStore(); tLocalFolder = localStore.getFolder(folder); final LocalFolder localFolder = tLocalFolder; localFolder.open(Folder.OPEN_MODE_RW); localFolder.updateLastUid(); List<? extends Message> localMessages = localFolder.getMessages(null); Map<String, Message> localUidMap = new HashMap<String, Message>(); for (Message message : localMessages) { localUidMap.put(message.getUid(), message); } if (providedRemoteFolder != null) { if (K9.DEBUG) Log.v(K9.LOG_TAG, "SYNC: using providedRemoteFolder " + folder); remoteFolder = providedRemoteFolder; } else { Store remoteStore = account.getRemoteStore(); if (K9.DEBUG) Log.v(K9.LOG_TAG, "SYNC: About to get remote folder " + folder); remoteFolder = remoteStore.getFolder(folder); if (! verifyOrCreateRemoteSpecialFolder(account, folder, remoteFolder, listener)) { return; } /* * Synchronization process: * Open the folder Upload any local messages that are marked as PENDING_UPLOAD (Drafts, Sent, Trash) Get the message count Get the list of the newest K9.DEFAULT_VISIBLE_LIMIT messages getMessages(messageCount - K9.DEFAULT_VISIBLE_LIMIT, messageCount) See if we have each message locally, if not fetch it's flags and envelope Get and update the unread count for the folder Update the remote flags of any messages we have locally with an internal date newer than the remote message. Get the current flags for any messages we have locally but did not just download Update local flags For any message we have locally but not remotely, delete the local message to keep cache clean. Download larger parts of any new messages. (Optional) Download small attachments in the background. */ /* * Open the remote folder. This pre-loads certain metadata like message count. */ if (K9.DEBUG) Log.v(K9.LOG_TAG, "SYNC: About to open remote folder " + folder); remoteFolder.open(Folder.OPEN_MODE_RW); if (Expunge.EXPUNGE_ON_POLL == account.getExpungePolicy()) { if (K9.DEBUG) Log.d(K9.LOG_TAG, "SYNC: Expunging folder " + account.getDescription() + ":" + folder); remoteFolder.expunge(); } } /* * Get the remote message count. 
*/ int remoteMessageCount = remoteFolder.getMessageCount(); int visibleLimit = localFolder.getVisibleLimit(); if (visibleLimit < 0) { visibleLimit = K9.DEFAULT_VISIBLE_LIMIT; } final List<Message> remoteMessages = new ArrayList<Message>(); Map<String, Message> remoteUidMap = new HashMap<String, Message>(); if (K9.DEBUG) Log.v(K9.LOG_TAG, "SYNC: Remote message count for folder " + folder + " is " + remoteMessageCount); final Date earliestDate = account.getEarliestPollDate(); int remoteStart; if (remoteMessageCount > 0) { /* Message numbers start at 1. */ if (visibleLimit > 0) { remoteStart = Math.max(0, remoteMessageCount - visibleLimit) + 1; } else { remoteStart = 1; } int remoteEnd = remoteMessageCount; if (K9.DEBUG) Log.v(K9.LOG_TAG, "SYNC: About to get messages " + remoteStart + " through " + remoteEnd + " for folder " + folder); final AtomicInteger headerProgress = new AtomicInteger(0); for (MessagingListener l : getListeners(listener)) { l.synchronizeMailboxHeadersStarted(account, folder); } List<? extends Message> remoteMessageArray = remoteFolder.getMessages(remoteStart, remoteEnd, earliestDate, null); int messageCount = remoteMessageArray.size(); for (Message thisMess : remoteMessageArray) { headerProgress.incrementAndGet(); for (MessagingListener l : getListeners(listener)) { l.synchronizeMailboxHeadersProgress(account, folder, headerProgress.get(), messageCount); } Message localMessage = localUidMap.get(thisMess.getUid()); if (localMessage == null || !localMessage.olderThan(earliestDate)) { remoteMessages.add(thisMess); remoteUidMap.put(thisMess.getUid(), thisMess); } } if (K9.DEBUG) Log.v(K9.LOG_TAG, "SYNC: Got " + remoteUidMap.size() + " messages for folder " + folder); for (MessagingListener l : getListeners(listener)) { l.synchronizeMailboxHeadersFinished(account, folder, headerProgress.get(), remoteUidMap.size()); } } else { throw new Exception("Message count " + remoteMessageCount + " for folder " + folder); } /* * Remove any messages that are in the local store but no longer on the remote store or are too old */ MoreMessages moreMessages = localFolder.getMoreMessages(); if (account.syncRemoteDeletions()) { List<Message> destroyMessages = new ArrayList<Message>(); for (Message localMessage : localMessages) { if (remoteUidMap.get(localMessage.getUid()) == null) { destroyMessages.add(localMessage); } } if (!destroyMessages.isEmpty()) { moreMessages = MoreMessages.UNKNOWN; localFolder.destroyMessages(destroyMessages); for (Message destroyMessage : destroyMessages) { for (MessagingListener l : getListeners(listener)) { l.synchronizeMailboxRemovedMessage(account, folder, destroyMessage); } } } } localMessages = null; if (moreMessages == MoreMessages.UNKNOWN) { updateMoreMessages(remoteFolder, localFolder, earliestDate, remoteStart); } /* * Now we download the actual content of messages. */ int newMessages = downloadMessages(account, remoteFolder, localFolder, remoteMessages, false); int unreadMessageCount = localFolder.getUnreadMessageCount(); for (MessagingListener l : getListeners()) { l.folderStatusChanged(account, folder, unreadMessageCount); } /* Notify listeners that we're finally done. 
*/ localFolder.setLastChecked(System.currentTimeMillis()); localFolder.setStatus(null); if (K9.DEBUG) Log.d(K9.LOG_TAG, "Done synchronizing folder " + account.getDescription() + ":" + folder + " @ " + new Date() + " with " + newMessages + " new messages"); for (MessagingListener l : getListeners(listener)) { l.synchronizeMailboxFinished(account, folder, remoteMessageCount, newMessages); } if (commandException != null) { String rootMessage = getRootCauseMessage(commandException); Log.e(K9.LOG_TAG, "Root cause failure in " + account.getDescription() + ":" + tLocalFolder.getName() + " was '" + rootMessage + "'"); localFolder.setStatus(rootMessage); for (MessagingListener l : getListeners(listener)) { l.synchronizeMailboxFailed(account, folder, rootMessage); } } if (K9.DEBUG) Log.i(K9.LOG_TAG, "Done synchronizing folder " + account.getDescription() + ":" + folder); } catch (Exception e) { Log.e(K9.LOG_TAG, "synchronizeMailbox", e); // If we don't set the last checked, it can try too often during // failure conditions String rootMessage = getRootCauseMessage(e); if (tLocalFolder != null) { try { tLocalFolder.setStatus(rootMessage); tLocalFolder.setLastChecked(System.currentTimeMillis()); } catch (MessagingException me) { Log.e(K9.LOG_TAG, "Could not set last checked on folder " + account.getDescription() + ":" + tLocalFolder.getName(), e); } } for (MessagingListener l : getListeners(listener)) { l.synchronizeMailboxFailed(account, folder, rootMessage); } notifyUserIfCertificateProblem(account, e, true); addErrorMessage(account, null, e); Log.e(K9.LOG_TAG, "Failed synchronizing folder " + account.getDescription() + ":" + folder + " @ " + new Date()); } finally { if (providedRemoteFolder == null) { closeFolder(remoteFolder); } closeFolder(tLocalFolder); } } private void updateMoreMessages(Folder remoteFolder, LocalFolder localFolder, Date earliestDate, int remoteStart) throws MessagingException, IOException { if (remoteStart == 1) { localFolder.setMoreMessages(MoreMessages.FALSE); } else { boolean moreMessagesAvailable = remoteFolder.areMoreMessagesAvailable(remoteStart, earliestDate); MoreMessages newMoreMessages = (moreMessagesAvailable) ? MoreMessages.TRUE : MoreMessages.FALSE; localFolder.setMoreMessages(newMoreMessages); } } private void closeFolder(Folder f) { if (f != null) { f.close(); } } /* * If the folder is a "special" folder we need to see if it exists * on the remote server. It if does not exist we'll try to create it. If we * can't create we'll abort. This will happen on every single Pop3 folder as * designed and on Imap folders during error conditions. This allows us * to treat Pop3 and Imap the same in this code. */ private boolean verifyOrCreateRemoteSpecialFolder(final Account account, final String folder, final Folder remoteFolder, final MessagingListener listener) throws MessagingException { if (folder.equals(account.getTrashFolderName()) || folder.equals(account.getSentFolderName()) || folder.equals(account.getDraftsFolderName())) { if (!remoteFolder.exists()) { if (!remoteFolder.create(FolderType.HOLDS_MESSAGES)) { for (MessagingListener l : getListeners(listener)) { l.synchronizeMailboxFinished(account, folder, 0, 0); } if (K9.DEBUG) Log.i(K9.LOG_TAG, "Done synchronizing folder " + folder); return false; } } } return true; } /** * Fetches the messages described by inputMessages from the remote store and writes them to * local storage. * * @param account * The account the remote store belongs to. * @param remoteFolder * The remote folder to download messages from. 
* @param localFolder * The {@link LocalFolder} instance corresponding to the remote folder. * @param inputMessages * A list of messages objects that store the UIDs of which messages to download. * @param flagSyncOnly * Only flags will be fetched from the remote store if this is {@code true}. * * @return The number of downloaded messages that are not flagged as {@link Flag#SEEN}. * * @throws MessagingException */ private int downloadMessages(final Account account, final Folder remoteFolder, final LocalFolder localFolder, List<Message> inputMessages, boolean flagSyncOnly) throws MessagingException { final Date earliestDate = account.getEarliestPollDate(); Date downloadStarted = new Date(); // now if (earliestDate != null) { if (K9.DEBUG) { Log.d(K9.LOG_TAG, "Only syncing messages after " + earliestDate); } } final String folder = remoteFolder.getName(); int unreadBeforeStart = 0; try { AccountStats stats = account.getStats(context); unreadBeforeStart = stats.unreadMessageCount; } catch (MessagingException e) { Log.e(K9.LOG_TAG, "Unable to getUnreadMessageCount for account: " + account, e); } List<Message> syncFlagMessages = new ArrayList<Message>(); List<Message> unsyncedMessages = new ArrayList<Message>(); final AtomicInteger newMessages = new AtomicInteger(0); List<Message> messages = new ArrayList<Message>(inputMessages); for (Message message : messages) { evaluateMessageForDownload(message, folder, localFolder, remoteFolder, account, unsyncedMessages, syncFlagMessages , flagSyncOnly); } final AtomicInteger progress = new AtomicInteger(0); final int todo = unsyncedMessages.size() + syncFlagMessages.size(); for (MessagingListener l : getListeners()) { l.synchronizeMailboxProgress(account, folder, progress.get(), todo); } if (K9.DEBUG) Log.d(K9.LOG_TAG, "SYNC: Have " + unsyncedMessages.size() + " unsynced messages"); messages.clear(); final List<Message> largeMessages = new ArrayList<Message>(); final List<Message> smallMessages = new ArrayList<Message>(); if (!unsyncedMessages.isEmpty()) { /* * Reverse the order of the messages. Depending on the server this may get us * fetch results for newest to oldest. If not, no harm done. */ Collections.sort(unsyncedMessages, new UidReverseComparator()); int visibleLimit = localFolder.getVisibleLimit(); int listSize = unsyncedMessages.size(); if ((visibleLimit > 0) && (listSize > visibleLimit)) { unsyncedMessages = unsyncedMessages.subList(0, visibleLimit); } FetchProfile fp = new FetchProfile(); if (remoteFolder.supportsFetchingFlags()) { fp.add(FetchProfile.Item.FLAGS); } fp.add(FetchProfile.Item.ENVELOPE); if (K9.DEBUG) Log.d(K9.LOG_TAG, "SYNC: About to fetch " + unsyncedMessages.size() + " unsynced messages for folder " + folder); fetchUnsyncedMessages(account, remoteFolder, unsyncedMessages, smallMessages, largeMessages, progress, todo, fp); String updatedPushState = localFolder.getPushState(); for (Message message : unsyncedMessages) { String newPushState = remoteFolder.getNewPushState(updatedPushState, message); if (newPushState != null) { updatedPushState = newPushState; } } localFolder.setPushState(updatedPushState); if (K9.DEBUG) { Log.d(K9.LOG_TAG, "SYNC: Synced unsynced messages for folder " + folder); } } if (K9.DEBUG) Log.d(K9.LOG_TAG, "SYNC: Have " + largeMessages.size() + " large messages and " + smallMessages.size() + " small messages out of " + unsyncedMessages.size() + " unsynced messages"); unsyncedMessages.clear(); /* * Grab the content of the small messages first. 
This is going to * be very fast and at very worst will be a single up of a few bytes and a single * download of 625k. */ FetchProfile fp = new FetchProfile(); fp.add(FetchProfile.Item.BODY); // fp.add(FetchProfile.Item.FLAGS); // fp.add(FetchProfile.Item.ENVELOPE); downloadSmallMessages(account, remoteFolder, localFolder, smallMessages, progress, unreadBeforeStart, newMessages, todo, fp); smallMessages.clear(); /* * Now do the large messages that require more round trips. */ fp.clear(); fp.add(FetchProfile.Item.STRUCTURE); downloadLargeMessages(account, remoteFolder, localFolder, largeMessages, progress, unreadBeforeStart, newMessages, todo, fp); largeMessages.clear(); /* * Refresh the flags for any messages in the local store that we didn't just * download. */ refreshLocalMessageFlags(account, remoteFolder, localFolder, syncFlagMessages, progress, todo); if (K9.DEBUG) Log.d(K9.LOG_TAG, "SYNC: Synced remote messages for folder " + folder + ", " + newMessages.get() + " new messages"); localFolder.purgeToVisibleLimit(new MessageRemovalListener() { @Override public void messageRemoved(Message message) { for (MessagingListener l : getListeners()) { l.synchronizeMailboxRemovedMessage(account, folder, message); } } }); // If the oldest message seen on this sync is newer than // the oldest message seen on the previous sync, then // we want to move our high-water mark forward // this is all here just for pop which only syncs inbox // this would be a little wrong for IMAP (we'd want a folder-level pref, not an account level pref.) // fortunately, we just don't care. Long oldestMessageTime = localFolder.getOldestMessageDate(); if (oldestMessageTime != null) { Date oldestExtantMessage = new Date(oldestMessageTime); if (oldestExtantMessage.before(downloadStarted) && oldestExtantMessage.after(new Date(account.getLatestOldMessageSeenTime()))) { account.setLatestOldMessageSeenTime(oldestExtantMessage.getTime()); account.save(Preferences.getPreferences(context)); } } return newMessages.get(); } private void evaluateMessageForDownload(final Message message, final String folder, final LocalFolder localFolder, final Folder remoteFolder, final Account account, final List<Message> unsyncedMessages, final List<Message> syncFlagMessages, boolean flagSyncOnly) throws MessagingException { if (message.isSet(Flag.DELETED)) { syncFlagMessages.add(message); return; } Message localMessage = localFolder.getMessage(message.getUid()); if (localMessage == null) { if (!flagSyncOnly) { if (!message.isSet(Flag.X_DOWNLOADED_FULL) && !message.isSet(Flag.X_DOWNLOADED_PARTIAL)) { if (K9.DEBUG) Log.v(K9.LOG_TAG, "Message with uid " + message.getUid() + " has not yet been downloaded"); unsyncedMessages.add(message); } else { if (K9.DEBUG) Log.v(K9.LOG_TAG, "Message with uid " + message.getUid() + " is partially or fully downloaded"); // Store the updated message locally localFolder.appendMessages(Collections.singletonList(message)); localMessage = localFolder.getMessage(message.getUid()); localMessage.setFlag(Flag.X_DOWNLOADED_FULL, message.isSet(Flag.X_DOWNLOADED_FULL)); localMessage.setFlag(Flag.X_DOWNLOADED_PARTIAL, message.isSet(Flag.X_DOWNLOADED_PARTIAL)); for (MessagingListener l : getListeners()) { l.synchronizeMailboxAddOrUpdateMessage(account, folder, localMessage); if (!localMessage.isSet(Flag.SEEN)) { l.synchronizeMailboxNewMessage(account, folder, localMessage); } } } } } else if (!localMessage.isSet(Flag.DELETED)) { if (K9.DEBUG) Log.v(K9.LOG_TAG, "Message with uid " + message.getUid() + " is present in the local 
store"); if (!localMessage.isSet(Flag.X_DOWNLOADED_FULL) && !localMessage.isSet(Flag.X_DOWNLOADED_PARTIAL)) { if (K9.DEBUG) Log.v(K9.LOG_TAG, "Message with uid " + message.getUid() + " is not downloaded, even partially; trying again"); unsyncedMessages.add(message); } else { String newPushState = remoteFolder.getNewPushState(localFolder.getPushState(), message); if (newPushState != null) { localFolder.setPushState(newPushState); } syncFlagMessages.add(message); } } } private <T extends Message> void fetchUnsyncedMessages(final Account account, final Folder<T> remoteFolder, List<T> unsyncedMessages, final List<Message> smallMessages, final List<Message> largeMessages, final AtomicInteger progress, final int todo, FetchProfile fp) throws MessagingException { final String folder = remoteFolder.getName(); final Date earliestDate = account.getEarliestPollDate(); remoteFolder.fetch(unsyncedMessages, fp, new MessageRetrievalListener<T>() { @Override public void messageFinished(T message, int number, int ofTotal) { try { if (message.isSet(Flag.DELETED) || message.olderThan(earliestDate)) { if (K9.DEBUG) { if (message.isSet(Flag.DELETED)) { Log.v(K9.LOG_TAG, "Newly downloaded message " + account + ":" + folder + ":" + message.getUid() + " was marked deleted on server, skipping"); } else { Log.d(K9.LOG_TAG, "Newly downloaded message " + message.getUid() + " is older than " + earliestDate + ", skipping"); } } progress.incrementAndGet(); for (MessagingListener l : getListeners()) { l.synchronizeMailboxProgress(account, folder, progress.get(), todo); } return; } if (account.getMaximumAutoDownloadMessageSize() > 0 && message.getSize() > account.getMaximumAutoDownloadMessageSize()) { largeMessages.add(message); } else { smallMessages.add(message); } } catch (Exception e) { Log.e(K9.LOG_TAG, "Error while storing downloaded message.", e); addErrorMessage(account, null, e); } } @Override public void messageStarted(String uid, int number, int ofTotal) {} @Override public void messagesFinished(int total) { // FIXME this method is almost never invoked by various Stores! Don't rely on it unless fixed!! 
} }); } private boolean shouldImportMessage(final Account account, final String folder, final Message message, final AtomicInteger progress, final Date earliestDate) { if (account.isSearchByDateCapable() && message.olderThan(earliestDate)) { if (K9.DEBUG) { Log.d(K9.LOG_TAG, "Message " + message.getUid() + " is older than " + earliestDate + ", hence not saving"); } return false; } return true; } private <T extends Message> void downloadSmallMessages(final Account account, final Folder<T> remoteFolder, final LocalFolder localFolder, List<T> smallMessages, final AtomicInteger progress, final int unreadBeforeStart, final AtomicInteger newMessages, final int todo, FetchProfile fp) throws MessagingException { final String folder = remoteFolder.getName(); final Date earliestDate = account.getEarliestPollDate(); if (K9.DEBUG) Log.d(K9.LOG_TAG, "SYNC: Fetching small messages for folder " + folder); remoteFolder.fetch(smallMessages, fp, new MessageRetrievalListener<T>() { @Override public void messageFinished(final T message, int number, int ofTotal) { try { if (!shouldImportMessage(account, folder, message, progress, earliestDate)) { progress.incrementAndGet(); return; } // Store the updated message locally final LocalMessage localMessage = localFolder.storeSmallMessage(message, new Runnable() { @Override public void run() { progress.incrementAndGet(); } }); // Increment the number of "new messages" if the newly downloaded message is // not marked as read. if (!localMessage.isSet(Flag.SEEN)) { newMessages.incrementAndGet(); } if (K9.DEBUG) Log.v(K9.LOG_TAG, "About to notify listeners that we got a new small message " + account + ":" + folder + ":" + message.getUid()); // Update the listener with what we've found for (MessagingListener l : getListeners()) { l.synchronizeMailboxAddOrUpdateMessage(account, folder, localMessage); l.synchronizeMailboxProgress(account, folder, progress.get(), todo); if (!localMessage.isSet(Flag.SEEN)) { l.synchronizeMailboxNewMessage(account, folder, localMessage); } } // Send a notification of this message if (shouldNotifyForMessage(account, localFolder, message)) { // Notify with the localMessage so that we don't have to recalculate the content preview. 
notificationController.addNewMailNotification(account, localMessage, unreadBeforeStart); } } catch (MessagingException me) { addErrorMessage(account, null, me); Log.e(K9.LOG_TAG, "SYNC: fetch small messages", me); } } @Override public void messageStarted(String uid, int number, int ofTotal) {} @Override public void messagesFinished(int total) {} }); if (K9.DEBUG) Log.d(K9.LOG_TAG, "SYNC: Done fetching small messages for folder " + folder); } private <T extends Message> void downloadLargeMessages(final Account account, final Folder<T> remoteFolder, final LocalFolder localFolder, List<T> largeMessages, final AtomicInteger progress, final int unreadBeforeStart, final AtomicInteger newMessages, final int todo, FetchProfile fp) throws MessagingException { final String folder = remoteFolder.getName(); final Date earliestDate = account.getEarliestPollDate(); if (K9.DEBUG) Log.d(K9.LOG_TAG, "SYNC: Fetching large messages for folder " + folder); remoteFolder.fetch(largeMessages, fp, null); for (T message : largeMessages) { if (!shouldImportMessage(account, folder, message, progress, earliestDate)) { progress.incrementAndGet(); continue; } if (message.getBody() == null) { /* * The provider was unable to get the structure of the message, so * we'll download a reasonable portion of the messge and mark it as * incomplete so the entire thing can be downloaded later if the user * wishes to download it. */ fp.clear(); fp.add(FetchProfile.Item.BODY_SANE); /* * TODO a good optimization here would be to make sure that all Stores set * the proper size after this fetch and compare the before and after size. If * they equal we can mark this SYNCHRONIZED instead of PARTIALLY_SYNCHRONIZED */ remoteFolder.fetch(Collections.singletonList(message), fp, null); // Store the updated message locally localFolder.appendMessages(Collections.singletonList(message)); Message localMessage = localFolder.getMessage(message.getUid()); // Certain (POP3) servers give you the whole message even when you ask for only the first x Kb if (!message.isSet(Flag.X_DOWNLOADED_FULL)) { /* * Mark the message as fully downloaded if the message size is smaller than * the account's autodownload size limit, otherwise mark as only a partial * download. This will prevent the system from downloading the same message * twice. * * If there is no limit on autodownload size, that's the same as the message * being smaller than the max size */ if (account.getMaximumAutoDownloadMessageSize() == 0 || message.getSize() < account.getMaximumAutoDownloadMessageSize()) { localMessage.setFlag(Flag.X_DOWNLOADED_FULL, true); } else { // Set a flag indicating that the message has been partially downloaded and // is ready for view. localMessage.setFlag(Flag.X_DOWNLOADED_PARTIAL, true); } } } else { /* * We have a structure to deal with, from which * we can pull down the parts we want to actually store. * Build a list of parts we are interested in. Text parts will be downloaded * right now, attachments will be left for later. */ Set<Part> viewables = MessageExtractor.collectTextParts(message); /* * Now download the parts we're interested in storing. */ for (Part part : viewables) { remoteFolder.fetchPart(message, part, null); } // Store the updated message locally localFolder.appendMessages(Collections.singletonList(message)); Message localMessage = localFolder.getMessage(message.getUid()); // Set a flag indicating this message has been fully downloaded and can be // viewed. 
localMessage.setFlag(Flag.X_DOWNLOADED_PARTIAL, true); } if (K9.DEBUG) Log.v(K9.LOG_TAG, "About to notify listeners that we got a new large message " + account + ":" + folder + ":" + message.getUid()); // Update the listener with what we've found progress.incrementAndGet(); // TODO do we need to re-fetch this here? LocalMessage localMessage = localFolder.getMessage(message.getUid()); // Increment the number of "new messages" if the newly downloaded message is // not marked as read. if (!localMessage.isSet(Flag.SEEN)) { newMessages.incrementAndGet(); } for (MessagingListener l : getListeners()) { l.synchronizeMailboxAddOrUpdateMessage(account, folder, localMessage); l.synchronizeMailboxProgress(account, folder, progress.get(), todo); if (!localMessage.isSet(Flag.SEEN)) { l.synchronizeMailboxNewMessage(account, folder, localMessage); } } // Send a notification of this message if (shouldNotifyForMessage(account, localFolder, message)) { // Notify with the localMessage so that we don't have to recalculate the content preview. notificationController.addNewMailNotification(account, localMessage, unreadBeforeStart); } }//for large messages if (K9.DEBUG) Log.d(K9.LOG_TAG, "SYNC: Done fetching large messages for folder " + folder); } private void refreshLocalMessageFlags(final Account account, final Folder remoteFolder, final LocalFolder localFolder, List<Message> syncFlagMessages, final AtomicInteger progress, final int todo ) throws MessagingException { final String folder = remoteFolder.getName(); if (remoteFolder.supportsFetchingFlags()) { if (K9.DEBUG) Log.d(K9.LOG_TAG, "SYNC: About to sync flags for " + syncFlagMessages.size() + " remote messages for folder " + folder); FetchProfile fp = new FetchProfile(); fp.add(FetchProfile.Item.FLAGS); List<Message> undeletedMessages = new LinkedList<Message>(); for (Message message : syncFlagMessages) { if (!message.isSet(Flag.DELETED)) { undeletedMessages.add(message); } } remoteFolder.fetch(undeletedMessages, fp, null); for (Message remoteMessage : syncFlagMessages) { LocalMessage localMessage = localFolder.getMessage(remoteMessage.getUid()); boolean messageChanged = syncFlags(localMessage, remoteMessage); if (messageChanged) { boolean shouldBeNotifiedOf = false; if (localMessage.isSet(Flag.DELETED) || isMessageSuppressed(localMessage)) { for (MessagingListener l : getListeners()) { l.synchronizeMailboxRemovedMessage(account, folder, localMessage); } } else { for (MessagingListener l : getListeners()) { l.synchronizeMailboxAddOrUpdateMessage(account, folder, localMessage); } if (shouldNotifyForMessage(account, localFolder, localMessage)) { shouldBeNotifiedOf = true; } } // we're only interested in messages that need removing if (!shouldBeNotifiedOf) { MessageReference messageReference = localMessage.makeMessageReference(); notificationController.removeNewMailNotification(account, messageReference); } } progress.incrementAndGet(); for (MessagingListener l : getListeners()) { l.synchronizeMailboxProgress(account, folder, progress.get(), todo); } } } } private boolean syncFlags(LocalMessage localMessage, Message remoteMessage) throws MessagingException { boolean messageChanged = false; if (localMessage == null || localMessage.isSet(Flag.DELETED)) { return false; } if (remoteMessage.isSet(Flag.DELETED)) { if (localMessage.getFolder().syncRemoteDeletions()) { localMessage.setFlag(Flag.DELETED, true); messageChanged = true; } } else { for (Flag flag : MessagingController.SYNC_FLAGS) { if (remoteMessage.isSet(flag) != localMessage.isSet(flag)) { 
localMessage.setFlag(flag, remoteMessage.isSet(flag)); messageChanged = true; } } } return messageChanged; } private String getRootCauseMessage(Throwable t) { Throwable rootCause = t; Throwable nextCause = rootCause; do { nextCause = rootCause.getCause(); if (nextCause != null) { rootCause = nextCause; } } while (nextCause != null); if (rootCause instanceof MessagingException) { return rootCause.getMessage(); } else { // Remove the namespace on the exception so we have a fighting chance of seeing more of the error in the // notification. return (rootCause.getLocalizedMessage() != null) ? (rootCause.getClass().getSimpleName() + ": " + rootCause.getLocalizedMessage()) : rootCause.getClass().getSimpleName(); } } private void queuePendingCommand(Account account, PendingCommand command) { try { LocalStore localStore = account.getLocalStore(); localStore.addPendingCommand(command); } catch (Exception e) { addErrorMessage(account, null, e); throw new RuntimeException("Unable to enqueue pending command", e); } } private void processPendingCommands(final Account account) { putBackground("processPendingCommands", null, new Runnable() { @Override public void run() { try { processPendingCommandsSynchronous(account); } catch (UnavailableStorageException e) { Log.i(K9.LOG_TAG, "Failed to process pending command because storage is not available - trying again later."); throw new UnavailableAccountException(e); } catch (MessagingException me) { Log.e(K9.LOG_TAG, "processPendingCommands", me); addErrorMessage(account, null, me); /* * Ignore any exceptions from the commands. Commands will be processed * on the next round. */ } } }); } private void processPendingCommandsSynchronous(Account account) throws MessagingException { LocalStore localStore = account.getLocalStore(); List<PendingCommand> commands = localStore.getPendingCommands(); int progress = 0; int todo = commands.size(); if (todo == 0) { return; } for (MessagingListener l : getListeners()) { l.pendingCommandsProcessing(account); l.synchronizeMailboxProgress(account, null, progress, todo); } PendingCommand processingCommand = null; try { for (PendingCommand command : commands) { processingCommand = command; if (K9.DEBUG) Log.d(K9.LOG_TAG, "Processing pending command '" + command + "'"); String[] components = command.command.split("\\."); String commandTitle = components[components.length - 1]; for (MessagingListener l : getListeners()) { l.pendingCommandStarted(account, commandTitle); } /* * We specifically do not catch any exceptions here. If a command fails it is * most likely due to a server or IO error and it must be retried before any * other command processes. This maintains the order of the commands. 
*/ try { if (PENDING_COMMAND_APPEND.equals(command.command)) { processPendingAppend(command, account); } else if (PENDING_COMMAND_SET_FLAG_BULK.equals(command.command)) { processPendingSetFlag(command, account); } else if (PENDING_COMMAND_SET_FLAG.equals(command.command)) { processPendingSetFlagOld(command, account); } else if (PENDING_COMMAND_MARK_ALL_AS_READ.equals(command.command)) { processPendingMarkAllAsRead(command, account); } else if (PENDING_COMMAND_MOVE_OR_COPY_BULK.equals(command.command)) { processPendingMoveOrCopyOld2(command, account); } else if (PENDING_COMMAND_MOVE_OR_COPY_BULK_NEW.equals(command.command)) { processPendingMoveOrCopy(command, account); } else if (PENDING_COMMAND_MOVE_OR_COPY.equals(command.command)) { processPendingMoveOrCopyOld(command, account); } else if (PENDING_COMMAND_EMPTY_TRASH.equals(command.command)) { processPendingEmptyTrash(command, account); } else if (PENDING_COMMAND_EXPUNGE.equals(command.command)) { processPendingExpunge(command, account); } localStore.removePendingCommand(command); if (K9.DEBUG) Log.d(K9.LOG_TAG, "Done processing pending command '" + command + "'"); } catch (MessagingException me) { if (me.isPermanentFailure()) { addErrorMessage(account, null, me); Log.e(K9.LOG_TAG, "Failure of command '" + command + "' was permanent, removing command from queue"); localStore.removePendingCommand(processingCommand); } else { throw me; } } finally { progress++; for (MessagingListener l : getListeners()) { l.synchronizeMailboxProgress(account, null, progress, todo); l.pendingCommandCompleted(account, commandTitle); } } } } catch (MessagingException me) { notifyUserIfCertificateProblem(account, me, true); addErrorMessage(account, null, me); Log.e(K9.LOG_TAG, "Could not process command '" + processingCommand + "'", me); throw me; } finally { for (MessagingListener l : getListeners()) { l.pendingCommandsFinished(account); } } } /** * Process a pending append message command. This command uploads a local message to the * server, first checking to be sure that the server message is not newer than * the local message. Once the local message is successfully processed it is deleted so * that the server message will be synchronized down without an additional copy being * created. 
* TODO update the local message UID instead of deleteing it * * @param command arguments = (String folder, String uid) * @param account * @throws MessagingException */ private void processPendingAppend(PendingCommand command, Account account) throws MessagingException { Folder remoteFolder = null; LocalFolder localFolder = null; try { String folder = command.arguments[0]; String uid = command.arguments[1]; if (account.getErrorFolderName().equals(folder)) { return; } LocalStore localStore = account.getLocalStore(); localFolder = localStore.getFolder(folder); LocalMessage localMessage = localFolder.getMessage(uid); if (localMessage == null) { return; } Store remoteStore = account.getRemoteStore(); remoteFolder = remoteStore.getFolder(folder); if (!remoteFolder.exists()) { if (!remoteFolder.create(FolderType.HOLDS_MESSAGES)) { return; } } remoteFolder.open(Folder.OPEN_MODE_RW); if (remoteFolder.getMode() != Folder.OPEN_MODE_RW) { return; } Message remoteMessage = null; if (!localMessage.getUid().startsWith(K9.LOCAL_UID_PREFIX)) { remoteMessage = remoteFolder.getMessage(localMessage.getUid()); } if (remoteMessage == null) { if (localMessage.isSet(Flag.X_REMOTE_COPY_STARTED)) { Log.w(K9.LOG_TAG, "Local message with uid " + localMessage.getUid() + " has flag " + Flag.X_REMOTE_COPY_STARTED + " already set, checking for remote message with " + " same message id"); String rUid = remoteFolder.getUidFromMessageId(localMessage); if (rUid != null) { Log.w(K9.LOG_TAG, "Local message has flag " + Flag.X_REMOTE_COPY_STARTED + " already set, and there is a remote message with " + " uid " + rUid + ", assuming message was already copied and aborting this copy"); String oldUid = localMessage.getUid(); localMessage.setUid(rUid); localFolder.changeUid(localMessage); for (MessagingListener l : getListeners()) { l.messageUidChanged(account, folder, oldUid, localMessage.getUid()); } return; } else { Log.w(K9.LOG_TAG, "No remote message with message-id found, proceeding with append"); } } /* * If the message does not exist remotely we just upload it and then * update our local copy with the new uid. */ FetchProfile fp = new FetchProfile(); fp.add(FetchProfile.Item.BODY); localFolder.fetch(Collections.singletonList(localMessage) , fp, null); String oldUid = localMessage.getUid(); localMessage.setFlag(Flag.X_REMOTE_COPY_STARTED, true); remoteFolder.appendMessages(Collections.singletonList(localMessage)); localFolder.changeUid(localMessage); for (MessagingListener l : getListeners()) { l.messageUidChanged(account, folder, oldUid, localMessage.getUid()); } } else { /* * If the remote message exists we need to determine which copy to keep. */ /* * See if the remote message is newer than ours. */ FetchProfile fp = new FetchProfile(); fp.add(FetchProfile.Item.ENVELOPE); remoteFolder.fetch(Collections.singletonList(remoteMessage), fp, null); Date localDate = localMessage.getInternalDate(); Date remoteDate = remoteMessage.getInternalDate(); if (remoteDate != null && remoteDate.compareTo(localDate) > 0) { /* * If the remote message is newer than ours we'll just * delete ours and move on. A sync will get the server message * if we need to be able to see it. */ localMessage.destroy(); } else { /* * Otherwise we'll upload our message and then delete the remote message. 
*/ fp.clear(); fp = new FetchProfile(); fp.add(FetchProfile.Item.BODY); localFolder.fetch(Collections.singletonList(localMessage), fp, null); String oldUid = localMessage.getUid(); localMessage.setFlag(Flag.X_REMOTE_COPY_STARTED, true); remoteFolder.appendMessages(Collections.singletonList(localMessage)); localFolder.changeUid(localMessage); for (MessagingListener l : getListeners()) { l.messageUidChanged(account, folder, oldUid, localMessage.getUid()); } if (remoteDate != null) { remoteMessage.setFlag(Flag.DELETED, true); if (Expunge.EXPUNGE_IMMEDIATELY == account.getExpungePolicy()) { remoteFolder.expunge(); } } } } } finally { closeFolder(remoteFolder); closeFolder(localFolder); } } private void queueMoveOrCopy(Account account, String srcFolder, String destFolder, boolean isCopy, String uids[]) { if (account.getErrorFolderName().equals(srcFolder)) { return; } PendingCommand command = new PendingCommand(); command.command = PENDING_COMMAND_MOVE_OR_COPY_BULK_NEW; int length = 4 + uids.length; command.arguments = new String[length]; command.arguments[0] = srcFolder; command.arguments[1] = destFolder; command.arguments[2] = Boolean.toString(isCopy); command.arguments[3] = Boolean.toString(false); System.arraycopy(uids, 0, command.arguments, 4, uids.length); queuePendingCommand(account, command); } private void queueMoveOrCopy(Account account, String srcFolder, String destFolder, boolean isCopy, String uids[], Map<String, String> uidMap) { if (uidMap == null || uidMap.isEmpty()) { queueMoveOrCopy(account, srcFolder, destFolder, isCopy, uids); } else { if (account.getErrorFolderName().equals(srcFolder)) { return; } PendingCommand command = new PendingCommand(); command.command = PENDING_COMMAND_MOVE_OR_COPY_BULK_NEW; int length = 4 + uidMap.keySet().size() + uidMap.values().size(); command.arguments = new String[length]; command.arguments[0] = srcFolder; command.arguments[1] = destFolder; command.arguments[2] = Boolean.toString(isCopy); command.arguments[3] = Boolean.toString(true); System.arraycopy(uidMap.keySet().toArray(), 0, command.arguments, 4, uidMap.keySet().size()); System.arraycopy(uidMap.values().toArray(), 0, command.arguments, 4 + uidMap.keySet().size(), uidMap.values().size()); queuePendingCommand(account, command); } } /** * Convert pending command to new format and call * {@link #processPendingMoveOrCopy(PendingCommand, Account)}. * * <p> * TODO: This method is obsolete and is only for transition from K-9 4.0 to K-9 4.2 * Eventually, it should be removed. * </p> * * @param command * Pending move/copy command in old format. * @param account * The account the pending command belongs to. * * @throws MessagingException * In case of an error. */ private void processPendingMoveOrCopyOld2(PendingCommand command, Account account) throws MessagingException { PendingCommand newCommand = new PendingCommand(); int len = command.arguments.length; newCommand.command = PENDING_COMMAND_MOVE_OR_COPY_BULK_NEW; newCommand.arguments = new String[len + 1]; newCommand.arguments[0] = command.arguments[0]; newCommand.arguments[1] = command.arguments[1]; newCommand.arguments[2] = command.arguments[2]; newCommand.arguments[3] = Boolean.toString(false); System.arraycopy(command.arguments, 3, newCommand.arguments, 4, len - 3); processPendingMoveOrCopy(newCommand, account); } /** * Process a pending trash message command. 
* * @param command arguments = (String folder, String uid) * @param account * @throws MessagingException */ private void processPendingMoveOrCopy(PendingCommand command, Account account) throws MessagingException { Folder remoteSrcFolder = null; Folder remoteDestFolder = null; LocalFolder localDestFolder = null; try { String srcFolder = command.arguments[0]; if (account.getErrorFolderName().equals(srcFolder)) { return; } String destFolder = command.arguments[1]; String isCopyS = command.arguments[2]; String hasNewUidsS = command.arguments[3]; boolean hasNewUids = false; if (hasNewUidsS != null) { hasNewUids = Boolean.parseBoolean(hasNewUidsS); } Store remoteStore = account.getRemoteStore(); remoteSrcFolder = remoteStore.getFolder(srcFolder); Store localStore = account.getLocalStore(); localDestFolder = (LocalFolder) localStore.getFolder(destFolder); List<Message> messages = new ArrayList<Message>(); /* * We split up the localUidMap into two parts while sending the command, here we assemble it back. */ Map<String, String> localUidMap = new HashMap<String, String>(); if (hasNewUids) { int offset = (command.arguments.length - 4) / 2; for (int i = 4; i < 4 + offset; i++) { localUidMap.put(command.arguments[i], command.arguments[i + offset]); String uid = command.arguments[i]; if (!uid.startsWith(K9.LOCAL_UID_PREFIX)) { messages.add(remoteSrcFolder.getMessage(uid)); } } } else { for (int i = 4; i < command.arguments.length; i++) { String uid = command.arguments[i]; if (!uid.startsWith(K9.LOCAL_UID_PREFIX)) { messages.add(remoteSrcFolder.getMessage(uid)); } } } boolean isCopy = false; if (isCopyS != null) { isCopy = Boolean.parseBoolean(isCopyS); } if (!remoteSrcFolder.exists()) { throw new MessagingException("processingPendingMoveOrCopy: remoteFolder " + srcFolder + " does not exist", true); } remoteSrcFolder.open(Folder.OPEN_MODE_RW); if (remoteSrcFolder.getMode() != Folder.OPEN_MODE_RW) { throw new MessagingException("processingPendingMoveOrCopy: could not open remoteSrcFolder " + srcFolder + " read/write", true); } if (K9.DEBUG) Log.d(K9.LOG_TAG, "processingPendingMoveOrCopy: source folder = " + srcFolder + ", " + messages.size() + " messages, destination folder = " + destFolder + ", isCopy = " + isCopy); Map <String, String> remoteUidMap = null; if (!isCopy && destFolder.equals(account.getTrashFolderName())) { if (K9.DEBUG) Log.d(K9.LOG_TAG, "processingPendingMoveOrCopy doing special case for deleting message"); String destFolderName = destFolder; if (K9.FOLDER_NONE.equals(destFolderName)) { destFolderName = null; } remoteSrcFolder.delete(messages, destFolderName); } else { remoteDestFolder = remoteStore.getFolder(destFolder); if (isCopy) { remoteUidMap = remoteSrcFolder.copyMessages(messages, remoteDestFolder); } else { remoteUidMap = remoteSrcFolder.moveMessages(messages, remoteDestFolder); } } if (!isCopy && Expunge.EXPUNGE_IMMEDIATELY == account.getExpungePolicy()) { if (K9.DEBUG) Log.i(K9.LOG_TAG, "processingPendingMoveOrCopy expunging folder " + account.getDescription() + ":" + srcFolder); remoteSrcFolder.expunge(); } /* * This next part is used to bring the local UIDs of the local destination folder * upto speed with the remote UIDs of remote destination folder. 
*/ if (!localUidMap.isEmpty() && remoteUidMap != null && !remoteUidMap.isEmpty()) { for (Map.Entry<String, String> entry : remoteUidMap.entrySet()) { String remoteSrcUid = entry.getKey(); String localDestUid = localUidMap.get(remoteSrcUid); String newUid = entry.getValue(); Message localDestMessage = localDestFolder.getMessage(localDestUid); if (localDestMessage != null) { localDestMessage.setUid(newUid); localDestFolder.changeUid((LocalMessage)localDestMessage); for (MessagingListener l : getListeners()) { l.messageUidChanged(account, destFolder, localDestUid, newUid); } } } } } finally { closeFolder(remoteSrcFolder); closeFolder(remoteDestFolder); } } private void queueSetFlag(final Account account, final String folderName, final String newState, final String flag, final String[] uids) { putBackground("queueSetFlag " + account.getDescription() + ":" + folderName, null, new Runnable() { @Override public void run() { PendingCommand command = new PendingCommand(); command.command = PENDING_COMMAND_SET_FLAG_BULK; int length = 3 + uids.length; command.arguments = new String[length]; command.arguments[0] = folderName; command.arguments[1] = newState; command.arguments[2] = flag; System.arraycopy(uids, 0, command.arguments, 3, uids.length); queuePendingCommand(account, command); processPendingCommands(account); } }); } /** * Processes a pending mark read or unread command. * * @param command arguments = (String folder, String uid, boolean read) * @param account */ private void processPendingSetFlag(PendingCommand command, Account account) throws MessagingException { String folder = command.arguments[0]; if (account.getErrorFolderName().equals(folder)) { return; } boolean newState = Boolean.parseBoolean(command.arguments[1]); Flag flag = Flag.valueOf(command.arguments[2]); Store remoteStore = account.getRemoteStore(); Folder remoteFolder = remoteStore.getFolder(folder); if (!remoteFolder.exists() || !remoteFolder.isFlagSupported(flag)) { return; } try { remoteFolder.open(Folder.OPEN_MODE_RW); if (remoteFolder.getMode() != Folder.OPEN_MODE_RW) { return; } List<Message> messages = new ArrayList<Message>(); for (int i = 3; i < command.arguments.length; i++) { String uid = command.arguments[i]; if (!uid.startsWith(K9.LOCAL_UID_PREFIX)) { messages.add(remoteFolder.getMessage(uid)); } } if (messages.isEmpty()) { return; } remoteFolder.setFlags(messages, Collections.singleton(flag), newState); } finally { closeFolder(remoteFolder); } } // TODO: This method is obsolete and is only for transition from K-9 2.0 to K-9 2.1 // Eventually, it should be removed private void processPendingSetFlagOld(PendingCommand command, Account account) throws MessagingException { String folder = command.arguments[0]; String uid = command.arguments[1]; if (account.getErrorFolderName().equals(folder)) { return; } if (K9.DEBUG) Log.d(K9.LOG_TAG, "processPendingSetFlagOld: folder = " + folder + ", uid = " + uid); boolean newState = Boolean.parseBoolean(command.arguments[2]); Flag flag = Flag.valueOf(command.arguments[3]); Folder remoteFolder = null; try { Store remoteStore = account.getRemoteStore(); remoteFolder = remoteStore.getFolder(folder); if (!remoteFolder.exists()) { return; } remoteFolder.open(Folder.OPEN_MODE_RW); if (remoteFolder.getMode() != Folder.OPEN_MODE_RW) { return; } Message remoteMessage = null; if (!uid.startsWith(K9.LOCAL_UID_PREFIX)) { remoteMessage = remoteFolder.getMessage(uid); } if (remoteMessage == null) { return; } remoteMessage.setFlag(flag, newState); } finally { closeFolder(remoteFolder); } } 
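    /**
     * Queue a pending expunge command for the given folder and trigger processing of the
     * pending command queue on a background thread. (Descriptive comment added; behavior is
     * exactly what the method below implements.)
     *
     * @param account the account the folder belongs to
     * @param folderName the name of the folder to expunge on the remote store
     */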
private void queueExpunge(final Account account, final String folderName) { putBackground("queueExpunge " + account.getDescription() + ":" + folderName, null, new Runnable() { @Override public void run() { PendingCommand command = new PendingCommand(); command.command = PENDING_COMMAND_EXPUNGE; command.arguments = new String[1]; command.arguments[0] = folderName; queuePendingCommand(account, command); processPendingCommands(account); } }); } private void processPendingExpunge(PendingCommand command, Account account) throws MessagingException { String folder = command.arguments[0]; if (account.getErrorFolderName().equals(folder)) { return; } if (K9.DEBUG) Log.d(K9.LOG_TAG, "processPendingExpunge: folder = " + folder); Store remoteStore = account.getRemoteStore(); Folder remoteFolder = remoteStore.getFolder(folder); try { if (!remoteFolder.exists()) { return; } remoteFolder.open(Folder.OPEN_MODE_RW); if (remoteFolder.getMode() != Folder.OPEN_MODE_RW) { return; } remoteFolder.expunge(); if (K9.DEBUG) Log.d(K9.LOG_TAG, "processPendingExpunge: complete for folder = " + folder); } finally { closeFolder(remoteFolder); } } // TODO: This method is obsolete and is only for transition from K-9 2.0 to K-9 2.1 // Eventually, it should be removed private void processPendingMoveOrCopyOld(PendingCommand command, Account account) throws MessagingException { String srcFolder = command.arguments[0]; String uid = command.arguments[1]; String destFolder = command.arguments[2]; String isCopyS = command.arguments[3]; boolean isCopy = false; if (isCopyS != null) { isCopy = Boolean.parseBoolean(isCopyS); } if (account.getErrorFolderName().equals(srcFolder)) { return; } Store remoteStore = account.getRemoteStore(); Folder remoteSrcFolder = remoteStore.getFolder(srcFolder); Folder remoteDestFolder = remoteStore.getFolder(destFolder); if (!remoteSrcFolder.exists()) { throw new MessagingException("processPendingMoveOrCopyOld: remoteFolder " + srcFolder + " does not exist", true); } remoteSrcFolder.open(Folder.OPEN_MODE_RW); if (remoteSrcFolder.getMode() != Folder.OPEN_MODE_RW) { throw new MessagingException("processPendingMoveOrCopyOld: could not open remoteSrcFolder " + srcFolder + " read/write", true); } Message remoteMessage = null; if (!uid.startsWith(K9.LOCAL_UID_PREFIX)) { remoteMessage = remoteSrcFolder.getMessage(uid); } if (remoteMessage == null) { throw new MessagingException("processPendingMoveOrCopyOld: remoteMessage " + uid + " does not exist", true); } if (K9.DEBUG) Log.d(K9.LOG_TAG, "processPendingMoveOrCopyOld: source folder = " + srcFolder + ", uid = " + uid + ", destination folder = " + destFolder + ", isCopy = " + isCopy); if (!isCopy && destFolder.equals(account.getTrashFolderName())) { if (K9.DEBUG) Log.d(K9.LOG_TAG, "processPendingMoveOrCopyOld doing special case for deleting message"); remoteMessage.delete(account.getTrashFolderName()); remoteSrcFolder.close(); return; } remoteDestFolder.open(Folder.OPEN_MODE_RW); if (remoteDestFolder.getMode() != Folder.OPEN_MODE_RW) { throw new MessagingException("processPendingMoveOrCopyOld: could not open remoteDestFolder " + srcFolder + " read/write", true); } if (isCopy) { remoteSrcFolder.copyMessages(Collections.singletonList(remoteMessage), remoteDestFolder); } else { remoteSrcFolder.moveMessages(Collections.singletonList(remoteMessage), remoteDestFolder); } remoteSrcFolder.close(); remoteDestFolder.close(); } private void processPendingMarkAllAsRead(PendingCommand command, Account account) throws MessagingException { String folder = 
command.arguments[0]; Folder remoteFolder = null; LocalFolder localFolder = null; try { Store localStore = account.getLocalStore(); localFolder = (LocalFolder) localStore.getFolder(folder); localFolder.open(Folder.OPEN_MODE_RW); List<? extends Message> messages = localFolder.getMessages(null, false); for (Message message : messages) { if (!message.isSet(Flag.SEEN)) { message.setFlag(Flag.SEEN, true); for (MessagingListener l : getListeners()) { l.listLocalMessagesUpdateMessage(account, folder, message); } } } for (MessagingListener l : getListeners()) { l.folderStatusChanged(account, folder, 0); } if (account.getErrorFolderName().equals(folder)) { return; } Store remoteStore = account.getRemoteStore(); remoteFolder = remoteStore.getFolder(folder); if (!remoteFolder.exists() || !remoteFolder.isFlagSupported(Flag.SEEN)) { return; } remoteFolder.open(Folder.OPEN_MODE_RW); if (remoteFolder.getMode() != Folder.OPEN_MODE_RW) { return; } remoteFolder.setFlags(Collections.singleton(Flag.SEEN), true); remoteFolder.close(); } catch (UnsupportedOperationException uoe) { Log.w(K9.LOG_TAG, "Could not mark all server-side as read because store doesn't support operation", uoe); } finally { closeFolder(localFolder); closeFolder(remoteFolder); } } static long uidfill = 0; static AtomicBoolean loopCatch = new AtomicBoolean(); public void addErrorMessage(Account account, String subject, Throwable t) { try { if (t == null) { return; } CharArrayWriter baos = new CharArrayWriter(t.getStackTrace().length * 10); PrintWriter ps = new PrintWriter(baos); try { PackageInfo packageInfo = context.getPackageManager().getPackageInfo( context.getPackageName(), 0); ps.format("K9-Mail version: %s\r\n", packageInfo.versionName); } catch (Exception e) { // ignore } ps.format("Device make: %s\r\n", Build.MANUFACTURER); ps.format("Device model: %s\r\n", Build.MODEL); ps.format("Android version: %s\r\n\r\n", Build.VERSION.RELEASE); t.printStackTrace(ps); ps.close(); if (subject == null) { subject = getRootCauseMessage(t); } addErrorMessage(account, subject, baos.toString()); } catch (Throwable it) { Log.e(K9.LOG_TAG, "Could not save error message to " + account.getErrorFolderName(), it); } } public void addErrorMessage(Account account, String subject, String body) { if (!K9.DEBUG) { return; } if (!loopCatch.compareAndSet(false, true)) { return; } try { if (body == null || body.length() < 1) { return; } Store localStore = account.getLocalStore(); LocalFolder localFolder = (LocalFolder)localStore.getFolder(account.getErrorFolderName()); MimeMessage message = new MimeMessage(); MimeMessageHelper.setBody(message, new TextBody(body)); message.setFlag(Flag.X_DOWNLOADED_FULL, true); message.setSubject(subject); long nowTime = System.currentTimeMillis(); Date nowDate = new Date(nowTime); message.setInternalDate(nowDate); message.addSentDate(nowDate, K9.hideTimeZone()); message.setFrom(new Address(account.getEmail(), "K9mail internal")); localFolder.appendMessages(Collections.singletonList(message)); localFolder.clearMessagesOlderThan(nowTime - (15 * 60 * 1000)); } catch (Throwable it) { Log.e(K9.LOG_TAG, "Could not save error message to " + account.getErrorFolderName(), it); } finally { loopCatch.set(false); } } public void markAllMessagesRead(final Account account, final String folder) { if (K9.DEBUG) Log.i(K9.LOG_TAG, "Marking all messages in " + account.getDescription() + ":" + folder + " as read"); List<String> args = new ArrayList<String>(); args.add(folder); PendingCommand command = new PendingCommand(); command.command = 
PENDING_COMMAND_MARK_ALL_AS_READ; command.arguments = args.toArray(EMPTY_STRING_ARRAY); queuePendingCommand(account, command); processPendingCommands(account); } public void setFlag(final Account account, final List<Long> messageIds, final Flag flag, final boolean newState) { setFlagInCache(account, messageIds, flag, newState); threadPool.execute(new Runnable() { @Override public void run() { setFlagSynchronous(account, messageIds, flag, newState, false); } }); } public void setFlagForThreads(final Account account, final List<Long> threadRootIds, final Flag flag, final boolean newState) { setFlagForThreadsInCache(account, threadRootIds, flag, newState); threadPool.execute(new Runnable() { @Override public void run() { setFlagSynchronous(account, threadRootIds, flag, newState, true); } }); } private void setFlagSynchronous(final Account account, final List<Long> ids, final Flag flag, final boolean newState, final boolean threadedList) { LocalStore localStore; try { localStore = account.getLocalStore(); } catch (MessagingException e) { Log.e(K9.LOG_TAG, "Couldn't get LocalStore instance", e); return; } // Update affected messages in the database. This should be as fast as possible so the UI // can be updated with the new state. try { if (threadedList) { localStore.setFlagForThreads(ids, flag, newState); removeFlagForThreadsFromCache(account, ids, flag); } else { localStore.setFlag(ids, flag, newState); removeFlagFromCache(account, ids, flag); } } catch (MessagingException e) { Log.e(K9.LOG_TAG, "Couldn't set flags in local database", e); } // Read folder name and UID of messages from the database Map<String, List<String>> folderMap; try { folderMap = localStore.getFoldersAndUids(ids, threadedList); } catch (MessagingException e) { Log.e(K9.LOG_TAG, "Couldn't get folder name and UID of messages", e); return; } // Loop over all folders for (Entry<String, List<String>> entry : folderMap.entrySet()) { String folderName = entry.getKey(); // Notify listeners of changed folder status LocalFolder localFolder = localStore.getFolder(folderName); try { int unreadMessageCount = localFolder.getUnreadMessageCount(); for (MessagingListener l : getListeners()) { l.folderStatusChanged(account, folderName, unreadMessageCount); } } catch (MessagingException e) { Log.w(K9.LOG_TAG, "Couldn't get unread count for folder: " + folderName, e); } // The error folder is always a local folder // TODO: Skip the remote part for all local-only folders if (account.getErrorFolderName().equals(folderName)) { continue; } // Send flag change to server String[] uids = entry.getValue().toArray(EMPTY_STRING_ARRAY); queueSetFlag(account, folderName, Boolean.toString(newState), flag.toString(), uids); processPendingCommands(account); } } /** * Set or remove a flag for a set of messages in a specific folder. * * <p> * The {@link Message} objects passed in are updated to reflect the new flag state. * </p> * * @param account * The account the folder containing the messages belongs to. * @param folderName * The name of the folder. * @param messages * The messages to change the flag for. * @param flag * The flag to change. * @param newState * {@code true}, if the flag should be set. {@code false} if it should be removed. */ public void setFlag(Account account, String folderName, List<? extends Message> messages, Flag flag, boolean newState) { // TODO: Put this into the background, but right now some callers depend on the message // objects being modified right after this method returns. 
Folder localFolder = null; try { Store localStore = account.getLocalStore(); localFolder = localStore.getFolder(folderName); localFolder.open(Folder.OPEN_MODE_RW); // Allows for re-allowing sending of messages that could not be sent if (flag == Flag.FLAGGED && !newState && account.getOutboxFolderName().equals(folderName)) { for (Message message : messages) { String uid = message.getUid(); if (uid != null) { sendCount.remove(uid); } } } // Update the messages in the local store localFolder.setFlags(messages, Collections.singleton(flag), newState); int unreadMessageCount = localFolder.getUnreadMessageCount(); for (MessagingListener l : getListeners()) { l.folderStatusChanged(account, folderName, unreadMessageCount); } /* * Handle the remote side */ // The error folder is always a local folder // TODO: Skip the remote part for all local-only folders if (account.getErrorFolderName().equals(folderName)) { return; } String[] uids = new String[messages.size()]; for (int i = 0, end = uids.length; i < end; i++) { uids[i] = messages.get(i).getUid(); } queueSetFlag(account, folderName, Boolean.toString(newState), flag.toString(), uids); processPendingCommands(account); } catch (MessagingException me) { addErrorMessage(account, null, me); throw new RuntimeException(me); } finally { closeFolder(localFolder); } } /** * Set or remove a flag for a message referenced by message UID. * * @param account * The account the folder containing the message belongs to. * @param folderName * The name of the folder. * @param uid * The UID of the message to change the flag for. * @param flag * The flag to change. * @param newState * {@code true}, if the flag should be set. {@code false} if it should be removed. */ public void setFlag(Account account, String folderName, String uid, Flag flag, boolean newState) { Folder localFolder = null; try { LocalStore localStore = account.getLocalStore(); localFolder = localStore.getFolder(folderName); localFolder.open(Folder.OPEN_MODE_RW); Message message = localFolder.getMessage(uid); if (message != null) { setFlag(account, folderName, Collections.singletonList(message), flag, newState); } } catch (MessagingException me) { addErrorMessage(account, null, me); throw new RuntimeException(me); } finally { closeFolder(localFolder); } } public void clearAllPending(final Account account) { try { Log.w(K9.LOG_TAG, "Clearing pending commands!"); LocalStore localStore = account.getLocalStore(); localStore.removePendingCommands(); } catch (MessagingException me) { Log.e(K9.LOG_TAG, "Unable to clear pending command", me); addErrorMessage(account, null, me); } } //TODO: Fix the callback mess. 
See GH-782 public void loadMessageForViewRemote(final Account account, final String folder, final String uid, final MessagingListener listener) { put("loadMessageForViewRemote", listener, new Runnable() { @Override public void run() { loadMessageForViewRemoteSynchronous(account, folder, uid, listener, false, false); } }); } public boolean loadMessageForViewRemoteSynchronous(final Account account, final String folder, final String uid, final MessagingListener listener, final boolean force, final boolean loadPartialFromSearch) { Folder remoteFolder = null; LocalFolder localFolder = null; try { LocalStore localStore = account.getLocalStore(); localFolder = localStore.getFolder(folder); localFolder.open(Folder.OPEN_MODE_RW); LocalMessage message = localFolder.getMessage(uid); if (uid.startsWith(K9.LOCAL_UID_PREFIX)) { Log.w(K9.LOG_TAG, "Message has local UID so cannot download fully."); // ASH move toast android.widget.Toast.makeText(context, "Message has local UID so cannot download fully", android.widget.Toast.LENGTH_LONG).show(); // TODO: Using X_DOWNLOADED_FULL is wrong because it's only a partial message. But // one we can't download completely. Maybe add a new flag; X_PARTIAL_MESSAGE ? message.setFlag(Flag.X_DOWNLOADED_FULL, true); message.setFlag(Flag.X_DOWNLOADED_PARTIAL, false); } /* commented out because this was pulled from another unmerged branch: } else if (localFolder.isLocalOnly() && !force) { Log.w(K9.LOG_TAG, "Message in local-only folder so cannot download fully."); // ASH move toast android.widget.Toast.makeText(mApplication, "Message in local-only folder so cannot download fully", android.widget.Toast.LENGTH_LONG).show(); message.setFlag(Flag.X_DOWNLOADED_FULL, true); message.setFlag(Flag.X_DOWNLOADED_PARTIAL, false); }*/ if (message.isSet(Flag.X_DOWNLOADED_FULL)) { /* * If the message has been synchronized since we were called we'll * just hand it back cause it's ready to go. */ FetchProfile fp = new FetchProfile(); fp.add(FetchProfile.Item.ENVELOPE); fp.add(FetchProfile.Item.BODY); localFolder.fetch(Collections.singletonList(message), fp, null); } else { /* * At this point the message is not available, so we need to download it * fully if possible. 
*/ Store remoteStore = account.getRemoteStore(); remoteFolder = remoteStore.getFolder(folder); remoteFolder.open(Folder.OPEN_MODE_RW); // Get the remote message and fully download it Message remoteMessage = remoteFolder.getMessage(uid); FetchProfile fp = new FetchProfile(); fp.add(FetchProfile.Item.BODY); remoteFolder.fetch(Collections.singletonList(remoteMessage), fp, null); // Store the message locally and load the stored message into memory localFolder.appendMessages(Collections.singletonList(remoteMessage)); if (loadPartialFromSearch) { fp.add(FetchProfile.Item.BODY); } fp.add(FetchProfile.Item.ENVELOPE); message = localFolder.getMessage(uid); localFolder.fetch(Collections.singletonList(message), fp, null); // Mark that this message is now fully synched if (account.isMarkMessageAsReadOnView()) { message.setFlag(Flag.SEEN, true); } message.setFlag(Flag.X_DOWNLOADED_FULL, true); } // now that we have the full message, refresh the headers for (MessagingListener l : getListeners(listener)) { l.loadMessageForViewHeadersAvailable(account, folder, uid, message); } for (MessagingListener l : getListeners(listener)) { l.loadMessageForViewBodyAvailable(account, folder, uid, message); } for (MessagingListener l : getListeners(listener)) { l.loadMessageForViewFinished(account, folder, uid, message); } return true; } catch (Exception e) { for (MessagingListener l : getListeners(listener)) { l.loadMessageForViewFailed(account, folder, uid, e); } notifyUserIfCertificateProblem(account, e, true); addErrorMessage(account, null, e); return false; } finally { closeFolder(remoteFolder); closeFolder(localFolder); } } public void loadMessageForView(final Account account, final String folder, final String uid, final MessagingListener listener) { for (MessagingListener l : getListeners(listener)) { l.loadMessageForViewStarted(account, folder, uid); } threadPool.execute(new Runnable() { @Override public void run() { try { LocalStore localStore = account.getLocalStore(); LocalFolder localFolder = localStore.getFolder(folder); localFolder.open(Folder.OPEN_MODE_RW); LocalMessage message = localFolder.getMessage(uid); if (message == null || message.getId() == 0) { throw new IllegalArgumentException("Message not found: folder=" + folder + ", uid=" + uid); } // IMAP search results will usually need to be downloaded before viewing. // TODO: limit by account.getMaximumAutoDownloadMessageSize(). 
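                    // Neither a full nor a partial body has been downloaded yet, so fetch the
                    // message from the remote store before notifying the listeners below.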
if (!message.isSet(Flag.X_DOWNLOADED_FULL) && !message.isSet(Flag.X_DOWNLOADED_PARTIAL)) { if (loadMessageForViewRemoteSynchronous(account, folder, uid, listener, false, true)) { markMessageAsReadOnView(account, message); } return; } for (MessagingListener l : getListeners(listener)) { l.loadMessageForViewHeadersAvailable(account, folder, uid, message); } FetchProfile fp = new FetchProfile(); fp.add(FetchProfile.Item.ENVELOPE); fp.add(FetchProfile.Item.BODY); localFolder.fetch(Collections.singletonList(message), fp, null); localFolder.close(); for (MessagingListener l : getListeners(listener)) { l.loadMessageForViewBodyAvailable(account, folder, uid, message); } for (MessagingListener l : getListeners(listener)) { l.loadMessageForViewFinished(account, folder, uid, message); } markMessageAsReadOnView(account, message); } catch (Exception e) { for (MessagingListener l : getListeners(listener)) { l.loadMessageForViewFailed(account, folder, uid, e); } addErrorMessage(account, null, e); } } }); } public LocalMessage loadMessage(Account account, String folderName, String uid) throws MessagingException { LocalStore localStore = account.getLocalStore(); LocalFolder localFolder = localStore.getFolder(folderName); localFolder.open(Folder.OPEN_MODE_RW); LocalMessage message = localFolder.getMessage(uid); if (message == null || message.getId() == 0) { throw new IllegalArgumentException("Message not found: folder=" + folderName + ", uid=" + uid); } FetchProfile fp = new FetchProfile(); fp.add(FetchProfile.Item.BODY); localFolder.fetch(Collections.singletonList(message), fp, null); localFolder.close(); notificationController.removeNewMailNotification(account, message.makeMessageReference()); markMessageAsReadOnView(account, message); return message; } private void markMessageAsReadOnView(Account account, LocalMessage message) throws MessagingException { if (account.isMarkMessageAsReadOnView() && !message.isSet(Flag.SEEN)) { List<Long> messageIds = Collections.singletonList(message.getId()); setFlag(account, messageIds, Flag.SEEN, true); message.setFlagInternal(Flag.SEEN, true); } } public void loadAttachment(final Account account, final LocalMessage message, final Part part, final MessagingListener listener) { put("loadAttachment", listener, new Runnable() { @Override public void run() { Folder remoteFolder = null; LocalFolder localFolder = null; try { String folderName = message.getFolder().getName(); LocalStore localStore = account.getLocalStore(); localFolder = localStore.getFolder(folderName); Store remoteStore = account.getRemoteStore(); remoteFolder = remoteStore.getFolder(folderName); remoteFolder.open(Folder.OPEN_MODE_RW); Message remoteMessage = remoteFolder.getMessage(message.getUid()); remoteFolder.fetchPart(remoteMessage, part, null); localFolder.addPartToMessage(message, part); for (MessagingListener l : getListeners(listener)) { l.loadAttachmentFinished(account, message, part); } } catch (MessagingException me) { if (K9.DEBUG) Log.v(K9.LOG_TAG, "Exception loading attachment", me); for (MessagingListener l : getListeners(listener)) { l.loadAttachmentFailed(account, message, part, me.getMessage()); } notifyUserIfCertificateProblem(account, me, true); addErrorMessage(account, null, me); } finally { closeFolder(localFolder); closeFolder(remoteFolder); } } }); } /** * Stores the given message in the Outbox and starts a sendPendingMessages command to * attempt to send the message. 
* @param account * @param message * @param listener */ public void sendMessage(final Account account, final Message message, MessagingListener listener) { try { LocalStore localStore = account.getLocalStore(); LocalFolder localFolder = localStore.getFolder(account.getOutboxFolderName()); localFolder.open(Folder.OPEN_MODE_RW); localFolder.appendMessages(Collections.singletonList(message)); Message localMessage = localFolder.getMessage(message.getUid()); localMessage.setFlag(Flag.X_DOWNLOADED_FULL, true); localFolder.close(); sendPendingMessages(account, listener); } catch (Exception e) { /* for (MessagingListener l : getListeners()) { // TODO general failed } */ addErrorMessage(account, null, e); } } public void sendPendingMessages(MessagingListener listener) { final Preferences prefs = Preferences.getPreferences(context); for (Account account : prefs.getAvailableAccounts()) { sendPendingMessages(account, listener); } } /** * Attempt to send any messages that are sitting in the Outbox. * @param account * @param listener */ public void sendPendingMessages(final Account account, MessagingListener listener) { putBackground("sendPendingMessages", listener, new Runnable() { @Override public void run() { if (!account.isAvailable(context)) { throw new UnavailableAccountException(); } if (messagesPendingSend(account)) { showSendingNotificationIfNecessary(account); try { sendPendingMessagesSynchronous(account); } finally { clearSendingNotificationIfNecessary(account); } } } }); } private void showSendingNotificationIfNecessary(Account account) { if (account.isShowOngoing()) { notificationController.showSendingNotification(account); } } private void clearSendingNotificationIfNecessary(Account account) { if (account.isShowOngoing()) { notificationController.clearSendingNotification(account); } } public boolean messagesPendingSend(final Account account) { Folder localFolder = null; try { localFolder = account.getLocalStore().getFolder( account.getOutboxFolderName()); if (!localFolder.exists()) { return false; } localFolder.open(Folder.OPEN_MODE_RW); if (localFolder.getMessageCount() > 0) { return true; } } catch (Exception e) { Log.e(K9.LOG_TAG, "Exception while checking for unsent messages", e); } finally { closeFolder(localFolder); } return false; } /** * Attempt to send any messages that are sitting in the Outbox. * @param account */ public void sendPendingMessagesSynchronous(final Account account) { LocalFolder localFolder = null; Exception lastFailure = null; boolean wasPermanentFailure = false; try { LocalStore localStore = account.getLocalStore(); localFolder = localStore.getFolder( account.getOutboxFolderName()); if (!localFolder.exists()) { return; } for (MessagingListener l : getListeners()) { l.sendPendingMessagesStarted(account); } localFolder.open(Folder.OPEN_MODE_RW); List<LocalMessage> localMessages = localFolder.getMessages(null); int progress = 0; int todo = localMessages.size(); for (MessagingListener l : getListeners()) { l.synchronizeMailboxProgress(account, account.getSentFolderName(), progress, todo); } /* * The profile we will use to pull all of the content * for a given local message into memory for sending. 
*/ FetchProfile fp = new FetchProfile(); fp.add(FetchProfile.Item.ENVELOPE); fp.add(FetchProfile.Item.BODY); if (K9.DEBUG) Log.i(K9.LOG_TAG, "Scanning folder '" + account.getOutboxFolderName() + "' (" + localFolder.getId() + ") for messages to send"); Transport transport = Transport.getInstance(K9.app, account); for (LocalMessage message : localMessages) { if (message.isSet(Flag.DELETED)) { message.destroy(); continue; } try { AtomicInteger count = new AtomicInteger(0); AtomicInteger oldCount = sendCount.putIfAbsent(message.getUid(), count); if (oldCount != null) { count = oldCount; } if (K9.DEBUG) Log.i(K9.LOG_TAG, "Send count for message " + message.getUid() + " is " + count.get()); if (count.incrementAndGet() > K9.MAX_SEND_ATTEMPTS) { Log.e(K9.LOG_TAG, "Send count for message " + message.getUid() + " can't be delivered after " + K9.MAX_SEND_ATTEMPTS + " attempts. Giving up until the user restarts the device"); notificationController.showSendFailedNotification(account, new MessagingException(message.getSubject())); continue; } localFolder.fetch(Collections.singletonList(message), fp, null); try { if (message.getHeader(K9.IDENTITY_HEADER).length > 0) { Log.v(K9.LOG_TAG, "The user has set the Outbox and Drafts folder to the same thing. " + "This message appears to be a draft, so K-9 will not send it"); continue; } message.setFlag(Flag.X_SEND_IN_PROGRESS, true); if (K9.DEBUG) Log.i(K9.LOG_TAG, "Sending message with UID " + message.getUid()); transport.sendMessage(message); message.setFlag(Flag.X_SEND_IN_PROGRESS, false); message.setFlag(Flag.SEEN, true); progress++; for (MessagingListener l : getListeners()) { l.synchronizeMailboxProgress(account, account.getSentFolderName(), progress, todo); } if (!account.hasSentFolder()) { if (K9.DEBUG) Log.i(K9.LOG_TAG, "Account does not have a sent mail folder; deleting sent message"); message.setFlag(Flag.DELETED, true); } else { LocalFolder localSentFolder = localStore.getFolder(account.getSentFolderName()); if (K9.DEBUG) Log.i(K9.LOG_TAG, "Moving sent message to folder '" + account.getSentFolderName() + "' (" + localSentFolder.getId() + ") "); localFolder.moveMessages(Collections.singletonList(message), localSentFolder); if (K9.DEBUG) Log.i(K9.LOG_TAG, "Moved sent message to folder '" + account.getSentFolderName() + "' (" + localSentFolder.getId() + ") "); PendingCommand command = new PendingCommand(); command.command = PENDING_COMMAND_APPEND; command.arguments = new String[] { localSentFolder.getName(), message.getUid() }; queuePendingCommand(account, command); processPendingCommands(account); } } catch (CertificateValidationException e) { lastFailure = e; wasPermanentFailure = false; notifyUserIfCertificateProblem(account, e, false); handleSendFailure(account, localStore, localFolder, message, e, wasPermanentFailure); } catch (MessagingException e) { lastFailure = e; wasPermanentFailure = e.isPermanentFailure(); handleSendFailure(account, localStore, localFolder, message, e, wasPermanentFailure); } catch (Exception e) { lastFailure = e; wasPermanentFailure = true; handleSendFailure(account, localStore, localFolder, message, e, wasPermanentFailure); } } catch (Exception e) { lastFailure = e; wasPermanentFailure = false; Log.e(K9.LOG_TAG, "Failed to fetch message for sending", e); addErrorMessage(account, "Failed to fetch message for sending", e); notifySynchronizeMailboxFailed(account, localFolder, e); } } for (MessagingListener l : getListeners()) { l.sendPendingMessagesCompleted(account); } if (lastFailure != null) { if (wasPermanentFailure) { 
notificationController.showSendFailedNotification(account, lastFailure); } else { notificationController.showSendFailedNotification(account, lastFailure); } } } catch (UnavailableStorageException e) { Log.i(K9.LOG_TAG, "Failed to send pending messages because storage is not available - trying again later."); throw new UnavailableAccountException(e); } catch (Exception e) { for (MessagingListener l : getListeners()) { l.sendPendingMessagesFailed(account); } addErrorMessage(account, null, e); } finally { if (lastFailure == null) { notificationController.clearSendFailedNotification(account); } closeFolder(localFolder); } } private void handleSendFailure(Account account, Store localStore, Folder localFolder, Message message, Exception exception, boolean permanentFailure) throws MessagingException { Log.e(K9.LOG_TAG, "Failed to send message", exception); if (permanentFailure) { moveMessageToDraftsFolder(account, localFolder, localStore, message); } addErrorMessage(account, "Failed to send message", exception); message.setFlag(Flag.X_SEND_FAILED, true); notifySynchronizeMailboxFailed(account, localFolder, exception); } private void moveMessageToDraftsFolder(Account account, Folder localFolder, Store localStore, Message message) throws MessagingException { LocalFolder draftsFolder = (LocalFolder) localStore.getFolder(account.getDraftsFolderName()); localFolder.moveMessages(Collections.singletonList(message), draftsFolder); } private void notifySynchronizeMailboxFailed(Account account, Folder localFolder, Exception exception) { String folderName = localFolder.getName(); String errorMessage = getRootCauseMessage(exception); for (MessagingListener listener : getListeners()) { listener.synchronizeMailboxFailed(account, folderName, errorMessage); } } public void getAccountStats(final Context context, final Account account, final MessagingListener listener) { threadPool.execute(new Runnable() { @Override public void run() { try { AccountStats stats = account.getStats(context); listener.accountStatusChanged(account, stats); } catch (MessagingException me) { Log.e(K9.LOG_TAG, "Could not get unread count for account " + account.getDescription(), me); } } }); } public void getSearchAccountStats(final SearchAccount searchAccount, final MessagingListener listener) { threadPool.execute(new Runnable() { @Override public void run() { getSearchAccountStatsSynchronous(searchAccount, listener); } }); } public AccountStats getSearchAccountStatsSynchronous(final SearchAccount searchAccount, final MessagingListener listener) { Preferences preferences = Preferences.getPreferences(context); LocalSearch search = searchAccount.getRelatedSearch(); // Collect accounts that belong to the search String[] accountUuids = search.getAccountUuids(); List<Account> accounts; if (search.searchAllAccounts()) { accounts = preferences.getAccounts(); } else { accounts = new ArrayList<Account>(accountUuids.length); for (int i = 0, len = accountUuids.length; i < len; i++) { String accountUuid = accountUuids[i]; accounts.add(preferences.getAccount(accountUuid)); } } ContentResolver cr = context.getContentResolver(); int unreadMessageCount = 0; int flaggedMessageCount = 0; String[] projection = { StatsColumns.UNREAD_COUNT, StatsColumns.FLAGGED_COUNT }; for (Account account : accounts) { StringBuilder query = new StringBuilder(); List<String> queryArgs = new ArrayList<String>(); ConditionsTreeNode conditions = search.getConditions(); SqlQueryBuilder.buildWhereClause(account, conditions, query, queryArgs); String selection = 
query.toString(); String[] selectionArgs = queryArgs.toArray(EMPTY_STRING_ARRAY); Uri uri = Uri.withAppendedPath(EmailProvider.CONTENT_URI, "account/" + account.getUuid() + "/stats"); // Query content provider to get the account stats Cursor cursor = cr.query(uri, projection, selection, selectionArgs, null); try { if (cursor.moveToFirst()) { unreadMessageCount += cursor.getInt(0); flaggedMessageCount += cursor.getInt(1); } } finally { cursor.close(); } } // Create AccountStats instance... AccountStats stats = new AccountStats(); stats.unreadMessageCount = unreadMessageCount; stats.flaggedMessageCount = flaggedMessageCount; // ...and notify the listener if (listener != null) { listener.accountStatusChanged(searchAccount, stats); } return stats; } public void getFolderUnreadMessageCount(final Account account, final String folderName, final MessagingListener l) { Runnable unreadRunnable = new Runnable() { @Override public void run() { int unreadMessageCount = 0; try { Folder localFolder = account.getLocalStore().getFolder(folderName); unreadMessageCount = localFolder.getUnreadMessageCount(); } catch (MessagingException me) { Log.e(K9.LOG_TAG, "Could not get unread count for folder " + folderName + " of account " + account.getDescription(), me); } l.folderStatusChanged(account, folderName, unreadMessageCount); } }; put("getFolderUnread:" + account.getDescription() + ":" + folderName, l, unreadRunnable); } public boolean isMoveCapable(Message message) { return !message.getUid().startsWith(K9.LOCAL_UID_PREFIX); } public boolean isCopyCapable(Message message) { return isMoveCapable(message); } public boolean isMoveCapable(final Account account) { try { Store localStore = account.getLocalStore(); Store remoteStore = account.getRemoteStore(); return localStore.isMoveCapable() && remoteStore.isMoveCapable(); } catch (MessagingException me) { Log.e(K9.LOG_TAG, "Exception while ascertaining move capability", me); return false; } } public boolean isCopyCapable(final Account account) { try { Store localStore = account.getLocalStore(); Store remoteStore = account.getRemoteStore(); return localStore.isCopyCapable() && remoteStore.isCopyCapable(); } catch (MessagingException me) { Log.e(K9.LOG_TAG, "Exception while ascertaining copy capability", me); return false; } } public void moveMessages(final Account account, final String srcFolder, final List<LocalMessage> messages, final String destFolder, final MessagingListener listener) { suppressMessages(account, messages); putBackground("moveMessages", null, new Runnable() { @Override public void run() { moveOrCopyMessageSynchronous(account, srcFolder, messages, destFolder, false, listener); } }); } public void moveMessagesInThread(final Account account, final String srcFolder, final List<LocalMessage> messages, final String destFolder) { suppressMessages(account, messages); putBackground("moveMessagesInThread", null, new Runnable() { @Override public void run() { try { List<Message> messagesInThreads = collectMessagesInThreads(account, messages); moveOrCopyMessageSynchronous(account, srcFolder, messagesInThreads, destFolder, false, null); } catch (MessagingException e) { addErrorMessage(account, "Exception while moving messages", e); } } }); } public void moveMessage(final Account account, final String srcFolder, final LocalMessage message, final String destFolder, final MessagingListener listener) { moveMessages(account, srcFolder, Collections.singletonList(message), destFolder, listener); } public void copyMessages(final Account account, final String srcFolder, final List<? 
extends Message> messages, final String destFolder, final MessagingListener listener) { putBackground("copyMessages", null, new Runnable() { @Override public void run() { moveOrCopyMessageSynchronous(account, srcFolder, messages, destFolder, true, listener); } }); } public void copyMessagesInThread(final Account account, final String srcFolder, final List<? extends Message> messages, final String destFolder) { putBackground("copyMessagesInThread", null, new Runnable() { @Override public void run() { try { List<Message> messagesInThreads = collectMessagesInThreads(account, messages); moveOrCopyMessageSynchronous(account, srcFolder, messagesInThreads, destFolder, true, null); } catch (MessagingException e) { addErrorMessage(account, "Exception while copying messages", e); } } }); } public void copyMessage(final Account account, final String srcFolder, final Message message, final String destFolder, final MessagingListener listener) { copyMessages(account, srcFolder, Collections.singletonList(message), destFolder, listener); } private void moveOrCopyMessageSynchronous(final Account account, final String srcFolder, final List<? extends Message> inMessages, final String destFolder, final boolean isCopy, MessagingListener listener) { try { Map<String, String> uidMap = new HashMap<String, String>(); LocalStore localStore = account.getLocalStore(); Store remoteStore = account.getRemoteStore(); if (!isCopy && (!remoteStore.isMoveCapable() || !localStore.isMoveCapable())) { return; } if (isCopy && (!remoteStore.isCopyCapable() || !localStore.isCopyCapable())) { return; } LocalFolder localSrcFolder = localStore.getFolder(srcFolder); Folder localDestFolder = localStore.getFolder(destFolder); boolean unreadCountAffected = false; List<String> uids = new LinkedList<String>(); for (Message message : inMessages) { String uid = message.getUid(); if (!uid.startsWith(K9.LOCAL_UID_PREFIX)) { uids.add(uid); } if (!unreadCountAffected && !message.isSet(Flag.SEEN)) { unreadCountAffected = true; } } List<LocalMessage> messages = localSrcFolder.getMessages(uids.toArray(EMPTY_STRING_ARRAY), null); if (messages.size() > 0) { Map<String, Message> origUidMap = new HashMap<String, Message>(); for (Message message : messages) { origUidMap.put(message.getUid(), message); } if (K9.DEBUG) Log.i(K9.LOG_TAG, "moveOrCopyMessageSynchronous: source folder = " + srcFolder + ", " + messages.size() + " messages, " + ", destination folder = " + destFolder + ", isCopy = " + isCopy); if (isCopy) { FetchProfile fp = new FetchProfile(); fp.add(FetchProfile.Item.ENVELOPE); fp.add(FetchProfile.Item.BODY); localSrcFolder.fetch(messages, fp, null); uidMap = localSrcFolder.copyMessages(messages, localDestFolder); if (unreadCountAffected) { // If this copy operation changes the unread count in the destination // folder, notify the listeners. 
int unreadMessageCount = localDestFolder.getUnreadMessageCount(); for (MessagingListener l : getListeners()) { l.folderStatusChanged(account, destFolder, unreadMessageCount); } } } else { uidMap = localSrcFolder.moveMessages(messages, localDestFolder); for (Map.Entry<String, Message> entry : origUidMap.entrySet()) { String origUid = entry.getKey(); Message message = entry.getValue(); for (MessagingListener l : getListeners()) { l.messageUidChanged(account, srcFolder, origUid, message.getUid()); } } unsuppressMessages(account, messages); if (unreadCountAffected) { // If this move operation changes the unread count, notify the listeners // that the unread count changed in both the source and destination folder. int unreadMessageCountSrc = localSrcFolder.getUnreadMessageCount(); int unreadMessageCountDest = localDestFolder.getUnreadMessageCount(); for (MessagingListener l : getListeners()) { l.folderStatusChanged(account, srcFolder, unreadMessageCountSrc); l.folderStatusChanged(account, destFolder, unreadMessageCountDest); } } } queueMoveOrCopy(account, srcFolder, destFolder, isCopy, origUidMap.keySet().toArray(EMPTY_STRING_ARRAY), uidMap); } processPendingCommands(account); } catch (UnavailableStorageException e) { Log.i(K9.LOG_TAG, "Failed to move/copy message because storage is not available - trying again later."); throw new UnavailableAccountException(e); } catch (MessagingException me) { addErrorMessage(account, null, me); throw new RuntimeException("Error moving message", me); } } public void expunge(final Account account, final String folder, final MessagingListener listener) { putBackground("expunge", null, new Runnable() { @Override public void run() { queueExpunge(account, folder); } }); } public void deleteDraft(final Account account, long id) { LocalFolder localFolder = null; try { LocalStore localStore = account.getLocalStore(); localFolder = localStore.getFolder(account.getDraftsFolderName()); localFolder.open(Folder.OPEN_MODE_RW); String uid = localFolder.getMessageUidById(id); if (uid != null) { LocalMessage message = localFolder.getMessage(uid); if (message != null) { deleteMessages(Collections.singletonList(message), null); } } } catch (MessagingException me) { addErrorMessage(account, null, me); } finally { closeFolder(localFolder); } } public void deleteThreads(final List<LocalMessage> messages) { actOnMessages(messages, new MessageActor() { @Override public void act(final Account account, final Folder folder, final List<Message> accountMessages) { suppressMessages(account, messages); putBackground("deleteThreads", null, new Runnable() { @Override public void run() { deleteThreadsSynchronous(account, folder.getName(), accountMessages); } }); } }); } public void deleteThreadsSynchronous(Account account, String folderName, List<Message> messages) { try { List<Message> messagesToDelete = collectMessagesInThreads(account, messages); deleteMessagesSynchronous(account, folderName, messagesToDelete, null); } catch (MessagingException e) { Log.e(K9.LOG_TAG, "Something went wrong while deleting threads", e); } } public List<Message> collectMessagesInThreads(Account account, List<? extends Message> messages) throws MessagingException { LocalStore localStore = account.getLocalStore(); List<Message> messagesInThreads = new ArrayList<Message>(); for (Message message : messages) { LocalMessage localMessage = (LocalMessage) message; long rootId = localMessage.getRootId(); long threadId = (rootId == -1) ? localMessage.getThreadId() : rootId; List<? 
extends Message> messagesInThread = localStore.getMessagesInThread(threadId); messagesInThreads.addAll(messagesInThread); } return messagesInThreads; } public void deleteMessages(final List<LocalMessage> messages, final MessagingListener listener) { actOnMessages(messages, new MessageActor() { @Override public void act(final Account account, final Folder folder, final List<Message> accountMessages) { suppressMessages(account, messages); putBackground("deleteMessages", null, new Runnable() { @Override public void run() { deleteMessagesSynchronous(account, folder.getName(), accountMessages, listener); } }); } }); } private void deleteMessagesSynchronous(final Account account, final String folder, final List<? extends Message> messages, MessagingListener listener) { Folder localFolder = null; Folder localTrashFolder = null; String[] uids = getUidsFromMessages(messages); try { //We need to make these callbacks before moving the messages to the trash //as messages get a new UID after being moved for (Message message : messages) { for (MessagingListener l : getListeners(listener)) { l.messageDeleted(account, folder, message); } } Store localStore = account.getLocalStore(); localFolder = localStore.getFolder(folder); Map<String, String> uidMap = null; if (folder.equals(account.getTrashFolderName()) || !account.hasTrashFolder()) { if (K9.DEBUG) Log.d(K9.LOG_TAG, "Deleting messages in trash folder or trash set to -None-, not copying"); localFolder.setFlags(messages, Collections.singleton(Flag.DELETED), true); } else { localTrashFolder = localStore.getFolder(account.getTrashFolderName()); if (!localTrashFolder.exists()) { localTrashFolder.create(Folder.FolderType.HOLDS_MESSAGES); } if (localTrashFolder.exists()) { if (K9.DEBUG) Log.d(K9.LOG_TAG, "Deleting messages in normal folder, moving"); uidMap = localFolder.moveMessages(messages, localTrashFolder); } } for (MessagingListener l : getListeners()) { l.folderStatusChanged(account, folder, localFolder.getUnreadMessageCount()); if (localTrashFolder != null) { l.folderStatusChanged(account, account.getTrashFolderName(), localTrashFolder.getUnreadMessageCount()); } } if (K9.DEBUG) Log.d(K9.LOG_TAG, "Delete policy for account " + account.getDescription() + " is " + account.getDeletePolicy()); if (folder.equals(account.getOutboxFolderName())) { for (Message message : messages) { // If the message was in the Outbox, then it has been copied to local Trash, and has // to be copied to remote trash PendingCommand command = new PendingCommand(); command.command = PENDING_COMMAND_APPEND; command.arguments = new String[] { account.getTrashFolderName(), message.getUid() }; queuePendingCommand(account, command); } processPendingCommands(account); } else if (account.getDeletePolicy() == DeletePolicy.ON_DELETE) { if (folder.equals(account.getTrashFolderName())) { queueSetFlag(account, folder, Boolean.toString(true), Flag.DELETED.toString(), uids); } else { queueMoveOrCopy(account, folder, account.getTrashFolderName(), false, uids, uidMap); } processPendingCommands(account); } else if (account.getDeletePolicy() == DeletePolicy.MARK_AS_READ) { queueSetFlag(account, folder, Boolean.toString(true), Flag.SEEN.toString(), uids); processPendingCommands(account); } else { if (K9.DEBUG) Log.d(K9.LOG_TAG, "Delete policy " + account.getDeletePolicy() + " prevents delete from server"); } unsuppressMessages(account, messages); } catch (UnavailableStorageException e) { Log.i(K9.LOG_TAG, "Failed to delete message because storage is not available - trying again later."); throw new 
UnavailableAccountException(e); } catch (MessagingException me) { addErrorMessage(account, null, me); throw new RuntimeException("Error deleting message from local store.", me); } finally { closeFolder(localFolder); closeFolder(localTrashFolder); } } private String[] getUidsFromMessages(List <? extends Message> messages) { String[] uids = new String[messages.size()]; for (int i = 0; i < messages.size(); i++) { uids[i] = messages.get(i).getUid(); } return uids; } private void processPendingEmptyTrash(PendingCommand command, Account account) throws MessagingException { Store remoteStore = account.getRemoteStore(); Folder remoteFolder = remoteStore.getFolder(account.getTrashFolderName()); try { if (remoteFolder.exists()) { remoteFolder.open(Folder.OPEN_MODE_RW); remoteFolder.setFlags(Collections.singleton(Flag.DELETED), true); if (Expunge.EXPUNGE_IMMEDIATELY == account.getExpungePolicy()) { remoteFolder.expunge(); } // When we empty trash, we need to actually synchronize the folder // or local deletes will never get cleaned up synchronizeFolder(account, remoteFolder, true, 0, null); compact(account, null); } } finally { closeFolder(remoteFolder); } } public void emptyTrash(final Account account, MessagingListener listener) { putBackground("emptyTrash", listener, new Runnable() { @Override public void run() { LocalFolder localFolder = null; try { Store localStore = account.getLocalStore(); localFolder = (LocalFolder) localStore.getFolder(account.getTrashFolderName()); localFolder.open(Folder.OPEN_MODE_RW); boolean isTrashLocalOnly = isTrashLocalOnly(account); if (isTrashLocalOnly) { localFolder.clearAllMessages(); } else { localFolder.setFlags(Collections.singleton(Flag.DELETED), true); } for (MessagingListener l : getListeners()) { l.emptyTrashCompleted(account); } if (!isTrashLocalOnly) { List<String> args = new ArrayList<String>(); PendingCommand command = new PendingCommand(); command.command = PENDING_COMMAND_EMPTY_TRASH; command.arguments = args.toArray(EMPTY_STRING_ARRAY); queuePendingCommand(account, command); processPendingCommands(account); } } catch (UnavailableStorageException e) { Log.i(K9.LOG_TAG, "Failed to empty trash because storage is not available - trying again later."); throw new UnavailableAccountException(e); } catch (Exception e) { Log.e(K9.LOG_TAG, "emptyTrash failed", e); addErrorMessage(account, null, e); } finally { closeFolder(localFolder); } } }); } /** * Find out whether the account type only supports a local Trash folder. * * <p>Note: Currently this is only the case for POP3 accounts.</p> * * @param account * The account to check. * * @return {@code true} if the account only has a local Trash folder that is not synchronized * with a folder on the server. {@code false} otherwise. * * @throws MessagingException * In case of an error. 
*/ private boolean isTrashLocalOnly(Account account) throws MessagingException { // TODO: Get rid of the tight coupling once we properly support local folders return (account.getRemoteStore() instanceof Pop3Store); } public void sendAlternate(final Context context, Account account, Message message) { if (K9.DEBUG) Log.d(K9.LOG_TAG, "About to load message " + account.getDescription() + ":" + message.getFolder().getName() + ":" + message.getUid() + " for sendAlternate"); loadMessageForView(account, message.getFolder().getName(), message.getUid(), new MessagingListener() { @Override public void loadMessageForViewBodyAvailable(Account account, String folder, String uid, Message message) { if (K9.DEBUG) Log.d(K9.LOG_TAG, "Got message " + account.getDescription() + ":" + folder + ":" + message.getUid() + " for sendAlternate"); try { Intent msg = new Intent(Intent.ACTION_SEND); String quotedText = null; Part part = MimeUtility.findFirstPartByMimeType(message, "text/plain"); if (part == null) { part = MimeUtility.findFirstPartByMimeType(message, "text/html"); } if (part != null) { quotedText = MessageExtractor.getTextFromPart(part); } if (quotedText != null) { msg.putExtra(Intent.EXTRA_TEXT, quotedText); } msg.putExtra(Intent.EXTRA_SUBJECT, message.getSubject()); Address[] from = message.getFrom(); String[] senders = new String[from.length]; for (int i = 0; i < from.length; i++) { senders[i] = from[i].toString(); } msg.putExtra(Intents.Share.EXTRA_FROM, senders); Address[] to = message.getRecipients(RecipientType.TO); String[] recipientsTo = new String[to.length]; for (int i = 0; i < to.length; i++) { recipientsTo[i] = to[i].toString(); } msg.putExtra(Intent.EXTRA_EMAIL, recipientsTo); Address[] cc = message.getRecipients(RecipientType.CC); String[] recipientsCc = new String[cc.length]; for (int i = 0; i < cc.length; i++) { recipientsCc[i] = cc[i].toString(); } msg.putExtra(Intent.EXTRA_CC, recipientsCc); msg.setType("text/plain"); context.startActivity(Intent.createChooser(msg, context.getString(R.string.send_alternate_chooser_title))); } catch (MessagingException me) { Log.e(K9.LOG_TAG, "Unable to send email through alternate program", me); } } }); } /** * Checks mail for one or multiple accounts. If account is null all accounts * are checked. 
* * @param context * @param account * @param listener */ public void checkMail(final Context context, final Account account, final boolean ignoreLastCheckedTime, final boolean useManualWakeLock, final MessagingListener listener) { TracingWakeLock twakeLock = null; if (useManualWakeLock) { TracingPowerManager pm = TracingPowerManager.getPowerManager(context); twakeLock = pm.newWakeLock(PowerManager.PARTIAL_WAKE_LOCK, "K9 MessagingController.checkMail"); twakeLock.setReferenceCounted(false); twakeLock.acquire(K9.MANUAL_WAKE_LOCK_TIMEOUT); } final TracingWakeLock wakeLock = twakeLock; for (MessagingListener l : getListeners()) { l.checkMailStarted(context, account); } putBackground("checkMail", listener, new Runnable() { @Override public void run() { try { if (K9.DEBUG) Log.i(K9.LOG_TAG, "Starting mail check"); Preferences prefs = Preferences.getPreferences(context); Collection<Account> accounts; if (account != null) { accounts = new ArrayList<Account>(1); accounts.add(account); } else { accounts = prefs.getAvailableAccounts(); } for (final Account account : accounts) { checkMailForAccount(context, account, ignoreLastCheckedTime, prefs, listener); } } catch (Exception e) { Log.e(K9.LOG_TAG, "Unable to synchronize mail", e); addErrorMessage(account, null, e); } putBackground("finalize sync", null, new Runnable() { @Override public void run() { if (K9.DEBUG) Log.i(K9.LOG_TAG, "Finished mail sync"); if (wakeLock != null) { wakeLock.release(); } for (MessagingListener l : getListeners()) { l.checkMailFinished(context, account); } } } ); } }); } private void checkMailForAccount(final Context context, final Account account, final boolean ignoreLastCheckedTime, final Preferences prefs, final MessagingListener listener) { if (!account.isAvailable(context)) { if (K9.DEBUG) { Log.i(K9.LOG_TAG, "Skipping synchronizing unavailable account " + account.getDescription()); } return; } final long accountInterval = account.getAutomaticCheckIntervalMinutes() * 60 * 1000; if (!ignoreLastCheckedTime && accountInterval <= 0) { if (K9.DEBUG) Log.i(K9.LOG_TAG, "Skipping synchronizing account " + account.getDescription()); return; } if (K9.DEBUG) Log.i(K9.LOG_TAG, "Synchronizing account " + account.getDescription()); account.setRingNotified(false); sendPendingMessages(account, listener); try { Account.FolderMode aDisplayMode = account.getFolderDisplayMode(); Account.FolderMode aSyncMode = account.getFolderSyncMode(); Store localStore = account.getLocalStore(); for (final Folder folder : localStore.getPersonalNamespaces(false)) { folder.open(Folder.OPEN_MODE_RW); Folder.FolderClass fDisplayClass = folder.getDisplayClass(); Folder.FolderClass fSyncClass = folder.getSyncClass(); if (modeMismatch(aDisplayMode, fDisplayClass)) { // Never sync a folder that isn't displayed /* if (K9.DEBUG) Log.v(K9.LOG_TAG, "Not syncing folder " + folder.getName() + " which is in display mode " + fDisplayClass + " while account is in display mode " + aDisplayMode); */ continue; } if (modeMismatch(aSyncMode, fSyncClass)) { // Do not sync folders in the wrong class /* if (K9.DEBUG) Log.v(K9.LOG_TAG, "Not syncing folder " + folder.getName() + " which is in sync mode " + fSyncClass + " while account is in sync mode " + aSyncMode); */ continue; } synchronizeFolder(account, folder, ignoreLastCheckedTime, accountInterval, listener); } } catch (MessagingException e) { Log.e(K9.LOG_TAG, "Unable to synchronize account " + account.getName(), e); addErrorMessage(account, null, e); } finally { putBackground("clear notification flag for " + 
account.getDescription(), null, new Runnable() { @Override public void run() { if (K9.DEBUG) Log.v(K9.LOG_TAG, "Clearing notification flag for " + account.getDescription()); account.setRingNotified(false); try { AccountStats stats = account.getStats(context); if (stats == null || stats.unreadMessageCount == 0) { notificationController.clearNewMailNotifications(account); } } catch (MessagingException e) { Log.e(K9.LOG_TAG, "Unable to getUnreadMessageCount for account: " + account, e); } } } ); } } private void synchronizeFolder( final Account account, final Folder folder, final boolean ignoreLastCheckedTime, final long accountInterval, final MessagingListener listener) { if (K9.DEBUG) Log.v(K9.LOG_TAG, "Folder " + folder.getName() + " was last synced @ " + new Date(folder.getLastChecked())); if (!ignoreLastCheckedTime && folder.getLastChecked() > (System.currentTimeMillis() - accountInterval)) { if (K9.DEBUG) Log.v(K9.LOG_TAG, "Not syncing folder " + folder.getName() + ", previously synced @ " + new Date(folder.getLastChecked()) + " which would be too recent for the account period"); return; } putBackground("sync" + folder.getName(), null, new Runnable() { @Override public void run() { LocalFolder tLocalFolder = null; try { // In case multiple Commands get enqueued, don't run more than // once final LocalStore localStore = account.getLocalStore(); tLocalFolder = localStore.getFolder(folder.getName()); tLocalFolder.open(Folder.OPEN_MODE_RW); if (!ignoreLastCheckedTime && tLocalFolder.getLastChecked() > (System.currentTimeMillis() - accountInterval)) { if (K9.DEBUG) Log.v(K9.LOG_TAG, "Not running Command for folder " + folder.getName() + ", previously synced @ " + new Date(folder.getLastChecked()) + " which would be too recent for the account period"); return; } showFetchingMailNotificationIfNecessary(account, folder); try { synchronizeMailboxSynchronous(account, folder.getName(), listener, null); } finally { clearFetchingMailNotificationIfNecessary(account); } } catch (Exception e) { Log.e(K9.LOG_TAG, "Exception while processing folder " + account.getDescription() + ":" + folder.getName(), e); addErrorMessage(account, null, e); } finally { closeFolder(tLocalFolder); } } } ); } private void showFetchingMailNotificationIfNecessary(Account account, Folder folder) { if (account.isShowOngoing()) { notificationController.showFetchingMailNotification(account, folder); } } private void clearFetchingMailNotificationIfNecessary(Account account) { if (account.isShowOngoing()) { notificationController.clearFetchingMailNotification(account); } } public void compact(final Account account, final MessagingListener ml) { putBackground("compact:" + account.getDescription(), ml, new Runnable() { @Override public void run() { try { LocalStore localStore = account.getLocalStore(); long oldSize = localStore.getSize(); localStore.compact(); long newSize = localStore.getSize(); for (MessagingListener l : getListeners(ml)) { l.accountSizeChanged(account, oldSize, newSize); } } catch (UnavailableStorageException e) { Log.i(K9.LOG_TAG, "Failed to compact account because storage is not available - trying again later."); throw new UnavailableAccountException(e); } catch (Exception e) { Log.e(K9.LOG_TAG, "Failed to compact account " + account.getDescription(), e); } } }); } public void clear(final Account account, final MessagingListener ml) { putBackground("clear:" + account.getDescription(), ml, new Runnable() { @Override public void run() { try { LocalStore localStore = account.getLocalStore(); long oldSize = 
localStore.getSize(); localStore.clear(); localStore.resetVisibleLimits(account.getDisplayCount()); long newSize = localStore.getSize(); AccountStats stats = new AccountStats(); stats.size = newSize; stats.unreadMessageCount = 0; stats.flaggedMessageCount = 0; for (MessagingListener l : getListeners(ml)) { l.accountSizeChanged(account, oldSize, newSize); l.accountStatusChanged(account, stats); } } catch (UnavailableStorageException e) { Log.i(K9.LOG_TAG, "Failed to clear account because storage is not available - trying again later."); throw new UnavailableAccountException(e); } catch (Exception e) { Log.e(K9.LOG_TAG, "Failed to clear account " + account.getDescription(), e); } } }); } public void recreate(final Account account, final MessagingListener ml) { putBackground("recreate:" + account.getDescription(), ml, new Runnable() { @Override public void run() { try { LocalStore localStore = account.getLocalStore(); long oldSize = localStore.getSize(); localStore.recreate(); localStore.resetVisibleLimits(account.getDisplayCount()); long newSize = localStore.getSize(); AccountStats stats = new AccountStats(); stats.size = newSize; stats.unreadMessageCount = 0; stats.flaggedMessageCount = 0; for (MessagingListener l : getListeners(ml)) { l.accountSizeChanged(account, oldSize, newSize); l.accountStatusChanged(account, stats); } } catch (UnavailableStorageException e) { Log.i(K9.LOG_TAG, "Failed to recreate an account because storage is not available - trying again later."); throw new UnavailableAccountException(e); } catch (Exception e) { Log.e(K9.LOG_TAG, "Failed to recreate account " + account.getDescription(), e); } } }); } private boolean shouldNotifyForMessage(Account account, LocalFolder localFolder, Message message) { // If we don't even have an account name, don't show the notification. // (This happens during initial account setup) if (account.getName() == null) { return false; } // Do not notify if the user does not have notifications enabled or if the message has // been read. if (!account.isNotifyNewMail() || message.isSet(Flag.SEEN)) { return false; } Account.FolderMode aDisplayMode = account.getFolderDisplayMode(); Account.FolderMode aNotifyMode = account.getFolderNotifyNewMailMode(); Folder.FolderClass fDisplayClass = localFolder.getDisplayClass(); Folder.FolderClass fNotifyClass = localFolder.getNotifyClass(); if (modeMismatch(aDisplayMode, fDisplayClass)) { // Never notify a folder that isn't displayed return false; } if (modeMismatch(aNotifyMode, fNotifyClass)) { // Do not notify folders in the wrong class return false; } // If the account is a POP3 account and the message is older than the oldest message we've // previously seen, then don't notify about it. if (account.getStoreUri().startsWith("pop3") && message.olderThan(new Date(account.getLatestOldMessageSeenTime()))) { return false; } // No notification for new messages in Trash, Drafts, Spam or Sent folder. // But do notify if it's the INBOX (see issue 1817). 
Folder folder = message.getFolder(); if (folder != null) { String folderName = folder.getName(); if (!account.getInboxFolderName().equals(folderName) && (account.getTrashFolderName().equals(folderName) || account.getDraftsFolderName().equals(folderName) || account.getSpamFolderName().equals(folderName) || account.getSentFolderName().equals(folderName))) { return false; } } if (message.getUid() != null && localFolder.getLastUid() != null) { try { Integer messageUid = Integer.parseInt(message.getUid()); if (messageUid <= localFolder.getLastUid()) { if (K9.DEBUG) Log.d(K9.LOG_TAG, "Message uid is " + messageUid + ", max message uid is " + localFolder.getLastUid() + ". Skipping notification."); return false; } } catch (NumberFormatException e) { // Nothing to be done here. } } // Don't notify if the sender address matches one of our identities and the user chose not // to be notified for such messages. if (account.isAnIdentity(message.getFrom()) && !account.isNotifySelfNewMail()) { return false; } return true; } public void deleteAccount(Context context, Account account) { notificationController.clearNewMailNotifications(account); memorizingListener.removeAccount(account); } /** * Save a draft message. * @param account Account we are saving for. * @param message Message to save. * @return Message representing the entry in the local store. */ public Message saveDraft(final Account account, final Message message, long existingDraftId, boolean saveRemotely) { Message localMessage = null; try { LocalStore localStore = account.getLocalStore(); LocalFolder localFolder = localStore.getFolder(account.getDraftsFolderName()); localFolder.open(Folder.OPEN_MODE_RW); if (existingDraftId != INVALID_MESSAGE_ID) { String uid = localFolder.getMessageUidById(existingDraftId); message.setUid(uid); } // Save the message to the store. localFolder.appendMessages(Collections.singletonList(message)); // Fetch the message back from the store. This is the Message that's returned to the caller. 
localMessage = localFolder.getMessage(message.getUid()); localMessage.setFlag(Flag.X_DOWNLOADED_FULL, true); if (saveRemotely) { PendingCommand command = new PendingCommand(); command.command = PENDING_COMMAND_APPEND; command.arguments = new String[] { localFolder.getName(), localMessage.getUid() }; queuePendingCommand(account, command); processPendingCommands(account); } } catch (MessagingException e) { Log.e(K9.LOG_TAG, "Unable to save message as draft.", e); addErrorMessage(account, null, e); } return localMessage; } public long getId(Message message) { long id; if (message instanceof LocalMessage) { id = ((LocalMessage) message).getId(); } else { Log.w(K9.LOG_TAG, "MessagingController.getId() called without a LocalMessage"); id = INVALID_MESSAGE_ID; } return id; } public boolean modeMismatch(Account.FolderMode aMode, Folder.FolderClass fMode) { if (aMode == Account.FolderMode.NONE || (aMode == Account.FolderMode.FIRST_CLASS && fMode != Folder.FolderClass.FIRST_CLASS) || (aMode == Account.FolderMode.FIRST_AND_SECOND_CLASS && fMode != Folder.FolderClass.FIRST_CLASS && fMode != Folder.FolderClass.SECOND_CLASS) || (aMode == Account.FolderMode.NOT_SECOND_CLASS && fMode == Folder.FolderClass.SECOND_CLASS)) { return true; } else { return false; } } static AtomicInteger sequencing = new AtomicInteger(0); static class Command implements Comparable<Command> { public Runnable runnable; public MessagingListener listener; public String description; boolean isForeground; int sequence = sequencing.getAndIncrement(); @Override public int compareTo(Command other) { if (other.isForeground && !isForeground) { return 1; } else if (!other.isForeground && isForeground) { return -1; } else { return (sequence - other.sequence); } } } public MessagingListener getCheckMailListener() { return checkMailListener; } public void setCheckMailListener(MessagingListener checkMailListener) { if (this.checkMailListener != null) { removeListener(this.checkMailListener); } this.checkMailListener = checkMailListener; if (this.checkMailListener != null) { addListener(this.checkMailListener); } } public Collection<Pusher> getPushers() { return pushers.values(); } public boolean setupPushing(final Account account) { try { Pusher previousPusher = pushers.remove(account); if (previousPusher != null) { previousPusher.stop(); } Account.FolderMode aDisplayMode = account.getFolderDisplayMode(); Account.FolderMode aPushMode = account.getFolderPushMode(); List<String> names = new ArrayList<String>(); Store localStore = account.getLocalStore(); for (final Folder folder : localStore.getPersonalNamespaces(false)) { if (folder.getName().equals(account.getErrorFolderName()) || folder.getName().equals(account.getOutboxFolderName())) { /* if (K9.DEBUG) Log.v(K9.LOG_TAG, "Not pushing folder " + folder.getName() + " which should never be pushed"); */ continue; } folder.open(Folder.OPEN_MODE_RW); Folder.FolderClass fDisplayClass = folder.getDisplayClass(); Folder.FolderClass fPushClass = folder.getPushClass(); if (modeMismatch(aDisplayMode, fDisplayClass)) { // Never push a folder that isn't displayed /* if (K9.DEBUG) Log.v(K9.LOG_TAG, "Not pushing folder " + folder.getName() + " which is in display class " + fDisplayClass + " while account is in display mode " + aDisplayMode); */ continue; } if (modeMismatch(aPushMode, fPushClass)) { // Do not push folders in the wrong class /* if (K9.DEBUG) Log.v(K9.LOG_TAG, "Not pushing folder " + folder.getName() + " which is in push mode " + fPushClass + " while account is in push mode " + aPushMode); */ 
continue; } if (K9.DEBUG) Log.i(K9.LOG_TAG, "Starting pusher for " + account.getDescription() + ":" + folder.getName()); names.add(folder.getName()); } if (!names.isEmpty()) { PushReceiver receiver = new MessagingControllerPushReceiver(context, account, this); int maxPushFolders = account.getMaxPushFolders(); if (names.size() > maxPushFolders) { if (K9.DEBUG) Log.i(K9.LOG_TAG, "Count of folders to push for account " + account.getDescription() + " is " + names.size() + ", greater than limit of " + maxPushFolders + ", truncating"); names = names.subList(0, maxPushFolders); } try { Store store = account.getRemoteStore(); if (!store.isPushCapable()) { if (K9.DEBUG) Log.i(K9.LOG_TAG, "Account " + account.getDescription() + " is not push capable, skipping"); return false; } Pusher pusher = store.getPusher(receiver); if (pusher != null) { Pusher oldPusher = pushers.putIfAbsent(account, pusher); if (oldPusher == null) { pusher.start(names); } } } catch (Exception e) { Log.e(K9.LOG_TAG, "Could not get remote store", e); return false; } return true; } else { if (K9.DEBUG) Log.i(K9.LOG_TAG, "No folders are configured for pushing in account " + account.getDescription()); return false; } } catch (Exception e) { Log.e(K9.LOG_TAG, "Got exception while setting up pushing", e); } return false; } public void stopAllPushing() { if (K9.DEBUG) Log.i(K9.LOG_TAG, "Stopping all pushers"); Iterator<Pusher> iter = pushers.values().iterator(); while (iter.hasNext()) { Pusher pusher = iter.next(); iter.remove(); pusher.stop(); } } public void messagesArrived(final Account account, final Folder remoteFolder, final List<Message> messages, final boolean flagSyncOnly) { if (K9.DEBUG) Log.i(K9.LOG_TAG, "Got new pushed email messages for account " + account.getDescription() + ", folder " + remoteFolder.getName()); final CountDownLatch latch = new CountDownLatch(1); putBackground("Push messageArrived of account " + account.getDescription() + ", folder " + remoteFolder.getName(), null, new Runnable() { @Override public void run() { LocalFolder localFolder = null; try { LocalStore localStore = account.getLocalStore(); localFolder = localStore.getFolder(remoteFolder.getName()); localFolder.open(Folder.OPEN_MODE_RW); account.setRingNotified(false); int newCount = downloadMessages(account, remoteFolder, localFolder, messages, flagSyncOnly); int unreadMessageCount = localFolder.getUnreadMessageCount(); localFolder.setLastPush(System.currentTimeMillis()); localFolder.setStatus(null); if (K9.DEBUG) Log.i(K9.LOG_TAG, "messagesArrived newCount = " + newCount + ", unread count = " + unreadMessageCount); if (unreadMessageCount == 0) { notificationController.clearNewMailNotifications(account); } for (MessagingListener l : getListeners()) { l.folderStatusChanged(account, remoteFolder.getName(), unreadMessageCount); } } catch (Exception e) { String rootMessage = getRootCauseMessage(e); String errorMessage = "Push failed: " + rootMessage; try { // Oddly enough, using a local variable gets rid of a // potential null pointer access warning with Eclipse. 
LocalFolder folder = localFolder; folder.setStatus(errorMessage); } catch (Exception se) { Log.e(K9.LOG_TAG, "Unable to set failed status on localFolder", se); } for (MessagingListener l : getListeners()) { l.synchronizeMailboxFailed(account, remoteFolder.getName(), errorMessage); } addErrorMessage(account, null, e); } finally { closeFolder(localFolder); latch.countDown(); } } }); try { latch.await(); } catch (Exception e) { Log.e(K9.LOG_TAG, "Interrupted while awaiting latch release", e); } if (K9.DEBUG) Log.i(K9.LOG_TAG, "MessagingController.messagesArrivedLatch released"); } public void systemStatusChanged() { for (MessagingListener l : getListeners()) { l.systemStatusChanged(); } } public void cancelNotificationsForAccount(Account account) { notificationController.clearNewMailNotifications(account); } public void cancelNotificationForMessage(Account account, MessageReference messageReference) { notificationController.removeNewMailNotification(account, messageReference); } public void clearCertificateErrorNotifications(Account account, CheckDirection direction) { boolean incoming = (direction == CheckDirection.INCOMING); notificationController.clearCertificateErrorNotifications(account, incoming); } public void notifyUserIfCertificateProblem(Account account, Exception exception, boolean incoming) { if (!(exception instanceof CertificateValidationException)) { return; } CertificateValidationException cve = (CertificateValidationException) exception; if (!cve.needsUserAttention()) { return; } notificationController.showCertificateErrorNotification(account, incoming); } enum MemorizingState { STARTED, FINISHED, FAILED } static class Memory { Account account; String folderName; MemorizingState syncingState = null; MemorizingState sendingState = null; MemorizingState pushingState = null; MemorizingState processingState = null; String failureMessage = null; int syncingTotalMessagesInMailbox; int syncingNumNewMessages; int folderCompleted = 0; int folderTotal = 0; String processingCommandTitle = null; Memory(Account nAccount, String nFolderName) { account = nAccount; folderName = nFolderName; } String getKey() { return getMemoryKey(account, folderName); } } static String getMemoryKey(Account taccount, String tfolderName) { return taccount.getDescription() + ":" + tfolderName; } static class MemorizingListener extends MessagingListener { Map<String, Memory> memories = new HashMap<String, Memory>(31); Memory getMemory(Account account, String folderName) { Memory memory = memories.get(getMemoryKey(account, folderName)); if (memory == null) { memory = new Memory(account, folderName); memories.put(memory.getKey(), memory); } return memory; } synchronized void removeAccount(Account account) { Iterator<Entry<String, Memory>> memIt = memories.entrySet().iterator(); while (memIt.hasNext()) { Entry<String, Memory> memoryEntry = memIt.next(); String uuidForMemory = memoryEntry.getValue().account.getUuid(); if (uuidForMemory.equals(account.getUuid())) { memIt.remove(); } } } @Override public synchronized void synchronizeMailboxStarted(Account account, String folder) { Memory memory = getMemory(account, folder); memory.syncingState = MemorizingState.STARTED; memory.folderCompleted = 0; memory.folderTotal = 0; } @Override public synchronized void synchronizeMailboxFinished(Account account, String folder, int totalMessagesInMailbox, int numNewMessages) { Memory memory = getMemory(account, folder); memory.syncingState = MemorizingState.FINISHED; memory.syncingTotalMessagesInMailbox = totalMessagesInMailbox; 
memory.syncingNumNewMessages = numNewMessages; } @Override public synchronized void synchronizeMailboxFailed(Account account, String folder, String message) { Memory memory = getMemory(account, folder); memory.syncingState = MemorizingState.FAILED; memory.failureMessage = message; } synchronized void refreshOther(MessagingListener other) { if (other != null) { Memory syncStarted = null; Memory sendStarted = null; Memory processingStarted = null; for (Memory memory : memories.values()) { if (memory.syncingState != null) { switch (memory.syncingState) { case STARTED: syncStarted = memory; break; case FINISHED: other.synchronizeMailboxFinished(memory.account, memory.folderName, memory.syncingTotalMessagesInMailbox, memory.syncingNumNewMessages); break; case FAILED: other.synchronizeMailboxFailed(memory.account, memory.folderName, memory.failureMessage); break; } } if (memory.sendingState != null) { switch (memory.sendingState) { case STARTED: sendStarted = memory; break; case FINISHED: other.sendPendingMessagesCompleted(memory.account); break; case FAILED: other.sendPendingMessagesFailed(memory.account); break; } } if (memory.pushingState != null) { switch (memory.pushingState) { case STARTED: other.setPushActive(memory.account, memory.folderName, true); break; case FINISHED: other.setPushActive(memory.account, memory.folderName, false); break; case FAILED: break; } } if (memory.processingState != null) { switch (memory.processingState) { case STARTED: processingStarted = memory; break; case FINISHED: case FAILED: other.pendingCommandsFinished(memory.account); break; } } } Memory somethingStarted = null; if (syncStarted != null) { other.synchronizeMailboxStarted(syncStarted.account, syncStarted.folderName); somethingStarted = syncStarted; } if (sendStarted != null) { other.sendPendingMessagesStarted(sendStarted.account); somethingStarted = sendStarted; } if (processingStarted != null) { other.pendingCommandsProcessing(processingStarted.account); if (processingStarted.processingCommandTitle != null) { other.pendingCommandStarted(processingStarted.account, processingStarted.processingCommandTitle); } else { other.pendingCommandCompleted(processingStarted.account, processingStarted.processingCommandTitle); } somethingStarted = processingStarted; } if (somethingStarted != null && somethingStarted.folderTotal > 0) { other.synchronizeMailboxProgress(somethingStarted.account, somethingStarted.folderName, somethingStarted.folderCompleted, somethingStarted.folderTotal); } } } @Override public synchronized void setPushActive(Account account, String folderName, boolean active) { Memory memory = getMemory(account, folderName); memory.pushingState = (active ? 
MemorizingState.STARTED : MemorizingState.FINISHED); } @Override public synchronized void sendPendingMessagesStarted(Account account) { Memory memory = getMemory(account, null); memory.sendingState = MemorizingState.STARTED; memory.folderCompleted = 0; memory.folderTotal = 0; } @Override public synchronized void sendPendingMessagesCompleted(Account account) { Memory memory = getMemory(account, null); memory.sendingState = MemorizingState.FINISHED; } @Override public synchronized void sendPendingMessagesFailed(Account account) { Memory memory = getMemory(account, null); memory.sendingState = MemorizingState.FAILED; } @Override public synchronized void synchronizeMailboxProgress(Account account, String folderName, int completed, int total) { Memory memory = getMemory(account, folderName); memory.folderCompleted = completed; memory.folderTotal = total; } @Override public synchronized void pendingCommandsProcessing(Account account) { Memory memory = getMemory(account, null); memory.processingState = MemorizingState.STARTED; memory.folderCompleted = 0; memory.folderTotal = 0; } @Override public synchronized void pendingCommandsFinished(Account account) { Memory memory = getMemory(account, null); memory.processingState = MemorizingState.FINISHED; } @Override public synchronized void pendingCommandStarted(Account account, String commandTitle) { Memory memory = getMemory(account, null); memory.processingCommandTitle = commandTitle; } @Override public synchronized void pendingCommandCompleted(Account account, String commandTitle) { Memory memory = getMemory(account, null); memory.processingCommandTitle = null; } } private void actOnMessages(List<LocalMessage> messages, MessageActor actor) { Map<Account, Map<Folder, List<Message>>> accountMap = new HashMap<Account, Map<Folder, List<Message>>>(); for (LocalMessage message : messages) { if ( message == null) { continue; } Folder folder = message.getFolder(); Account account = message.getAccount(); Map<Folder, List<Message>> folderMap = accountMap.get(account); if (folderMap == null) { folderMap = new HashMap<Folder, List<Message>>(); accountMap.put(account, folderMap); } List<Message> messageList = folderMap.get(folder); if (messageList == null) { messageList = new LinkedList<Message>(); folderMap.put(folder, messageList); } messageList.add(message); } for (Map.Entry<Account, Map<Folder, List<Message>>> entry : accountMap.entrySet()) { Account account = entry.getKey(); //account.refresh(Preferences.getPreferences(K9.app)); Map<Folder, List<Message>> folderMap = entry.getValue(); for (Map.Entry<Folder, List<Message>> folderEntry : folderMap.entrySet()) { Folder folder = folderEntry.getKey(); List<Message> messageList = folderEntry.getValue(); actor.act(account, folder, messageList); } } } interface MessageActor { public void act(final Account account, final Folder folder, final List<Message> messages); } }
1
13,533
Please don't make unrelated changes or let your tools make unrelated changes.
k9mail-k-9
java
@@ -18,7 +18,7 @@ import (
 	"github.com/iotexproject/iotex-core/pkg/log"
 )
 
-var numAccounts int
+var numAccounts uint
 
 // accountCreateCmd represents the account create command
 var accountCreateCmd = &cobra.Command{
1
// Copyright (c) 2019 IoTeX
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.

package account

import (
	"fmt"
	"strings"

	"github.com/spf13/cobra"
	"go.uber.org/zap"

	"github.com/iotexproject/iotex-core/address"
	"github.com/iotexproject/iotex-core/pkg/keypair"
	"github.com/iotexproject/iotex-core/pkg/log"
)

var numAccounts int

// accountCreateCmd represents the account create command
var accountCreateCmd = &cobra.Command{
	Use:   "create",
	Short: "Create N new accounts and print them",
	Args:  cobra.ExactArgs(0),
	Run: func(cmd *cobra.Command, args []string) {
		fmt.Println(accountCreate(args))
	},
}

func init() {
	accountCreateCmd.Flags().IntVarP(&numAccounts, "num", "n", 1, "number of accounts to create")
}

func accountCreate(_ []string) string {
	items := make([]string, numAccounts)
	for i := 0; i < numAccounts; i++ {
		private, err := keypair.GenerateKey()
		if err != nil {
			log.L().Fatal("failed to create key pair", zap.Error(err))
		}
		addr, _ := address.FromBytes(private.PublicKey().Hash())
		priKeyBytes := private.Bytes()
		pubKeyBytes := private.PublicKey().Bytes()
		items[i] = fmt.Sprintf(
			"{\"Address\": \"%s\", \"PrivateKey\": \"%x\", \"PublicKey\": \"%x\"}\n",
			addr.String(), priKeyBytes, pubKeyBytes)
	}
	return strings.Join(items, "")
}
1
16,125
`numAccounts` is a global variable (from `gochecknoglobals`)
iotexproject-iotex-core
go
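Illustration (not part of the dataset row above): the review comment flags numAccounts as a package-level global (gochecknoglobals), while the visible diff only changes its type from int to uint. A minimal Go sketch of one way to satisfy that linter is shown below: declare the flag on the command and read it back inside Run so no package-level variable is needed. The flag name "num" and the helper accountCreate come from the row; the constructor newAccountCreateCmd and the changed accountCreate signature are assumptions, not the fix the maintainers necessarily chose.

// Sketch: the "num" flag is declared on the command and read inside Run,
// so there is no package-level numAccounts variable for gochecknoglobals to flag.
package account

import (
	"fmt"
	"strings"

	"github.com/spf13/cobra"
)

// newAccountCreateCmd is a hypothetical constructor; in the row above the command is a package-level var.
func newAccountCreateCmd() *cobra.Command {
	cmd := &cobra.Command{
		Use:   "create",
		Short: "Create N new accounts and print them",
		Args:  cobra.ExactArgs(0),
		RunE: func(cmd *cobra.Command, args []string) error {
			// Read the flag value locally instead of through a global.
			num, err := cmd.Flags().GetUint("num")
			if err != nil {
				return err
			}
			fmt.Println(accountCreate(num))
			return nil
		},
	}
	cmd.Flags().UintP("num", "n", 1, "number of accounts to create")
	return cmd
}

// accountCreate is assumed to take the count as a parameter instead of reading a global;
// key generation is elided to keep the sketch self-contained.
func accountCreate(num uint) string {
	items := make([]string, 0, num)
	for i := uint(0); i < num; i++ {
		items = append(items, "{\"Address\": \"...\"}\n")
	}
	return strings.Join(items, "")
}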
@@ -855,6 +855,9 @@ class PluginManager
         if ($pluginPath = self::instance()->getPluginPath($id)) {
             File::deleteDirectory($pluginPath);
         }
+
+        // actually remove the plugin from our internal container
+        unset($this->plugins[$id]);
     }
 
     /**
1
<?php namespace System\Classes; use Db; use App; use Str; use Log; use File; use Lang; use View; use Config; use Schema; use SystemException; use RecursiveIteratorIterator; use RecursiveDirectoryIterator; /** * Plugin manager * * @package october\system * @author Alexey Bobkov, Samuel Georges */ class PluginManager { use \October\Rain\Support\Traits\Singleton; /** * The application instance, since Plugins are an extension of a Service Provider */ protected $app; /** * @var array Container array used for storing plugin information objects. */ protected $plugins; /** * @var array A map of plugins and their directory paths. */ protected $pathMap = []; /** * @var array A map of normalized plugin identifiers [lowercase.identifier => Normalized.Identifier] */ protected $normalizedMap = []; /** * @var bool Flag to indicate that all plugins have had the register() method called by registerAll() being called on this class. */ protected $registered = false; /** * @var bool Flag to indicate that all plugins have had the boot() method called by bootAll() being called on this class. */ protected $booted = false; /** * @var string Path to the JSON encoded file containing the disabled plugins. */ protected $metaFile; /** * @var array Array of disabled plugins */ protected $disabledPlugins = []; /** * @var array Cache of registration method results. */ protected $registrationMethodCache = []; /** * @var bool Prevent all plugins from registering or booting */ public static $noInit = false; /** * Initializes the plugin manager */ protected function init() { $this->bindContainerObjects(); $this->metaFile = storage_path('cms/disabled.json'); $this->loadDisabled(); $this->loadPlugins(); if ($this->app->runningInBackend()) { $this->loadDependencies(); } } /** * These objects are "soft singletons" and may be lost when * the IoC container reboots. This provides a way to rebuild * for the purposes of unit testing. */ public function bindContainerObjects() { $this->app = App::make('app'); } /** * Finds all available plugins and loads them in to the $this->plugins array. * * @return array */ public function loadPlugins() { $this->plugins = []; /** * Locate all plugins and binds them to the container */ foreach ($this->getPluginNamespaces() as $namespace => $path) { $this->loadPlugin($namespace, $path); } $this->sortDependencies(); return $this->plugins; } /** * Loads a single plugin into the manager. * * @param string $namespace Eg: Acme\Blog * @param string $path Eg: plugins_path().'/acme/blog'; * @return void */ public function loadPlugin($namespace, $path) { $className = $namespace . '\Plugin'; $classPath = $path . '/Plugin.php'; try { // Autoloader failed? if (!class_exists($className)) { include_once $classPath; } // Not a valid plugin! if (!class_exists($className)) { return; } $classObj = new $className($this->app); } catch (\Throwable $e) { Log::error('Plugin ' . $className . ' could not be instantiated.', [ 'message' => $e->getMessage(), 'file' => $e->getFile(), 'line' => $e->getLine(), 'trace' => $e->getTraceAsString() ]); return; } $classId = $this->getIdentifier($classObj); /* * Check for disabled plugins */ if ($this->isDisabled($classId)) { $classObj->disabled = true; } $this->plugins[$classId] = $classObj; $this->pathMap[$classId] = $path; $this->normalizedMap[strtolower($classId)] = $classId; return $classObj; } /** * Runs the register() method on all plugins. Can only be called once. * * @param bool $force Defaults to false, if true will force the re-registration of all plugins. 
Use unregisterAll() instead. * @return void */ public function registerAll($force = false) { if ($this->registered && !$force) { return; } foreach ($this->plugins as $pluginId => $plugin) { $this->registerPlugin($plugin, $pluginId); } $this->registered = true; } /** * Unregisters all plugins: the inverse of registerAll(). * * @return void */ public function unregisterAll() { $this->registered = false; $this->plugins = []; } /** * Registers a single plugin object. * * @param PluginBase $plugin The instantiated Plugin object * @param string $pluginId The string identifier for the plugin * @return void */ public function registerPlugin($plugin, $pluginId = null) { if (!$pluginId) { $pluginId = $this->getIdentifier($plugin); } $pluginPath = $this->getPluginPath($plugin); $pluginNamespace = strtolower($pluginId); /* * Register language namespaces */ $langPath = $pluginPath . '/lang'; if (File::isDirectory($langPath)) { Lang::addNamespace($pluginNamespace, $langPath); } /** * Prevent autoloaders from loading if plugin is disabled */ if ($plugin->disabled) { return; } /* * Register plugin class autoloaders */ $autoloadPath = $pluginPath . '/vendor/autoload.php'; if (File::isFile($autoloadPath)) { ComposerManager::instance()->autoload($pluginPath . '/vendor'); } /* * Register configuration path */ $configPath = $pluginPath . '/config'; if (File::isDirectory($configPath)) { Config::package($pluginNamespace, $configPath, $pluginNamespace); } /* * Register views path */ $viewsPath = $pluginPath . '/views'; if (File::isDirectory($viewsPath)) { View::addNamespace($pluginNamespace, $viewsPath); } /** * Disable plugin registration for restricted pages, unless elevated */ if (self::$noInit && !$plugin->elevated) { return; } /** * Run the plugin's register() method */ $plugin->register(); /* * Add init, if available */ $initFile = $pluginPath . '/init.php'; if (File::exists($initFile)) { require $initFile; } /* * Add routes, if available */ $routesFile = $pluginPath . '/routes.php'; if (File::exists($routesFile)) { require $routesFile; } } /** * Runs the boot() method on all plugins. Can only be called once. * * @param bool $force Defaults to false, if true will force the re-booting of all plugins * @return void */ public function bootAll($force = false) { if ($this->booted && !$force) { return; } foreach ($this->plugins as $plugin) { $this->bootPlugin($plugin); } $this->booted = true; } /** * Boots the provided plugin object. * * @param PluginBase $plugin * @return void */ public function bootPlugin($plugin) { if (!$plugin || $plugin->disabled || (self::$noInit && !$plugin->elevated)) { return; } $plugin->boot(); } /** * Returns the directory path to a plugin * * @param PluginBase|string $id The plugin to get the path for * @return string|null */ public function getPluginPath($id) { $classId = $this->getIdentifier($id); if (!isset($this->pathMap[$classId])) { return null; } return File::normalizePath($this->pathMap[$classId]); } /** * Check if a plugin exists and is enabled. 
* * @param string $id Plugin identifier, eg: Namespace.PluginName * @return bool */ public function exists($id) { return $this->findByIdentifier($id) && !$this->isDisabled($id); } /** * Returns an array with all enabled plugins * * @return array [$code => $pluginObj] */ public function getPlugins() { return array_diff_key($this->plugins, $this->disabledPlugins); } /** * Returns an array will all plugins detected on the filesystem * * @return array [$code => $pluginObj] */ public function getAllPlugins() { return $this->plugins; } /** * Returns a plugin registration class based on its namespace (Author\Plugin). * * @param string $namespace * @return PluginBase|null */ public function findByNamespace($namespace) { $identifier = $this->getIdentifier($namespace); return $this->plugins[$identifier] ?? null; } /** * Returns a plugin registration class based on its identifier (Author.Plugin). * * @param string|PluginBase $identifier * @return PluginBase|null */ public function findByIdentifier($identifier) { if (!isset($this->plugins[$identifier])) { $code = $this->getIdentifier($identifier); $identifier = $this->normalizeIdentifier($code); } return $this->plugins[$identifier] ?? null; } /** * Checks to see if a plugin has been registered. * * @param string|PluginBase * @return bool */ public function hasPlugin($namespace) { $classId = $this->getIdentifier($namespace); $normalized = $this->normalizeIdentifier($classId); return isset($this->plugins[$normalized]); } /** * Returns a flat array of vendor plugin namespaces and their paths * * @return array ['Author\Plugin' => 'plugins/author/plugin'] */ public function getPluginNamespaces() { $classNames = []; foreach ($this->getVendorAndPluginNames() as $vendorName => $vendorList) { foreach ($vendorList as $pluginName => $pluginPath) { $namespace = '\\'.$vendorName.'\\'.$pluginName; $namespace = Str::normalizeClassName($namespace); $classNames[$namespace] = $pluginPath; } } return $classNames; } /** * Returns a 2 dimensional array of vendors and their plugins. * * @return array ['vendor' => ['author' => 'plugins/author/plugin']] */ public function getVendorAndPluginNames() { $plugins = []; $dirPath = plugins_path(); if (!File::isDirectory($dirPath)) { return $plugins; } $it = new RecursiveIteratorIterator( new RecursiveDirectoryIterator($dirPath, RecursiveDirectoryIterator::FOLLOW_SYMLINKS) ); $it->setMaxDepth(2); $it->rewind(); while ($it->valid()) { if (($it->getDepth() > 1) && $it->isFile() && (strtolower($it->getFilename()) == "plugin.php")) { $filePath = dirname($it->getPathname()); $pluginName = basename($filePath); $vendorName = basename(dirname($filePath)); $plugins[$vendorName][$pluginName] = $filePath; } $it->next(); } return $plugins; } /** * Resolves a plugin identifier (Author.Plugin) from a plugin class name or object. 
* * @param mixed Plugin class name or object * @return string Identifier in format of Author.Plugin */ public function getIdentifier($namespace) { $namespace = Str::normalizeClassName($namespace); if (strpos($namespace, '\\') === null) { return $namespace; } $parts = explode('\\', $namespace); $slice = array_slice($parts, 1, 2); $namespace = implode('.', $slice); return $namespace; } /** * Takes a human plugin code (acme.blog) and makes it authentic (Acme.Blog) * Returns the provided identifier if a match isn't found * * @param string $identifier * @return string */ public function normalizeIdentifier($identifier) { $id = strtolower($identifier); if (isset($this->normalizedMap[$id])) { return $this->normalizedMap[$id]; } return $identifier; } /** * Spins over every plugin object and collects the results of a method call. Results are cached in memory. * * @param string $methodName * @return array */ public function getRegistrationMethodValues($methodName) { if (isset($this->registrationMethodCache[$methodName])) { return $this->registrationMethodCache[$methodName]; } $results = []; $plugins = $this->getPlugins(); foreach ($plugins as $id => $plugin) { if (!method_exists($plugin, $methodName)) { continue; } $results[$id] = $plugin->{$methodName}(); } return $this->registrationMethodCache[$methodName] = $results; } // // Disability // /** * Clears the disabled plugins cache file * * @return void */ public function clearDisabledCache() { File::delete($this->metaFile); $this->disabledPlugins = []; } /** * Loads all disabled plugins from the cached JSON file. * * @return void */ protected function loadDisabled() { $path = $this->metaFile; if (($configDisabled = Config::get('cms.disablePlugins')) && is_array($configDisabled)) { foreach ($configDisabled as $disabled) { $this->disabledPlugins[$disabled] = true; } } if (File::exists($path)) { $disabled = json_decode(File::get($path), true) ?: []; $this->disabledPlugins = array_merge($this->disabledPlugins, $disabled); } else { $this->populateDisabledPluginsFromDb(); $this->writeDisabled(); } } /** * Determines if a plugin is disabled by looking at the meta information * or the application configuration. * * @param string|PluginBase $id * @return bool */ public function isDisabled($id) { $code = $this->getIdentifier($id); $normalized = $this->normalizeIdentifier($code); return isset($this->disabledPlugins[$normalized]); } /** * Write the disabled plugins to a meta file. * * @return void */ protected function writeDisabled() { File::put($this->metaFile, json_encode($this->disabledPlugins)); } /** * Populates information about disabled plugins from database * * @return void */ protected function populateDisabledPluginsFromDb() { if (!App::hasDatabase()) { return; } if (!Schema::hasTable('system_plugin_versions')) { return; } $disabled = Db::table('system_plugin_versions')->where('is_disabled', 1)->lists('code'); foreach ($disabled as $code) { $this->disabledPlugins[$code] = true; } } /** * Disables a single plugin in the system. 
* * @param string|PluginBase $id Plugin code/namespace * @param bool $isUser Set to true if disabled by the user, false by default * @return bool Returns false if the plugin was already disabled, true otherwise */ public function disablePlugin($id, $isUser = false) { $code = $this->getIdentifier($id); $code = $this->normalizeIdentifier($code); if (isset($this->disabledPlugins[$code])) { return false; } $this->disabledPlugins[$code] = $isUser; $this->writeDisabled(); if ($pluginObj = $this->findByIdentifier($code)) { $pluginObj->disabled = true; } return true; } /** * Enables a single plugin in the system. * * @param string|PluginBase $id Plugin code/namespace * @param bool $isUser Set to true if enabled by the user, false by default * @return bool Returns false if the plugin wasn't already disabled or if the user disabled a plugin that the system is trying to re-enable, true otherwise */ public function enablePlugin($id, $isUser = false) { $code = $this->getIdentifier($id); $code = $this->normalizeIdentifier($code); if (!isset($this->disabledPlugins[$code])) { return false; } // Prevent system from enabling plugins disabled by the user if (!$isUser && $this->disabledPlugins[$code] === true) { return false; } unset($this->disabledPlugins[$code]); $this->writeDisabled(); if ($pluginObj = $this->findByIdentifier($code)) { $pluginObj->disabled = false; } return true; } // // Dependencies // /** * Scans the system plugins to locate any dependencies that are not currently * installed. Returns an array of missing plugin codes keyed by the plugin that requires them. * * ['Author.Plugin' => ['Required.Plugin1', 'Required.Plugin2'] * * PluginManager::instance()->findMissingDependencies(); * * @return array */ public function findMissingDependencies() { $missing = []; foreach ($this->plugins as $id => $plugin) { if (!$required = $this->getDependencies($plugin)) { continue; } foreach ($required as $require) { if ($this->hasPlugin($require)) { continue; } if (!in_array($require, $missing)) { $missing[$this->getIdentifier($plugin)][] = $require; } } } return $missing; } /** * Cross checks all plugins and their dependancies, if not met plugins * are disabled and vice versa. * * @return void */ protected function loadDependencies() { foreach ($this->plugins as $id => $plugin) { if (!$required = $this->getDependencies($plugin)) { continue; } $disable = false; foreach ($required as $require) { if (!$pluginObj = $this->findByIdentifier($require)) { $disable = true; } elseif ($pluginObj->disabled) { $disable = true; } } if ($disable) { $this->disablePlugin($id); } else { $this->enablePlugin($id); } } } /** * Sorts a collection of plugins, in the order that they should be actioned, * according to their given dependencies. Least dependent come first. * * @return array Array of sorted plugin identifiers and instantiated classes ['Author.Plugin' => PluginBase] * @throws SystemException If a possible circular dependency is detected */ protected function sortDependencies() { ksort($this->plugins); /* * Canvas the dependency tree */ $checklist = $this->plugins; $result = []; $loopCount = 0; while (count($checklist)) { if (++$loopCount > 2048) { throw new SystemException('Too much recursion! 
Check for circular dependencies in your plugins.'); } foreach ($checklist as $code => $plugin) { /* * Get dependencies and remove any aliens */ $depends = $this->getDependencies($plugin); $depends = array_filter($depends, function ($pluginCode) { return isset($this->plugins[$pluginCode]); }); /* * No dependencies */ if (!$depends) { array_push($result, $code); unset($checklist[$code]); continue; } /* * Find dependencies that have not been checked */ $depends = array_diff($depends, $result); if (count($depends) > 0) { continue; } /* * All dependencies are checked */ array_push($result, $code); unset($checklist[$code]); } } /* * Reassemble plugin map */ $sortedPlugins = []; foreach ($result as $code) { $sortedPlugins[$code] = $this->plugins[$code]; } return $this->plugins = $sortedPlugins; } /** * Returns the plugin identifiers that are required by the supplied plugin. * * @param string $plugin Plugin identifier, object or class * @return array */ public function getDependencies($plugin) { if (is_string($plugin) && (!$plugin = $this->findByIdentifier($plugin))) { return []; } if (!isset($plugin->require) || !$plugin->require) { return []; } return is_array($plugin->require) ? $plugin->require : [$plugin->require]; } /** * @deprecated Plugins are now sorted by default. See getPlugins() * Remove if year >= 2022 */ public function sortByDependencies($plugins = null) { traceLog('PluginManager::sortByDependencies is deprecated. Plugins are now sorted by default. Use PluginManager::getPlugins()'); return array_keys($plugins ?: $this->getPlugins()); } // // Management // /** * Completely roll back and delete a plugin from the system. * * @param string $id Plugin code/namespace * @return void */ public function deletePlugin($id) { /* * Rollback plugin */ UpdateManager::instance()->rollbackPlugin($id); /* * Delete from file system */ if ($pluginPath = self::instance()->getPluginPath($id)) { File::deleteDirectory($pluginPath); } } /** * Tears down a plugin's database tables and rebuilds them. * * @param string $id Plugin code/namespace * @return void */ public function refreshPlugin($id) { $manager = UpdateManager::instance(); $manager->rollbackPlugin($id); $manager->updatePlugin($id); } }
1
19,039
Is the ID correctly normalized at this point?
octobercms-october
php
@@ -0,0 +1,11 @@
+package commands
+
+import (
+	"context"
+
+	"github.com/ledgerwatch/erigon/p2p"
+)
+
+func (api *ErigonImpl) NodeInfo(ctx context.Context) ([]p2p.NodeInfo, error) {
+	return api.ethBackend.NodeInfo(ctx, 0)
+}
1
1
22,770
if 0 is a special constant meaning "no limit" let's make it a constant and name it correctly :)
ledgerwatch-erigon
go
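Illustration (not part of the dataset row above): the reviewer's point is that the bare 0 passed to ethBackend.NodeInfo should be a named constant if it really means "no limit". A hedged Go sketch of that shape follows; the constant name nodeInfoNoLimit, the backend interface, and the trimmed-down ErigonImpl struct are invented for illustration and do not reflect erigon's real definitions, and the actual meaning of 0 would need to be confirmed against the backend's contract.

package commands

import (
	"context"

	"github.com/ledgerwatch/erigon/p2p"
)

// nodeInfoNoLimit is a hypothetical name for the sentinel 0; it is assumed,
// not confirmed, to mean "return info for all known nodes".
const nodeInfoNoLimit = 0

// nodeInfoBackend stands in for the ethBackend dependency used in the diff.
type nodeInfoBackend interface {
	NodeInfo(ctx context.Context, limit int) ([]p2p.NodeInfo, error)
}

// ErigonImpl is sketched with only the field this example needs.
type ErigonImpl struct {
	ethBackend nodeInfoBackend
}

// NodeInfo forwards to the backend using the named constant instead of a bare 0.
func (api *ErigonImpl) NodeInfo(ctx context.Context) ([]p2p.NodeInfo, error) {
	return api.ethBackend.NodeInfo(ctx, nodeInfoNoLimit)
}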
@@ -140,8 +140,8 @@ func run(ctx context.Context) {
 		} else {
 			setOsConfig(resp)
 			setPatchPolicies(resp.PatchPolicies)
-			runInventory()
 		}
+		runInventory()
 
 		select {
 		case <-ticker.C:
1
// Copyright 2018 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // osconfig_agent interacts with the osconfig api. package main import ( "context" "encoding/json" "flag" "fmt" "io" "log" "math" "net/http" "os" "time" osconfig "github.com/GoogleCloudPlatform/compute-image-tools/cli_tools/osconfig_agent/_internal/gapi-cloud-osconfig-go/cloud.google.com/go/osconfig/apiv1alpha1" "github.com/GoogleCloudPlatform/compute-image-tools/go/service" "github.com/kylelemons/godebug/pretty" "google.golang.org/api/option" ) var ( oauth = flag.String("oauth", "", "path to oauth json file") resource = flag.String("resource", "", "projects/*/zones/*/instances/*") endpoint = flag.String("endpoint", "osconfig.googleapis.com:443", "osconfig endpoint override") logger = log.New(os.Stdout, "", 0) ) var dump = &pretty.Config{IncludeUnexported: true} const ( // TODO: make interval configurable. interval = 10 * time.Minute instanceMetadata = "http://metadata.google.internal/computeMetadata/v1/instance" metadataRecursive = instanceMetadata + "/?recursive=true&alt=json" reportURL = instanceMetadata + "/guest-attributes" maxRetryDelay = 30 ) type metadataJSON struct { ID int Zone string } func getResourceName(r string) (string, error) { if r != "" { return r, nil } client := &http.Client{} req, err := http.NewRequest("GET", metadataRecursive, nil) if err != nil { return "", err } req.Header.Add("Metadata-Flavor", "Google") var res *http.Response // Retry forever, increase sleep between retries (up to 20s) in order // to wait for slow network initialization. 
for i := 1; ; i++ { res, err = client.Do(req) if err == nil { break } rt := time.Duration(math.Min(float64(3*i), maxRetryDelay)) * time.Second logger.Printf("Error connecting to metadata server (error number: %d), retrying in %s, error: %v\n", i, rt, err) time.Sleep(rt) } defer res.Body.Close() dec := json.NewDecoder(res.Body) var m metadataJSON for { if err := dec.Decode(&m); err == io.EOF { break } else if err != nil { return "", err } } return fmt.Sprintf("%s/instances/%d", m.Zone, m.ID), nil } func strIn(s string, ss []string) bool { for _, x := range ss { if s == x { return true } } return false } func postAttribute(url string, value io.Reader) error { req, err := http.NewRequest("PUT", url, value) if err != nil { return err } req.Header.Add("Metadata-Flavor", "Google") resp, err := http.DefaultClient.Do(req) if err != nil { return err } if resp.StatusCode != http.StatusOK { return fmt.Errorf(`received status code %q for request "%s %s"`, resp.Status, req.Method, req.URL.String()) } return nil } func run(ctx context.Context) { client, err := osconfig.NewClient(ctx, option.WithEndpoint(*endpoint), option.WithCredentialsFile(*oauth)) if err != nil { log.Fatalln("NewClient Error:", err) } res, err := getResourceName(*resource) if err != nil { log.Fatalln("getResourceName error:", err) } patchInit() ticker := time.NewTicker(interval) for { resp, err := lookupConfigs(ctx, client, res) if err != nil { log.Println("ERROR:", err) } else { setOsConfig(resp) setPatchPolicies(resp.PatchPolicies) runInventory() } select { case <-ticker.C: continue case <-ctx.Done(): return } } } func main() { flag.Parse() ctx := context.Background() action := flag.Arg(0) if err := service.Register(ctx, "google_osconfig_agent", "Google OSConfig Agent", "", run, action); err != nil { log.Fatal(err) } }
1
7,999
Curious about the reason behind this change
GoogleCloudPlatform-compute-image-tools
go
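Illustration (not part of the dataset row above): the comment only asks why the change was made, but the diff itself shows the effect — runInventory() moves out of the else branch, so inventory runs on every loop iteration even when lookupConfigs returns an error. A condensed, stubbed Go sketch of the patched control flow; the function names are taken from the row, the signatures are invented for the sketch.

package sketch

import "log"

// runLoop is a condensed stand-in for the agent's run function after the patch:
// runInventory is called on every iteration, even when lookupConfigs fails.
func runLoop(lookupConfigs func() (string, error), setOsConfig func(string), runInventory func(), tick, done <-chan struct{}) {
	for {
		resp, err := lookupConfigs()
		if err != nil {
			log.Println("ERROR:", err)
		} else {
			setOsConfig(resp)
		}
		// Moved out of the else branch by the patch.
		runInventory()

		select {
		case <-tick:
			continue
		case <-done:
			return
		}
	}
}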
@@ -18,7 +18,7 @@ func (node *Node) AddNewBlock(ctx context.Context, b *types.Block) (err error) {
 	// Put block in storage wired to an exchange so this node and other
 	// nodes can fetch it.
 	log.Debugf("putting block in bitswap exchange: %s", b.Cid().String())
-	blkCid, err := node.OnlineStore.Put(ctx, b)
+	blkCid, err := node.cborStore.Put(ctx, b)
 	if err != nil {
 		return errors.Wrap(err, "could not add new block to online storage")
 	}
1
package node

import (
	"context"

	"gx/ipfs/QmR8BauakNcBa3RbE4nbQu76PDiJgoQgz8AJdhJuiU4TAw/go-cid"
	"gx/ipfs/QmVmDhyTTUcQXFD1rRQ64fGLMSAoaQvNH3hwuaCFAPq2hy/errors"

	"github.com/filecoin-project/go-filecoin/net/pubsub"
	"github.com/filecoin-project/go-filecoin/types"
)

// BlockTopic is the pubsub topic identifier on which new blocks are announced.
const BlockTopic = "/fil/blocks"

// AddNewBlock receives a newly mined block and stores, validates and propagates it to the network.
func (node *Node) AddNewBlock(ctx context.Context, b *types.Block) (err error) {
	// Put block in storage wired to an exchange so this node and other
	// nodes can fetch it.
	log.Debugf("putting block in bitswap exchange: %s", b.Cid().String())
	blkCid, err := node.OnlineStore.Put(ctx, b)
	if err != nil {
		return errors.Wrap(err, "could not add new block to online storage")
	}

	log.Debugf("syncing new block: %s", b.Cid().String())
	if err := node.Syncer.HandleNewBlocks(ctx, []cid.Cid{blkCid}); err != nil {
		return err
	}

	// TODO: should this just be a cid? Right now receivers ask to fetch
	// the block over bitswap anyway.
	return node.PorcelainAPI.PubSubPublish(BlockTopic, b.ToNode().RawData())
}

func (node *Node) processBlock(ctx context.Context, pubSubMsg pubsub.Message) (err error) {
	// ignore messages from ourself
	if pubSubMsg.GetFrom() == node.Host().ID() {
		return nil
	}

	blk, err := types.DecodeBlock(pubSubMsg.GetData())
	if err != nil {
		return errors.Wrap(err, "got bad block data")
	}

	log.Infof("Received new block from network cid: %s", blk.Cid().String())
	log.Debugf("Received new block from network: %s", blk)

	err = node.Syncer.HandleNewBlocks(ctx, []cid.Cid{blk.Cid()})
	if err != nil {
		return errors.Wrap(err, "processing block from network")
	}

	return nil
}
1
17,156
Wait, I thought the cborStore was for the state tree?
filecoin-project-venus
go
@@ -1101,7 +1101,19 @@ class Package(object): self._fix_sha256() pkg = self._materialize(dest) + + def physical_key_is_temp_file(pk): + if not file_is_local(pk): + return False + return pathlib.Path(parse_file_url(urlparse(pk))).parent == APP_DIR_TEMPFILE_DIR + temp_file_logical_keys = [lk for lk, entry in self.walk() if physical_key_is_temp_file(entry.physical_keys[0])] + self._delete_temporary_files() # Now that data has been pushed, delete tmp files created by pkg.set('KEY', obj) + + # Update old package to point to the materialized location of the file since the tempfile no longest exists + for lk in temp_file_logical_keys: + self.set(lk, pkg[lk]) + pkg.build(name, registry=registry, message=message) return pkg
1
from collections import deque import copy import hashlib import io import json import pathlib import os import time from multiprocessing import Pool from urllib.parse import quote, urlparse, unquote import uuid import warnings import jsonlines from .data_transfer import ( calculate_sha256, copy_file, copy_file_list, get_bytes, get_size_and_meta, list_object_versions, put_bytes ) from .exceptions import PackageException from .formats import FormatRegistry from .util import ( QuiltException, fix_url, get_from_config, get_install_location, make_s3_url, parse_file_url, parse_s3_url, validate_package_name, quiltignore_filter, validate_key, extract_file_extension, file_is_local ) from .util import TEMPFILE_DIR_PATH as APP_DIR_TEMPFILE_DIR def hash_file(readable_file): """ Returns SHA256 hash of readable file-like object """ buf = readable_file.read(4096) hasher = hashlib.sha256() while buf: hasher.update(buf) buf = readable_file.read(4096) return hasher.hexdigest() def _to_singleton(physical_keys): """ Ensure that there is a single physical key, throw otherwise. Temporary utility method to avoid repeated, identical checks. Args: pkeys (list): list of physical keys Returns: A physical key Throws: NotImplementedError TODO: support multiple physical keys """ if len(physical_keys) > 1: raise NotImplementedError("Multiple physical keys not supported") return physical_keys[0] def del_if_temp(physical_key): """ Delete a file if the physical key points to a local file in Quilt's APP_DIR_TEMPFILE_DIR folder """ if file_is_local(physical_key): path = pathlib.Path(parse_file_url(urlparse(physical_key))) if path.parent == APP_DIR_TEMPFILE_DIR: path.unlink() class PackageEntry(object): """ Represents an entry at a logical key inside a package. """ __slots__ = ['physical_keys', 'size', 'hash', '_meta'] def __init__(self, physical_keys, size, hash_obj, meta): """ Creates an entry. Args: physical_keys: a nonempty list of URIs (either `s3://` or `file://`) size(number): size of object in bytes hash({'type': string, 'value': string}): hash object for example: {'type': 'SHA256', 'value': 'bb08a...'} meta(dict): metadata dictionary Returns: a PackageEntry """ self.physical_keys = [fix_url(x) for x in physical_keys] self.size = size self.hash = hash_obj self._meta = meta or {} def __eq__(self, other): return ( # Don't check physical keys. self.size == other.size and self.hash == other.hash and self._meta == other._meta ) def __repr__(self): return f"PackageEntry('{self.physical_keys[0]}')" def as_dict(self): """ Returns dict representation of entry. """ ret = { 'physical_keys': self.physical_keys, 'size': self.size, 'hash': self.hash, 'meta': self._meta } return copy.deepcopy(ret) def _clone(self): """ Returns clone of this PackageEntry. """ return self.__class__(copy.deepcopy(self.physical_keys), self.size, \ copy.deepcopy(self.hash), copy.deepcopy(self._meta)) @property def meta(self): return self._meta.get('user_meta', dict()) def set_meta(self, meta): """ Sets the user_meta for this PackageEntry. """ self._meta['user_meta'] = meta def _verify_hash(self, read_bytes): """ Verifies hash of bytes """ if self.hash is None: raise QuiltException("Hash missing - need to build the package") if self.hash.get('type') != 'SHA256': raise NotImplementedError digest = hashlib.sha256(read_bytes).hexdigest() if digest != self.hash.get('value'): raise QuiltException("Hash validation failed") def set(self, path=None, meta=None): """ Returns self with the physical key set to path. 
Args: logical_key(string): logical key to update path(string): new path to place at logical_key in the package Currently only supports a path on local disk meta(dict): metadata dict to attach to entry. If meta is provided, set just updates the meta attached to logical_key without changing anything else in the entry Returns: self """ if path is not None: self.physical_keys = [fix_url(path)] self.size = None self.hash = None elif meta is not None: self.set_meta(meta) else: raise PackageException('Must specify either path or meta') def get(self): """ Returns the physical key of this PackageEntry. """ return _to_singleton(self.physical_keys) def deserialize(self, func=None, **format_opts): """ Returns the object this entry corresponds to. Args: func: Skip normal deserialization process, and call func(bytes), returning the result directly. **format_opts: Some data formats may take options. Though normally handled by metadata, these can be overridden here. Returns: The deserialized object from the logical_key Raises: physical key failure hash verification fail when deserialization metadata is not present """ physical_key = _to_singleton(self.physical_keys) data, _ = get_bytes(physical_key) if func is not None: return func(data) pkey_ext = pathlib.PurePosixPath(unquote(urlparse(physical_key).path)).suffix # Verify format can be handled before checking hash. Raises if none found. formats = FormatRegistry.search(None, self._meta, pkey_ext) # Verify hash before deserializing.. self._verify_hash(data) return formats[0].deserialize(data, self._meta, pkey_ext, **format_opts) def fetch(self, dest=None): """ Gets objects from entry and saves them to dest. Args: dest: where to put the files Defaults to the entry name Returns: None """ physical_key = _to_singleton(self.physical_keys) if dest is None: name = pathlib.PurePosixPath(unquote(urlparse(physical_key).path)).name dest = (pathlib.Path().resolve() / name).as_uri() else: dest = fix_url(dest) copy_file(physical_key, dest, self._meta) # return a package reroot package physical keys after the copy operation succeeds # see GH#388 for context entry = self._clone() entry.physical_keys = [dest] return entry def __call__(self, func=None, **kwargs): """ Shorthand for self.deserialize() """ return self.deserialize(func=func, **kwargs) class Package(object): """ In-memory representation of a package """ def __init__(self): self._children = {} self._meta = {'version': 'v0'} def __repr__(self, max_lines=20): """ String representation of the Package. """ def _create_str(results_dict, level=0, parent=True): """ Creates a string from the results dict """ result = '' keys = sorted(results_dict.keys()) if not keys: return result if parent: has_remote_entries = any( self.map( lambda lk, entry: urlparse( fix_url(_to_singleton(entry.physical_keys)) ).scheme != 'file' ) ) pkg_type = 'remote' if has_remote_entries else 'local' result = f'({pkg_type} Package)\n' for key in keys: result += ' ' + (' ' * level) + '└─' + key + '\n' result += _create_str(results_dict[key], level + 1, parent=False) return result if not self.keys(): return '(empty Package)' # traverse the tree of package directories and entries to get the list of # display objects. 
candidates is a deque of shape # ((logical_key, Package | PackageEntry), [list of parent key]) candidates = deque(([x, []] for x in self._children.items())) results_dict = {} results_total = 0 more_objects_than_lines = False while candidates: [[logical_key, entry], parent_keys] = candidates.popleft() if isinstance(entry, Package): logical_key = logical_key + '/' new_parent_keys = parent_keys.copy() new_parent_keys.append(logical_key) for child_key in sorted(entry.keys()): candidates.append([[child_key, entry[child_key]], new_parent_keys]) current_result_level = results_dict for key in parent_keys: current_result_level = current_result_level[key] current_result_level[logical_key] = {} results_total += 1 if results_total >= max_lines: more_objects_than_lines = True break repr_str = _create_str(results_dict) # append '...' if the package is larger than max_size if more_objects_than_lines: repr_str += ' ' + '...\n' return repr_str @property def meta(self): return self._meta.get('user_meta', dict()) @classmethod def install(cls, name, registry=None, top_hash=None, dest=None, dest_registry=None): """ Installs a named package to the local registry and downloads its files. Args: name(str): Name of package to install. registry(str): Registry where package is located. Defaults to the default remote registry. top_hash(str): Hash of package to install. Defaults to latest. dest(str): Local path to download files to. dest_registry(str): Registry to install package to. Defaults to local registry. Returns: A new Package that points to files on your local machine. """ if registry is None: registry = get_from_config('default_remote_registry') if registry is None: raise QuiltException( "No registry specified and no default_remote_registry configured. Please " "specify a registry or configure a default remote registry with quilt.config" ) if dest_registry is None: dest_registry = get_from_config('default_local_registry') else: dest_registry_parsed = urlparse(fix_url(dest_registry)) if dest_registry_parsed.scheme != 'file': raise QuiltException( f"Can only 'install' to a local registry, but 'dest_registry' " f"{dest_registry!r} is a remote path. To store a package in a remote " f"registry, use 'push' or 'build' instead." ) if dest is None: dest = get_install_location().rstrip('/') + '/' + quote(name) else: dest_parsed = urlparse(fix_url(dest)) if dest_parsed.scheme != 'file': raise QuiltException( f"Invalid package destination path {dest!r}. 'dest', if set, must point at " f"the local filesystem. To copy a package to a remote registry use 'push' or " f"'build' instead." ) pkg = cls.browse(name=name, registry=registry, top_hash=top_hash) dest = fix_url(dest) message = pkg._meta.get('message', None) # propagate the package message pkg = pkg._materialize(dest) pkg.build(name, registry=dest_registry, message=message) return pkg @classmethod def browse(cls, name=None, registry=None, top_hash=None): """ Load a package into memory from a registry without making a local copy of the manifest. Args: name(string): name of package to load registry(string): location of registry to load package from top_hash(string): top hash of package version to load """ if registry is None: registry = get_from_config('default_local_registry') registry = registry.rstrip('/') validate_package_name(name) name = quote(name) if top_hash is not None: # TODO: verify that name is correct with respect to this top_hash # TODO: allow partial hashes (e.g. 
first six alphanumeric) pkg_manifest_uri = fix_url(f'{registry}/.quilt/packages/{top_hash}') return cls._from_path(pkg_manifest_uri) else: pkg_timestamp_file = f'{registry}/.quilt/named_packages/{name}/latest' latest_pkg_hash, _ = get_bytes(pkg_timestamp_file) latest_pkg_hash = latest_pkg_hash.decode('utf-8').strip() pkg_manifest_uri = fix_url(f'{registry}/.quilt/packages/{quote(latest_pkg_hash)}') return cls._from_path(pkg_manifest_uri) @classmethod def _from_path(cls, uri): """ Takes a URI and returns a package loaded from that URI """ src_url = urlparse(uri) if src_url.scheme == 'file': with open(parse_file_url(src_url)) as open_file: pkg = cls.load(open_file) elif src_url.scheme == 's3': body, _ = get_bytes(uri) pkg = cls.load(io.BytesIO(body)) else: raise NotImplementedError return pkg @classmethod def _split_key(cls, logical_key): """ Converts a string logical key like 'a/b/c' into a list of ['a', 'b', 'c']. Returns the original key if it's already a list or a tuple. """ if isinstance(logical_key, str): path = logical_key.split('/') elif isinstance(logical_key, (tuple, list)): path = logical_key else: raise TypeError('Invalid logical_key: %r' % logical_key) return path def __contains__(self, logical_key): """ Checks whether the package contains a specified logical_key. Returns: True or False """ try: self[logical_key] return True except KeyError: return False def __getitem__(self, logical_key): """ Filters the package based on prefix, and returns either a new Package or a PackageEntry. Args: prefix(str): prefix to filter on Returns: PackageEntry if prefix matches a logical_key exactly otherwise Package """ pkg = self for key_fragment in self._split_key(logical_key): pkg = pkg._children[key_fragment] return pkg def fetch(self, dest='./'): """ Copy all descendants to `dest`. Descendants are written under their logical names _relative_ to self. Args: dest: where to put the files (locally) Returns: None """ nice_dest = fix_url(dest).rstrip('/') file_list = [] pkg = Package() for logical_key, entry in self.walk(): physical_key = _to_singleton(entry.physical_keys) new_physical_key = f'{nice_dest}/{quote(logical_key)}' file_list.append((physical_key, new_physical_key, entry.size, entry.meta)) # return a package reroot package physical keys after the copy operation succeeds # see GH#388 for context new_entry = entry._clone() new_entry.physical_keys = [new_physical_key] pkg.set(logical_key, new_entry) copy_file_list(file_list) return pkg def keys(self): """ Returns logical keys in the package. """ return self._children.keys() def __iter__(self): return iter(self._children) def __len__(self): return len(self._children) def walk(self): """ Generator that traverses all entries in the package tree and returns tuples of (key, entry), with keys in alphabetical order. """ for name, child in sorted(self._children.items()): if isinstance(child, PackageEntry): yield name, child else: for key, value in child.walk(): yield name + '/' + key, value def _walk_dir_meta(self): """ Generator that traverses all entries in the package tree and returns tuples of (key, meta) for each directory with metadata. Keys will all end in '/' to indicate that they are directories. """ for key, child in sorted(self._children.items()): if isinstance(child, PackageEntry): continue meta = child.meta if meta: yield key + '/', meta for child_key, child_meta in child._walk_dir_meta(): yield key + '/' + child_key, child_meta @classmethod def load(cls, readable_file): """ Loads a package from a readable file-like object. 
Args: readable_file: readable file-like object to deserialize package from Returns: A new Package object Raises: file not found json decode error invalid package exception """ reader = jsonlines.Reader(readable_file) meta = reader.read() meta.pop('top_hash', None) # Obsolete as of PR #130 pkg = cls() pkg._meta = meta for obj in reader: path = cls._split_key(obj.pop('logical_key')) subpkg = pkg._ensure_subpackage(path[:-1]) key = path[-1] if not obj.get('physical_keys', None): # directory-level metadata subpkg.set_meta(obj['meta']) continue if key in subpkg._children: raise PackageException("Duplicate logical key while loading package") subpkg._children[key] = PackageEntry( obj['physical_keys'], obj['size'], obj['hash'], obj['meta'] ) return pkg def set_dir(self, lkey, path=None, meta=None): """ Adds all files from `path` to the package. Recursively enumerates every file in `path`, and adds them to the package according to their relative location to `path`. Args: lkey(string): prefix to add to every logical key, use '/' for the root of the package. path(string): path to scan for files to add to package. If None, lkey will be substituted in as the path. meta(dict): user level metadata dict to attach to lkey directory entry. Returns: self Raises: When `path` doesn't exist """ lkey = lkey.strip("/") if not lkey or lkey == '.' or lkey == './': root = self else: validate_key(lkey) root = self._ensure_subpackage(self._split_key(lkey)) root.set_meta(meta) if not path: current_working_dir = pathlib.Path.cwd() logical_key_abs_path = pathlib.Path(lkey).absolute() path = logical_key_abs_path.relative_to(current_working_dir) # TODO: deserialization metadata url = urlparse(fix_url(path).strip('/')) if url.scheme == 'file': src_path = pathlib.Path(parse_file_url(url)) if not src_path.is_dir(): raise PackageException("The specified directory doesn't exist") files = src_path.rglob('*') ignore = src_path / '.quiltignore' if ignore.exists(): files = quiltignore_filter(files, ignore, 'file') for f in files: if not f.is_file(): continue entry = PackageEntry([f.as_uri()], f.stat().st_size, None, None) logical_key = f.relative_to(src_path).as_posix() root.set(logical_key, entry) elif url.scheme == 's3': src_bucket, src_key, src_version = parse_s3_url(url) if src_version: raise PackageException("Directories cannot have versions") if src_key and not src_key.endswith('/'): src_key += '/' objects, _ = list_object_versions(src_bucket, src_key) for obj in objects: if not obj['IsLatest']: continue # Skip S3 pseduo directory files and Keys that end in / if obj['Key'].endswith('/'): if obj['Size'] != 0: warnings.warn(f'Logical keys cannot end in "/", skipping: {obj["Key"]}') continue obj_url = make_s3_url(src_bucket, obj['Key'], obj.get('VersionId')) entry = PackageEntry([obj_url], obj['Size'], None, None) logical_key = obj['Key'][len(src_key):] root.set(logical_key, entry) else: raise NotImplementedError return self def get(self, logical_key=None): """ Gets object from logical_key and returns its physical path. Equivalent to self[logical_key].get(). Args: logical_key(string): logical key of the object to get Returns: Physical path as a string. Raises: KeyError: when logical_key is not present in the package ValueError: if the logical_key points to a Package rather than PackageEntry. 
""" if logical_key: obj = self[logical_key] if not isinstance(obj, PackageEntry): raise ValueError("Key does not point to a PackageEntry") return obj.get() else: # a package has a logical root directory if all of its children are rooted at the # same directory or object path. in other words, the following things must match: # * the URL scheme of every physical key is the same # * the root path to each physical key is the same first_lkey, first_entry = next(self.walk()) first_pkey = first_entry.physical_keys[0].split('?versionId=')[0] hypothesized_scheme = urlparse(first_pkey).scheme # ensure that the first entry has a logically consistent physical and logical key if not first_pkey.endswith(first_lkey): raise QuiltException( f"Cannot get the root directory for this package because it is not " f"physically consistent, as it contains entries whose logical keys and " f"physical keys have different names. For example, the {first_lkey!r} package " f"entry points to a file named {first_pkey.split('/')[-1]!r} (expected a file " f"named {first_lkey.split('/')[-1]!r}). To make this package physically " f"consistent run 'quilt.Package.install(\"$PKG_NAME\")', replacing '$PKG_NAME' " f"with the name of this package. Note that this will result in file copying." ) hypothesized_root_path = first_pkey[:first_pkey.rfind(first_lkey)] # every subsequent entry will be checked to ensure that its logical key is rooted # at the same physical key as the first entry for lkey, entry in self.walk(): pkey = entry.physical_keys[0].split('?versionId=')[0] scheme = urlparse(pkey).scheme root_path = pkey[:pkey.rfind(lkey)] if scheme != hypothesized_scheme: raise QuiltException( f"Cannot get the root directory for this package because it is not " f"physically consistent, as it contains both local and remote entries. " f"For example, the {first_lkey!r} package entry is a " f"'{hypothesized_scheme}' entry, whist the {lkey!r} package entry is a " f"'{scheme}' entry. To make this package physically " f"consistent run 'quilt.Package.install(\"$PKG_NAME\")', replacing " f"'$PKG_NAME' with the name of this package. Note that this will result " f"in file copying." ) elif not pkey.endswith(lkey): raise QuiltException( f"Cannot get the root directory for this package because it is not " f"physically consistent, as it contains entries whose logical keys and " f"physical keys have different names. For example, the " f"{lkey!r} package entry points to a file named " f"{pkey.split('/')[-1]!r} (expected a file named " f"{first_lkey.split('/')[-1]!r}). To make this package physically " f"consistent run 'quilt.Package.install(\"$PKG_NAME\")', replacing " f"'$PKG_NAME' with the name of this package. Note that this will result " f"in file copying." ) elif root_path != hypothesized_root_path: raise QuiltException( f"Cannot get the root directory for this package because it is not " f"physically consistent, as it contains entries rooted in different " f"physical locations. For example, the {lkey!r} entry is located in the " f"{'/'.join(pkey.split('/')[:-1])!r} directory, whilst the " f"{first_lkey!r} is located in the " f"{'/'.join(first_pkey.split('/')[:-1])!r} directory. To make this " f"package physically consistent run " f"'quilt.Package.install(\"$PKG_NAME\")', " f"replacing '$PKG_NAME' with the name of this package. Note that this " f"will result in file copying." ) return hypothesized_root_path # def get_as_pathlib(self): def set_meta(self, meta): """ Sets user metadata on this Package. 
""" self._meta['user_meta'] = meta return self def _fix_sha256(self): entries = [entry for key, entry in self.walk() if entry.hash is None] if not entries: return physical_keys = [] sizes = [] for entry in entries: physical_keys.append(entry.physical_keys[0]) sizes.append(entry.size) results = calculate_sha256(physical_keys, sizes) for entry, obj_hash in zip(entries, results): entry.hash = dict(type='SHA256', value=obj_hash) def _set_commit_message(self, msg): """ Sets a commit message. Args: msg: a message string Returns: None Raises: a ValueError if msg is not a string """ if msg is not None and not isinstance(msg, str): raise ValueError( f"The package commit message must be a string, but the message provided is an " f"instance of {type(msg)}." ) self._meta.update({'message': msg}) def build(self, name=None, registry=None, message=None): """ Serializes this package to a registry. Args: name: optional name for package registry: registry to build to defaults to local registry message: the commit message of the package Returns: The top hash as a string. """ self._set_commit_message(message) if registry is None: registry = get_from_config('default_local_registry') registry = registry.rstrip('/') validate_package_name(name) name = quote(name) self._fix_sha256() manifest = io.BytesIO() self.dump(manifest) pkg_manifest_file = f'{registry}/.quilt/packages/{self.top_hash}' put_bytes( manifest.getvalue(), pkg_manifest_file ) named_path = f'{registry}/.quilt/named_packages/{name}/' # TODO: use a float to string formater instead of double casting hash_bytes = self.top_hash.encode('utf-8') timestamp_path = named_path + str(int(time.time())) latest_path = named_path + "latest" put_bytes(hash_bytes, timestamp_path) put_bytes(hash_bytes, latest_path) return self def dump(self, writable_file): """ Serializes this package to a writable file-like object. Args: writable_file: file-like object to write serialized package. Returns: None Raises: fail to create file fail to finish write """ writer = jsonlines.Writer(writable_file) for line in self.manifest: writer.write(line) @property def manifest(self): """ Provides a generator of the dicts that make up the serialized package. """ yield self._meta for dir_key, meta in self._walk_dir_meta(): yield {'logical_key': dir_key, 'meta': meta} for logical_key, entry in self.walk(): yield {'logical_key': logical_key, **entry.as_dict()} def set(self, logical_key, entry=None, meta=None, serialization_location=None, serialization_format_opts=None): """ Returns self with the object at logical_key set to entry. Args: logical_key(string): logical key to update entry(PackageEntry OR string OR object): new entry to place at logical_key in the package. If entry is a string, it is treated as a URL, and an entry is created based on it. If entry is None, the logical key string will be substituted as the entry value. If entry is an object and quilt knows how to serialize it, it will immediately be serialized and written to disk, either to serialization_location or to a location managed by quilt. List of types that Quilt can serialize is available by calling `quilt3.formats.FormatRegistry.all_supported_formats()` meta(dict): user level metadata dict to attach to entry serialization_format_opts(dict): Optional. If passed in, only used if entry is an object. Options to help Quilt understand how the object should be serialized. Useful for underspecified file formats like csv when content contains confusing characters. Will be passed as kwargs to the FormatHandler.serialize() function. 
See docstrings for individual FormatHandlers for full list of options - https://github.com/quiltdata/quilt/blob/master/api/python/quilt3/formats.py serialization_location(string): Optional. If passed in, only used if entry is an object. Where the serialized object should be written, e.g. "./mydataframe.parquet" Returns: self """ if not logical_key or logical_key.endswith('/'): raise QuiltException( f"Invalid logical key {logical_key!r}. " f"A package entry logical key cannot be a directory." ) validate_key(logical_key) if entry is None: current_working_dir = pathlib.Path.cwd() logical_key_abs_path = pathlib.Path(logical_key).absolute() entry = logical_key_abs_path.relative_to(current_working_dir) if isinstance(entry, (str, os.PathLike)): url = fix_url(str(entry)) size, orig_meta, version = get_size_and_meta(url) # Determine if a new version needs to be appended. parsed_url = urlparse(url) if parsed_url.scheme == 's3': bucket, key, current_version = parse_s3_url(parsed_url) if not current_version and version: url = make_s3_url(bucket, key, version) entry = PackageEntry([url], size, None, orig_meta) elif isinstance(entry, PackageEntry): entry = entry._clone() elif FormatRegistry.object_is_serializable(entry): # Use file extension from serialization_location, fall back to file extension from logical_key # If neither has a file extension, Quilt picks the serialization format. logical_key_ext = extract_file_extension(logical_key) serialize_loc_ext = None if serialization_location is not None: serialize_loc_ext = extract_file_extension(serialization_location) if logical_key_ext is not None and serialize_loc_ext is not None: assert logical_key_ext == serialize_loc_ext, f"The logical_key and the serialization_location have " \ f"different file extensions: {logical_key_ext} vs " \ f"{serialize_loc_ext}. Quilt doesn't know which to use!" if serialize_loc_ext is not None: ext = serialize_loc_ext elif logical_key_ext is not None: ext = logical_key_ext else: ext = None format_handlers = FormatRegistry.search(type(entry)) if ext: format_handlers = [f for f in format_handlers if ext in f.handled_extensions] if len(format_handlers) == 0: error_message = f'Quilt does not know how to serialize a {type(entry)}' if ext is not None: error_message += f' as a {ext} file.' error_message += f'. 
If you think this should be supported, please open an issue or PR at ' \ f'https://github.com/quiltdata/quilt' raise QuiltException(error_message) if serialization_format_opts is None: serialization_format_opts = {} serialized_object_bytes, new_meta = format_handlers[0].serialize(entry, meta=None, ext=ext, **serialization_format_opts) if serialization_location is None: serialization_path = APP_DIR_TEMPFILE_DIR / str(uuid.uuid4()) if ext: serialization_path = serialization_path.with_suffix(f'.{ext}') else: serialization_path = pathlib.Path(serialization_location).expanduser().resolve() serialization_path.parent.mkdir(exist_ok=True, parents=True) serialization_path.write_bytes(serialized_object_bytes) size = serialization_path.stat().st_size write_url = serialization_path.as_uri() entry = PackageEntry([write_url], size, hash_obj=None, meta=new_meta) else: raise TypeError(f"Expected a string for entry, but got an instance of {type(entry)}.") if meta is not None: entry.set_meta(meta) path = self._split_key(logical_key) pkg = self._ensure_subpackage(path[:-1], ensure_no_entry=True) if path[-1] in pkg and isinstance(pkg[path[-1]], Package): raise QuiltException("Cannot overwrite directory with PackageEntry") pkg._children[path[-1]] = entry return self def _ensure_subpackage(self, path, ensure_no_entry=False): """ Creates a package and any intermediate packages at the given path. Args: path(list): logical key as a list or tuple ensure_no_entry(boolean): if True, throws if this would overwrite a PackageEntry that already exists in the tree. Returns: newly created or existing package at that path """ pkg = self for key_fragment in path: if ensure_no_entry and key_fragment in pkg \ and isinstance(pkg[key_fragment], PackageEntry): raise QuiltException("Already a PackageEntry along the path.") pkg = pkg._children.setdefault(key_fragment, Package()) return pkg def delete(self, logical_key): """ Returns the package with logical_key removed. Returns: self Raises: KeyError: when logical_key is not present to be deleted """ path = self._split_key(logical_key) pkg = self[path[:-1]] del pkg._children[path[-1]] return self @property def top_hash(self): """ Returns the top hash of the package. Note that physical keys are not hashed because the package has the same semantics regardless of where the bytes come from. Returns: A string that represents the top hash of the package """ top_hash = hashlib.sha256() assert 'top_hash' not in self._meta top_meta = json.dumps(self._meta, sort_keys=True, separators=(',', ':')) top_hash.update(top_meta.encode('utf-8')) for logical_key, entry in self.walk(): if entry.hash is None or entry.size is None: raise QuiltException( "PackageEntry missing hash and/or size: %s" % entry.physical_keys[0] ) entry_dict = entry.as_dict() entry_dict['logical_key'] = logical_key entry_dict.pop('physical_keys', None) entry_dict_str = json.dumps(entry_dict, sort_keys=True, separators=(',', ':')) top_hash.update(entry_dict_str.encode('utf-8')) return top_hash.hexdigest() def push(self, name, registry=None, dest=None, message=None): """ Copies objects to path, then creates a new package that points to those objects. Copies each object in this package to path according to logical key structure, then adds to the registry a serialized version of this package with physical keys that point to the new copies. 
Args: name: name for package in registry dest: where to copy the objects in the package registry: registry where to create the new package message: the commit message for the new package Returns: A new package that points to the copied objects. """ validate_package_name(name) if registry is None: registry = get_from_config('default_remote_registry') if not registry: raise QuiltException( "No registry specified and no default remote registry configured. Please " "specify a registry or configure a default remote registry with quilt3.config" ) registry_parsed = urlparse(fix_url(registry)) else: registry_parsed = urlparse(fix_url(registry)) if registry_parsed.scheme == 's3': bucket, path, _ = parse_s3_url(registry_parsed) if path != '': # parse_s3_url returns path == '' if input is pathless raise QuiltException( f"The 'registry' argument expects an S3 bucket but the S3 object path " f"{registry!r} was provided instead. You probably wanted to set " f"'registry' to {'s3://' + bucket!r} instead. To specify that package " f"data land in a specific directory use 'dest'." ) registry = 's3://' + bucket elif registry_parsed.scheme == 'file': raise QuiltException( f"Can only 'push' to remote registries in S3, but {registry!r} " f"is a local file. To store a package in the local registry, use " f"'build' instead." ) else: raise NotImplementedError if dest is None: dest = registry.rstrip('/') + '/' + quote(name) else: dest_parsed = urlparse(fix_url(dest)) if dest_parsed.scheme != registry_parsed.scheme: raise QuiltException( f"Invalid package destination path {dest!r}. 'dest', if set, must be a path " f"in the {registry!r} package registry specified by 'registry'." ) assert dest_parsed.scheme == 's3' registry_bucket, _, _ = parse_s3_url(registry_parsed) dest_bucket, _, _ = parse_s3_url(dest_parsed) if registry_bucket != dest_bucket: raise QuiltException( f"Invalid package destination path {dest!r}. 'dest', if set, must be a path " f"in the {registry!r} package registry specified by 'registry'." ) self._fix_sha256() pkg = self._materialize(dest) self._delete_temporary_files() # Now that data has been pushed, delete tmp files created by pkg.set('KEY', obj) pkg.build(name, registry=registry, message=message) return pkg def _materialize(self, dest_url): """ Copies all Package entries to the destination, then creates a new package that points to those objects. Copies each object in this package to path according to logical key structure, and returns a package with physical_keys that point to the new copies. Args: path: where to copy the objects in the package Returns: A new package that points to the copied objects Raises: fail to get bytes fail to put bytes fail to put package to registry """ pkg = self.__class__() pkg._meta = self._meta # Since all that is modified is physical keys, pkg will have the same top hash file_list = [] for logical_key, entry in self.walk(): # Copy the datafiles in the package. physical_key = _to_singleton(entry.physical_keys) new_physical_key = dest_url + "/" + quote(logical_key) file_list.append((physical_key, new_physical_key, entry.size, entry.meta)) results = copy_file_list(file_list) for (logical_key, entry), versioned_key in zip(self.walk(), results): # Create a new package entry pointing to the new remote key. 
assert versioned_key is not None new_entry = entry._clone() new_entry.physical_keys = [versioned_key] pkg.set(logical_key, new_entry) return pkg def _delete_temporary_files(self): physical_keys = [entry.get() for _, entry in self.walk()] with Pool(10) as p: p.map(del_if_temp, physical_keys) def diff(self, other_pkg): """ Returns three lists -- added, modified, deleted. Added: present in other_pkg but not in self. Modified: present in both, but different. Deleted: present in self, but not other_pkg. Args: other_pkg: Package to diff Returns: added, modified, deleted (all lists of logical keys) """ deleted = [] modified = [] other_entries = dict(other_pkg.walk()) for lk, entry in self.walk(): other_entry = other_entries.pop(lk, None) if other_entry is None: deleted.append(lk) elif entry != other_entry: modified.append(lk) added = list(sorted(other_entries)) return added, modified, deleted def map(self, f, include_directories=False): """ Performs a user-specified operation on each entry in the package. Args: f(x, y): function The function to be applied to each package entry. It should take two inputs, a logical key and a PackageEntry. include_directories: bool Whether or not to include directory entries in the map. Returns: list The list of results generated by the map. """ if include_directories: for lk, _ in self._walk_dir_meta(): yield f(lk, self[lk.rstrip("/")]) for lk, entity in self.walk(): yield f(lk, entity) def filter(self, f, include_directories=False): """ Applies a user-specified operation to each entry in the package, removing results that evaluate to False from the output. Args: f(x, y): function The function to be applied to each package entry. It should take two inputs, a logical key and a PackageEntry. This function should return a boolean. include_directories: bool Whether or not to include directory entries in the map. Returns: A new package with entries that evaluated to False removed """ p = Package() excluded_dirs = set() if include_directories: for lk, _ in self._walk_dir_meta(): if not f(lk, self[lk.rstrip("/")]): excluded_dirs.add(lk) for lk, entity in self.walk(): if (not any(p in excluded_dirs for p in pathlib.PurePosixPath(lk).parents) and f(lk, entity)): p.set(lk, entity) return p
1
17,865
This logic is now duplicated here and in `_delete_temporary_files`. Maybe do it all at once: find the temp files, delete them, and fix the logical keys? (A possible one-pass approach is sketched after this record.)
quiltdata-quilt
py
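The reviewer's suggestion above (one pass that finds the temp files, deletes them, and repoints the affected logical keys) could look roughly like the following. This is a minimal, self-contained Python sketch, not the actual quilt3 API: `TEMP_DIR`, `relocate_temp_entries`, and the dict-based stand-in for a package (logical key mapped to a local path) are all illustrative assumptions, whereas the real code would walk `Package` entries, inspect `physical_keys[0]`, and compare against `APP_DIR_TEMPFILE_DIR`.

```python
import pathlib
import tempfile

# Hypothetical temp directory standing in for quilt's APP_DIR_TEMPFILE_DIR.
TEMP_DIR = pathlib.Path(tempfile.gettempdir()) / "quilt-tempfiles"


def relocate_temp_entries(old_pkg: dict, pushed_pkg: dict) -> list:
    """Single pass over the package that (1) finds entries whose local file
    lives in TEMP_DIR, (2) deletes that temp file, and (3) repoints the old
    entry at the pushed (materialized) copy.

    `old_pkg` and `pushed_pkg` are simplified stand-ins: dicts mapping a
    logical key to a filesystem path or URL (str). Returns the logical keys
    that were relocated.
    """
    relocated = []
    for logical_key, path_str in list(old_pkg.items()):
        path = pathlib.Path(path_str)
        if path.parent != TEMP_DIR:
            continue  # not one of the temporary serialization files
        if path.exists():
            path.unlink()  # data is already pushed; drop the temp copy
        # Point the old entry at the materialized location so it stays valid.
        old_pkg[logical_key] = pushed_pkg[logical_key]
        relocated.append(logical_key)
    return relocated


if __name__ == "__main__":
    TEMP_DIR.mkdir(parents=True, exist_ok=True)
    tmp_file = TEMP_DIR / "frame.parquet"
    tmp_file.write_bytes(b"serialized object")

    old_pkg = {"data/frame.parquet": str(tmp_file)}
    pushed_pkg = {"data/frame.parquet": "/registry/data/frame.parquet"}

    print(relocate_temp_entries(old_pkg, pushed_pkg))  # ['data/frame.parquet']
    print(tmp_file.exists())                           # False
```

Folding the detection, deletion, and key fix-up into one traversal would avoid computing the temp-file list in `push` and then recomputing the same predicate inside `_delete_temporary_files`.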
@@ -25,6 +25,11 @@ import io.swagger.converter.ModelConverters; import io.swagger.models.properties.Property; public class DefaultResponseTypeProcessor implements ResponseTypeProcessor { + @Override + public Class<?> getResponseType() { + // not care for this. + return null; + } @Override public Property process(OperationGenerator operationGenerator) {
1
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.servicecomb.swagger.generator.core.processor.response; import java.lang.reflect.Type; import io.servicecomb.swagger.generator.core.OperationGenerator; import io.servicecomb.swagger.generator.core.ResponseTypeProcessor; import io.servicecomb.swagger.generator.core.utils.ParamUtils; import io.swagger.converter.ModelConverters; import io.swagger.models.properties.Property; public class DefaultResponseTypeProcessor implements ResponseTypeProcessor { @Override public Property process(OperationGenerator operationGenerator) { Type responseType = operationGenerator.getProviderMethod().getGenericReturnType(); ParamUtils.addDefinitions(operationGenerator.getSwagger(), responseType); return ModelConverters.getInstance().readAsProperty(responseType); } }
1
8,145
How about naming it NullResponseTypeProcessor?
apache-servicecomb-java-chassis
java
@@ -580,6 +580,8 @@ Dataset* DatasetLoader::CostructFromSampleData(double** sample_values, BinType bin_type = BinType::NumericalBin; if (categorical_features_.count(i)) { bin_type = BinType::CategoricalBin; + bool feat_is_unconstrained = ((config_.monotone_constraints.size() == 0) || (config_.monotone_constraints[i] == 0)); + CHECK(feat_is_unconstrained); } bin_mappers[i].reset(new BinMapper()); if (config_.max_bin_by_feature.empty()) {
1
/*! * Copyright (c) 2016 Microsoft Corporation. All rights reserved. * Licensed under the MIT License. See LICENSE file in the project root for license information. */ #include <LightGBM/dataset_loader.h> #include <LightGBM/network.h> #include <LightGBM/utils/array_args.h> #include <LightGBM/utils/log.h> #include <LightGBM/utils/openmp_wrapper.h> namespace LightGBM { DatasetLoader::DatasetLoader(const Config& io_config, const PredictFunction& predict_fun, int num_class, const char* filename) :config_(io_config), random_(config_.data_random_seed), predict_fun_(predict_fun), num_class_(num_class) { label_idx_ = 0; weight_idx_ = NO_SPECIFIC; group_idx_ = NO_SPECIFIC; SetHeader(filename); } DatasetLoader::~DatasetLoader() { } void DatasetLoader::SetHeader(const char* filename) { std::unordered_map<std::string, int> name2idx; std::string name_prefix("name:"); if (filename != nullptr) { TextReader<data_size_t> text_reader(filename, config_.header); // get column names if (config_.header) { std::string first_line = text_reader.first_line(); feature_names_ = Common::Split(first_line.c_str(), "\t,"); } // load label idx first if (config_.label_column.size() > 0) { if (Common::StartsWith(config_.label_column, name_prefix)) { std::string name = config_.label_column.substr(name_prefix.size()); label_idx_ = -1; for (int i = 0; i < static_cast<int>(feature_names_.size()); ++i) { if (name == feature_names_[i]) { label_idx_ = i; break; } } if (label_idx_ >= 0) { Log::Info("Using column %s as label", name.c_str()); } else { Log::Fatal("Could not find label column %s in data file \n" "or data file doesn't contain header", name.c_str()); } } else { if (!Common::AtoiAndCheck(config_.label_column.c_str(), &label_idx_)) { Log::Fatal("label_column is not a number,\n" "if you want to use a column name,\n" "please add the prefix \"name:\" to the column name"); } Log::Info("Using column number %d as label", label_idx_); } } if (!feature_names_.empty()) { // erase label column name feature_names_.erase(feature_names_.begin() + label_idx_); for (size_t i = 0; i < feature_names_.size(); ++i) { name2idx[feature_names_[i]] = static_cast<int>(i); } } // load ignore columns if (config_.ignore_column.size() > 0) { if (Common::StartsWith(config_.ignore_column, name_prefix)) { std::string names = config_.ignore_column.substr(name_prefix.size()); for (auto name : Common::Split(names.c_str(), ',')) { if (name2idx.count(name) > 0) { int tmp = name2idx[name]; ignore_features_.emplace(tmp); } else { Log::Fatal("Could not find ignore column %s in data file", name.c_str()); } } } else { for (auto token : Common::Split(config_.ignore_column.c_str(), ',')) { int tmp = 0; if (!Common::AtoiAndCheck(token.c_str(), &tmp)) { Log::Fatal("ignore_column is not a number,\n" "if you want to use a column name,\n" "please add the prefix \"name:\" to the column name"); } ignore_features_.emplace(tmp); } } } // load weight idx if (config_.weight_column.size() > 0) { if (Common::StartsWith(config_.weight_column, name_prefix)) { std::string name = config_.weight_column.substr(name_prefix.size()); if (name2idx.count(name) > 0) { weight_idx_ = name2idx[name]; Log::Info("Using column %s as weight", name.c_str()); } else { Log::Fatal("Could not find weight column %s in data file", name.c_str()); } } else { if (!Common::AtoiAndCheck(config_.weight_column.c_str(), &weight_idx_)) { Log::Fatal("weight_column is not a number,\n" "if you want to use a column name,\n" "please add the prefix \"name:\" to the column name"); } Log::Info("Using column number %d as 
weight", weight_idx_); } ignore_features_.emplace(weight_idx_); } // load group idx if (config_.group_column.size() > 0) { if (Common::StartsWith(config_.group_column, name_prefix)) { std::string name = config_.group_column.substr(name_prefix.size()); if (name2idx.count(name) > 0) { group_idx_ = name2idx[name]; Log::Info("Using column %s as group/query id", name.c_str()); } else { Log::Fatal("Could not find group/query column %s in data file", name.c_str()); } } else { if (!Common::AtoiAndCheck(config_.group_column.c_str(), &group_idx_)) { Log::Fatal("group_column is not a number,\n" "if you want to use a column name,\n" "please add the prefix \"name:\" to the column name"); } Log::Info("Using column number %d as group/query id", group_idx_); } ignore_features_.emplace(group_idx_); } } if (config_.categorical_feature.size() > 0) { if (Common::StartsWith(config_.categorical_feature, name_prefix)) { std::string names = config_.categorical_feature.substr(name_prefix.size()); for (auto name : Common::Split(names.c_str(), ',')) { if (name2idx.count(name) > 0) { int tmp = name2idx[name]; categorical_features_.emplace(tmp); } else { Log::Fatal("Could not find categorical_feature %s in data file", name.c_str()); } } } else { for (auto token : Common::Split(config_.categorical_feature.c_str(), ',')) { int tmp = 0; if (!Common::AtoiAndCheck(token.c_str(), &tmp)) { Log::Fatal("categorical_feature is not a number,\n" "if you want to use a column name,\n" "please add the prefix \"name:\" to the column name"); } categorical_features_.emplace(tmp); } } } } Dataset* DatasetLoader::LoadFromFile(const char* filename, const char* initscore_file, int rank, int num_machines) { // don't support query id in data file when training in parallel if (num_machines > 1 && !config_.pre_partition) { if (group_idx_ > 0) { Log::Fatal("Using a query id without pre-partitioning the data file is not supported for parallel training.\n" "Please use an additional query file or pre-partition the data"); } } auto dataset = std::unique_ptr<Dataset>(new Dataset()); data_size_t num_global_data = 0; std::vector<data_size_t> used_data_indices; auto bin_filename = CheckCanLoadFromBin(filename); if (bin_filename.size() == 0) { auto parser = std::unique_ptr<Parser>(Parser::CreateParser(filename, config_.header, 0, label_idx_)); if (parser == nullptr) { Log::Fatal("Could not recognize data format of %s", filename); } dataset->data_filename_ = filename; dataset->label_idx_ = label_idx_; dataset->metadata_.Init(filename, initscore_file); if (!config_.two_round) { // read data to memory auto text_data = LoadTextDataToMemory(filename, dataset->metadata_, rank, num_machines, &num_global_data, &used_data_indices); dataset->num_data_ = static_cast<data_size_t>(text_data.size()); // sample data auto sample_data = SampleTextDataFromMemory(text_data); // construct feature bin mappers ConstructBinMappersFromTextData(rank, num_machines, sample_data, parser.get(), dataset.get()); // initialize label dataset->metadata_.Init(dataset->num_data_, weight_idx_, group_idx_); // extract features ExtractFeaturesFromMemory(text_data, parser.get(), dataset.get()); text_data.clear(); } else { // sample data from file auto sample_data = SampleTextDataFromFile(filename, dataset->metadata_, rank, num_machines, &num_global_data, &used_data_indices); if (used_data_indices.size() > 0) { dataset->num_data_ = static_cast<data_size_t>(used_data_indices.size()); } else { dataset->num_data_ = num_global_data; } // construct feature bin mappers 
ConstructBinMappersFromTextData(rank, num_machines, sample_data, parser.get(), dataset.get()); // initialize label dataset->metadata_.Init(dataset->num_data_, weight_idx_, group_idx_); // extract features ExtractFeaturesFromFile(filename, parser.get(), used_data_indices, dataset.get()); } } else { // load data from binary file dataset.reset(LoadFromBinFile(filename, bin_filename.c_str(), rank, num_machines, &num_global_data, &used_data_indices)); } // check meta data dataset->metadata_.CheckOrPartition(num_global_data, used_data_indices); // need to check training data CheckDataset(dataset.get()); return dataset.release(); } Dataset* DatasetLoader::LoadFromFileAlignWithOtherDataset(const char* filename, const char* initscore_file, const Dataset* train_data) { data_size_t num_global_data = 0; std::vector<data_size_t> used_data_indices; auto dataset = std::unique_ptr<Dataset>(new Dataset()); auto bin_filename = CheckCanLoadFromBin(filename); if (bin_filename.size() == 0) { auto parser = std::unique_ptr<Parser>(Parser::CreateParser(filename, config_.header, 0, label_idx_)); if (parser == nullptr) { Log::Fatal("Could not recognize data format of %s", filename); } dataset->data_filename_ = filename; dataset->label_idx_ = label_idx_; dataset->metadata_.Init(filename, initscore_file); if (!config_.two_round) { // read data in memory auto text_data = LoadTextDataToMemory(filename, dataset->metadata_, 0, 1, &num_global_data, &used_data_indices); dataset->num_data_ = static_cast<data_size_t>(text_data.size()); // initialize label dataset->metadata_.Init(dataset->num_data_, weight_idx_, group_idx_); dataset->CreateValid(train_data); // extract features ExtractFeaturesFromMemory(text_data, parser.get(), dataset.get()); text_data.clear(); } else { TextReader<data_size_t> text_reader(filename, config_.header); // Get number of lines of data file dataset->num_data_ = static_cast<data_size_t>(text_reader.CountLine()); num_global_data = dataset->num_data_; // initialize label dataset->metadata_.Init(dataset->num_data_, weight_idx_, group_idx_); dataset->CreateValid(train_data); // extract features ExtractFeaturesFromFile(filename, parser.get(), used_data_indices, dataset.get()); } } else { // load data from binary file dataset.reset(LoadFromBinFile(filename, bin_filename.c_str(), 0, 1, &num_global_data, &used_data_indices)); } // not need to check validation data // check meta data dataset->metadata_.CheckOrPartition(num_global_data, used_data_indices); return dataset.release(); } Dataset* DatasetLoader::LoadFromBinFile(const char* data_filename, const char* bin_filename, int rank, int num_machines, int* num_global_data, std::vector<data_size_t>* used_data_indices) { auto dataset = std::unique_ptr<Dataset>(new Dataset()); auto reader = VirtualFileReader::Make(bin_filename); dataset->data_filename_ = data_filename; if (!reader->Init()) { Log::Fatal("Could not read binary data from %s", bin_filename); } // buffer to read binary file size_t buffer_size = 16 * 1024 * 1024; auto buffer = std::vector<char>(buffer_size); // check token size_t size_of_token = std::strlen(Dataset::binary_file_token); size_t read_cnt = reader->Read(buffer.data(), sizeof(char) * size_of_token); if (read_cnt != sizeof(char) * size_of_token) { Log::Fatal("Binary file error: token has the wrong size"); } if (std::string(buffer.data()) != std::string(Dataset::binary_file_token)) { Log::Fatal("Input file is not LightGBM binary file"); } // read size of header read_cnt = reader->Read(buffer.data(), sizeof(size_t)); if (read_cnt != 
sizeof(size_t)) { Log::Fatal("Binary file error: header has the wrong size"); } size_t size_of_head = *(reinterpret_cast<size_t*>(buffer.data())); // re-allocmate space if not enough if (size_of_head > buffer_size) { buffer_size = size_of_head; buffer.resize(buffer_size); } // read header read_cnt = reader->Read(buffer.data(), size_of_head); if (read_cnt != size_of_head) { Log::Fatal("Binary file error: header is incorrect"); } // get header const char* mem_ptr = buffer.data(); dataset->num_data_ = *(reinterpret_cast<const data_size_t*>(mem_ptr)); mem_ptr += sizeof(dataset->num_data_); dataset->num_features_ = *(reinterpret_cast<const int*>(mem_ptr)); mem_ptr += sizeof(dataset->num_features_); dataset->num_total_features_ = *(reinterpret_cast<const int*>(mem_ptr)); mem_ptr += sizeof(dataset->num_total_features_); dataset->label_idx_ = *(reinterpret_cast<const int*>(mem_ptr)); mem_ptr += sizeof(dataset->label_idx_); dataset->max_bin_ = *(reinterpret_cast<const int*>(mem_ptr)); mem_ptr += sizeof(dataset->max_bin_); dataset->bin_construct_sample_cnt_ = *(reinterpret_cast<const int*>(mem_ptr)); mem_ptr += sizeof(dataset->bin_construct_sample_cnt_); dataset->min_data_in_bin_ = *(reinterpret_cast<const int*>(mem_ptr)); mem_ptr += sizeof(dataset->min_data_in_bin_); dataset->use_missing_ = *(reinterpret_cast<const bool*>(mem_ptr)); mem_ptr += sizeof(dataset->use_missing_); dataset->zero_as_missing_ = *(reinterpret_cast<const bool*>(mem_ptr)); mem_ptr += sizeof(dataset->zero_as_missing_); dataset->sparse_threshold_ = *(reinterpret_cast<const double*>(mem_ptr)); mem_ptr += sizeof(dataset->sparse_threshold_); const int* tmp_feature_map = reinterpret_cast<const int*>(mem_ptr); dataset->used_feature_map_.clear(); for (int i = 0; i < dataset->num_total_features_; ++i) { dataset->used_feature_map_.push_back(tmp_feature_map[i]); } mem_ptr += sizeof(int) * dataset->num_total_features_; // num_groups dataset->num_groups_ = *(reinterpret_cast<const int*>(mem_ptr)); mem_ptr += sizeof(dataset->num_groups_); // real_feature_idx_ const int* tmp_ptr_real_feature_idx_ = reinterpret_cast<const int*>(mem_ptr); dataset->real_feature_idx_.clear(); for (int i = 0; i < dataset->num_features_; ++i) { dataset->real_feature_idx_.push_back(tmp_ptr_real_feature_idx_[i]); } mem_ptr += sizeof(int) * dataset->num_features_; // feature2group const int* tmp_ptr_feature2group = reinterpret_cast<const int*>(mem_ptr); dataset->feature2group_.clear(); for (int i = 0; i < dataset->num_features_; ++i) { dataset->feature2group_.push_back(tmp_ptr_feature2group[i]); } mem_ptr += sizeof(int) * dataset->num_features_; // feature2subfeature const int* tmp_ptr_feature2subfeature = reinterpret_cast<const int*>(mem_ptr); dataset->feature2subfeature_.clear(); for (int i = 0; i < dataset->num_features_; ++i) { dataset->feature2subfeature_.push_back(tmp_ptr_feature2subfeature[i]); } mem_ptr += sizeof(int) * dataset->num_features_; // group_bin_boundaries const uint64_t* tmp_ptr_group_bin_boundaries = reinterpret_cast<const uint64_t*>(mem_ptr); dataset->group_bin_boundaries_.clear(); for (int i = 0; i < dataset->num_groups_ + 1; ++i) { dataset->group_bin_boundaries_.push_back(tmp_ptr_group_bin_boundaries[i]); } mem_ptr += sizeof(uint64_t) * (dataset->num_groups_ + 1); // group_feature_start_ const int* tmp_ptr_group_feature_start = reinterpret_cast<const int*>(mem_ptr); dataset->group_feature_start_.clear(); for (int i = 0; i < dataset->num_groups_; ++i) { dataset->group_feature_start_.push_back(tmp_ptr_group_feature_start[i]); } mem_ptr += 
sizeof(int) * (dataset->num_groups_); // group_feature_cnt_ const int* tmp_ptr_group_feature_cnt = reinterpret_cast<const int*>(mem_ptr); dataset->group_feature_cnt_.clear(); for (int i = 0; i < dataset->num_groups_; ++i) { dataset->group_feature_cnt_.push_back(tmp_ptr_group_feature_cnt[i]); } mem_ptr += sizeof(int) * (dataset->num_groups_); if (!config_.monotone_constraints.empty()) { CHECK(static_cast<size_t>(dataset->num_total_features_) == config_.monotone_constraints.size()); dataset->monotone_types_.resize(dataset->num_features_); for (int i = 0; i < dataset->num_total_features_; ++i) { int inner_fidx = dataset->InnerFeatureIndex(i); if (inner_fidx >= 0) { dataset->monotone_types_[inner_fidx] = config_.monotone_constraints[i]; } } } else { const int8_t* tmp_ptr_monotone_type = reinterpret_cast<const int8_t*>(mem_ptr); dataset->monotone_types_.clear(); for (int i = 0; i < dataset->num_features_; ++i) { dataset->monotone_types_.push_back(tmp_ptr_monotone_type[i]); } } mem_ptr += sizeof(int8_t) * (dataset->num_features_); if (ArrayArgs<int8_t>::CheckAllZero(dataset->monotone_types_)) { dataset->monotone_types_.clear(); } if (!config_.feature_contri.empty()) { CHECK(static_cast<size_t>(dataset->num_total_features_) == config_.feature_contri.size()); dataset->feature_penalty_.resize(dataset->num_features_); for (int i = 0; i < dataset->num_total_features_; ++i) { int inner_fidx = dataset->InnerFeatureIndex(i); if (inner_fidx >= 0) { dataset->feature_penalty_[inner_fidx] = config_.feature_contri[i]; } } } else { const double* tmp_ptr_feature_penalty = reinterpret_cast<const double*>(mem_ptr); dataset->feature_penalty_.clear(); for (int i = 0; i < dataset->num_features_; ++i) { dataset->feature_penalty_.push_back(tmp_ptr_feature_penalty[i]); } } mem_ptr += sizeof(double) * (dataset->num_features_); if (ArrayArgs<double>::CheckAll(dataset->feature_penalty_, 1)) { dataset->feature_penalty_.clear(); } if (!config_.max_bin_by_feature.empty()) { CHECK(static_cast<size_t>(dataset->num_total_features_) == config_.max_bin_by_feature.size()); CHECK(*(std::min_element(config_.max_bin_by_feature.begin(), config_.max_bin_by_feature.end())) > 1); dataset->max_bin_by_feature_.resize(dataset->num_total_features_); dataset->max_bin_by_feature_.assign(config_.max_bin_by_feature.begin(), config_.max_bin_by_feature.end()); } else { const int32_t* tmp_ptr_max_bin_by_feature = reinterpret_cast<const int32_t*>(mem_ptr); dataset->max_bin_by_feature_.clear(); for (int i = 0; i < dataset->num_total_features_; ++i) { dataset->max_bin_by_feature_.push_back(tmp_ptr_max_bin_by_feature[i]); } } mem_ptr += sizeof(int32_t) * (dataset->num_total_features_); if (ArrayArgs<int32_t>::CheckAll(dataset->max_bin_by_feature_, -1)) { dataset->max_bin_by_feature_.clear(); } // get feature names dataset->feature_names_.clear(); // write feature names for (int i = 0; i < dataset->num_total_features_; ++i) { int str_len = *(reinterpret_cast<const int*>(mem_ptr)); mem_ptr += sizeof(int); std::stringstream str_buf; for (int j = 0; j < str_len; ++j) { char tmp_char = *(reinterpret_cast<const char*>(mem_ptr)); mem_ptr += sizeof(char); str_buf << tmp_char; } dataset->feature_names_.emplace_back(str_buf.str()); } // read size of meta data read_cnt = reader->Read(buffer.data(), sizeof(size_t)); if (read_cnt != sizeof(size_t)) { Log::Fatal("Binary file error: meta data has the wrong size"); } size_t size_of_metadata = *(reinterpret_cast<size_t*>(buffer.data())); // re-allocate space if not enough if (size_of_metadata > buffer_size) { 
buffer_size = size_of_metadata; buffer.resize(buffer_size); } // read meta data read_cnt = reader->Read(buffer.data(), size_of_metadata); if (read_cnt != size_of_metadata) { Log::Fatal("Binary file error: meta data is incorrect"); } // load meta data dataset->metadata_.LoadFromMemory(buffer.data()); *num_global_data = dataset->num_data_; used_data_indices->clear(); // sample local used data if need to partition if (num_machines > 1 && !config_.pre_partition) { const data_size_t* query_boundaries = dataset->metadata_.query_boundaries(); if (query_boundaries == nullptr) { // if not contain query file, minimal sample unit is one record for (data_size_t i = 0; i < dataset->num_data_; ++i) { if (random_.NextShort(0, num_machines) == rank) { used_data_indices->push_back(i); } } } else { // if contain query file, minimal sample unit is one query data_size_t num_queries = dataset->metadata_.num_queries(); data_size_t qid = -1; bool is_query_used = false; for (data_size_t i = 0; i < dataset->num_data_; ++i) { if (qid >= num_queries) { Log::Fatal("Current query exceeds the range of the query file,\n" "please ensure the query file is correct"); } if (i >= query_boundaries[qid + 1]) { // if is new query is_query_used = false; if (random_.NextShort(0, num_machines) == rank) { is_query_used = true; } ++qid; } if (is_query_used) { used_data_indices->push_back(i); } } } dataset->num_data_ = static_cast<data_size_t>((*used_data_indices).size()); } dataset->metadata_.PartitionLabel(*used_data_indices); // read feature data for (int i = 0; i < dataset->num_groups_; ++i) { // read feature size read_cnt = reader->Read(buffer.data(), sizeof(size_t)); if (read_cnt != sizeof(size_t)) { Log::Fatal("Binary file error: feature %d has the wrong size", i); } size_t size_of_feature = *(reinterpret_cast<size_t*>(buffer.data())); // re-allocate space if not enough if (size_of_feature > buffer_size) { buffer_size = size_of_feature; buffer.resize(buffer_size); } read_cnt = reader->Read(buffer.data(), size_of_feature); if (read_cnt != size_of_feature) { Log::Fatal("Binary file error: feature %d is incorrect, read count: %d", i, read_cnt); } dataset->feature_groups_.emplace_back(std::unique_ptr<FeatureGroup>( new FeatureGroup(buffer.data(), *num_global_data, *used_data_indices))); } dataset->feature_groups_.shrink_to_fit(); dataset->is_finish_load_ = true; return dataset.release(); } Dataset* DatasetLoader::CostructFromSampleData(double** sample_values, int** sample_indices, int num_col, const int* num_per_col, size_t total_sample_size, data_size_t num_data) { std::vector<std::unique_ptr<BinMapper>> bin_mappers(num_col); // fill feature_names_ if not header if (feature_names_.empty()) { for (int i = 0; i < num_col; ++i) { std::stringstream str_buf; str_buf << "Column_" << i; feature_names_.push_back(str_buf.str()); } } if (!config_.max_bin_by_feature.empty()) { CHECK(static_cast<size_t>(num_col) == config_.max_bin_by_feature.size()); CHECK(*(std::min_element(config_.max_bin_by_feature.begin(), config_.max_bin_by_feature.end())) > 1); } const data_size_t filter_cnt = static_cast<data_size_t>( static_cast<double>(config_.min_data_in_leaf * total_sample_size) / num_data); if (Network::num_machines() == 1) { // if only one machine, find bin locally OMP_INIT_EX(); #pragma omp parallel for schedule(guided) for (int i = 0; i < num_col; ++i) { OMP_LOOP_EX_BEGIN(); if (ignore_features_.count(i) > 0) { bin_mappers[i] = nullptr; continue; } BinType bin_type = BinType::NumericalBin; if (categorical_features_.count(i)) { bin_type = 
BinType::CategoricalBin; } bin_mappers[i].reset(new BinMapper()); if (config_.max_bin_by_feature.empty()) { bin_mappers[i]->FindBin(sample_values[i], num_per_col[i], total_sample_size, config_.max_bin, config_.min_data_in_bin, filter_cnt, bin_type, config_.use_missing, config_.zero_as_missing); } else { bin_mappers[i]->FindBin(sample_values[i], num_per_col[i], total_sample_size, config_.max_bin_by_feature[i], config_.min_data_in_bin, filter_cnt, bin_type, config_.use_missing, config_.zero_as_missing); } OMP_LOOP_EX_END(); } OMP_THROW_EX(); } else { // if have multi-machines, need to find bin distributed // different machines will find bin for different features int num_machines = Network::num_machines(); int rank = Network::rank(); int total_num_feature = num_col; total_num_feature = Network::GlobalSyncUpByMin(total_num_feature); // start and len will store the process feature indices for different machines // machine i will find bins for features in [ start[i], start[i] + len[i] ) std::vector<int> start(num_machines); std::vector<int> len(num_machines); int step = (total_num_feature + num_machines - 1) / num_machines; if (step < 1) { step = 1; } start[0] = 0; for (int i = 0; i < num_machines - 1; ++i) { len[i] = std::min(step, total_num_feature - start[i]); start[i + 1] = start[i] + len[i]; } len[num_machines - 1] = total_num_feature - start[num_machines - 1]; OMP_INIT_EX(); #pragma omp parallel for schedule(guided) for (int i = 0; i < len[rank]; ++i) { OMP_LOOP_EX_BEGIN(); if (ignore_features_.count(start[rank] + i) > 0) { continue; } BinType bin_type = BinType::NumericalBin; if (categorical_features_.count(start[rank] + i)) { bin_type = BinType::CategoricalBin; } bin_mappers[i].reset(new BinMapper()); if (config_.max_bin_by_feature.empty()) { bin_mappers[i]->FindBin(sample_values[start[rank] + i], num_per_col[start[rank] + i], total_sample_size, config_.max_bin, config_.min_data_in_bin, filter_cnt, bin_type, config_.use_missing, config_.zero_as_missing); } else { bin_mappers[i]->FindBin(sample_values[start[rank] + i], num_per_col[start[rank] + i], total_sample_size, config_.max_bin_by_feature[start[rank] + i], config_.min_data_in_bin, filter_cnt, bin_type, config_.use_missing, config_.zero_as_missing); } OMP_LOOP_EX_END(); } OMP_THROW_EX(); int max_bin = 0; for (int i = 0; i < len[rank]; ++i) { if (bin_mappers[i] != nullptr) { max_bin = std::max(max_bin, bin_mappers[i]->num_bin()); } } max_bin = Network::GlobalSyncUpByMax(max_bin); // get size of bin mapper with max_bin size int type_size = BinMapper::SizeForSpecificBin(max_bin); // since sizes of different feature may not be same, we expand all bin mapper to type_size comm_size_t buffer_size = type_size * total_num_feature; CHECK(buffer_size >= 0); auto input_buffer = std::vector<char>(buffer_size); auto output_buffer = std::vector<char>(buffer_size); // find local feature bins and copy to buffer #pragma omp parallel for schedule(guided) for (int i = 0; i < len[rank]; ++i) { OMP_LOOP_EX_BEGIN(); if (ignore_features_.count(start[rank] + i) > 0) { continue; } bin_mappers[i]->CopyTo(input_buffer.data() + i * type_size); // free bin_mappers[i].reset(nullptr); OMP_LOOP_EX_END(); } OMP_THROW_EX(); std::vector<comm_size_t> size_start(num_machines); std::vector<comm_size_t> size_len(num_machines); // convert to binary size for (int i = 0; i < num_machines; ++i) { size_start[i] = start[i] * static_cast<comm_size_t>(type_size); size_len[i] = len[i] * static_cast<comm_size_t>(type_size); } // gather global feature bin mappers 
Network::Allgather(input_buffer.data(), size_start.data(), size_len.data(), output_buffer.data(), buffer_size); // restore features bins from buffer for (int i = 0; i < total_num_feature; ++i) { if (ignore_features_.count(i) > 0) { bin_mappers[i] = nullptr; continue; } bin_mappers[i].reset(new BinMapper()); bin_mappers[i]->CopyFrom(output_buffer.data() + i * type_size); } } auto dataset = std::unique_ptr<Dataset>(new Dataset(num_data)); dataset->Construct(bin_mappers, sample_indices, num_per_col, total_sample_size, config_); dataset->set_feature_names(feature_names_); return dataset.release(); } // ---- private functions ---- void DatasetLoader::CheckDataset(const Dataset* dataset) { if (dataset->num_data_ <= 0) { Log::Fatal("Data file %s is empty", dataset->data_filename_.c_str()); } if (dataset->feature_names_.size() != static_cast<size_t>(dataset->num_total_features_)) { Log::Fatal("Size of feature name error, should be %d, got %d", dataset->num_total_features_, static_cast<int>(dataset->feature_names_.size())); } bool is_feature_order_by_group = true; int last_group = -1; int last_sub_feature = -1; // if features are ordered, not need to use hist_buf for (int i = 0; i < dataset->num_features_; ++i) { int group = dataset->feature2group_[i]; int sub_feature = dataset->feature2subfeature_[i]; if (group < last_group) { is_feature_order_by_group = false; } else if (group == last_group) { if (sub_feature <= last_sub_feature) { is_feature_order_by_group = false; break; } } last_group = group; last_sub_feature = sub_feature; } if (!is_feature_order_by_group) { Log::Fatal("Features in dataset should be ordered by group"); } } std::vector<std::string> DatasetLoader::LoadTextDataToMemory(const char* filename, const Metadata& metadata, int rank, int num_machines, int* num_global_data, std::vector<data_size_t>* used_data_indices) { TextReader<data_size_t> text_reader(filename, config_.header); used_data_indices->clear(); if (num_machines == 1 || config_.pre_partition) { // read all lines *num_global_data = text_reader.ReadAllLines(); } else { // need partition data // get query data const data_size_t* query_boundaries = metadata.query_boundaries(); if (query_boundaries == nullptr) { // if not contain query data, minimal sample unit is one record *num_global_data = text_reader.ReadAndFilterLines([this, rank, num_machines](data_size_t) { if (random_.NextShort(0, num_machines) == rank) { return true; } else { return false; } }, used_data_indices); } else { // if contain query data, minimal sample unit is one query data_size_t num_queries = metadata.num_queries(); data_size_t qid = -1; bool is_query_used = false; *num_global_data = text_reader.ReadAndFilterLines( [this, rank, num_machines, &qid, &query_boundaries, &is_query_used, num_queries] (data_size_t line_idx) { if (qid >= num_queries) { Log::Fatal("Current query exceeds the range of the query file,\n" "please ensure the query file is correct"); } if (line_idx >= query_boundaries[qid + 1]) { // if is new query is_query_used = false; if (random_.NextShort(0, num_machines) == rank) { is_query_used = true; } ++qid; } return is_query_used; }, used_data_indices); } } return std::move(text_reader.Lines()); } std::vector<std::string> DatasetLoader::SampleTextDataFromMemory(const std::vector<std::string>& data) { int sample_cnt = config_.bin_construct_sample_cnt; if (static_cast<size_t>(sample_cnt) > data.size()) { sample_cnt = static_cast<int>(data.size()); } auto sample_indices = random_.Sample(static_cast<int>(data.size()), sample_cnt); 
std::vector<std::string> out(sample_indices.size()); for (size_t i = 0; i < sample_indices.size(); ++i) { const size_t idx = sample_indices[i]; out[i] = data[idx]; } return out; } std::vector<std::string> DatasetLoader::SampleTextDataFromFile(const char* filename, const Metadata& metadata, int rank, int num_machines, int* num_global_data, std::vector<data_size_t>* used_data_indices) { const data_size_t sample_cnt = static_cast<data_size_t>(config_.bin_construct_sample_cnt); TextReader<data_size_t> text_reader(filename, config_.header); std::vector<std::string> out_data; if (num_machines == 1 || config_.pre_partition) { *num_global_data = static_cast<data_size_t>(text_reader.SampleFromFile(random_, sample_cnt, &out_data)); } else { // need partition data // get query data const data_size_t* query_boundaries = metadata.query_boundaries(); if (query_boundaries == nullptr) { // if not contain query file, minimal sample unit is one record *num_global_data = text_reader.SampleAndFilterFromFile([this, rank, num_machines] (data_size_t) { if (random_.NextShort(0, num_machines) == rank) { return true; } else { return false; } }, used_data_indices, random_, sample_cnt, &out_data); } else { // if contain query file, minimal sample unit is one query data_size_t num_queries = metadata.num_queries(); data_size_t qid = -1; bool is_query_used = false; *num_global_data = text_reader.SampleAndFilterFromFile( [this, rank, num_machines, &qid, &query_boundaries, &is_query_used, num_queries] (data_size_t line_idx) { if (qid >= num_queries) { Log::Fatal("Query id exceeds the range of the query file, " "please ensure the query file is correct"); } if (line_idx >= query_boundaries[qid + 1]) { // if is new query is_query_used = false; if (random_.NextShort(0, num_machines) == rank) { is_query_used = true; } ++qid; } return is_query_used; }, used_data_indices, random_, sample_cnt, &out_data); } } return out_data; } void DatasetLoader::ConstructBinMappersFromTextData(int rank, int num_machines, const std::vector<std::string>& sample_data, const Parser* parser, Dataset* dataset) { std::vector<std::vector<double>> sample_values; std::vector<std::vector<int>> sample_indices; std::vector<std::pair<int, double>> oneline_features; double label; for (int i = 0; i < static_cast<int>(sample_data.size()); ++i) { oneline_features.clear(); // parse features parser->ParseOneLine(sample_data[i].c_str(), &oneline_features, &label); for (std::pair<int, double>& inner_data : oneline_features) { if (static_cast<size_t>(inner_data.first) >= sample_values.size()) { sample_values.resize(inner_data.first + 1); sample_indices.resize(inner_data.first + 1); } if (std::fabs(inner_data.second) > kZeroThreshold || std::isnan(inner_data.second)) { sample_values[inner_data.first].emplace_back(inner_data.second); sample_indices[inner_data.first].emplace_back(i); } } } dataset->feature_groups_.clear(); if (feature_names_.empty()) { // -1 means doesn't use this feature dataset->num_total_features_ = std::max(static_cast<int>(sample_values.size()), parser->TotalColumns() - 1); dataset->used_feature_map_ = std::vector<int>(dataset->num_total_features_, -1); } else { dataset->used_feature_map_ = std::vector<int>(feature_names_.size(), -1); dataset->num_total_features_ = static_cast<int>(feature_names_.size()); } if (!config_.max_bin_by_feature.empty()) { CHECK(static_cast<size_t>(dataset->num_total_features_) == config_.max_bin_by_feature.size()); CHECK(*(std::min_element(config_.max_bin_by_feature.begin(), config_.max_bin_by_feature.end())) > 1); } // 
check the range of label_idx, weight_idx and group_idx CHECK(label_idx_ >= 0 && label_idx_ <= dataset->num_total_features_); CHECK(weight_idx_ < 0 || weight_idx_ < dataset->num_total_features_); CHECK(group_idx_ < 0 || group_idx_ < dataset->num_total_features_); // fill feature_names_ if not header if (feature_names_.empty()) { for (int i = 0; i < dataset->num_total_features_; ++i) { std::stringstream str_buf; str_buf << "Column_" << i; feature_names_.push_back(str_buf.str()); } } dataset->set_feature_names(feature_names_); std::vector<std::unique_ptr<BinMapper>> bin_mappers(dataset->num_total_features_); const data_size_t filter_cnt = static_cast<data_size_t>( static_cast<double>(config_.min_data_in_leaf* sample_data.size()) / dataset->num_data_); // start find bins if (num_machines == 1) { // if only one machine, find bin locally OMP_INIT_EX(); #pragma omp parallel for schedule(guided) for (int i = 0; i < static_cast<int>(sample_values.size()); ++i) { OMP_LOOP_EX_BEGIN(); if (ignore_features_.count(i) > 0) { bin_mappers[i] = nullptr; continue; } BinType bin_type = BinType::NumericalBin; if (categorical_features_.count(i)) { bin_type = BinType::CategoricalBin; } bin_mappers[i].reset(new BinMapper()); if (config_.max_bin_by_feature.empty()) { bin_mappers[i]->FindBin(sample_values[i].data(), static_cast<int>(sample_values[i].size()), sample_data.size(), config_.max_bin, config_.min_data_in_bin, filter_cnt, bin_type, config_.use_missing, config_.zero_as_missing); } else { bin_mappers[i]->FindBin(sample_values[i].data(), static_cast<int>(sample_values[i].size()), sample_data.size(), config_.max_bin_by_feature[i], config_.min_data_in_bin, filter_cnt, bin_type, config_.use_missing, config_.zero_as_missing); } OMP_LOOP_EX_END(); } OMP_THROW_EX(); } else { // if have multi-machines, need to find bin distributed // different machines will find bin for different features int num_total_features = dataset->num_total_features_; num_total_features = Network::GlobalSyncUpByMin(num_total_features); dataset->num_total_features_ = num_total_features; // start and len will store the process feature indices for different machines // machine i will find bins for features in [ start[i], start[i] + len[i] ) std::vector<int> start(num_machines); std::vector<int> len(num_machines); int step = (num_total_features + num_machines - 1) / num_machines; if (step < 1) { step = 1; } start[0] = 0; for (int i = 0; i < num_machines - 1; ++i) { len[i] = std::min(step, num_total_features - start[i]); start[i + 1] = start[i] + len[i]; } len[num_machines - 1] = num_total_features - start[num_machines - 1]; OMP_INIT_EX(); #pragma omp parallel for schedule(guided) for (int i = 0; i < len[rank]; ++i) { OMP_LOOP_EX_BEGIN(); if (ignore_features_.count(start[rank] + i) > 0) { continue; } BinType bin_type = BinType::NumericalBin; if (categorical_features_.count(start[rank] + i)) { bin_type = BinType::CategoricalBin; } bin_mappers[i].reset(new BinMapper()); if (config_.max_bin_by_feature.empty()) { bin_mappers[i]->FindBin(sample_values[start[rank] + i].data(), static_cast<int>(sample_values[start[rank] + i].size()), sample_data.size(), config_.max_bin, config_.min_data_in_bin, filter_cnt, bin_type, config_.use_missing, config_.zero_as_missing); } else { bin_mappers[i]->FindBin(sample_values[start[rank] + i].data(), static_cast<int>(sample_values[start[rank] + i].size()), sample_data.size(), config_.max_bin_by_feature[i], config_.min_data_in_bin, filter_cnt, bin_type, config_.use_missing, config_.zero_as_missing); } OMP_LOOP_EX_END(); } 
OMP_THROW_EX(); int max_bin = 0; for (int i = 0; i < len[rank]; ++i) { if (bin_mappers[i] != nullptr) { max_bin = std::max(max_bin, bin_mappers[i]->num_bin()); } } max_bin = Network::GlobalSyncUpByMax(max_bin); // get size of bin mapper with max_bin size int type_size = BinMapper::SizeForSpecificBin(max_bin); // since sizes of different feature may not be same, we expand all bin mapper to type_size comm_size_t buffer_size = type_size * num_total_features; CHECK(buffer_size >= 0); auto input_buffer = std::vector<char>(buffer_size); auto output_buffer = std::vector<char>(buffer_size); // find local feature bins and copy to buffer #pragma omp parallel for schedule(guided) for (int i = 0; i < len[rank]; ++i) { OMP_LOOP_EX_BEGIN(); if (ignore_features_.count(start[rank] + i) > 0) { continue; } bin_mappers[i]->CopyTo(input_buffer.data() + i * type_size); // free bin_mappers[i].reset(nullptr); OMP_LOOP_EX_END(); } OMP_THROW_EX(); std::vector<comm_size_t> size_start(num_machines); std::vector<comm_size_t> size_len(num_machines); // convert to binary size for (int i = 0; i < num_machines; ++i) { size_start[i] = start[i] * static_cast<comm_size_t>(type_size); size_len[i] = len[i] * static_cast<comm_size_t>(type_size); } // gather global feature bin mappers Network::Allgather(input_buffer.data(), size_start.data(), size_len.data(), output_buffer.data(), buffer_size); // restore features bins from buffer for (int i = 0; i < num_total_features; ++i) { if (ignore_features_.count(i) > 0) { bin_mappers[i] = nullptr; continue; } bin_mappers[i].reset(new BinMapper()); bin_mappers[i]->CopyFrom(output_buffer.data() + i * type_size); } } sample_values.clear(); dataset->Construct(bin_mappers, Common::Vector2Ptr<int>(sample_indices).data(), Common::VectorSize<int>(sample_indices).data(), sample_data.size(), config_); } /*! 
\brief Extract local features from memory */ void DatasetLoader::ExtractFeaturesFromMemory(std::vector<std::string>& text_data, const Parser* parser, Dataset* dataset) { std::vector<std::pair<int, double>> oneline_features; double tmp_label = 0.0f; if (predict_fun_ == nullptr) { OMP_INIT_EX(); // if doesn't need to prediction with initial model #pragma omp parallel for schedule(static) private(oneline_features) firstprivate(tmp_label) for (data_size_t i = 0; i < dataset->num_data_; ++i) { OMP_LOOP_EX_BEGIN(); const int tid = omp_get_thread_num(); oneline_features.clear(); // parser parser->ParseOneLine(text_data[i].c_str(), &oneline_features, &tmp_label); // set label dataset->metadata_.SetLabelAt(i, static_cast<label_t>(tmp_label)); // free processed line: text_data[i].clear(); // shrink_to_fit will be very slow in linux, and seems not free memory, disable for now // text_reader_->Lines()[i].shrink_to_fit(); // push data for (auto& inner_data : oneline_features) { if (inner_data.first >= dataset->num_total_features_) { continue; } int feature_idx = dataset->used_feature_map_[inner_data.first]; if (feature_idx >= 0) { // if is used feature int group = dataset->feature2group_[feature_idx]; int sub_feature = dataset->feature2subfeature_[feature_idx]; dataset->feature_groups_[group]->PushData(tid, sub_feature, i, inner_data.second); } else { if (inner_data.first == weight_idx_) { dataset->metadata_.SetWeightAt(i, static_cast<label_t>(inner_data.second)); } else if (inner_data.first == group_idx_) { dataset->metadata_.SetQueryAt(i, static_cast<data_size_t>(inner_data.second)); } } } OMP_LOOP_EX_END(); } OMP_THROW_EX(); } else { OMP_INIT_EX(); // if need to prediction with initial model std::vector<double> init_score(dataset->num_data_ * num_class_); #pragma omp parallel for schedule(static) private(oneline_features) firstprivate(tmp_label) for (data_size_t i = 0; i < dataset->num_data_; ++i) { OMP_LOOP_EX_BEGIN(); const int tid = omp_get_thread_num(); oneline_features.clear(); // parser parser->ParseOneLine(text_data[i].c_str(), &oneline_features, &tmp_label); // set initial score std::vector<double> oneline_init_score(num_class_); predict_fun_(oneline_features, oneline_init_score.data()); for (int k = 0; k < num_class_; ++k) { init_score[k * dataset->num_data_ + i] = static_cast<double>(oneline_init_score[k]); } // set label dataset->metadata_.SetLabelAt(i, static_cast<label_t>(tmp_label)); // free processed line: text_data[i].clear(); // shrink_to_fit will be very slow in linux, and seems not free memory, disable for now // text_reader_->Lines()[i].shrink_to_fit(); // push data for (auto& inner_data : oneline_features) { if (inner_data.first >= dataset->num_total_features_) { continue; } int feature_idx = dataset->used_feature_map_[inner_data.first]; if (feature_idx >= 0) { // if is used feature int group = dataset->feature2group_[feature_idx]; int sub_feature = dataset->feature2subfeature_[feature_idx]; dataset->feature_groups_[group]->PushData(tid, sub_feature, i, inner_data.second); } else { if (inner_data.first == weight_idx_) { dataset->metadata_.SetWeightAt(i, static_cast<label_t>(inner_data.second)); } else if (inner_data.first == group_idx_) { dataset->metadata_.SetQueryAt(i, static_cast<data_size_t>(inner_data.second)); } } } OMP_LOOP_EX_END(); } OMP_THROW_EX(); // metadata_ will manage space of init_score dataset->metadata_.SetInitScore(init_score.data(), dataset->num_data_ * num_class_); } dataset->FinishLoad(); // text data can be free after loaded feature values 
text_data.clear(); } /*! \brief Extract local features from file */ void DatasetLoader::ExtractFeaturesFromFile(const char* filename, const Parser* parser, const std::vector<data_size_t>& used_data_indices, Dataset* dataset) { std::vector<double> init_score; if (predict_fun_ != nullptr) { init_score = std::vector<double>(dataset->num_data_ * num_class_); } std::function<void(data_size_t, const std::vector<std::string>&)> process_fun = [this, &init_score, &parser, &dataset] (data_size_t start_idx, const std::vector<std::string>& lines) { std::vector<std::pair<int, double>> oneline_features; double tmp_label = 0.0f; OMP_INIT_EX(); #pragma omp parallel for schedule(static) private(oneline_features) firstprivate(tmp_label) for (data_size_t i = 0; i < static_cast<data_size_t>(lines.size()); ++i) { OMP_LOOP_EX_BEGIN(); const int tid = omp_get_thread_num(); oneline_features.clear(); // parser parser->ParseOneLine(lines[i].c_str(), &oneline_features, &tmp_label); // set initial score if (!init_score.empty()) { std::vector<double> oneline_init_score(num_class_); predict_fun_(oneline_features, oneline_init_score.data()); for (int k = 0; k < num_class_; ++k) { init_score[k * dataset->num_data_ + start_idx + i] = static_cast<double>(oneline_init_score[k]); } } // set label dataset->metadata_.SetLabelAt(start_idx + i, static_cast<label_t>(tmp_label)); // push data for (auto& inner_data : oneline_features) { if (inner_data.first >= dataset->num_total_features_) { continue; } int feature_idx = dataset->used_feature_map_[inner_data.first]; if (feature_idx >= 0) { // if is used feature int group = dataset->feature2group_[feature_idx]; int sub_feature = dataset->feature2subfeature_[feature_idx]; dataset->feature_groups_[group]->PushData(tid, sub_feature, start_idx + i, inner_data.second); } else { if (inner_data.first == weight_idx_) { dataset->metadata_.SetWeightAt(start_idx + i, static_cast<label_t>(inner_data.second)); } else if (inner_data.first == group_idx_) { dataset->metadata_.SetQueryAt(start_idx + i, static_cast<data_size_t>(inner_data.second)); } } } OMP_LOOP_EX_END(); } OMP_THROW_EX(); }; TextReader<data_size_t> text_reader(filename, config_.header); if (!used_data_indices.empty()) { // only need part of data text_reader.ReadPartAndProcessParallel(used_data_indices, process_fun); } else { // need full data text_reader.ReadAllAndProcessParallel(process_fun); } // metadata_ will manage space of init_score if (!init_score.empty()) { dataset->metadata_.SetInitScore(init_score.data(), dataset->num_data_ * num_class_); } dataset->FinishLoad(); } /*! \brief Check can load from binary file */ std::string DatasetLoader::CheckCanLoadFromBin(const char* filename) { std::string bin_filename(filename); bin_filename.append(".bin"); auto reader = VirtualFileReader::Make(bin_filename.c_str()); if (!reader->Init()) { bin_filename = std::string(filename); reader = VirtualFileReader::Make(bin_filename.c_str()); if (!reader->Init()) { Log::Fatal("Cannot open data file %s", bin_filename.c_str()); } } size_t buffer_size = 256; auto buffer = std::vector<char>(buffer_size); // read size of token size_t size_of_token = std::strlen(Dataset::binary_file_token); size_t read_cnt = reader->Read(buffer.data(), size_of_token); if (read_cnt == size_of_token && std::string(buffer.data()) == std::string(Dataset::binary_file_token)) { return bin_filename; } else { return std::string(); } } } // namespace LightGBM
1
20,726
Maybe `Log::Fatal` here? The bare `[LightGBM] [Fatal] Check failed: feat_is_unconstrained at ...` output (and a kernel death in the case of a Jupyter Notebook) is neither informative nor user-friendly. A hedged sketch of the suggested change follows this record.
microsoft-LightGBM
cpp
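
The review comment above suggests replacing a bare `CHECK`-style failure with an explicit `Log::Fatal` call that explains the problem to the user. Below is a minimal, self-contained sketch (not LightGBM code) contrasting the two styles. Only the name `feat_is_unconstrained` is taken from the comment; the helper names (`SIMPLE_CHECK`, `FatalError`), the feature index, and the wording of the error message are assumptions made purely for illustration.

```cpp
// Hedged sketch: contrast a bare CHECK-style failure with a descriptive
// fatal error, as the review comment suggests. Everything here is
// illustrative; only `feat_is_unconstrained` comes from the comment above.
#include <cstdio>
#include <cstdlib>
#include <string>

// Bare assertion: reports only the failed expression and location,
// similar to "[Fatal] Check failed: feat_is_unconstrained at ...".
#define SIMPLE_CHECK(condition)                                      \
  do {                                                               \
    if (!(condition)) {                                              \
      std::fprintf(stderr, "Check failed: %s at %s:%d\n",            \
                   #condition, __FILE__, __LINE__);                  \
      std::abort();                                                  \
    }                                                                \
  } while (0)

// Descriptive fatal error: tells the user what is wrong and how to react.
[[noreturn]] void FatalError(const std::string& msg) {
  std::fprintf(stderr, "[Fatal] %s\n", msg.c_str());
  std::exit(EXIT_FAILURE);
}

int main() {
  const int feature_index = 3;               // hypothetical feature
  const bool feat_is_unconstrained = false;  // pretend validation failed

  // User-friendly variant: names the feature and hints at the fix
  // (the exact wording is invented for this sketch).
  if (!feat_is_unconstrained) {
    FatalError("Feature " + std::to_string(feature_index) +
               " violates the expected constraint configuration; "
               "please check the constraint-related parameters for this feature.");
  }

  // Bare variant (not reached here): only the expression and file/line.
  SIMPLE_CHECK(feat_is_unconstrained);
  return 0;
}
```

In the LightGBM code itself, the same effect would presumably be achieved by calling `Log::Fatal` with a printf-style format string, as the other error paths in this file already do (for example the "Binary file error" messages); the sketch only mimics that pattern without depending on LightGBM headers.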