patch (stringlengths 17–31.2k) | y (int64 1–1) | oldf (stringlengths 0–2.21M) | idx (int64 1–1) | id (int64 4.29k–68.4k) | msg (stringlengths 8–843) | proj (stringclasses, 212 values) | lang (stringclasses, 9 values)
---|---|---|---|---|---|---|---|
@@ -263,6 +263,7 @@ public abstract class ElasticsearchStorage extends zipkin2.storage.StorageCompon
public abstract int namesLookback();
@Override
+ @Memoized // Memoized to reduce garbage as this is effectively a singleton.
public SpanStore spanStore() {
ensureIndexTemplates();
return new ElasticsearchSpanStore(this); | 1 | /*
* Copyright 2015-2019 The OpenZipkin Authors
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package zipkin2.elasticsearch;
import com.fasterxml.jackson.core.JsonParser;
import com.google.auto.value.AutoValue;
import com.google.auto.value.extension.memoized.Memoized;
import com.linecorp.armeria.client.ClientFactory;
import com.linecorp.armeria.client.ClientFactoryBuilder;
import com.linecorp.armeria.client.ClientOptionsBuilder;
import com.linecorp.armeria.client.Endpoint;
import com.linecorp.armeria.client.HttpClient;
import com.linecorp.armeria.client.HttpClientBuilder;
import com.linecorp.armeria.client.encoding.HttpDecodingClient;
import com.linecorp.armeria.client.endpoint.EndpointGroup;
import com.linecorp.armeria.client.endpoint.EndpointGroupRegistry;
import com.linecorp.armeria.client.endpoint.EndpointSelectionStrategy;
import com.linecorp.armeria.client.endpoint.StaticEndpointGroup;
import com.linecorp.armeria.client.endpoint.dns.DnsAddressEndpointGroup;
import com.linecorp.armeria.client.endpoint.dns.DnsAddressEndpointGroupBuilder;
import com.linecorp.armeria.client.endpoint.healthcheck.HttpHealthCheckedEndpointGroup;
import com.linecorp.armeria.client.endpoint.healthcheck.HttpHealthCheckedEndpointGroupBuilder;
import com.linecorp.armeria.common.AggregatedHttpRequest;
import com.linecorp.armeria.common.Flags;
import com.linecorp.armeria.common.HttpData;
import com.linecorp.armeria.common.HttpMethod;
import com.linecorp.armeria.common.SessionProtocol;
import com.linecorp.armeria.common.util.AbstractListenable;
import com.linecorp.armeria.common.util.EventLoopGroups;
import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.ArrayList;
import java.util.Collections;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Locale;
import java.util.Set;
import java.util.concurrent.CompletionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.function.Consumer;
import java.util.stream.Collectors;
import zipkin2.CheckResult;
import zipkin2.elasticsearch.internal.IndexNameFormatter;
import zipkin2.elasticsearch.internal.client.HttpCall;
import zipkin2.elasticsearch.internal.client.HttpCall.BodyConverter;
import zipkin2.internal.Nullable;
import zipkin2.internal.Platform;
import zipkin2.storage.AutocompleteTags;
import zipkin2.storage.ServiceAndSpanNames;
import zipkin2.storage.SpanConsumer;
import zipkin2.storage.SpanStore;
import zipkin2.storage.StorageComponent;
import static zipkin2.elasticsearch.ElasticsearchAutocompleteTags.AUTOCOMPLETE;
import static zipkin2.elasticsearch.ElasticsearchSpanStore.DEPENDENCY;
import static zipkin2.elasticsearch.ElasticsearchSpanStore.SPAN;
import static zipkin2.elasticsearch.EnsureIndexTemplate.ensureIndexTemplate;
import static zipkin2.elasticsearch.internal.JsonReaders.enterPath;
import static zipkin2.elasticsearch.internal.JsonSerializers.JSON_FACTORY;
@AutoValue
public abstract class ElasticsearchStorage extends zipkin2.storage.StorageComponent {
/**
* A list of elasticsearch nodes to connect to, in http://host:port or https://host:port format.
* Note this value is only read once.
*/
public interface HostsSupplier {
List<String> get();
}
public static Builder newBuilder() {
return new $AutoValue_ElasticsearchStorage.Builder()
.clientCustomizer(unused -> {
})
.clientFactoryCustomizer(unused -> {
})
.hosts(Collections.singletonList("http://localhost:9200"))
.strictTraceId(true)
.searchEnabled(true)
.index("zipkin")
.dateSeparator('-')
.indexShards(5)
.indexReplicas(1)
.namesLookback(86400000)
.flushOnWrites(false)
.autocompleteKeys(Collections.emptyList())
.autocompleteTtl((int) TimeUnit.HOURS.toMillis(1))
.autocompleteCardinality(5 * 4000); // Ex. 5 site tags with cardinality 4000 each
}
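// Illustrative builder usage (not from the original source; the host values are
// placeholders):
//
//   ElasticsearchStorage storage = ElasticsearchStorage.newBuilder()
//       .hosts(Arrays.asList("http://es-1:9200", "http://es-2:9200"))
//       .index("zipkin")
//       .build();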
abstract Builder toBuilder();
@AutoValue.Builder
public abstract static class Builder extends StorageComponent.Builder {
/**
* Customizes the {@link HttpClientBuilder} used when connecting to ElasticSearch. This is used
* by the server and tests to enable detailed logging and tweak timeouts.
*/
public abstract Builder clientCustomizer(Consumer<ClientOptionsBuilder> clientCustomizer);
/**
* Customizes the {@link ClientFactoryBuilder} used when connecting to ElasticSearch. This is
* used by the server and tests to tweak timeouts.
*/
public abstract Builder clientFactoryCustomizer(
Consumer<ClientFactoryBuilder> clientFactoryCustomizer);
/**
* A list of elasticsearch nodes to connect to, in http://host:port or https://host:port format.
* Defaults to "http://localhost:9200".
*/
public final Builder hosts(final List<String> hosts) {
if (hosts == null) throw new NullPointerException("hosts == null");
return hostsSupplier(new HostsSupplier() {
@Override public List<String> get() {
return hosts;
}
@Override public String toString() {
return hosts.toString();
}
});
}
/**
* Like {@link #hosts(List)}, except the value is deferred.
*
* <p>This was added to support dynamic endpoint resolution for Amazon Elasticsearch. This
* value is only read once.
*/
public abstract Builder hostsSupplier(HostsSupplier hosts);
/**
* Only valid when the destination is Elasticsearch 5.x. Indicates the ingest pipeline used
* before spans are indexed. No default.
*
* <p>See https://www.elastic.co/guide/en/elasticsearch/reference/master/pipeline.html
*/
public abstract Builder pipeline(String pipeline);
/**
* Only return span and service names where all {@link zipkin2.Span#timestamp()} are at or after
* (now - lookback) in milliseconds. Defaults to 1 day (86400000).
*/
public abstract Builder namesLookback(int namesLookback);
/**
* Internal and visible only for testing.
*
* <p>See https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-refresh.html
*/
public abstract Builder flushOnWrites(boolean flushOnWrites);
/** The index prefix to use when generating daily index names. Defaults to zipkin. */
public final Builder index(String index) {
indexNameFormatterBuilder().index(index);
return this;
}
/**
* The date separator to use when generating daily index names. Defaults to '-'.
*
* <p>By default, spans with a timestamp falling on 2016/03/19 end up in the index
* 'zipkin-span-2016-03-19'. When the date separator is '.', the index would be
* 'zipkin-span-2016.03.19'. If the date separator is 0, there is no delimiter; e.g. the index
* would be 'zipkin-span-20160319'.
*/
public final Builder dateSeparator(char dateSeparator) {
indexNameFormatterBuilder().dateSeparator(dateSeparator);
return this;
}
/**
* The number of shards to split the index into. Each shard and its replicas are assigned to a
* machine in the cluster. Increasing the number of shards and machines in the cluster will
* improve read and write performance. Number of shards cannot be changed for existing indices,
* but new daily indices will pick up changes to the setting. Defaults to 5.
*
* <p>Corresponds to <a
* href="https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules.html">index.number_of_shards</a>
*/
public abstract Builder indexShards(int indexShards);
/**
* The number of replica copies of each shard in the index. Each shard and its replicas are
* assigned to a machine in the cluster. Increasing the number of replicas and machines in the
* cluster will improve read performance, but not write performance. Number of replicas can be
* changed for existing indices. Defaults to 1. It is highly discouraged to set this to 0 as it
* would mean a machine failure results in data loss.
*
* <p>Corresponds to <a
* href="https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules.html">index.number_of_replicas</a>
*/
public abstract Builder indexReplicas(int indexReplicas);
@Override
public abstract Builder strictTraceId(boolean strictTraceId);
@Override
public abstract Builder searchEnabled(boolean searchEnabled);
/** {@inheritDoc} */
@Override
public abstract Builder autocompleteKeys(List<String> autocompleteKeys);
/** {@inheritDoc} */
@Override
public abstract Builder autocompleteTtl(int autocompleteTtl);
/** {@inheritDoc} */
@Override
public abstract Builder autocompleteCardinality(int autocompleteCardinality);
@Override
public abstract ElasticsearchStorage build();
abstract IndexNameFormatter.Builder indexNameFormatterBuilder();
Builder() {
}
}
abstract Consumer<ClientOptionsBuilder> clientCustomizer();
abstract Consumer<ClientFactoryBuilder> clientFactoryCustomizer();
public abstract HostsSupplier hostsSupplier();
@Nullable public abstract String pipeline();
public abstract boolean flushOnWrites();
public abstract boolean strictTraceId();
abstract boolean searchEnabled();
abstract List<String> autocompleteKeys();
abstract int autocompleteTtl();
abstract int autocompleteCardinality();
abstract int indexShards();
abstract int indexReplicas();
public abstract IndexNameFormatter indexNameFormatter();
public abstract int namesLookback();
@Override
public SpanStore spanStore() {
ensureIndexTemplates();
return new ElasticsearchSpanStore(this);
}
@Override
public ServiceAndSpanNames serviceAndSpanNames() {
return (ServiceAndSpanNames) spanStore();
}
@Override
public AutocompleteTags autocompleteTags() {
ensureIndexTemplates();
return new ElasticsearchAutocompleteTags(this);
}
@Override
public SpanConsumer spanConsumer() {
ensureIndexTemplates();
return new ElasticsearchSpanConsumer(this);
}
/** Returns the Elasticsearch version of the connected cluster. Internal use only */
public float version() {
return ensureIndexTemplates().version();
}
char indexTypeDelimiter() {
return ensureIndexTemplates().indexTypeDelimiter();
}
/** This is an internal blocking call, only used in tests. */
public void clear() throws IOException {
Set<String> toClear = new LinkedHashSet<>();
toClear.add(indexNameFormatter().formatType(SPAN));
toClear.add(indexNameFormatter().formatType(DEPENDENCY));
for (String index : toClear) clear(index);
}
void clear(String index) throws IOException {
String url = '/' + index;
AggregatedHttpRequest delete = AggregatedHttpRequest.of(HttpMethod.DELETE, url);
http().newCall(delete, BodyConverters.NULL).execute();
}
/** This is blocking so that we can determine if the cluster is healthy or not */
@Override
public CheckResult check() {
HttpClient client = httpClient();
EndpointGroup healthChecked = EndpointGroupRegistry.get("elasticsearch_healthchecked");
if (healthChecked instanceof HttpHealthCheckedEndpointGroup) {
try {
((HttpHealthCheckedEndpointGroup) healthChecked).awaitInitialEndpoints(
client.options().responseTimeoutMillis(), TimeUnit.MILLISECONDS);
} catch (InterruptedException | TimeoutException e) {
return CheckResult.failed(e);
}
}
return ensureClusterReady(indexNameFormatter().formatType(SPAN));
}
@Override public void close() {
EndpointGroup endpointGroup = EndpointGroupRegistry.get("elasticsearch");
if (endpointGroup != null) {
endpointGroup.close();
EndpointGroupRegistry.unregister("elasticsearch");
}
clientFactory().close();
}
CheckResult ensureClusterReady(String index) {
try {
HttpCall.Factory http = http();
AggregatedHttpRequest request = AggregatedHttpRequest.of(
HttpMethod.GET, "/_cluster/health/" + index);
return http.newCall(request, READ_STATUS).execute();
} catch (IOException | RuntimeException e) {
if (e instanceof CompletionException) return CheckResult.failed(e.getCause());
return CheckResult.failed(e);
}
}
@Memoized // since we don't want overlapping calls to apply the index templates
IndexTemplates ensureIndexTemplates() {
try {
IndexTemplates templates = new VersionSpecificTemplates(this).get();
HttpCall.Factory http = http();
ensureIndexTemplate(http, buildUrl(templates, SPAN), templates.span());
ensureIndexTemplate(http, buildUrl(templates, DEPENDENCY), templates.dependency());
ensureIndexTemplate(http, buildUrl(templates, AUTOCOMPLETE), templates.autocomplete());
return templates;
} catch (IOException e) {
throw Platform.get().uncheckedIOException(e);
}
}
String buildUrl(IndexTemplates templates, String type) {
String indexPrefix = indexNameFormatter().index() + templates.indexTypeDelimiter();
return "/_template/" + indexPrefix + type + "_template";
}
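// For example, with index "zipkin" and type SPAN, and assuming the templates use
// ':' as the index-type delimiter, the method above yields
// "/_template/zipkin:span_template".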
@Memoized // a new client factory means new connections
ClientFactory clientFactory() {
ClientFactoryBuilder builder = new ClientFactoryBuilder()
// TODO(anuraaga): Remove after https://github.com/line/armeria/pull/1899
.workerGroup(EventLoopGroups.newEventLoopGroup(
Flags.numCommonWorkers(), "armeria-common-worker", true), true)
.useHttp2Preface(false);
clientFactoryCustomizer().accept(builder);
return builder.build();
}
@Memoized // hosts resolution might imply a network call, and we might make a new client instance
public HttpClient httpClient() {
List<String> hosts = hostsSupplier().get();
if (hosts.isEmpty()) throw new IllegalArgumentException("no hosts configured");
List<URL> urls = hosts.stream()
.map(host -> {
try {
return new URL(host);
} catch (MalformedURLException e) {
throw new IllegalArgumentException("Invalid host: " + host, e);
}
})
.collect(Collectors.toList());
final EndpointGroup endpointGroup;
if (urls.size() == 1) {
URL url = urls.get(0);
if (isIpAddress(url.getHost())) {
endpointGroup = null;
} else {
// A host that isn't an IP may resolve to multiple IP addresses, so we use an endpoint group
// to round-robin over them.
DnsAddressEndpointGroupBuilder dnsEndpoint =
new DnsAddressEndpointGroupBuilder(url.getHost());
if (url.getPort() != -1) {
dnsEndpoint.port(url.getPort());
}
endpointGroup = dnsEndpoint.build();
}
} else {
List<EndpointGroup> endpointGroups = new ArrayList<>();
List<Endpoint> staticEndpoints = new ArrayList<>();
for (URL url : urls) {
if (isIpAddress(url.getHost())) {
staticEndpoints.add(Endpoint.parse(url.getAuthority()));
} else {
// A host that isn't an IP may resolve to multiple IP addresses, so we use an endpoint
// group to round-robin over them. Users can mix addresses that resolve to multiple IPs
// with single IPs freely, they'll all get used.
endpointGroups.add(url.getPort() == -1
? DnsAddressEndpointGroup.of(url.getHost())
: DnsAddressEndpointGroup.of(url.getHost(), url.getPort()));
}
}
if (!staticEndpoints.isEmpty()) {
endpointGroups.add(new StaticEndpointGroup(staticEndpoints));
}
if (endpointGroups.size() == 1) {
endpointGroup = endpointGroups.get(0);
} else {
endpointGroup = new CompositeEndpointGroup(endpointGroups);
}
}
final String clientUrl;
if (endpointGroup != null) {
HttpHealthCheckedEndpointGroup healthChecked = new HttpHealthCheckedEndpointGroupBuilder(
endpointGroup, "/_cluster/health")
.protocol(SessionProtocol.valueOf(urls.get(0).getProtocol().toUpperCase(Locale.ROOT)))
.clientFactory(clientFactory())
.withClientOptions(options -> {
clientCustomizer().accept(options);
return options;
})
.build();
EndpointGroup withFallback = healthChecked
// Even if all the health check requests are failing, we want to go ahead and try to send
// the request to an endpoint anyway. This will generally only happen when the server is
// starting.
.orElse(endpointGroup);
EndpointGroupRegistry.register(
"elasticsearch", withFallback, EndpointSelectionStrategy.ROUND_ROBIN);
// TODO(anuraaga): Remove this after https://github.com/line/armeria/issues/1910 means we
// don't need to wait for initial endpoints ourselves.
EndpointGroupRegistry.register(
"elasticsearch_healthchecked", healthChecked, EndpointSelectionStrategy.ROUND_ROBIN);
clientUrl = urls.get(0).getProtocol() + "://group:elasticsearch" + urls.get(0).getPath();
} else {
// Just one non-domain URL, can connect directly without enabling load balancing.
clientUrl = hosts.get(0);
}
ClientOptionsBuilder options = new ClientOptionsBuilder()
.decorator(HttpDecodingClient.newDecorator());
clientCustomizer().accept(options);
HttpClientBuilder client = new HttpClientBuilder(clientUrl)
.factory(clientFactory())
.options(options.build());
return client.build();
}
@Override public final String toString() {
return "ElasticsearchStorage{hosts=" + hostsSupplier().get()
+ ", index=" + indexNameFormatter().index() + "}";
}
static boolean isIpAddress(String address) {
return zipkin2.Endpoint.newBuilder().parseIp(address);
}
// TODO(anuraaga): Move this upstream - https://github.com/line/armeria/issues/1897
static class CompositeEndpointGroup
extends AbstractListenable<List<Endpoint>> implements EndpointGroup {
final List<EndpointGroup> endpointGroups;
CompositeEndpointGroup(List<EndpointGroup> endpointGroups) {
this.endpointGroups = endpointGroups;
for (EndpointGroup group : endpointGroups) {
group.addListener(unused -> notifyListeners(endpoints()));
}
}
@Override public List<Endpoint> endpoints() {
List<Endpoint> merged = new ArrayList<>();
for (EndpointGroup group : endpointGroups) {
merged.addAll(group.endpoints());
}
return merged;
}
}
@Memoized // hosts resolution might imply a network call, and we might make a new client instance
public HttpCall.Factory http() {
return new HttpCall.Factory(httpClient());
}
ElasticsearchStorage() {
}
static final BodyConverter<CheckResult> READ_STATUS = new BodyConverter<CheckResult>() {
@Override public CheckResult convert(HttpData body) throws IOException {
String result = body.toStringUtf8();
JsonParser status = enterPath(JSON_FACTORY.createParser(result), "status");
if (status == null) {
throw new IllegalArgumentException("Health status couldn't be read " + result);
}
if ("RED".equalsIgnoreCase(status.getText())) {
return CheckResult.failed(new IllegalStateException("Health status is RED"));
}
return CheckResult.OK;
}
@Override public String toString() {
return "ReadStatus";
}
};
}
| 1 | 15,327 | Why is this called per request? If so, this also adds a lock internally. Can we not add this for the moment, until analyzed, as this assumption isn't carried to the several other impls? The only other consumer of this is the HTTP API, and that isn't hit often enough for specializations. | openzipkin-zipkin | java |
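For context on the locking concern raised in this review: a @Memoized accessor is typically generated with double-checked locking, roughly like the sketch below (a minimal illustration, not the exact AutoValue output; all names are hypothetical).

abstract class MemoizedSpanStoreSketch {
  private volatile Object spanStore; // cache, null until the first call

  abstract Object computeSpanStore(); // stands in for the original spanStore() body

  Object spanStore() {
    Object result = spanStore;
    if (result == null) {       // fast path: no lock once initialized
      synchronized (this) {     // this is the lock the reviewer is asking about
        result = spanStore;
        if (result == null) {
          spanStore = result = computeSpanStore();
        }
      }
    }
    return result;
  }
}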
@@ -106,7 +106,7 @@ function nativeSelectValue(node) {
}
const options = querySelectorAll(vNode, 'option');
- const selectedOptions = options.filter(option => option.hasAttr('selected'));
+ const selectedOptions = options.filter(option => option.actualNode.selected);
// browser automatically selects the first option
if (!selectedOptions.length) { | 1 | import getRole from '../aria/get-role';
import unsupported from './unsupported';
import visibleVirtual from './visible-virtual';
import accessibleTextVirtual from './accessible-text-virtual';
import isNativeTextbox from '../forms/is-native-textbox';
import isNativeSelect from '../forms/is-native-select';
import isAriaTextbox from '../forms/is-aria-textbox';
import isAriaListbox from '../forms/is-aria-listbox';
import isAriaCombobox from '../forms/is-aria-combobox';
import isAriaRange from '../forms/is-aria-range';
import getOwnedVirtual from '../aria/get-owned-virtual';
import isHiddenWithCSS from '../dom/is-hidden-with-css';
import AbstractVirtualNode from '../../core/base/virtual-node/abstract-virtual-node';
import { getNodeFromTree, querySelectorAll } from '../../core/utils';
import log from '../../core/log';
const controlValueRoles = [
'textbox',
'progressbar',
'scrollbar',
'slider',
'spinbutton',
'combobox',
'listbox'
];
export const formControlValueMethods = {
nativeTextboxValue,
nativeSelectValue,
ariaTextboxValue,
ariaListboxValue,
ariaComboboxValue,
ariaRangeValue
};
/**
* Calculate value of a form control
*
* @param {VirtualNode} element The VirtualNode instance whose value we want
* @param {Object} context
* @property {Element} startNode First node in accessible name computation
* @property {String[]} unsupported List of roles where value computation is unsupported
* @property {Bool} debug Enable logging for formControlValue
* @return {string} The calculated value
*/
function formControlValue(virtualNode, context = {}) {
const { actualNode } = virtualNode;
const unsupportedRoles = unsupported.accessibleNameFromFieldValue || [];
const role = getRole(virtualNode);
if (
// For the targeted node, the accessible name is never the value:
context.startNode === virtualNode ||
// Only elements with a certain role can return their value
!controlValueRoles.includes(role) ||
// Skip unsupported roles
unsupportedRoles.includes(role)
) {
return '';
}
// Object.values:
const valueMethods = Object.keys(formControlValueMethods).map(
name => formControlValueMethods[name]
);
// Return the value of the first step that returns with text
const valueString = valueMethods.reduce((accName, step) => {
return accName || step(virtualNode, context);
}, '');
if (context.debug) {
log(valueString || '{empty-value}', actualNode, context);
}
return valueString;
}
/**
* Calculate value of a native textbox element (input and textarea)
*
* @param {VirtualNode|Element} element The element whose value we want
* @return {string} The calculated value
*/
function nativeTextboxValue(node) {
const vNode =
node instanceof AbstractVirtualNode ? node : getNodeFromTree(node);
if (isNativeTextbox(vNode)) {
return vNode.props.value || '';
}
return '';
}
/**
* Calculate value of a select element
*
* @param {VirtualNode} element The VirtualNode instance whose value we want
* @return {string} The calculated value
*/
function nativeSelectValue(node) {
const vNode =
node instanceof AbstractVirtualNode ? node : getNodeFromTree(node);
if (!isNativeSelect(vNode)) {
return '';
}
const options = querySelectorAll(vNode, 'option');
const selectedOptions = options.filter(option => option.hasAttr('selected'));
// browser automatically selects the first option
if (!selectedOptions.length) {
selectedOptions.push(options[0]);
}
return selectedOptions.map(option => visibleVirtual(option)).join(' ') || '';
}
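// Note on the hasAttr('selected') check above (illustrative sketch): the
// `selected` attribute only reflects the initial markup, while the `selected`
// DOM property tracks live state after user interaction:
//
//   const select = document.createElement('select');
//   select.innerHTML = '<option>a</option><option selected>b</option>';
//   select.options[0].selected = true;           // the user picks "a"
//   select.options[0].hasAttribute('selected');  // false - attribute unchanged
//   select.options[0].selected;                  // true - property tracks state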
/**
* Calculate value of an element with role=textbox
*
* @param {VirtualNode} element The VirtualNode instance whose value we want
* @return {string} The calculated value
*/
function ariaTextboxValue(node) {
const vNode =
node instanceof AbstractVirtualNode ? node : getNodeFromTree(node);
const { actualNode } = vNode;
if (!isAriaTextbox(vNode)) {
return '';
}
if (!actualNode || (actualNode && !isHiddenWithCSS(actualNode))) {
return visibleVirtual(vNode, true);
} else {
return actualNode.textContent;
}
}
/**
* Calculate value of an element with role=listbox
*
* @param {VirtualNode} element The VirtualNode instance whose value we want
* @param {Object} context The accessible name computation context
* @property {Element} startNode First node in accessible name computation
* @property {String[]} unsupported List of roles where value computation is unsupported
* @property {Bool} debug Enable logging for formControlValue
* @return {string} The calculated value
*/
function ariaListboxValue(node, context) {
const vNode =
node instanceof AbstractVirtualNode ? node : getNodeFromTree(node);
if (!isAriaListbox(vNode)) {
return '';
}
const selected = getOwnedVirtual(vNode).filter(
owned =>
getRole(owned) === 'option' && owned.attr('aria-selected') === 'true'
);
if (selected.length === 0) {
return '';
}
// Note: Even with aria-multiselectable, only the first value will be used
// in the accessible name. This isn't spec'ed out, but seems to be how all
// browsers behave.
return accessibleTextVirtual(selected[0], context);
}
/**
* Calculate value of an element with role=combobox
*
* @param {VirtualNode} element The VirtualNode instance whose value we want
* @param {Object} context The accessible name computation context
* @property {Element} startNode First node in accessible name computation
* @property {String[]} unsupported List of roles where value computation is unsupported
* @property {Bool} debug Enable logging for formControlValue
* @return {string} The calculated value
*/
function ariaComboboxValue(node, context) {
const vNode =
node instanceof AbstractVirtualNode ? node : getNodeFromTree(node);
// For combobox, find the first owned listbox:
if (!isAriaCombobox(vNode)) {
return '';
}
const listbox = getOwnedVirtual(vNode).filter(
elm => getRole(elm) === 'listbox'
)[0];
return listbox ? ariaListboxValue(listbox, context) : '';
}
/**
* Calculate value of an element with range-type role
*
* @param {VirtualNode|Node} element The VirtualNode instance whose value we want
* @return {string} The calculated value
*/
function ariaRangeValue(node) {
const vNode =
node instanceof AbstractVirtualNode ? node : getNodeFromTree(node);
if (!isAriaRange(vNode) || !vNode.hasAttr('aria-valuenow')) {
return '';
}
// Validate the number, if not, return 0.
// Chrome 70 typecasts this, Firefox 62 does not
const valueNow = +vNode.attr('aria-valuenow');
return !isNaN(valueNow) ? String(valueNow) : '0';
}
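// Examples of the coercion above: aria-valuenow="0.5" yields "0.5", while a
// non-numeric value such as aria-valuenow="banana" yields "0".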
export default formControlValue;
| 1 | 17,031 | I believe this means dependent checks can no longer operate on virtual nodes, which is something we've been pushing for lately. We may need to find a different way to solve this issue, as I don't think we want to cause a regression while fixing a bug. | dequelabs-axe-core | js |
@@ -8,13 +8,15 @@ from kinto.core.storage import (
DEFAULT_ID_FIELD, DEFAULT_MODIFIED_FIELD, DEFAULT_DELETED_FIELD,
MISSING)
from kinto.core.storage.postgresql.client import create_from_config
+from kinto.core.storage.postgresql.migrator import Migrator
from kinto.core.utils import COMPARISON
logger = logging.getLogger(__name__)
+HERE = os.path.abspath(os.path.dirname(__file__))
-class Storage(StorageBase):
+class Storage(StorageBase, Migrator):
"""Storage backend using PostgreSQL.
Recommended in production (*requires PostgreSQL 9.4 or higher*). | 1 | import logging
import os
import warnings
from collections import defaultdict
from kinto.core.storage import (
StorageBase, exceptions,
DEFAULT_ID_FIELD, DEFAULT_MODIFIED_FIELD, DEFAULT_DELETED_FIELD,
MISSING)
from kinto.core.storage.postgresql.client import create_from_config
from kinto.core.utils import COMPARISON
logger = logging.getLogger(__name__)
class Storage(StorageBase):
"""Storage backend using PostgreSQL.
Recommended in production (*requires PostgreSQL 9.4 or higher*).
Enable in configuration::
kinto.storage_backend = kinto.core.storage.postgresql
Database location URI can be customized::
kinto.storage_url = postgres://user:[email protected]:5432/dbname
Alternatively, username and password could also rely on system user ident
or even specified in :file:`~/.pgpass` (*see PostgreSQL documentation*).
.. note::
Some tables and indices are created when ``kinto migrate`` is run.
This requires some privileges on the database, or some error will
be raised.
**Alternatively**, the schema can be initialized outside the
python application, using the SQL file located in
:file:`kinto/core/storage/postgresql/schema.sql`. This makes it possible to
distinguish schema manipulation privileges from schema usage.
A connection pool is enabled by default::
kinto.storage_pool_size = 10
kinto.storage_maxoverflow = 10
kinto.storage_max_backlog = -1
kinto.storage_pool_recycle = -1
kinto.storage_pool_timeout = 30
kinto.cache_poolclass =
kinto.core.storage.postgresql.pool.QueuePoolWithMaxBacklog
The ``max_backlog`` limits the number of threads that can be in the queue
waiting for a connection. Once this limit has been reached, any further
attempts to acquire a connection will be rejected immediately, instead of
locking up all threads by keeping them waiting in the queue.
See `dedicated section in SQLAlchemy documentation
<http://docs.sqlalchemy.org/en/rel_1_0/core/engines.html>`_
for default values and behaviour.
.. note::
Using a `dedicated connection pool <http://pgpool.net>`_ is still
recommended to allow load balancing, replication or limit the number
of connections used in a multi-process deployment.
""" # NOQA
schema_version = 20
def __init__(self, client, max_fetch_size, *args, **kwargs):
super().__init__(*args, **kwargs)
self.client = client
self._max_fetch_size = max_fetch_size
def _execute_sql_file(self, filepath):
with open(filepath) as f:
schema = f.read()
# Since called outside request, force commit.
with self.client.connect(force_commit=True) as conn:
conn.execute(schema)
def initialize_schema(self, dry_run=False):
"""Create PostgreSQL tables, and run necessary schema migrations.
.. note::
Relies on JSONB fields, available in recent versions of PostgreSQL.
"""
here = os.path.abspath(os.path.dirname(__file__))
version = self._get_installed_version()
if not version:
filepath = os.path.join(here, 'schema.sql')
logger.info('Create PostgreSQL storage schema at version '
'{} from {}'.format(self.schema_version, filepath))
# Check database encoding and timezone before creating the schema.
self._check_database_encoding()
self._check_database_timezone()
# Create full schema.
if not dry_run:
self._execute_sql_file(filepath)
logger.info('Created PostgreSQL storage schema (version {}).'.format(
self.schema_version))
return
logger.info('Detected PostgreSQL storage schema version {}.'.format(version))
migrations = [(v, v + 1) for v in range(version, self.schema_version)]
if not migrations:
logger.info('PostgreSQL storage schema is up-to-date.')
return
for migration in migrations:
# Check order of migrations.
expected = migration[0]
current = self._get_installed_version()
error_msg = 'Expected version {}. Found version {}.'
if not dry_run and expected != current:
raise AssertionError(error_msg.format(expected, current))
logger.info('Migrate PostgreSQL storage schema from'
' version {} to {}.'.format(*migration))
filename = 'migration_{0:03d}_{1:03d}.sql'.format(*migration)
filepath = os.path.join(here, 'migrations', filename)
logger.info('Execute PostgreSQL storage migration from {}'.format(filepath))
if not dry_run:
self._execute_sql_file(filepath)
logger.info('PostgreSQL storage schema migration {}'.format(
'simulated.' if dry_run else 'done.'))
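# Example of the naming scheme above: migrating from schema version 19 to 20
# executes 'migrations/migration_019_020.sql', resolved relative to this module.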
def _check_database_timezone(self):
# Make sure database has UTC timezone.
query = "SELECT current_setting('TIMEZONE') AS timezone;"
with self.client.connect() as conn:
result = conn.execute(query)
record = result.fetchone()
timezone = record['timezone'].upper()
if timezone != 'UTC': # pragma: no cover
msg = 'Database timezone is not UTC ({})'.format(timezone)
warnings.warn(msg)
logger.warning(msg)
def _check_database_encoding(self):
# Make sure database is UTF-8.
query = """
SELECT pg_encoding_to_char(encoding) AS encoding
FROM pg_database
WHERE datname = current_database();
"""
with self.client.connect() as conn:
result = conn.execute(query)
record = result.fetchone()
encoding = record['encoding'].lower()
if encoding != 'utf8': # pragma: no cover
raise AssertionError('Unexpected database encoding {}'.format(encoding))
def _get_installed_version(self):
"""Return current version of schema or None if not any found.
"""
query = "SELECT tablename FROM pg_tables WHERE tablename = 'metadata';"
with self.client.connect() as conn:
result = conn.execute(query)
tables_exist = result.rowcount > 0
if not tables_exist:
return
query = """
SELECT value AS version
FROM metadata
WHERE name = 'storage_schema_version'
ORDER BY LPAD(value, 3, '0') DESC;
"""
with self.client.connect() as conn:
result = conn.execute(query)
if result.rowcount > 0:
return int(result.fetchone()['version'])
else:
# Guess current version.
query = 'SELECT COUNT(*) FROM metadata;'
result = conn.execute(query)
was_flushed = int(result.fetchone()[0]) == 0
if was_flushed:
error_msg = 'Missing schema history: consider version {}.'
logger.warning(error_msg.format(self.schema_version))
return self.schema_version
# In the first versions of Cliquet, there was no migration.
return 1
def flush(self, auth=None):
"""Delete records from tables without destroying schema. Mainly used
in tests suites.
"""
query = """
DELETE FROM records;
DELETE FROM timestamps;
DELETE FROM metadata;
"""
with self.client.connect(force_commit=True) as conn:
conn.execute(query)
logger.debug('Flushed PostgreSQL storage tables')
def collection_timestamp(self, collection_id, parent_id, auth=None):
query_existing = """
WITH existing_timestamps AS (
-- Timestamp of latest record.
(
SELECT last_modified, as_epoch(last_modified) AS last_epoch
FROM records
WHERE parent_id = :parent_id
AND collection_id = :collection_id
ORDER BY last_modified DESC
LIMIT 1
)
-- Timestamp of empty collection.
UNION
(
SELECT last_modified, as_epoch(last_modified) AS last_epoch
FROM timestamps
WHERE parent_id = :parent_id
AND collection_id = :collection_id
)
)
SELECT MAX(last_modified) AS last_modified, MAX(last_epoch) AS last_epoch
FROM existing_timestamps
"""
create_if_missing = """
INSERT INTO timestamps (parent_id, collection_id, last_modified)
VALUES (:parent_id, :collection_id, COALESCE(:last_modified, clock_timestamp()::timestamp))
ON CONFLICT (parent_id, collection_id) DO NOTHING
RETURNING as_epoch(last_modified) AS last_epoch
"""
placeholders = dict(parent_id=parent_id, collection_id=collection_id)
with self.client.connect(readonly=False) as conn:
existing_ts = None
ts_result = conn.execute(query_existing, placeholders)
row = ts_result.fetchone() # Will return (None, None) when empty.
existing_ts = row['last_modified']
create_result = conn.execute(create_if_missing,
dict(last_modified=existing_ts, **placeholders))
record = create_result.fetchone() or row
return record['last_epoch']
def create(self, collection_id, parent_id, record, id_generator=None,
id_field=DEFAULT_ID_FIELD,
modified_field=DEFAULT_MODIFIED_FIELD,
auth=None):
id_generator = id_generator or self.id_generator
record = {**record}
if id_field not in record:
record[id_field] = id_generator()
# Remove redundancy in data field
query_record = {**record}
query_record.pop(id_field, None)
query_record.pop(modified_field, None)
# If there is a record in the table and it is deleted = TRUE,
# we want to replace it. Otherwise, we want to do nothing and
# throw a UnicityError. Per
# https://stackoverflow.com/questions/15939902/is-select-or-insert-in-a-function-prone-to-race-conditions/15950324#15950324
# a WHERE clause in the DO UPDATE will lock the conflicting
# row whether it is true or not, so the subsequent SELECT is
# safe. We add a constant "inserted" field to know whether we
# need to throw or not.
query = """
WITH create_record AS (
INSERT INTO records (id, parent_id, collection_id, data, last_modified, deleted)
VALUES (:object_id, :parent_id,
:collection_id, (:data)::JSONB,
from_epoch(:last_modified),
FALSE)
ON CONFLICT (id, parent_id, collection_id) DO UPDATE
SET last_modified = from_epoch(:last_modified),
data = (:data)::JSONB,
deleted = FALSE
WHERE records.deleted = TRUE
RETURNING id, data, last_modified
)
SELECT id, data, as_epoch(last_modified) AS last_modified, TRUE AS inserted
FROM create_record
UNION ALL
SELECT id, data, as_epoch(last_modified) AS last_modified, FALSE AS inserted FROM records
WHERE id = :object_id AND parent_id = :parent_id AND collection_id = :collection_id
LIMIT 1;
"""
safe_holders = {}
placeholders = dict(object_id=record[id_field],
parent_id=parent_id,
collection_id=collection_id,
last_modified=record.get(modified_field),
data=self.json.dumps(query_record))
with self.client.connect() as conn:
result = conn.execute(query % safe_holders, placeholders)
inserted = result.fetchone()
if not inserted['inserted']:
record = inserted['data']
record[id_field] = inserted['id']
record[modified_field] = inserted['last_modified']
raise exceptions.UnicityError(id_field, record)
record[modified_field] = inserted['last_modified']
return record
def get(self, collection_id, parent_id, object_id,
id_field=DEFAULT_ID_FIELD,
modified_field=DEFAULT_MODIFIED_FIELD,
auth=None):
query = """
SELECT as_epoch(last_modified) AS last_modified, data
FROM records
WHERE id = :object_id
AND parent_id = :parent_id
AND collection_id = :collection_id
AND NOT deleted;
"""
placeholders = dict(object_id=object_id,
parent_id=parent_id,
collection_id=collection_id)
with self.client.connect(readonly=True) as conn:
result = conn.execute(query, placeholders)
if result.rowcount == 0:
raise exceptions.RecordNotFoundError(object_id)
else:
existing = result.fetchone()
record = existing['data']
record[id_field] = object_id
record[modified_field] = existing['last_modified']
return record
def update(self, collection_id, parent_id, object_id, record,
id_field=DEFAULT_ID_FIELD,
modified_field=DEFAULT_MODIFIED_FIELD,
auth=None):
# Remove redundancy in data field
query_record = {**record}
query_record.pop(id_field, None)
query_record.pop(modified_field, None)
query = """
INSERT INTO records (id, parent_id, collection_id, data, last_modified, deleted)
VALUES (:object_id, :parent_id,
:collection_id, (:data)::JSONB,
from_epoch(:last_modified),
FALSE)
ON CONFLICT (id, parent_id, collection_id) DO UPDATE
SET data = (:data)::JSONB,
deleted = FALSE,
last_modified = GREATEST(from_epoch(:last_modified),
EXCLUDED.last_modified)
RETURNING as_epoch(last_modified) AS last_modified;
"""
placeholders = dict(object_id=object_id,
parent_id=parent_id,
collection_id=collection_id,
last_modified=record.get(modified_field),
data=self.json.dumps(query_record))
with self.client.connect() as conn:
result = conn.execute(query, placeholders)
updated = result.fetchone()
record = {**record, id_field: object_id}
record[modified_field] = updated['last_modified']
return record
def delete(self, collection_id, parent_id, object_id,
id_field=DEFAULT_ID_FIELD, with_deleted=True,
modified_field=DEFAULT_MODIFIED_FIELD,
deleted_field=DEFAULT_DELETED_FIELD,
auth=None, last_modified=None):
if with_deleted:
query = """
UPDATE records
SET deleted=TRUE,
data=(:deleted_data)::JSONB,
last_modified=from_epoch(:last_modified)
WHERE id = :object_id
AND parent_id = :parent_id
AND collection_id = :collection_id
AND deleted = FALSE
RETURNING as_epoch(last_modified) AS last_modified;
"""
else:
query = """
DELETE FROM records
WHERE id = :object_id
AND parent_id = :parent_id
AND collection_id = :collection_id
AND deleted = FALSE
RETURNING as_epoch(last_modified) AS last_modified;
"""
deleted_data = self.json.dumps(dict([(deleted_field, True)]))
placeholders = dict(object_id=object_id,
parent_id=parent_id,
collection_id=collection_id,
last_modified=last_modified,
deleted_data=deleted_data)
with self.client.connect() as conn:
result = conn.execute(query, placeholders)
if result.rowcount == 0:
raise exceptions.RecordNotFoundError(object_id)
inserted = result.fetchone()
record = {}
record[modified_field] = inserted['last_modified']
record[id_field] = object_id
record[deleted_field] = True
return record
def delete_all(self, collection_id, parent_id, filters=None,
sorting=None, pagination_rules=None, limit=None,
id_field=DEFAULT_ID_FIELD, with_deleted=True,
modified_field=DEFAULT_MODIFIED_FIELD,
deleted_field=DEFAULT_DELETED_FIELD,
auth=None):
if with_deleted:
query = """
WITH matching_records AS (
SELECT id, parent_id, collection_id
FROM records
WHERE {parent_id_filter}
{collection_id_filter}
AND deleted = FALSE
{conditions_filter}
{pagination_rules}
{sorting}
LIMIT :pagination_limit
FOR UPDATE
)
UPDATE records
SET deleted=TRUE, data=(:deleted_data)::JSONB
FROM matching_records
WHERE records.id = matching_records.id
AND records.parent_id = matching_records.parent_id
AND records.collection_id = matching_records.collection_id
RETURNING records.id, as_epoch(last_modified) AS last_modified;
"""
else:
query = """
WITH matching_records AS (
SELECT id, parent_id, collection_id
FROM records
WHERE {parent_id_filter}
{collection_id_filter}
AND deleted = FALSE
{conditions_filter}
{pagination_rules}
{sorting}
LIMIT :pagination_limit
FOR UPDATE
)
DELETE
FROM records
USING matching_records
WHERE records.id = matching_records.id
AND records.parent_id = matching_records.parent_id
AND records.collection_id = matching_records.collection_id
RETURNING records.id, as_epoch(last_modified) AS last_modified;
"""
id_field = id_field or self.id_field
modified_field = modified_field or self.modified_field
deleted_data = self.json.dumps(dict([(deleted_field, True)]))
placeholders = dict(parent_id=parent_id,
collection_id=collection_id,
deleted_data=deleted_data)
# Safe strings
safeholders = defaultdict(str)
# Handle parent_id as a regex only if it contains *
if '*' in parent_id:
safeholders['parent_id_filter'] = 'parent_id LIKE :parent_id'
placeholders['parent_id'] = parent_id.replace('*', '%')
else:
safeholders['parent_id_filter'] = 'parent_id = :parent_id'
# If collection is None, remove it from query.
if collection_id is None:
safeholders['collection_id_filter'] = ''
else:
safeholders['collection_id_filter'] = 'AND collection_id = :collection_id' # NOQA
if filters:
safe_sql, holders = self._format_conditions(filters,
id_field,
modified_field)
safeholders['conditions_filter'] = 'AND {}'.format(safe_sql)
placeholders.update(**holders)
if sorting:
sql, holders = self._format_sorting(sorting, id_field,
modified_field)
safeholders['sorting'] = sql
placeholders.update(**holders)
if pagination_rules:
sql, holders = self._format_pagination(pagination_rules, id_field,
modified_field)
safeholders['pagination_rules'] = 'AND {}'.format(sql)
placeholders.update(**holders)
# Limit the number of results (pagination).
limit = min(self._max_fetch_size, limit) if limit else self._max_fetch_size
placeholders['pagination_limit'] = limit
with self.client.connect() as conn:
result = conn.execute(query.format_map(safeholders), placeholders)
deleted = result.fetchmany(self._max_fetch_size)
records = []
for result in deleted:
record = {}
record[id_field] = result['id']
record[modified_field] = result['last_modified']
record[deleted_field] = True
records.append(record)
return records
def purge_deleted(self, collection_id, parent_id, before=None,
id_field=DEFAULT_ID_FIELD,
modified_field=DEFAULT_MODIFIED_FIELD,
auth=None):
delete_tombstones = """
DELETE
FROM records
WHERE {parent_id_filter}
{collection_id_filter}
{conditions_filter}
"""
id_field = id_field or self.id_field
modified_field = modified_field or self.modified_field
placeholders = dict(parent_id=parent_id,
collection_id=collection_id)
# Safe strings
safeholders = defaultdict(str)
# Handle parent_id as a regex only if it contains *
if '*' in parent_id:
safeholders['parent_id_filter'] = 'parent_id LIKE :parent_id'
placeholders['parent_id'] = parent_id.replace('*', '%')
else:
safeholders['parent_id_filter'] = 'parent_id = :parent_id'
# If collection is None, remove it from query.
if collection_id is None:
safeholders['collection_id_filter'] = ''
else:
safeholders['collection_id_filter'] = 'AND collection_id = :collection_id' # NOQA
if before is not None:
safeholders['conditions_filter'] = (
'AND as_epoch(last_modified) < :before')
placeholders['before'] = before
with self.client.connect() as conn:
result = conn.execute(delete_tombstones.format_map(safeholders), placeholders)
deleted = result.rowcount
# If purging everything from a parent_id, then clear timestamps.
if collection_id is None and before is None:
delete_timestamps = """
DELETE
FROM timestamps
WHERE {parent_id_filter}
"""
conn.execute(delete_timestamps.format_map(safeholders), placeholders)
return deleted
def get_all(self, collection_id, parent_id, filters=None, sorting=None,
pagination_rules=None, limit=None, include_deleted=False,
id_field=DEFAULT_ID_FIELD,
modified_field=DEFAULT_MODIFIED_FIELD,
deleted_field=DEFAULT_DELETED_FIELD,
auth=None):
query = """
WITH collection_filtered AS (
SELECT id, last_modified, data, deleted
FROM records
WHERE {parent_id_filter}
AND collection_id = :collection_id
{conditions_deleted}
{conditions_filter}
),
total_filtered AS (
SELECT COUNT(id) AS count_total
FROM collection_filtered
WHERE NOT deleted
),
paginated_records AS (
SELECT DISTINCT id
FROM collection_filtered
{pagination_rules}
)
SELECT count_total,
a.id, as_epoch(a.last_modified) AS last_modified, a.data
FROM paginated_records AS p JOIN collection_filtered AS a ON (a.id = p.id),
total_filtered
{sorting}
LIMIT :pagination_limit;
"""
# Unsafe strings escaped by PostgreSQL
placeholders = dict(parent_id=parent_id,
collection_id=collection_id)
# Safe strings
safeholders = defaultdict(str)
# Handle parent_id as a regex only if it contains *
if '*' in parent_id:
safeholders['parent_id_filter'] = 'parent_id LIKE :parent_id'
placeholders['parent_id'] = parent_id.replace('*', '%')
else:
safeholders['parent_id_filter'] = 'parent_id = :parent_id'
if filters:
safe_sql, holders = self._format_conditions(filters,
id_field,
modified_field)
safeholders['conditions_filter'] = 'AND {}'.format(safe_sql)
placeholders.update(**holders)
if not include_deleted:
safeholders['conditions_deleted'] = 'AND NOT deleted'
if sorting:
sql, holders = self._format_sorting(sorting, id_field,
modified_field)
safeholders['sorting'] = sql
placeholders.update(**holders)
if pagination_rules:
sql, holders = self._format_pagination(pagination_rules, id_field,
modified_field)
safeholders['pagination_rules'] = 'WHERE {}'.format(sql)
placeholders.update(**holders)
# Limit the number of results (pagination).
limit = min(self._max_fetch_size, limit) if limit else self._max_fetch_size
placeholders['pagination_limit'] = limit
with self.client.connect(readonly=True) as conn:
result = conn.execute(query.format_map(safeholders), placeholders)
retrieved = result.fetchmany(self._max_fetch_size)
if len(retrieved) == 0:
return [], 0
count_total = retrieved[0]['count_total']
records = []
for result in retrieved:
record = result['data']
record[id_field] = result['id']
record[modified_field] = result['last_modified']
records.append(record)
return records, count_total
def _format_conditions(self, filters, id_field, modified_field,
prefix='filters'):
"""Format the filters list in SQL, with placeholders for safe escaping.
.. note::
All conditions are combined using AND.
.. note::
Field name and value are escaped as they come from HTTP API.
:returns: A SQL string with placeholders, and a dict mapping
placeholders to actual values.
:rtype: tuple
"""
operators = {
COMPARISON.EQ: '=',
COMPARISON.NOT: '<>',
COMPARISON.IN: 'IN',
COMPARISON.EXCLUDE: 'NOT IN',
COMPARISON.LIKE: 'ILIKE',
}
conditions = []
holders = {}
for i, filtr in enumerate(filters):
value = filtr.value
is_like_query = filtr.operator == COMPARISON.LIKE
if filtr.field == id_field:
sql_field = 'id'
if isinstance(value, int):
value = str(value)
elif filtr.field == modified_field:
sql_field = 'as_epoch(last_modified)'
else:
column_name = 'data'
# Subfields: ``person.name`` becomes ``data->person->>name``
subfields = filtr.field.split('.')
for j, subfield in enumerate(subfields):
# Safely escape field name
field_holder = '{}_field_{}_{}'.format(prefix, i, j)
holders[field_holder] = subfield
# Use ->> to convert the last level to text if
# needed for LIKE query. (Other queries do JSONB comparison.)
column_name += '->>' if j == len(subfields) - 1 and is_like_query else '->'
column_name += ':{}'.format(field_holder)
sql_field = column_name
string_field = filtr.field in (id_field, modified_field) or is_like_query
if not string_field and value != MISSING:
# JSONB-ify the value.
if filtr.operator not in (COMPARISON.IN, COMPARISON.EXCLUDE):
value = self.json.dumps(value)
else:
value = [self.json.dumps(v) for v in value]
if filtr.operator in (COMPARISON.IN, COMPARISON.EXCLUDE):
value = tuple(value)
# WHERE field IN (); -- Fails with syntax error.
if len(value) == 0:
value = (None,)
if is_like_query:
# Operand should be a string.
# Add implicit start/end wildchars if none is specified.
if '*' not in value:
value = '*{}*'.format(value)
value = value.replace('*', '%')
if filtr.operator == COMPARISON.HAS:
operator = 'IS NOT NULL' if filtr.value else 'IS NULL'
cond = '{} {}'.format(sql_field, operator)
elif value != MISSING:
# Safely escape value. MISSINGs get handled below.
value_holder = '{}_value_{}'.format(prefix, i)
holders[value_holder] = value
sql_operator = operators.setdefault(filtr.operator,
filtr.operator.value)
cond = '{} {} :{}'.format(sql_field, sql_operator, value_holder)
# If the field is missing, column_name will produce
# NULL. NULL has strange properties with comparisons
# in SQL -- NULL = anything => NULL, NULL <> anything => NULL.
# We generally want missing fields to be treated as a
# special value that compares as different from
# everything, including JSON null. Do this on a
# per-operator basis.
null_false_operators = (
# NULLs aren't EQ to anything (definitionally).
COMPARISON.EQ,
# So they can't match anything in an INCLUDE.
COMPARISON.IN,
# Nor can they be LIKE anything.
COMPARISON.LIKE,
)
null_true_operators = (
# NULLs are automatically not equal to everything.
COMPARISON.NOT,
# Thus they can never be excluded.
COMPARISON.EXCLUDE,
# Match Postgres's default sort behavior
# (NULLS LAST) by allowing NULLs to
# automatically be greater than everything.
COMPARISON.GT, COMPARISON.MIN,
)
if not (filtr.field == id_field or filtr.field == modified_field):
if value == MISSING:
# Handle MISSING values. The main use case for this is
# pagination, since there's no way to encode MISSING
# at the HTTP API level. Because we only need to cover
# pagination, we don't have to worry about any
# operators besides LT, LE, GT, GE, and EQ, and
# never worry about id_field or modified_field.
#
# Comparing a value against NULL is not the same
# as comparing a NULL against some other value, so
# we need another set of operators for which
# NULLs are OK.
if filtr.operator in (COMPARISON.EQ, COMPARISON.MIN):
# If a row is NULL, then it can be == NULL
# (for the purposes of pagination).
# >= NULL should only match rows that are
# NULL, since there's nothing higher.
cond = '{} IS NULL'.format(sql_field)
elif filtr.operator == COMPARISON.LT:
# If we're looking for < NULL, match only
# non-nulls.
cond = '{} IS NOT NULL'.format(sql_field)
elif filtr.operator == COMPARISON.MAX:
# <= NULL should include everything -- NULL
# because it's equal, and non-nulls because
# they're <.
cond = 'TRUE'
elif filtr.operator == COMPARISON.GT:
# Nothing can be greater than NULL (that is,
# higher in search order).
cond = 'FALSE'
else:
raise ValueError('Somehow we got a filter with MISSING value')
elif filtr.operator in null_false_operators:
cond = '({} IS NOT NULL AND {})'.format(sql_field, cond)
elif filtr.operator in null_true_operators:
cond = '({} IS NULL OR {})'.format(sql_field, cond)
else:
# No need to check for LT and MAX because NULL < foo
# is NULL, which is falsy in SQL.
pass
conditions.append(cond)
safe_sql = ' AND '.join(conditions)
return safe_sql, holders
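# Worked example of the escaping above: a filter person.name == "Ann" becomes
# (data->:filters_field_0_0->:filters_field_0_1 IS NOT NULL
# AND data->:filters_field_0_0->:filters_field_0_1 = :filters_value_0)
# with holders {'filters_field_0_0': 'person', 'filters_field_0_1': 'name',
# 'filters_value_0': '"Ann"'} -- note the value itself is JSON-encoded.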
def _format_pagination(self, pagination_rules, id_field, modified_field):
"""Format the pagination rules in SQL, with placeholders for
safe escaping.
.. note::
All rules are combined using OR.
.. note::
Field names are escaped as they come from HTTP API.
:returns: A SQL string with placeholders, and a dict mapping
placeholders to actual values.
:rtype: tuple
"""
rules = []
placeholders = {}
for i, rule in enumerate(pagination_rules):
prefix = 'rules_{}'.format(i)
safe_sql, holders = self._format_conditions(rule,
id_field,
modified_field,
prefix=prefix)
rules.append(safe_sql)
placeholders.update(**holders)
safe_sql = ' OR '.join(['({})'.format(r) for r in rules])
return safe_sql, placeholders
def _format_sorting(self, sorting, id_field, modified_field):
"""Format the sorting in SQL, with placeholders for safe escaping.
.. note::
Field names are escaped as they come from HTTP API.
:returns: A SQL string with placeholders, and a dict mapping
placeholders to actual values.
:rtype: tuple
"""
sorts = []
holders = {}
for i, sort in enumerate(sorting):
if sort.field == id_field:
sql_field = 'id'
elif sort.field == modified_field:
sql_field = 'last_modified'
else:
# Subfields: ``person.name`` becomes ``data->person->name``
subfields = sort.field.split('.')
sql_field = 'data'
for j, subfield in enumerate(subfields):
# Safely escape field name
field_holder = 'sort_field_{}_{}'.format(i, j)
holders[field_holder] = subfield
sql_field += '->(:{})'.format(field_holder)
sql_direction = 'ASC' if sort.direction > 0 else 'DESC'
sql_sort = '{} {}'.format(sql_field, sql_direction)
sorts.append(sql_sort)
safe_sql = 'ORDER BY {}'.format(', '.join(sorts))
return safe_sql, holders
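# Worked example of the escaping above: sorting by ("person.name", ascending)
# yields ORDER BY data->(:sort_field_0_0)->(:sort_field_0_1) ASC with holders
# {'sort_field_0_0': 'person', 'sort_field_0_1': 'name'}.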
def load_from_config(config):
settings = config.get_settings()
max_fetch_size = int(settings['storage_max_fetch_size'])
strict = settings.get('storage_strict_json', False)
client = create_from_config(config, prefix='storage_')
return Storage(client=client, max_fetch_size=max_fetch_size, strict_json=strict)
| 1 | 11,366 | ditto about use of `abspath` | Kinto-kinto | py |
@@ -30,13 +30,13 @@ public class Euler05Test {
}
private static long smallestPositiveNumberEvenlyDivisibleByAllNumbersFrom1To(int max) {
- final long smallestStepsNeeded = max * (max - 1);
- return Stream.gen(smallestStepsNeeded, prev -> prev + smallestStepsNeeded)
- .findFirst(val -> isEvenlyDivisibleByAllNumbersFrom1To(max, val))
- .get();
+ return Stream.rangeClosed(2, max)
+ .map(PrimeNumbers::factorization)
+ .reduce((m1, m2) -> m1.merged(m2, Math::max))
+ .foldLeft(1L, (xs, x) -> xs * pow(x.key, x.value));
}
- private static boolean isEvenlyDivisibleByAllNumbersFrom1To(int max, long val) {
- return !Stream.rangeClosed(1, max).exists(d -> val % d != 0);
+ private static long pow(long a, long p) {
+ return Stream.rangeClosed(1, p).fold(1L, (xs, x) -> xs * a);
}
} | 1 | /* / \____ _ ______ _____ / \____ ____ _____
* / \__ \/ \ / \__ \ / __// \__ \ / \/ __ \ Javaslang
* _/ // _\ \ \/ / _\ \\_ \/ // _\ \ /\ \__/ / Copyright 2014-2015 Daniel Dietrich
* /___/ \_____/\____/\_____/____/\___\_____/_/ \_/____/ Licensed under the Apache License, Version 2.0
*/
package javaslang.collection.euler;
import javaslang.collection.Stream;
import org.junit.Test;
import static org.assertj.core.api.Assertions.assertThat;
public class Euler05Test {
/**
* <strong>Problem 5: Smallest multiple</strong>
* <p>
* 2520 is the smallest number that can be divided by each of the numbers from 1
* to 10 without any remainder.
* <p>
* What is the smallest positive number that is evenly divisible by all of the
* numbers from 1 to 20?
* <p>
* See also <a href="https://projecteuler.net/problem=5">projecteuler.net problem 5</a>.
*/
@Test
public void shouldSolveProblem5() {
assertThat(smallestPositiveNumberEvenlyDivisibleByAllNumbersFrom1To(10)).isEqualTo(2_520L);
assertThat(smallestPositiveNumberEvenlyDivisibleByAllNumbersFrom1To(20)).isEqualTo(232_792_560L);
}
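// Worked arithmetic for the expected values above: the smallest number evenly
// divisible by 1..10 is the product of the highest prime powers not exceeding
// 10, i.e. 2^3 * 3^2 * 5 * 7 = 8 * 9 * 5 * 7 = 2,520.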
private static long smallestPositiveNumberEvenlyDivisibleByAllNumbersFrom1To(int max) {
final long smallestStepsNeeded = max * (max - 1);
return Stream.gen(smallestStepsNeeded, prev -> prev + smallestStepsNeeded)
.findFirst(val -> isEvenlyDivisibleByAllNumbersFrom1To(max, val))
.get();
}
private static boolean isEvenlyDivisibleByAllNumbersFrom1To(int max, long val) {
return !Stream.rangeClosed(1, max).exists(d -> val % d != 0);
}
}
| 1 | 6,210 | This reduces the runtime by a factor of 50. | vavr-io-vavr | java |
@@ -0,0 +1,5 @@
+package org.openqa.selenium.grid.distributor.remote;
+
+public class RemoteDistributorTest {
+
+} | 1 | 1 | 16,857 | Probably best not to have an empty test.... | SeleniumHQ-selenium | py |
|
@@ -956,10 +956,10 @@ public class TryTest extends AbstractValueTest {
private static <T, X extends Throwable> Try<T> failure(Class<X> exceptionType) {
try {
- final X exception = exceptionType.newInstance();
+ final X exception = exceptionType.getConstructor().newInstance();
return Try.failure(exception);
- } catch (InstantiationException | IllegalAccessException x) {
- throw new IllegalStateException("Error instantiating " + exceptionType);
+ } catch (Throwable e) {
+ throw new IllegalStateException("Error instantiating " + exceptionType, e);
}
}
| 1 | /* / \____ _ _ ____ ______ / \ ____ __ _______
* / / \/ \ / \/ \ / /\__\/ // \/ \ // /\__\ JΛVΛSLΛNG
* _/ / /\ \ \/ / /\ \\__\\ \ // /\ \ /\\/ \ /__\ \ Copyright 2014-2016 Javaslang, http://javaslang.io
* /___/\_/ \_/\____/\_/ \_/\__\/__/\__\_/ \_// \__/\_____/ Licensed under the Apache License, Version 2.0
*/
package javaslang.control;
import javaslang.AbstractValueTest;
import javaslang.Serializables;
import javaslang.collection.Seq;
import javaslang.control.Try.NonFatalException;
import org.assertj.core.api.Assertions;
import org.junit.Test;
import java.util.*;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.function.Function;
import java.util.function.Supplier;
import static org.assertj.core.api.Assertions.assertThatThrownBy;
import static org.assertj.core.api.Assertions.fail;
public class TryTest extends AbstractValueTest {
private static final String OK = "ok";
private static final String FAILURE = "failure";
// -- AbstractValueTest
@Override
protected <T> Try<T> empty() {
return Try.failure(new NoSuchElementException());
}
@Override
protected <T> Try<T> of(T element) {
return Try.success(element);
}
@SafeVarargs
@Override
protected final <T> Try<T> of(T... elements) {
return of(elements[0]);
}
@Override
protected boolean useIsEqualToInsteadOfIsSameAs() {
return true;
}
@Override
protected int getPeekNonNilPerformingAnAction() {
return 1;
}
@Override
@Test(expected = NonFatalException.class)
public void shouldGetEmpty() {
empty().get();
}
// -- Try
// -- exists
@Test
public void shouldBeAwareOfPropertyThatHoldsExistsOfSuccess() {
assertThat(Try.success(1).exists(i -> i == 1)).isTrue();
}
@Test
public void shouldBeAwareOfPropertyThatNotHoldsExistsOfSuccess() {
assertThat(Try.success(1).exists(i -> i == 2)).isFalse();
}
@Test
public void shouldNotHoldPropertyExistsOfFailure() {
assertThat(failure().exists(e -> true)).isFalse();
}
@Test(expected = Error.class)
public void shouldNotHoldPropertyExistsWhenPredicateThrows() {
Try.success(1).exists(e -> {
throw new Error("error");
});
}
// -- forall
@Test
public void shouldBeAwareOfPropertyThatHoldsForAllOfSuccess() {
assertThat(Try.success(1).forAll(i -> i == 1)).isTrue();
}
@Test
public void shouldBeAwareOfPropertyThatNotHoldsForAllOfSuccess() {
assertThat(Try.success(1).forAll(i -> i == 2)).isFalse();
}
@Test // a property holds for all elements of no elements
public void shouldNotHoldPropertyForAllOfFailure() {
assertThat(failure().forAll(e -> true)).isTrue();
}
@Test(expected = Error.class)
public void shouldNotHoldPropertyForAllWhenPredicateThrows() {
Try.success(1).forAll(e -> {
throw new Error("error");
});
}
// -- orElse
@Test
public void shouldReturnSelfOnOrElseIfSuccess() {
Try<Integer> success = Try.success(42);
assertThat(success.orElse(Try.success(0))).isSameAs(success);
}
@Test
public void shouldReturnSelfOnOrElseSupplierIfSuccess() {
Try<Integer> success = Try.success(42);
assertThat(success.orElse(() -> Try.success(0))).isSameAs(success);
}
@Test
public void shouldReturnAlternativeOnOrElseIfFailure() {
Try<Integer> success = Try.success(42);
assertThat(Try.failure(new RuntimeException()).orElse(success)).isSameAs(success);
}
@Test
public void shouldReturnAlternativeOnOrElseSupplierIfFailure() {
Try<Integer> success = Try.success(42);
assertThat(Try.failure(new RuntimeException()).orElse(() -> success)).isSameAs(success);
}
// -- iterator
@Test
public void shouldReturnIteratorOfSuccess() {
assertThat((Iterator<Integer>) Try.success(1).iterator()).isNotNull();
}
@Test
public void shouldReturnIteratorOfFailure() {
assertThat((Iterator<Object>) failure().iterator()).isNotNull();
}
// -- Try.of
@Test
public void shouldCreateSuccessWhenCallingTryOfSupplier() {
assertThat(Try.of(() -> 1) instanceof Try.Success).isTrue();
}
@Test
public void shouldCreateFailureWhenCallingTryOfSupplier() {
assertThat(Try.of(() -> {
throw new Error("error");
}) instanceof Try.Failure).isTrue();
}
// -- Try.run
@Test
public void shouldCreateSuccessWhenCallingTryRunRunnable() {
assertThat(Try.run(() -> {
}) instanceof Try.Success).isTrue();
}
@Test
public void shouldCreateFailureWhenCallingTryRunRunnable() {
assertThat(Try.run(() -> {
throw new Error("error");
}) instanceof Try.Failure).isTrue();
}
// -- Failure.Cause
@Test(expected = Try.FatalException.class)
public void shouldDetectFatalException() throws Exception {
NonFatalException.of(new OutOfMemoryError());
}
@Test
public void shouldDetectNonFatalException() throws Exception {
final NonFatalException cause = NonFatalException.of(new Exception());
assertThat(cause).isNotNull();
}
@Test
public void shouldSubsequentlyHandOverCause() {
final Supplier<?> inner = () -> {
throw new UnknownError("\uD83D\uDCA9");
};
final Supplier<?> outer = () -> Try.of(inner::get).get();
try {
Try.of(outer::get).get();
Assertions.fail("Exception expected");
} catch (Try.FatalException x) {
Assertions.assertThat(x.getCause().getMessage()).isEqualTo("\uD83D\uDCA9");
} catch (Throwable x) {
Assertions.fail("Unexpected exception type: " + x.getClass().getName());
}
}
@Test
public void shouldCreateFailureOnNonFatalException() {
assertThat(failure().failed().get().getClass().getName()).isEqualTo(RuntimeException.class.getName());
}
// -- Failure.NonFatal
@Test
public void shouldReturnAndNotThrowOnNonFatal() {
final NonFatalException cause = NonFatalException.of(new Exception());
assertThat(NonFatalException.of(cause)).isNotNull();
}
@Test
public void shouldReturnToStringOnNonFatal() {
final Exception exception = new java.lang.Exception();
final NonFatalException cause = NonFatalException.of(exception);
assertThat(cause.toString()).isEqualTo("NonFatal(" + exception.toString() + ")");
}
@Test
public void shouldReturnHasCodeOnNonFatal() {
final Exception exception = new java.lang.Exception();
final NonFatalException cause = NonFatalException.of(exception);
assertThat(cause.hashCode()).isEqualTo(Objects.hashCode(exception));
}
// -- Failure.Fatal
@Test
public void shouldReturnToStringOnFatal() {
try {
Try.of(() -> {
throw new UnknownError();
});
fail("Exception Expected");
} catch (Try.FatalException x) {
assertThat(x.toString()).isEqualTo("Fatal(java.lang.UnknownError)");
}
}
@Test
public void shouldReturnHashCodeOnFatal() {
UnknownError error = new UnknownError();
try {
Try.of(() -> {
throw error;
});
fail("Exception Expected");
} catch (Try.FatalException x) {
assertThat(x.hashCode()).isEqualTo(Objects.hashCode(error));
}
}
@Test
public void shouldReturnEqualsOnFatal() {
UnknownError error = new UnknownError();
try {
Try.of(() -> {
throw error;
});
fail("Exception Expected");
} catch (Try.FatalException x) {
try {
Try.of(() -> {
throw error;
});
fail("Exception Expected");
} catch (Try.FatalException fatal) {
assertThat(x.equals(fatal)).isEqualTo(true);
}
}
}
// -- Failure
@Test
public void shouldDetectFailureOfRunnable() {
assertThat(Try.of(() -> {
throw new RuntimeException();
}).isFailure()).isTrue();
}
@Test(expected = Try.FatalException.class)
public void shouldPassThroughFatalException() {
Try.of(() -> {
throw new UnknownError();
});
}
// -- isFailure
@Test
public void shouldDetectFailureOnNonFatalException() {
assertThat(failure().isFailure()).isTrue();
}
// -- isSuccess
@Test
public void shouldDetectNonSuccessOnFailure() {
assertThat(failure().isSuccess()).isFalse();
}
// -- get
@Test(expected = NonFatalException.class)
public void shouldThrowWhenGetOnFailure() {
failure().get();
}
// -- getOrElse
@Test
public void shouldReturnElseWhenOrElseOnFailure() {
assertThat(failure().getOrElse(OK)).isEqualTo(OK);
}
// -- getOrElseGet
@Test
public void shouldReturnElseWhenOrElseGetOnFailure() {
assertThat(failure().getOrElseGet(x -> OK)).isEqualTo(OK);
}
// -- getOrElseThrow
@Test(expected = IllegalStateException.class)
public void shouldThrowOtherWhenGetOrElseThrowOnFailure() {
failure().getOrElseThrow(x -> new IllegalStateException(OK));
}
// -- orElseRun
@Test
public void shouldRunElseWhenOrElseRunOnFailure() {
final String[] result = new String[1];
failure().orElseRun(x -> result[0] = OK);
assertThat(result[0]).isEqualTo(OK);
}
// -- recover(Class, Function)
@Test
public void shouldRecoverWhenFailureMatchesExactly() {
final Try<String> testee = failure(RuntimeException.class);
assertThat(testee.recover(RuntimeException.class, x -> OK).isSuccess()).isTrue();
}
@Test
public void shouldRecoverWhenFailureIsAssignableFrom() {
final Try<String> testee = failure(UnsupportedOperationException.class);
assertThat(testee.recover(RuntimeException.class, x -> OK).isSuccess()).isTrue();
}
@Test
public void shouldReturnThisWhenRecoverDifferentTypeOfFailure() {
final Try<String> testee = failure(RuntimeException.class);
assertThat(testee.recover(NullPointerException.class, x -> OK)).isSameAs(testee);
}
@Test
public void shouldReturnThisWhenRecoverSpecificFailureOnSuccess() {
final Try<String> testee = success();
assertThat(testee.recover(RuntimeException.class, x -> OK)).isSameAs(testee);
}
// -- recover(Class, Object)
@Test
public void shouldRecoverWithSuccessWhenFailureMatchesExactly() {
final Try<String> testee = failure(RuntimeException.class);
assertThat(testee.recover(RuntimeException.class, OK).isSuccess()).isTrue();
}
@Test
public void shouldRecoverWithSuccessWhenFailureIsAssignableFrom() {
final Try<String> testee = failure(UnsupportedOperationException.class);
assertThat(testee.recover(RuntimeException.class, OK).isSuccess()).isTrue();
}
@Test
public void shouldReturnThisWhenRecoverWithSuccessDifferentTypeOfFailure() {
final Try<String> testee = failure(RuntimeException.class);
assertThat(testee.recover(NullPointerException.class, OK)).isSameAs(testee);
}
@Test
public void shouldReturnThisWhenRecoverWithSuccessSpecificFailureOnSuccess() {
final Try<String> testee = success();
assertThat(testee.recover(RuntimeException.class, OK)).isSameAs(testee);
}
// -- recover(Function)
@Test
public void shouldRecoverOnFailure() {
assertThat(failure().recover(x -> OK).get()).isEqualTo(OK);
}
@Test
public void shouldReturnThisWhenRecoverOnSuccess() {
final Try<String> testee = success();
assertThat(testee.recover(x -> OK)).isSameAs(testee);
}
// -- recoverWith
@Test
public void shouldRecoverWithOnFailure() {
assertThat(TryTest.<String> failure().recoverWith(x -> success()).get()).isEqualTo(OK);
}
@Test
public void shouldRecoverWithThrowingOnFailure() {
final RuntimeException error = error();
assertThat(failure().recoverWith(x -> {
throw error;
})).isEqualTo(Try.failure(error));
}
// -- onFailure
@Test
public void shouldConsumeThrowableWhenCallingOnFailureGivenFailure() {
final String[] result = new String[] { FAILURE };
failure().onFailure(x -> result[0] = OK);
assertThat(result[0]).isEqualTo(OK);
}
// -- toOption
@Test
public void shouldConvertFailureToOption() {
assertThat(failure().toOption().isDefined()).isFalse();
}
// -- toEither
@Test
public void shouldConvertFailureToEither() {
assertThat(failure().toEither().isLeft()).isTrue();
}
@Test
public void shouldConvertFailureToEitherLeft() {
assertThat(failure().toEither("test").isLeft()).isTrue();
}
@Test
public void shouldConvertFailureToEitherLeftSupplier() {
assertThat(failure().toEither(() -> "test").isLeft()).isTrue();
}
// -- toCompletableFuture
@Test
public void shouldConvertSuccessToCompletableFuture() {
CompletableFuture<String> future = success().toCompletableFuture();
assertThat(future.isDone());
assertThat(Try.of(future::get).get()).isEqualTo(success().get());
}
@Test
public void shouldConvertFailureToFailedCompletableFuture() {
final CompletableFuture<Object> future = failure().toCompletableFuture();
assertThat(future.isDone());
assertThat(future.isCompletedExceptionally());
assertThatThrownBy(future::get)
.isExactlyInstanceOf(ExecutionException.class)
.hasCauseExactlyInstanceOf(RuntimeException.class);
}
// -- toValidation
@Test
public void shouldConvertFailureToValidationLeft() {
assertThat(failure().toValidation("test").isInvalid()).isTrue();
}
@Test
public void shouldConvertFailureToValidationLeftSupplier() {
assertThat(failure().toValidation(() -> "test").isInvalid()).isTrue();
}
// -- toJavaOptional
@Test
public void shouldConvertFailureToJavaOptional() {
assertThat(failure().toJavaOptional().isPresent()).isFalse();
}
// -- filter
@Test
public void shouldFilterMatchingPredicateOnFailure() {
final Try<String> actual = failure();
assertThat(actual.filter(s -> true)).isEqualTo(actual);
}
@Test
public void shouldFilterNonMatchingPredicateOnFailure() {
final Try<String> actual = failure();
assertThat(actual.filter(s -> false)).isEqualTo(actual);
}
@Test
public void shouldFilterWithExceptionOnFailure() {
final Try<String> actual = failure();
assertThat(actual.filter(this::filter)).isEqualTo(actual);
}
@Test
public void shouldReturnIdentityWhenFilterOnFailure() {
final Try<String> identity = failure();
assertThat(identity.filter(s -> true)).isEqualTo(identity);
}
// -- flatMap
@Test
public void shouldFlatMapOnFailure() {
final Try<String> actual = failure();
assertThat(actual.flatMap(s -> Try.of(() -> s + "!"))).isEqualTo(actual);
}
@Test
public void shouldFlatMapWithExceptionOnFailure() {
final Try<String> actual = failure();
assertThat(actual.flatMap(this::flatMap)).isEqualTo(actual);
}
// -- forEach
@Test
public void shouldForEachOnFailure() {
final List<String> actual = new ArrayList<>();
TryTest.<String> failure().forEach(actual::add);
assertThat(actual.isEmpty()).isTrue();
}
// -- map
@Test
public void shouldMapOnFailure() {
final Try<String> actual = failure();
assertThat(actual.map(s -> s + "!")).isEqualTo(actual);
}
@Test
public void shouldMapWithExceptionOnFailure() {
final Try<String> actual = failure();
assertThat(actual.map(this::map)).isEqualTo(actual);
}
@Test
public void shouldChainSuccessWithMap() {
final Try<Integer> actual = Try.of(() -> 100)
.map(x -> x + 100)
.map(x -> x + 50);
final Try<Integer> expected = Try.success(250);
assertThat(actual).isEqualTo(expected);
}
@Test
public void shouldChainFailureWithMap() {
final Try<Integer> actual = Try.of(() -> 100)
.map(x -> x + 100)
.map(x -> Integer.parseInt("aaa") + x) //Throws exception.
.map(x -> x / 2);
assertThat(actual.toString()).isEqualTo("Failure(java.lang.NumberFormatException: For input string: \"aaa\")");
}
// -- andThen
@Test
public void shouldComposeFailureWithAndThenWhenFailing() {
final Try<Void> actual = Try.run(() -> {
throw new Error("err1");
}).andThen(() -> {
throw new Error("err2");
});
assertThat(actual.toString()).isEqualTo("Failure(java.lang.Error: err1)");
}
@Test
public void shouldChainConsumableSuccessWithAndThen() {
final Try<Integer> actual = Try.of(() -> new ArrayList<Integer>())
.andThen(arr -> arr.add(10))
.andThen(arr -> arr.add(30))
.andThen(arr -> arr.add(20))
.map(arr -> arr.get(1));
final Try<Integer> expected = Try.success(30);
assertThat(actual).isEqualTo(expected);
}
@Test
public void shouldChainConsumableFailureWithAndThen() {
final Try<Integer> actual = Try.of(() -> new ArrayList<Integer>())
.andThen(arr -> arr.add(10))
.andThen(arr -> arr.add(Integer.parseInt("aaa"))) //Throws exception.
.andThen(arr -> arr.add(20))
.map(arr -> arr.get(1));
assertThat(actual.toString()).isEqualTo("Failure(java.lang.NumberFormatException: For input string: \"aaa\")");
}
// peek
@Test
public void shouldPeekFailure() {
final List<Object> list = new ArrayList<>();
assertThat(failure().peek(list::add)).isEqualTo(failure());
assertThat(list.isEmpty()).isTrue();
}
// equals
@Test
public void shouldEqualFailureIfObjectIsSame() {
final Try<?> failure = Try.failure(error());
assertThat(failure).isEqualTo(failure);
}
@Test
public void shouldNotEqualFailureIfObjectIsNull() {
assertThat(Try.failure(error())).isNotNull();
}
@Test
public void shouldNotEqualFailureIfObjectIsOfDifferentType() {
assertThat(Try.failure(error()).equals(new Object())).isFalse();
}
@Test
public void shouldEqualFailure() {
assertThat(Try.failure(error())).isEqualTo(Try.failure(error()));
}
// hashCode
@Test
public void shouldHashFailure() {
final Throwable error = error();
assertThat(Try.failure(error).hashCode()).isEqualTo(Objects.hashCode(error));
}
// toString
@Test
public void shouldConvertFailureToString() {
assertThat(Try.failure(error()).toString()).isEqualTo("Failure(java.lang.RuntimeException: error)");
}
// -- sequence
@Test
public void shouldConvertListOfSuccessToTryOfList() {
List<Try<String>> tries = Arrays.asList(Try.success("a"), Try.success("b"), Try.success("c"));
Try<Seq<String>> reducedTry = Try.sequence(tries);
assertThat(reducedTry instanceof Try.Success).isTrue();
assertThat(reducedTry.get().size()).isEqualTo(3);
assertThat(reducedTry.get().mkString()).isEqualTo("abc");
}
@Test
public void shouldConvertListOfFailureToTryOfList() {
Throwable t = new RuntimeException("failure");
List<Try<String>> tries = Arrays.asList(Try.failure(t), Try.failure(t), Try.failure(t));
Try<Seq<String>> reducedTry = Try.sequence(tries);
assertThat(reducedTry instanceof Try.Failure).isTrue();
}
@Test
public void shouldConvertListOfMixedTryToTryOfList() {
Throwable t = new RuntimeException("failure");
List<Try<String>> tries = Arrays.asList(Try.success("a"), Try.failure(t), Try.success("c"));
Try<Seq<String>> reducedTry = Try.sequence(tries);
assertThat(reducedTry instanceof Try.Failure).isTrue();
}
// serialization
@Test
public void shouldSerializeDeserializeFailure() {
final Object actual = Serializables.deserialize(Serializables.serialize(Try.failure(error())));
assertThat(actual.toString()).isEqualTo(Try.failure(error()).toString());
}
// -- Success
@Test
public void shouldDetectSuccessOfRunnable() {
assertThat(Try.run(() -> System.out.println("side-effect")).isSuccess()).isTrue();
}
@Test
public void shouldDetectSuccess() {
assertThat(success().isSuccess()).isTrue();
}
@Test
public void shouldDetectNonFailureOnSuccess() {
assertThat(success().isFailure()).isFalse();
}
@Test
public void shouldGetOnSuccess() {
assertThat(success().get()).isEqualTo(OK);
}
@Test
public void shouldGetOrElseOnSuccess() {
assertThat(success().getOrElse((String) null)).isEqualTo(OK);
}
@Test
public void shouldOrElseGetOnSuccess() {
assertThat(success().getOrElseGet(x -> null)).isEqualTo(OK);
}
@Test
public void shouldOrElseRunOnSuccess() {
final String[] result = new String[] { OK };
success().orElseRun(x -> result[0] = FAILURE);
assertThat(result[0]).isEqualTo(OK);
}
@Test
public void shouldOrElseThrowOnSuccess() {
assertThat(success().getOrElseThrow(x -> null)).isEqualTo(OK);
}
@Test
public void shouldRecoverOnSuccess() {
assertThat(success().recover(x -> null).get()).isEqualTo(OK);
}
@Test
public void shouldRecoverWithOnSuccess() {
assertThat(success().recoverWith(x -> null).get()).isEqualTo(OK);
}
@Test
public void shouldNotConsumeThrowableWhenCallingOnFailureGivenSuccess() {
final String[] result = new String[] { OK };
success().onFailure(x -> result[0] = FAILURE);
assertThat(result[0]).isEqualTo(OK);
}
@Test
public void shouldConvertSuccessToOption() {
assertThat(success().toOption().get()).isEqualTo(OK);
}
@Test
public void shouldConvertSuccessToEither() {
assertThat(success().toEither().isRight()).isTrue();
}
@Test
public void shouldConvertSuccessToJavaOptional() {
assertThat(success().toJavaOptional().get()).isEqualTo(OK);
}
@Test
public void shouldFilterMatchingPredicateOnSuccess() {
assertThat(success().filter(s -> true).get()).isEqualTo(OK);
}
@Test(expected = NonFatalException.class)
public void shouldFilterNonMatchingPredicateOnSuccess() {
success().filter(s -> false).get();
}
@Test
public void shouldFilterNonMatchingPredicateAndDefaultThrowableSupplierOnSuccess() {
assertThat(success().filter(s -> false).getCause())
.isInstanceOf(NoSuchElementException.class);
}
@Test
public void shouldFilterNonMatchingPredicateAndCustomThrowableSupplierOnSuccess() {
assertThat(success().filter(s -> false, () -> new IllegalArgumentException()).getCause())
.isInstanceOf(IllegalArgumentException.class);
}
@Test(expected = RuntimeException.class)
public void shouldFilterWithExceptionOnSuccess() {
success().filter(s -> {
throw new RuntimeException("xxx");
}).get();
}
@Test
public void shouldFlatMapOnSuccess() {
assertThat(success().flatMap(s -> Try.of(() -> s + "!")).get()).isEqualTo(OK + "!");
}
@Test
public void shouldFlatMapOnIterable() {
final Try<Integer> success = Try.success(1);
assertThat(success().flatMap(ignored -> success)).isEqualTo(success);
}
@Test
public void shouldFlatMapOnEmptyIterable() {
final Try<Integer> failure = Try.failure(new Error());
assertThat(success().flatMap(ignored -> failure)).isEqualTo(failure);
}
@Test(expected = RuntimeException.class)
public void shouldFlatMapWithExceptionOnSuccess() {
success().flatMap(s -> {
throw new RuntimeException("xxx");
}).get();
}
@Test
public void shouldForEachOnSuccess() {
final List<String> actual = new ArrayList<>();
success().forEach(actual::add);
assertThat(actual).isEqualTo(Collections.singletonList(OK));
}
@Test
public void shouldMapOnSuccess() {
assertThat(success().map(s -> s + "!").get()).isEqualTo(OK + "!");
}
@Test(expected = NonFatalException.class)
public void shouldMapWithExceptionOnSuccess() {
success().map(s -> {
throw new RuntimeException("xxx");
}).get();
}
@Test(expected = NonFatalException.class)
public void shouldThrowWhenCallingFailedOnSuccess() {
success().failed().get();
}
@Test(expected = UnsupportedOperationException.class)
public void shouldThrowWhenCallingGetCauseOnSuccess() {
success().getCause();
}
@Test
public void shouldComposeSuccessWithAndThenWhenFailing() {
final Try<Void> actual = Try.run(() -> {
}).andThen(() -> {
throw new Error("failure");
});
assertThat(actual.toString()).isEqualTo("Failure(java.lang.Error: failure)");
}
@Test
public void shouldComposeSuccessWithAndThenWhenSucceeding() {
final Try<Void> actual = Try.run(() -> {
}).andThen(() -> {
});
final Try<Void> expected = Try.success(null);
assertThat(actual).isEqualTo(expected);
}
// peek
@Test
public void shouldPeekSuccess() {
final List<Object> list = new ArrayList<>();
assertThat(success().peek(list::add)).isEqualTo(success());
assertThat(list.isEmpty()).isFalse();
}
@Test(expected = RuntimeException.class)
public void shouldPeekSuccessAndThrow() {
success().peek(t -> failure().get());
}
// equals
@Test
public void shouldEqualSuccessIfObjectIsSame() {
final Try<?> success = Try.success(1);
assertThat(success).isEqualTo(success);
}
@Test
public void shouldNotEqualSuccessIfObjectIsNull() {
assertThat(Try.success(1)).isNotNull();
}
@Test
public void shouldNotEqualSuccessIfObjectIsOfDifferentType() {
assertThat(Try.success(1).equals(new Object())).isFalse();
}
@Test
public void shouldEqualSuccess() {
assertThat(Try.success(1)).isEqualTo(Try.success(1));
}
// hashCode
@Test
public void shouldHashSuccess() {
assertThat(Try.success(1).hashCode()).isEqualTo(Objects.hashCode(1));
}
// toString
@Test
public void shouldConvertSuccessToString() {
assertThat(Try.success(1).toString()).isEqualTo("Success(1)");
}
// serialization
@Test
public void shouldSerializeDeserializeSuccess() {
final Object actual = Serializables.deserialize(Serializables.serialize(Try.success(1)));
assertThat(actual).isEqualTo(Try.success(1));
}
// -- Checked Functions
@Test
public void shouldCreateIdentityCheckedFunction() {
assertThat(Function.identity()).isNotNull();
}
@Test
public void shouldEnsureThatIdentityCheckedFunctionReturnsIdentity() throws Throwable {
assertThat(Function.identity().apply(1)).isEqualTo(1);
}
// -- helpers
private RuntimeException error() {
return new RuntimeException("error");
}
private static <T> Try<T> failure() {
return Try.failure(new RuntimeException());
}
private static <T, X extends Throwable> Try<T> failure(Class<X> exceptionType) {
try {
final X exception = exceptionType.newInstance();
return Try.failure(exception);
} catch (InstantiationException | IllegalAccessException x) {
throw new IllegalStateException("Error instantiating " + exceptionType);
}
}
private <T> boolean filter(T t) {
throw new RuntimeException("xxx");
}
private <T> Try<T> flatMap(T t) {
throw new RuntimeException("xxx");
}
private <T> T map(T t) {
throw new RuntimeException("xxx");
}
private Try<String> success() {
return Try.of(() -> "ok");
}
}
| 1 | 10,153 | direct `newInstance` call is also deprecated now | vavr-io-vavr | java |
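
On the reviewer's note that the direct `newInstance` call is deprecated: `Class.newInstance()` has been deprecated since Java 9, partly because it rethrows any checked exception from the constructor without compile-time checking. The `getConstructor().newInstance()` route used by the patch wraps constructor failures in `InvocationTargetException` instead. A minimal sketch of the pattern — the helper class is illustrative, not the project's code:

```java
public final class Reflect {

    // Instantiate via the public no-arg constructor. The four reflective failure
    // modes (NoSuchMethodException, InstantiationException, IllegalAccessException,
    // InvocationTargetException) share the supertype ReflectiveOperationException,
    // so one catch clause covers them all and preserves the cause.
    static <T> T create(Class<T> type) {
        try {
            return type.getConstructor().newInstance();
        } catch (ReflectiveOperationException e) {
            throw new IllegalStateException("Error instantiating " + type, e);
        }
    }

    public static void main(String[] args) {
        RuntimeException x = create(RuntimeException.class);
        System.out.println(x.getClass().getName()); // java.lang.RuntimeException
    }
}
```
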
@@ -15,14 +15,15 @@ import (
"syscall"
"time"
+ "github.com/vishvananda/netlink"
+ weaveapi "github.com/weaveworks/weave/api"
+ "github.com/weaveworks/weave/common"
+ "golang.org/x/sys/unix"
api "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
-
- weaveapi "github.com/weaveworks/weave/api"
- "github.com/weaveworks/weave/common"
)
type nodeInfo struct { | 1 | /*
Package main deals with weave-net peers on the cluster.
This involves peer management, such as getting the latest peers or removing defunct peers from the cluster
*/
package main
import (
"flag"
"fmt"
"math/rand"
"net"
"os"
"os/signal"
"syscall"
"time"
api "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
weaveapi "github.com/weaveworks/weave/api"
"github.com/weaveworks/weave/common"
)
type nodeInfo struct {
name string
addr string
}
// return the IP addresses of all nodes in the cluster
func getKubePeers(c kubernetes.Interface, includeWithNoIPAddr bool) ([]nodeInfo, error) {
nodeList, err := c.CoreV1().Nodes().List(api.ListOptions{})
if err != nil {
return nil, err
}
addresses := make([]nodeInfo, 0, len(nodeList.Items))
for _, peer := range nodeList.Items {
var internalIP, externalIP string
for _, addr := range peer.Status.Addresses {
// Check it's a valid ipv4 address
ip := net.ParseIP(addr.Address)
if ip == nil || ip.To4() == nil {
continue
}
if addr.Type == "InternalIP" {
internalIP = ip.To4().String()
} else if addr.Type == "ExternalIP" {
externalIP = ip.To4().String()
}
}
// Fallback for cases where a Node has an ExternalIP but no InternalIP
if internalIP != "" {
addresses = append(addresses, nodeInfo{name: peer.Name, addr: internalIP})
} else if externalIP != "" {
addresses = append(addresses, nodeInfo{name: peer.Name, addr: externalIP})
} else if includeWithNoIPAddr {
addresses = append(addresses, nodeInfo{name: peer.Name, addr: ""})
}
}
return addresses, nil
}
// (minimal, incomplete) interface so weaver can be mocked for testing.
type weaveClient interface {
RmPeer(peerName string) (string, error)
}
// For each of those peers that is no longer listed as a node by
// Kubernetes, remove it from Weave IPAM
func reclaimRemovedPeers(kube kubernetes.Interface, cml *configMapAnnotations, myPeerName, myNodeName string) error {
weave := weaveapi.NewClient(os.Getenv("WEAVE_HTTP_ADDR"), common.Log)
for loopsWhenNothingChanged := 0; loopsWhenNothingChanged < 3; loopsWhenNothingChanged++ {
if err := cml.Init(); err != nil {
return err
}
// 1. Compare peers stored in the peerList against all peers reported by k8s now.
storedPeerList, err := cml.GetPeerList()
if err != nil {
return err
}
nodes, err := getKubePeers(kube, true)
nodeSet := make(map[string]struct{}, len(nodes))
for _, node := range nodes {
nodeSet[node.name] = struct{}{}
}
peerMap := make(map[string]peerInfo, len(storedPeerList.Peers))
for _, peer := range storedPeerList.Peers {
peerMap[peer.PeerName] = peer
}
// remove entries from the peer map that are current nodes
for key, peer := range peerMap {
if _, found := nodeSet[peer.NodeName]; found {
// unless they have a duplicate of my NodeName but are not me
if peer.NodeName == myNodeName && peer.PeerName != myPeerName {
continue
}
delete(peerMap, key)
}
}
// so the remainder is everything we want to clean up
common.Log.Debugln("[kube-peers] Nodes that have disappeared:", peerMap)
if len(peerMap) == 0 {
break
}
// 2. Loop for each X in the first set and not in the second - we wish to remove X from our data structures
for _, peer := range peerMap {
if peer.PeerName == myPeerName { // Don't remove myself.
common.Log.Warnln("[kube-peers] not removing myself", peer)
continue
}
changed, err := reclaimPeer(weave, cml, storedPeerList, peer.PeerName, myPeerName)
if err != nil {
return err
}
if changed {
loopsWhenNothingChanged = 0
}
}
// 9. Go back to step 1 until there is no difference between the two sets
// (or we hit the counter that says we've been round the loop 3 times and nothing happened)
}
return nil
}
// Attempt to reclaim the IP addresses owned by peerName, using the
// Kubernetes api-server as a point of consensus so that only one peer
// actions the reclaim.
// Return a bool to show whether we attempted to change anything,
// and an error if something went wrong.
func reclaimPeer(weave weaveClient, cml *configMapAnnotations, storedPeerList *peerList, peerName string, myPeerName string) (changed bool, err error) {
common.Log.Debugln("[kube-peers] Preparing to remove disappeared peer", peerName)
okToRemove := false
nonExistentPeer := false
// 3. Check if there is an existing annotation with key X
existingAnnotation, found := cml.GetAnnotation(KubePeersPrefix + peerName)
if found {
common.Log.Debugln("[kube-peers] Existing annotation", existingAnnotation)
// 4. If annotation already contains my identity, ok;
if existingAnnotation == myPeerName {
okToRemove = true
} else {
// handle an edge case where a peer claimed to own the reclaim action but no longer
// exists, hence the lock would otherwise persist forever
if !storedPeerList.contains(existingAnnotation) {
nonExistentPeer = true
common.Log.Debugln("[kube-peers] Existing annotation", existingAnnotation, " has a non-existent peer so owning the reclaim action")
}
}
}
if !found || nonExistentPeer {
// 5. If non-existent, write an annotation with key X and contents "my identity"
common.Log.Debugln("[kube-peers] Noting I plan to remove ", peerName)
if err := cml.UpdateAnnotation(KubePeersPrefix+peerName, myPeerName); err == nil {
okToRemove = true
} else {
common.Log.Errorln("[kube-peers] error from UpdateAnnotation: ", err)
}
}
if !okToRemove {
return false, nil
}
// 6. If step 4 or 5 succeeded, rmpeer X
result, err := weave.RmPeer(peerName)
common.Log.Infof("[kube-peers] rmpeer of %s: %s", peerName, result)
if err != nil {
return false, err
}
err = cml.LoopUpdate(func() error {
// 7aa. Remove any annotations Z* that have contents X
if err := cml.RemoveAnnotationsWithValue(peerName); err != nil {
return err
}
// 7a. Remove X from peerList
storedPeerList, err := cml.GetPeerList()
if err != nil {
return err
}
storedPeerList.remove(peerName)
if err := cml.UpdatePeerList(*storedPeerList); err != nil {
return err
}
// 7b. Remove annotation with key X
return cml.RemoveAnnotation(KubePeersPrefix + peerName)
})
// 8. If step 5 failed due to optimistic lock conflict, stop: someone else is handling X
// Step 3-5 is to protect against two simultaneous rmpeers of X
// Step 4 is to pick up again after a restart between step 5 and step 7b
// If the peer doing the reclaim disappears between steps 5 and 7a, then someone will clean it up in step 7aa
// If peer doing the reclaim disappears forever between 7a and 7b then we get a dangling annotation
// This should be sufficiently rare that we don't care.
// Question: Should we narrow step 2 by checking against Weave Net IPAM?
// i.e. If peer X owns any address space and is marked unreachable, we want to rmpeer X
return true, err
}
// resetPeers replaces the peers list with the current set of peers
func resetPeers(kube kubernetes.Interface) error {
nodes, err := getKubePeers(kube, false)
if err != nil {
return err
}
peerList := make([]string, 0)
for _, node := range nodes {
peerList = append(peerList, node.addr)
}
weave := weaveapi.NewClient(os.Getenv("WEAVE_HTTP_ADDR"), common.Log)
err = weave.ReplacePeers(peerList)
if err != nil {
return err
}
return nil
}
// registers with the Kubernetes API server for node delete events. The node delete event handler
// invokes reclaimRemovedPeers to remove the deleted peer from IPAM so that its IP space is reclaimed
func registerForNodeUpdates(client *kubernetes.Clientset, stopCh <-chan struct{}, nodeName, peerName string) {
informerFactory := informers.NewSharedInformerFactory(client, 0)
nodeInformer := informerFactory.Core().V1().Nodes().Informer()
common.Log.Debugln("registering for updates for node delete events")
nodeInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
DeleteFunc: func(obj interface{}) {
// add random delay to avoid all nodes acting on node delete event at the same
// time leading to contention to use `weave-net` configmap
r := rand.Intn(5000)
time.Sleep(time.Duration(r) * time.Millisecond)
cml := newConfigMapAnnotations(configMapNamespace, configMapName, client)
err := reclaimRemovedPeers(client, cml, peerName, nodeName)
if err != nil {
common.Log.Fatalf("[kube-peers] Error while reclaiming space: %v", err)
}
err = resetPeers(client)
if err != nil {
common.Log.Fatalf("[kube-peers] Error resetting peer list: %v", err)
}
},
})
informerFactory.WaitForCacheSync(stopCh)
informerFactory.Start(stopCh)
}
func main() {
var (
justReclaim bool
justCheck bool
justSetNodeStatus bool
peerName string
nodeName string
logLevel string
runReclaimDaemon bool
)
flag.BoolVar(&justReclaim, "reclaim", false, "reclaim IP space from dead peers")
flag.BoolVar(&runReclaimDaemon, "run-reclaim-daemon", false, "run background process that reclaims IP space from dead peers")
flag.BoolVar(&justCheck, "check-peer-new", false, "return success if peer name is not stored in annotation")
flag.BoolVar(&justSetNodeStatus, "set-node-status", false, "set NodeNetworkUnavailable to false")
flag.StringVar(&peerName, "peer-name", "unknown", "name of this Weave Net peer")
flag.StringVar(&nodeName, "node-name", "unknown", "name of this Kubernetes node")
flag.StringVar(&logLevel, "log-level", "info", "logging level (debug, info, warning, error)")
flag.Parse()
common.SetLogLevel(logLevel)
config, err := rest.InClusterConfig()
if err != nil {
common.Log.Fatalf("[kube-peers] Could not get cluster config: %v", err)
}
c, err := kubernetes.NewForConfig(config)
if err != nil {
common.Log.Fatalf("[kube-peers] Could not make Kubernetes connection: %v", err)
}
if justCheck {
cml := newConfigMapAnnotations(configMapNamespace, configMapName, c)
exists, err := checkIamInPeerList(cml, c, peerName)
if err != nil {
common.Log.Fatalf("[kube-peers] Could not check peer list: %v", err)
}
if exists {
os.Exit(9)
} else {
os.Exit(0)
}
}
if justSetNodeStatus {
err := setNodeNetworkUnavailableFalse(c, nodeName)
if err != nil {
common.Log.Fatalf("[kube-peers] could not set node status: %v", err)
}
return
}
if err != nil {
common.Log.Fatalf("[kube-peers] Could not get peers: %v", err)
}
if justReclaim {
cml := newConfigMapAnnotations(configMapNamespace, configMapName, c)
list, err := addMyselfToPeerList(cml, c, peerName, nodeName)
if err != nil {
common.Log.Fatalf("[kube-peers] Could not update peer list: %v", err)
}
common.Log.Infoln("[kube-peers] Added myself to peer list", list)
err = reclaimRemovedPeers(c, cml, peerName, nodeName)
if err != nil {
common.Log.Fatalf("[kube-peers] Error while reclaiming space: %v", err)
}
return
}
peers, err := getKubePeers(c, false)
for _, node := range peers {
fmt.Println(node.addr)
}
if runReclaimDaemon {
// Handle SIGINT and SIGTERM
ch := make(chan os.Signal)
signal.Notify(ch, syscall.SIGINT, syscall.SIGTERM)
stopCh := make(chan struct{})
registerForNodeUpdates(c, stopCh, nodeName, peerName)
<-ch
close(stopCh)
}
}
| 1 | 15,743 | We have a bit of a convention where imports are split into three blocks: first Go standard library, then imports from outside the repo, then imports from inside the repo. | weaveworks-weave | go |
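
The three-block convention the reviewer describes, as an illustrative sketch (the packages are examples only; blank imports keep the file compile-clean even though nothing is referenced). Note that `gofmt` sorts each blank-line-separated group independently, so the grouping survives formatting:

```go
package main

import (
	// 1. Go standard library
	"fmt"

	// 2. imports from outside the repo
	_ "golang.org/x/sys/unix"

	// 3. imports from inside the repo
	_ "github.com/weaveworks/weave/common"
)

func main() {
	fmt.Println("import groups: stdlib, then external, then internal")
}
```
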
@@ -184,7 +184,7 @@ abstract class SnapshotProducer<ThisT> implements SnapshotUpdate<ThisT> {
/**
* Returns the snapshot summary from the implementation and updates totals.
*/
- private Map<String, String> summary(TableMetadata previous) {
+ protected Map<String, String> summary(TableMetadata previous) {
Map<String, String> summary = summary();
if (summary == null) { | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg;
import com.github.benmanes.caffeine.cache.Caffeine;
import com.github.benmanes.caffeine.cache.LoadingCache;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.Consumer;
import org.apache.iceberg.exceptions.CommitFailedException;
import org.apache.iceberg.exceptions.RuntimeIOException;
import org.apache.iceberg.io.OutputFile;
import org.apache.iceberg.util.Exceptions;
import org.apache.iceberg.util.Tasks;
import org.apache.iceberg.util.ThreadPools;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.apache.iceberg.TableProperties.COMMIT_MAX_RETRY_WAIT_MS;
import static org.apache.iceberg.TableProperties.COMMIT_MAX_RETRY_WAIT_MS_DEFAULT;
import static org.apache.iceberg.TableProperties.COMMIT_MIN_RETRY_WAIT_MS;
import static org.apache.iceberg.TableProperties.COMMIT_MIN_RETRY_WAIT_MS_DEFAULT;
import static org.apache.iceberg.TableProperties.COMMIT_NUM_RETRIES;
import static org.apache.iceberg.TableProperties.COMMIT_NUM_RETRIES_DEFAULT;
import static org.apache.iceberg.TableProperties.COMMIT_TOTAL_RETRY_TIME_MS;
import static org.apache.iceberg.TableProperties.COMMIT_TOTAL_RETRY_TIME_MS_DEFAULT;
import static org.apache.iceberg.TableProperties.MANIFEST_LISTS_ENABLED;
import static org.apache.iceberg.TableProperties.MANIFEST_LISTS_ENABLED_DEFAULT;
abstract class SnapshotProducer<ThisT> implements SnapshotUpdate<ThisT> {
private static final Logger LOG = LoggerFactory.getLogger(SnapshotProducer.class);
static final Set<ManifestFile> EMPTY_SET = Sets.newHashSet();
/**
* Default callback used to delete files.
*/
private final Consumer<String> defaultDelete = new Consumer<String>() {
@Override
public void accept(String file) {
ops.io().deleteFile(file);
}
};
/**
* Cache used to enrich ManifestFile instances that are written to a ManifestListWriter.
*/
private final LoadingCache<ManifestFile, ManifestFile> manifestsWithMetadata;
private final TableOperations ops;
private final String commitUUID = UUID.randomUUID().toString();
private final AtomicInteger attempt = new AtomicInteger(0);
private final List<String> manifestLists = Lists.newArrayList();
private Long snapshotId = null;
private TableMetadata base = null;
private boolean stageOnly = false;
private Consumer<String> deleteFunc = defaultDelete;
protected SnapshotProducer(TableOperations ops) {
this.ops = ops;
this.base = ops.current();
this.manifestsWithMetadata = Caffeine
.newBuilder()
.build(file -> {
if (file.snapshotId() != null) {
return file;
}
return addMetadata(ops, file);
});
}
protected abstract ThisT self();
@Override
public ThisT stageOnly() {
this.stageOnly = true;
return self();
}
@Override
public ThisT deleteWith(Consumer<String> deleteCallback) {
Preconditions.checkArgument(this.deleteFunc == defaultDelete, "Cannot set delete callback more than once");
this.deleteFunc = deleteCallback;
return self();
}
/**
* Clean up any uncommitted manifests that were created.
* <p>
* Manifests may not be committed if apply is called more than once because a commit conflict has occurred.
* Implementations may keep around manifests because the same changes will be made by both apply
* calls. This method instructs the implementation to clean up those manifests and passes the
* paths of the manifests that were actually committed.
*
* @param committed a set of manifest paths that were actually committed
*/
protected abstract void cleanUncommitted(Set<ManifestFile> committed);
/**
* A string that describes the action that produced the new snapshot.
*
* @return a string operation
*/
protected abstract String operation();
/**
* Apply the update's changes to the base table metadata and return the new manifest list.
*
* @param metadataToUpdate the base table metadata to apply changes to
* @return a manifest list for the new snapshot.
*/
protected abstract List<ManifestFile> apply(TableMetadata metadataToUpdate);
@Override
public Snapshot apply() {
this.base = ops.refresh();
Long parentSnapshotId = base.currentSnapshot() != null ?
base.currentSnapshot().snapshotId() : null;
List<ManifestFile> manifests = apply(base);
if (base.propertyAsBoolean(MANIFEST_LISTS_ENABLED, MANIFEST_LISTS_ENABLED_DEFAULT)) {
OutputFile manifestList = manifestListPath();
try (ManifestListWriter writer = new ManifestListWriter(
manifestList, snapshotId(), parentSnapshotId)) {
// keep track of the manifest lists created
manifestLists.add(manifestList.location());
ManifestFile[] manifestFiles = new ManifestFile[manifests.size()];
Tasks.range(manifestFiles.length)
.stopOnFailure().throwFailureWhenFinished()
.executeWith(ThreadPools.getWorkerPool())
.run(index ->
manifestFiles[index] = manifestsWithMetadata.get(manifests.get(index)));
writer.addAll(Arrays.asList(manifestFiles));
} catch (IOException e) {
throw new RuntimeIOException(e, "Failed to write manifest list file");
}
return new BaseSnapshot(ops.io(),
snapshotId(), parentSnapshotId, System.currentTimeMillis(), operation(), summary(base),
ops.io().newInputFile(manifestList.location()));
} else {
return new BaseSnapshot(ops.io(),
snapshotId(), parentSnapshotId, System.currentTimeMillis(), operation(), summary(base),
manifests);
}
}
protected abstract Map<String, String> summary();
/**
* Returns the snapshot summary from the implementation and updates totals.
*/
private Map<String, String> summary(TableMetadata previous) {
Map<String, String> summary = summary();
if (summary == null) {
return ImmutableMap.of();
}
Map<String, String> previousSummary;
if (previous.currentSnapshot() != null) {
if (previous.currentSnapshot().summary() != null) {
previousSummary = previous.currentSnapshot().summary();
} else {
// previous snapshot had no summary, use an empty summary
previousSummary = ImmutableMap.of();
}
} else {
// if there was no previous snapshot, default the summary to start totals at 0
previousSummary = ImmutableMap.of(
SnapshotSummary.TOTAL_RECORDS_PROP, "0",
SnapshotSummary.TOTAL_FILES_PROP, "0");
}
ImmutableMap.Builder<String, String> builder = ImmutableMap.builder();
// copy all summary properties from the implementation
builder.putAll(summary);
updateTotal(
builder, previousSummary, SnapshotSummary.TOTAL_RECORDS_PROP,
summary, SnapshotSummary.ADDED_RECORDS_PROP, SnapshotSummary.DELETED_RECORDS_PROP);
updateTotal(
builder, previousSummary, SnapshotSummary.TOTAL_FILES_PROP,
summary, SnapshotSummary.ADDED_FILES_PROP, SnapshotSummary.DELETED_FILES_PROP);
return builder.build();
}
@Override
public void commit() {
// this is always set to the latest commit attempt's snapshot id.
AtomicLong newSnapshotId = new AtomicLong(-1L);
try {
Tasks.foreach(ops)
.retry(base.propertyAsInt(COMMIT_NUM_RETRIES, COMMIT_NUM_RETRIES_DEFAULT))
.exponentialBackoff(
base.propertyAsInt(COMMIT_MIN_RETRY_WAIT_MS, COMMIT_MIN_RETRY_WAIT_MS_DEFAULT),
base.propertyAsInt(COMMIT_MAX_RETRY_WAIT_MS, COMMIT_MAX_RETRY_WAIT_MS_DEFAULT),
base.propertyAsInt(COMMIT_TOTAL_RETRY_TIME_MS, COMMIT_TOTAL_RETRY_TIME_MS_DEFAULT),
2.0 /* exponential */)
.onlyRetryOn(CommitFailedException.class)
.run(taskOps -> {
Snapshot newSnapshot = apply();
newSnapshotId.set(newSnapshot.snapshotId());
TableMetadata updated;
if (stageOnly) {
updated = base.addStagedSnapshot(newSnapshot);
} else {
updated = base.replaceCurrentSnapshot(newSnapshot);
}
// if the table UUID is missing, add it here. the UUID will be re-created each time this operation retries
// to ensure that if a concurrent operation assigns the UUID, this operation will not fail.
taskOps.commit(base, updated.withUUID());
});
} catch (RuntimeException e) {
Exceptions.suppressAndThrow(e, this::cleanAll);
}
LOG.info("Committed snapshot {} ({})", newSnapshotId.get(), getClass().getSimpleName());
try {
// at this point, the commit must have succeeded. after a refresh, the snapshot is loaded by
// id in case another commit was added between this commit and the refresh.
Snapshot saved = ops.refresh().snapshot(newSnapshotId.get());
if (saved != null) {
cleanUncommitted(Sets.newHashSet(saved.manifests()));
// also clean up unused manifest lists created by multiple attempts
for (String manifestList : manifestLists) {
if (!saved.manifestListLocation().equals(manifestList)) {
deleteFile(manifestList);
}
}
} else {
// saved may not be present if the latest metadata couldn't be loaded due to eventual
// consistency problems in refresh. in that case, don't clean up.
LOG.warn("Failed to load committed snapshot, skipping manifest clean-up");
}
} catch (RuntimeException e) {
LOG.warn("Failed to load committed table metadata, skipping manifest clean-up", e);
}
}
protected void cleanAll() {
for (String manifestList : manifestLists) {
deleteFile(manifestList);
}
manifestLists.clear();
cleanUncommitted(EMPTY_SET);
}
protected void deleteFile(String path) {
deleteFunc.accept(path);
}
protected OutputFile manifestListPath() {
return ops.io().newOutputFile(ops.metadataFileLocation(FileFormat.AVRO.addExtension(
String.format("snap-%d-%d-%s", snapshotId(), attempt.incrementAndGet(), commitUUID))));
}
protected OutputFile manifestPath(int manifestNumber) {
return ops.io().newOutputFile(
ops.metadataFileLocation(FileFormat.AVRO.addExtension(commitUUID + "-m" + manifestNumber)));
}
protected long snapshotId() {
if (snapshotId == null) {
this.snapshotId = ops.newSnapshotId();
}
return snapshotId;
}
private static ManifestFile addMetadata(TableOperations ops, ManifestFile manifest) {
try (ManifestReader reader = ManifestReader.read(
ops.io().newInputFile(manifest.path()), ops.current().specsById())) {
PartitionSummary stats = new PartitionSummary(ops.current().spec(manifest.partitionSpecId()));
int addedFiles = 0;
int existingFiles = 0;
int deletedFiles = 0;
Long snapshotId = null;
long maxSnapshotId = Long.MIN_VALUE;
for (ManifestEntry entry : reader.entries()) {
if (entry.snapshotId() > maxSnapshotId) {
maxSnapshotId = entry.snapshotId();
}
switch (entry.status()) {
case ADDED:
addedFiles += 1;
if (snapshotId == null) {
snapshotId = entry.snapshotId();
}
break;
case EXISTING:
existingFiles += 1;
break;
case DELETED:
deletedFiles += 1;
if (snapshotId == null) {
snapshotId = entry.snapshotId();
}
break;
}
stats.update(entry.file().partition());
}
if (snapshotId == null) {
// if no files were added or deleted, use the largest snapshot ID in the manifest
snapshotId = maxSnapshotId;
}
return new GenericManifestFile(manifest.path(), manifest.length(), manifest.partitionSpecId(),
snapshotId, addedFiles, existingFiles, deletedFiles, stats.summaries());
} catch (IOException e) {
throw new RuntimeIOException(e, "Failed to read manifest: %s", manifest.path());
}
}
private static void updateTotal(ImmutableMap.Builder<String, String> summaryBuilder,
Map<String, String> previousSummary, String totalProperty,
Map<String, String> currentSummary,
String addedProperty, String deletedProperty) {
String totalStr = previousSummary.get(totalProperty);
if (totalStr != null) {
try {
long newTotal = Long.parseLong(totalStr);
String addedStr = currentSummary.get(addedProperty);
if (newTotal >= 0 && addedStr != null) {
newTotal += Long.parseLong(addedStr);
}
String deletedStr = currentSummary.get(deletedProperty);
if (newTotal >= 0 && deletedStr != null) {
newTotal -= Long.parseLong(deletedStr);
}
if (newTotal >= 0) {
summaryBuilder.put(totalProperty, String.valueOf(newTotal));
}
} catch (NumberFormatException e) {
// ignore and do not add total
}
}
}
}
| 1 | 17,121 | Why was this change needed? | apache-iceberg | java |
@@ -53,12 +53,14 @@ def get_if_raw_addr(ifname):
try:
fd = os.popen("%s %s" % (conf.prog.ifconfig, ifname))
except OSError, msg:
- raise Scapy_Exception("Failed to execute ifconfig: (%s)" % msg)
+ warning("Failed to execute ifconfig: (%s)" % msg)
+ return "\0\0\0\0"
# Get IPv4 addresses
addresses = [l for l in fd.readlines() if l.find("netmask") >= 0]
if not addresses:
- raise Scapy_Exception("No IPv4 address found on %s !" % ifname)
+ warning("No IPv4 address found on %s !" % ifname)
+ return "\0\0\0\0"
# Pack the first address
address = addresses[0].split(' ')[1] | 1 | # Guillaume Valadon <[email protected]>
"""
Scapy *BSD native support - core
"""
from scapy.config import conf
from scapy.error import Scapy_Exception
from scapy.data import ARPHDR_LOOPBACK, ARPHDR_ETHER
from scapy.arch.common import get_if
from scapy.consts import LOOPBACK_NAME
from scapy.utils import warning
from scapy.arch.bpf.consts import *
import os
import socket
import fcntl
import struct
from ctypes import cdll, cast, pointer, POINTER, Structure
from ctypes import c_uint, c_uint32, c_int, c_ulong, c_char_p, c_ushort, c_ubyte
from ctypes.util import find_library
# ctypes definitions
LIBC = cdll.LoadLibrary(find_library("libc"))
LIBC.ioctl.argtypes = [c_int, c_ulong, c_char_p]
LIBC.ioctl.restype = c_int
class bpf_insn(Structure):
""""The BPF instruction data structure"""
_fields_ = [("code", c_ushort),
("jt", c_ubyte),
("jf", c_ubyte),
("k", c_uint32)]
class bpf_program(Structure):
""""Structure for BIOCSETF"""
_fields_ = [("bf_len", c_uint),
("bf_insns", POINTER(bpf_insn))]
# Addresses manipulation functions
def get_if_raw_addr(ifname):
"""Returns the IPv4 address configured on 'ifname', packed with inet_pton."""
# Get ifconfig output
try:
fd = os.popen("%s %s" % (conf.prog.ifconfig, ifname))
except OSError, msg:
raise Scapy_Exception("Failed to execute ifconfig: (%s)" % msg)
# Get IPv4 addresses
addresses = [l for l in fd.readlines() if l.find("netmask") >= 0]
if not addresses:
raise Scapy_Exception("No IPv4 address found on %s !" % ifname)
# Pack the first address
address = addresses[0].split(' ')[1]
return socket.inet_pton(socket.AF_INET, address)
def get_if_raw_hwaddr(ifname):
"""Returns the packed MAC address configured on 'ifname'."""
NULL_MAC_ADDRESS = '\x00'*6
# Handle the loopback interface separately
if ifname == LOOPBACK_NAME:
return (ARPHDR_LOOPBACK, NULL_MAC_ADDRESS)
# Get ifconfig output
try:
fd = os.popen("%s %s" % (conf.prog.ifconfig, ifname))
except OSError, msg:
warning("Failed to execute ifconfig: (%s)" % msg)
raise Scapy_Exception("Failed to execute ifconfig: (%s)" % msg)
# Get MAC addresses
addresses = [l for l in fd.readlines() if l.find("ether") >= 0 or
l.find("lladdr") >= 0 or
l.find("address") >= 0]
if not addresses:
raise Scapy_Exception("No MAC address found on %s !" % ifname)
# Pack and return the MAC address
mac = addresses[0].split(' ')[1]
mac = [chr(int(b, 16)) for b in mac.split(':')]
return (ARPHDR_ETHER, ''.join(mac))
# BPF specific functions
def get_dev_bpf():
"""Returns an opened BPF file object"""
# Get the first available BPF handle
for bpf in range(0, 8):
try:
fd = os.open("/dev/bpf%i" % bpf, os.O_RDWR)
return (fd, bpf)
except OSError, err:
continue
raise Scapy_Exception("No /dev/bpf handle is available !")
def attach_filter(fd, iface, bpf_filter_string):
"""Attach a BPF filter to the BPF file descriptor"""
# Retrieve the BPF byte code in decimal
command = "%s -i %s -ddd -s 1600 '%s'" % (conf.prog.tcpdump, iface, bpf_filter_string)
try:
f = os.popen(command)
except OSError, msg:
raise Scapy_Exception("Failed to execute tcpdump: (%s)" % msg)
# Convert the byte code to a BPF program structure
lines = f.readlines()
if lines == []:
raise Scapy_Exception("Got an empty BPF filter from tcpdump !")
# Allocate BPF instructions
size = int(lines[0])
bpf_insn_a = bpf_insn * size
bip = bpf_insn_a()
# Fill the BPF instruction structures with the byte code
lines = lines[1:]
for i in xrange(len(lines)):
values = [int(v) for v in lines[i].split()]
bip[i].code = c_ushort(values[0])
bip[i].jt = c_ubyte(values[1])
bip[i].jf = c_ubyte(values[2])
bip[i].k = c_uint(values[3])
# Create the BPF program and assign it to the interface
bp = bpf_program(size, bip)
ret = LIBC.ioctl(c_int(fd), BIOCSETF, cast(pointer(bp), c_char_p))
if ret < 0:
raise Scapy_Exception("Can't attach the BPF filter !")
# Interface manipulation functions
def get_if_list():
"""Returns a list containing all network interfaces."""
# Get ifconfig output
try:
fd = os.popen("%s -a" % conf.prog.ifconfig)
except OSError, msg:
raise Scapy_Exception("Failed to execute ifconfig: (%s)" % msg)
# Get interfaces
interfaces = [line[:line.find(':')] for line in fd.readlines()
if ": flags" in line.lower()]
return interfaces
def get_working_ifaces():
"""
Returns an ordered list of interfaces that could be used with BPF.
Note: the order mimics pcap_findalldevs() behavior
"""
# Only root is allowed to perform the following ioctl() call
if os.getuid() != 0:
return []
# Test all network interfaces
interfaces = []
for ifname in get_if_list():
# Unlike pcap_findalldevs(), we do not care of loopback interfaces.
if ifname == LOOPBACK_NAME:
continue
# Get interface flags
try:
result = get_if(ifname, SIOCGIFFLAGS)
except IOError, msg:
warning("ioctl(SIOCGIFFLAGS) failed on %s !" % ifname)
continue
# Convert flags
ifflags = struct.unpack("16xH14x", result)[0]
if ifflags & 0x1: # IFF_UP
# Get a BPF handle
fd, _ = get_dev_bpf()
if fd is None:
raise Scapy_Exception("No /dev/bpf are available !")
# Check if the interface can be used
try:
fcntl.ioctl(fd, BIOCSETIF, struct.pack("16s16x", ifname))
interfaces.append((ifname, int(ifname[-1])))
except IOError, err:
pass
# Close the file descriptor
os.close(fd)
# Sort to mimic pcap_findalldevs() order
interfaces.sort(lambda (ifname_left, ifid_left),
(ifname_right, ifid_right): ifid_left-ifid_right)
return interfaces
def get_working_if():
"""Returns the first interface than can be used with BPF"""
ifaces = get_working_ifaces()
if not ifaces:
# A better interface will be selected later using the routing table
return LOOPBACK_NAME
return ifaces[0][0]
| 1 | 9,068 | can you use this opportunity to remove the useless list creation from `.readlines()`? (`addresses = [l for l in fd if l.find("netmask") >= 0]`) | secdev-scapy | py |
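
The reviewer's `.readlines()` suggestion, sketched with a stand-in for the `popen` file object (the ifconfig output line is made up):

```python
import io

# stand-in for fd = os.popen("ifconfig <ifname>")
fd = io.StringIO(u"inet 10.0.0.1 netmask 0xffffff00\n"
                 u"ether 00:11:22:33:44:55\n")

# before: .readlines() materializes every line into a throwaway list first
# addresses = [l for l in fd.readlines() if l.find("netmask") >= 0]

# after: a file object already iterates line by line, so the list creation goes away
addresses = [l for l in fd if "netmask" in l]
print(addresses)  # ['inet 10.0.0.1 netmask 0xffffff00\n']
```
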
@@ -51,8 +51,8 @@ test_name "C100554: \
message = "The plan was expected to notify but did not"
assert_match(/Notice:/, result.stdout, message)
winrm_nodes.each do |node|
- message = "The plan was expceted to mention the host #{node.hostname} with _output"
- assert_match(/#{node.hostname}"=>{"_output"=>/, result.stdout, message)
+ message = "The plan was expected to mention the host #{node.hostname} with _output"
+ assert_match(/#{node.hostname}","status":"success"/, result.stdout, message)
end
winrm_nodes.each do |node| | 1 | require 'bolt_command_helper'
extend Acceptance::BoltCommandHelper
test_name "C100554: \
bolt plan run executes puppet plan on remote hosts via winrm" do
winrm_nodes = select_hosts(roles: ['winrm'])
skip_test('no applicable nodes to test on') if winrm_nodes.empty?
dir = bolt.tmpdir('C100554')
testdir = winrm_nodes[0].tmpdir('C100554')
step "create test dir on winrm nodes" do
winrm_nodes.each do |node|
on(node, "mkdir #{testdir}", acceptable_exit_codes: [0, 1])
end
end
step "create plan on bolt controller" do
on(bolt, "mkdir -p #{dir}/modules/test/{tasks,plans}")
create_remote_file(bolt, "#{dir}/modules/test/tasks/a_win.ps1", <<-FILE)
(echo "Line one from task a") > #{testdir}/C100554_plan_artifact.txt
FILE
create_remote_file(bolt, "#{dir}/modules/test/tasks/b_win.ps1", <<-FILE)
(echo "Line two from task b") >> #{testdir}/C100554_plan_artifact.txt
FILE
create_remote_file(bolt,
"#{dir}/modules/test/plans/my_win_plan.pp", <<-FILE)
plan test::my_win_plan($nodes) {
$nodes_array = $nodes.split(',')
notice("${run_task(test::a_win, $nodes_array)}")
notice("${run_task(test::b_win, $nodes_array)}")
}
FILE
end
step "execute `bolt plan run` via WinRM" do
user = ENV['WINRM_USER']
password = ENV['WINRM_PASSWORD']
nodes_csv = winrm_nodes.map { |host| "winrm://#{host.hostname}" }.join(',')
bolt_command = "bolt plan run test::my_win_plan nodes=#{nodes_csv}"
flags = {
'--modulepath' => "#{dir}/modules",
'-u' => user,
'-p' => password,
'--insecure' => nil
}
result = bolt_command_on(bolt, bolt_command, flags)
message = "The plan was expected to notify but did not"
assert_match(/Notice:/, result.stdout, message)
winrm_nodes.each do |node|
message = "The plan was expceted to mention the host #{node.hostname} with _output"
assert_match(/#{node.hostname}"=>{"_output"=>/, result.stdout, message)
end
winrm_nodes.each do |node|
command = "type #{testdir}/C100554_plan_artifact.txt"
on(node, powershell(command), accept_all_exit_codes: true) do |res|
type_msg = "The powershell command 'type' was not successful"
assert_equal(res.exit_code, 0, type_msg)
msg = 'The expected contents of the plan artifact were not observed'
assert_match(/Line one from task a/, res.stdout, msg)
assert_match(/Line two from task b/, res.stdout, msg)
end
end
end
end
| 1 | 7,596 | Why does this check differ from `plan_ssh.rb`? | puppetlabs-bolt | rb |
@@ -10,8 +10,8 @@ import (
"encoding/json"
"strconv"
+ "github.com/iotexproject/go-fsm"
"github.com/prometheus/client_golang/prometheus"
- "github.com/zjshen14/go-fsm"
"go.uber.org/zap"
"github.com/iotexproject/iotex-core/consensus" | 1 | // Copyright (c) 2018 IoTeX
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package itx
import (
"encoding/json"
"strconv"
"github.com/prometheus/client_golang/prometheus"
"github.com/zjshen14/go-fsm"
"go.uber.org/zap"
"github.com/iotexproject/iotex-core/consensus"
"github.com/iotexproject/iotex-core/consensus/scheme/rolldpos"
"github.com/iotexproject/iotex-core/dispatcher"
"github.com/iotexproject/iotex-core/pkg/log"
)
// TODO: HeartbeatHandler breaks the encapsulation of a few structs to inspect their internal status; we need to find a
// better approach to do so in the future
var heartbeatMtc = prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Name: "iotex_heartbeat_status",
Help: "Node heartbeat status.",
},
[]string{"status_type", "source"},
)
func init() {
prometheus.MustRegister(heartbeatMtc)
}
// HeartbeatHandler is the handler to periodically log the system key metrics
type HeartbeatHandler struct {
s *Server
}
// NewHeartbeatHandler instantiates a HeartbeatHandler instance
func NewHeartbeatHandler(s *Server) *HeartbeatHandler {
return &HeartbeatHandler{s: s}
}
// Log executes the logging logic
func (h *HeartbeatHandler) Log() {
// Network metrics
p2pAgent := h.s.P2PAgent()
// Dispatcher metrics
dp, ok := h.s.Dispatcher().(*dispatcher.IotxDispatcher)
if !ok {
log.L().Error("dispatcher is not the instance of IotxDispatcher")
return
}
numDPEvts := len(*dp.EventChan())
dpEvtsAudit, err := json.Marshal(dp.EventAudit())
if err != nil {
log.L().Error("error when serializing the dispatcher event audit map")
return
}
numPeers := len(p2pAgent.Neighbors())
log.L().Info("Node status.",
zap.Int("numPeers", numPeers),
zap.Int("pendingDispatcherEvents", numDPEvts),
zap.String("pendingDispatcherEventsAudit", string(dpEvtsAudit)))
heartbeatMtc.WithLabelValues("numPeers", "node").Set(float64(numPeers))
heartbeatMtc.WithLabelValues("pendingDispatcherEvents", "node").Set(float64(numDPEvts))
// chain service
for _, c := range h.s.chainservices {
// Consensus metrics
cs, ok := c.Consensus().(*consensus.IotxConsensus)
if !ok {
log.L().Info("consensus is not the instance of IotxConsensus.")
return
}
rolldpos, ok := cs.Scheme().(*rolldpos.RollDPoS)
numPendingEvts := 0
var state fsm.State
if ok {
numPendingEvts = rolldpos.NumPendingEvts()
state = rolldpos.CurrentState()
} else {
log.L().Debug("scheme is not the instance of RollDPoS")
}
// Block metrics
height := c.Blockchain().TipHeight()
actPoolSize := c.ActionPool().GetSize()
actPoolCapacity := c.ActionPool().GetCapacity()
targetHeight := c.BlockSync().TargetHeight()
log.L().Info("chain service status",
zap.Int("rolldposEvents", numPendingEvts),
zap.String("fsmState", string(state)),
zap.Uint64("blockchainHeight", height),
zap.Uint64("actpoolSize", actPoolSize),
zap.Uint64("actpoolCapacity", actPoolCapacity),
zap.Uint32("chainID", c.ChainID()),
zap.Uint64("targetHeight", targetHeight),
)
chainIDStr := strconv.FormatUint(uint64(c.ChainID()), 10)
heartbeatMtc.WithLabelValues("pendingRolldposEvents", chainIDStr).Set(float64(numPendingEvts))
heartbeatMtc.WithLabelValues("blockchainHeight", chainIDStr).Set(float64(height))
heartbeatMtc.WithLabelValues("actpoolSize", chainIDStr).Set(float64(actPoolSize))
heartbeatMtc.WithLabelValues("actpoolCapacity", chainIDStr).Set(float64(actPoolCapacity))
heartbeatMtc.WithLabelValues("targetHeight", chainIDStr).Set(float64(targetHeight))
}
}
| 1 | 14,398 | File is not `goimports`-ed (from `goimports`) | iotexproject-iotex-core | go |
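The review comment above notes the file is not goimports-ed; a sketch of the grouping goimports would typically produce for the imports touched by this patch — standard library first, then alphabetically sorted third-party groups (the exact group boundaries depend on the blank lines already in the file):

package itx

import (
	// Standard library group.
	"encoding/json"
	"strconv"

	// External dependencies, sorted alphabetically within the group.
	"github.com/iotexproject/go-fsm"
	"github.com/prometheus/client_golang/prometheus"
	"go.uber.org/zap"

	// Project-local packages kept in their own group.
	"github.com/iotexproject/iotex-core/consensus"
	"github.com/iotexproject/iotex-core/consensus/scheme/rolldpos"
	"github.com/iotexproject/iotex-core/dispatcher"
	"github.com/iotexproject/iotex-core/pkg/log"
)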
@@ -16,7 +16,6 @@ namespace Microsoft.AspNet.Server.Kestrel.Http
{
public const int MaxPooledWriteReqs = 1024;
- private const int _maxPendingWrites = 3;
private const int _maxBytesPreCompleted = 65536;
private const int _initialTaskQueues = 64;
     private const int _maxPooledWriteContexts = 32;
| 1 |
// Copyright (c) .NET Foundation. All rights reserved.
// Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.IO;
using System.Threading;
using System.Threading.Tasks;
using Microsoft.AspNet.Server.Kestrel.Infrastructure;
using Microsoft.AspNet.Server.Kestrel.Networking;
namespace Microsoft.AspNet.Server.Kestrel.Http
{
public class SocketOutput : ISocketOutput
{
public const int MaxPooledWriteReqs = 1024;
private const int _maxPendingWrites = 3;
private const int _maxBytesPreCompleted = 65536;
private const int _initialTaskQueues = 64;
private const int _maxPooledWriteContexts = 32;
private static readonly WaitCallback _returnBlocks = (state) => ReturnBlocks((MemoryPoolBlock2)state);
private readonly KestrelThread _thread;
private readonly UvStreamHandle _socket;
private readonly Connection _connection;
private readonly long _connectionId;
private readonly IKestrelTrace _log;
private readonly IThreadPool _threadPool;
// This locks all access to _tail, _isProducing and _returnFromOnProducingComplete.
// _head does not require a lock, since it is only used in the ctor and uv thread.
private readonly object _returnLock = new object();
private MemoryPoolBlock2 _head;
private MemoryPoolBlock2 _tail;
private MemoryPoolIterator2 _lastStart;
        // This locks access to all of the fields below
private readonly object _contextLock = new object();
// The number of write operations that have been scheduled so far
// but have not completed.
private int _writesPending = 0;
private int _numBytesPreCompleted = 0;
private Exception _lastWriteError;
private WriteContext _nextWriteContext;
private readonly Queue<TaskCompletionSource<object>> _tasksPending;
private readonly Queue<TaskCompletionSource<object>> _tasksCompleted;
private readonly Queue<WriteContext> _writeContextPool;
private readonly Queue<UvWriteReq> _writeReqPool;
public SocketOutput(
KestrelThread thread,
UvStreamHandle socket,
MemoryPool2 memory,
Connection connection,
long connectionId,
IKestrelTrace log,
IThreadPool threadPool,
Queue<UvWriteReq> writeReqPool)
{
_thread = thread;
_socket = socket;
_connection = connection;
_connectionId = connectionId;
_log = log;
_threadPool = threadPool;
_tasksPending = new Queue<TaskCompletionSource<object>>(_initialTaskQueues);
_tasksCompleted = new Queue<TaskCompletionSource<object>>(_initialTaskQueues);
_writeContextPool = new Queue<WriteContext>(_maxPooledWriteContexts);
_writeReqPool = writeReqPool;
_head = memory.Lease();
_tail = _head;
}
public Task WriteAsync(
ArraySegment<byte> buffer,
bool immediate = true,
bool socketShutdownSend = false,
bool socketDisconnect = false)
{
if (buffer.Count > 0)
{
var tail = ProducingStart();
tail.CopyFrom(buffer);
// We do our own accounting below
ProducingCompleteNoPreComplete(tail);
}
TaskCompletionSource<object> tcs = null;
var scheduleWrite = false;
lock (_contextLock)
{
if (_nextWriteContext == null)
{
if (_writeContextPool.Count > 0)
{
_nextWriteContext = _writeContextPool.Dequeue();
}
else
{
_nextWriteContext = new WriteContext(this);
}
}
if (socketShutdownSend)
{
_nextWriteContext.SocketShutdownSend = true;
}
if (socketDisconnect)
{
_nextWriteContext.SocketDisconnect = true;
}
if (!immediate)
{
// immediate==false calls always return complete tasks, because there is guaranteed
// to be a subsequent immediate==true call which will go down one of the previous code-paths
_numBytesPreCompleted += buffer.Count;
}
else if (_lastWriteError == null &&
_tasksPending.Count == 0 &&
_numBytesPreCompleted + buffer.Count <= _maxBytesPreCompleted)
{
// Complete the write task immediately if all previous write tasks have been completed,
// the buffers haven't grown too large, and the last write to the socket succeeded.
_numBytesPreCompleted += buffer.Count;
}
else
{
                    // immediate write, which is not eligible for instant completion above
tcs = new TaskCompletionSource<object>(buffer.Count);
_tasksPending.Enqueue(tcs);
}
if (_writesPending < _maxPendingWrites && immediate)
{
scheduleWrite = true;
_writesPending++;
}
}
if (scheduleWrite)
{
ScheduleWrite();
}
// Return TaskCompletionSource's Task if set, otherwise completed Task
return tcs?.Task ?? TaskUtilities.CompletedTask;
}
public void End(ProduceEndType endType)
{
switch (endType)
{
case ProduceEndType.SocketShutdownSend:
WriteAsync(default(ArraySegment<byte>),
immediate: true,
socketShutdownSend: true,
socketDisconnect: false);
break;
case ProduceEndType.SocketDisconnect:
WriteAsync(default(ArraySegment<byte>),
immediate: true,
socketShutdownSend: false,
socketDisconnect: true);
break;
}
}
public MemoryPoolIterator2 ProducingStart()
{
lock (_returnLock)
{
Debug.Assert(_lastStart.IsDefault);
if (_tail == null)
{
throw new IOException("The socket has been closed.");
}
_lastStart = new MemoryPoolIterator2(_tail, _tail.End);
return _lastStart;
}
}
public void ProducingComplete(MemoryPoolIterator2 end)
{
Debug.Assert(!_lastStart.IsDefault);
int bytesProduced, buffersIncluded;
BytesBetween(_lastStart, end, out bytesProduced, out buffersIncluded);
lock (_contextLock)
{
_numBytesPreCompleted += bytesProduced;
}
ProducingCompleteNoPreComplete(end);
}
private void ProducingCompleteNoPreComplete(MemoryPoolIterator2 end)
{
MemoryPoolBlock2 blockToReturn = null;
lock (_returnLock)
{
Debug.Assert(!_lastStart.IsDefault);
// If the socket has been closed, return the produced blocks
// instead of advancing the now non-existent tail.
if (_tail != null)
{
_tail = end.Block;
_tail.End = end.Index;
}
else
{
blockToReturn = _lastStart.Block;
}
_lastStart = default(MemoryPoolIterator2);
}
if (blockToReturn != null)
{
ThreadPool.QueueUserWorkItem(_returnBlocks, blockToReturn);
}
}
private static void ReturnBlocks(MemoryPoolBlock2 block)
{
while (block != null)
{
var returningBlock = block;
block = returningBlock.Next;
returningBlock.Pool?.Return(returningBlock);
}
}
private void ScheduleWrite()
{
_thread.Post(_this => _this.WriteAllPending(), this);
}
// This is called on the libuv event loop
private void WriteAllPending()
{
WriteContext writingContext;
lock (_contextLock)
{
if (_nextWriteContext != null)
{
writingContext = _nextWriteContext;
_nextWriteContext = null;
}
else
{
_writesPending--;
return;
}
}
try
{
writingContext.DoWriteIfNeeded();
}
catch
{
lock (_contextLock)
{
                    // Lock instead of using Interlocked.Decrement so _writesPending
// doesn't change in the middle of executing other synchronized code.
_writesPending--;
}
throw;
}
}
// This is called on the libuv event loop
private void OnWriteCompleted(WriteContext writeContext)
{
var bytesWritten = writeContext.ByteCount;
var status = writeContext.WriteStatus;
var error = writeContext.WriteError;
if (error != null)
{
_lastWriteError = new IOException(error.Message, error);
// Abort the connection for any failed write.
_connection.Abort();
}
bool scheduleWrite = false;
lock (_contextLock)
{
PoolWriteContext(writeContext);
if (_nextWriteContext != null)
{
scheduleWrite = true;
}
else
{
_writesPending--;
}
// _numBytesPreCompleted can temporarily go negative in the event there are
// completed writes that we haven't triggered callbacks for yet.
_numBytesPreCompleted -= bytesWritten;
// bytesLeftToBuffer can be greater than _maxBytesPreCompleted
// This allows large writes to complete once they've actually finished.
var bytesLeftToBuffer = _maxBytesPreCompleted - _numBytesPreCompleted;
while (_tasksPending.Count > 0 &&
(int)(_tasksPending.Peek().Task.AsyncState) <= bytesLeftToBuffer)
{
var tcs = _tasksPending.Dequeue();
var bytesToWrite = (int)tcs.Task.AsyncState;
_numBytesPreCompleted += bytesToWrite;
bytesLeftToBuffer -= bytesToWrite;
_tasksCompleted.Enqueue(tcs);
}
}
while (_tasksCompleted.Count > 0)
{
var tcs = _tasksCompleted.Dequeue();
if (_lastWriteError == null)
{
_threadPool.Complete(tcs);
}
else
{
_threadPool.Error(tcs, _lastWriteError);
}
}
_log.ConnectionWriteCallback(_connectionId, status);
if (scheduleWrite)
{
ScheduleWrite();
}
_tasksCompleted.Clear();
}
// This is called on the libuv event loop
private void ReturnAllBlocks()
{
lock (_returnLock)
{
var block = _head;
while (block != _tail)
{
var returnBlock = block;
block = block.Next;
returnBlock.Pool?.Return(returnBlock);
}
// Only return the _tail if we aren't between ProducingStart/Complete calls
if (_lastStart.IsDefault)
{
_tail.Pool?.Return(_tail);
}
_head = null;
_tail = null;
}
}
private void PoolWriteContext(WriteContext writeContext)
{
// called inside _contextLock
if (_writeContextPool.Count < _maxPooledWriteContexts)
{
writeContext.Reset();
_writeContextPool.Enqueue(writeContext);
}
}
void ISocketOutput.Write(ArraySegment<byte> buffer, bool immediate)
{
var task = WriteAsync(buffer, immediate);
if (task.Status == TaskStatus.RanToCompletion)
{
return;
}
else
{
task.GetAwaiter().GetResult();
}
}
Task ISocketOutput.WriteAsync(ArraySegment<byte> buffer, bool immediate, CancellationToken cancellationToken)
{
return WriteAsync(buffer, immediate);
}
private static void BytesBetween(MemoryPoolIterator2 start, MemoryPoolIterator2 end, out int bytes, out int buffers)
{
if (start.Block == end.Block)
{
bytes = end.Index - start.Index;
buffers = 1;
return;
}
bytes = start.Block.Data.Offset + start.Block.Data.Count - start.Index;
buffers = 1;
for (var block = start.Block.Next; block != end.Block; block = block.Next)
{
bytes += block.Data.Count;
buffers++;
}
bytes += end.Index - end.Block.Data.Offset;
buffers++;
}
private class WriteContext
{
private static WaitCallback _returnWrittenBlocks = (state) => ReturnWrittenBlocks((MemoryPoolBlock2)state);
private SocketOutput Self;
private UvWriteReq _writeReq;
private MemoryPoolIterator2 _lockedStart;
private MemoryPoolIterator2 _lockedEnd;
private int _bufferCount;
public int ByteCount;
public bool SocketShutdownSend;
public bool SocketDisconnect;
public int WriteStatus;
public Exception WriteError;
public int ShutdownSendStatus;
public WriteContext(SocketOutput self)
{
Self = self;
}
/// <summary>
/// First step: initiate async write if needed, otherwise go to next step
/// </summary>
public void DoWriteIfNeeded()
{
LockWrite();
if (ByteCount == 0 || Self._socket.IsClosed)
{
DoShutdownIfNeeded();
return;
}
// Sample values locally in case write completes inline
// to allow block to be Reset and still complete this function
var lockedEndBlock = _lockedEnd.Block;
var lockedEndIndex = _lockedEnd.Index;
if (Self._writeReqPool.Count > 0)
{
_writeReq = Self._writeReqPool.Dequeue();
}
else
{
_writeReq = new UvWriteReq(Self._log);
_writeReq.Init(Self._thread.Loop);
}
_writeReq.Write(Self._socket, _lockedStart, _lockedEnd, _bufferCount, (_writeReq, status, error, state) =>
{
var writeContext = (WriteContext)state;
writeContext.PoolWriteReq(writeContext._writeReq);
writeContext._writeReq = null;
writeContext.ScheduleReturnFullyWrittenBlocks();
writeContext.WriteStatus = status;
writeContext.WriteError = error;
writeContext.DoShutdownIfNeeded();
}, this);
Self._head = lockedEndBlock;
Self._head.Start = lockedEndIndex;
}
/// <summary>
/// Second step: initiate async shutdown if needed, otherwise go to next step
/// </summary>
public void DoShutdownIfNeeded()
{
if (SocketShutdownSend == false || Self._socket.IsClosed)
{
DoDisconnectIfNeeded();
return;
}
var shutdownReq = new UvShutdownReq(Self._log);
shutdownReq.Init(Self._thread.Loop);
shutdownReq.Shutdown(Self._socket, (_shutdownReq, status, state) =>
{
_shutdownReq.Dispose();
var _this = (WriteContext)state;
_this.ShutdownSendStatus = status;
_this.Self._log.ConnectionWroteFin(_this.Self._connectionId, status);
_this.DoDisconnectIfNeeded();
}, this);
}
/// <summary>
/// Third step: disconnect socket if needed, otherwise this work item is complete
/// </summary>
public void DoDisconnectIfNeeded()
{
if (SocketDisconnect == false || Self._socket.IsClosed)
{
Complete();
return;
}
Self._socket.Dispose();
Self.ReturnAllBlocks();
Self._log.ConnectionStop(Self._connectionId);
Complete();
}
public void Complete()
{
Self.OnWriteCompleted(this);
}
private void PoolWriteReq(UvWriteReq writeReq)
{
if (Self._writeReqPool.Count < MaxPooledWriteReqs)
{
Self._writeReqPool.Enqueue(writeReq);
}
else
{
writeReq.Dispose();
}
}
private void ScheduleReturnFullyWrittenBlocks()
{
var block = _lockedStart.Block;
var end = _lockedEnd.Block;
if (block == end)
{
end.Unpin();
return;
}
while (block.Next != end)
{
block = block.Next;
block.Unpin();
}
block.Next = null;
ThreadPool.QueueUserWorkItem(_returnWrittenBlocks, _lockedStart.Block);
}
private static void ReturnWrittenBlocks(MemoryPoolBlock2 block)
{
while (block != null)
{
var returnBlock = block;
block = block.Next;
returnBlock.Unpin();
returnBlock.Pool?.Return(returnBlock);
}
}
private void LockWrite()
{
var head = Self._head;
var tail = Self._tail;
if (head == null || tail == null)
{
                // ReturnAllBlocks has already been called. Nothing to do here.
// Write will no-op since _byteCount will remain 0.
return;
}
_lockedStart = new MemoryPoolIterator2(head, head.Start);
_lockedEnd = new MemoryPoolIterator2(tail, tail.End);
BytesBetween(_lockedStart, _lockedEnd, out ByteCount, out _bufferCount);
}
public void Reset()
{
_lockedStart = default(MemoryPoolIterator2);
_lockedEnd = default(MemoryPoolIterator2);
_bufferCount = 0;
ByteCount = 0;
SocketShutdownSend = false;
SocketDisconnect = false;
WriteStatus = 0;
WriteError = null;
ShutdownSendStatus = 0;
}
}
}
}
| 1 | 7,756 | It was @lodejard who initially suggested this pattern. I think that it had something to do about prioritizing future writes even if there was a pending write operation ready to handle the newly requested write. I'm not sure I fully understood the explanation for having up to 3 pending write requests, because only having one queued write at a time still makes sense to me. I'll talk to him about it. | aspnet-KestrelHttpServer | .cs |
@@ -34,6 +34,8 @@ int main (int argc, char * const * argv)
("batch_size",boost::program_options::value<std::size_t> (), "Increase sideband batch size, default 512")
("debug_block_count", "Display the number of block")
("debug_bootstrap_generate", "Generate bootstrap sequence of blocks")
+ ("debug_clear_online_weight", "clear online_weights table")
+ ("debug_dump_online_weight", "dump online_weights table")
("debug_dump_representatives", "List representatives and weights")
("debug_account_count", "Display the number of accounts")
("debug_mass_activity", "Generates fake debug activity") | 1 | #include <nano/lib/utility.hpp>
#include <nano/nano_node/daemon.hpp>
#include <nano/node/cli.hpp>
#include <nano/node/node.hpp>
#include <nano/node/rpc.hpp>
#include <nano/node/testing.hpp>
#include <sstream>
#include <argon2.h>
#include <boost/lexical_cast.hpp>
#include <boost/program_options.hpp>
int main (int argc, char * const * argv)
{
nano::set_umask ();
boost::program_options::options_description description ("Command line options");
nano::add_node_options (description);
// clang-format off
description.add_options ()
("help", "Print out options")
("version", "Prints out version")
("daemon", "Start node daemon")
("disable_backup", "Disable wallet automatic backups")
("disable_lazy_bootstrap", "Disables lazy bootstrap")
("disable_legacy_bootstrap", "Disables legacy bootstrap")
("disable_wallet_bootstrap", "Disables wallet lazy bootstrap")
("disable_bootstrap_listener", "Disables bootstrap listener (incoming connections)")
("disable_unchecked_cleanup", "Disables periodic cleanup of old records from unchecked table")
("disable_unchecked_drop", "Disables drop of unchecked table at startup")
("fast_bootstrap", "Increase bootstrap speed for high end nodes with higher limits")
("batch_size",boost::program_options::value<std::size_t> (), "Increase sideband batch size, default 512")
("debug_block_count", "Display the number of block")
("debug_bootstrap_generate", "Generate bootstrap sequence of blocks")
("debug_dump_representatives", "List representatives and weights")
("debug_account_count", "Display the number of accounts")
("debug_mass_activity", "Generates fake debug activity")
("debug_profile_generate", "Profile work generation")
("debug_opencl", "OpenCL work generation")
("debug_profile_verify", "Profile work verification")
("debug_profile_kdf", "Profile kdf function")
("debug_verify_profile", "Profile signature verification")
("debug_verify_profile_batch", "Profile batch signature verification")
("debug_profile_bootstrap", "Profile bootstrap style blocks processing (at least 10GB of free storage space required)")
("debug_profile_sign", "Profile signature generation")
("debug_profile_process", "Profile active blocks processing (only for nano_test_network)")
("debug_profile_votes", "Profile votes processing (only for nano_test_network)")
("debug_rpc", "Read an RPC command from stdin and invoke it. Network operations will have no effect.")
("debug_validate_blocks", "Check all blocks for correct hash, signature, work value")
("debug_peers", "Display peer IPv6:port connections")
("platform", boost::program_options::value<std::string> (), "Defines the <platform> for OpenCL commands")
("device", boost::program_options::value<std::string> (), "Defines <device> for OpenCL command")
("threads", boost::program_options::value<std::string> (), "Defines <threads> count for OpenCL command");
// clang-format on
boost::program_options::variables_map vm;
try
{
boost::program_options::store (boost::program_options::parse_command_line (argc, argv, description), vm);
}
catch (boost::program_options::error const & err)
{
std::cerr << err.what () << std::endl;
return 1;
}
boost::program_options::notify (vm);
int result (0);
auto data_path_it = vm.find ("data_path");
if (data_path_it == vm.end ())
{
std::string error_string;
if (!nano::migrate_working_path (error_string))
{
std::cerr << error_string << std::endl;
return 1;
}
}
boost::filesystem::path data_path ((data_path_it != vm.end ()) ? data_path_it->second.as<std::string> () : nano::working_path ());
auto ec = nano::handle_node_options (vm);
if (ec == nano::error_cli::unknown_command)
{
if (vm.count ("daemon") > 0)
{
nano_daemon::daemon daemon;
nano::node_flags flags;
auto batch_size_it = vm.find ("batch_size");
if (batch_size_it != vm.end ())
{
flags.sideband_batch_size = batch_size_it->second.as<size_t> ();
}
flags.disable_backup = (vm.count ("disable_backup") > 0);
flags.disable_lazy_bootstrap = (vm.count ("disable_lazy_bootstrap") > 0);
flags.disable_legacy_bootstrap = (vm.count ("disable_legacy_bootstrap") > 0);
flags.disable_wallet_bootstrap = (vm.count ("disable_wallet_bootstrap") > 0);
flags.disable_bootstrap_listener = (vm.count ("disable_bootstrap_listener") > 0);
flags.disable_unchecked_cleanup = (vm.count ("disable_unchecked_cleanup") > 0);
flags.disable_unchecked_drop = (vm.count ("disable_unchecked_drop") > 0);
flags.fast_bootstrap = (vm.count ("fast_bootstrap") > 0);
daemon.run (data_path, flags);
}
else if (vm.count ("debug_block_count"))
{
nano::inactive_node node (data_path);
auto transaction (node.node->store.tx_begin ());
std::cout << boost::str (boost::format ("Block count: %1%\n") % node.node->store.block_count (transaction).sum ());
}
else if (vm.count ("debug_bootstrap_generate"))
{
auto key_it = vm.find ("key");
if (key_it != vm.end ())
{
nano::uint256_union key;
if (!key.decode_hex (key_it->second.as<std::string> ()))
{
nano::keypair genesis (key.to_string ());
nano::work_pool work (std::numeric_limits<unsigned>::max (), nullptr);
std::cout << "Genesis: " << genesis.prv.data.to_string () << "\n"
<< "Public: " << genesis.pub.to_string () << "\n"
<< "Account: " << genesis.pub.to_account () << "\n";
nano::keypair landing;
std::cout << "Landing: " << landing.prv.data.to_string () << "\n"
<< "Public: " << landing.pub.to_string () << "\n"
<< "Account: " << landing.pub.to_account () << "\n";
for (auto i (0); i != 32; ++i)
{
nano::keypair rep;
std::cout << "Rep" << i << ": " << rep.prv.data.to_string () << "\n"
<< "Public: " << rep.pub.to_string () << "\n"
<< "Account: " << rep.pub.to_account () << "\n";
}
nano::uint128_t balance (std::numeric_limits<nano::uint128_t>::max ());
nano::open_block genesis_block (genesis.pub, genesis.pub, genesis.pub, genesis.prv, genesis.pub, work.generate (genesis.pub));
std::cout << genesis_block.to_json ();
std::cout.flush ();
nano::block_hash previous (genesis_block.hash ());
for (auto i (0); i != 8; ++i)
{
nano::uint128_t yearly_distribution (nano::uint128_t (1) << (127 - (i == 7 ? 6 : i)));
auto weekly_distribution (yearly_distribution / 52);
for (auto j (0); j != 52; ++j)
{
assert (balance > weekly_distribution);
balance = balance < (weekly_distribution * 2) ? 0 : balance - weekly_distribution;
nano::send_block send (previous, landing.pub, balance, genesis.prv, genesis.pub, work.generate (previous));
previous = send.hash ();
std::cout << send.to_json ();
std::cout.flush ();
}
}
}
else
{
std::cerr << "Invalid key\n";
result = -1;
}
}
else
{
std::cerr << "Bootstrapping requires one <key> option\n";
result = -1;
}
}
else if (vm.count ("debug_dump_representatives"))
{
nano::inactive_node node (data_path);
auto transaction (node.node->store.tx_begin ());
nano::uint128_t total;
for (auto i (node.node->store.representation_begin (transaction)), n (node.node->store.representation_end ()); i != n; ++i)
{
nano::account account (i->first);
auto amount (node.node->store.representation_get (transaction, account));
total += amount;
std::cout << boost::str (boost::format ("%1% %2% %3%\n") % account.to_account () % amount.convert_to<std::string> () % total.convert_to<std::string> ());
}
std::map<nano::account, nano::uint128_t> calculated;
for (auto i (node.node->store.latest_begin (transaction)), n (node.node->store.latest_end ()); i != n; ++i)
{
nano::account_info info (i->second);
nano::block_hash rep_block (node.node->ledger.representative_calculated (transaction, info.head));
auto block (node.node->store.block_get (transaction, rep_block));
calculated[block->representative ()] += info.balance.number ();
}
total = 0;
for (auto i (calculated.begin ()), n (calculated.end ()); i != n; ++i)
{
total += i->second;
std::cout << boost::str (boost::format ("%1% %2% %3%\n") % i->first.to_account () % i->second.convert_to<std::string> () % total.convert_to<std::string> ());
}
}
else if (vm.count ("debug_account_count"))
{
nano::inactive_node node (data_path);
auto transaction (node.node->store.tx_begin ());
std::cout << boost::str (boost::format ("Frontier count: %1%\n") % node.node->store.account_count (transaction));
}
else if (vm.count ("debug_mass_activity"))
{
nano::system system (24000, 1);
uint32_t count (1000000);
system.generate_mass_activity (count, *system.nodes[0]);
}
else if (vm.count ("debug_profile_kdf"))
{
nano::uint256_union result;
nano::uint256_union salt (0);
std::string password ("");
while (true)
{
auto begin1 (std::chrono::high_resolution_clock::now ());
auto success (argon2_hash (1, nano::wallet_store::kdf_work, 1, password.data (), password.size (), salt.bytes.data (), salt.bytes.size (), result.bytes.data (), result.bytes.size (), NULL, 0, Argon2_d, 0x10));
(void)success;
auto end1 (std::chrono::high_resolution_clock::now ());
std::cerr << boost::str (boost::format ("Derivation time: %1%us\n") % std::chrono::duration_cast<std::chrono::microseconds> (end1 - begin1).count ());
}
}
else if (vm.count ("debug_profile_generate"))
{
nano::work_pool work (std::numeric_limits<unsigned>::max (), nullptr);
nano::change_block block (0, 0, nano::keypair ().prv, 0, 0);
std::cerr << "Starting generation profiling\n";
while (true)
{
block.hashables.previous.qwords[0] += 1;
auto begin1 (std::chrono::high_resolution_clock::now ());
block.block_work_set (work.generate (block.root ()));
auto end1 (std::chrono::high_resolution_clock::now ());
std::cerr << boost::str (boost::format ("%|1$ 12d|\n") % std::chrono::duration_cast<std::chrono::microseconds> (end1 - begin1).count ());
}
}
else if (vm.count ("debug_opencl"))
{
bool error (false);
nano::opencl_environment environment (error);
if (!error)
{
unsigned short platform (0);
auto platform_it = vm.find ("platform");
if (platform_it != vm.end ())
{
try
{
platform = boost::lexical_cast<unsigned short> (platform_it->second.as<std::string> ());
}
catch (boost::bad_lexical_cast &)
{
std::cerr << "Invalid platform id\n";
result = -1;
}
}
unsigned short device (0);
auto device_it = vm.find ("device");
if (device_it != vm.end ())
{
try
{
device = boost::lexical_cast<unsigned short> (device_it->second.as<std::string> ());
}
catch (boost::bad_lexical_cast &)
{
std::cerr << "Invalid device id\n";
result = -1;
}
}
unsigned threads (1024 * 1024);
auto threads_it = vm.find ("threads");
if (threads_it != vm.end ())
{
try
{
threads = boost::lexical_cast<unsigned> (threads_it->second.as<std::string> ());
}
catch (boost::bad_lexical_cast &)
{
std::cerr << "Invalid threads count\n";
result = -1;
}
}
if (!result)
{
error |= platform >= environment.platforms.size ();
if (!error)
{
error |= device >= environment.platforms[platform].devices.size ();
if (!error)
{
nano::logging logging;
auto opencl (nano::opencl_work::create (true, { platform, device, threads }, logging));
nano::work_pool work_pool (std::numeric_limits<unsigned>::max (), opencl ? [&opencl](nano::uint256_union const & root_a) {
return opencl->generate_work (root_a);
}
: std::function<boost::optional<uint64_t> (nano::uint256_union const &)> (nullptr));
nano::change_block block (0, 0, nano::keypair ().prv, 0, 0);
std::cerr << boost::str (boost::format ("Starting OpenCL generation profiling. Platform: %1%. Device: %2%. Threads: %3%\n") % platform % device % threads);
for (uint64_t i (0); true; ++i)
{
block.hashables.previous.qwords[0] += 1;
auto begin1 (std::chrono::high_resolution_clock::now ());
block.block_work_set (work_pool.generate (block.root ()));
auto end1 (std::chrono::high_resolution_clock::now ());
std::cerr << boost::str (boost::format ("%|1$ 12d|\n") % std::chrono::duration_cast<std::chrono::microseconds> (end1 - begin1).count ());
}
}
else
{
std::cout << "Not available device id\n"
<< std::endl;
result = -1;
}
}
else
{
std::cout << "Not available platform id\n"
<< std::endl;
result = -1;
}
}
}
else
{
std::cout << "Error initializing OpenCL" << std::endl;
result = -1;
}
}
else if (vm.count ("debug_profile_verify"))
{
nano::work_pool work (std::numeric_limits<unsigned>::max (), nullptr);
nano::change_block block (0, 0, nano::keypair ().prv, 0, 0);
std::cerr << "Starting verification profiling\n";
while (true)
{
block.hashables.previous.qwords[0] += 1;
auto begin1 (std::chrono::high_resolution_clock::now ());
for (uint64_t t (0); t < 1000000; ++t)
{
block.hashables.previous.qwords[0] += 1;
block.block_work_set (t);
nano::work_validate (block);
}
auto end1 (std::chrono::high_resolution_clock::now ());
std::cerr << boost::str (boost::format ("%|1$ 12d|\n") % std::chrono::duration_cast<std::chrono::microseconds> (end1 - begin1).count ());
}
}
else if (vm.count ("debug_verify_profile"))
{
nano::keypair key;
nano::uint256_union message;
nano::uint512_union signature;
signature = nano::sign_message (key.prv, key.pub, message);
auto begin (std::chrono::high_resolution_clock::now ());
for (auto i (0u); i < 1000; ++i)
{
nano::validate_message (key.pub, message, signature);
}
auto end (std::chrono::high_resolution_clock::now ());
std::cerr << "Signature verifications " << std::chrono::duration_cast<std::chrono::microseconds> (end - begin).count () << std::endl;
}
else if (vm.count ("debug_verify_profile_batch"))
{
nano::keypair key;
size_t batch_count (1000);
nano::uint256_union message;
nano::uint512_union signature (nano::sign_message (key.prv, key.pub, message));
std::vector<unsigned char const *> messages (batch_count, message.bytes.data ());
std::vector<size_t> lengths (batch_count, sizeof (message));
std::vector<unsigned char const *> pub_keys (batch_count, key.pub.bytes.data ());
std::vector<unsigned char const *> signatures (batch_count, signature.bytes.data ());
std::vector<int> verifications;
verifications.resize (batch_count);
auto begin (std::chrono::high_resolution_clock::now ());
nano::validate_message_batch (messages.data (), lengths.data (), pub_keys.data (), signatures.data (), batch_count, verifications.data ());
auto end (std::chrono::high_resolution_clock::now ());
std::cerr << "Batch signature verifications " << std::chrono::duration_cast<std::chrono::microseconds> (end - begin).count () << std::endl;
}
else if (vm.count ("debug_profile_sign"))
{
std::cerr << "Starting blocks signing profiling\n";
while (true)
{
nano::keypair key;
nano::block_hash latest (0);
auto begin1 (std::chrono::high_resolution_clock::now ());
for (uint64_t balance (0); balance < 1000; ++balance)
{
nano::send_block send (latest, key.pub, balance, key.prv, key.pub, 0);
latest = send.hash ();
}
auto end1 (std::chrono::high_resolution_clock::now ());
std::cerr << boost::str (boost::format ("%|1$ 12d|\n") % std::chrono::duration_cast<std::chrono::microseconds> (end1 - begin1).count ());
}
}
else if (vm.count ("debug_profile_process"))
{
if (nano::is_test_network)
{
nano::block_builder builder;
size_t num_accounts (100000);
				size_t num_iterations (5); // 100,000 * 5 * 2 = 1,000,000 blocks
				size_t max_blocks (2 * num_accounts * num_iterations + num_accounts * 2); // 1,000,000 + 2 * 100,000 = 1,200,000 blocks
std::cerr << boost::str (boost::format ("Starting pregenerating %1% blocks\n") % max_blocks);
nano::system system (24000, 1);
nano::node_init init;
nano::work_pool work (std::numeric_limits<unsigned>::max (), nullptr);
nano::logging logging;
auto path (nano::unique_path ());
logging.init (path);
auto node (std::make_shared<nano::node> (init, system.io_ctx, 24001, path, system.alarm, logging, work));
nano::block_hash genesis_latest (node->latest (nano::test_genesis_key.pub));
nano::uint128_t genesis_balance (std::numeric_limits<nano::uint128_t>::max ());
// Generating keys
std::vector<nano::keypair> keys (num_accounts);
std::vector<nano::block_hash> frontiers (num_accounts);
std::vector<nano::uint128_t> balances (num_accounts, 1000000000);
// Generating blocks
std::deque<std::shared_ptr<nano::block>> blocks;
for (auto i (0); i != num_accounts; ++i)
{
genesis_balance = genesis_balance - 1000000000;
auto send = builder.state ()
.account (nano::test_genesis_key.pub)
.previous (genesis_latest)
.representative (nano::test_genesis_key.pub)
.balance (genesis_balance)
.link (keys[i].pub)
.sign (keys[i].prv, keys[i].pub)
.work (work.generate (genesis_latest))
.build ();
genesis_latest = send->hash ();
blocks.push_back (std::move (send));
auto open = builder.state ()
.account (keys[i].pub)
.previous (0)
.representative (keys[i].pub)
.balance (balances[i])
.link (genesis_latest)
.sign (nano::test_genesis_key.prv, nano::test_genesis_key.pub)
.work (work.generate (keys[i].pub))
.build ();
frontiers[i] = open->hash ();
blocks.push_back (std::move (open));
}
				for (auto i (0); i != num_iterations; ++i)
{
for (auto j (0); j != num_accounts; ++j)
{
size_t other (num_accounts - j - 1);
// Sending to other account
--balances[j];
auto send = builder.state ()
.account (keys[j].pub)
.previous (frontiers[j])
.representative (keys[j].pub)
.balance (balances[j])
.link (keys[other].pub)
.sign (keys[j].prv, keys[j].pub)
.work (work.generate (frontiers[j]))
.build ();
frontiers[j] = send->hash ();
blocks.push_back (std::move (send));
// Receiving
++balances[other];
auto receive = builder.state ()
.account (keys[other].pub)
.previous (frontiers[other])
.representative (keys[other].pub)
.balance (balances[other])
.link (frontiers[j])
.sign (keys[other].prv, keys[other].pub)
.work (work.generate (frontiers[other]))
.build ();
frontiers[other] = receive->hash ();
blocks.push_back (std::move (receive));
}
}
// Processing blocks
std::cerr << boost::str (boost::format ("Starting processing %1% active blocks\n") % max_blocks);
auto begin (std::chrono::high_resolution_clock::now ());
while (!blocks.empty ())
{
auto block (blocks.front ());
node->process_active (block);
blocks.pop_front ();
}
uint64_t block_count (0);
while (block_count < max_blocks + 1)
{
std::this_thread::sleep_for (std::chrono::milliseconds (100));
auto transaction (node->store.tx_begin ());
block_count = node->store.block_count (transaction).sum ();
}
auto end (std::chrono::high_resolution_clock::now ());
auto time (std::chrono::duration_cast<std::chrono::microseconds> (end - begin).count ());
node->stop ();
std::cerr << boost::str (boost::format ("%|1$ 12d| us \n%2% blocks per second\n") % time % (max_blocks * 1000000 / time));
}
else
{
std::cerr << "For this test ACTIVE_NETWORK should be nano_test_network" << std::endl;
}
}
else if (vm.count ("debug_profile_votes"))
{
if (nano::is_test_network)
{
nano::block_builder builder;
size_t num_elections (40000);
size_t num_representatives (25);
size_t max_votes (num_elections * num_representatives); // 40,000 * 25 = 1,000,000 votes
std::cerr << boost::str (boost::format ("Starting pregenerating %1% votes\n") % max_votes);
nano::system system (24000, 1);
nano::node_init init;
nano::work_pool work (std::numeric_limits<unsigned>::max (), nullptr);
nano::logging logging;
auto path (nano::unique_path ());
logging.init (path);
auto node (std::make_shared<nano::node> (init, system.io_ctx, 24001, path, system.alarm, logging, work));
nano::block_hash genesis_latest (node->latest (nano::test_genesis_key.pub));
nano::uint128_t genesis_balance (std::numeric_limits<nano::uint128_t>::max ());
// Generating keys
std::vector<nano::keypair> keys (num_representatives);
nano::uint128_t balance ((node->config.online_weight_minimum.number () / num_representatives) + 1);
for (auto i (0); i != num_representatives; ++i)
{
auto transaction (node->store.tx_begin_write ());
genesis_balance = genesis_balance - balance;
auto send = builder.state ()
.account (nano::test_genesis_key.pub)
.previous (genesis_latest)
.representative (nano::test_genesis_key.pub)
.balance (genesis_balance)
.link (keys[i].pub)
.sign (nano::test_genesis_key.prv, nano::test_genesis_key.pub)
.work (work.generate (genesis_latest))
.build ();
genesis_latest = send->hash ();
node->ledger.process (transaction, *send);
auto open = builder.state ()
.account (keys[i].pub)
.previous (0)
.representative (keys[i].pub)
.balance (balance)
.link (genesis_latest)
.sign (keys[i].prv, keys[i].pub)
.work (work.generate (keys[i].pub))
.build ();
node->ledger.process (transaction, *open);
}
// Generating blocks
std::deque<std::shared_ptr<nano::block>> blocks;
for (auto i (0); i != num_elections; ++i)
{
genesis_balance = genesis_balance - 1;
nano::keypair destination;
auto send = builder.state ()
.account (nano::test_genesis_key.pub)
.previous (genesis_latest)
.representative (nano::test_genesis_key.pub)
.balance (genesis_balance)
.link (destination.pub)
.sign (nano::test_genesis_key.prv, nano::test_genesis_key.pub)
.work (work.generate (genesis_latest))
.build ();
genesis_latest = send->hash ();
blocks.push_back (std::move (send));
}
// Generating votes
std::deque<std::shared_ptr<nano::vote>> votes;
for (auto j (0); j != num_representatives; ++j)
{
uint64_t sequence (1);
for (auto & i : blocks)
{
auto vote (std::make_shared<nano::vote> (keys[j].pub, keys[j].prv, sequence, std::vector<nano::block_hash> (1, i->hash ())));
votes.push_back (vote);
sequence++;
}
}
// Processing block & start elections
while (!blocks.empty ())
{
auto block (blocks.front ());
node->process_active (block);
blocks.pop_front ();
}
node->block_processor.flush ();
// Processing votes
std::cerr << boost::str (boost::format ("Starting processing %1% votes\n") % max_votes);
auto begin (std::chrono::high_resolution_clock::now ());
while (!votes.empty ())
{
auto vote (votes.front ());
node->vote_processor.vote (vote, node->network.endpoint ());
votes.pop_front ();
}
while (!node->active.empty ())
{
std::this_thread::sleep_for (std::chrono::milliseconds (100));
}
auto end (std::chrono::high_resolution_clock::now ());
auto time (std::chrono::duration_cast<std::chrono::microseconds> (end - begin).count ());
node->stop ();
std::cerr << boost::str (boost::format ("%|1$ 12d| us \n%2% votes per second\n") % time % (max_votes * 1000000 / time));
}
else
{
std::cerr << "For this test ACTIVE_NETWORK should be nano_test_network" << std::endl;
}
}
else if (vm.count ("debug_rpc"))
{
std::string rpc_input_l;
std::ostringstream command_l;
while (std::cin >> rpc_input_l)
{
command_l << rpc_input_l;
}
auto response_handler_l ([](boost::property_tree::ptree const & tree_a) {
boost::property_tree::write_json (std::cout, tree_a);
// Terminate as soon as we have the result, even if background threads (like work generation) are running.
std::exit (0);
});
nano::inactive_node inactive_node_l (data_path);
nano::rpc_config rpc_config_l;
rpc_config_l.enable_control = true;
std::unique_ptr<nano::rpc> rpc_l = get_rpc (inactive_node_l.node->io_ctx, *inactive_node_l.node, rpc_config_l);
std::string req_id_l ("1");
nano::rpc_handler handler_l (*inactive_node_l.node, *rpc_l, command_l.str (), req_id_l, response_handler_l);
handler_l.process_request ();
}
else if (vm.count ("debug_validate_blocks"))
{
nano::inactive_node node (data_path);
auto transaction (node.node->store.tx_begin ());
std::cerr << boost::str (boost::format ("Performing blocks hash, signature, work validation...\n"));
size_t count (0);
for (auto i (node.node->store.latest_begin (transaction)), n (node.node->store.latest_end ()); i != n; ++i)
{
++count;
if ((count % 20000) == 0)
{
std::cout << boost::str (boost::format ("%1% accounts validated\n") % count);
}
nano::account_info info (i->second);
nano::account account (i->first);
auto hash (info.open_block);
nano::block_hash calculated_hash (0);
nano::block_sideband sideband;
uint64_t height (0);
uint64_t previous_timestamp (0);
while (!hash.is_zero ())
{
// Retrieving block data
auto block (node.node->store.block_get (transaction, hash, &sideband));
// Check for state & open blocks if account field is correct
if (block->type () == nano::block_type::open || block->type () == nano::block_type::state)
{
if (block->account () != account)
{
std::cerr << boost::str (boost::format ("Incorrect account field for block %1%\n") % hash.to_string ());
}
}
// Check if sideband account is correct
else if (sideband.account != account)
{
std::cerr << boost::str (boost::format ("Incorrect sideband account for block %1%\n") % hash.to_string ());
}
// Check if previous field is correct
if (calculated_hash != block->previous ())
{
std::cerr << boost::str (boost::format ("Incorrect previous field for block %1%\n") % hash.to_string ());
}
// Check if block data is correct (calculating hash)
calculated_hash = block->hash ();
if (calculated_hash != hash)
{
std::cerr << boost::str (boost::format ("Invalid data inside block %1% calculated hash: %2%\n") % hash.to_string () % calculated_hash.to_string ());
}
// Check if block signature is correct
if (validate_message (account, hash, block->block_signature ()))
{
bool invalid (true);
// Epoch blocks
if (!node.node->ledger.epoch_link.is_zero () && block->type () == nano::block_type::state)
{
auto & state_block (static_cast<nano::state_block &> (*block.get ()));
nano::amount prev_balance (0);
if (!state_block.hashables.previous.is_zero ())
{
prev_balance = node.node->ledger.balance (transaction, state_block.hashables.previous);
}
if (node.node->ledger.is_epoch_link (state_block.hashables.link) && state_block.hashables.balance == prev_balance)
{
invalid = validate_message (node.node->ledger.epoch_signer, hash, block->block_signature ());
}
}
if (invalid)
{
std::cerr << boost::str (boost::format ("Invalid signature for block %1%\n") % hash.to_string ());
}
}
// Check if block work value is correct
if (nano::work_validate (*block.get ()))
{
std::cerr << boost::str (boost::format ("Invalid work for block %1% value: %2%\n") % hash.to_string () % nano::to_string_hex (block->block_work ()));
}
// Check if sideband height is correct
++height;
if (sideband.height != height)
{
std::cerr << boost::str (boost::format ("Incorrect sideband height for block %1%. Sideband: %2%. Expected: %3%\n") % hash.to_string () % sideband.height % height);
}
// Check if sideband timestamp is after previous timestamp
if (sideband.timestamp < previous_timestamp)
{
std::cerr << boost::str (boost::format ("Incorrect sideband timestamp for block %1%\n") % hash.to_string ());
}
previous_timestamp = sideband.timestamp;
// Retrieving successor block hash
hash = node.node->store.block_successor (transaction, hash);
}
if (info.block_count != height)
{
std::cerr << boost::str (boost::format ("Incorrect block count for account %1%. Actual: %2%. Expected: %3%\n") % account.to_account () % height % info.block_count);
}
if (info.head != calculated_hash)
{
std::cerr << boost::str (boost::format ("Incorrect frontier for account %1%. Actual: %2%. Expected: %3%\n") % account.to_account () % calculated_hash.to_string () % info.head.to_string ());
}
}
std::cout << boost::str (boost::format ("%1% accounts validated\n") % count);
count = 0;
for (auto i (node.node->store.pending_begin (transaction)), n (node.node->store.pending_end ()); i != n; ++i)
{
++count;
if ((count % 50000) == 0)
{
std::cout << boost::str (boost::format ("%1% pending blocks validated\n") % count);
}
nano::pending_key key (i->first);
nano::pending_info info (i->second);
			// Check block existence
auto block (node.node->store.block_get (transaction, key.hash));
if (block == nullptr)
{
std::cerr << boost::str (boost::format ("Pending block not existing %1%\n") % key.hash.to_string ());
}
else
{
// Check if pending destination is correct
nano::account destination (0);
if (auto state = dynamic_cast<nano::state_block *> (block.get ()))
{
if (node.node->ledger.is_send (transaction, *state))
{
destination = state->hashables.link;
}
}
else if (auto send = dynamic_cast<nano::send_block *> (block.get ()))
{
destination = send->hashables.destination;
}
else
{
std::cerr << boost::str (boost::format ("Incorrect type for pending block %1%\n") % key.hash.to_string ());
}
if (key.account != destination)
{
std::cerr << boost::str (boost::format ("Incorrect destination for pending block %1%\n") % key.hash.to_string ());
}
// Check if pending source is correct
auto account (node.node->ledger.account (transaction, key.hash));
if (info.source != account)
{
std::cerr << boost::str (boost::format ("Incorrect source for pending block %1%\n") % key.hash.to_string ());
}
// Check if pending amount is correct
auto amount (node.node->ledger.amount (transaction, key.hash));
if (info.amount != amount)
{
std::cerr << boost::str (boost::format ("Incorrect amount for pending block %1%\n") % key.hash.to_string ());
}
}
}
std::cout << boost::str (boost::format ("%1% pending blocks validated\n") % count);
}
else if (vm.count ("debug_profile_bootstrap"))
{
nano::inactive_node node2 (nano::unique_path (), 24001);
node2.node->flags.fast_bootstrap = (vm.count ("fast_bootstrap") > 0);
nano::genesis genesis;
auto begin (std::chrono::high_resolution_clock::now ());
uint64_t block_count (0);
size_t count (0);
{
nano::inactive_node node (data_path, 24000);
auto transaction (node.node->store.tx_begin ());
block_count = node.node->store.block_count (transaction).sum ();
std::cout << boost::str (boost::format ("Performing bootstrap emulation, %1% blocks in ledger...") % block_count) << std::endl;
for (auto i (node.node->store.latest_begin (transaction)), n (node.node->store.latest_end ()); i != n; ++i)
{
nano::account account (i->first);
nano::account_info info (i->second);
auto hash (info.head);
while (!hash.is_zero ())
{
// Retrieving block data
auto block (node.node->store.block_get (transaction, hash));
if (block != nullptr)
{
++count;
if ((count % 100000) == 0)
{
std::cout << boost::str (boost::format ("%1% blocks retrieved") % count) << std::endl;
}
nano::unchecked_info unchecked_info (block, account, 0, nano::signature_verification::unknown);
node2.node->block_processor.add (unchecked_info);
// Retrieving previous block hash
hash = block->previous ();
}
}
}
}
count = 0;
uint64_t block_count_2 (0);
while (block_count_2 != block_count)
{
std::this_thread::sleep_for (std::chrono::seconds (1));
auto transaction_2 (node2.node->store.tx_begin ());
block_count_2 = node2.node->store.block_count (transaction_2).sum ();
if ((count % 60) == 0)
{
std::cout << boost::str (boost::format ("%1% (%2%) blocks processed") % block_count_2 % node2.node->store.unchecked_count (transaction_2)) << std::endl;
}
count++;
}
auto end (std::chrono::high_resolution_clock::now ());
auto time (std::chrono::duration_cast<std::chrono::microseconds> (end - begin).count ());
auto seconds (time / 1000000);
nano::remove_temporary_directories ();
std::cout << boost::str (boost::format ("%|1$ 12d| seconds \n%2% blocks per second") % seconds % (block_count / seconds)) << std::endl;
}
else if (vm.count ("debug_peers"))
{
nano::inactive_node node (data_path);
auto transaction (node.node->store.tx_begin ());
for (auto i (node.node->store.peers_begin (transaction)), n (node.node->store.peers_end ()); i != n; ++i)
{
std::cout << boost::str (boost::format ("%1%\n") % nano::endpoint (boost::asio::ip::address_v6 (i->first.address_bytes ()), i->first.port ()));
}
}
else if (vm.count ("version"))
{
if (NANO_VERSION_PATCH == 0)
{
std::cout << "Version " << NANO_MAJOR_MINOR_VERSION << std::endl;
}
else
{
std::cout << "Version " << NANO_MAJOR_MINOR_RC_VERSION << std::endl;
}
}
else
{
std::cout << description << std::endl;
result = -1;
}
}
return result;
}
| 1 | 14,982 | We have cli --online_weight_clear in cli.cpp | nanocurrency-nano-node | cpp |
@@ -225,7 +225,9 @@ func HTTPServerAttributesFromHTTPRequest(serverName, route string, request *http
attrs = append(attrs, HTTPRouteKey.String(route))
}
if values, ok := request.Header["X-Forwarded-For"]; ok && len(values) > 0 {
- attrs = append(attrs, HTTPClientIPKey.String(values[0]))
+ if addresses := strings.Split(values[0], ","); len(addresses) > 0 {
+ attrs = append(attrs, HTTPClientIPKey.String(addresses[0]))
+ }
}
	return append(attrs, httpCommonAttributesFromHTTPRequest(request)...)
| 1 |
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package semconv // import "go.opentelemetry.io/otel/semconv/v1.4.0"
import (
"fmt"
"net"
"net/http"
"strconv"
"strings"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
)
// HTTP scheme attributes.
var (
HTTPSchemeHTTP = HTTPSchemeKey.String("http")
HTTPSchemeHTTPS = HTTPSchemeKey.String("https")
)
// NetAttributesFromHTTPRequest generates attributes of the net
// namespace as specified by the OpenTelemetry specification for a
// span. The network parameter is a string that net.Dial function
// from standard library can understand.
func NetAttributesFromHTTPRequest(network string, request *http.Request) []attribute.KeyValue {
attrs := []attribute.KeyValue{}
switch network {
case "tcp", "tcp4", "tcp6":
attrs = append(attrs, NetTransportTCP)
case "udp", "udp4", "udp6":
attrs = append(attrs, NetTransportUDP)
case "ip", "ip4", "ip6":
attrs = append(attrs, NetTransportIP)
case "unix", "unixgram", "unixpacket":
attrs = append(attrs, NetTransportUnix)
default:
attrs = append(attrs, NetTransportOther)
}
peerName, peerIP, peerPort := "", "", 0
{
hostPart := request.RemoteAddr
portPart := ""
if idx := strings.LastIndex(hostPart, ":"); idx >= 0 {
hostPart = request.RemoteAddr[:idx]
portPart = request.RemoteAddr[idx+1:]
}
if hostPart != "" {
if ip := net.ParseIP(hostPart); ip != nil {
peerIP = ip.String()
} else {
peerName = hostPart
}
if portPart != "" {
numPort, err := strconv.ParseUint(portPart, 10, 16)
if err == nil {
peerPort = (int)(numPort)
} else {
peerName, peerIP = "", ""
}
}
}
}
if peerName != "" {
attrs = append(attrs, NetPeerNameKey.String(peerName))
}
if peerIP != "" {
attrs = append(attrs, NetPeerIPKey.String(peerIP))
}
if peerPort != 0 {
attrs = append(attrs, NetPeerPortKey.Int(peerPort))
}
hostIP, hostName, hostPort := "", "", 0
for _, someHost := range []string{request.Host, request.Header.Get("Host"), request.URL.Host} {
hostPart := ""
if idx := strings.LastIndex(someHost, ":"); idx >= 0 {
strPort := someHost[idx+1:]
numPort, err := strconv.ParseUint(strPort, 10, 16)
if err == nil {
hostPort = (int)(numPort)
}
hostPart = someHost[:idx]
} else {
hostPart = someHost
}
if hostPart != "" {
ip := net.ParseIP(hostPart)
if ip != nil {
hostIP = ip.String()
} else {
hostName = hostPart
}
break
} else {
hostPort = 0
}
}
if hostIP != "" {
attrs = append(attrs, NetHostIPKey.String(hostIP))
}
if hostName != "" {
attrs = append(attrs, NetHostNameKey.String(hostName))
}
if hostPort != 0 {
attrs = append(attrs, NetHostPortKey.Int(hostPort))
}
return attrs
}
// EndUserAttributesFromHTTPRequest generates attributes of the
// enduser namespace as specified by the OpenTelemetry specification
// for a span.
func EndUserAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue {
if username, _, ok := request.BasicAuth(); ok {
return []attribute.KeyValue{EnduserIDKey.String(username)}
}
return nil
}
// HTTPClientAttributesFromHTTPRequest generates attributes of the
// http namespace as specified by the OpenTelemetry specification for
// a span on the client side.
func HTTPClientAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue {
attrs := []attribute.KeyValue{}
if request.Method != "" {
attrs = append(attrs, HTTPMethodKey.String(request.Method))
} else {
attrs = append(attrs, HTTPMethodKey.String(http.MethodGet))
}
// remove any username/password info that may be in the URL
// before adding it to the attributes
userinfo := request.URL.User
request.URL.User = nil
attrs = append(attrs, HTTPURLKey.String(request.URL.String()))
// restore any username/password info that was removed
request.URL.User = userinfo
return append(attrs, httpCommonAttributesFromHTTPRequest(request)...)
}
func httpCommonAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue {
attrs := []attribute.KeyValue{}
if ua := request.UserAgent(); ua != "" {
attrs = append(attrs, HTTPUserAgentKey.String(ua))
}
if request.ContentLength > 0 {
attrs = append(attrs, HTTPRequestContentLengthKey.Int64(request.ContentLength))
}
return append(attrs, httpBasicAttributesFromHTTPRequest(request)...)
}
func httpBasicAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue {
// as these attributes are used by HTTPServerMetricAttributesFromHTTPRequest, they should be low-cardinality
attrs := []attribute.KeyValue{}
if request.TLS != nil {
attrs = append(attrs, HTTPSchemeHTTPS)
} else {
attrs = append(attrs, HTTPSchemeHTTP)
}
if request.Host != "" {
attrs = append(attrs, HTTPHostKey.String(request.Host))
}
flavor := ""
if request.ProtoMajor == 1 {
flavor = fmt.Sprintf("1.%d", request.ProtoMinor)
} else if request.ProtoMajor == 2 {
flavor = "2"
}
if flavor != "" {
attrs = append(attrs, HTTPFlavorKey.String(flavor))
}
return attrs
}
// HTTPServerMetricAttributesFromHTTPRequest generates low-cardinality attributes
// to be used with server-side HTTP metrics.
func HTTPServerMetricAttributesFromHTTPRequest(serverName string, request *http.Request) []attribute.KeyValue {
attrs := []attribute.KeyValue{}
if serverName != "" {
attrs = append(attrs, HTTPServerNameKey.String(serverName))
}
return append(attrs, httpBasicAttributesFromHTTPRequest(request)...)
}
// HTTPServerAttributesFromHTTPRequest generates attributes of the
// http namespace as specified by the OpenTelemetry specification for
// a span on the server side. Currently, only basic authentication is
// supported.
func HTTPServerAttributesFromHTTPRequest(serverName, route string, request *http.Request) []attribute.KeyValue {
attrs := []attribute.KeyValue{
HTTPMethodKey.String(request.Method),
HTTPTargetKey.String(request.RequestURI),
}
if serverName != "" {
attrs = append(attrs, HTTPServerNameKey.String(serverName))
}
if route != "" {
attrs = append(attrs, HTTPRouteKey.String(route))
}
if values, ok := request.Header["X-Forwarded-For"]; ok && len(values) > 0 {
attrs = append(attrs, HTTPClientIPKey.String(values[0]))
}
return append(attrs, httpCommonAttributesFromHTTPRequest(request)...)
}
// HTTPAttributesFromHTTPStatusCode generates attributes of the http
// namespace as specified by the OpenTelemetry specification for a
// span.
func HTTPAttributesFromHTTPStatusCode(code int) []attribute.KeyValue {
attrs := []attribute.KeyValue{
HTTPStatusCodeKey.Int(code),
}
return attrs
}
type codeRange struct {
fromInclusive int
toInclusive int
}
func (r codeRange) contains(code int) bool {
return r.fromInclusive <= code && code <= r.toInclusive
}
var validRangesPerCategory = map[int][]codeRange{
1: {
{http.StatusContinue, http.StatusEarlyHints},
},
2: {
{http.StatusOK, http.StatusAlreadyReported},
{http.StatusIMUsed, http.StatusIMUsed},
},
3: {
{http.StatusMultipleChoices, http.StatusUseProxy},
{http.StatusTemporaryRedirect, http.StatusPermanentRedirect},
},
4: {
{http.StatusBadRequest, http.StatusTeapot}, // yes, teapot is so useful…
{http.StatusMisdirectedRequest, http.StatusUpgradeRequired},
{http.StatusPreconditionRequired, http.StatusTooManyRequests},
{http.StatusRequestHeaderFieldsTooLarge, http.StatusRequestHeaderFieldsTooLarge},
{http.StatusUnavailableForLegalReasons, http.StatusUnavailableForLegalReasons},
},
5: {
{http.StatusInternalServerError, http.StatusLoopDetected},
{http.StatusNotExtended, http.StatusNetworkAuthenticationRequired},
},
}
// SpanStatusFromHTTPStatusCode generates a status code and a message
// as specified by the OpenTelemetry specification for a span.
func SpanStatusFromHTTPStatusCode(code int) (codes.Code, string) {
spanCode, valid := validateHTTPStatusCode(code)
if !valid {
return spanCode, fmt.Sprintf("Invalid HTTP status code %d", code)
}
return spanCode, ""
}
// validateHTTPStatusCode validates the HTTP status code and returns the
// corresponding span status code. If the `code` is not a valid HTTP status
// code, it returns span status Error and false.
func validateHTTPStatusCode(code int) (codes.Code, bool) {
category := code / 100
ranges, ok := validRangesPerCategory[category]
if !ok {
return codes.Error, false
}
ok = false
for _, crange := range ranges {
ok = crange.contains(code)
if ok {
break
}
}
if !ok {
return codes.Error, false
}
if category > 0 && category < 4 {
return codes.Unset, true
}
return codes.Error, true
}
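// Worked examples (illustrative, not part of the original file):
//
//	SpanStatusFromHTTPStatusCode(302) // codes.Unset, "" (valid 3xx, not an error)
//	SpanStatusFromHTTPStatusCode(404) // codes.Error, "" (valid code in an error category)
//	SpanStatusFromHTTPStatusCode(99)  // codes.Error, "Invalid HTTP status code 99"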
| 1 | 16,693 | So the request.Headers is a map of []string. Are you sure that the HTTP library doesn't already do this split for us? | open-telemetry-opentelemetry-go | go |
@@ -93,6 +93,8 @@ public class WebUtils {
return "Killing";
case DISPATCHING:
return "Dispatching";
+ case EXECUTION_STOPPED:
+ return "Execution stopped, crashed executor/container";
default:
}
return "Unknown"; | 1 | /*
* Copyright 2012 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.webapp.servlet;
import static azkaban.Constants.ConfigurationKeys.AZKABAN_SERVER_HOST_NAME;
import azkaban.ServiceProvider;
import azkaban.executor.Status;
import azkaban.spi.AzkabanEventReporter;
import azkaban.spi.EventType;
import azkaban.webapp.AzkabanWebServer;
import com.google.common.base.Strings;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.text.NumberFormat;
import java.util.HashMap;
import java.util.Map;
import javax.servlet.http.HttpServletRequest;
import org.slf4j.LoggerFactory;
public class WebUtils {
public static final String X_FORWARDED_FOR_HEADER = "X-Forwarded-For";
private static final long ONE_KB = 1024;
private static final long ONE_MB = 1024 * ONE_KB;
private static final long ONE_GB = 1024 * ONE_MB;
private static final long ONE_TB = 1024 * ONE_GB;
private static AzkabanEventReporter azkabanEventReporter;
static {
try {
azkabanEventReporter = ServiceProvider.SERVICE_PROVIDER
.getInstance(AzkabanEventReporter.class);
} catch (Exception e) {
LoggerFactory.getLogger(WebUtils.class).warn("AzkabanEventReporter not configured", e);
}
}
public static String displayBytes(final long sizeBytes) {
final NumberFormat nf = NumberFormat.getInstance();
nf.setMaximumFractionDigits(2);
if (sizeBytes >= ONE_TB) {
return nf.format(sizeBytes / (double) ONE_TB) + " tb";
} else if (sizeBytes >= ONE_GB) {
return nf.format(sizeBytes / (double) ONE_GB) + " gb";
} else if (sizeBytes >= ONE_MB) {
return nf.format(sizeBytes / (double) ONE_MB) + " mb";
} else if (sizeBytes >= ONE_KB) {
return nf.format(sizeBytes / (double) ONE_KB) + " kb";
} else {
return sizeBytes + " B";
}
}
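  // Illustrative examples (not part of the original class), assuming a locale
  // that uses '.' as the decimal separator:
  //   displayBytes(512)         -> "512 B"
  //   displayBytes(1536)        -> "1.5 kb"
  //   displayBytes(5 * ONE_GB)  -> "5 gb"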
public static String formatStatus(final Status status) {
switch (status) {
case SUCCEEDED:
return "Success";
case FAILED:
return "Failed";
case RUNNING:
return "Running";
case DISABLED:
return "Disabled";
case KILLED:
return "Killed";
case FAILED_FINISHING:
return "Running w/Failure";
case PREPARING:
return "Preparing";
case READY:
return "Ready";
case PAUSED:
return "Paused";
case SKIPPED:
return "Skipped";
case KILLING:
return "Killing";
case DISPATCHING:
return "Dispatching";
default:
}
return "Unknown";
}
/**
   * Gets the actual client IP address on a best-effort basis, as the user could be sitting
   * behind a VPN or proxy. The IP is taken from the X-Forwarded-For HTTP header, falling
   * back to the remote address of the low-level TCP connection from the client.
   *
   * If multiple IP addresses are provided in the X-Forwarded-For header, the first one (first
   * hop) is used.
*
* @param httpHeaders List of HTTP headers for the current request
* @param remoteAddr The client IP address and port from the current request's TCP connection
* @return The actual client IP address
*/
// TODO djaiswal83: Refactor this code and merge into single API
public static String getRealClientIpAddr(final Map<String, String> httpHeaders,
final String remoteAddr) {
    // If some upstream device added an X-Forwarded-For header,
    // use it for the client IP.
    // This supports scenarios where load balancers or gateways
    // front the Azkaban web server and a changing IP address would invalidate the session.
String clientIp = httpHeaders.getOrDefault(X_FORWARDED_FOR_HEADER, null);
if (clientIp == null) {
clientIp = remoteAddr;
} else {
// header can contain comma separated list of upstream servers - get the first one
final String[] ips = clientIp.split(",");
clientIp = ips[0];
}
// Strip off port and only get IP address
// todo: this is broken for IPv6, where e.g. a "loopback" address looks like "0:0:0:0:0:0:0:1"
final String[] parts = clientIp.split(":");
clientIp = parts[0];
return clientIp;
}
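  // Illustrative behavior (not part of the original class):
  //   getRealClientIpAddr({"X-Forwarded-For": "203.0.113.7, 70.41.3.18"}, "10.0.0.1:5555")
  //     returns "203.0.113.7" (the first hop wins);
  //   with no header present, "10.0.0.1:5555" becomes "10.0.0.1";
  //   an IPv6 remote address such as "0:0:0:0:0:0:0:1" is truncated to "0",
  //   which is exactly the IPv6 breakage the todo above describes.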
/**
   * Gets the actual client IP address on a best-effort basis, as the user could be sitting
   * behind a VPN or proxy. The IP is taken from the X-Forwarded-For HTTP header, falling
   * back to the remote address of the low-level TCP connection from the client.
   *
   * If multiple IP addresses are provided in the X-Forwarded-For header, the first one (first
   * hop) is used.
*
* @param req HttpServletRequest
* @return The actual client IP address
*/
public static String getRealClientIpAddr(final HttpServletRequest req) {
    // If some upstream device added an X-Forwarded-For header,
    // use it for the client IP.
    // This supports scenarios where load balancers or gateways
    // front the Azkaban web server and a changing IP address
    // would invalidate the session.
final HashMap<String, String> headers = new HashMap<>();
headers.put(WebUtils.X_FORWARDED_FOR_HEADER,
req.getHeader(WebUtils.X_FORWARDED_FOR_HEADER.toLowerCase()));
return WebUtils.getRealClientIpAddr(headers, req.getRemoteAddr());
}
private static String hostName;
static {
try {
hostName = InetAddress.getLocalHost().getCanonicalHostName();
} catch (UnknownHostException e) {
hostName = "unknown";
}
}
/**
* Report login/logout events via {@link AzkabanEventReporter}, if configured.
* @param eventType login or logout
* @param username if known
* @param ip address of originating host
* @param isSuccess AKA outcome
* @param message AKA reason
*/
public static void reportLoginEvent(final EventType eventType, final String username,
final String ip, final boolean isSuccess, final String message) {
if (azkabanEventReporter != null) {
final Map<String, String> metadata = new HashMap<>();
metadata.put("azkabanHost",
AzkabanWebServer.getAzkabanProperties().getString(AZKABAN_SERVER_HOST_NAME, hostName));
metadata.put("sessionUser", Strings.isNullOrEmpty(username) ? "unknown" : username);
metadata.put("sessionIP", ip);
metadata.put("reason", message);
metadata.put("appOutcome", isSuccess ? "SUCCESS" : "FAILURE");
azkabanEventReporter.report(eventType, metadata);
}
}
public static void reportLoginEvent(final EventType eventType, final String username, final String ip) {
reportLoginEvent(eventType, username, ip, true, null);
}
}
| 1 | 22,162 | How is this message used? Can we remove the "crashed executor/container" part which is an implementation detail? | azkaban-azkaban | java |
@@ -247,7 +247,8 @@ public class HttpCommandExecutor implements CommandExecutor, NeedsLocalLogs {
.put(GET_LOG, post("/session/:sessionId/log"))
.put(GET_AVAILABLE_LOG_TYPES, get("/session/:sessionId/log/types"))
- .put(STATUS, get("/status"));
+ .put(STATUS, get("/status"))
+ .put(HEAP_SNAPSHOT, get("/session/:sessionId/chromium/heap_snapshot"));
nameToUrl = builder.build();
} | 1 | /*
Copyright 2007-2011 Selenium committers
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package org.openqa.selenium.remote;
import com.google.common.base.Throwables;
import com.google.common.collect.ImmutableMap;
import org.apache.http.Header;
import org.apache.http.HttpEntity;
import org.apache.http.HttpHost;
import org.apache.http.HttpResponse;
import org.apache.http.NoHttpResponseException;
import org.apache.http.auth.AuthScope;
import org.apache.http.auth.UsernamePasswordCredentials;
import org.apache.http.client.ClientProtocolException;
import org.apache.http.client.HttpClient;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.client.methods.HttpUriRequest;
import org.apache.http.client.params.HttpClientParams;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;
import org.apache.http.impl.client.DefaultHttpClient;
import org.apache.http.params.BasicHttpParams;
import org.apache.http.params.CoreConnectionPNames;
import org.apache.http.params.HttpParams;
import org.apache.http.protocol.BasicHttpContext;
import org.apache.http.protocol.HttpContext;
import org.apache.http.util.EntityUtils;
import org.openqa.selenium.UnsupportedCommandException;
import org.openqa.selenium.WebDriverException;
import org.openqa.selenium.logging.LocalLogs;
import org.openqa.selenium.logging.LogEntry;
import org.openqa.selenium.logging.LogType;
import org.openqa.selenium.logging.NeedsLocalLogs;
import org.openqa.selenium.logging.profiler.HttpProfilerLogEntry;
import org.openqa.selenium.remote.internal.HttpClientFactory;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.net.BindException;
import java.net.MalformedURLException;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URL;
import java.nio.charset.Charset;
import java.util.Map;
import static org.apache.http.protocol.ExecutionContext.HTTP_TARGET_HOST;
import static org.openqa.selenium.remote.DriverCommand.*;
public class HttpCommandExecutor implements CommandExecutor, NeedsLocalLogs {
private static final int MAX_REDIRECTS = 10;
private final HttpHost targetHost;
private final URL remoteServer;
private final Map<String, CommandInfo> nameToUrl;
private final HttpClient client;
private final ErrorCodes errorCodes = new ErrorCodes();
private static HttpClientFactory httpClientFactory;
private LocalLogs logs = LocalLogs.getNullLogger();
public HttpCommandExecutor(URL addressOfRemoteServer) {
this(ImmutableMap.<String, CommandInfo>of(), addressOfRemoteServer);
}
public HttpCommandExecutor(Map<String, CommandInfo> additionalCommands, URL addressOfRemoteServer) {
try {
remoteServer = addressOfRemoteServer == null ?
new URL(System.getProperty("webdriver.remote.server", "http://localhost:4444/wd/hub")) :
addressOfRemoteServer;
} catch (MalformedURLException e) {
throw new WebDriverException(e);
}
HttpParams params = new BasicHttpParams();
// Use the JRE default for the socket linger timeout.
params.setParameter(CoreConnectionPNames.SO_LINGER, -1);
HttpClientParams.setRedirecting(params, false);
synchronized (HttpCommandExecutor.class) {
if (httpClientFactory == null) {
httpClientFactory = new HttpClientFactory();
}
}
client = httpClientFactory.getHttpClient();
if (addressOfRemoteServer != null && addressOfRemoteServer.getUserInfo() != null) {
// Use HTTP Basic auth
UsernamePasswordCredentials credentials = new
UsernamePasswordCredentials(addressOfRemoteServer.getUserInfo());
((DefaultHttpClient) client).getCredentialsProvider().
setCredentials(AuthScope.ANY, credentials);
}
// Some machines claim "localhost.localdomain" is the same as "localhost".
// This assumption is not always true.
String host = remoteServer.getHost().replace(".localdomain", "");
targetHost = new HttpHost(
host, remoteServer.getPort(), remoteServer.getProtocol());
ImmutableMap.Builder<String, CommandInfo> builder = ImmutableMap.builder();
for (Map.Entry<String, CommandInfo> entry : additionalCommands.entrySet()) {
builder.put(entry.getKey(), entry.getValue());
}
builder
.put(GET_ALL_SESSIONS, get("/sessions"))
.put(NEW_SESSION, post("/session"))
.put(GET_CAPABILITIES, get("/session/:sessionId"))
.put(QUIT, delete("/session/:sessionId"))
.put(GET_CURRENT_WINDOW_HANDLE, get("/session/:sessionId/window_handle"))
.put(GET_WINDOW_HANDLES, get("/session/:sessionId/window_handles"))
.put(GET, post("/session/:sessionId/url"))
// The Alert API is still experimental and should not be used.
.put(GET_ALERT, get("/session/:sessionId/alert"))
.put(DISMISS_ALERT, post("/session/:sessionId/dismiss_alert"))
.put(ACCEPT_ALERT, post("/session/:sessionId/accept_alert"))
.put(GET_ALERT_TEXT, get("/session/:sessionId/alert_text"))
.put(SET_ALERT_VALUE, post("/session/:sessionId/alert_text"))
.put(GO_FORWARD, post("/session/:sessionId/forward"))
.put(GO_BACK, post("/session/:sessionId/back"))
.put(REFRESH, post("/session/:sessionId/refresh"))
.put(EXECUTE_SCRIPT, post("/session/:sessionId/execute"))
.put(EXECUTE_ASYNC_SCRIPT, post("/session/:sessionId/execute_async"))
.put(GET_CURRENT_URL, get("/session/:sessionId/url"))
.put(GET_TITLE, get("/session/:sessionId/title"))
.put(GET_PAGE_SOURCE, get("/session/:sessionId/source"))
.put(SCREENSHOT, get("/session/:sessionId/screenshot"))
.put(SET_BROWSER_VISIBLE, post("/session/:sessionId/visible"))
.put(IS_BROWSER_VISIBLE, get("/session/:sessionId/visible"))
.put(FIND_ELEMENT, post("/session/:sessionId/element"))
.put(FIND_ELEMENTS, post("/session/:sessionId/elements"))
.put(GET_ACTIVE_ELEMENT, post("/session/:sessionId/element/active"))
.put(FIND_CHILD_ELEMENT, post("/session/:sessionId/element/:id/element"))
.put(FIND_CHILD_ELEMENTS, post("/session/:sessionId/element/:id/elements"))
.put(CLICK_ELEMENT, post("/session/:sessionId/element/:id/click"))
.put(CLEAR_ELEMENT, post("/session/:sessionId/element/:id/clear"))
.put(SUBMIT_ELEMENT, post("/session/:sessionId/element/:id/submit"))
.put(GET_ELEMENT_TEXT, get("/session/:sessionId/element/:id/text"))
.put(SEND_KEYS_TO_ELEMENT, post("/session/:sessionId/element/:id/value"))
.put(UPLOAD_FILE, post("/session/:sessionId/file"))
.put(GET_ELEMENT_VALUE, get("/session/:sessionId/element/:id/value"))
.put(GET_ELEMENT_TAG_NAME, get("/session/:sessionId/element/:id/name"))
.put(IS_ELEMENT_SELECTED, get("/session/:sessionId/element/:id/selected"))
.put(IS_ELEMENT_ENABLED, get("/session/:sessionId/element/:id/enabled"))
.put(IS_ELEMENT_DISPLAYED, get("/session/:sessionId/element/:id/displayed"))
.put(HOVER_OVER_ELEMENT, post("/session/:sessionId/element/:id/hover"))
.put(GET_ELEMENT_LOCATION, get("/session/:sessionId/element/:id/location"))
.put(GET_ELEMENT_LOCATION_ONCE_SCROLLED_INTO_VIEW,
get("/session/:sessionId/element/:id/location_in_view"))
.put(GET_ELEMENT_SIZE, get("/session/:sessionId/element/:id/size"))
.put(GET_ELEMENT_ATTRIBUTE, get("/session/:sessionId/element/:id/attribute/:name"))
.put(ELEMENT_EQUALS, get("/session/:sessionId/element/:id/equals/:other"))
.put(GET_ALL_COOKIES, get("/session/:sessionId/cookie"))
.put(ADD_COOKIE, post("/session/:sessionId/cookie"))
.put(DELETE_ALL_COOKIES, delete("/session/:sessionId/cookie"))
.put(DELETE_COOKIE, delete("/session/:sessionId/cookie/:name"))
.put(SWITCH_TO_FRAME, post("/session/:sessionId/frame"))
.put(SWITCH_TO_WINDOW, post("/session/:sessionId/window"))
.put(GET_WINDOW_SIZE, get("/session/:sessionId/window/:windowHandle/size"))
.put(GET_WINDOW_POSITION, get("/session/:sessionId/window/:windowHandle/position"))
.put(SET_WINDOW_SIZE, post("/session/:sessionId/window/:windowHandle/size"))
.put(SET_WINDOW_POSITION, post("/session/:sessionId/window/:windowHandle/position"))
.put(MAXIMIZE_WINDOW, post("/session/:sessionId/window/:windowHandle/maximize"))
.put(CLOSE, delete("/session/:sessionId/window"))
.put(DRAG_ELEMENT, post("/session/:sessionId/element/:id/drag"))
.put(GET_ELEMENT_VALUE_OF_CSS_PROPERTY,
get("/session/:sessionId/element/:id/css/:propertyName"))
.put(IMPLICITLY_WAIT, post("/session/:sessionId/timeouts/implicit_wait"))
.put(SET_SCRIPT_TIMEOUT, post("/session/:sessionId/timeouts/async_script"))
.put(SET_TIMEOUT, post("/session/:sessionId/timeouts"))
.put(EXECUTE_SQL, post("/session/:sessionId/execute_sql"))
.put(GET_LOCATION, get("/session/:sessionId/location"))
.put(SET_LOCATION, post("/session/:sessionId/location"))
.put(GET_APP_CACHE_STATUS, get("/session/:sessionId/application_cache/status"))
.put(IS_BROWSER_ONLINE, get("/session/:sessionId/browser_connection"))
.put(SET_BROWSER_ONLINE, post("/session/:sessionId/browser_connection"))
// TODO (user): Would it be better to combine this command with
// GET_LOCAL_STORAGE_SIZE?
.put(GET_LOCAL_STORAGE_ITEM, get("/session/:sessionId/local_storage/key/:key"))
.put(REMOVE_LOCAL_STORAGE_ITEM, delete("/session/:sessionId/local_storage/key/:key"))
.put(GET_LOCAL_STORAGE_KEYS, get("/session/:sessionId/local_storage"))
.put(SET_LOCAL_STORAGE_ITEM, post("/session/:sessionId/local_storage"))
.put(CLEAR_LOCAL_STORAGE, delete("/session/:sessionId/local_storage"))
.put(GET_LOCAL_STORAGE_SIZE, get("/session/:sessionId/local_storage/size"))
// TODO (user): Would it be better to combine this command with
// GET_SESSION_STORAGE_SIZE?
.put(GET_SESSION_STORAGE_ITEM, get("/session/:sessionId/session_storage/key/:key"))
.put(REMOVE_SESSION_STORAGE_ITEM, delete("/session/:sessionId/session_storage/key/:key"))
.put(GET_SESSION_STORAGE_KEYS, get("/session/:sessionId/session_storage"))
.put(SET_SESSION_STORAGE_ITEM, post("/session/:sessionId/session_storage"))
.put(CLEAR_SESSION_STORAGE, delete("/session/:sessionId/session_storage"))
.put(GET_SESSION_STORAGE_SIZE, get("/session/:sessionId/session_storage/size"))
.put(GET_SCREEN_ORIENTATION, get("/session/:sessionId/orientation"))
.put(SET_SCREEN_ORIENTATION, post("/session/:sessionId/orientation"))
// Interactions-related commands.
.put(CLICK, post("/session/:sessionId/click"))
.put(DOUBLE_CLICK, post("/session/:sessionId/doubleclick"))
.put(MOUSE_DOWN, post("/session/:sessionId/buttondown"))
.put(MOUSE_UP, post("/session/:sessionId/buttonup"))
.put(MOVE_TO, post("/session/:sessionId/moveto"))
.put(SEND_KEYS_TO_ACTIVE_ELEMENT, post("/session/:sessionId/keys"))
// IME related commands.
.put(IME_GET_AVAILABLE_ENGINES, get("/session/:sessionId/ime/available_engines"))
.put(IME_GET_ACTIVE_ENGINE, get("/session/:sessionId/ime/active_engine"))
.put(IME_IS_ACTIVATED, get("/session/:sessionId/ime/activated"))
.put(IME_DEACTIVATE, post("/session/:sessionId/ime/deactivate"))
.put(IME_ACTIVATE_ENGINE, post("/session/:sessionId/ime/activate"))
// Advanced Touch API commands
// TODO(berrada): Refactor single tap with mouse click.
.put(TOUCH_SINGLE_TAP, post("/session/:sessionId/touch/click"))
.put(TOUCH_DOWN, post("/session/:sessionId/touch/down"))
.put(TOUCH_UP, post("/session/:sessionId/touch/up"))
.put(TOUCH_MOVE, post("/session/:sessionId/touch/move"))
.put(TOUCH_SCROLL, post("/session/:sessionId/touch/scroll"))
.put(TOUCH_DOUBLE_TAP, post("/session/:sessionId/touch/doubleclick"))
.put(TOUCH_LONG_PRESS, post("/session/:sessionId/touch/longclick"))
.put(TOUCH_FLICK, post("/session/:sessionId/touch/flick"))
.put(GET_LOG, post("/session/:sessionId/log"))
.put(GET_AVAILABLE_LOG_TYPES, get("/session/:sessionId/log/types"))
.put(STATUS, get("/status"));
nameToUrl = builder.build();
}
public void setLocalLogs(LocalLogs logs) {
this.logs = logs;
}
private void log(String logType, LogEntry entry) {
logs.addEntry(logType, entry);
}
public URL getAddressOfRemoteServer() {
return remoteServer;
}
public Response execute(Command command) throws IOException {
HttpContext context = new BasicHttpContext();
if (command.getSessionId() == null) {
if (QUIT.equals(command.getName())) {
return new Response();
}
if (!GET_ALL_SESSIONS.equals(command.getName())
&& !NEW_SESSION.equals(command.getName())) {
throw new SessionNotFoundException("Session ID is null");
}
}
CommandInfo info = nameToUrl.get(command.getName());
try {
HttpUriRequest httpMethod = info.getMethod(remoteServer, command);
setAcceptHeader(httpMethod);
if (httpMethod instanceof HttpPost) {
String payload = new BeanToJsonConverter().convert(command.getParameters());
((HttpPost) httpMethod).setEntity(new StringEntity(payload, "utf-8"));
httpMethod.addHeader("Content-Type", "application/json; charset=utf-8");
}
// Do not allow web proxy caches to cache responses to "get" commands
if (httpMethod instanceof HttpGet) {
httpMethod.addHeader("Cache-Control", "no-cache");
}
log(LogType.PROFILER, new HttpProfilerLogEntry(command.getName(), true));
HttpResponse response = fallBackExecute(context, httpMethod);
log(LogType.PROFILER, new HttpProfilerLogEntry(command.getName(), false));
response = followRedirects(client, context, response, /* redirect count */0);
final EntityWithEncoding entityWithEncoding = new EntityWithEncoding(response.getEntity());
return createResponse(response, context, entityWithEncoding);
} catch (UnsupportedCommandException e) {
if (e.getMessage() == null || "".equals(e.getMessage())) {
throw new UnsupportedOperationException(
"No information from server. Command name was: " + command.getName(),
e.getCause());
}
throw e;
}
}
private HttpResponse fallBackExecute(HttpContext context, HttpUriRequest httpMethod)
throws IOException {
try {
return client.execute(targetHost, httpMethod, context);
} catch (BindException e) {
// If we get this, there's a chance we've used all the local ephemeral sockets
// Sleep for a bit to let the OS reclaim them, then try the request again.
try {
Thread.sleep(2000);
} catch (InterruptedException ie) {
throw Throwables.propagate(ie);
}
} catch (NoHttpResponseException e) {
// If we get this, there's a chance we've used all the remote ephemeral sockets
// Sleep for a bit to let the OS reclaim them, then try the request again.
try {
Thread.sleep(2000);
} catch (InterruptedException ie) {
throw Throwables.propagate(ie);
}
}
return client.execute(targetHost, httpMethod, context);
}
private void setAcceptHeader(HttpUriRequest httpMethod) {
httpMethod.addHeader("Accept", "application/json, image/png");
}
private HttpResponse followRedirects(
HttpClient client, HttpContext context, HttpResponse response, int redirectCount) {
if (!isRedirect(response)) {
return response;
}
try {
// Make sure that the previous connection is freed.
HttpEntity httpEntity = response.getEntity();
if (httpEntity != null) {
EntityUtils.consume(httpEntity);
}
} catch (IOException e) {
throw new WebDriverException(e);
}
if (redirectCount > MAX_REDIRECTS) {
throw new WebDriverException("Maximum number of redirects exceeded. Aborting");
}
String location = response.getFirstHeader("location").getValue();
URI uri;
try {
uri = buildUri(context, location);
HttpGet get = new HttpGet(uri);
setAcceptHeader(get);
HttpResponse newResponse = client.execute(targetHost, get, context);
return followRedirects(client, context, newResponse, redirectCount + 1);
} catch (URISyntaxException e) {
throw new WebDriverException(e);
} catch (ClientProtocolException e) {
throw new WebDriverException(e);
} catch (IOException e) {
throw new WebDriverException(e);
}
}
private URI buildUri(HttpContext context, String location) throws URISyntaxException {
URI uri;
uri = new URI(location);
if (!uri.isAbsolute()) {
HttpHost host = (HttpHost) context.getAttribute(HTTP_TARGET_HOST);
uri = new URI(host.toURI() + location);
}
return uri;
}
private boolean isRedirect(HttpResponse response) {
int code = response.getStatusLine().getStatusCode();
return (code == 301 || code == 302 || code == 303 || code == 307)
&& response.containsHeader("location");
}
class EntityWithEncoding {
private final String charSet;
private final byte[] content;
EntityWithEncoding(HttpEntity entity) throws IOException {
try {
if (entity != null) {
content = EntityUtils.toByteArray(entity);
Charset entityCharset = ContentType.getOrDefault(entity).getCharset();
charSet = entityCharset != null ? entityCharset.name() : null;
} else {
content = new byte[0];
charSet = null;
}
} finally {
EntityUtils.consume(entity);
}
}
public String getContentString()
throws UnsupportedEncodingException {
return new String(content, charSet != null ? charSet : "utf-8");
}
public byte[] getContent() {
return content;
}
public boolean hasEntityContent() {
return content != null;
}
}
private Response createResponse(HttpResponse httpResponse, HttpContext context,
EntityWithEncoding entityWithEncoding) throws IOException {
final Response response;
Header header = httpResponse.getFirstHeader("Content-Type");
if (header != null && header.getValue().startsWith("application/json")) {
String responseAsText = entityWithEncoding.getContentString();
try {
response = new JsonToBeanConverter().convert(Response.class, responseAsText);
} catch (ClassCastException e) {
if (responseAsText != null && "".equals(responseAsText)) {
// The remote server has died, but has already set some headers.
// Normally this occurs when the final window of the firefox driver
// is closed on OS X. Return null, as the return value _should_ be
// being ignored. This is not an elegant solution.
return null;
}
throw new WebDriverException("Cannot convert text to response: " + responseAsText, e);
}
} else {
response = new Response();
if (header != null && header.getValue().startsWith("image/png")) {
response.setValue(entityWithEncoding.getContent());
} else if (entityWithEncoding.hasEntityContent()) {
response.setValue(entityWithEncoding.getContentString());
}
HttpHost finalHost = (HttpHost) context.getAttribute(HTTP_TARGET_HOST);
String uri = finalHost.toURI();
String sessionId = HttpSessionId.getSessionId(uri);
if (sessionId != null) {
response.setSessionId(sessionId);
}
int statusCode = httpResponse.getStatusLine().getStatusCode();
if (!(statusCode > 199 && statusCode < 300)) {
// 4xx represents an unknown command or a bad request.
if (statusCode > 399 && statusCode < 500) {
response.setStatus(ErrorCodes.UNKNOWN_COMMAND);
} else if (statusCode > 499 && statusCode < 600) {
// 5xx represents an internal server error. The response status should already be set, but
// if not, set it to a general error code.
if (response.getStatus() == ErrorCodes.SUCCESS) {
response.setStatus(ErrorCodes.UNHANDLED_ERROR);
}
} else {
response.setStatus(ErrorCodes.UNHANDLED_ERROR);
}
}
if (response.getValue() instanceof String) {
// We normalise to \n because Java will translate this to \r\n
// if this is suitable on our platform, and if we have \r\n, java will
// turn this into \r\r\n, which would be Bad!
response.setValue(((String) response.getValue()).replace("\r\n", "\n"));
}
}
response.setState(errorCodes.toState(response.getStatus()));
return response;
}
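  // Illustrative summary (not part of the original source): for a non-JSON reply,
  // a 404 maps to ErrorCodes.UNKNOWN_COMMAND and a 500 maps to
  // ErrorCodes.UNHANDLED_ERROR unless the response already carried an error
  // status; 2xx replies keep whatever status was set.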
private static CommandInfo get(String url) {
return new CommandInfo(url, HttpVerb.GET);
}
private static CommandInfo post(String url) {
return new CommandInfo(url, HttpVerb.POST);
}
private static CommandInfo delete(String url) {
return new CommandInfo(url, HttpVerb.DELETE);
}
}
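// Illustrative sketch (not part of the original source): the additionalCommands
// constructor parameter already lets callers register vendor-specific routes
// without editing this class, e.g.
//   Map<String, CommandInfo> extra = ImmutableMap.of(
//       "heapSnapshot",
//       new CommandInfo("/session/:sessionId/chromium/heap_snapshot", HttpVerb.GET));
//   new HttpCommandExecutor(extra, remoteUrl);
// This assumes CommandInfo and HttpVerb are visible to the caller; "heapSnapshot"
// and remoteUrl are hypothetical names.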
| 1 | 10,649 | Instead of building in routing for a browser-specific command, could you refactor the HttpCommandExecutor to allow arbitrary commands to be registered? | SeleniumHQ-selenium | py |
@@ -4,7 +4,13 @@
<div class="text-box">
<% if @purchaseable.video_available?(@video) %>
- <%= render 'watch_video', video: @video, purchase: @purchase %>
+ <div class="purchase-return-link">
+ <%=link_to "Back", purchase_path(@purchase)%>
+ </div>
+
+ <h3 class="video-headline">Watch or Download Video</h3>
+
+ <%= render 'watch_video', video: @video %>
<% else %>
<p>This video is not yet available. It will be released on <%= @purchaseable.video_available_on(@video).to_s(:simple) %></p>
<% end %> | 1 | <% content_for :subject, @purchase.purchaseable_name %>
<div class="text-box-wrapper">
<div class="text-box">
<% if @purchaseable.video_available?(@video) %>
<%= render 'watch_video', video: @video, purchase: @purchase %>
<% else %>
<p>This video is not yet available. It will be released on <%= @purchaseable.video_available_on(@video).to_s(:simple) %></p>
<% end %>
</div>
</div>
<%= render sidebar_partial_name(@purchaseable), purchaseable: @purchaseable %>
| 1 | 6,856 | Space after `=`. | thoughtbot-upcase | rb |
@@ -59,6 +59,7 @@ public class EdgeInvocation extends AbstractRestInvocation {
this.responseEx = new VertxServerResponseToHttpServletResponse(context.response());
this.httpServerFilters = httpServerFilters;
requestEx.setAttribute(RestConst.REST_REQUEST, requestEx);
+ setAfterCreateInvocationHandler(invocation -> context.put(RestConst.REST_INVOCATION_CONTEXT, invocation));
}
public void edgeInvoke() { | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.servicecomb.edge.core;
import java.util.List;
import org.apache.servicecomb.common.rest.AbstractRestInvocation;
import org.apache.servicecomb.common.rest.RestConst;
import org.apache.servicecomb.common.rest.filter.HttpServerFilter;
import org.apache.servicecomb.common.rest.locator.OperationLocator;
import org.apache.servicecomb.common.rest.locator.ServicePathManager;
import org.apache.servicecomb.core.Const;
import org.apache.servicecomb.core.definition.MicroserviceVersionMeta;
import org.apache.servicecomb.core.invocation.InvocationFactory;
import org.apache.servicecomb.core.provider.consumer.ReactiveResponseExecutor;
import org.apache.servicecomb.core.provider.consumer.ReferenceConfig;
import org.apache.servicecomb.foundation.common.exceptions.ServiceCombException;
import org.apache.servicecomb.foundation.vertx.http.VertxServerRequestToHttpServletRequest;
import org.apache.servicecomb.foundation.vertx.http.VertxServerResponseToHttpServletResponse;
import org.apache.servicecomb.serviceregistry.RegistryUtils;
import org.apache.servicecomb.serviceregistry.consumer.MicroserviceVersionRule;
import org.apache.servicecomb.serviceregistry.definition.DefinitionConst;
import io.vertx.core.Vertx;
import io.vertx.ext.web.RoutingContext;
public class EdgeInvocation extends AbstractRestInvocation {
public static final String EDGE_INVOCATION_CONTEXT = "edgeInvocationContext";
protected String microserviceName;
protected MicroserviceVersionRule microserviceVersionRule;
protected MicroserviceVersionMeta latestMicroserviceVersionMeta;
protected ReferenceConfig referenceConfig;
protected String versionRule = DefinitionConst.VERSION_RULE_ALL;
public void init(String microserviceName, RoutingContext context, String path,
List<HttpServerFilter> httpServerFilters) {
this.microserviceName = microserviceName;
this.requestEx = new VertxServerRequestToHttpServletRequest(context, path);
this.responseEx = new VertxServerResponseToHttpServletResponse(context.response());
this.httpServerFilters = httpServerFilters;
requestEx.setAttribute(RestConst.REST_REQUEST, requestEx);
}
public void edgeInvoke() {
findMicroserviceVersionMeta();
findRestOperation(latestMicroserviceVersionMeta.getMicroserviceMeta());
scheduleInvocation();
}
protected void findMicroserviceVersionMeta() {
String versionRule = chooseVersionRule();
String appId = RegistryUtils.getAppId();
int idxAt = microserviceName.indexOf(org.apache.servicecomb.serviceregistry.api.Const.APP_SERVICE_SEPARATOR);
if (idxAt != -1) {
appId = microserviceName.substring(0, idxAt);
}
microserviceVersionRule = RegistryUtils.getServiceRegistry()
.getAppManager()
.getOrCreateMicroserviceVersionRule(appId, microserviceName, versionRule);
latestMicroserviceVersionMeta = microserviceVersionRule.getLatestMicroserviceVersion();
if (latestMicroserviceVersionMeta == null) {
throw new ServiceCombException(
String.format("Failed to find latest MicroserviceVersionMeta, appId=%s, microserviceName=%s, versionRule=%s.",
appId,
microserviceName,
versionRule));
}
}
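  // Illustrative example (not part of the original class): a microserviceName of
  // "myApp:myService" uses "myApp" as the appId (assuming APP_SERVICE_SEPARATOR
  // is ":"), while a bare "myService" falls back to RegistryUtils.getAppId().
  // The names here are hypothetical.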
public void setVersionRule(String versionRule) {
this.versionRule = versionRule;
}
  // another possible rule:
  // path is: /msName/version/.....
  // version in path is v1 or v2 and so on
  // map version to VersionRule:
  //   v1 -> 1.0.0-2.0.0
  //   v2 -> 2.0.0-3.0.0
  // that means if a(1.x.x) is bigger than b(1.y.y), then a is compatible with b,
  // but a(2.x.x) is not compatible with b
protected String chooseVersionRule() {
// this will use all instance of the microservice
// and this required all new version compatible to old version
return versionRule;
}
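  // Sketch of the alternative rule described above (illustrative only): a path
  // segment could be mapped to a compatible version range before dispatch, e.g.
  //   switch (pathVersion) {
  //     case "v1": setVersionRule("1.0.0-2.0.0"); break;
  //     case "v2": setVersionRule("2.0.0-3.0.0"); break;
  //     default:   setVersionRule(DefinitionConst.VERSION_RULE_ALL);
  //   }
  // The pathVersion variable and the "v1"/"v2" ranges are hypothetical, taken
  // from the comment above.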
@Override
protected OperationLocator locateOperation(ServicePathManager servicePathManager) {
return servicePathManager.consumerLocateOperation(requestEx.getRequestURI(), requestEx.getMethod());
}
@Override
protected void createInvocation() {
ReferenceConfig referenceConfig = new ReferenceConfig();
referenceConfig.setMicroserviceMeta(latestMicroserviceVersionMeta.getMicroserviceMeta());
referenceConfig.setMicroserviceVersionRule(microserviceVersionRule.getVersionRule().getVersionRule());
referenceConfig.setTransport(Const.ANY_TRANSPORT);
this.invocation = InvocationFactory.forConsumer(referenceConfig,
restOperationMeta.getOperationMeta(),
null);
this.invocation.setSync(false);
this.invocation.getHandlerContext().put(EDGE_INVOCATION_CONTEXT, Vertx.currentContext());
this.invocation.setResponseExecutor(new ReactiveResponseExecutor());
}
}
| 1 | 9,643 | if just need to do something after createInvocation just override and call super first is enough? | apache-servicecomb-java-chassis | java |
@@ -11,7 +11,7 @@ describe Travis::Build::Script::Scala do
end
it_behaves_like 'a build script'
- # it_behaves_like 'a jdk build'
+ it_behaves_like 'a jvm build'
it 'sets TRAVIS_SCALA_VERSION' do
should set 'TRAVIS_SCALA_VERSION', '2.10.0' | 1 | require 'spec_helper'
describe Travis::Build::Script::Scala do
let(:options) { { logs: { build: false, state: false } } }
let(:data) { PAYLOADS[:push].deep_clone }
subject { described_class.new(data, options).compile }
after :all do
store_example
end
it_behaves_like 'a build script'
# it_behaves_like 'a jdk build'
it 'sets TRAVIS_SCALA_VERSION' do
should set 'TRAVIS_SCALA_VERSION', '2.10.0'
end
it 'announces Scala 2.10.0' do
should run 'echo Using Scala 2.10.0'
end
it 'runs sbt ++2.10.0 test if ./project directory exists' do
directory('project')
should run_script 'sbt ++2.10.0 test'
end
it 'runs sbt ++2.10.0 test if ./build.sbt exists' do
file('build.sbt')
should run_script 'sbt ++2.10.0 test'
end
it 'runs gradle check if ./build.gradle exists' do
file('build.gradle')
should run_script 'gradle check'
end
it 'runs mvn test if no project directory or build file exists' do
should run_script 'mvn test'
end
it "runs sbt with sbt_args if they are given" do
file("build.sbt")
data["config"]["sbt_args"] = "-Dsbt.log.noformat=true"
should run_script "sbt -Dsbt.log.noformat=true ++2.10.0 test"
end
end
| 1 | 10,774 | By the way, I fixed `announce` method (missing `super` call to announce JDK version) | travis-ci-travis-build | rb |
@@ -505,9 +505,10 @@ public final class TreeSet<T> implements SortedSet<T>, Serializable {
}
}
+ @SuppressWarnings("unchecked")
@Override
- public boolean contains(T element) {
- return tree.contains(element);
+ public boolean contains(Object element) {
+ return tree.contains((T) element);
}
@Override | 1 | /* / \____ _ _ ____ ______ / \ ____ __ _ _____
* / / \/ \ / \/ \ / /\__\/ // \/ \ / / _ \ Javaslang
* _/ / /\ \ \/ / /\ \\__\\ \ // /\ \ /\\/ \__/ / Copyright 2014-now Daniel Dietrich
* /___/\_/ \_/\____/\_/ \_/\__\/__/___\_/ \_// \__/_____/ Licensed under the Apache License, Version 2.0
*/
package javaslang.collection;
import javaslang.Tuple;
import javaslang.Tuple2;
import javaslang.Tuple3;
import javaslang.control.Match;
import javaslang.control.Option;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.NoSuchElementException;
import java.util.Objects;
import java.util.function.*;
import java.util.stream.Collector;
import static javaslang.collection.Comparators.naturalComparator;
/**
* SortedSet implementation, backed by a Red/Black Tree.
*
* @param <T> Component type
* @author Daniel Dietrich
* @since 2.0.0
*/
// DEV-NOTE: it is not possible to create an EMPTY TreeSet without a Comparator type in scope
public final class TreeSet<T> implements SortedSet<T>, Serializable {
private static final long serialVersionUID = 1L;
private final RedBlackTree<T> tree;
TreeSet(RedBlackTree<T> tree) {
this.tree = tree;
}
/**
* Returns a {@link java.util.stream.Collector} which may be used in conjunction with
* {@link java.util.stream.Stream#collect(java.util.stream.Collector)} to obtain a {@link javaslang.collection.TreeSet}.
*
     * @param <T> Component type of the TreeSet.
     * @return A javaslang.collection.TreeSet Collector.
*/
public static <T> Collector<T, ArrayList<T>, TreeSet<T>> collector() {
final Supplier<ArrayList<T>> supplier = ArrayList::new;
final BiConsumer<ArrayList<T>, T> accumulator = ArrayList::add;
final BinaryOperator<ArrayList<T>> combiner = (left, right) -> {
left.addAll(right);
return left;
};
final Function<ArrayList<T>, TreeSet<T>> finisher = list -> TreeSet.ofAll(naturalComparator(), list);
return Collector.of(supplier, accumulator, combiner, finisher);
}
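    // Illustrative usage (not part of the original class):
    //   TreeSet<Integer> set = java.util.stream.Stream.of(3, 1, 2).collect(TreeSet.collector());
    //   // yields TreeSet(1, 2, 3) under the natural comparator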
public static <T extends Comparable<? super T>> TreeSet<T> empty() {
return new TreeSet<>(RedBlackTree.<T> empty());
}
public static <T> TreeSet<T> empty(Comparator<? super T> comparator) {
Objects.requireNonNull(comparator, "comparator is null");
return new TreeSet<>(RedBlackTree.empty(comparator));
}
public static <T extends Comparable<? super T>> TreeSet<T> of(T value) {
return new TreeSet<>(RedBlackTree.of(value));
}
public static <T> TreeSet<T> of(Comparator<? super T> comparator, T value) {
Objects.requireNonNull(comparator, "comparator is null");
return new TreeSet<>(RedBlackTree.of(comparator, value));
}
@SuppressWarnings("varargs")
@SafeVarargs
public static <T extends Comparable<? super T>> TreeSet<T> of(T... values) {
Objects.requireNonNull(values, "values is null");
return new TreeSet<>(RedBlackTree.of(values));
}
@SuppressWarnings("varargs")
@SafeVarargs
public static <T> TreeSet<T> of(Comparator<? super T> comparator, T... values) {
Objects.requireNonNull(comparator, "comparator is null");
Objects.requireNonNull(values, "values is null");
return new TreeSet<>(RedBlackTree.of(comparator, values));
}
/**
* Returns a TreeSet containing {@code n} values of a given Function {@code f}
* over a range of integer values from 0 to {@code n - 1}.
*
* @param <T> Component type of the TreeSet
* @param comparator The comparator used to sort the elements
* @param n The number of elements in the TreeSet
* @param f The Function computing element values
* @return A TreeSet consisting of elements {@code f(0),f(1), ..., f(n - 1)}
* @throws NullPointerException if {@code comparator} or {@code f} are null
*/
public static <T> TreeSet<T> tabulate(Comparator<? super T> comparator, int n, Function<? super Integer, ? extends T> f) {
Objects.requireNonNull(comparator, "comparator is null");
Objects.requireNonNull(f, "f is null");
return Collections.tabulate(n, f, TreeSet.empty(comparator), values -> TreeSet.of(comparator, values));
}
/**
* Returns a TreeSet containing {@code n} values of a given Function {@code f}
* over a range of integer values from 0 to {@code n - 1}.
* The underlying comparator is the natural comparator of T.
*
* @param <T> Component type of the TreeSet
* @param n The number of elements in the TreeSet
* @param f The Function computing element values
* @return A TreeSet consisting of elements {@code f(0),f(1), ..., f(n - 1)}
* @throws NullPointerException if {@code f} is null
*/
public static <T extends Comparable<? super T>> TreeSet<T> tabulate(int n, Function<? super Integer, ? extends T> f) {
Objects.requireNonNull(f, "f is null");
return tabulate((Comparator<? super T> & Serializable) T::compareTo, n, f);
}
/**
* Returns a TreeSet containing {@code n} values supplied by a given Supplier {@code s}.
*
* @param <T> Component type of the TreeSet
* @param comparator The comparator used to sort the elements
* @param n The number of elements in the TreeSet
* @param s The Supplier computing element values
* @return A TreeSet of size {@code n}, where each element contains the result supplied by {@code s}.
* @throws NullPointerException if {@code comparator} or {@code s} are null
*/
public static <T> TreeSet<T> fill(Comparator<? super T> comparator, int n, Supplier<? extends T> s) {
Objects.requireNonNull(comparator, "comparator is null");
Objects.requireNonNull(s, "s is null");
return Collections.fill(n, s, TreeSet.empty(comparator), values -> TreeSet.of(comparator, values));
}
/**
* Returns a TreeSet containing {@code n} values supplied by a given Supplier {@code s}.
* The underlying comparator is the natural comparator of T.
*
* @param <T> Component type of the TreeSet
* @param n The number of elements in the TreeSet
* @param s The Supplier computing element values
* @return A TreeSet of size {@code n}, where each element contains the result supplied by {@code s}.
* @throws NullPointerException if {@code s} is null
*/
public static <T extends Comparable<? super T>> TreeSet<T> fill(int n, Supplier<? extends T> s) {
Objects.requireNonNull(s, "s is null");
return fill((Comparator<? super T> & Serializable) T::compareTo, n, s);
}
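    // Illustrative examples (not part of the original class):
    //   TreeSet.tabulate(3, i -> i * i)  // = TreeSet(0, 1, 4)
    //   TreeSet.fill(3, () -> 1)         // = TreeSet(1); duplicates collapse in a set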
public static <T extends Comparable<? super T>> TreeSet<T> ofAll(Iterable<? extends T> values) {
Objects.requireNonNull(values, "values is null");
return values.iterator().hasNext() ? new TreeSet<>(RedBlackTree.ofAll(values)) : empty();
}
@SuppressWarnings("unchecked")
public static <T> TreeSet<T> ofAll(Comparator<? super T> comparator, Iterable<? extends T> values) {
Objects.requireNonNull(comparator, "comparator is null");
Objects.requireNonNull(values, "values is null");
return values.iterator().hasNext()
? new TreeSet<>(RedBlackTree.ofAll(comparator, values))
: (TreeSet<T>) empty();
}
/**
* Creates a TreeSet based on the elements of a boolean array.
*
* @param array a boolean array
* @return A new TreeSet of Boolean values
*/
public static TreeSet<Boolean> ofAll(boolean[] array) {
Objects.requireNonNull(array, "array is null");
return TreeSet.ofAll(Iterator.ofAll(array));
}
/**
* Creates a TreeSet based on the elements of a byte array.
*
* @param array a byte array
* @return A new TreeSet of Byte values
*/
public static TreeSet<Byte> ofAll(byte[] array) {
Objects.requireNonNull(array, "array is null");
return TreeSet.ofAll(Iterator.ofAll(array));
}
/**
* Creates a TreeSet based on the elements of a char array.
*
* @param array a char array
* @return A new TreeSet of Character values
*/
public static TreeSet<Character> ofAll(char[] array) {
Objects.requireNonNull(array, "array is null");
return TreeSet.ofAll(Iterator.ofAll(array));
}
/**
* Creates a TreeSet based on the elements of a double array.
*
* @param array a double array
* @return A new TreeSet of Double values
*/
public static TreeSet<Double> ofAll(double[] array) {
Objects.requireNonNull(array, "array is null");
return TreeSet.ofAll(Iterator.ofAll(array));
}
/**
* Creates a TreeSet based on the elements of a float array.
*
* @param array a float array
* @return A new TreeSet of Float values
*/
public static TreeSet<Float> ofAll(float[] array) {
Objects.requireNonNull(array, "array is null");
return TreeSet.ofAll(Iterator.ofAll(array));
}
/**
* Creates a TreeSet based on the elements of an int array.
*
* @param array an int array
* @return A new TreeSet of Integer values
*/
public static TreeSet<Integer> ofAll(int[] array) {
Objects.requireNonNull(array, "array is null");
return TreeSet.ofAll(Iterator.ofAll(array));
}
/**
* Creates a TreeSet based on the elements of a long array.
*
* @param array a long array
* @return A new TreeSet of Long values
*/
public static TreeSet<Long> ofAll(long[] array) {
Objects.requireNonNull(array, "array is null");
return TreeSet.ofAll(Iterator.ofAll(array));
}
/**
* Creates a TreeSet based on the elements of a short array.
*
* @param array a short array
* @return A new TreeSet of Short values
*/
public static TreeSet<Short> ofAll(short[] array) {
Objects.requireNonNull(array, "array is null");
return TreeSet.ofAll(Iterator.ofAll(array));
}
/**
* Creates a TreeSet of int numbers starting from {@code from}, extending to {@code toExclusive - 1}.
* <p>
* Examples:
* <pre>
* <code>
* TreeSet.range(0, 0) // = TreeSet()
* TreeSet.range(2, 0) // = TreeSet()
* TreeSet.range(-2, 2) // = TreeSet(-2, -1, 0, 1)
* </code>
* </pre>
*
* @param from the first number
* @param toExclusive the last number + 1
* @return a range of int values as specified or the empty range if {@code from >= toExclusive}
*/
public static TreeSet<Integer> range(int from, int toExclusive) {
return TreeSet.ofAll(Iterator.range(from, toExclusive));
}
public static TreeSet<Character> range(char from, char toExclusive) {
return TreeSet.ofAll(Iterator.range(from, toExclusive));
}
/**
* Creates a TreeSet of int numbers starting from {@code from}, extending to {@code toExclusive - 1},
* with {@code step}.
* <p>
* Examples:
* <pre>
* <code>
* TreeSet.rangeBy(1, 3, 1) // = TreeSet(1, 2)
* TreeSet.rangeBy(1, 4, 2) // = TreeSet(1, 3)
* TreeSet.rangeBy(4, 1, -2) // = TreeSet(4, 2)
* TreeSet.rangeBy(4, 1, 2) // = TreeSet()
* </code>
* </pre>
*
* @param from the first number
* @param toExclusive the last number + 1
* @param step the step
     * @return a range of int values as specified or the empty range if<br>
     * {@code from >= toExclusive} and {@code step > 0} or<br>
     * {@code from <= toExclusive} and {@code step < 0}
* @throws IllegalArgumentException if {@code step} is zero
*/
public static TreeSet<Integer> rangeBy(int from, int toExclusive, int step) {
return TreeSet.ofAll(Iterator.rangeBy(from, toExclusive, step));
}
public static TreeSet<Character> rangeBy(char from, char toExclusive, int step) {
return TreeSet.ofAll(Iterator.rangeBy(from, toExclusive, step));
}
public static TreeSet<Double> rangeBy(double from, double toExclusive, double step) {
return TreeSet.ofAll(Iterator.rangeBy(from, toExclusive, step));
}
/**
* Creates a TreeSet of long numbers starting from {@code from}, extending to {@code toExclusive - 1}.
* <p>
* Examples:
* <pre>
* <code>
* TreeSet.range(0L, 0L) // = TreeSet()
* TreeSet.range(2L, 0L) // = TreeSet()
* TreeSet.range(-2L, 2L) // = TreeSet(-2L, -1L, 0L, 1L)
* </code>
* </pre>
*
* @param from the first number
* @param toExclusive the last number + 1
* @return a range of long values as specified or the empty range if {@code from >= toExclusive}
*/
public static TreeSet<Long> range(long from, long toExclusive) {
return TreeSet.ofAll(Iterator.range(from, toExclusive));
}
/**
* Creates a TreeSet of long numbers starting from {@code from}, extending to {@code toExclusive - 1},
* with {@code step}.
* <p>
* Examples:
* <pre>
* <code>
* TreeSet.rangeBy(1L, 3L, 1L) // = TreeSet(1L, 2L)
* TreeSet.rangeBy(1L, 4L, 2L) // = TreeSet(1L, 3L)
* TreeSet.rangeBy(4L, 1L, -2L) // = TreeSet(4L, 2L)
* TreeSet.rangeBy(4L, 1L, 2L) // = TreeSet()
* </code>
* </pre>
*
* @param from the first number
* @param toExclusive the last number + 1
* @param step the step
* @return a range of long values as specified or the empty range if<br>
     * {@code from >= toExclusive} and {@code step > 0} or<br>
     * {@code from <= toExclusive} and {@code step < 0}
* @throws IllegalArgumentException if {@code step} is zero
*/
public static TreeSet<Long> rangeBy(long from, long toExclusive, long step) {
return TreeSet.ofAll(Iterator.rangeBy(from, toExclusive, step));
}
/**
* Creates a TreeSet of int numbers starting from {@code from}, extending to {@code toInclusive}.
* <p>
* Examples:
* <pre>
* <code>
* TreeSet.rangeClosed(0, 0) // = TreeSet(0)
* TreeSet.rangeClosed(2, 0) // = TreeSet()
* TreeSet.rangeClosed(-2, 2) // = TreeSet(-2, -1, 0, 1, 2)
* </code>
* </pre>
*
* @param from the first number
* @param toInclusive the last number
* @return a range of int values as specified or the empty range if {@code from > toInclusive}
*/
public static TreeSet<Integer> rangeClosed(int from, int toInclusive) {
return TreeSet.ofAll(Iterator.rangeClosed(from, toInclusive));
}
public static TreeSet<Character> rangeClosed(char from, char toInclusive) {
return TreeSet.ofAll(Iterator.rangeClosed(from, toInclusive));
}
/**
* Creates a TreeSet of int numbers starting from {@code from}, extending to {@code toInclusive},
* with {@code step}.
* <p>
* Examples:
* <pre>
* <code>
* TreeSet.rangeClosedBy(1, 3, 1) // = TreeSet(1, 2, 3)
* TreeSet.rangeClosedBy(1, 4, 2) // = TreeSet(1, 3)
* TreeSet.rangeClosedBy(4, 1, -2) // = TreeSet(4, 2)
* TreeSet.rangeClosedBy(4, 1, 2) // = TreeSet()
* </code>
* </pre>
*
* @param from the first number
* @param toInclusive the last number
* @param step the step
* @return a range of int values as specified or the empty range if<br>
* {@code from > toInclusive} and {@code step > 0} or<br>
* {@code from < toInclusive} and {@code step < 0}
* @throws IllegalArgumentException if {@code step} is zero
*/
public static TreeSet<Integer> rangeClosedBy(int from, int toInclusive, int step) {
return TreeSet.ofAll(Iterator.rangeClosedBy(from, toInclusive, step));
}
public static TreeSet<Character> rangeClosedBy(char from, char toInclusive, int step) {
return TreeSet.ofAll(Iterator.rangeClosedBy(from, toInclusive, step));
}
public static TreeSet<Double> rangeClosedBy(double from, double toInclusive, double step) {
return TreeSet.ofAll(Iterator.rangeClosedBy(from, toInclusive, step));
}
/**
* Creates a TreeSet of long numbers starting from {@code from}, extending to {@code toInclusive}.
* <p>
* Examples:
* <pre>
* <code>
* TreeSet.rangeClosed(0L, 0L) // = TreeSet(0L)
* TreeSet.rangeClosed(2L, 0L) // = TreeSet()
* TreeSet.rangeClosed(-2L, 2L) // = TreeSet(-2L, -1L, 0L, 1L, 2L)
* </code>
* </pre>
*
* @param from the first number
* @param toInclusive the last number
* @return a range of long values as specified or the empty range if {@code from > toInclusive}
*/
public static TreeSet<Long> rangeClosed(long from, long toInclusive) {
return TreeSet.ofAll(Iterator.rangeClosed(from, toInclusive));
}
/**
* Creates a TreeSet of long numbers starting from {@code from}, extending to {@code toInclusive},
* with {@code step}.
* <p>
* Examples:
* <pre>
* <code>
* TreeSet.rangeClosedBy(1L, 3L, 1L) // = TreeSet(1L, 2L, 3L)
* TreeSet.rangeClosedBy(1L, 4L, 2L) // = TreeSet(1L, 3L)
* TreeSet.rangeClosedBy(4L, 1L, -2L) // = TreeSet(4L, 2L)
* TreeSet.rangeClosedBy(4L, 1L, 2L) // = TreeSet()
* </code>
* </pre>
*
* @param from the first number
* @param toInclusive the last number
* @param step the step
     * @return a range of long values as specified or the empty range if<br>
* {@code from > toInclusive} and {@code step > 0} or<br>
* {@code from < toInclusive} and {@code step < 0}
* @throws IllegalArgumentException if {@code step} is zero
*/
public static TreeSet<Long> rangeClosedBy(long from, long toInclusive, long step) {
return TreeSet.ofAll(Iterator.rangeClosedBy(from, toInclusive, step));
}
@Override
public TreeSet<T> add(T element) {
return new TreeSet<>(tree.insert(element));
}
@Override
public TreeSet<T> addAll(Iterable<? extends T> elements) {
Objects.requireNonNull(elements, "elements is null");
RedBlackTree<T> that = tree;
for (T element : elements) {
that = that.insert(element);
}
if (tree == that) {
return this;
} else {
return new TreeSet<>(that);
}
}
@Override
public TreeSet<T> clear() {
return isEmpty() ? this : new TreeSet<>(tree.clear());
}
@Override
public Comparator<T> comparator() {
return tree.comparator();
}
@SuppressWarnings("unchecked")
@Override
public TreeSet<T> diff(Set<? extends T> elements) {
Objects.requireNonNull(elements, "elements is null");
if (elements instanceof TreeSet) {
final RedBlackTree<T> that = ((TreeSet<T>) elements).tree;
return new TreeSet<>(tree.difference(that));
} else {
return removeAll(elements);
}
}
@Override
public boolean contains(T element) {
return tree.contains(element);
}
@Override
public TreeSet<T> distinct() {
return this;
}
@Override
public TreeSet<T> distinctBy(Comparator<? super T> comparator) {
Objects.requireNonNull(comparator, "comparator is null");
return TreeSet.ofAll(tree.comparator(), iterator().distinctBy(comparator));
}
@Override
public <U> TreeSet<T> distinctBy(Function<? super T, ? extends U> keyExtractor) {
Objects.requireNonNull(keyExtractor, "keyExtractor is null");
return TreeSet.ofAll(tree.comparator(), iterator().distinctBy(keyExtractor));
}
@Override
public TreeSet<T> drop(long n) {
if (n <= 0) {
return this;
} else if (n >= length()) {
return empty(tree.comparator());
} else {
return TreeSet.ofAll(tree.comparator(), iterator().drop(n));
}
}
@Override
public TreeSet<T> dropRight(long n) {
if (n <= 0) {
return this;
} else if (n >= length()) {
return empty(tree.comparator());
} else {
return TreeSet.ofAll(tree.comparator(), iterator().dropRight(n));
}
}
@Override
public TreeSet<T> dropUntil(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
return dropWhile(predicate.negate());
}
@Override
public TreeSet<T> dropWhile(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
final TreeSet<T> treeSet = TreeSet.ofAll(tree.comparator(), iterator().dropWhile(predicate));
return (treeSet.length() == length()) ? this : treeSet;
}
@Override
public TreeSet<T> filter(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
final TreeSet<T> treeSet = TreeSet.ofAll(tree.comparator(), iterator().filter(predicate));
return (treeSet.length() == length()) ? this : treeSet;
}
@Override
public <U> TreeSet<U> flatMap(Comparator<? super U> comparator,
Function<? super T, ? extends Iterable<? extends U>> mapper) {
Objects.requireNonNull(mapper, "mapper is null");
return TreeSet.ofAll(comparator, iterator().flatMap(mapper));
}
@Override
public <U> TreeSet<U> flatMap(Function<? super T, ? extends Iterable<? extends U>> mapper) {
return flatMap(naturalComparator(), mapper);
}
@Override
public <U> U foldRight(U zero, BiFunction<? super T, ? super U, ? extends U> f) {
Objects.requireNonNull(f, "f is null");
return iterator().foldRight(zero, f);
}
@Override
public <C> Map<C, TreeSet<T>> groupBy(Function<? super T, ? extends C> classifier) {
Objects.requireNonNull(classifier, "classifier is null");
return iterator().groupBy(classifier).map(
(key, iterator) -> Tuple.of(key, TreeSet.ofAll(tree.comparator(), iterator)));
}
@Override
public Iterator<TreeSet<T>> grouped(long size) {
return sliding(size, size);
}
@Override
public boolean hasDefiniteSize() {
return true;
}
@Override
public T head() {
if (isEmpty()) {
throw new NoSuchElementException("head of empty TreeSet");
} else {
return tree.min().get();
}
}
@Override
public Option<T> headOption() {
return tree.min();
}
@Override
public TreeSet<T> init() {
if (isEmpty()) {
throw new UnsupportedOperationException("init of empty TreeSet");
} else {
return new TreeSet<>(tree.delete(tree.max().get()));
}
}
@Override
public Option<TreeSet<T>> initOption() {
return isEmpty() ? Option.none() : Option.some(init());
}
@SuppressWarnings("unchecked")
@Override
public TreeSet<T> intersect(Set<? extends T> elements) {
Objects.requireNonNull(elements, "elements is null");
if (elements instanceof TreeSet) {
final RedBlackTree<T> that = ((TreeSet<T>) elements).tree;
return new TreeSet<>(tree.intersection(that));
} else {
return retainAll(elements);
}
}
@Override
public boolean isEmpty() {
return tree.isEmpty();
}
@Override
public boolean isTraversableAgain() {
return true;
}
@Override
public Iterator<T> iterator() {
return tree.iterator();
}
@Override
public int length() {
return tree.size();
}
@Override
public <U> TreeSet<U> map(Comparator<? super U> comparator, Function<? super T, ? extends U> mapper) {
Objects.requireNonNull(mapper, "mapper is null");
return TreeSet.ofAll(comparator, iterator().map(mapper));
}
@Override
public <U> TreeSet<U> map(Function<? super T, ? extends U> mapper) {
return map(naturalComparator(), mapper);
}
@Override
public Match.MatchValue.Of<TreeSet<T>> match() {
return Match.of(this);
}
@Override
public Option<T> max() {
return tree.max();
}
@Override
public Option<T> min() {
return tree.min();
}
@Override
public Tuple2<TreeSet<T>, TreeSet<T>> partition(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
return iterator().partition(predicate).map(i1 -> TreeSet.ofAll(tree.comparator(), i1),
i2 -> TreeSet.ofAll(tree.comparator(), i2));
}
@Override
public TreeSet<T> peek(Consumer<? super T> action) {
Objects.requireNonNull(action, "action is null");
if (!isEmpty()) {
action.accept(head());
}
return this;
}
@Override
public TreeSet<T> remove(T element) {
return new TreeSet<>(tree.delete(element));
}
@Override
public TreeSet<T> removeAll(Iterable<? extends T> elements) {
Objects.requireNonNull(elements, "elements is null");
if (isEmpty()) {
return this;
} else {
RedBlackTree<T> that = tree;
final java.util.Iterator<? extends T> iter = elements.iterator();
while (!that.isEmpty() && iter.hasNext()) {
that = that.delete(iter.next());
}
if (that == tree) {
return this;
} else {
return new TreeSet<>(that);
}
}
}
@Override
public TreeSet<T> replace(T currentElement, T newElement) {
if (tree.contains(currentElement)) {
return new TreeSet<>(tree.delete(currentElement).insert(newElement));
} else {
return this;
}
}
@Override
public TreeSet<T> replaceAll(T currentElement, T newElement) {
// a set has only one occurrence
return replace(currentElement, newElement);
}
@Override
public TreeSet<T> retainAll(Iterable<? extends T> elements) {
Objects.requireNonNull(elements, "elements is null");
if (isEmpty()) {
return this;
} else {
final RedBlackTree<T> kept = RedBlackTree.ofAll(tree.comparator(), elements);
final RedBlackTree<T> newTree = tree.intersection(kept);
            return newTree.size() == tree.size() ? this : new TreeSet<>(newTree);
}
}
@Override
public TreeSet<T> scan(T zero, BiFunction<? super T, ? super T, ? extends T> operation) {
Objects.requireNonNull(operation, "operation is null");
return Collections.scanLeft(this, zero, operation, TreeSet.empty(comparator()), TreeSet::add, Function.identity());
}
@Override
public <U> Set<U> scanLeft(U zero, BiFunction<? super U, ? super T, ? extends U> operation) {
Objects.requireNonNull(operation, "operation is null");
if (zero instanceof Comparable) {
final Comparator<U> comparator = naturalComparator();
return Collections.scanLeft(this, zero, operation, TreeSet.empty(comparator), TreeSet::add, Function.identity());
} else {
return Collections.scanLeft(this, zero, operation, new java.util.ArrayList<>(), (c, u) -> {
c.add(u);
return c;
}, HashSet::ofAll);
}
}
@Override
public <U> Set<U> scanRight(U zero, BiFunction<? super T, ? super U, ? extends U> operation) {
Objects.requireNonNull(operation, "operation is null");
if (zero instanceof Comparable) {
final Comparator<U> comparator = naturalComparator();
return Collections.scanRight(this, zero, operation, TreeSet.empty(comparator), TreeSet::add, Function.identity());
} else {
return Collections.scanRight(this, zero, operation, new java.util.ArrayList<>(), (c, u) -> {
c.add(u);
return c;
}, HashSet::ofAll);
}
}
@Override
public Iterator<TreeSet<T>> sliding(long size) {
return sliding(size, 1);
}
@Override
public Iterator<TreeSet<T>> sliding(long size, long step) {
return iterator().sliding(size, step).map(seq -> TreeSet.ofAll(tree.comparator(), seq));
}
@Override
public Tuple2<TreeSet<T>, TreeSet<T>> span(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
return iterator().span(predicate).map(i1 -> TreeSet.ofAll(tree.comparator(), i1),
i2 -> TreeSet.ofAll(tree.comparator(), i2));
}
@Override
public TreeSet<T> tail() {
if (isEmpty()) {
throw new UnsupportedOperationException("tail of empty TreeSet");
} else {
return new TreeSet<>(tree.delete(tree.min().get()));
}
}
@Override
public Option<TreeSet<T>> tailOption() {
return isEmpty() ? Option.none() : Option.some(tail());
}
@Override
public TreeSet<T> take(long n) {
if (n <= 0) {
return empty(tree.comparator());
} else if (n >= length()) {
return this;
} else {
return TreeSet.ofAll(tree.comparator(), iterator().take(n));
}
}
@Override
public TreeSet<T> takeRight(long n) {
if (n <= 0) {
return empty(tree.comparator());
} else if (n >= length()) {
return this;
} else {
return TreeSet.ofAll(tree.comparator(), iterator().takeRight(n));
}
}
@Override
public TreeSet<T> takeUntil(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
final TreeSet<T> treeSet = takeWhile(predicate.negate());
return (treeSet.length() == length()) ? this : treeSet;
}
@Override
public TreeSet<T> takeWhile(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
final TreeSet<T> treeSet = TreeSet.ofAll(tree.comparator(), iterator().takeWhile(predicate));
return (treeSet.length() == length()) ? this : treeSet;
}
@SuppressWarnings("unchecked")
@Override
public TreeSet<T> union(Set<? extends T> elements) {
Objects.requireNonNull(elements, "elements is null");
if (elements instanceof TreeSet) {
final RedBlackTree<T> that = ((TreeSet<T>) elements).tree;
return new TreeSet<>(tree.union(that));
} else {
return addAll(elements);
}
}
@Override
public <T1, T2> Tuple2<TreeSet<T1>, TreeSet<T2>> unzip(
Function<? super T, Tuple2<? extends T1, ? extends T2>> unzipper) {
Objects.requireNonNull(unzipper, "unzipper is null");
return iterator().unzip(unzipper).map(i1 -> TreeSet.ofAll(naturalComparator(), i1),
i2 -> TreeSet.ofAll(naturalComparator(), i2));
}
@Override
public <T1, T2, T3> Tuple3<TreeSet<T1>, TreeSet<T2>, TreeSet<T3>> unzip3(
Function<? super T, Tuple3<? extends T1, ? extends T2, ? extends T3>> unzipper) {
Objects.requireNonNull(unzipper, "unzipper is null");
return iterator().unzip3(unzipper).map(
i1 -> TreeSet.ofAll(naturalComparator(), i1),
i2 -> TreeSet.ofAll(naturalComparator(), i2),
i3 -> TreeSet.ofAll(naturalComparator(), i3));
}
@Override
public <U> TreeSet<Tuple2<T, U>> zip(Iterable<U> that) {
Objects.requireNonNull(that, "that is null");
final Comparator<Tuple2<T, U>> tuple2Comparator = Tuple2.comparator(tree.comparator(), naturalComparator());
return TreeSet.ofAll(tuple2Comparator, iterator().zip(that));
}
@Override
public <U> TreeSet<Tuple2<T, U>> zipAll(Iterable<U> that, T thisElem, U thatElem) {
Objects.requireNonNull(that, "that is null");
final Comparator<Tuple2<T, U>> tuple2Comparator = Tuple2.comparator(tree.comparator(), naturalComparator());
return TreeSet.ofAll(tuple2Comparator, iterator().zipAll(that, thisElem, thatElem));
}
@Override
public TreeSet<Tuple2<T, Long>> zipWithIndex() {
final Comparator<? super T> component1Comparator = tree.comparator();
final Comparator<Tuple2<T, Long>> tuple2Comparator = (t1, t2) -> component1Comparator.compare(t1._1, t2._1);
return TreeSet.ofAll(tuple2Comparator, iterator().zipWithIndex());
}
// -- Object
@Override
public boolean equals(Object o) {
if (o == this) {
return true;
} else if (o instanceof TreeSet) {
final TreeSet<?> that = (TreeSet<?>) o;
return tree.equals(that.tree);
} else {
return false;
}
}
@Override
public int hashCode() {
return tree.hashCode();
}
@Override
public String stringPrefix() {
return "TreeSet";
}
@Override
public String toString() {
return mkString(stringPrefix() + "(", ", ", ")");
}
}
 | 1 | 7,279 | I'm not sure about that... This line can produce <code>ClassCastException</code> if the <code>Comparator</code> does not check this. | vavr-io-vavr | java |
@@ -737,7 +737,7 @@ type MDOps interface {
// which may not yet be reflected in the MD if the TLF hasn't been rekeyed since it
// entered into a conflicting state.
GetLatestHandleForTLF(ctx context.Context, id TlfID) (
- *BareTlfHandle, error)
+ BareTlfHandle, error)
}
// KeyOps fetches server-side key halves from the key server. | 1 | // Copyright 2016 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.
package libkbfs
import (
"reflect"
"time"
"github.com/keybase/client/go/libkb"
"github.com/keybase/client/go/logger"
keybase1 "github.com/keybase/client/go/protocol"
metrics "github.com/rcrowley/go-metrics"
"golang.org/x/net/context"
)
// AuthTokenRefreshHandler defines a callback to be called when an auth token refresh
// is needed.
type AuthTokenRefreshHandler interface {
RefreshAuthToken(context.Context)
}
// Block just needs to be (de)serialized using msgpack
type Block interface {
// GetEncodedSize returns the encoded size of this block, but only
// if it has been previously set; otherwise it returns 0.
GetEncodedSize() uint32
// SetEncodedSize sets the encoded size of this block, locally
// caching it. The encoded size is not serialized.
SetEncodedSize(size uint32)
// DataVersion returns the data version for this block
DataVersion() DataVer
}
// NodeID is a unique but transient ID for a Node. That is, two Node
// objects in memory at the same time represent the same file or
// directory if and only if their NodeIDs are equal (by pointer).
type NodeID interface {
// ParentID returns the NodeID of the directory containing the
// pointed-to file or directory, or nil if none exists.
ParentID() NodeID
}
// Node represents a direct pointer to a file or directory in KBFS.
// It is somewhat like an inode in a regular file system. Users of
// KBFS can use Node as a handle when accessing files or directories
// they have previously looked up.
type Node interface {
// GetID returns the ID of this Node. This should be used as a
// map key instead of the Node itself.
GetID() NodeID
// GetFolderBranch returns the folder ID and branch for this Node.
GetFolderBranch() FolderBranch
// GetBasename returns the current basename of the node, or ""
// if the node has been unlinked.
GetBasename() string
}
// KBFSOps handles all file system operations. Expands all indirect
// pointers. Operations that modify the server data change all the
// block IDs along the path, and so must return a path with the new
// BlockIds so the caller can update their references.
//
// KBFSOps implementations must guarantee goroutine-safety of calls on
// a per-top-level-folder basis.
//
// There are two types of operations that could block:
// * remote-sync operations, that need to synchronously update the
// MD for the corresponding top-level folder. When these
// operations return successfully, they will have guaranteed to
// have successfully written the modification to the KBFS servers.
// * remote-access operations, that don't sync any modifications to KBFS
// servers, but may block on reading data from the servers.
//
// KBFSOps implementations are supposed to give git-like consistency
// semantics for modification operations; they will be visible to
// other clients immediately after the remote-sync operations succeed,
// if and only if there was no other intervening modification to the
// same folder. If not, the change will be sync'd to the server in a
// special per-device "unmerged" area before the operation succeeds.
// In this case, the modification will not be visible to other clients
// until the KBFS code on this device performs automatic conflict
// resolution in the background.
//
// All methods take a Context (see https://blog.golang.org/context),
// and if that context is cancelled during the operation, KBFSOps will
// abort any blocking calls and return ctx.Err(). Any notifications
// resulting from an operation will also include this ctx (or a
// Context derived from it), allowing the caller to determine whether
// the notification is a result of their own action or an external
// action.
type KBFSOps interface {
// GetFavorites returns the logged-in user's list of favorite
// top-level folders. This is a remote-access operation.
GetFavorites(ctx context.Context) ([]Favorite, error)
// RefreshCachedFavorites tells the instances to forget any cached
// favorites list and fetch a new list from the server. The
	// effects are asynchronous; if there's an error refreshing the
// favorites, the cached favorites will become empty.
RefreshCachedFavorites(ctx context.Context)
// DeleteFavorite deletes the favorite from both the server and
// the local cache. Idempotent, so it succeeds even if the folder
// isn't favorited.
DeleteFavorite(ctx context.Context, name string, public bool) error
// GetOrCreateRootNode returns the root node and root entry
// info associated with the given TLF handle and branch, if
// the logged-in user has read permissions to the top-level
// folder. It creates the folder if one doesn't exist yet (and
// branch == MasterBranch), and the logged-in user has write
// permissions to the top-level folder. This is a
// remote-access operation.
GetOrCreateRootNode(
ctx context.Context, h *TlfHandle, branch BranchName) (
node Node, ei EntryInfo, err error)
// GetDirChildren returns a map of children in the directory,
// mapped to their EntryInfo, if the logged-in user has read
// permission for the top-level folder. This is a remote-access
// operation.
GetDirChildren(ctx context.Context, dir Node) (map[string]EntryInfo, error)
// Lookup returns the Node and entry info associated with a
// given name in a directory, if the logged-in user has read
// permissions to the top-level folder. The returned Node is nil
// if the name is a symlink. This is a remote-access operation.
Lookup(ctx context.Context, dir Node, name string) (Node, EntryInfo, error)
// Stat returns the entry info associated with a
// given Node, if the logged-in user has read permissions to the
// top-level folder. This is a remote-access operation.
Stat(ctx context.Context, node Node) (EntryInfo, error)
// CreateDir creates a new subdirectory under the given node, if
// the logged-in user has write permission to the top-level
// folder. Returns the new Node for the created subdirectory, and
// its new entry info. This is a remote-sync operation.
CreateDir(ctx context.Context, dir Node, name string) (
Node, EntryInfo, error)
// CreateFile creates a new file under the given node, if the
// logged-in user has write permission to the top-level folder.
// Returns the new Node for the created file, and its new
// entry info. This is a remote-sync operation.
CreateFile(ctx context.Context, dir Node, name string, isEx bool) (
Node, EntryInfo, error)
// CreateLink creates a new symlink under the given node, if the
// logged-in user has write permission to the top-level folder.
// Returns the new entry info for the created symlink. This
// is a remote-sync operation.
CreateLink(ctx context.Context, dir Node, fromName string, toPath string) (
EntryInfo, error)
// RemoveDir removes the subdirectory represented by the given
// node, if the logged-in user has write permission to the
// top-level folder. Will return an error if the subdirectory is
// not empty. This is a remote-sync operation.
RemoveDir(ctx context.Context, dir Node, dirName string) error
// RemoveEntry removes the directory entry represented by the
// given node, if the logged-in user has write permission to the
// top-level folder. This is a remote-sync operation.
RemoveEntry(ctx context.Context, dir Node, name string) error
// Rename performs an atomic rename operation with a given
// top-level folder if the logged-in user has write permission to
// that folder, and will return an error if nodes from different
// folders are passed in. Also returns an error if the new name
// already has an entry corresponding to an existing directory
// (only non-dir types may be renamed over). This is a
// remote-sync operation.
Rename(ctx context.Context, oldParent Node, oldName string, newParent Node,
newName string) error
// Read fills in the given buffer with data from the file at the
// given node starting at the given offset, if the logged-in user
// has read permission to the top-level folder. The read data
// reflects any outstanding writes and truncates to that file that
// have been written through this KBFSOps object, even if those
// writes have not yet been sync'd. There is no guarantee that
// Read returns all of the requested data; it will return the
// number of bytes that it wrote to the dest buffer. Reads on an
// unlinked file may or may not succeed, depending on whether or
// not the data has been cached locally. If (0, nil) is returned,
// that means EOF has been reached. This is a remote-access
// operation.
Read(ctx context.Context, file Node, dest []byte, off int64) (int64, error)
// Write modifies the file at the given node, by writing the given
// buffer at the given offset within the file, if the logged-in
// user has write permission to the top-level folder. It
// overwrites any data already there, and extends the file size as
	// necessary to accommodate the new data. It guarantees to write
// the entire buffer in one operation. Writes on an unlinked file
// may or may not succeed as no-ops, depending on whether or not
// the necessary blocks have been locally cached. This is a
// remote-access operation.
Write(ctx context.Context, file Node, data []byte, off int64) error
// Truncate modifies the file at the given node, by either
// shrinking or extending its size to match the given size, if the
// logged-in user has write permission to the top-level folder.
// If extending the file, it pads the new data with 0s. Truncates
// on an unlinked file may or may not succeed as no-ops, depending
// on whether or not the necessary blocks have been locally
// cached. This is a remote-access operation.
Truncate(ctx context.Context, file Node, size uint64) error
// SetEx turns on or off the executable bit on the file
// represented by a given node, if the logged-in user has write
// permissions to the top-level folder. This is a remote-sync
// operation.
SetEx(ctx context.Context, file Node, ex bool) error
// SetMtime sets the modification time on the file represented by
// a given node, if the logged-in user has write permissions to
// the top-level folder. If mtime is nil, it is a noop. This is
// a remote-sync operation.
SetMtime(ctx context.Context, file Node, mtime *time.Time) error
// Sync flushes all outstanding writes and truncates for the given
// file to the KBFS servers, if the logged-in user has write
// permissions to the top-level folder. If done through a file
// system interface, this may include modifications done via
// multiple file handles. This is a remote-sync operation.
Sync(ctx context.Context, file Node) error
// FolderStatus returns the status of a particular folder/branch, along
// with a channel that will be closed when the status has been
// updated (to eliminate the need for polling this method).
FolderStatus(ctx context.Context, folderBranch FolderBranch) (
FolderBranchStatus, <-chan StatusUpdate, error)
// Status returns the status of KBFS, along with a channel that will be
// closed when the status has been updated (to eliminate the need for
// polling this method). KBFSStatus can be non-empty even if there is an
// error.
Status(ctx context.Context) (
KBFSStatus, <-chan StatusUpdate, error)
// UnstageForTesting clears out this device's staged state, if
// any, and fast-forwards to the current head of this
// folder-branch. TODO: remove this once we have automatic
// conflict resolution.
UnstageForTesting(ctx context.Context, folderBranch FolderBranch) error
// Rekey rekeys this folder.
Rekey(ctx context.Context, id TlfID) error
// SyncFromServerForTesting blocks until the local client has
// contacted the server and guaranteed that all known updates
// for the given top-level folder have been applied locally
// (and notifications sent out to any observers). It returns
// an error if this folder-branch is currently unmerged or
// dirty locally.
SyncFromServerForTesting(ctx context.Context, folderBranch FolderBranch) error
// GetUpdateHistory returns a complete history of all the merged
// updates of the given folder, in a data structure that's
// suitable for encoding directly into JSON. This is an expensive
	// operation, and should only be used for occasional debugging.
// Note that the history does not include any unmerged changes or
// outstanding writes from the local device.
GetUpdateHistory(ctx context.Context, folderBranch FolderBranch) (
history TLFUpdateHistory, err error)
// Shutdown is called to clean up any resources associated with
// this KBFSOps instance.
Shutdown() error
// PushConnectionStatusChange updates the status of a service for
// human readable connection status tracking.
PushConnectionStatusChange(service string, newStatus error)
}
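// Illustrative sketch (added for this document, not part of the original
// file): a caller combining remote-access and remote-sync KBFSOps calls.
// The helper name, file name, and timeout are assumptions of the example;
// the point is that cancelling ctx aborts any of the blocking calls below.
func writeGreeting(ctx context.Context, ops KBFSOps, dir Node) error {
	ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
	defer cancel()
	file, _, err := ops.CreateFile(ctx, dir, "greeting.txt", false)
	if err != nil {
		return err
	}
	if err := ops.Write(ctx, file, []byte("hello"), 0); err != nil {
		return err
	}
	// Sync is the remote-sync step; only after it returns nil is the write
	// guaranteed to have reached the KBFS servers.
	return ops.Sync(ctx, file)
}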
// KeybaseDaemon is an interface for communicating with the local
// Keybase daemon.
type KeybaseDaemon interface {
// Resolve, given an assertion, resolves it to a username/UID
// pair. The username <-> UID mapping is trusted and
// immutable, so it can be cached. If the assertion is just
// the username or a UID assertion, then the resolution can
// also be trusted. If the returned pair is equal to that of
// the current session, then it can also be
// trusted. Otherwise, Identify() needs to be called on the
// assertion before the assertion -> (username, UID) mapping
// can be trusted.
Resolve(ctx context.Context, assertion string) (
libkb.NormalizedUsername, keybase1.UID, error)
// Identify, given an assertion, returns a UserInfo struct
// with the user that matches that assertion, or an error
// otherwise. The reason string is displayed on any tracker
// popups spawned.
Identify(ctx context.Context, assertion, reason string) (UserInfo, error)
// LoadUserPlusKeys returns a UserInfo struct for a
// user with the specified UID.
// If you have the UID for a user and don't require Identify to
// validate an assertion or the identity of a user, use this to
// get UserInfo structs as it is much cheaper than Identify.
LoadUserPlusKeys(ctx context.Context, uid keybase1.UID) (UserInfo, error)
// CurrentSession returns a SessionInfo struct with all the
// information for the current session, or an error otherwise.
CurrentSession(ctx context.Context, sessionID int) (SessionInfo, error)
// FavoriteAdd adds the given folder to the list of favorites.
FavoriteAdd(ctx context.Context, folder keybase1.Folder) error
	// FavoriteDelete removes the given folder from the list of
	// favorites.
FavoriteDelete(ctx context.Context, folder keybase1.Folder) error
// FavoriteList returns the current list of favorites.
FavoriteList(ctx context.Context, sessionID int) ([]keybase1.Folder, error)
// Notify sends a filesystem notification.
Notify(ctx context.Context, notification *keybase1.FSNotification) error
// FlushUserFromLocalCache instructs this layer to clear any
// KBFS-side, locally-cached information about the given user.
// This does NOT involve communication with the daemon, this is
// just to force future calls loading this user to fall through to
// the daemon itself, rather than being served from the cache.
FlushUserFromLocalCache(ctx context.Context, uid keybase1.UID)
// TODO: Add CryptoClient methods, too.
// Shutdown frees any resources associated with this
// instance. No other methods may be called after this is
// called.
Shutdown()
}
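// Illustrative sketch (added for this document, not part of the original
// file): the trust rule from the Resolve doc above, with a deliberately
// simplified check; resolveAndMaybeIdentify is an assumed helper name.
func resolveAndMaybeIdentify(ctx context.Context, daemon KeybaseDaemon,
	assertion, reason string) (libkb.NormalizedUsername, keybase1.UID, error) {
	name, uid, err := daemon.Resolve(ctx, assertion)
	if err != nil {
		return "", "", err
	}
	// A bare username or UID assertion can be trusted as-is; any other
	// assertion must be verified with Identify before the mapping is trusted.
	if assertion != string(name) && assertion != string(uid) {
		if _, err := daemon.Identify(ctx, assertion, reason); err != nil {
			return "", "", err
		}
	}
	return name, uid, nil
}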
type resolver interface {
// Resolve, given an assertion, resolves it to a username/UID
// pair. The username <-> UID mapping is trusted and
// immutable, so it can be cached. If the assertion is just
// the username or a UID assertion, then the resolution can
// also be trusted. If the returned pair is equal to that of
// the current session, then it can also be
// trusted. Otherwise, Identify() needs to be called on the
// assertion before the assertion -> (username, UID) mapping
// can be trusted.
Resolve(ctx context.Context, assertion string) (
libkb.NormalizedUsername, keybase1.UID, error)
}
type identifier interface {
// Identify resolves an assertion (which could also be a
// username) to a UserInfo struct, spawning tracker popups if
// necessary. The reason string is displayed on any tracker
// popups spawned.
Identify(ctx context.Context, assertion, reason string) (UserInfo, error)
}
type normalizedUsernameGetter interface {
// GetNormalizedUsername returns the normalized username
// corresponding to the given UID.
GetNormalizedUsername(ctx context.Context, uid keybase1.UID) (libkb.NormalizedUsername, error)
}
// KBPKI interacts with the Keybase daemon to fetch user info.
type KBPKI interface {
// GetCurrentToken gets the current keybase session token.
GetCurrentToken(ctx context.Context) (string, error)
// GetCurrentUserInfo gets the name and UID of the current
// logged-in user.
GetCurrentUserInfo(ctx context.Context) (
libkb.NormalizedUsername, keybase1.UID, error)
// GetCurrentCryptPublicKey gets the crypt public key for the
// currently-active device.
GetCurrentCryptPublicKey(ctx context.Context) (CryptPublicKey, error)
// GetCurrentVerifyingKey gets the public key used for signing for the
// currently-active device.
GetCurrentVerifyingKey(ctx context.Context) (VerifyingKey, error)
resolver
identifier
normalizedUsernameGetter
// HasVerifyingKey returns nil if the given user has the given
// VerifyingKey, and an error otherwise.
HasVerifyingKey(ctx context.Context, uid keybase1.UID,
verifyingKey VerifyingKey, atServerTime time.Time) error
// GetCryptPublicKeys gets all of a user's crypt public keys (including
// paper keys).
GetCryptPublicKeys(ctx context.Context, uid keybase1.UID) (
[]CryptPublicKey, error)
// TODO: Split the methods below off into a separate
// FavoriteOps interface.
// FavoriteAdd adds folder to the list of the logged in user's
// favorite folders. It is idempotent.
FavoriteAdd(ctx context.Context, folder keybase1.Folder) error
// FavoriteDelete deletes folder from the list of the logged in user's
// favorite folders. It is idempotent.
FavoriteDelete(ctx context.Context, folder keybase1.Folder) error
// FavoriteList returns the list of all favorite folders for
// the logged in user.
FavoriteList(ctx context.Context) ([]keybase1.Folder, error)
// Notify sends a filesystem notification.
Notify(ctx context.Context, notification *keybase1.FSNotification) error
}
// KeyManager fetches and constructs the keys needed for KBFS file
// operations.
type KeyManager interface {
// GetTLFCryptKeyForEncryption gets the crypt key to use for
// encryption (i.e., with the latest key generation) for the
// TLF with the given metadata.
GetTLFCryptKeyForEncryption(ctx context.Context, md *RootMetadata) (
TLFCryptKey, error)
// GetTLFCryptKeyForMDDecryption gets the crypt key to use for the
// TLF with the given metadata to decrypt the private portion of
// the metadata. It finds the appropriate key from mdWithKeys
// (which in most cases is the same as mdToDecrypt) if it's not
// already cached.
GetTLFCryptKeyForMDDecryption(ctx context.Context,
mdToDecrypt, mdWithKeys *RootMetadata) (TLFCryptKey, error)
// GetTLFCryptKeyForBlockDecryption gets the crypt key to use
// for the TLF with the given metadata to decrypt the block
// pointed to by the given pointer.
GetTLFCryptKeyForBlockDecryption(ctx context.Context, md *RootMetadata,
blockPtr BlockPointer) (TLFCryptKey, error)
// Rekey checks the given MD object, if it is a private TLF,
// against the current set of device keys for all valid
// readers and writers. If there are any new devices, it
// updates all existing key generations to include the new
// devices. If there are devices that have been removed, it
// creates a new epoch of keys for the TLF. If no devices
// have changed, or if there was an error, it returns false.
// Otherwise, it returns true. If a new key generation is
// added the second return value points to this new key. This
// is to allow for caching of the TLF crypt key only after a
// successful merged write of the metadata. Otherwise we could
// prematurely pollute the key cache.
//
// If the given MD object is a public TLF, it simply updates
// the TLF's handle with any newly-resolved writers.
//
// If promptPaper is set, prompts for any unlocked paper keys.
// promptPaper shouldn't be set if md is for a public TLF.
Rekey(ctx context.Context, md *RootMetadata, promptPaper bool) (bool, *TLFCryptKey, error)
}
// Reporter exports events (asynchronously) to any number of sinks
type Reporter interface {
// ReportErr records that a given error happened.
ReportErr(ctx context.Context, tlfName CanonicalTlfName, public bool,
mode ErrorModeType, err error)
// AllKnownErrors returns all errors known to this Reporter.
AllKnownErrors() []ReportedError
// Notify sends the given notification to any sink.
Notify(ctx context.Context, notification *keybase1.FSNotification)
// Shutdown frees any resources allocated by a Reporter.
Shutdown()
}
// MDCache gets and puts plaintext top-level metadata into the cache.
type MDCache interface {
// Get gets the metadata object associated with the given TlfID,
// revision number, and branch ID (NullBranchID for merged MD).
Get(tlf TlfID, rev MetadataRevision, bid BranchID) (*RootMetadata, error)
// Put stores the metadata object.
Put(md *RootMetadata) error
}
// KeyCache handles caching for both TLFCryptKeys and BlockCryptKeys.
type KeyCache interface {
// GetTLFCryptKey gets the crypt key for the given TLF.
GetTLFCryptKey(TlfID, KeyGen) (TLFCryptKey, error)
// PutTLFCryptKey stores the crypt key for the given TLF.
PutTLFCryptKey(TlfID, KeyGen, TLFCryptKey) error
}
// BlockCacheLifetime denotes the lifetime of an entry in BlockCache.
type BlockCacheLifetime int
const (
// TransientEntry means that the cache entry may be evicted at
// any time.
TransientEntry BlockCacheLifetime = iota
// PermanentEntry means that the cache entry must remain until
// explicitly removed from the cache.
PermanentEntry
)
// BlockCache gets and puts plaintext dir blocks and file blocks into
// a cache. These blocks are immutable and identified by their
// content hash.
type BlockCache interface {
	// Get gets the block associated with the given block pointer.
Get(ptr BlockPointer) (Block, error)
// CheckForKnownPtr sees whether this cache has a transient
	// entry for the given file block (which must be a direct file
	// block containing data). Returns the full BlockPointer
// associated with that ID, including key and data versions.
// If no ID is known, return an uninitialized BlockPointer and
// a nil error.
CheckForKnownPtr(tlf TlfID, block *FileBlock) (BlockPointer, error)
// Put stores the final (content-addressable) block associated
// with the given block ID. If lifetime is TransientEntry,
// then it is assumed that the block exists on the server and
// the entry may be evicted from the cache at any time. If
// lifetime is PermanentEntry, then it is assumed that the
// block doesn't exist on the server and must remain in the
// cache until explicitly removed. As an intermediary state,
// as when a block is being sent to the server, the block may
// be put into the cache both with TransientEntry and
// PermanentEntry -- these are two separate entries. This is
// fine, since the block should be the same.
Put(ptr BlockPointer, tlf TlfID, block Block,
lifetime BlockCacheLifetime) error
// DeleteTransient removes the transient entry for the given
// pointer from the cache, as well as any cached IDs so the block
// won't be reused.
DeleteTransient(ptr BlockPointer, tlf TlfID) error
// Delete removes the permanent entry for the non-dirty block
// associated with the given block ID from the cache. No
// error is returned if no block exists for the given ID.
DeletePermanent(id BlockID) error
// DeleteKnownPtr removes the cached ID for the given file
// block. It does not remove the block itself.
DeleteKnownPtr(tlf TlfID, block *FileBlock) error
}
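// Illustrative sketch (added for this document, not part of the original
// file): the intermediary state described in the Put doc above, where a
// block being sent to the server is briefly held under both lifetimes.
// Assumes BlockPointer exposes its BlockID as ptr.ID; the actual upload
// is elided.
func cacheDuringUpload(cache BlockCache, ptr BlockPointer, tlf TlfID, block Block) error {
	if err := cache.Put(ptr, tlf, block, PermanentEntry); err != nil {
		return err
	}
	// ... send the block to the server here ...
	if err := cache.Put(ptr, tlf, block, TransientEntry); err != nil {
		return err
	}
	// Once the server has the block, the permanent entry can be dropped.
	return cache.DeletePermanent(ptr.ID)
}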
// DirtyBlockCache gets and puts plaintext dir blocks and file blocks
// into a cache, which have been modified by the application and not
// yet committed on the KBFS servers. They are identified by a
// (potentially random) ID that may not have any relationship with
// their context, along with a Branch in case the same TLF is being
// modified via multiple branches. Dirty blocks are never evicted,
// they must be deleted explicitly.
type DirtyBlockCache interface {
// Get gets the block associated with the given block ID. Returns
// the dirty block for the given ID, if one exists.
Get(ptr BlockPointer, branch BranchName) (Block, error)
// Put stores a dirty block currently identified by the
// given block pointer and branch name.
Put(ptr BlockPointer, branch BranchName, block Block) error
// Delete removes the dirty block associated with the given block
// pointer and branch from the cache. No error is returned if no
// block exists for the given ID.
Delete(ptr BlockPointer, branch BranchName) error
// IsDirty states whether or not the block associated with the
// given block pointer and branch name is dirty in this cache.
IsDirty(ptr BlockPointer, branch BranchName) bool
// DirtyBytesEstimate counts the number of outstanding bytes held
// in dirty blocks. It's an estimate because callers can be
// modifying the size of the dirty blocks outside of the cache
// while this is being called.
DirtyBytesEstimate() uint64
}
// Crypto signs, verifies, encrypts, and decrypts stuff.
type Crypto interface {
// MakeRandomTlfID generates a dir ID using a CSPRNG.
MakeRandomTlfID(isPublic bool) (TlfID, error)
// MakeRandomBranchID generates a per-device branch ID using a CSPRNG.
MakeRandomBranchID() (BranchID, error)
// MakeMdID computes the MD ID of a RootMetadata object.
MakeMdID(md *RootMetadata) (MdID, error)
// MakeMerkleHash computes the hash of a RootMetadataSigned object
// for inclusion into the KBFS Merkle tree.
MakeMerkleHash(md *RootMetadataSigned) (MerkleHash, error)
// MakeTemporaryBlockID generates a temporary block ID using a
// CSPRNG. This is used for indirect blocks before they're
// committed to the server.
MakeTemporaryBlockID() (BlockID, error)
// MakePermanentBlockID computes the permanent ID of a block
// given its encoded and encrypted contents.
MakePermanentBlockID(encodedEncryptedData []byte) (BlockID, error)
// VerifyBlockID verifies that the given block ID is the
// permanent block ID for the given encoded and encrypted
// data.
VerifyBlockID(encodedEncryptedData []byte, id BlockID) error
	// MakeBlockRefNonce generates a block reference nonce using a
// CSPRNG. This is used for distinguishing different references to
// the same BlockID.
MakeBlockRefNonce() (BlockRefNonce, error)
// MakeRandomTLFKeys generates top-level folder keys using a CSPRNG.
MakeRandomTLFKeys() (TLFPublicKey, TLFPrivateKey, TLFEphemeralPublicKey,
TLFEphemeralPrivateKey, TLFCryptKey, error)
// MakeRandomTLFCryptKeyServerHalf generates the server-side of a
// top-level folder crypt key.
MakeRandomTLFCryptKeyServerHalf() (TLFCryptKeyServerHalf, error)
// MakeRandomBlockCryptKeyServerHalf generates the server-side of
// a block crypt key.
MakeRandomBlockCryptKeyServerHalf() (BlockCryptKeyServerHalf, error)
// MaskTLFCryptKey returns the client-side of a top-level folder crypt key.
MaskTLFCryptKey(serverHalf TLFCryptKeyServerHalf, key TLFCryptKey) (
TLFCryptKeyClientHalf, error)
// UnmaskTLFCryptKey returns the top-level folder crypt key.
UnmaskTLFCryptKey(serverHalf TLFCryptKeyServerHalf,
clientHalf TLFCryptKeyClientHalf) (TLFCryptKey, error)
// UnmaskBlockCryptKey returns the block crypt key.
UnmaskBlockCryptKey(serverHalf BlockCryptKeyServerHalf,
tlfCryptKey TLFCryptKey) (BlockCryptKey, error)
// Sign signs the msg with the current device's private key.
Sign(ctx context.Context, msg []byte) (sigInfo SignatureInfo, err error)
	// SignToString signs the msg with the current device's private key and
	// outputs the full serialized NaclSigInfo.
SignToString(ctx context.Context, msg []byte) (signature string, err error)
// Verify verifies that sig matches msg being signed with the
// private key that corresponds to verifyingKey.
Verify(msg []byte, sigInfo SignatureInfo) error
// EncryptTLFCryptKeyClientHalf encrypts a TLFCryptKeyClientHalf
// using both a TLF's ephemeral private key and a device pubkey.
EncryptTLFCryptKeyClientHalf(privateKey TLFEphemeralPrivateKey,
publicKey CryptPublicKey, clientHalf TLFCryptKeyClientHalf) (
EncryptedTLFCryptKeyClientHalf, error)
// DecryptTLFCryptKeyClientHalf decrypts a TLFCryptKeyClientHalf
// using the current device's private key and the TLF's ephemeral
// public key.
DecryptTLFCryptKeyClientHalf(ctx context.Context,
publicKey TLFEphemeralPublicKey,
encryptedClientHalf EncryptedTLFCryptKeyClientHalf) (
TLFCryptKeyClientHalf, error)
// DecryptTLFCryptKeyClientHalfAny decrypts one of the
// TLFCryptKeyClientHalf using the available private keys and the
// ephemeral public key. If promptPaper is true, the service will
// prompt the user for any unlocked paper keys.
DecryptTLFCryptKeyClientHalfAny(ctx context.Context,
keys []EncryptedTLFCryptKeyClientAndEphemeral, promptPaper bool) (
TLFCryptKeyClientHalf, int, error)
// GetTLFCryptKeyServerHalfID creates a unique ID for this particular
// TLFCryptKeyServerHalf.
GetTLFCryptKeyServerHalfID(
user keybase1.UID, deviceKID keybase1.KID,
serverHalf TLFCryptKeyServerHalf) (TLFCryptKeyServerHalfID, error)
// VerifyTLFCryptKeyServerHalfID verifies the ID is the proper HMAC result.
VerifyTLFCryptKeyServerHalfID(serverHalfID TLFCryptKeyServerHalfID, user keybase1.UID,
deviceKID keybase1.KID, serverHalf TLFCryptKeyServerHalf) error
// EncryptPrivateMetadata encrypts a PrivateMetadata object.
EncryptPrivateMetadata(pmd *PrivateMetadata, key TLFCryptKey) (EncryptedPrivateMetadata, error)
// DecryptPrivateMetadata decrypts a PrivateMetadata object.
DecryptPrivateMetadata(encryptedPMD EncryptedPrivateMetadata, key TLFCryptKey) (*PrivateMetadata, error)
	// EncryptBlock encrypts a block. plainSize is the size of the encoded
// block; EncryptBlock() must guarantee that plainSize <=
// len(encryptedBlock).
EncryptBlock(block Block, key BlockCryptKey) (
plainSize int, encryptedBlock EncryptedBlock, err error)
// DecryptBlock decrypts a block. Similar to EncryptBlock(),
// DecryptBlock() must guarantee that (size of the decrypted
// block) <= len(encryptedBlock).
DecryptBlock(encryptedBlock EncryptedBlock, key BlockCryptKey, block Block) error
// EncryptMerkleLeaf encrypts a Merkle leaf node with the TLFPublicKey.
EncryptMerkleLeaf(leaf MerkleLeaf, pubKey TLFPublicKey, nonce *[24]byte,
ePrivKey TLFEphemeralPrivateKey) (EncryptedMerkleLeaf, error)
// DecryptMerkleLeaf decrypts a Merkle leaf node with the TLFPrivateKey.
DecryptMerkleLeaf(encryptedLeaf EncryptedMerkleLeaf, privKey TLFPrivateKey,
nonce *[24]byte, ePubKey TLFEphemeralPublicKey) (*MerkleLeaf, error)
// Shutdown frees any resources associated with this instance.
Shutdown()
}
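// Illustrative sketch (added for this document, not part of the original
// file): the mask/unmask pair above is meant to round-trip, i.e. unmasking
// a client half with the matching server half recovers the original key.
// reflect.DeepEqual is used since the key types are opaque here.
func tlfCryptKeyRoundTrips(c Crypto) (bool, error) {
	serverHalf, err := c.MakeRandomTLFCryptKeyServerHalf()
	if err != nil {
		return false, err
	}
	_, _, _, _, key, err := c.MakeRandomTLFKeys()
	if err != nil {
		return false, err
	}
	clientHalf, err := c.MaskTLFCryptKey(serverHalf, key)
	if err != nil {
		return false, err
	}
	recovered, err := c.UnmaskTLFCryptKey(serverHalf, clientHalf)
	if err != nil {
		return false, err
	}
	return reflect.DeepEqual(recovered, key), nil
}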
// Codec encodes and decodes arbitrary data
type Codec interface {
// Decode unmarshals the given buffer into the given object, if possible.
Decode(buf []byte, obj interface{}) error
// Encode marshals the given object into a returned buffer.
Encode(obj interface{}) ([]byte, error)
// RegisterType should be called for all types that are stored
// under ambiguous types (like interface{} or nil interface) in a
// struct that will be encoded/decoded by the codec. Each must
// have a unique extCode. Types that include other extension
// types are not supported.
RegisterType(rt reflect.Type, code extCode)
// RegisterIfaceSliceType should be called for all encoded slices
// that contain ambiguous interface types. Each must have a
// unique extCode. Slice element types that include other
// extension types are not supported.
//
// If non-nil, typer is used to do a type assertion during
// decoding, to convert the encoded value into the value expected
// by the rest of the code. This is needed, for example, when the
// codec cannot decode interface types to their desired pointer
// form.
RegisterIfaceSliceType(rt reflect.Type, code extCode,
typer func(interface{}) reflect.Value)
}
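// Illustrative sketch (added for this document, not part of the original
// file): registering a concrete type stored behind an interface field so
// the codec can decode it. Treat createOp and the extCode value as
// assumptions of the example; real registrations live elsewhere.
func registerOpTypes(c Codec, createOpCode extCode) {
	c.RegisterType(reflect.TypeOf(createOp{}), createOpCode)
}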
// MDOps gets and puts root metadata to an MDServer. On a get, it
// verifies the metadata is signed by the metadata's signing key.
type MDOps interface {
// GetForHandle returns the current metadata
// object corresponding to the given top-level folder's handle, if
// the logged-in user has read permission on the folder. It
// creates the folder if one doesn't exist yet, and the logged-in
// user has permission to do so.
GetForHandle(ctx context.Context, handle *TlfHandle) (
*RootMetadata, error)
// GetUnmergedForHandle is the same as the above but for unmerged
// metadata history.
GetUnmergedForHandle(ctx context.Context, handle *TlfHandle) (
*RootMetadata, error)
// GetForTLF returns the current metadata object
// corresponding to the given top-level folder, if the logged-in
// user has read permission on the folder.
GetForTLF(ctx context.Context, id TlfID) (*RootMetadata, error)
// GetUnmergedForTLF is the same as the above but for unmerged
// metadata.
GetUnmergedForTLF(ctx context.Context, id TlfID, bid BranchID) (
*RootMetadata, error)
// GetRange returns a range of metadata objects corresponding to
// the passed revision numbers (inclusive).
GetRange(ctx context.Context, id TlfID, start, stop MetadataRevision) (
[]*RootMetadata, error)
// GetUnmergedRange is the same as the above but for unmerged
// metadata history (inclusive).
GetUnmergedRange(ctx context.Context, id TlfID, bid BranchID,
start, stop MetadataRevision) ([]*RootMetadata, error)
// Put stores the metadata object for the given
// top-level folder.
Put(ctx context.Context, rmd *RootMetadata) error
// PutUnmerged is the same as the above but for unmerged
// metadata history.
PutUnmerged(ctx context.Context, rmd *RootMetadata, bid BranchID) error
// GetLatestHandleForTLF returns the server's idea of the latest handle for the TLF,
// which may not yet be reflected in the MD if the TLF hasn't been rekeyed since it
// entered into a conflicting state.
GetLatestHandleForTLF(ctx context.Context, id TlfID) (
*BareTlfHandle, error)
}
// KeyOps fetches server-side key halves from the key server.
type KeyOps interface {
// GetTLFCryptKeyServerHalf gets a server-side key half for a
// device given the key half ID.
GetTLFCryptKeyServerHalf(ctx context.Context,
serverHalfID TLFCryptKeyServerHalfID,
cryptPublicKey CryptPublicKey) (TLFCryptKeyServerHalf, error)
	// PutTLFCryptKeyServerHalves stores server-side key halves for a
	// set of users and devices.
PutTLFCryptKeyServerHalves(ctx context.Context,
serverKeyHalves map[keybase1.UID]map[keybase1.KID]TLFCryptKeyServerHalf) error
// DeleteTLFCryptKeyServerHalf deletes a server-side key half for a
// device given the key half ID.
DeleteTLFCryptKeyServerHalf(ctx context.Context,
uid keybase1.UID, kid keybase1.KID,
serverHalfID TLFCryptKeyServerHalfID) error
}
// BlockOps gets and puts data blocks to a BlockServer. It performs
// the necessary crypto operations on each block.
type BlockOps interface {
// Get gets the block associated with the given block pointer
// (which belongs to the TLF with the given metadata),
// decrypts it if necessary, and fills in the provided block
// object with its contents, if the logged-in user has read
// permission for that block.
Get(ctx context.Context, md *RootMetadata, blockPtr BlockPointer,
block Block) error
// Ready turns the given block (which belongs to the TLF with
// the given metadata) into encoded (and encrypted) data, and
// calculates its ID and size, so that we can do a bunch of
// block puts in parallel for every write. Ready() must
// guarantee that plainSize <= readyBlockData.QuotaSize().
Ready(ctx context.Context, md *RootMetadata, block Block) (
id BlockID, plainSize int, readyBlockData ReadyBlockData, err error)
// Put stores the readied block data under the given block
// pointer (which belongs to the TLF with the given metadata)
// on the server.
Put(ctx context.Context, md *RootMetadata, blockPtr BlockPointer,
readyBlockData ReadyBlockData) error
// Delete instructs the server to delete the given block references.
	// It returns the number of not-yet-deleted references
	// remaining for each block.
Delete(ctx context.Context, md *RootMetadata, ptrs []BlockPointer) (
liveCounts map[BlockID]int, err error)
// Archive instructs the server to mark the given block references
// as "archived"; that is, they are not being used in the current
// view of the folder, and shouldn't be served to anyone other
// than folder writers.
Archive(ctx context.Context, md *RootMetadata, ptrs []BlockPointer) error
}
// MDServer gets and puts metadata for each top-level directory. The
// instantiation should be able to fetch session/user details via KBPKI. On a
// put, the server is responsible for 1) ensuring the user has appropriate
// permissions for whatever modifications were made; 2) ensuring that
// LastModifyingWriter and LastModifyingUser are updated appropriately; and 3)
// detecting conflicting writes based on the previous root block ID (i.e., when
// it supports strict consistency). On a get, it verifies the logged-in user
// has read permissions.
//
// TODO: Add interface for searching by time
type MDServer interface {
AuthTokenRefreshHandler
// GetForHandle returns the current (signed/encrypted) metadata
// object corresponding to the given top-level folder's handle, if
// the logged-in user has read permission on the folder. It
// creates the folder if one doesn't exist yet, and the logged-in
// user has permission to do so.
GetForHandle(ctx context.Context, handle BareTlfHandle,
mStatus MergeStatus) (TlfID, *RootMetadataSigned, error)
// GetForTLF returns the current (signed/encrypted) metadata object
// corresponding to the given top-level folder, if the logged-in
// user has read permission on the folder.
GetForTLF(ctx context.Context, id TlfID, bid BranchID, mStatus MergeStatus) (
*RootMetadataSigned, error)
// GetRange returns a range of (signed/encrypted) metadata objects
// corresponding to the passed revision numbers (inclusive).
GetRange(ctx context.Context, id TlfID, bid BranchID, mStatus MergeStatus,
start, stop MetadataRevision) ([]*RootMetadataSigned, error)
// Put stores the (signed/encrypted) metadata object for the given
// top-level folder. Note: If the unmerged bit is set in the metadata
// block's flags bitmask it will be appended to the unmerged per-device
// history.
Put(ctx context.Context, rmds *RootMetadataSigned) error
// PruneBranch prunes all unmerged history for the given TLF branch.
PruneBranch(ctx context.Context, id TlfID, bid BranchID) error
// RegisterForUpdate tells the MD server to inform the caller when
// there is a merged update with a revision number greater than
// currHead, which did NOT originate from this same MD server
// session. This method returns a chan which can receive only a
// single error before it's closed. If the received err is nil,
// then there is updated MD ready to fetch which didn't originate
// locally; if it is non-nil, then the previous registration
// cannot send the next notification (e.g., the connection to the
// MD server may have failed). In either case, the caller must
// re-register to get a new chan that can receive future update
// notifications.
RegisterForUpdate(ctx context.Context, id TlfID,
currHead MetadataRevision) (<-chan error, error)
// CheckForRekeys initiates the rekey checking process on the
// server. The server is allowed to delay this request, and so it
// returns a channel for returning the error. Actual rekey
// requests are expected to come in asynchronously.
CheckForRekeys(ctx context.Context) <-chan error
// TruncateLock attempts to take the history truncation lock for
// this folder, for a TTL defined by the server. Returns true if
// the lock was successfully taken.
TruncateLock(ctx context.Context, id TlfID) (bool, error)
// TruncateUnlock attempts to release the history truncation lock
// for this folder. Returns true if the lock was successfully
// released.
TruncateUnlock(ctx context.Context, id TlfID) (bool, error)
// DisableRekeyUpdatesForTesting disables processing rekey updates
// received from the mdserver while testing.
DisableRekeyUpdatesForTesting()
// Shutdown is called to shutdown an MDServer connection.
Shutdown()
// IsConnected returns whether the MDServer is connected.
IsConnected() bool
// GetLatestHandleForTLF returns the server's idea of the latest handle for the TLF,
// which may not yet be reflected in the MD if the TLF hasn't been rekeyed since it
// entered into a conflicting state. For the highest level of confidence, the caller
// should verify the mapping with a Merkle tree lookup.
GetLatestHandleForTLF(ctx context.Context, id TlfID) (
*BareTlfHandle, error)
}
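// Illustrative sketch (added for this document, not part of the original
// file): the re-registration loop that the RegisterForUpdate doc above
// calls for. onUpdate is an assumed callback; a real caller would fetch
// the new MD and advance currHead before re-registering.
func watchUpdates(ctx context.Context, md MDServer, id TlfID,
	currHead MetadataRevision, onUpdate func()) error {
	for {
		updateCh, err := md.RegisterForUpdate(ctx, id, currHead)
		if err != nil {
			return err
		}
		select {
		case err := <-updateCh:
			if err == nil {
				onUpdate() // new merged MD is ready to fetch
			}
			// In either case the registration is spent, so loop around
			// and register again.
		case <-ctx.Done():
			return ctx.Err()
		}
	}
}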
// BlockServer gets and puts opaque data blocks. The instantiation
// should be able to fetch session/user details via KBPKI. On a
// put/delete, the server is responsible for: 1) checking that the ID
// matches the hash of the buffer; and 2) enforcing writer quotas.
type BlockServer interface {
AuthTokenRefreshHandler
// Get gets the (encrypted) block data associated with the given
// block ID and context, uses the provided block key to decrypt
// the block, and fills in the provided block object with its
// contents, if the logged-in user has read permission for that
// block.
Get(ctx context.Context, id BlockID, tlfID TlfID, context BlockContext) (
[]byte, BlockCryptKeyServerHalf, error)
// Put stores the (encrypted) block data under the given ID and
// context on the server, along with the server half of the block
// key. context should contain a BlockRefNonce of zero. There
// will be an initial reference for this block for the given
// context.
//
// Put should be idempotent, although it should also return an
// error if, for a given ID, any of the other arguments differ
// from previous Put calls with the same ID.
Put(ctx context.Context, id BlockID, tlfID TlfID, context BlockContext,
buf []byte, serverHalf BlockCryptKeyServerHalf) error
// AddBlockReference adds a new reference to the given block,
// defined by the given context (which should contain a non-zero
// BlockRefNonce). (Contexts with a BlockRefNonce of zero should
// be used when putting the block for the first time via Put().)
// Returns a BServerErrorBlockNonExistent if id is unknown within
// this folder.
//
// AddBlockReference should be idempotent, although it should
// also return an error if, for a given ID and refnonce, any
// of the other fields of context differ from previous
// AddBlockReference calls with the same ID and refnonce.
AddBlockReference(ctx context.Context, id BlockID, tlfID TlfID,
context BlockContext) error
// RemoveBlockReference removes the reference to the given block
// ID defined by the given context. If no references to the block
// remain after this call, the server is allowed to delete the
// corresponding block permanently. If the reference defined by
	// the context has already been removed, the call is a no-op.
	// It returns the number of remaining not-yet-deleted references
	// after this reference has been removed.
RemoveBlockReference(ctx context.Context, tlfID TlfID,
contexts map[BlockID][]BlockContext) (liveCounts map[BlockID]int, err error)
// ArchiveBlockReferences marks the given block references as
// "archived"; that is, they are not being used in the current
// view of the folder, and shouldn't be served to anyone other
// than folder writers.
//
// For a given ID/refnonce pair, ArchiveBlockReferences should
// be idempotent, although it should also return an error if
// any of the other fields of the context differ from previous
// calls with the same ID/refnonce pair.
ArchiveBlockReferences(ctx context.Context, tlfID TlfID,
contexts map[BlockID][]BlockContext) error
// Shutdown is called to shutdown a BlockServer connection.
Shutdown()
// GetUserQuotaInfo returns the quota for the user.
GetUserQuotaInfo(ctx context.Context) (info *UserQuotaInfo, err error)
}
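// Illustrative sketch (added for this document, not part of the original
// file): the first Put of a block uses a context with a zero BlockRefNonce,
// and later references to the same block use AddBlockReference with a
// context carrying a fresh nonce. Both contexts are assumed to be built by
// the caller.
func putThenAddRef(ctx context.Context, bs BlockServer, id BlockID, tlfID TlfID,
	putContext, refContext BlockContext, buf []byte,
	serverHalf BlockCryptKeyServerHalf) error {
	if err := bs.Put(ctx, id, tlfID, putContext, buf, serverHalf); err != nil {
		return err
	}
	return bs.AddBlockReference(ctx, id, tlfID, refContext)
}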
type blockRefLocalStatus int
const (
liveBlockRef blockRefLocalStatus = 1
	archivedBlockRef blockRefLocalStatus = 2
)
// blockServerLocal is the interface for BlockServer implementations
// that store data locally.
type blockServerLocal interface {
BlockServer
// getAll returns all the known block references, and should only be
// used during testing.
getAll(tlfID TlfID) (map[BlockID]map[BlockRefNonce]blockRefLocalStatus, error)
}
// BlockSplitter decides when a file or directory block needs to be split
type BlockSplitter interface {
// CopyUntilSplit copies data into the block until we reach the
// point where we should split, but only if writing to the end of
// the last block. If this is writing into the middle of a file,
// just copy everything that will fit into the block, and assume
// that block boundaries will be fixed later. Return how much was
// copied.
CopyUntilSplit(
block *FileBlock, lastBlock bool, data []byte, off int64) int64
// CheckSplit, given a block, figures out whether it ends at the
// right place. If so, return 0. If not, return either the
// offset in the block where it should be split, or -1 if more
// bytes from the next block should be appended.
CheckSplit(block *FileBlock) int64
// ShouldEmbedBlockChanges decides whether we should keep the
// block changes embedded in the MD or not.
ShouldEmbedBlockChanges(bc *BlockChanges) bool
}
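// Illustrative sketch (added for this document, not part of the original
// file): a trivial fixed-boundary splitter. It assumes FileBlock keeps its
// data in a Contents []byte field, ignores the lastBlock subtlety, and
// always embeds block changes; a production splitter is more involved.
type fixedSizeSplitter struct {
	maxSize int64
}
var _ BlockSplitter = fixedSizeSplitter{}
func (s fixedSizeSplitter) CopyUntilSplit(
	block *FileBlock, lastBlock bool, data []byte, off int64) int64 {
	room := s.maxSize - off
	if room <= 0 {
		return 0
	}
	n := int64(len(data))
	if n > room {
		n = room
	}
	if newLen := off + n; newLen > int64(len(block.Contents)) {
		grown := make([]byte, newLen)
		copy(grown, block.Contents)
		block.Contents = grown
	}
	copy(block.Contents[off:], data[:n])
	return n
}
func (s fixedSizeSplitter) CheckSplit(block *FileBlock) int64 {
	if int64(len(block.Contents)) > s.maxSize {
		return s.maxSize // split at the fixed boundary
	}
	return 0 // the block already ends at an acceptable place
}
func (s fixedSizeSplitter) ShouldEmbedBlockChanges(bc *BlockChanges) bool {
	return true
}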
// KeyServer fetches/writes server-side key halves from/to the key server.
type KeyServer interface {
// GetTLFCryptKeyServerHalf gets a server-side key half for a
// device given the key half ID.
GetTLFCryptKeyServerHalf(ctx context.Context,
serverHalfID TLFCryptKeyServerHalfID,
cryptPublicKey CryptPublicKey) (TLFCryptKeyServerHalf, error)
	// PutTLFCryptKeyServerHalves stores server-side key halves for a
	// set of users and devices.
PutTLFCryptKeyServerHalves(ctx context.Context,
serverKeyHalves map[keybase1.UID]map[keybase1.KID]TLFCryptKeyServerHalf) error
// DeleteTLFCryptKeyServerHalf deletes a server-side key half for a
// device given the key half ID.
DeleteTLFCryptKeyServerHalf(ctx context.Context,
uid keybase1.UID, kid keybase1.KID,
serverHalfID TLFCryptKeyServerHalfID) error
// Shutdown is called to free any KeyServer resources.
Shutdown()
}
// NodeChange represents a change made to a node as part of an atomic
// file system operation.
type NodeChange struct {
Node Node
// Basenames of entries added/removed.
DirUpdated []string
FileUpdated []WriteRange
}
// Observer can be notified that there is an available update for a
// given directory. The notification callbacks should not block, or
// make any calls to the Notifier interface. Nodes passed to the
// observer should not be held past the end of the notification
// callback.
type Observer interface {
// LocalChange announces that the file at this Node has been
// updated locally, but not yet saved at the server.
LocalChange(ctx context.Context, node Node, write WriteRange)
// BatchChanges announces that the nodes have all been updated
// together atomically. Each NodeChange in changes affects the
// same top-level folder and branch.
BatchChanges(ctx context.Context, changes []NodeChange)
// TlfHandleChange announces that the handle of the corresponding
// folder branch has changed, likely due to previously-unresolved
// assertions becoming resolved. This indicates that the listener
// should switch over any cached paths for this folder-branch to
// the new name. Nodes that were acquired under the old name will
// still continue to work, but new lookups on the old name may
// either encounter alias errors or entirely new TLFs (in the case
// of conflicts).
TlfHandleChange(ctx context.Context, newHandle *TlfHandle)
}
// Notifier notifies registrants of directory changes
type Notifier interface {
// RegisterForChanges declares that the given Observer wants to
// subscribe to updates for the given top-level folders.
RegisterForChanges(folderBranches []FolderBranch, obs Observer) error
// UnregisterFromChanges declares that the given Observer no
// longer wants to subscribe to updates for the given top-level
// folders.
UnregisterFromChanges(folderBranches []FolderBranch, obs Observer) error
}
// Clock is an interface for getting the current time
type Clock interface {
// Now returns the current time.
Now() time.Time
}
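// Illustrative sketch, not part of the original file: a fixed clock
// satisfying Clock, the kind of stub a test could hand to
// Config.SetClock so time-dependent logic becomes deterministic.
// The name fixedClock is hypothetical.
type fixedClock struct {
t time.Time
}
// Now always returns the same stored instant, implementing Clock.
func (c fixedClock) Now() time.Time { return c.t }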
// ConflictRenamer deals with names for conflicting directory entries.
type ConflictRenamer interface {
// ConflictRename returns the appropriately modified filename.
ConflictRename(op op, original string) string
}
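// Illustrative sketch, not part of the original file: a trivial
// ConflictRenamer that tags the conflicting name. A real renamer
// would likely embed user or timestamp details; the ".conflicted"
// suffix here is made up.
type suffixConflictRenamer struct{}
func (suffixConflictRenamer) ConflictRename(op op, original string) string {
return original + ".conflicted"
}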
// Config collects all the singleton instances needed to run KBFS
// in one place. The methods below are self-explanatory and do not
// require comments.
type Config interface {
KBFSOps() KBFSOps
SetKBFSOps(KBFSOps)
KBPKI() KBPKI
SetKBPKI(KBPKI)
KeyManager() KeyManager
SetKeyManager(KeyManager)
Reporter() Reporter
SetReporter(Reporter)
MDCache() MDCache
SetMDCache(MDCache)
KeyCache() KeyCache
SetKeyCache(KeyCache)
BlockCache() BlockCache
SetBlockCache(BlockCache)
DirtyBlockCache() DirtyBlockCache
SetDirtyBlockCache(DirtyBlockCache)
Crypto() Crypto
SetCrypto(Crypto)
Codec() Codec
SetCodec(Codec)
MDOps() MDOps
SetMDOps(MDOps)
KeyOps() KeyOps
SetKeyOps(KeyOps)
BlockOps() BlockOps
SetBlockOps(BlockOps)
MDServer() MDServer
SetMDServer(MDServer)
BlockServer() BlockServer
SetBlockServer(BlockServer)
KeyServer() KeyServer
SetKeyServer(KeyServer)
KeybaseDaemon() KeybaseDaemon
SetKeybaseDaemon(KeybaseDaemon)
BlockSplitter() BlockSplitter
SetBlockSplitter(BlockSplitter)
Notifier() Notifier
SetNotifier(Notifier)
Clock() Clock
SetClock(Clock)
ConflictRenamer() ConflictRenamer
SetConflictRenamer(ConflictRenamer)
MetadataVersion() MetadataVer
DataVersion() DataVer
RekeyQueue() RekeyQueue
SetRekeyQueue(RekeyQueue)
// ReqsBufSize indicates the number of read or write operations
// that can be buffered per folder.
ReqsBufSize() int
// MaxFileBytes indicates the maximum supported plaintext size of
// a file in bytes.
MaxFileBytes() uint64
// MaxNameBytes indicates the maximum supported size of a
// directory entry name in bytes.
MaxNameBytes() uint32
// MaxDirBytes indicates the maximum supported plaintext size of a
// directory in bytes.
MaxDirBytes() uint64
// DoBackgroundFlushes says whether we should periodically try to
// flush dirty files, even without a sync from the user. Should
// be true except for during some testing.
DoBackgroundFlushes() bool
// RekeyWithPromptWaitTime indicates how long to wait, after
// setting the rekey bit, before prompting for a paper key.
RekeyWithPromptWaitTime() time.Duration
// QuotaReclamationPeriod indicates how often each TLF should
// check for quota to reclaim. If the Duration.Seconds()
// == 0, quota reclamation should not run automatically.
QuotaReclamationPeriod() time.Duration
// QuotaReclamationMinUnrefAge indicates the minimum time a block
// must have been unreferenced before it can be reclaimed.
QuotaReclamationMinUnrefAge() time.Duration
// ResetCaches clears and re-initializes all data and key caches.
ResetCaches()
MakeLogger(module string) logger.Logger
SetLoggerMaker(func(module string) logger.Logger)
// MetricsRegistry may be nil, which should be interpreted as
// not using metrics at all. (i.e., as if UseNilMetrics were
// set). This differs from how go-metrics treats nil Registry
// objects, which is to use the default registry.
MetricsRegistry() metrics.Registry
SetMetricsRegistry(metrics.Registry)
// TLFValidDuration is the time TLFs are valid before identification needs to be redone.
TLFValidDuration() time.Duration
// SetTLFValidDuration sets TLFValidDuration.
SetTLFValidDuration(time.Duration)
// Shutdown is called to free config resources.
Shutdown() error
// CheckStateOnShutdown tells the caller whether or not it is safe
// to check the state of the system on shutdown.
CheckStateOnShutdown() bool
}
// NodeCache holds Nodes, and allows libkbfs to update them when
// things change about the underlying KBFS blocks. It is probably
// most useful to instantiate this on a per-folder-branch basis, so
// that it can create a Path with the correct DirId and Branch name.
type NodeCache interface {
// GetOrCreate either makes a new Node for the given
// BlockPointer, or returns an existing one. TODO: If we ever
// support hard links, we will have to revisit the "name" and
// "parent" parameters here. name must not be empty. Returns
// an error if parent cannot be found.
GetOrCreate(ptr BlockPointer, name string, parent Node) (Node, error)
// Get returns the Node associated with the given ref if one
// already exists. Otherwise, it returns nil.
Get(ref blockRef) Node
// UpdatePointer updates the BlockPointer for the corresponding
// Node. NodeCache ignores this call when oldRef is not cached in
// any Node.
UpdatePointer(oldRef blockRef, newPtr BlockPointer)
// Move swaps the parent node for the corresponding Node, and
// updates the node's name. NodeCache ignores the call when ref
// is not cached. Returns an error if newParent cannot be found.
// If newParent is nil, it treats ref's corresponding node as
// being unlinked from the old parent completely.
Move(ref blockRef, newParent Node, newName string) error
// Unlink sets the corresponding node's parent to nil and caches
// the provided path in case the node is still open. NodeCache
// ignores the call when ref is not cached. The path is required
// because the caller may have made changes to the parent nodes
// already that shouldn't be reflected in the cached path.
Unlink(ref blockRef, oldPath path)
// PathFromNode creates the path up to a given Node.
PathFromNode(node Node) path
}
// fileBlockDeepCopier fetches a file block, makes a deep copy of it
// (duplicating pointers for any indirect blocks) and generates a new
// random temporary block ID for it. It returns the new BlockPointer,
// and internally saves the block for future uses.
type fileBlockDeepCopier func(context.Context, string, BlockPointer) (
BlockPointer, error)
// crAction represents a specific action to take as part of the
// conflict resolution process.
type crAction interface {
// swapUnmergedBlock should be called before do(), and if it
// returns true, the caller must use the merged block
// corresponding to the returned BlockPointer instead of
// unmergedBlock when calling do(). If the returned BlockPointer is
// the zero value (and true is returned), just swap in the regular
// mergedBlock.
swapUnmergedBlock(unmergedChains *crChains, mergedChains *crChains,
unmergedBlock *DirBlock) (bool, BlockPointer, error)
// do modifies the given merged block in place to resolve the
// conflict, and potentially uses the provided fileBlockDeepCopiers to
// obtain copies of other blocks (along with new BlockPointers)
// when a block copy is required.
do(ctx context.Context, unmergedCopier fileBlockDeepCopier,
mergedCopier fileBlockDeepCopier, unmergedBlock *DirBlock,
mergedBlock *DirBlock) error
// updateOps potentially modifies, in place, the slices of
// unmerged and merged operations stored in the corresponding
// crChains for the given unmerged and merged most recent
// pointers. Eventually, the "unmerged" ops will be pushed as
// part of an MD update, and so should contain any operations
// necessary to fully merge the unmerged data, including any
// conflict resolution. The "merged" ops will be played through
// locally, to notify any caches about the newly-obtained merged
// data (and any changes to local data that were required as part
// of conflict resolution, such as renames). A few things to note:
// * A particular action's updateOps method may be called more than
// once for different sets of chains, however it should only add
// new directory operations (like create/rm/rename) into directory
// chains.
// * updateOps doesn't necessarily result in correct BlockPointers within
// each of those ops; that must happen in a later phase.
// * mergedBlock can be nil if the chain is for a file.
updateOps(unmergedMostRecent BlockPointer, mergedMostRecent BlockPointer,
unmergedBlock *DirBlock, mergedBlock *DirBlock,
unmergedChains *crChains, mergedChains *crChains) error
// String returns a string representation for this crAction, used
// for debugging.
String() string
}
// RekeyQueue is a managed queue of folders needing some rekey action taken upon them
// by the current client.
type RekeyQueue interface {
// Enqueue enqueues a folder for rekey action.
Enqueue(TlfID) <-chan error
// IsRekeyPending returns true if the given folder is in the rekey queue.
IsRekeyPending(TlfID) bool
// GetRekeyChannel will return any rekey completion channel (if pending).
GetRekeyChannel(id TlfID) <-chan error
// Clear cancels all pending rekey actions and clears the queue.
Clear()
// Wait waits for all queued rekeys to finish.
Wait(ctx context.Context) error
}
| 1 | 11,265 | There wasn't any special reason for this to return a pointer, right? | keybase-kbfs | go |
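An aside on the review comment above, hedged because the real KBFS types are not shown here: for a small struct such as a quota summary, returning it by value spares every caller a nil check, and the pointer form only pays off when nil must mean "no info available". A standalone sketch with simplified stand-in types:

package quotademo

type userQuotaInfo struct{ Usage, Limit int64 }

// Pointer form: callers must nil-check before reading fields.
func quotaByPointer() (*userQuotaInfo, error) {
return &userQuotaInfo{Usage: 10, Limit: 100}, nil
}

// Value form: the zero value is safe to use directly.
func quotaByValue() (userQuotaInfo, error) {
return userQuotaInfo{Usage: 10, Limit: 100}, nil
}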
@@ -14,15 +14,15 @@ import org.mozilla.vrbrowser.R;
import org.mozilla.vrbrowser.ui.views.UIButton;
import org.mozilla.vrbrowser.ui.widgets.NotificationManager.Notification.NotificationPosition;
-import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
public class NotificationManager {
private static final int DEFAULT_DURATION = 3000;
- private static HashMap<Integer, NotificationData> mData = new HashMap<>();
+ private static ConcurrentHashMap<Integer, NotificationData> mData = new ConcurrentHashMap<>();
private static class NotificationData {
| 1 | package org.mozilla.vrbrowser.ui.widgets;
import android.graphics.Rect;
import android.view.View;
import androidx.annotation.DimenRes;
import androidx.annotation.IntDef;
import androidx.annotation.LayoutRes;
import androidx.annotation.NonNull;
import androidx.annotation.StringRes;
import org.mozilla.gecko.util.ThreadUtils;
import org.mozilla.vrbrowser.R;
import org.mozilla.vrbrowser.ui.views.UIButton;
import org.mozilla.vrbrowser.ui.widgets.NotificationManager.Notification.NotificationPosition;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
public class NotificationManager {
private static final int DEFAULT_DURATION = 3000;
private static HashMap<Integer, NotificationData> mData = new HashMap<>();
private static class NotificationData {
private TooltipWidget mNotificationView;
private Notification mNotification;
private Runnable mHideTask;
public NotificationData(@NonNull TooltipWidget view, @NonNull Notification notification, @NonNull Runnable hideTask) {
mNotificationView = view;
mNotification = notification;
mHideTask = hideTask;
}
}
public static class Notification {
@IntDef(value = { MIDDLE, TOP, BOTTOM, LEFT, RIGHT})
public @interface NotificationPosition {}
public static final int MIDDLE = 0;
public static final int TOP = 1;
public static final int BOTTOM = 2;
public static final int LEFT = 4;
public static final int RIGHT = 8;
private UIWidget mParent;
private View mView;
private String mString;
private float mMargin;
private float mZTranslation;
private @NotificationPosition int mPositionFlags;
private @DimenRes int mDensity;
private @LayoutRes int mLayoutRes;
private int mDuration;
private boolean mCurved;
public Notification(@NonNull Builder builder) {
mParent = builder.parent;
mView = builder.view;
mString = builder.string;
mMargin = builder.margin;
mZTranslation = builder.zTranslation;
mPositionFlags = builder.positionFlags;
mDensity = builder.density;
mLayoutRes = builder.layoutRes;
mDuration = builder.duration;
mCurved = builder.curved;
}
}
public static class Builder {
private UIWidget parent;
private View view = null;
private String string;
private float margin = 0.0f;
private float zTranslation = 0.0f;
private @NotificationPosition int positionFlags = Notification.MIDDLE;
private @DimenRes int density;
private @LayoutRes int layoutRes = R.layout.library_notification;
private int duration = DEFAULT_DURATION;
private boolean curved = false;
public Builder(@NonNull UIWidget parent) {
this.parent = parent;
this.density = R.dimen.tooltip_default_density;
}
public Builder withString(@StringRes int res) {
this.string = parent.getContext().getString(res);
return this;
}
public Builder withString(String string) {
this.string = string;
return this;
}
public Builder withView(@NonNull View view) {
this.view = view;
return this;
}
public Builder withMargin(float margin){
this.margin = margin;
return this;
}
public Builder withPosition(@NotificationPosition int positionFlags) {
this.positionFlags = positionFlags;
return this;
}
public Builder withZTranslation(float translation) {
this.zTranslation = translation;
return this;
}
public Builder withDensity(@DimenRes int density) {
this.density = density;
return this;
}
public Builder withLayout(@LayoutRes int res) {
this.layoutRes = res;
return this;
}
public Builder withDuration(int duration) {
this.duration = duration;
return this;
}
public Builder withCurved(boolean curved) {
this.curved = curved;
return this;
}
public Notification build(){
return new Notification(this);
}
}
public static void show(int notificationId, @NonNull Notification notification) {
if (mData.containsKey(notificationId)) {
return;
}
TooltipWidget notificationView = new TooltipWidget(notification.mParent.getContext(), notification.mLayoutRes);
notificationView.setDelegate(() -> hide(notificationId));
setPlacement(notificationView, notification);
notificationView.setText(notification.mString);
notificationView.setCurvedMode(false);
notificationView.show(UIWidget.KEEP_FOCUS);
if (notification.mView instanceof UIButton) {
((UIButton)notification.mView).setNotificationMode(true);
}
Runnable hideTask = () -> hide(notificationId);
ThreadUtils.postDelayedToUiThread(hideTask, notification.mDuration);
mData.put(notificationId, new NotificationData(notificationView, notification, hideTask));
}
public static void hide(int notificationId) {
if (!mData.containsKey(notificationId)) {
return;
}
NotificationData data = mData.get(notificationId);
if (data != null && data.mNotificationView.isVisible()) {
ThreadUtils.removeCallbacksFromUiThread(data.mHideTask);
data.mNotificationView.hide(UIWidget.REMOVE_WIDGET);
if (data.mNotification.mView instanceof UIButton) {
((UIButton)data.mNotification.mView).setNotificationMode(false);
}
mData.remove(notificationId);
}
}
public static void hideAll() {
Iterator<Map.Entry<Integer, NotificationData>> it = mData.entrySet().iterator();
while (it.hasNext()) {
hide(it.next().getKey());
}
}
private static void setPlacement(@NonNull TooltipWidget notificationView, @NonNull Notification notification) {
notificationView.getPlacement().parentHandle = notification.mParent.getHandle();
notificationView.getPlacement().density = WidgetPlacement.floatDimension(notification.mParent.getContext(), notification.mDensity);
notificationView.getPlacement().translationZ = notification.mZTranslation;
notificationView.getPlacement().cylinder = notification.mCurved;
Rect offsetViewBounds = new Rect();
if (notification.mView != null) {
notification.mParent.getDrawingRect(offsetViewBounds);
notification.mParent.offsetDescendantRectToMyCoords(notification.mView, offsetViewBounds);
}
int width = 0;
int height = 0;
float ratio = 1.0f;
if (notification.mView != null) {
width = notification.mView.getWidth();
height = notification.mView.getHeight();
ratio = WidgetPlacement.viewToWidgetRatio(notification.mParent.getContext(), notification.mParent);
}
if (notification.mView == null) {
notificationView.getPlacement().anchorX = 0.5f;
notificationView.getPlacement().parentAnchorX = 0.5f;
notificationView.getPlacement().anchorY = 0.5f;
notificationView.getPlacement().parentAnchorY = 0.5f;
if ((notification.mPositionFlags & Notification.TOP) == Notification.TOP) {
notificationView.getPlacement().anchorY = 0.0f;
notificationView.getPlacement().parentAnchorY = 1.0f;
notificationView.getPlacement().translationY = notification.mMargin;
}
if ((notification.mPositionFlags & Notification.BOTTOM) == Notification.BOTTOM) {
notificationView.getPlacement().anchorY = 1.0f;
notificationView.getPlacement().parentAnchorY = 0.0f;
notificationView.getPlacement().translationY = -notification.mMargin;
}
if ((notification.mPositionFlags & Notification.LEFT) == Notification.LEFT) {
notificationView.getPlacement().anchorX = 1.0f;
notificationView.getPlacement().parentAnchorX = 0.0f;
notificationView.getPlacement().translationX = -notification.mMargin;
}
if ((notification.mPositionFlags & Notification.RIGHT) == Notification.RIGHT) {
notificationView.getPlacement().anchorX = 0.0f;
notificationView.getPlacement().parentAnchorX = 1.0f;
notificationView.getPlacement().translationX = notification.mMargin;
}
} else {
notificationView.getPlacement().parentAnchorX = 0.0f;
notificationView.getPlacement().parentAnchorY = 1.0f;
notificationView.getPlacement().anchorX = 0.5f;
notificationView.getPlacement().anchorY = 0.5f;
notificationView.getPlacement().translationX = (offsetViewBounds.left + (width / 2.0f)) * ratio;
notificationView.getPlacement().translationY = -(offsetViewBounds.bottom - (height / 2.0f)) * ratio;
if ((notification.mPositionFlags & Notification.TOP) == Notification.TOP) {
notificationView.getPlacement().anchorY = 0.0f;
notificationView.getPlacement().translationY = (offsetViewBounds.top + notification.mMargin) * ratio;
}
if ((notification.mPositionFlags & Notification.BOTTOM) == Notification.BOTTOM) {
notificationView.getPlacement().anchorY = 1.0f;
notificationView.getPlacement().translationY = -(offsetViewBounds.bottom + notification.mMargin) * ratio;
}
if ((notification.mPositionFlags & Notification.LEFT) == Notification.LEFT) {
notificationView.getPlacement().anchorX = 1.0f;
notificationView.getPlacement().translationX = (offsetViewBounds.left - notification.mMargin) * ratio;
}
if ((notification.mPositionFlags & Notification.RIGHT) == Notification.RIGHT) {
notificationView.getPlacement().anchorX = 0.0f;
notificationView.getPlacement().translationX = (offsetViewBounds.left + width + notification.mMargin) * ratio;
}
}
}
}
| 1 | 8,998 | Why the need for a `ConcurrentHashMap`? | MozillaReality-FirefoxReality | java |
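An aside on the review question above, hedged since the threading model is not spelled out in the file: hideAll() iterates mData while hide() removes entries, and notifications may be shown or hidden from more than one thread; with a plain HashMap that pattern risks ConcurrentModificationException or a corrupted table, while ConcurrentHashMap's weakly consistent iterators tolerate concurrent removal. A minimal standalone demonstration (the class name is made up):

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class MapRemovalDemo {
    public static void main(String[] args) {
        Map<Integer, String> plain = new HashMap<>();
        plain.put(1, "a");
        plain.put(2, "b");
        try {
            for (Integer key : plain.keySet()) {
                plain.remove(key); // fail-fast iterator: typically throws
            }
        } catch (java.util.ConcurrentModificationException e) {
            System.out.println("HashMap threw: " + e);
        }

        Map<Integer, String> safe = new ConcurrentHashMap<>();
        safe.put(1, "a");
        safe.put(2, "b");
        for (Integer key : safe.keySet()) {
            safe.remove(key); // weakly consistent iterator: no exception
        }
        System.out.println("ConcurrentHashMap is now: " + safe);
    }
}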
@@ -272,8 +272,13 @@ class Note extends BaseItem {
let bProp = b[order.by];
if (typeof aProp === 'string') aProp = aProp.toLowerCase();
if (typeof bProp === 'string') bProp = bProp.toLowerCase();
- if (aProp < bProp) r = +1;
- if (aProp > bProp) r = -1;
+
+ if (order.by == 'title' && Setting.value('titleNaturalSort')) {
+ r = this.naturalSortCompare(aProp, bProp, false);
+ } else {
+ if (aProp < bProp) r = +1;
+ if (aProp > bProp) r = -1;
+ }
if (order.dir == 'ASC') r = -r;
if (r !== 0) return r;
} | 1 | const BaseModel = require('../BaseModel').default;
const { sprintf } = require('sprintf-js');
const BaseItem = require('./BaseItem.js');
const ItemChange = require('./ItemChange.js');
const Resource = require('./Resource.js');
const Setting = require('./Setting').default;
const shim = require('../shim').default;
const { pregQuote } = require('../string-utils.js');
const time = require('../time').default;
const { _ } = require('../locale');
const ArrayUtils = require('../ArrayUtils.js');
const lodash = require('lodash');
const urlUtils = require('../urlUtils.js');
const markdownUtils = require('../markdownUtils').default;
const { isImageMimeType } = require('../resourceUtils');
const { MarkupToHtml } = require('@joplin/renderer');
const { ALL_NOTES_FILTER_ID } = require('../reserved-ids');
class Note extends BaseItem {
static tableName() {
return 'notes';
}
static fieldToLabel(field) {
const fieldsToLabels = {
title: _('title'),
user_updated_time: _('updated date'),
user_created_time: _('created date'),
order: _('custom order'),
};
return field in fieldsToLabels ? fieldsToLabels[field] : field;
}
static async serializeForEdit(note) {
return this.replaceResourceInternalToExternalLinks(await super.serialize(note, ['title', 'body']));
}
static async unserializeForEdit(content) {
content += `\n\ntype_: ${BaseModel.TYPE_NOTE}`;
const output = await super.unserialize(content);
if (!output.title) output.title = '';
if (!output.body) output.body = '';
output.body = await this.replaceResourceExternalToInternalLinks(output.body);
return output;
}
static async serializeAllProps(note) {
const fieldNames = this.fieldNames();
fieldNames.push('type_');
lodash.pull(fieldNames, 'title', 'body');
return super.serialize(note, fieldNames);
}
static minimalSerializeForDisplay(note) {
const n = Object.assign({}, note);
const fieldNames = this.fieldNames();
if (!n.is_conflict) lodash.pull(fieldNames, 'is_conflict');
if (!Number(n.latitude)) lodash.pull(fieldNames, 'latitude');
if (!Number(n.longitude)) lodash.pull(fieldNames, 'longitude');
if (!Number(n.altitude)) lodash.pull(fieldNames, 'altitude');
if (!n.author) lodash.pull(fieldNames, 'author');
if (!n.source_url) lodash.pull(fieldNames, 'source_url');
if (!n.is_todo) {
lodash.pull(fieldNames, 'is_todo');
lodash.pull(fieldNames, 'todo_due');
lodash.pull(fieldNames, 'todo_completed');
}
if (!n.application_data) lodash.pull(fieldNames, 'application_data');
lodash.pull(fieldNames, 'type_');
lodash.pull(fieldNames, 'title');
lodash.pull(fieldNames, 'body');
lodash.pull(fieldNames, 'created_time');
lodash.pull(fieldNames, 'updated_time');
lodash.pull(fieldNames, 'order');
return super.serialize(n, fieldNames);
}
static defaultTitle(noteBody) {
return this.defaultTitleFromBody(noteBody);
}
static defaultTitleFromBody(body) {
return markdownUtils.titleFromBody(body);
}
static geolocationUrl(note) {
if (!('latitude' in note) || !('longitude' in note)) throw new Error('Latitude or longitude is missing');
if (!Number(note.latitude) && !Number(note.longitude)) throw new Error(_('This note does not have geolocation information.'));
return this.geoLocationUrlFromLatLong(note.latitude, note.longitude);
}
static geoLocationUrlFromLatLong(lat, long) {
return sprintf('https://www.openstreetmap.org/?lat=%s&lon=%s&zoom=20', lat, long);
}
static modelType() {
return BaseModel.TYPE_NOTE;
}
static linkedItemIds(body) {
if (!body || body.length <= 32) return [];
const links = urlUtils.extractResourceUrls(body);
const itemIds = links.map(l => l.itemId);
return ArrayUtils.unique(itemIds);
}
static async linkedItems(body) {
const itemIds = this.linkedItemIds(body);
const r = await BaseItem.loadItemsByIds(itemIds);
return r;
}
static async linkedItemIdsByType(type, body) {
const items = await this.linkedItems(body);
const output = [];
for (let i = 0; i < items.length; i++) {
const item = items[i];
if (item.type_ === type) output.push(item.id);
}
return output;
}
static async linkedResourceIds(body) {
return this.linkedItemIdsByType(BaseModel.TYPE_RESOURCE, body);
}
static async linkedNoteIds(body) {
return this.linkedItemIdsByType(BaseModel.TYPE_NOTE, body);
}
static async replaceResourceInternalToExternalLinks(body, options = null) {
options = Object.assign({}, {
useAbsolutePaths: false,
}, options);
this.logger().debug('replaceResourceInternalToExternalLinks', 'options:', options, 'body:', body);
const resourceIds = await this.linkedResourceIds(body);
const Resource = this.getClass('Resource');
for (let i = 0; i < resourceIds.length; i++) {
const id = resourceIds[i];
const resource = await Resource.load(id);
if (!resource) continue;
const isImage = isImageMimeType(resource.mime);
// We add a timestamp parameter for images, so that when they
// change, the preview is updated inside the note. This is not
// needed for other resources since they are simple links.
const timestampParam = isImage ? `?t=${resource.updated_time}` : '';
const resourcePath = options.useAbsolutePaths ? `file://${Resource.fullPath(resource)}${timestampParam}` : Resource.relativePath(resource);
body = body.replace(new RegExp(`:/${id}`, 'gi'), markdownUtils.escapeLinkUrl(resourcePath));
}
this.logger().debug('replaceResourceInternalToExternalLinks result', body);
return body;
}
static async replaceResourceExternalToInternalLinks(body, options = null) {
options = Object.assign({}, {
useAbsolutePaths: false,
}, options);
let pathsToTry = [];
if (options.useAbsolutePaths) {
pathsToTry.push(`file://${Setting.value('resourceDir')}`);
pathsToTry.push(`file://${shim.pathRelativeToCwd(Setting.value('resourceDir'))}`);
} else {
pathsToTry.push(Resource.baseRelativeDirectoryPath());
}
// We support both the escaped and unescaped versions because both
// of those paths are valid:
//
// [](file://C:/I like spaces in paths/abcdefg.jpg)
// [](file://C:/I%20like%20spaces%20in%20paths/abcdefg.jpg)
//
// https://discourse.joplinapp.org/t/12986/4
const temp = [];
for (const p of pathsToTry) {
temp.push(p);
temp.push(markdownUtils.escapeLinkUrl(p));
}
pathsToTry = temp;
this.logger().debug('replaceResourceExternalToInternalLinks', 'options:', options, 'pathsToTry:', pathsToTry);
for (const basePath of pathsToTry) {
const reStrings = [
// Handles file://path/to/abcdefg.jpg?t=12345678
`${pregQuote(`${basePath}/`)}[a-zA-Z0-9.]+\\?t=[0-9]+`,
// Handles file://path/to/abcdefg.jpg
`${pregQuote(`${basePath}/`)}[a-zA-Z0-9.]+`,
];
for (const reString of reStrings) {
const re = new RegExp(reString, 'gi');
body = body.replace(re, match => {
const id = Resource.pathToId(match);
return `:/${id}`;
});
}
// Handles joplin://af0edffa4a60496bba1b0ba06b8fb39a
body = body.replace(/\(joplin:\/\/([a-zA-Z0-9]{32})\)/g, '(:/$1)');
}
// this.logger().debug('replaceResourceExternalToInternalLinks result', body);
return body;
}
static new(parentId = '') {
const output = super.new();
output.parent_id = parentId;
return output;
}
static newTodo(parentId = '') {
const output = this.new(parentId);
output.is_todo = true;
return output;
}
// Note: sort logic must be duplicated in previews();
static sortNotes(notes, orders, uncompletedTodosOnTop) {
const noteOnTop = note => {
return uncompletedTodosOnTop && note.is_todo && !note.todo_completed;
};
const noteFieldComp = (f1, f2) => {
if (f1 === f2) return 0;
return f1 < f2 ? -1 : +1;
};
// Makes the sort deterministic, so that if, for example, a and b have the
// same updated_time, they aren't swapped every time a list is refreshed.
const sortIdenticalNotes = (a, b) => {
let r = null;
r = noteFieldComp(a.user_updated_time, b.user_updated_time);
if (r) return r;
r = noteFieldComp(a.user_created_time, b.user_created_time);
if (r) return r;
const titleA = a.title ? a.title.toLowerCase() : '';
const titleB = b.title ? b.title.toLowerCase() : '';
r = noteFieldComp(titleA, titleB);
if (r) return r;
return noteFieldComp(a.id, b.id);
};
return notes.sort((a, b) => {
if (noteOnTop(a) && !noteOnTop(b)) return -1;
if (!noteOnTop(a) && noteOnTop(b)) return +1;
let r = 0;
for (let i = 0; i < orders.length; i++) {
const order = orders[i];
let aProp = a[order.by];
let bProp = b[order.by];
if (typeof aProp === 'string') aProp = aProp.toLowerCase();
if (typeof bProp === 'string') bProp = bProp.toLowerCase();
if (aProp < bProp) r = +1;
if (aProp > bProp) r = -1;
if (order.dir == 'ASC') r = -r;
if (r !== 0) return r;
}
return sortIdenticalNotes(a, b);
});
}
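// Illustrative usage, not part of the original file: sort newest-first
// by user_updated_time with uncompleted to-dos pinned on top. `notes`
// is a hypothetical array of note objects carrying those fields.
//
// const sorted = Note.sortNotes(
// notes,
// [{ by: 'user_updated_time', dir: 'DESC' }, { by: 'id', dir: 'DESC' }],
// true
// );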
static previewFieldsWithDefaultValues(options = null) {
return Note.defaultValues(this.previewFields(options));
}
static previewFields(options = null) {
options = Object.assign({
includeTimestamps: true,
}, options);
const output = ['id', 'title', 'is_todo', 'todo_completed', 'todo_due', 'parent_id', 'encryption_applied', 'order', 'markup_language', 'is_conflict'];
if (options.includeTimestamps) {
output.push('updated_time');
output.push('user_updated_time');
output.push('user_created_time');
}
return output;
}
static previewFieldsSql(fields = null) {
if (fields === null) fields = this.previewFields();
return this.db().escapeFields(fields).join(',');
}
static async loadFolderNoteByField(folderId, field, value) {
if (!folderId) throw new Error('folderId is undefined');
const options = {
conditions: [`\`${field}\` = ?`],
conditionsParams: [value],
fields: '*',
};
const results = await this.previews(folderId, options);
return results.length ? results[0] : null;
}
static async previews(parentId, options = null) {
// Note: ordering logic must be duplicated in sortNotes(), which is used
// to sort already loaded notes.
if (!options) options = {};
if (!('order' in options)) options.order = [{ by: 'user_updated_time', dir: 'DESC' }, { by: 'user_created_time', dir: 'DESC' }, { by: 'title', dir: 'DESC' }, { by: 'id', dir: 'DESC' }];
if (!options.conditions) options.conditions = [];
if (!options.conditionsParams) options.conditionsParams = [];
if (!options.fields) options.fields = this.previewFields();
if (!options.uncompletedTodosOnTop) options.uncompletedTodosOnTop = false;
if (!('showCompletedTodos' in options)) options.showCompletedTodos = true;
const Folder = BaseItem.getClass('Folder');
// Conflicts are always displayed regardless of options, since otherwise
// it's confusing to have conflicts but with an empty conflict folder.
if (parentId === Folder.conflictFolderId()) options.showCompletedTodos = true;
if (parentId == Folder.conflictFolderId()) {
options.conditions.push('is_conflict = 1');
} else {
options.conditions.push('is_conflict = 0');
if (parentId && parentId !== ALL_NOTES_FILTER_ID) {
options.conditions.push('parent_id = ?');
options.conditionsParams.push(parentId);
}
}
if (options.anywherePattern) {
const pattern = options.anywherePattern.replace(/\*/g, '%');
options.conditions.push('(title LIKE ? OR body LIKE ?)');
options.conditionsParams.push(pattern);
options.conditionsParams.push(pattern);
}
let hasNotes = true;
let hasTodos = true;
if (options.itemTypes && options.itemTypes.length) {
if (options.itemTypes.indexOf('note') < 0) {
hasNotes = false;
} else if (options.itemTypes.indexOf('todo') < 0) {
hasTodos = false;
}
}
if (!options.showCompletedTodos) {
options.conditions.push('todo_completed <= 0');
}
if (options.uncompletedTodosOnTop && hasTodos) {
let cond = options.conditions.slice();
cond.push('is_todo = 1');
cond.push('(todo_completed <= 0 OR todo_completed IS NULL)');
let tempOptions = Object.assign({}, options);
tempOptions.conditions = cond;
const uncompletedTodos = await this.search(tempOptions);
cond = options.conditions.slice();
if (hasNotes && hasTodos) {
cond.push('(is_todo = 0 OR (is_todo = 1 AND todo_completed > 0))');
} else {
cond.push('(is_todo = 1 AND todo_completed > 0)');
}
tempOptions = Object.assign({}, options);
tempOptions.conditions = cond;
if ('limit' in tempOptions) tempOptions.limit -= uncompletedTodos.length;
const theRest = await this.search(tempOptions);
return uncompletedTodos.concat(theRest);
}
if (hasNotes && hasTodos) {
// Nothing
} else if (hasNotes) {
options.conditions.push('is_todo = 0');
} else if (hasTodos) {
options.conditions.push('is_todo = 1');
}
return this.search(options);
}
static preview(noteId, options = null) {
if (!options) options = { fields: null };
return this.modelSelectOne(`SELECT ${this.previewFieldsSql(options.fields)} FROM notes WHERE is_conflict = 0 AND id = ?`, [noteId]);
}
static async search(options = null) {
if (!options) options = {};
if (!options.conditions) options.conditions = [];
if (!options.conditionsParams) options.conditionsParams = [];
if (options.bodyPattern) {
const pattern = options.bodyPattern.replace(/\*/g, '%');
options.conditions.push('body LIKE ?');
options.conditionsParams.push(pattern);
}
return super.search(options);
}
static conflictedNotes() {
return this.modelSelectAll('SELECT * FROM notes WHERE is_conflict = 1');
}
static async conflictedCount() {
const r = await this.db().selectOne('SELECT count(*) as total FROM notes WHERE is_conflict = 1');
return r && r.total ? r.total : 0;
}
static unconflictedNotes() {
return this.modelSelectAll('SELECT * FROM notes WHERE is_conflict = 0');
}
static async updateGeolocation(noteId) {
if (!Setting.value('trackLocation')) return;
if (!Note.updateGeolocationEnabled_) return;
const startWait = time.unixMs();
while (true) {
if (!this.geolocationUpdating_) break;
this.logger().info('Waiting for geolocation update...');
await time.sleep(1);
if (startWait + 1000 * 20 < time.unixMs()) {
this.logger().warn(`Failed to update geolocation for: timeout: ${noteId}`);
return;
}
}
let geoData = null;
if (this.geolocationCache_ && this.geolocationCache_.timestamp + 1000 * 60 * 10 > time.unixMs()) {
geoData = Object.assign({}, this.geolocationCache_);
} else {
this.geolocationUpdating_ = true;
this.logger().info('Fetching geolocation...');
try {
geoData = await shim.Geolocation.currentPosition();
} catch (error) {
this.logger().error(`Could not get lat/long for note ${noteId}: `, error);
geoData = null;
}
this.geolocationUpdating_ = false;
if (!geoData) return;
this.logger().info('Got lat/long');
this.geolocationCache_ = geoData;
}
this.logger().info(`Updating lat/long of note ${noteId}`);
const note = await Note.load(noteId);
if (!note) return; // Race condition - note has been deleted in the meantime
note.longitude = geoData.coords.longitude;
note.latitude = geoData.coords.latitude;
note.altitude = geoData.coords.altitude;
return Note.save(note);
}
static filter(note) {
if (!note) return note;
const output = super.filter(note);
if ('longitude' in output) output.longitude = Number(!output.longitude ? 0 : output.longitude).toFixed(8);
if ('latitude' in output) output.latitude = Number(!output.latitude ? 0 : output.latitude).toFixed(8);
if ('altitude' in output) output.altitude = Number(!output.altitude ? 0 : output.altitude).toFixed(4);
return output;
}
static async copyToFolder(noteId, folderId) {
if (folderId == this.getClass('Folder').conflictFolderId()) throw new Error(_('Cannot copy note to "%s" notebook', this.getClass('Folder').conflictFolderTitle()));
return Note.duplicate(noteId, {
changes: {
parent_id: folderId,
is_conflict: 0, // Also reset the conflict flag in case we're moving the note out of the conflict folder
},
});
}
static async moveToFolder(noteId, folderId) {
if (folderId == this.getClass('Folder').conflictFolderId()) throw new Error(_('Cannot move note to "%s" notebook', this.getClass('Folder').conflictFolderTitle()));
// When moving a note to a different folder, the user timestamp is not updated.
// However updated_time is updated so that the note can be synced later on.
const modifiedNote = {
id: noteId,
parent_id: folderId,
is_conflict: 0,
updated_time: time.unixMs(),
};
return Note.save(modifiedNote, { autoTimestamp: false });
}
static changeNoteType(note, type) {
if (!('is_todo' in note)) throw new Error('Missing "is_todo" property');
const newIsTodo = type === 'todo' ? 1 : 0;
if (Number(note.is_todo) === newIsTodo) return note;
const output = Object.assign({}, note);
output.is_todo = newIsTodo;
output.todo_due = 0;
output.todo_completed = 0;
return output;
}
static toggleIsTodo(note) {
return this.changeNoteType(note, note.is_todo ? 'note' : 'todo');
}
static toggleTodoCompleted(note) {
if (!('todo_completed' in note)) throw new Error('Missing "todo_completed" property');
note = Object.assign({}, note);
if (note.todo_completed) {
note.todo_completed = 0;
} else {
note.todo_completed = Date.now();
}
return note;
}
static async duplicateMultipleNotes(noteIds, options = null) {
// If options.ensureUniqueTitle is true, a unique title is assigned to each duplicated note.
const ensureUniqueTitle = options && options.ensureUniqueTitle;
for (const noteId of noteIds) {
const noteOptions = {};
// If ensureUniqueTitle is truthy, set the original note's name as root for the unique title.
if (ensureUniqueTitle) {
const originalNote = await Note.load(noteId);
noteOptions.uniqueTitle = originalNote.title;
}
await Note.duplicate(noteId, noteOptions);
}
}
static async duplicate(noteId, options = null) {
const changes = options && options.changes;
const uniqueTitle = options && options.uniqueTitle;
const originalNote = await Note.load(noteId);
if (!originalNote) throw new Error(`Unknown note: ${noteId}`);
const newNote = Object.assign({}, originalNote);
const fieldsToReset = ['id', 'created_time', 'updated_time', 'user_created_time', 'user_updated_time'];
for (const field of fieldsToReset) {
delete newNote[field];
}
for (const n in changes) {
if (!changes.hasOwnProperty(n)) continue;
newNote[n] = changes[n];
}
if (uniqueTitle) {
const title = await Note.findUniqueItemTitle(uniqueTitle);
newNote.title = title;
}
return this.save(newNote);
}
static async noteIsOlderThan(noteId, date) {
const n = await this.db().selectOne('SELECT updated_time FROM notes WHERE id = ?', [noteId]);
if (!n) throw new Error(`No such note: ${noteId}`);
return n.updated_time < date;
}
static async save(o, options = null) {
const isNew = this.isNew(o, options);
const isProvisional = options && !!options.provisional;
const dispatchUpdateAction = options ? options.dispatchUpdateAction !== false : true;
if (isNew && !o.source) o.source = Setting.value('appName');
if (isNew && !o.source_application) o.source_application = Setting.value('appId');
if (isNew && !('order' in o)) o.order = Date.now();
// We only keep the previous note content for "old notes" (see Revision Service for more info)
// In theory, we could simply save all the previous note contents, and let the revision service
// decide what to keep and what to ignore, but in practice keeping the previous content is a bit
// heavy - the note needs to be reloaded here, the JSON blob needs to be saved, etc.
// So the check for old note here is basically an optimisation.
// 2020-10-19: It's not ideal to reload the previous version of the note before saving it again
// but it should be relatively fast anyway. This is so that code that listens to the NOTE_UPDATE_ONE
// action can decide what to do based on the fields that have been modified.
// This is necessary for example so that the folder list is not refreshed every time a note is changed.
// Now it can look at the properties and refresh only if the "parent_id" property is changed.
// Trying to fix: https://github.com/laurent22/joplin/issues/3893
const oldNote = !isNew && o.id ? await Note.load(o.id) : null;
let beforeNoteJson = null;
if (oldNote && this.revisionService().isOldNote(o.id)) {
beforeNoteJson = JSON.stringify(oldNote);
}
const changedFields = [];
if (oldNote) {
for (const field in o) {
if (!o.hasOwnProperty(field)) continue;
if (o[field] !== oldNote[field]) {
changedFields.push(field);
}
}
}
const note = await super.save(o, options);
const changeSource = options && options.changeSource ? options.changeSource : null;
ItemChange.add(BaseModel.TYPE_NOTE, note.id, isNew ? ItemChange.TYPE_CREATE : ItemChange.TYPE_UPDATE, changeSource, beforeNoteJson);
if (dispatchUpdateAction) {
this.dispatch({
type: 'NOTE_UPDATE_ONE',
note: note,
provisional: isProvisional,
changedFields: changedFields,
});
}
if ('todo_due' in o || 'todo_completed' in o || 'is_todo' in o || 'is_conflict' in o) {
this.dispatch({
type: 'EVENT_NOTE_ALARM_FIELD_CHANGE',
id: note.id,
});
}
return note;
}
static async batchDelete(ids, options = null) {
ids = ids.slice();
while (ids.length) {
const processIds = ids.splice(0, 50);
const notes = await Note.byIds(processIds);
const beforeChangeItems = {};
for (const note of notes) {
beforeChangeItems[note.id] = JSON.stringify(note);
}
await super.batchDelete(processIds, options);
const changeSource = options && options.changeSource ? options.changeSource : null;
for (let i = 0; i < processIds.length; i++) {
const id = processIds[i];
ItemChange.add(BaseModel.TYPE_NOTE, id, ItemChange.TYPE_DELETE, changeSource, beforeChangeItems[id]);
this.dispatch({
type: 'NOTE_DELETE',
id: id,
});
}
}
}
static dueNotes() {
return this.modelSelectAll('SELECT id, title, body, is_todo, todo_due, todo_completed, is_conflict FROM notes WHERE is_conflict = 0 AND is_todo = 1 AND todo_completed = 0 AND todo_due > ?', [time.unixMs()]);
}
static needAlarm(note) {
return note.is_todo && !note.todo_completed && note.todo_due >= time.unixMs() && !note.is_conflict;
}
static dueDateObject(note) {
if (!!note.is_todo && note.todo_due) {
if (!this.dueDateObjects_) this.dueDateObjects_ = {};
if (this.dueDateObjects_[note.todo_due]) return this.dueDateObjects_[note.todo_due];
this.dueDateObjects_[note.todo_due] = new Date(note.todo_due);
return this.dueDateObjects_[note.todo_due];
}
return null;
}
// Tells whether the conflict between the local and remote note can be ignored.
static mustHandleConflict(localNote, remoteNote) {
// That shouldn't happen so throw an exception
if (localNote.id !== remoteNote.id) throw new Error('Cannot handle conflict for two different notes');
// For encrypted notes the conflict must always be handled
if (localNote.encryption_cipher_text || remoteNote.encryption_cipher_text) return true;
// Otherwise only handle the conflict if there's a difference in the title or body
if (localNote.title !== remoteNote.title) return true;
if (localNote.body !== remoteNote.body) return true;
return false;
}
static markupLanguageToLabel(markupLanguageId) {
if (markupLanguageId === MarkupToHtml.MARKUP_LANGUAGE_MARKDOWN) return 'Markdown';
if (markupLanguageId === MarkupToHtml.MARKUP_LANGUAGE_HTML) return 'HTML';
throw new Error(`Invalid markup language ID: ${markupLanguageId}`);
}
// When notes are sorted in "custom order", they are sorted by the "order" field first and,
// in those cases where the order field is the same for some notes, by created time.
static customOrderByColumns(type = null) {
if (!type) type = 'object';
if (type === 'object') return [{ by: 'order', dir: 'DESC' }, { by: 'user_created_time', dir: 'DESC' }];
if (type === 'string') return 'ORDER BY `order` DESC, user_created_time DESC';
throw new Error(`Invalid type: ${type}`);
}
// Update the note "order" field without changing the user timestamps,
// which is generally what we want.
static async updateNoteOrder_(note, order) {
return Note.save(Object.assign({}, note, {
order: order,
user_updated_time: note.user_updated_time,
}), { autoTimestamp: false, dispatchUpdateAction: false });
}
// This method will disable the NOTE_UPDATE_ONE action to prevent a lot
// of unnecessary updates, so it's the caller's responsibility to update
// the UI once the call is finished. This is done by listening to the
// NOTE_IS_INSERTING_NOTES action in the application middleware.
static async insertNotesAt(folderId, noteIds, index) {
if (!noteIds.length) return;
const defer = () => {
this.dispatch({
type: 'NOTE_IS_INSERTING_NOTES',
value: false,
});
};
this.dispatch({
type: 'NOTE_IS_INSERTING_NOTES',
value: true,
});
try {
const noteSql = `
SELECT id, \`order\`, user_created_time, user_updated_time
FROM notes
WHERE is_conflict = 0 AND parent_id = ?
${this.customOrderByColumns('string')}`;
let notes = await this.modelSelectAll(noteSql, [folderId]);
// If the target index is the same as the source note index, exit now
for (let i = 0; i < notes.length; i++) {
const note = notes[i];
if (note.id === noteIds[0] && index === i) return defer();
}
// If some of the target notes have order = 0, set the order field to user_created_time
// (historically, all notes had the order field set to 0)
let hasSetOrder = false;
for (let i = 0; i < notes.length; i++) {
const note = notes[i];
if (!note.order) {
const updatedNote = await this.updateNoteOrder_(note, note.user_created_time);
notes[i] = updatedNote;
hasSetOrder = true;
}
}
if (hasSetOrder) notes = await this.modelSelectAll(noteSql, [folderId]);
// Find the order value for the first note to be inserted,
// and the increment between the order values of each inserted note.
let newOrder = 0;
let intervalBetweenNotes = 0;
const defaultIntervalBetweenNotes = 60 * 60 * 1000;
if (!notes.length) { // If there's no notes in the target notebook
newOrder = Date.now();
intervalBetweenNotes = defaultIntervalBetweenNotes;
} else if (index >= notes.length) { // Insert at the end
intervalBetweenNotes = notes[notes.length - 1].order / (noteIds.length + 1);
newOrder = notes[notes.length - 1].order - intervalBetweenNotes;
} else if (index === 0) { // Insert at the beginning
const firstNoteOrder = notes[0].order;
if (firstNoteOrder >= Date.now()) {
intervalBetweenNotes = defaultIntervalBetweenNotes;
newOrder = firstNoteOrder + defaultIntervalBetweenNotes;
} else {
intervalBetweenNotes = (Date.now() - firstNoteOrder) / (noteIds.length + 1);
newOrder = firstNoteOrder + intervalBetweenNotes * noteIds.length;
}
} else { // Normal insert
let noteBefore = notes[index - 1];
let noteAfter = notes[index];
if (noteBefore.order === noteAfter.order) {
let previousOrder = noteBefore.order;
for (let i = index; i >= 0; i--) {
const n = notes[i];
if (n.order <= previousOrder) {
const o = previousOrder + defaultIntervalBetweenNotes;
const updatedNote = await this.updateNoteOrder_(n, o);
notes[i] = Object.assign({}, n, updatedNote);
previousOrder = o;
} else {
previousOrder = n.order;
}
}
noteBefore = notes[index - 1];
noteAfter = notes[index];
}
intervalBetweenNotes = (noteBefore.order - noteAfter.order) / (noteIds.length + 1);
newOrder = noteAfter.order + intervalBetweenNotes * noteIds.length;
}
// Set the order value for all the notes to be inserted
for (const noteId of noteIds) {
const note = await Note.load(noteId);
if (!note) throw new Error(`No such note: ${noteId}`);
await this.updateNoteOrder_({
id: noteId,
parent_id: folderId,
user_updated_time: note.user_updated_time,
}, newOrder);
newOrder -= intervalBetweenNotes;
}
} finally {
defer();
}
}
}
Note.updateGeolocationEnabled_ = true;
Note.geolocationUpdating_ = false;
module.exports = Note;
| 1 | 15,780 | New code should use strict equality `===` | laurent22-joplin | js |
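An aside on the review comment above: JavaScript's == applies type coercion, so mixed-type comparisons can succeed by accident; === compares without coercion, which is why reviewers ask for it on new code such as the order.by == 'title' check in the patch. A standalone illustration:

const order = { by: 'title' };
console.log(order.by == 'title');  // true, but via loose equality
console.log(order.by === 'title'); // true, no coercion involved
console.log(0 == '');              // true: coercion surprise
console.log(0 === '');             // false: strict comparison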
@@ -84,6 +84,19 @@ describe "apply" do
resources = result[0]['result']['report']['resource_statuses']
expect(resources).to include('Notify[hello world]')
end
+
+ it 'applies the deferred type' do
+ result = run_cli_json(%w[plan run basic::defer] + config_flags)
+ expect(result).not_to include('kind')
+ expect(result[0]['status']).to eq('success')
+ resources = result[0]['result']['report']['resource_statuses']
+
+ local_pid = resources['Notify[local pid]']['events'][0]['desired_value'][/(\d+)/, 1]
+ raise 'local pid was not found' if local_pid.nil?
+ remote_pid = resources['Notify[remote pid]']['events'][0]['desired_value'][/(\d+)/, 1]
+ raise 'remote pid was not found' if remote_pid.nil?
+ expect(local_pid).not_to eq(remote_pid)
+ end
end
end
end | 1 | # frozen_string_literal: true
require 'spec_helper'
require 'bolt_spec/conn'
require 'bolt_spec/files'
require 'bolt_spec/integration'
require 'bolt_spec/run'
describe "apply" do
include BoltSpec::Conn
include BoltSpec::Files
include BoltSpec::Integration
include BoltSpec::Run
let(:modulepath) { File.join(__dir__, '../fixtures/apply') }
let(:config_flags) { %W[--format json --nodes #{uri} --password #{password} --modulepath #{modulepath}] + tflags }
describe 'over ssh', ssh: true do
let(:uri) { conn_uri('ssh') }
let(:password) { conn_info('ssh')[:password] }
let(:tflags) { %W[--no-host-key-check --run-as root --sudo-password #{password}] }
def root_config
{ 'modulepath' => File.join(__dir__, '../fixtures/apply'),
'ssh' => {
'run-as' => 'root',
'sudo-password' => conn_info('ssh')[:password],
'host-key-check' => false
} }
end
after(:all) do
# TODO: Extract into test helper if needed in more files
uri = conn_uri('ssh')
inventory_data = conn_inventory
config_data = root_config
uninstall = '/opt/puppetlabs/bin/puppet resource package puppet-agent ensure=absent'
run_command(uninstall, uri, config: config_data, inventory: inventory_data)
end
context "when installing puppet" do
before(:each) do
uninstall = '/opt/puppetlabs/bin/puppet resource package puppet-agent ensure=absent'
run_cli_json(%W[command run #{uninstall}] + config_flags)
end
it 'succeeds when run twice' do
result = run_cli_json(%w[plan run prep] + config_flags)
expect(result).not_to include('kind')
expect(result.count).to eq(1)
expect(result[0]['status']).to eq('success')
report = result[0]['result']['report']
expect(report['resource_statuses']).to include("Notify[Hello #{conn_info('ssh')[:host]}]")
result = run_cli_json(%w[plan run prep] + config_flags)
expect(result.count).to eq(1)
expect(result[0]['status']).to eq('success')
report = result[0]['result']['report']
expect(report['resource_statuses']).to include("Notify[Hello #{conn_info('ssh')[:host]}]")
end
end
context "with a puppet_agent installed" do
before(:all) do
# TODO: Extract into test helper if needed in more files
uri = conn_uri('ssh')
inventory_data = conn_inventory
config_data = root_config
run_task('puppet_agent::install', uri, config: config_data, inventory: inventory_data)
end
it 'errors when there are resource failures' do
result = run_cli_json(%w[plan run basic::failure] + config_flags, rescue_exec: true)
expect(result).to include('kind' => 'bolt/apply-failure')
error = result['details']['result_set'][0]['result']['_error']
expect(error['kind']).to eq('bolt/resource-failure')
expect(error['msg']).to match(/Resources failed to apply/)
end
it 'applies a notify' do
result = run_cli_json(%w[plan run basic::class] + config_flags)
expect(result).not_to include('kind')
expect(result[0]).to include('status' => 'success')
resources = result[0]['result']['report']['resource_statuses']
expect(resources).to include('Notify[hello world]')
end
end
end
end
| 1 | 9,437 | `expect(resources['Notify[local pid]']['events'][0]['desired_value']).to match(/(\d+)/)` seems clearer. | puppetlabs-bolt | rb |
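An aside on the review suggestion above, hedged because this is a reconstruction rather than the project's final code: letting RSpec's match matcher assert the digit pattern yields a descriptive failure message instead of a hand-raised error, and the extraction can follow once the shape is guaranteed:

desired = resources['Notify[local pid]']['events'][0]['desired_value']
expect(desired).to match(/(\d+)/) # fails with a useful diff if no digits
local_pid = desired[/(\d+)/, 1]   # safe to extract after the expectation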
@@ -54,7 +54,7 @@ const (
// Time out individual tests after 10 seconds.
var individualTestTimeout = 10 * time.Second
-func kbfsOpsInit(t *testing.T, changeMd bool) (mockCtrl *gomock.Controller,
+func kbfsOpsInit(t *testing.T) (mockCtrl *gomock.Controller,
config *ConfigMock, ctx context.Context, cancel context.CancelFunc) {
ctr := NewSafeTestReporter(t)
mockCtrl = gomock.NewController(ctr) | 1 | // Copyright 2016 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.
package libkbfs
import (
"bytes"
"fmt"
"math/rand"
"testing"
"time"
"github.com/golang/mock/gomock"
"github.com/keybase/client/go/libkb"
"github.com/keybase/client/go/protocol/keybase1"
"github.com/keybase/go-codec/codec"
"github.com/keybase/kbfs/kbfsblock"
"github.com/keybase/kbfs/kbfscodec"
"github.com/keybase/kbfs/kbfscrypto"
"github.com/keybase/kbfs/kbfshash"
"github.com/keybase/kbfs/tlf"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"golang.org/x/net/context"
)
type CheckBlockOps struct {
BlockOps
tr gomock.TestReporter
}
var _ BlockOps = (*CheckBlockOps)(nil)
func (cbo *CheckBlockOps) Ready(ctx context.Context, kmd KeyMetadata,
block Block) (id kbfsblock.ID, plainSize int, readyBlockData ReadyBlockData,
err error) {
id, plainSize, readyBlockData, err = cbo.BlockOps.Ready(ctx, kmd, block)
encodedSize := readyBlockData.GetEncodedSize()
if plainSize > encodedSize {
cbo.tr.Errorf("expected plainSize <= encodedSize, got plainSize = %d, "+
"encodedSize = %d", plainSize, encodedSize)
}
return
}
type tCtxIDType int
const (
tCtxID tCtxIDType = iota
)
// Time out individual tests after 10 seconds.
var individualTestTimeout = 10 * time.Second
func kbfsOpsInit(t *testing.T, changeMd bool) (mockCtrl *gomock.Controller,
config *ConfigMock, ctx context.Context, cancel context.CancelFunc) {
ctr := NewSafeTestReporter(t)
mockCtrl = gomock.NewController(ctr)
config = NewConfigMock(mockCtrl, ctr)
config.SetCodec(kbfscodec.NewMsgpack())
blockops := &CheckBlockOps{config.mockBops, ctr}
config.SetBlockOps(blockops)
kbfsops := NewKBFSOpsStandard(config)
config.SetKBFSOps(kbfsops)
config.SetNotifier(kbfsops)
// Use real caches, to avoid the overhead of tracking cache calls.
// Each test is expected to check the cache for correctness at the
// end of the test.
config.SetBlockCache(NewBlockCacheStandard(100, 1<<30))
config.SetDirtyBlockCache(NewDirtyBlockCacheStandard(wallClock{},
config.MakeLogger(""), 5<<20, 10<<20, 5<<20))
config.mockBcache = nil
config.mockDirtyBcache = nil
if changeMd {
// Give different values for the MD Id so we can test that it
// is properly cached
config.mockCrypto.EXPECT().MakeMdID(gomock.Any()).AnyTimes().
Return(fakeMdID(2), nil)
} else {
config.mockCrypto.EXPECT().MakeMdID(gomock.Any()).AnyTimes().
Return(fakeMdID(1), nil)
}
// These tests don't rely on external notifications at all, so ignore any
// goroutine attempting to register:
c := make(chan error, 1)
config.mockMdserv.EXPECT().RegisterForUpdate(gomock.Any(),
gomock.Any(), gomock.Any()).AnyTimes().Return(c, nil)
config.mockMdserv.EXPECT().OffsetFromServerTime().
Return(time.Duration(0), true).AnyTimes()
// None of these tests depend on time
config.mockClock.EXPECT().Now().AnyTimes().Return(time.Now())
// Ignore Notify calls for now
config.mockRep.EXPECT().Notify(gomock.Any(), gomock.Any()).AnyTimes()
// Max out MaxPtrsPerBlock
config.mockBsplit.EXPECT().MaxPtrsPerBlock().
Return(int((^uint(0)) >> 1)).AnyTimes()
// Ignore Archive calls for now
config.mockBops.EXPECT().Archive(gomock.Any(), gomock.Any(),
gomock.Any()).AnyTimes().Return(nil)
// Ignore Prefetcher calls
brc := &testBlockRetrievalConfig{nil, newTestLogMaker(t),
config.BlockCache(), nil, newTestDiskBlockCacheGetter(t, nil)}
pre := newBlockPrefetcher(nil, brc)
config.mockBops.EXPECT().Prefetcher().AnyTimes().Return(pre)
// Ignore BlockRetriever calls
brq := newBlockRetrievalQueue(0, brc)
config.mockBops.EXPECT().BlockRetriever().AnyTimes().Return(brq)
// Ignore key bundle ID creation calls for now
config.mockCrypto.EXPECT().MakeTLFWriterKeyBundleID(gomock.Any()).
AnyTimes().Return(TLFWriterKeyBundleID{}, nil)
config.mockCrypto.EXPECT().MakeTLFReaderKeyBundleID(gomock.Any()).
AnyTimes().Return(TLFReaderKeyBundleID{}, nil)
// Ignore favorites
config.mockKbpki.EXPECT().FavoriteList(gomock.Any()).AnyTimes().
Return(nil, nil)
config.mockKbpki.EXPECT().FavoriteAdd(gomock.Any(), gomock.Any()).
AnyTimes().Return(nil)
interposeDaemonKBPKI(config, "alice", "bob", "charlie")
timeoutCtx, cancel := context.WithTimeout(
context.Background(), individualTestTimeout)
initSuccess := false
defer func() {
if !initSuccess {
cancel()
}
}()
// make the context identifiable, to verify that it is passed
// correctly to the observer
id := rand.Int()
ctx, err := NewContextWithCancellationDelayer(NewContextReplayable(
timeoutCtx, func(ctx context.Context) context.Context {
return context.WithValue(ctx, tCtxID, id)
}))
if err != nil {
t.Fatal(err)
}
initSuccess = true
return mockCtrl, config, ctx, cancel
}
func kbfsTestShutdown(mockCtrl *gomock.Controller, config *ConfigMock,
ctx context.Context, cancel context.CancelFunc) {
config.ctr.CheckForFailures()
config.KBFSOps().(*KBFSOpsStandard).Shutdown(ctx)
if config.mockDirtyBcache == nil {
if err := config.DirtyBlockCache().Shutdown(); err != nil {
// Ignore error; some tests intentionally leave around dirty data.
}
}
cancel()
if err := CleanupCancellationDelayer(ctx); err != nil {
panic(err)
}
mockCtrl.Finish()
}
// kbfsOpsInitNoMocks returns a config that doesn't use any mocks. The
// shutdown call is kbfsTestShutdownNoMocks.
func kbfsOpsInitNoMocks(t *testing.T, users ...libkb.NormalizedUsername) (
*ConfigLocal, keybase1.UID, context.Context, context.CancelFunc) {
config := MakeTestConfigOrBust(t, users...)
config.SetRekeyWithPromptWaitTime(individualTestTimeout)
timeoutCtx, cancel := context.WithTimeout(
context.Background(), individualTestTimeout)
initSuccess := false
defer func() {
if !initSuccess {
cancel()
}
}()
ctx, err := NewContextWithCancellationDelayer(NewContextReplayable(
timeoutCtx, func(c context.Context) context.Context {
return c
}))
if err != nil {
t.Fatal(err)
}
session, err := config.KBPKI().GetCurrentSession(ctx)
if err != nil {
t.Fatal(err)
}
initSuccess = true
return config, session.UID, ctx, cancel
}
func kbfsTestShutdownNoMocks(t *testing.T, config *ConfigLocal,
ctx context.Context, cancel context.CancelFunc) {
CheckConfigAndShutdown(ctx, t, config)
cancel()
CleanupCancellationDelayer(ctx)
}
// TODO: Get rid of all users of this.
func kbfsTestShutdownNoMocksNoCheck(t *testing.T, config *ConfigLocal,
ctx context.Context, cancel context.CancelFunc) {
config.Shutdown(ctx)
cancel()
CleanupCancellationDelayer(ctx)
}
func checkBlockCache(t *testing.T, config *ConfigMock, id tlf.ID,
expectedCleanBlocks []kbfsblock.ID,
expectedDirtyBlocks map[BlockPointer]BranchName) {
bcache := config.BlockCache().(*BlockCacheStandard)
// make sure the LRU consists of exactly the right set of clean blocks
for _, id := range expectedCleanBlocks {
_, ok := bcache.cleanTransient.Get(id)
if !ok {
t.Errorf("BlockCache missing clean block %v at the end of the test",
id)
}
}
if bcache.cleanTransient.Len() != len(expectedCleanBlocks) {
t.Errorf("BlockCache has extra clean blocks at end of test")
}
// make sure the dirty cache consists of exactly the right set of
// dirty blocks
dirtyBcache := config.DirtyBlockCache().(*DirtyBlockCacheStandard)
for ptr, branch := range expectedDirtyBlocks {
_, err := dirtyBcache.Get(id, ptr, branch)
if err != nil {
t.Errorf("BlockCache missing dirty block %v, branch %s at "+
"the end of the test: err %+v", ptr, branch, err)
}
if !dirtyBcache.IsDirty(id, ptr, branch) {
t.Errorf("BlockCache has incorrectly clean block %v, branch %s at "+
"the end of the test: err %+v", ptr, branch, err)
}
}
if len(dirtyBcache.cache) != len(expectedDirtyBlocks) {
t.Errorf("BlockCache has extra dirty blocks at end of test")
}
}
func TestKBFSOpsGetFavoritesSuccess(t *testing.T) {
config, _, ctx, cancel := kbfsOpsInitNoMocks(t, "alice", "bob")
defer kbfsTestShutdownNoMocks(t, config, ctx, cancel)
handle1 := parseTlfHandleOrBust(t, config, "alice", false)
handle2 := parseTlfHandleOrBust(t, config, "alice,bob", false)
// dup for testing
handles := []*TlfHandle{handle1, handle2, handle2}
for _, h := range handles {
config.KeybaseService().FavoriteAdd(
context.Background(), h.ToFavorite().toKBFolder(false))
}
// The favorites list contains our own public dir by default, even
// if KBPKI doesn't return it.
handle3 := parseTlfHandleOrBust(t, config, "alice", true)
handles = append(handles, handle3)
handles2, err := config.KBFSOps().GetFavorites(ctx)
if err != nil {
t.Errorf("Got error on favorites: %+v", err)
}
if len(handles2) != len(handles)-1 {
t.Errorf("Got bad handles back: %v", handles2)
}
}
func TestKBFSOpsGetFavoritesFail(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
err := errors.New("Fake fail")
// Replace the old one (added in init function)
config.mockKbpki = NewMockKBPKI(mockCtrl)
config.SetKBPKI(config.mockKbpki)
// expect one call to favorites, and fail it
config.mockKbpki.EXPECT().FavoriteList(gomock.Any()).Return(nil, err)
if _, err2 := config.KBFSOps().GetFavorites(ctx); err2 != err {
t.Errorf("Got bad error on favorites: %+v", err2)
}
}
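// getOps is a convenience wrapper that fetches the master-branch
// folderBranchOps for the given TLF.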
func getOps(config Config, id tlf.ID) *folderBranchOps {
return config.KBFSOps().(*KBFSOpsStandard).
getOpsNoAdd(FolderBranch{id, MasterBranch})
}
// createNewRMD creates a new RMD for the given name. Returns its ID
// and handle also.
func createNewRMD(t *testing.T, config Config, name string, public bool) (
tlf.ID, *TlfHandle, *RootMetadata) {
id := tlf.FakeID(1, public)
h := parseTlfHandleOrBust(t, config, name, public)
rmd, err := makeInitialRootMetadata(config.MetadataVersion(), id, h)
require.NoError(t, err)
return id, h, rmd
}
func makeImmutableRMDForTest(t *testing.T, config Config, rmd *RootMetadata,
mdID MdID) ImmutableRootMetadata {
session, err := config.KBPKI().GetCurrentSession(context.Background())
require.NoError(t, err)
// We have to fake out the signature here because most tests
// in this file modify the returned value, invalidating any
// real signatures. TODO: Fix all the tests in this file to
// not do so, and then just use MakeImmutableRootMetadata.
if brmdv2, ok := rmd.bareMd.(*BareRootMetadataV2); ok {
vk := brmdv2.WriterMetadataSigInfo.VerifyingKey
require.True(t, vk == (kbfscrypto.VerifyingKey{}) || vk == session.VerifyingKey,
"Writer signature %s with unexpected non-nil verifying key != %s",
brmdv2.WriterMetadataSigInfo, session.VerifyingKey)
brmdv2.WriterMetadataSigInfo = kbfscrypto.SignatureInfo{
VerifyingKey: session.VerifyingKey,
}
}
return MakeImmutableRootMetadata(rmd, session.VerifyingKey, mdID, time.Now())
}
// injectNewRMD creates a new RMD and makes sure the existing ops for
// its ID has as its head that RMD.
func injectNewRMD(t *testing.T, config *ConfigMock) (
keybase1.UID, tlf.ID, *RootMetadata) {
id, h, rmd := createNewRMD(t, config, "alice", false)
var keyGen KeyGen
if id.IsPublic() {
keyGen = PublicKeyGen
} else {
keyGen = 1
}
rmd.data.Dir = DirEntry{
BlockInfo: BlockInfo{
BlockPointer: BlockPointer{
KeyGen: keyGen,
DataVer: 1,
},
EncodedSize: 1,
},
}
rmd.fakeInitialRekey()
ops := getOps(config, id)
ops.head = makeImmutableRMDForTest(
t, config, rmd, fakeMdID(tlf.FakeIDByte(id)))
ops.headStatus = headTrusted
rmd.SetSerializedPrivateMetadata(make([]byte, 1))
config.Notifier().RegisterForChanges(
[]FolderBranch{{id, MasterBranch}}, config.observer)
uid := h.FirstResolvedWriter()
rmd.data.Dir.Creator = uid
return uid, id, rmd
}
func TestKBFSOpsGetRootNodeCacheSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
_, id, rmd := injectNewRMD(t, config)
rmd.data.Dir.BlockPointer.ID = kbfsblock.FakeID(1)
rmd.data.Dir.Type = Dir
ops := getOps(config, id)
assert.False(t, fboIdentityDone(ops))
n, ei, h, err := ops.getRootNode(ctx)
require.NoError(t, err)
assert.False(t, fboIdentityDone(ops))
p := ops.nodeCache.PathFromNode(n)
assert.Equal(t, id, p.Tlf)
require.Equal(t, 1, len(p.path))
assert.Equal(t, rmd.data.Dir.ID, p.path[0].ID)
assert.Equal(t, rmd.data.Dir.EntryInfo, ei)
assert.Equal(t, rmd.GetTlfHandle(), h)
// Trigger identify.
lState := makeFBOLockState()
_, err = ops.getMDForReadLocked(ctx, lState, mdReadNeedIdentify)
require.NoError(t, err)
assert.True(t, fboIdentityDone(ops))
}
func TestKBFSOpsGetRootNodeReIdentify(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
_, id, rmd := injectNewRMD(t, config)
rmd.data.Dir.BlockPointer.ID = kbfsblock.FakeID(1)
rmd.data.Dir.Type = Dir
ops := getOps(config, id)
assert.False(t, fboIdentityDone(ops))
n, ei, h, err := ops.getRootNode(ctx)
require.NoError(t, err)
assert.False(t, fboIdentityDone(ops))
p := ops.nodeCache.PathFromNode(n)
assert.Equal(t, id, p.Tlf)
require.Equal(t, 1, len(p.path))
assert.Equal(t, rmd.data.Dir.ID, p.path[0].ID)
assert.Equal(t, rmd.data.Dir.EntryInfo, ei)
assert.Equal(t, rmd.GetTlfHandle(), h)
// Trigger identify.
lState := makeFBOLockState()
_, err = ops.getMDForReadLocked(ctx, lState, mdReadNeedIdentify)
require.NoError(t, err)
assert.True(t, fboIdentityDone(ops))
// Mark everything for reidentifying, and wait for it to finish
// before checking.
kop := config.KBFSOps().(*KBFSOpsStandard)
returnCh := make(chan struct{})
kop.reIdentifyControlChan <- returnCh
<-returnCh
assert.False(t, fboIdentityDone(ops))
// Trigger new identify.
lState = makeFBOLockState()
_, err = ops.getMDForReadLocked(ctx, lState, mdReadNeedIdentify)
require.NoError(t, err)
assert.True(t, fboIdentityDone(ops))
}
// fboIdentityDone is needed to avoid data races.
func fboIdentityDone(fbo *folderBranchOps) bool {
fbo.identifyLock.Lock()
defer fbo.identifyLock.Unlock()
return fbo.identifyDone
}
type failIdentifyKBPKI struct {
KBPKI
identifyErr error
}
func (kbpki failIdentifyKBPKI) Identify(ctx context.Context, assertion, reason string) (UserInfo, error) {
return UserInfo{}, kbpki.identifyErr
}
func TestKBFSOpsGetRootNodeCacheIdentifyFail(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
_, id, rmd := injectNewRMD(t, config)
rmd.data.Dir.BlockPointer.ID = kbfsblock.FakeID(1)
rmd.data.Dir.Type = Dir
ops := getOps(config, id)
expectedErr := errors.New("Identify failure")
config.SetKBPKI(failIdentifyKBPKI{config.KBPKI(), expectedErr})
// Trigger identify.
lState := makeFBOLockState()
_, err := ops.getMDForReadLocked(ctx, lState, mdReadNeedIdentify)
assert.Equal(t, expectedErr, err)
assert.False(t, fboIdentityDone(ops))
}
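// expectBlock registers a mock Get expectation that simulates a block
// fetch: the Do callback copies the desired contents into the caller's
// block and primes the block cache, then the call returns err.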
func expectBlock(config *ConfigMock, kmd KeyMetadata, blockPtr BlockPointer, block Block, err error) {
config.mockBops.EXPECT().Get(gomock.Any(), kmdMatcher{kmd},
ptrMatcher{blockPtr}, gomock.Any(), gomock.Any()).
Do(func(ctx context.Context, kmd KeyMetadata,
blockPtr BlockPointer, getBlock Block, lifetime BlockCacheLifetime) {
getBlock.Set(block)
config.BlockCache().Put(blockPtr, kmd.TlfID(), getBlock, lifetime)
}).Return(err)
}
// ptrMatcher implements the gomock.Matcher interface to compare
// BlockPointer objects. We don't care about some of the fields in a
// pointer for the purposes of these tests.
type ptrMatcher struct {
ptr BlockPointer
}
// Matches implements the Matcher interface for ptrMatcher.
func (p ptrMatcher) Matches(x interface{}) bool {
xPtr, ok := x.(BlockPointer)
if !ok {
return false
}
return (xPtr.ID == p.ptr.ID && xPtr.RefNonce == p.ptr.RefNonce)
}
// String implements the Matcher interface for ptrMatcher.
func (p ptrMatcher) String() string {
return fmt.Sprintf("Matches BlockPointer %v", p.ptr)
}
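// Illustrative sketch of using ptrMatcher directly in an expectation
// (this mirrors what expectBlock does above):
//
//	config.mockBops.EXPECT().Get(gomock.Any(), kmdMatcher{kmd},
//		ptrMatcher{blockPtr}, gomock.Any(), gomock.Any()).Return(nil)

// fillInNewMD fakes an initial rekey (for private TLFs) and installs a
// root directory entry, making rmd look like a freshly-created folder.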
func fillInNewMD(t *testing.T, config *ConfigMock, rmd *RootMetadata) {
if !rmd.TlfID().IsPublic() {
rmd.fakeInitialRekey()
}
rootPtr := BlockPointer{
ID: kbfsblock.FakeID(42),
KeyGen: 1,
DataVer: 1,
}
rmd.data.Dir = DirEntry{
BlockInfo: BlockInfo{
BlockPointer: rootPtr,
EncodedSize: 5,
},
EntryInfo: EntryInfo{
Type: Dir,
Size: 3,
},
}
}
func testKBFSOpsGetRootNodeCreateNewSuccess(t *testing.T, public bool) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
id, h, rmd := createNewRMD(t, config, "alice", public)
fillInNewMD(t, config, rmd)
// create a new MD
config.mockMdops.EXPECT().GetUnmergedForTLF(
gomock.Any(), id, gomock.Any()).Return(ImmutableRootMetadata{}, nil)
irmd := makeImmutableRMDForTest(t, config, rmd, fakeMdID(1))
config.mockMdops.EXPECT().GetForTLF(gomock.Any(), id).Return(irmd, nil)
config.mockMdcache.EXPECT().Put(irmd).Return(nil)
ops := getOps(config, id)
assert.False(t, fboIdentityDone(ops))
n, ei, h, err := ops.getRootNode(ctx)
require.NoError(t, err)
assert.True(t, fboIdentityDone(ops))
p := ops.nodeCache.PathFromNode(n)
require.Equal(t, id, p.Tlf)
require.Equal(t, 1, len(p.path))
require.Equal(t, rmd.data.Dir.ID, p.path[0].ID)
require.Equal(t, rmd.data.Dir.EntryInfo, ei)
require.Equal(t, rmd.GetTlfHandle(), h)
}
func TestKBFSOpsGetRootNodeCreateNewSuccessPublic(t *testing.T) {
testKBFSOpsGetRootNodeCreateNewSuccess(t, true)
}
func TestKBFSOpsGetRootNodeCreateNewSuccessPrivate(t *testing.T) {
testKBFSOpsGetRootNodeCreateNewSuccess(t, false)
}
func TestKBFSOpsGetRootMDForHandleExisting(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
id, h, rmd := createNewRMD(t, config, "alice", false)
rmd.data.Dir = DirEntry{
BlockInfo: BlockInfo{
BlockPointer: BlockPointer{
ID: kbfsblock.FakeID(1),
},
EncodedSize: 15,
},
EntryInfo: EntryInfo{
Type: Dir,
Size: 10,
Mtime: 1,
Ctime: 2,
},
}
config.mockMdops.EXPECT().GetForHandle(gomock.Any(), h, Unmerged).Return(
tlf.ID{}, ImmutableRootMetadata{}, nil)
config.mockMdops.EXPECT().GetForHandle(gomock.Any(), h, Merged).Return(
tlf.ID{}, makeImmutableRMDForTest(t, config, rmd, fakeMdID(1)), nil)
ops := getOps(config, id)
assert.False(t, fboIdentityDone(ops))
ops.head = makeImmutableRMDForTest(t, config, rmd, fakeMdID(2))
ops.headStatus = headTrusted
n, ei, err :=
config.KBFSOps().GetOrCreateRootNode(ctx, h, MasterBranch)
require.NoError(t, err)
assert.True(t, fboIdentityDone(ops))
p := ops.nodeCache.PathFromNode(n)
if p.Tlf != id {
t.Errorf("Got bad dir id back: %v", p.Tlf)
} else if len(p.path) != 1 {
t.Errorf("Got bad MD back: path size %d", len(p.path))
} else if p.path[0].ID != rmd.data.Dir.ID {
t.Errorf("Got bad MD back: root ID %v", p.path[0].ID)
} else if ei.Type != Dir {
t.Error("Got bad MD non-dir rootID back")
} else if ei.Size != 10 {
t.Errorf("Got bad MD Size back: %d", ei.Size)
} else if ei.Mtime != 1 {
t.Errorf("Got bad MD MTime back: %d", ei.Mtime)
} else if ei.Ctime != 2 {
t.Errorf("Got bad MD CTime back: %d", ei.Ctime)
}
}
// rmd should really be a ReadOnlyRootMetadata or *BareRootMetadata in
// the helper functions below, but then all the callers would have to
// call md.ReadOnly(), which doesn't buy us much in tests.
func makeBP(id kbfsblock.ID, kmd KeyMetadata, config Config,
u keybase1.UID) BlockPointer {
return BlockPointer{
ID: id,
KeyGen: kmd.LatestKeyGeneration(),
DataVer: DefaultNewBlockDataVersion(false),
Context: kbfsblock.Context{
Creator: u,
// Refnonces not needed; explicit refnonce
// testing happens elsewhere.
},
}
}
func makeBI(id kbfsblock.ID, kmd KeyMetadata, config Config,
u keybase1.UID, encodedSize uint32) BlockInfo {
return BlockInfo{
BlockPointer: makeBP(id, kmd, config, u),
EncodedSize: encodedSize,
}
}
func makeIFP(id kbfsblock.ID, kmd KeyMetadata, config Config,
u keybase1.UID, encodedSize uint32, off int64) IndirectFilePtr {
return IndirectFilePtr{
BlockInfo{
BlockPointer: makeBP(id, kmd, config, u),
EncodedSize: encodedSize,
},
off,
false,
codec.UnknownFieldSetHandler{},
}
}
func makeBIFromID(id kbfsblock.ID, user keybase1.UID) BlockInfo {
return BlockInfo{
BlockPointer: BlockPointer{
ID: id, KeyGen: 1, DataVer: 1,
Context: kbfsblock.Context{
Creator: user,
},
},
EncodedSize: 1,
}
}
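// nodeFromPath populates the node cache with every node along p and
// returns the node for the final path element.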
func nodeFromPath(t *testing.T, ops *folderBranchOps, p path) Node {
var prevNode Node
// populate the node cache with all the nodes we'll need
for _, pathNode := range p.path {
n, err := ops.nodeCache.GetOrCreate(pathNode.BlockPointer,
pathNode.Name, prevNode)
if err != nil {
t.Fatal(err)
}
prevNode = n
}
return prevNode
}
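// testPutBlockInCache stores the block in the real block cache as a
// transient entry and, if a mock block cache is installed, stubs its
// Get to return the same block.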
func testPutBlockInCache(
t *testing.T, config *ConfigMock, ptr BlockPointer, id tlf.ID,
block Block) {
err := config.BlockCache().Put(ptr, id, block, TransientEntry)
require.NoError(t, err)
if config.mockBcache != nil {
config.mockBcache.EXPECT().Get(ptr).AnyTimes().Return(block, nil)
}
}
func TestKBFSOpsGetBaseDirChildrenCacheSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
u, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(42)
dirBlock := NewDirBlock().(*DirBlock)
dirBlock.Children["a"] = DirEntry{EntryInfo: EntryInfo{Type: File}}
dirBlock.Children["b"] = DirEntry{EntryInfo: EntryInfo{Type: Dir}}
blockPtr := makeBP(rootID, rmd, config, u)
rmd.data.Dir.BlockPointer = blockPtr
node := pathNode{blockPtr, "p"}
p := path{FolderBranch{Tlf: id}, []pathNode{node}}
testPutBlockInCache(t, config, node.BlockPointer, id, dirBlock)
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
children, err := config.KBFSOps().GetDirChildren(ctx, n)
if err != nil {
t.Errorf("Got error on getdir: %+v", err)
} else if len(children) != 2 {
t.Errorf("Got bad children back: %v", children)
}
for c, ei := range children {
if de, ok := dirBlock.Children[c]; !ok {
t.Errorf("No such child: %s", c)
} else if de.EntryInfo != ei {
t.Errorf("Wrong EntryInfo for child %s: %v", c, ei)
}
}
}
func TestKBFSOpsGetBaseDirChildrenUncachedSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
u, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(42)
dirBlock := NewDirBlock().(*DirBlock)
blockPtr := makeBP(rootID, rmd, config, u)
rmd.data.Dir.BlockPointer = blockPtr
node := pathNode{blockPtr, "p"}
p := path{FolderBranch{Tlf: id}, []pathNode{node}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
// cache miss means fetching metadata and getting read key
expectBlock(config, rmd, blockPtr, dirBlock, nil)
if _, err := config.KBFSOps().GetDirChildren(ctx, n); err != nil {
t.Errorf("Got error on getdir: %+v", err)
}
}
func TestKBFSOpsGetBaseDirChildrenUncachedFailNonReader(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
id := tlf.FakeID(1, false)
h := parseTlfHandleOrBust(t, config, "bob#alice", false)
// Hack around access check in ParseTlfHandle.
h.resolvedReaders = nil
rmd, err := makeInitialRootMetadata(config.MetadataVersion(), id, h)
require.NoError(t, err)
session, err := config.KBPKI().GetCurrentSession(ctx)
if err != nil {
t.Fatal(err)
}
rootID := kbfsblock.FakeID(42)
node := pathNode{makeBP(rootID, rmd, config, session.UID), "p"}
p := path{FolderBranch{Tlf: id}, []pathNode{node}}
// won't even try getting the block if the user isn't a reader
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
ops.head = makeImmutableRMDForTest(t, config, rmd, fakeMdID(1))
ops.headStatus = headTrusted
expectedErr := NewReadAccessError(h, "alice", "/keybase/private/bob#alice")
if _, err := config.KBFSOps().GetDirChildren(ctx, n); err == nil {
t.Errorf("Got no expected error on getdir")
} else if err != expectedErr {
t.Errorf("Got unexpected error on root MD: %+v", err)
}
}
func TestKBFSOpsGetBaseDirChildrenUncachedFailMissingBlock(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
u, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(42)
dirBlock := NewDirBlock().(*DirBlock)
blockPtr := makeBP(rootID, rmd, config, u)
rmd.data.Dir.BlockPointer = blockPtr
node := pathNode{blockPtr, "p"}
p := path{FolderBranch{Tlf: id}, []pathNode{node}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
// cache miss means fetching metadata and getting read key, then
// fail block fetch
err := NoSuchBlockError{rootID}
expectBlock(config, rmd, blockPtr, dirBlock, err)
if _, err2 := config.KBFSOps().GetDirChildren(ctx, n); err2 == nil {
t.Errorf("Got no expected error on getdir")
} else if err2 != err {
t.Errorf("Got unexpected error on root MD: %+v", err)
}
}
func TestKBFSOpsGetNestedDirChildrenCacheSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
id, h, rmd := createNewRMD(t, config, "alice", false)
ops := getOps(config, id)
ops.head = makeImmutableRMDForTest(t, config, rmd, fakeMdID(1))
ops.headStatus = headTrusted
u := h.FirstResolvedWriter()
rootID := kbfsblock.FakeID(42)
aID := kbfsblock.FakeID(43)
bID := kbfsblock.FakeID(44)
dirBlock := NewDirBlock().(*DirBlock)
dirBlock.Children["a"] = DirEntry{EntryInfo: EntryInfo{Type: Exec}}
dirBlock.Children["b"] = DirEntry{EntryInfo: EntryInfo{Type: Sym}}
blockPtr := makeBP(rootID, rmd, config, u)
rmd.data.Dir.BlockPointer = blockPtr
node := pathNode{blockPtr, "p"}
aNode := pathNode{makeBP(aID, rmd, config, u), "a"}
bNode := pathNode{makeBP(bID, rmd, config, u), "b"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, aNode, bNode}}
n := nodeFromPath(t, ops, p)
testPutBlockInCache(t, config, bNode.BlockPointer, id, dirBlock)
children, err := config.KBFSOps().GetDirChildren(ctx, n)
if err != nil {
t.Errorf("Got error on getdir: %+v", err)
} else if len(children) != 2 {
t.Errorf("Got bad children back: %v", children)
}
for c, ei := range children {
if de, ok := dirBlock.Children[c]; !ok {
t.Errorf("No such child: %s", c)
} else if de.EntryInfo != ei {
t.Errorf("Wrong EntryInfo for child %s: %v", c, ei)
}
}
}
func TestKBFSOpsLookupSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
id, h, rmd := createNewRMD(t, config, "alice", false)
ops := getOps(config, id)
ops.head = makeImmutableRMDForTest(t, config, rmd, fakeMdID(1))
ops.headStatus = headTrusted
u := h.FirstResolvedWriter()
rootID := kbfsblock.FakeID(42)
aID := kbfsblock.FakeID(43)
bID := kbfsblock.FakeID(44)
dirBlock := NewDirBlock().(*DirBlock)
dirBlock.Children["b"] = DirEntry{
BlockInfo: makeBIFromID(bID, u),
EntryInfo: EntryInfo{
Type: Dir,
},
}
node := pathNode{makeBP(rootID, rmd, config, u), "p"}
aNode := pathNode{makeBP(aID, rmd, config, u), "a"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, aNode}}
n := nodeFromPath(t, ops, p)
testPutBlockInCache(t, config, aNode.BlockPointer, id, dirBlock)
bn, ei, err := config.KBFSOps().Lookup(ctx, n, "b")
if err != nil {
t.Errorf("Error on Lookup: %+v", err)
}
bPath := ops.nodeCache.PathFromNode(bn)
expectedBNode := pathNode{makeBP(bID, rmd, config, u), "b"}
expectedBNode.KeyGen = 1
if ei != dirBlock.Children["b"].EntryInfo {
t.Errorf("Lookup returned a bad entry info: %v vs %v",
ei, dirBlock.Children["b"].EntryInfo)
} else if bPath.path[2] != expectedBNode {
t.Errorf("Bad path node after lookup: %v vs %v",
bPath.path[2], expectedBNode)
}
}
func TestKBFSOpsLookupSymlinkSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
id, h, rmd := createNewRMD(t, config, "alice", false)
ops := getOps(config, id)
ops.head = makeImmutableRMDForTest(t, config, rmd, fakeMdID(1))
ops.headStatus = headTrusted
u := h.FirstResolvedWriter()
rootID := kbfsblock.FakeID(42)
aID := kbfsblock.FakeID(43)
bID := kbfsblock.FakeID(44)
dirBlock := NewDirBlock().(*DirBlock)
dirBlock.Children["b"] = DirEntry{
BlockInfo: makeBIFromID(bID, u),
EntryInfo: EntryInfo{
Type: Sym,
},
}
node := pathNode{makeBP(rootID, rmd, config, u), "p"}
aNode := pathNode{makeBP(aID, rmd, config, u), "a"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, aNode}}
n := nodeFromPath(t, ops, p)
testPutBlockInCache(t, config, aNode.BlockPointer, id, dirBlock)
bn, ei, err := config.KBFSOps().Lookup(ctx, n, "b")
if err != nil {
t.Errorf("Error on Lookup: %+v", err)
}
if ei != dirBlock.Children["b"].EntryInfo {
t.Errorf("Lookup returned a bad directory entry: %v vs %v",
ei, dirBlock.Children["b"].EntryInfo)
} else if bn != nil {
t.Errorf("Node for symlink is not nil: %v", bn)
}
}
func TestKBFSOpsLookupNoSuchNameFail(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
id, h, rmd := createNewRMD(t, config, "alice", false)
ops := getOps(config, id)
ops.head = makeImmutableRMDForTest(t, config, rmd, fakeMdID(1))
ops.headStatus = headTrusted
u := h.FirstResolvedWriter()
rootID := kbfsblock.FakeID(42)
aID := kbfsblock.FakeID(43)
bID := kbfsblock.FakeID(44)
dirBlock := NewDirBlock().(*DirBlock)
dirBlock.Children["b"] = DirEntry{
BlockInfo: makeBIFromID(bID, u),
EntryInfo: EntryInfo{
Type: Dir,
},
}
node := pathNode{makeBP(rootID, rmd, config, u), "p"}
aNode := pathNode{makeBP(aID, rmd, config, u), "a"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, aNode}}
n := nodeFromPath(t, ops, p)
testPutBlockInCache(t, config, aNode.BlockPointer, id, dirBlock)
expectedErr := NoSuchNameError{"c"}
_, _, err := config.KBFSOps().Lookup(ctx, n, "c")
if err == nil {
t.Error("No error as expected on Lookup")
} else if err != expectedErr {
t.Errorf("Unexpected error after bad Lookup: %+v", err)
}
}
func TestKBFSOpsLookupNewDataVersionFail(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
id, h, rmd := createNewRMD(t, config, "alice", false)
ops := getOps(config, id)
ops.head = makeImmutableRMDForTest(t, config, rmd, fakeMdID(1))
ops.headStatus = headTrusted
u := h.FirstResolvedWriter()
rootID := kbfsblock.FakeID(42)
aID := kbfsblock.FakeID(43)
bID := kbfsblock.FakeID(44)
dirBlock := NewDirBlock().(*DirBlock)
bInfo := makeBIFromID(bID, u)
bInfo.DataVer = 10
dirBlock.Children["b"] = DirEntry{
BlockInfo: bInfo,
EntryInfo: EntryInfo{
Type: Dir,
},
}
node := pathNode{makeBP(rootID, rmd, config, u), "p"}
aNode := pathNode{makeBP(aID, rmd, config, u), "a"}
bNode := pathNode{makeBP(bID, rmd, config, u), "b"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, aNode}}
n := nodeFromPath(t, ops, p)
testPutBlockInCache(t, config, aNode.BlockPointer, id, dirBlock)
expectedErr := &NewDataVersionError{
path{FolderBranch{Tlf: id}, []pathNode{node, aNode, bNode}},
bInfo.DataVer,
}
_, _, err := config.KBFSOps().Lookup(ctx, n, "b")
if err == nil {
t.Error("No expected error found on lookup")
} else if err.Error() != expectedErr.Error() {
t.Errorf("Unexpected error after bad lookup: %+v", err)
}
}
func TestKBFSOpsStatSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
id, h, rmd := createNewRMD(t, config, "alice", false)
ops := getOps(config, id)
ops.head = makeImmutableRMDForTest(t, config, rmd, fakeMdID(1))
ops.headStatus = headTrusted
u := h.FirstResolvedWriter()
rootID := kbfsblock.FakeID(42)
aID := kbfsblock.FakeID(43)
bID := kbfsblock.FakeID(44)
dirBlock := NewDirBlock().(*DirBlock)
dirBlock.Children["b"] = DirEntry{
BlockInfo: makeBIFromID(bID, u),
EntryInfo: EntryInfo{
Type: Dir,
},
}
node := pathNode{makeBP(rootID, rmd, config, u), "p"}
aNode := pathNode{makeBP(aID, rmd, config, u), "a"}
bNode := pathNode{dirBlock.Children["b"].BlockPointer, "b"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, aNode, bNode}}
n := nodeFromPath(t, ops, p)
testPutBlockInCache(t, config, aNode.BlockPointer, id, dirBlock)
ei, err := config.KBFSOps().Stat(ctx, n)
if err != nil {
t.Errorf("Error on Stat: %+v", err)
}
if ei != dirBlock.Children["b"].EntryInfo {
t.Errorf("Stat returned a bad entry info: %v vs %v",
ei, dirBlock.Children["b"].EntryInfo)
}
}
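// getBlockFromCache fetches ptr from the dirty cache first, falling
// back to the clean cache; it fails the test if neither has it.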
func getBlockFromCache(t *testing.T, config Config, id tlf.ID, ptr BlockPointer,
branch BranchName) Block {
if block, err := config.DirtyBlockCache().Get(id, ptr, branch); err == nil {
return block
}
block, err := config.BlockCache().Get(ptr)
if err != nil {
t.Errorf("Couldn't find block %v, branch %s in the cache after test: "+
"%+v", ptr, branch, err)
return nil
}
return block
}
func getDirBlockFromCache(t *testing.T, config Config, id tlf.ID,
ptr BlockPointer, branch BranchName) *DirBlock {
block := getBlockFromCache(t, config, id, ptr, branch)
dblock, ok := block.(*DirBlock)
if !ok {
// Fatal, since returning a nil block would just panic the caller.
t.Fatalf("Cached block %v, branch %s was not a DirBlock", ptr, branch)
}
return dblock
}
func getFileBlockFromCache(t *testing.T, config Config, id tlf.ID,
ptr BlockPointer, branch BranchName) *FileBlock {
block := getBlockFromCache(t, config, id, ptr, branch)
fblock, ok := block.(*FileBlock)
if !ok {
// Fatal, since returning a nil block would just panic the caller.
t.Fatalf("Cached block %v, branch %s was not a FileBlock", ptr, branch)
}
return fblock
}
func testCreateEntryFailDupName(t *testing.T, isDir bool) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
u, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(42)
aID := kbfsblock.FakeID(43)
rootBlock := NewDirBlock().(*DirBlock)
rootBlock.Children["a"] = DirEntry{
BlockInfo: makeBIFromID(aID, u),
EntryInfo: EntryInfo{
Type: Dir,
},
}
node := pathNode{makeBP(rootID, rmd, config, u), "p"}
p := path{FolderBranch{Tlf: id}, []pathNode{node}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
// creating "a", which already exists in the root block
testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
expectedErr := NameExistsError{"a"}
var err error
// dir and link have different checks for dup name
if isDir {
_, _, err = config.KBFSOps().CreateDir(ctx, n, "a")
} else {
_, err = config.KBFSOps().CreateLink(ctx, n, "a", "b")
}
if err == nil {
t.Errorf("Got no expected error on create")
} else if err != expectedErr {
t.Errorf("Got unexpected error on create: %+v", err)
}
}
func TestCreateDirFailDupName(t *testing.T) {
testCreateEntryFailDupName(t, true)
}
func TestCreateLinkFailDupName(t *testing.T) {
testCreateEntryFailDupName(t, false)
}
func testCreateEntryFailNameTooLong(t *testing.T, isDir bool) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
u, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(42)
rootBlock := NewDirBlock().(*DirBlock)
node := pathNode{makeBP(rootID, rmd, config, u), "p"}
p := path{FolderBranch{Tlf: id}, []pathNode{node}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
config.maxNameBytes = 2
name := "aaa"
testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
expectedErr := NameTooLongError{name, config.maxNameBytes}
var err error
// dir and link take different paths into the name-length check
if isDir {
_, _, err = config.KBFSOps().CreateDir(ctx, n, name)
} else {
_, err = config.KBFSOps().CreateLink(ctx, n, name, "b")
}
if err == nil {
t.Errorf("Got no expected error on create")
} else if err != expectedErr {
t.Errorf("Got unexpected error on create: %+v", err)
}
}
func TestCreateDirFailNameTooLong(t *testing.T) {
testCreateEntryFailNameTooLong(t, true)
}
func TestCreateLinkFailNameTooLong(t *testing.T) {
testCreateEntryFailNameTooLong(t, false)
}
func testCreateEntryFailDirTooBig(t *testing.T, isDir bool) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
u, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(42)
rootBlock := NewDirBlock().(*DirBlock)
node := pathNode{makeBP(rootID, rmd, config, u), "p"}
p := path{FolderBranch{Tlf: id}, []pathNode{node}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
rmd.data.Dir.Size = 10
config.maxDirBytes = 12
name := "aaa"
testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
var err error
// dir and link take different paths into the dir-size check
if isDir {
_, _, err = config.KBFSOps().CreateDir(ctx, n, name)
} else {
_, err = config.KBFSOps().CreateLink(ctx, n, name, "b")
}
if err == nil {
t.Errorf("Got no expected error on create")
} else if _, ok := err.(DirTooBigError); !ok {
t.Errorf("Got unexpected error on create: %+v", err)
}
}
func TestCreateDirFailDirTooBig(t *testing.T) {
testCreateEntryFailDirTooBig(t, true)
}
func TestCreateLinkFailDirTooBig(t *testing.T) {
testCreateEntryFailDirTooBig(t, false)
}
func testCreateEntryFailKBFSPrefix(t *testing.T, et EntryType) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
u, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(42)
aID := kbfsblock.FakeID(43)
rootBlock := NewDirBlock().(*DirBlock)
rootBlock.Children["a"] = DirEntry{
BlockInfo: makeBIFromID(aID, u),
EntryInfo: EntryInfo{
Type: Dir,
},
}
node := pathNode{makeBP(rootID, rmd, config, u), "p"}
p := path{FolderBranch{Tlf: id}, []pathNode{node}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
name := ".kbfs_status"
expectedErr := DisallowedPrefixError{name, ".kbfs"}
var err error
// each entry type has its own create call that hits the prefix check
switch et {
case Dir:
_, _, err = config.KBFSOps().CreateDir(ctx, n, name)
case Sym:
_, err = config.KBFSOps().CreateLink(ctx, n, name, "a")
case Exec:
_, _, err = config.KBFSOps().CreateFile(ctx, n, name, true, NoExcl)
case File:
_, _, err = config.KBFSOps().CreateFile(ctx, n, name, false, NoExcl)
}
if err == nil {
t.Errorf("Got no expected error on create")
} else if err != expectedErr {
t.Errorf("Got unexpected error on create: %+v", err)
}
}
func TestCreateDirFailKBFSPrefix(t *testing.T) {
testCreateEntryFailKBFSPrefix(t, Dir)
}
func TestCreateFileFailKBFSPrefix(t *testing.T) {
testCreateEntryFailKBFSPrefix(t, File)
}
func TestCreateExecFailKBFSPrefix(t *testing.T) {
testCreateEntryFailKBFSPrefix(t, Exec)
}
func TestCreateLinkFailKBFSPrefix(t *testing.T) {
testCreateEntryFailKBFSPrefix(t, Sym)
}
// TODO: Currently only the remove tests use makeDirTree(),
// makeFile(), et al. Make the other tests use these functions, too.
// makeDirTree creates a block tree for the given path components and
// returns the DirEntry for the root block, a path, and the
// corresponding list of blocks. If n components are given, then the
// path will have n+1 nodes (one extra for the root node), and there
// will be n+1 corresponding blocks.
func makeDirTree(id tlf.ID, uid keybase1.UID, components ...string) (
DirEntry, path, []*DirBlock) {
var idCounter byte = 0x10
makeBlockID := func() kbfsblock.ID {
id := kbfsblock.FakeID(idCounter)
idCounter++
return id
}
// Handle the first (root) block.
bid := makeBlockID()
bi := makeBIFromID(bid, uid)
rootEntry := DirEntry{
BlockInfo: bi,
EntryInfo: EntryInfo{
Type: Dir,
},
}
nodes := []pathNode{{bi.BlockPointer, "{root}"}}
rootBlock := NewDirBlock().(*DirBlock)
blocks := []*DirBlock{rootBlock}
// Handle the rest.
parentDirBlock := rootBlock
for _, component := range components {
bid := makeBlockID()
bi := makeBIFromID(bid, uid)
parentDirBlock.Children[component] = DirEntry{
BlockInfo: bi,
EntryInfo: EntryInfo{
Type: Dir,
},
}
nodes = append(nodes, pathNode{bi.BlockPointer, component})
dirBlock := NewDirBlock().(*DirBlock)
blocks = append(blocks, dirBlock)
parentDirBlock = dirBlock
}
return rootEntry, path{FolderBranch{Tlf: id}, nodes}, blocks
}
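// For example (illustrative): makeDirTree(id, uid, "a", "b") returns a
// path with three nodes -- {root}, "a", "b" -- and three DirBlocks,
// where blocks[0] is the root block containing the entry for "a",
// blocks[1] contains the entry for "b", and blocks[2] is the empty
// leaf directory.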
func makeFile(dir path, parentDirBlock *DirBlock, name string, et EntryType,
directType BlockDirectType) (
path, *FileBlock) {
if et != File && et != Exec {
panic(fmt.Sprintf("Unexpected type %s", et))
}
bid := kbfsblock.FakeIDAdd(dir.tailPointer().ID, 1)
bi := makeBIFromID(bid, dir.tailPointer().Creator)
bi.DirectType = directType
parentDirBlock.Children[name] = DirEntry{
BlockInfo: bi,
EntryInfo: EntryInfo{
Type: et,
},
}
p := dir.ChildPath(name, bi.BlockPointer)
return p, NewFileBlock().(*FileBlock)
}
func makeDir(dir path, parentDirBlock *DirBlock, name string) (
path, *DirBlock) {
bid := kbfsblock.FakeIDAdd(dir.tailPointer().ID, 1)
bi := makeBIFromID(bid, dir.tailPointer().Creator)
parentDirBlock.Children[name] = DirEntry{
BlockInfo: bi,
EntryInfo: EntryInfo{
Type: Dir,
},
}
p := dir.ChildPath(name, bi.BlockPointer)
return p, NewDirBlock().(*DirBlock)
}
func makeSym(dir path, parentDirBlock *DirBlock, name string) {
parentDirBlock.Children[name] = DirEntry{
EntryInfo: EntryInfo{
Type: Sym,
},
}
}
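// Illustrative sketch combining the helpers above (assumes id and uid
// come from injectNewRMD, and that DirectBlock is the direct variant
// of BlockDirectType):
//
//	rootEntry, p, blocks := makeDirTree(id, uid, "a")
//	dirBlock := blocks[len(blocks)-1]
//	filePath, fileBlock := makeFile(p, dirBlock, "f", File, DirectBlock)
//	makeSym(p, dirBlock, "s")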
func TestRemoveDirFailNonEmpty(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
uid, id, rmd := injectNewRMD(t, config)
rootEntry, p, blocks := makeDirTree(id, uid, "a", "b", "c", "d", "e")
rmd.data.Dir = rootEntry
// Prime cache with all blocks.
for i, block := range blocks {
testPutBlockInCache(
t, config, p.path[i].BlockPointer, id, block)
}
ops := getOps(config, id)
n := nodeFromPath(t, ops, *p.parentPath().parentPath())
expectedErr := DirNotEmptyError{p.parentPath().tailName()}
err := config.KBFSOps().RemoveDir(ctx, n, "d")
require.Equal(t, expectedErr, err)
}
func testKBFSOpsRemoveFileMissingBlockSuccess(t *testing.T, et EntryType) {
require.NotEqual(t, et, Sym)
config, _, ctx, cancel := kbfsOpsInitNoMocks(t, "alice")
defer kbfsTestShutdownNoMocks(t, config, ctx, cancel)
config.noBGFlush = true
// create a file.
rootNode := GetRootNodeOrBust(ctx, t, config, "alice", false)
kbfsOps := config.KBFSOps()
var nodeA Node
var err error
if et == Dir {
nodeA, _, err = kbfsOps.CreateDir(ctx, rootNode, "a")
require.NoError(t, err)
err = kbfsOps.SyncAll(ctx, nodeA.GetFolderBranch())
require.NoError(t, err)
} else {
exec := false
if et == Exec {
exec = true
}
nodeA, _, err = kbfsOps.CreateFile(ctx, rootNode, "a", exec, NoExcl)
require.NoError(t, err)
data := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
err = kbfsOps.Write(ctx, nodeA, data, 0)
require.NoError(t, err)
err = kbfsOps.SyncAll(ctx, nodeA.GetFolderBranch())
require.NoError(t, err)
}
ops := getOps(config, rootNode.GetFolderBranch().Tlf)
// Remove block from the server directly, and clear caches.
config.BlockOps().Delete(ctx, rootNode.GetFolderBranch().Tlf,
[]BlockPointer{ops.nodeCache.PathFromNode(nodeA).tailPointer()})
config.ResetCaches()
err = config.KBFSOps().RemoveEntry(ctx, rootNode, "a")
require.NoError(t, err)
err = config.KBFSOps().SyncAll(ctx, rootNode.GetFolderBranch())
require.NoError(t, err)
// Shutdown the mdserver explicitly before the state checker tries
// to run, since the sizes will definitely be wrong.
defer config.MDServer().Shutdown()
}
func TestKBFSOpsRemoveFileMissingBlockSuccess(t *testing.T) {
testKBFSOpsRemoveFileMissingBlockSuccess(t, File)
}
func TestKBFSOpsRemoveExecMissingBlockSuccess(t *testing.T) {
testKBFSOpsRemoveFileMissingBlockSuccess(t, Exec)
}
func TestKBFSOpsRemoveDirMissingBlockSuccess(t *testing.T) {
testKBFSOpsRemoveFileMissingBlockSuccess(t, Dir)
}
func TestRemoveDirFailNoSuchName(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
uid, id, rmd := injectNewRMD(t, config)
rootEntry, p, blocks := makeDirTree(id, uid, "a", "b", "c", "d", "e")
rmd.data.Dir = rootEntry
// Prime cache with all blocks.
for i, block := range blocks {
testPutBlockInCache(
t, config, p.path[i].BlockPointer, id, block)
}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
expectedErr := NoSuchNameError{"nonexistent"}
err := config.KBFSOps().RemoveDir(ctx, n, "nonexistent")
require.Equal(t, expectedErr, err)
}
func TestRenameFailAcrossTopLevelFolders(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
id1 := tlf.FakeID(1, false)
h1 := parseTlfHandleOrBust(t, config, "alice,bob", false)
rmd1, err := makeInitialRootMetadata(config.MetadataVersion(), id1, h1)
require.NoError(t, err)
id2 := tlf.FakeID(2, false)
h2 := parseTlfHandleOrBust(t, config, "alice,bob,charlie", false)
rmd2, err := makeInitialRootMetadata(config.MetadataVersion(), id2, h2)
require.NoError(t, err)
uid1 := h2.ResolvedWriters()[0]
uid2 := h2.ResolvedWriters()[2]
rootID1 := kbfsblock.FakeID(41)
aID1 := kbfsblock.FakeID(42)
node1 := pathNode{makeBP(rootID1, rmd1, config, uid1), "p"}
aNode1 := pathNode{makeBP(aID1, rmd1, config, uid1), "a"}
p1 := path{FolderBranch{Tlf: id1}, []pathNode{node1, aNode1}}
ops1 := getOps(config, id1)
n1 := nodeFromPath(t, ops1, p1)
rootID2 := kbfsblock.FakeID(38)
aID2 := kbfsblock.FakeID(39)
node2 := pathNode{makeBP(rootID2, rmd2, config, uid2), "p"}
aNode2 := pathNode{makeBP(aID2, rmd2, config, uid2), "a"}
p2 := path{FolderBranch{Tlf: id2}, []pathNode{node2, aNode2}}
ops2 := getOps(config, id2)
n2 := nodeFromPath(t, ops2, p2)
expectedErr := RenameAcrossDirsError{}
if err := config.KBFSOps().Rename(ctx, n1, "b", n2, "c"); err == nil {
t.Errorf("Got no expected error on rename")
} else if err.Error() != expectedErr.Error() {
t.Errorf("Got unexpected error on rename: %+v", err)
}
}
func TestRenameFailAcrossBranches(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
id1 := tlf.FakeID(1, false)
h1 := parseTlfHandleOrBust(t, config, "alice,bob", false)
rmd1, err := makeInitialRootMetadata(config.MetadataVersion(), id1, h1)
require.NoError(t, err)
uid1 := h1.FirstResolvedWriter()
rootID1 := kbfsblock.FakeID(41)
aID1 := kbfsblock.FakeID(42)
node1 := pathNode{makeBP(rootID1, rmd1, config, uid1), "p"}
aNode1 := pathNode{makeBP(aID1, rmd1, config, uid1), "a"}
p1 := path{FolderBranch{Tlf: id1}, []pathNode{node1, aNode1}}
p2 := path{FolderBranch{id1, "test"}, []pathNode{node1, aNode1}}
ops1 := getOps(config, id1)
n1 := nodeFromPath(t, ops1, p1)
ops2 := config.KBFSOps().(*KBFSOpsStandard).getOpsNoAdd(
FolderBranch{id1, "test"})
n2 := nodeFromPath(t, ops2, p2)
expectedErr := RenameAcrossDirsError{}
if err := config.KBFSOps().Rename(ctx, n1, "b", n2, "c"); err == nil {
t.Errorf("Got no expected error on rename")
} else if err.Error() != expectedErr.Error() {
t.Errorf("Got unexpected error on rename: %+v", err)
}
}
func TestKBFSOpsCacheReadFullSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
u, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(42)
fileID := kbfsblock.FakeID(43)
fileBlock := NewFileBlock().(*FileBlock)
fileBlock.Contents = []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
node := pathNode{makeBP(rootID, rmd, config, u), "p"}
fileNode := pathNode{makeBP(fileID, rmd, config, u), "f"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
ops := getOps(config, id)
pNode := nodeFromPath(t, ops, p)
testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
n := len(fileBlock.Contents)
dest := make([]byte, n)
if n2, err := config.KBFSOps().Read(ctx, pNode, dest, 0); err != nil {
t.Errorf("Got error on read: %+v", err)
} else if n2 != int64(n) {
t.Errorf("Read the wrong number of bytes: %d", n2)
} else if !bytes.Equal(dest, fileBlock.Contents) {
t.Errorf("Read bad contents: %v", dest)
}
}
func TestKBFSOpsCacheReadPartialSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
u, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(42)
fileID := kbfsblock.FakeID(43)
fileBlock := NewFileBlock().(*FileBlock)
fileBlock.Contents = []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
node := pathNode{makeBP(rootID, rmd, config, u), "p"}
fileNode := pathNode{makeBP(fileID, rmd, config, u), "f"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
ops := getOps(config, id)
pNode := nodeFromPath(t, ops, p)
testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
dest := make([]byte, 4)
if n, err := config.KBFSOps().Read(ctx, pNode, dest, 2); err != nil {
t.Errorf("Got error on read: %+v", err)
} else if n != 4 {
t.Errorf("Read the wrong number of bytes: %d", n)
} else if !bytes.Equal(dest, fileBlock.Contents[2:6]) {
t.Errorf("Read bad contents: %v", dest)
}
}
func TestKBFSOpsCacheReadFullMultiBlockSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
u, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(42)
fileID := kbfsblock.FakeID(43)
id1 := kbfsblock.FakeID(44)
id2 := kbfsblock.FakeID(45)
id3 := kbfsblock.FakeID(46)
id4 := kbfsblock.FakeID(47)
fileBlock := NewFileBlock().(*FileBlock)
fileBlock.IsInd = true
fileBlock.IPtrs = []IndirectFilePtr{
makeIFP(id1, rmd, config, u, 0, 0),
makeIFP(id2, rmd, config, u, 6, 5),
makeIFP(id3, rmd, config, u, 7, 10),
makeIFP(id4, rmd, config, u, 8, 15),
}
block1 := NewFileBlock().(*FileBlock)
block1.Contents = []byte{5, 4, 3, 2, 1}
block2 := NewFileBlock().(*FileBlock)
block2.Contents = []byte{10, 9, 8, 7, 6}
block3 := NewFileBlock().(*FileBlock)
block3.Contents = []byte{15, 14, 13, 12, 11}
block4 := NewFileBlock().(*FileBlock)
block4.Contents = []byte{20, 19, 18, 17, 16}
node := pathNode{makeBP(rootID, rmd, config, u), "p"}
fileNode := pathNode{makeBP(fileID, rmd, config, u), "a"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
ops := getOps(config, id)
pNode := nodeFromPath(t, ops, p)
testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
testPutBlockInCache(t, config, fileBlock.IPtrs[0].BlockPointer, id, block1)
testPutBlockInCache(t, config, fileBlock.IPtrs[1].BlockPointer, id, block2)
testPutBlockInCache(t, config, fileBlock.IPtrs[2].BlockPointer, id, block3)
testPutBlockInCache(t, config, fileBlock.IPtrs[3].BlockPointer, id, block4)
n := 20
dest := make([]byte, n)
fullContents := append(block1.Contents, block2.Contents...)
fullContents = append(fullContents, block3.Contents...)
fullContents = append(fullContents, block4.Contents...)
if n2, err := config.KBFSOps().Read(ctx, pNode, dest, 0); err != nil {
t.Errorf("Got error on read: %+v", err)
} else if n2 != int64(n) {
t.Errorf("Read the wrong number of bytes: %d", n2)
} else if !bytes.Equal(dest, fullContents) {
t.Errorf("Read bad contents: %v", dest)
}
}
func TestKBFSOpsCacheReadPartialMultiBlockSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
u, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(42)
fileID := kbfsblock.FakeID(43)
id1 := kbfsblock.FakeID(44)
id2 := kbfsblock.FakeID(45)
id3 := kbfsblock.FakeID(46)
id4 := kbfsblock.FakeID(47)
fileBlock := NewFileBlock().(*FileBlock)
fileBlock.IsInd = true
fileBlock.IPtrs = []IndirectFilePtr{
makeIFP(id1, rmd, config, u, 0, 0),
makeIFP(id2, rmd, config, u, 6, 5),
makeIFP(id3, rmd, config, u, 7, 10),
makeIFP(id4, rmd, config, u, 8, 15),
}
block1 := NewFileBlock().(*FileBlock)
block1.Contents = []byte{5, 4, 3, 2, 1}
block2 := NewFileBlock().(*FileBlock)
block2.Contents = []byte{10, 9, 8, 7, 6}
block3 := NewFileBlock().(*FileBlock)
block3.Contents = []byte{15, 14, 13, 12, 11}
block4 := NewFileBlock().(*FileBlock)
block4.Contents = []byte{20, 19, 18, 17, 16}
node := pathNode{makeBP(rootID, rmd, config, u), "p"}
fileNode := pathNode{makeBP(fileID, rmd, config, u), "a"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
ops := getOps(config, id)
pNode := nodeFromPath(t, ops, p)
testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
testPutBlockInCache(t, config, fileBlock.IPtrs[0].BlockPointer, id, block1)
testPutBlockInCache(t, config, fileBlock.IPtrs[1].BlockPointer, id, block2)
testPutBlockInCache(t, config, fileBlock.IPtrs[2].BlockPointer, id, block3)
n := 10
dest := make([]byte, n)
contents := append(block1.Contents[3:], block2.Contents...)
contents = append(contents, block3.Contents[:3]...)
if n2, err := config.KBFSOps().Read(ctx, pNode, dest, 3); err != nil {
t.Errorf("Got error on read: %+v", err)
} else if n2 != int64(n) {
t.Errorf("Read the wrong number of bytes: %d", n2)
} else if !bytes.Equal(dest, contents) {
t.Errorf("Read bad contents: %v", dest)
}
}
func TestKBFSOpsCacheReadFailPastEnd(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
u, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(42)
fileID := kbfsblock.FakeID(43)
fileBlock := NewFileBlock().(*FileBlock)
fileBlock.Contents = []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
node := pathNode{makeBP(rootID, rmd, config, u), "p"}
fileNode := pathNode{makeBP(fileID, rmd, config, u), "f"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
ops := getOps(config, id)
pNode := nodeFromPath(t, ops, p)
testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
dest := make([]byte, 4)
if n, err := config.KBFSOps().Read(ctx, pNode, dest, 10); err != nil {
t.Errorf("Got error on read: %+v", err)
} else if n != 0 {
t.Errorf("Read the wrong number of bytes: %d", n)
}
}
func TestKBFSOpsServerReadFullSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
u, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(42)
fileID := kbfsblock.FakeID(43)
fileBlock := NewFileBlock().(*FileBlock)
fileBlock.Contents = []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
node := pathNode{makeBP(rootID, rmd, config, u), "p"}
fileBlockPtr := makeBP(fileID, rmd, config, u)
fileNode := pathNode{fileBlockPtr, "f"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
ops := getOps(config, id)
pNode := nodeFromPath(t, ops, p)
// cache miss means fetching metadata and getting read key
expectBlock(config, rmd, fileBlockPtr, fileBlock, nil)
n := len(fileBlock.Contents)
dest := make([]byte, n)
if n2, err := config.KBFSOps().Read(ctx, pNode, dest, 0); err != nil {
t.Errorf("Got error on read: %+v", err)
} else if n2 != int64(n) {
t.Errorf("Read the wrong number of bytes: %d", n2)
} else if !bytes.Equal(dest, fileBlock.Contents) {
t.Errorf("Read bad contents: %v", dest)
}
}
func TestKBFSOpsServerReadFailNoSuchBlock(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
u, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(42)
fileID := kbfsblock.FakeID(43)
fileBlock := NewFileBlock().(*FileBlock)
fileBlock.Contents = []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
node := pathNode{makeBP(rootID, rmd, config, u), "p"}
fileBlockPtr := makeBP(fileID, rmd, config, u)
fileNode := pathNode{fileBlockPtr, "f"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
ops := getOps(config, id)
pNode := nodeFromPath(t, ops, p)
// cache miss means fetching metadata and getting read key
err := NoSuchBlockError{rootID}
expectBlock(config, rmd, fileBlockPtr, fileBlock, err)
n := len(fileBlock.Contents)
dest := make([]byte, n)
if _, err2 := config.KBFSOps().Read(ctx, pNode, dest, 0); err2 == nil {
t.Errorf("Got no expected error")
} else if err2 != err {
t.Errorf("Got unexpected error: %+v", err2)
}
}
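// checkSyncOp verifies that the given syncOp unrefs filePtr and records
// exactly the expected write ranges.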
func checkSyncOp(t *testing.T, codec kbfscodec.Codec,
so *syncOp, filePtr BlockPointer, writes []WriteRange) {
if so == nil {
t.Fatal("No sync info for written file!")
}
if so.File.Unref != filePtr {
t.Errorf("Unexpected unref file in sync op: %v vs %v",
so.File.Unref, filePtr)
}
if len(so.Writes) != len(writes) {
t.Errorf("Unexpected number of writes: %v (expected %v)",
len(so.Writes), len(writes))
}
for i, w := range writes {
writeEqual, err := kbfscodec.Equal(codec, so.Writes[i], w)
if err != nil {
t.Fatal(err)
}
if !writeEqual {
t.Errorf("Unexpected write: %v vs %v", so.Writes[i], w)
}
}
}
func checkSyncOpInCache(t *testing.T, codec kbfscodec.Codec,
ops *folderBranchOps, filePtr BlockPointer, writes []WriteRange) {
// check the in-progress syncOp
si, ok := ops.blocks.unrefCache[filePtr.Ref()]
if !ok {
t.Fatal("No sync info for written file!")
}
checkSyncOp(t, codec, si.op, filePtr, writes)
}
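// updateWithDirtyEntries applies any dirty directory entries to block
// while holding blockLock for reading, returning the updated block.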
func updateWithDirtyEntries(ctx context.Context, ops *folderBranchOps,
lState *lockState, dir path, block *DirBlock) (*DirBlock, error) {
ops.blocks.blockLock.RLock(lState)
defer ops.blocks.blockLock.RUnlock(lState)
return ops.blocks.updateWithDirtyEntriesLocked(ctx, lState, dir, block)
}
func TestKBFSOpsWriteNewBlockSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
uid, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(42)
fileID := kbfsblock.FakeID(43)
rootBlock := NewDirBlock().(*DirBlock)
rootBlock.Children["f"] = DirEntry{
BlockInfo: BlockInfo{
BlockPointer: makeBP(fileID, rmd, config, uid),
EncodedSize: 1,
},
EntryInfo: EntryInfo{
Type: File,
},
}
fileBlock := NewFileBlock().(*FileBlock)
node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
fileNode := pathNode{makeBP(fileID, rmd, config, uid), "f"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
data := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
config.mockBsplit.EXPECT().CopyUntilSplit(
gomock.Any(), gomock.Any(), data, int64(0)).
Do(func(block *FileBlock, lb bool, data []byte, off int64) {
block.Contents = data
}).Return(int64(len(data)))
if err := config.KBFSOps().Write(ctx, n, data, 0); err != nil {
t.Errorf("Got error on write: %+v", err)
}
newFileBlock := getFileBlockFromCache(t, config, id, fileNode.BlockPointer,
p.Branch)
newRootBlock := getDirBlockFromCache(
t, config, id, node.BlockPointer, p.Branch)
lState := makeFBOLockState()
newRootBlock, err := updateWithDirtyEntries(
ctx, ops, lState, *p.parentPath(), newRootBlock)
require.NoError(t, err)
if len(ops.nodeCache.PathFromNode(config.observer.localChange).path) !=
len(p.path) {
t.Errorf("Missing or incorrect local update during write: %v",
config.observer.localChange)
} else if ctx.Value(tCtxID) != config.observer.ctx.Value(tCtxID) {
t.Errorf("Wrong context value passed in local notify: %v",
config.observer.ctx.Value(tCtxID))
} else if !bytes.Equal(data, newFileBlock.Contents) {
t.Errorf("Wrote bad contents: %v", data)
} else if newRootBlock.Children["f"].GetWriter() != uid {
t.Errorf("Wrong last writer: %v",
newRootBlock.Children["f"].GetWriter())
} else if newRootBlock.Children["f"].Size != uint64(len(data)) {
t.Errorf("Wrong size for written file: %d",
newRootBlock.Children["f"].Size)
}
checkBlockCache(t, config, id, []kbfsblock.ID{rootID, fileID},
map[BlockPointer]BranchName{
fileNode.BlockPointer: p.Branch,
})
checkSyncOpInCache(t, config.Codec(), ops, fileNode.BlockPointer,
[]WriteRange{{Off: 0, Len: uint64(len(data))}})
}
func TestKBFSOpsWriteExtendSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
uid, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(42)
fileID := kbfsblock.FakeID(43)
rootBlock := NewDirBlock().(*DirBlock)
rootBlock.Children["f"] = DirEntry{
BlockInfo: BlockInfo{
BlockPointer: makeBP(fileID, rmd, config, uid),
EncodedSize: 1,
},
EntryInfo: EntryInfo{
Type: File,
},
}
fileBlock := NewFileBlock().(*FileBlock)
fileBlock.Contents = []byte{1, 2, 3, 4, 5}
node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
fileNode := pathNode{makeBP(fileID, rmd, config, uid), "f"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
data := []byte{6, 7, 8, 9, 10}
expectedFullData := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
config.mockBsplit.EXPECT().CopyUntilSplit(
gomock.Any(), gomock.Any(), data, int64(5)).
Do(func(block *FileBlock, lb bool, data []byte, off int64) {
block.Contents = expectedFullData
}).Return(int64(len(data)))
if err := config.KBFSOps().Write(ctx, n, data, 5); err != nil {
t.Errorf("Got error on write: %+v", err)
}
newFileBlock := getFileBlockFromCache(t, config, id, fileNode.BlockPointer,
p.Branch)
if len(ops.nodeCache.PathFromNode(config.observer.localChange).path) !=
len(p.path) {
t.Errorf("Missing or incorrect local update during write: %v",
config.observer.localChange)
} else if ctx.Value(tCtxID) != config.observer.ctx.Value(tCtxID) {
t.Errorf("Wrong context value passed in local notify: %v",
config.observer.ctx.Value(tCtxID))
} else if !bytes.Equal(expectedFullData, newFileBlock.Contents) {
t.Errorf("Wrote bad contents: %v", data)
}
checkBlockCache(t, config, id, []kbfsblock.ID{rootID, fileID},
map[BlockPointer]BranchName{
fileNode.BlockPointer: p.Branch,
})
checkSyncOpInCache(t, config.Codec(), ops, fileNode.BlockPointer,
[]WriteRange{{Off: 5, Len: uint64(len(data))}})
}
func TestKBFSOpsWritePastEndSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
uid, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(42)
fileID := kbfsblock.FakeID(43)
rootBlock := NewDirBlock().(*DirBlock)
rootBlock.Children["f"] = DirEntry{
BlockInfo: BlockInfo{
BlockPointer: makeBP(fileID, rmd, config, uid),
EncodedSize: 1,
},
EntryInfo: EntryInfo{
Type: File,
},
}
fileBlock := NewFileBlock().(*FileBlock)
fileBlock.Contents = []byte{1, 2, 3, 4, 5}
node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
fileNode := pathNode{makeBP(fileID, rmd, config, uid), "f"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
data := []byte{6, 7, 8, 9, 10}
expectedFullData := []byte{1, 2, 3, 4, 5, 0, 0, 6, 7, 8, 9, 10}
testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
config.mockBsplit.EXPECT().CopyUntilSplit(
gomock.Any(), gomock.Any(), data, int64(7)).
Do(func(block *FileBlock, lb bool, data []byte, off int64) {
block.Contents = expectedFullData
}).Return(int64(len(data)))
if err := config.KBFSOps().Write(ctx, n, data, 7); err != nil {
t.Errorf("Got error on write: %+v", err)
}
newFileBlock := getFileBlockFromCache(t, config, id, fileNode.BlockPointer,
p.Branch)
if len(ops.nodeCache.PathFromNode(config.observer.localChange).path) !=
len(p.path) {
t.Errorf("Missing or incorrect local update during write: %v",
config.observer.localChange)
} else if ctx.Value(tCtxID) != config.observer.ctx.Value(tCtxID) {
t.Errorf("Wrong context value passed in local notify: %v",
config.observer.ctx.Value(tCtxID))
} else if !bytes.Equal(expectedFullData, newFileBlock.Contents) {
t.Errorf("Wrote bad contents: %v", data)
}
checkBlockCache(t, config, id, []kbfsblock.ID{rootID, fileID},
map[BlockPointer]BranchName{
fileNode.BlockPointer: p.Branch,
})
checkSyncOpInCache(t, config.Codec(), ops, fileNode.BlockPointer,
[]WriteRange{{Off: 7, Len: uint64(len(data))}})
}
func TestKBFSOpsWriteCauseSplit(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
uid, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(42)
fileID := kbfsblock.FakeID(43)
rootBlock := NewDirBlock().(*DirBlock)
rootBlock.Children["f"] = DirEntry{
BlockInfo: BlockInfo{
BlockPointer: makeBP(fileID, rmd, config, uid),
EncodedSize: 1,
},
EntryInfo: EntryInfo{
Type: File,
},
}
fileBlock := NewFileBlock().(*FileBlock)
fileBlock.Contents = []byte{}
node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
fileNode := pathNode{makeBP(fileID, rmd, config, uid), "f"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
newData := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
expectedFullData := append([]byte{0}, newData...)
testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
// only copy the first half first
config.mockBsplit.EXPECT().CopyUntilSplit(
gomock.Any(), gomock.Any(), newData, int64(1)).
Do(func(block *FileBlock, lb bool, data []byte, off int64) {
block.Contents = append([]byte{0}, data[0:5]...)
}).Return(int64(5))
id1 := kbfsblock.FakeID(44)
id2 := kbfsblock.FakeID(45)
// new left block
config.mockCrypto.EXPECT().MakeTemporaryBlockID().Return(id1, nil)
// new right block
config.mockCrypto.EXPECT().MakeTemporaryBlockID().Return(id2, nil)
// next we'll get the right block again
// then the second half
config.mockBsplit.EXPECT().CopyUntilSplit(
gomock.Any(), gomock.Any(), newData[5:10], int64(0)).
Do(func(block *FileBlock, lb bool, data []byte, off int64) {
block.Contents = data
}).Return(int64(5))
if err := config.KBFSOps().Write(ctx, n, newData, 1); err != nil {
t.Errorf("Got error on write: %+v", err)
}
b, _ := config.BlockCache().Get(node.BlockPointer)
newRootBlock := b.(*DirBlock)
lState := makeFBOLockState()
newRootBlock, err := updateWithDirtyEntries(
ctx, ops, lState, *p.parentPath(), newRootBlock)
require.NoError(t, err)
b, _ = config.DirtyBlockCache().Get(id, fileNode.BlockPointer, p.Branch)
pblock := b.(*FileBlock)
b, _ = config.DirtyBlockCache().Get(id, makeBP(id1, rmd, config, uid),
p.Branch)
block1 := b.(*FileBlock)
b, _ = config.DirtyBlockCache().Get(id, makeBP(id2, rmd, config, uid),
p.Branch)
block2 := b.(*FileBlock)
if len(ops.nodeCache.PathFromNode(config.observer.localChange).path) !=
len(p.path) {
t.Errorf("Missing or incorrect local update during write: %v",
config.observer.localChange)
} else if ctx.Value(tCtxID) != config.observer.ctx.Value(tCtxID) {
t.Errorf("Wrong context value passed in local notify: %v",
config.observer.ctx.Value(tCtxID))
} else if !bytes.Equal(expectedFullData[0:6], block1.Contents) {
t.Errorf("Wrote bad contents to block 1: %v", block1.Contents)
} else if !bytes.Equal(expectedFullData[6:11], block2.Contents) {
t.Errorf("Wrote bad contents to block 2: %v", block2.Contents)
} else if !pblock.IsInd {
t.Errorf("Parent block is not indirect!")
} else if len(pblock.IPtrs) != 2 {
t.Errorf("Wrong number of pointers in pblock: %v", pblock.IPtrs)
} else if pblock.IPtrs[0].ID != id1 {
t.Errorf("Parent block has wrong id for block 1: %v (vs. %v)",
pblock.IPtrs[0].ID, id1)
} else if pblock.IPtrs[1].ID != id2 {
t.Errorf("Parent block has wrong id for block 2: %v",
pblock.IPtrs[1].ID)
} else if pblock.IPtrs[0].Off != 0 {
t.Errorf("Parent block has wrong offset for block 1: %d",
pblock.IPtrs[0].Off)
} else if pblock.IPtrs[1].Off != 6 {
t.Errorf("Parent block has wrong offset for block 5: %d",
pblock.IPtrs[1].Off)
} else if newRootBlock.Children["f"].Size != uint64(11) {
t.Errorf("Wrong size for written file: %d",
newRootBlock.Children["f"].Size)
}
checkBlockCache(t, config, id, []kbfsblock.ID{rootID, fileID},
map[BlockPointer]BranchName{
fileNode.BlockPointer: p.Branch,
pblock.IPtrs[0].BlockPointer: p.Branch,
pblock.IPtrs[1].BlockPointer: p.Branch,
})
checkSyncOpInCache(t, config.Codec(), ops, fileNode.BlockPointer,
[]WriteRange{{Off: 1, Len: uint64(len(newData))}})
}
func mergeUnrefCache(
ops *folderBranchOps, lState *lockState, file path, md *RootMetadata) {
ops.blocks.blockLock.RLock(lState)
defer ops.blocks.blockLock.RUnlock(lState)
ops.blocks.unrefCache[file.tailPointer().Ref()].mergeUnrefCache(md)
}
func TestKBFSOpsWriteOverMultipleBlocks(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
uid, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(42)
fileID := kbfsblock.FakeID(43)
id1 := kbfsblock.FakeID(44)
id2 := kbfsblock.FakeID(45)
rootBlock := NewDirBlock().(*DirBlock)
filePtr := BlockPointer{
ID: fileID, KeyGen: 1, DataVer: 1,
Context: kbfsblock.Context{
Creator: uid,
},
}
rootBlock.Children["f"] = DirEntry{
BlockInfo: BlockInfo{
BlockPointer: filePtr,
EncodedSize: 1,
},
EntryInfo: EntryInfo{
Size: 10,
},
}
fileBlock := NewFileBlock().(*FileBlock)
fileBlock.IsInd = true
fileBlock.IPtrs = []IndirectFilePtr{
makeIFP(id1, rmd, config, uid, 5, 0),
makeIFP(id2, rmd, config, uid, 6, 5),
}
block1 := NewFileBlock().(*FileBlock)
block1.Contents = []byte{5, 4, 3, 2, 1}
block2 := NewFileBlock().(*FileBlock)
block2.Contents = []byte{10, 9, 8, 7, 6}
node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
fileNode := pathNode{makeBP(fileID, rmd, config, uid), "f"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
data := []byte{1, 2, 3, 4, 5}
expectedFullData := []byte{5, 4, 1, 2, 3, 4, 5, 8, 7, 6}
so, err := newSyncOp(filePtr)
require.NoError(t, err)
rmd.AddOp(so)
testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
testPutBlockInCache(t, config, fileBlock.IPtrs[0].BlockPointer, id, block1)
testPutBlockInCache(t, config, fileBlock.IPtrs[1].BlockPointer, id, block2)
// only copy the first half first
config.mockBsplit.EXPECT().CopyUntilSplit(
gomock.Any(), gomock.Any(), []byte{1, 2, 3}, int64(2)).
Do(func(block *FileBlock, lb bool, data []byte, off int64) {
block.Contents = append(block1.Contents[0:2], data[0:3]...)
}).Return(int64(3))
// update block 2
config.mockBsplit.EXPECT().CopyUntilSplit(
gomock.Any(), gomock.Any(), data[3:], int64(0)).
Do(func(block *FileBlock, lb bool, data []byte, off int64) {
block.Contents = append(data, block2.Contents[2:]...)
}).Return(int64(2))
if err := config.KBFSOps().Write(ctx, n, data, 2); err != nil {
t.Errorf("Got error on write: %+v", err)
}
newBlock1 := getFileBlockFromCache(t, config, id,
fileBlock.IPtrs[0].BlockPointer, p.Branch)
newBlock2 := getFileBlockFromCache(t, config, id,
fileBlock.IPtrs[1].BlockPointer, p.Branch)
if len(ops.nodeCache.PathFromNode(config.observer.localChange).path) !=
len(p.path) {
t.Errorf("Missing or incorrect local update during write: %v",
config.observer.localChange)
} else if ctx.Value(tCtxID) != config.observer.ctx.Value(tCtxID) {
t.Errorf("Wrong context value passed in local notify: %v",
config.observer.ctx.Value(tCtxID))
} else if !bytes.Equal(expectedFullData[0:5], newBlock1.Contents) {
t.Errorf("Wrote bad contents to block 1: %v", block1.Contents)
} else if !bytes.Equal(expectedFullData[5:10], newBlock2.Contents) {
t.Errorf("Wrote bad contents to block 2: %v", block2.Contents)
}
lState := makeFBOLockState()
// merge the unref cache to make it easy to check for changes
checkSyncOpInCache(t, config.Codec(), ops, fileNode.BlockPointer,
[]WriteRange{{Off: 2, Len: uint64(len(data))}})
mergeUnrefCache(ops, lState, p, rmd)
checkBlockCache(t, config, id, []kbfsblock.ID{rootID, fileID, id1, id2},
map[BlockPointer]BranchName{
fileNode.BlockPointer: p.Branch,
fileBlock.IPtrs[0].BlockPointer: p.Branch,
fileBlock.IPtrs[1].BlockPointer: p.Branch,
})
}
// Read tests check the same error cases, so no need for similar write
// error tests
func TestKBFSOpsTruncateToZeroSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
uid, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(42)
fileID := kbfsblock.FakeID(43)
rootBlock := NewDirBlock().(*DirBlock)
rootBlock.Children["f"] = DirEntry{
BlockInfo: BlockInfo{
BlockPointer: makeBP(fileID, rmd, config, uid),
EncodedSize: 1,
},
EntryInfo: EntryInfo{
Type: File,
},
}
fileBlock := NewFileBlock().(*FileBlock)
fileBlock.Contents = []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
fileNode := pathNode{makeBP(fileID, rmd, config, uid), "f"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
data := []byte{}
if err := config.KBFSOps().Truncate(ctx, n, 0); err != nil {
t.Errorf("Got error on truncate: %+v", err)
}
newFileBlock := getFileBlockFromCache(t, config, id, fileNode.BlockPointer,
p.Branch)
newRootBlock := getDirBlockFromCache(
t, config, id, node.BlockPointer, p.Branch)
lState := makeFBOLockState()
newRootBlock, err := updateWithDirtyEntries(
ctx, ops, lState, *p.parentPath(), newRootBlock)
require.NoError(t, err)
if len(ops.nodeCache.PathFromNode(config.observer.localChange).path) !=
len(p.path) {
t.Errorf("Missing or incorrect local update during truncate: %v",
config.observer.localChange)
} else if ctx.Value(tCtxID) != config.observer.ctx.Value(tCtxID) {
t.Errorf("Wrong context value passed in local notify: %v",
config.observer.ctx.Value(tCtxID))
} else if !bytes.Equal(data, newFileBlock.Contents) {
t.Errorf("Wrote bad contents: %v", newFileBlock.Contents)
} else if newRootBlock.Children["f"].GetWriter() != uid {
t.Errorf("Wrong last writer: %v",
newRootBlock.Children["f"].GetWriter())
} else if newRootBlock.Children["f"].Size != 0 {
t.Errorf("Wrong size for written file: %d",
newRootBlock.Children["f"].Size)
}
checkBlockCache(t, config, id, []kbfsblock.ID{rootID, fileID},
map[BlockPointer]BranchName{
fileNode.BlockPointer: p.Branch,
})
checkSyncOpInCache(t, config.Codec(), ops, fileNode.BlockPointer,
[]WriteRange{{Off: 0, Len: 0}})
}
func TestKBFSOpsTruncateSameSize(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
u, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(42)
fileID := kbfsblock.FakeID(43)
rootBlock := NewDirBlock().(*DirBlock)
rootBlock.Children["f"] = DirEntry{
BlockInfo: makeBIFromID(fileID, u),
EntryInfo: EntryInfo{
Type: File,
},
}
fileBlock := NewFileBlock().(*FileBlock)
fileBlock.Contents = []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
node := pathNode{makeBP(rootID, rmd, config, u), "p"}
fileNode := pathNode{makeBP(fileID, rmd, config, u), "f"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
	data := append([]byte(nil), fileBlock.Contents...) // copy, so the equality check below isn't vacuous
if err := config.KBFSOps().Truncate(ctx, n, 10); err != nil {
t.Errorf("Got error on truncate: %+v", err)
} else if config.observer.localChange != nil {
t.Errorf("Unexpected local update during truncate: %v",
config.observer.localChange)
} else if !bytes.Equal(data, fileBlock.Contents) {
t.Errorf("Wrote bad contents: %v", data)
}
checkBlockCache(t, config, id, []kbfsblock.ID{rootID, fileID}, nil)
}
func TestKBFSOpsTruncateSmallerSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
uid, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(42)
fileID := kbfsblock.FakeID(43)
rootBlock := NewDirBlock().(*DirBlock)
rootBlock.Children["f"] = DirEntry{
BlockInfo: BlockInfo{
BlockPointer: makeBP(fileID, rmd, config, uid),
EncodedSize: 1,
},
EntryInfo: EntryInfo{
Type: File,
},
}
fileBlock := NewFileBlock().(*FileBlock)
fileBlock.Contents = []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
fileNode := pathNode{makeBP(fileID, rmd, config, uid), "f"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
data := []byte{1, 2, 3, 4, 5}
if err := config.KBFSOps().Truncate(ctx, n, 5); err != nil {
t.Errorf("Got error on truncate: %+v", err)
}
newFileBlock := getFileBlockFromCache(t, config, id, fileNode.BlockPointer,
p.Branch)
if len(ops.nodeCache.PathFromNode(config.observer.localChange).path) !=
len(p.path) {
t.Errorf("Missing or incorrect local update during truncate: %v",
config.observer.localChange)
} else if ctx.Value(tCtxID) != config.observer.ctx.Value(tCtxID) {
t.Errorf("Wrong context value passed in local notify: %v",
config.observer.ctx.Value(tCtxID))
} else if !bytes.Equal(data, newFileBlock.Contents) {
t.Errorf("Wrote bad contents: %v", data)
}
checkBlockCache(t, config, id, []kbfsblock.ID{rootID, fileID},
map[BlockPointer]BranchName{
fileNode.BlockPointer: p.Branch,
})
checkSyncOpInCache(t, config.Codec(), ops, fileNode.BlockPointer,
[]WriteRange{{Off: 5, Len: 0}})
}
func TestKBFSOpsTruncateShortensLastBlock(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
uid, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(42)
fileID := kbfsblock.FakeID(43)
id1 := kbfsblock.FakeID(44)
id2 := kbfsblock.FakeID(45)
rootBlock := NewDirBlock().(*DirBlock)
fileInfo := makeBIFromID(fileID, uid)
rootBlock.Children["f"] = DirEntry{
BlockInfo: fileInfo,
EntryInfo: EntryInfo{
Size: 10,
},
}
fileBlock := NewFileBlock().(*FileBlock)
fileBlock.IsInd = true
fileBlock.IPtrs = []IndirectFilePtr{
makeIFP(id1, rmd, config, uid, 5, 0),
makeIFP(id2, rmd, config, uid, 6, 5),
}
block1 := NewFileBlock().(*FileBlock)
block1.Contents = []byte{5, 4, 3, 2, 1}
block2 := NewFileBlock().(*FileBlock)
block2.Contents = []byte{10, 9, 8, 7, 6}
node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
fileNode := pathNode{makeBP(fileID, rmd, config, uid), "f"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
so, err := newSyncOp(fileInfo.BlockPointer)
require.NoError(t, err)
rmd.AddOp(so)
testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
testPutBlockInCache(t, config, fileBlock.IPtrs[0].BlockPointer, id, block1)
testPutBlockInCache(t, config, fileBlock.IPtrs[1].BlockPointer, id, block2)
data2 := []byte{10, 9}
if err := config.KBFSOps().Truncate(ctx, n, 7); err != nil {
t.Errorf("Got error on truncate: %+v", err)
}
newPBlock := getFileBlockFromCache(t, config, id, fileNode.BlockPointer,
p.Branch)
newBlock1 := getFileBlockFromCache(t, config, id,
fileBlock.IPtrs[0].BlockPointer, p.Branch)
newBlock2 := getFileBlockFromCache(t, config, id,
fileBlock.IPtrs[1].BlockPointer, p.Branch)
lState := makeFBOLockState()
// merge unref changes so we can easily check the block changes
checkSyncOpInCache(t, config.Codec(), ops, fileNode.BlockPointer,
[]WriteRange{{Off: 7, Len: 0}})
mergeUnrefCache(ops, lState, p, rmd)
if len(ops.nodeCache.PathFromNode(config.observer.localChange).path) !=
len(p.path) {
t.Errorf("Missing or incorrect local update during truncate: %v",
config.observer.localChange)
} else if ctx.Value(tCtxID) != config.observer.ctx.Value(tCtxID) {
t.Errorf("Wrong context value passed in local notify: %v",
config.observer.ctx.Value(tCtxID))
} else if !bytes.Equal(block1.Contents, newBlock1.Contents) {
t.Errorf("Wrote bad contents for block 1: %v", newBlock1.Contents)
} else if !bytes.Equal(data2, newBlock2.Contents) {
t.Errorf("Wrote bad contents for block 2: %v", newBlock2.Contents)
} else if len(newPBlock.IPtrs) != 2 {
t.Errorf("Wrong number of indirect pointers: %d", len(newPBlock.IPtrs))
} else if rmd.UnrefBytes() != 0+6 {
		// The fileid and the last block were modified and marked dirty
t.Errorf("Truncated block not correctly unref'd, unrefBytes = %d",
rmd.UnrefBytes())
}
checkBlockCache(t, config, id, []kbfsblock.ID{rootID, fileID, id1, id2},
map[BlockPointer]BranchName{
fileNode.BlockPointer: p.Branch,
fileBlock.IPtrs[1].BlockPointer: p.Branch,
})
}
func TestKBFSOpsTruncateRemovesABlock(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
uid, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(42)
fileID := kbfsblock.FakeID(43)
id1 := kbfsblock.FakeID(44)
id2 := kbfsblock.FakeID(45)
rootBlock := NewDirBlock().(*DirBlock)
fileInfo := makeBIFromID(fileID, uid)
rootBlock.Children["f"] = DirEntry{
BlockInfo: fileInfo,
EntryInfo: EntryInfo{
Size: 10,
},
}
fileBlock := NewFileBlock().(*FileBlock)
fileBlock.IsInd = true
fileBlock.IPtrs = []IndirectFilePtr{
makeIFP(id1, rmd, config, uid, 5, 0),
makeIFP(id2, rmd, config, uid, 6, 5),
}
block1 := NewFileBlock().(*FileBlock)
block1.Contents = []byte{5, 4, 3, 2, 1}
block2 := NewFileBlock().(*FileBlock)
block2.Contents = []byte{10, 9, 8, 7, 6}
node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
fileNode := pathNode{makeBP(fileID, rmd, config, uid), "f"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
so, err := newSyncOp(fileInfo.BlockPointer)
require.NoError(t, err)
rmd.AddOp(so)
testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
testPutBlockInCache(t, config, fileBlock.IPtrs[0].BlockPointer, id, block1)
testPutBlockInCache(t, config, fileBlock.IPtrs[1].BlockPointer, id, block2)
data := []byte{5, 4, 3, 2}
if err := config.KBFSOps().Truncate(ctx, n, 4); err != nil {
t.Errorf("Got error on truncate: %+v", err)
}
newPBlock := getFileBlockFromCache(t, config, id, fileNode.BlockPointer,
p.Branch)
newBlock1 := getFileBlockFromCache(t, config, id,
fileBlock.IPtrs[0].BlockPointer, p.Branch)
lState := makeFBOLockState()
// merge unref changes so we can easily check the block changes
checkSyncOpInCache(t, config.Codec(), ops, fileNode.BlockPointer,
[]WriteRange{{Off: 4, Len: 0}})
mergeUnrefCache(ops, lState, p, rmd)
if len(ops.nodeCache.PathFromNode(config.observer.localChange).path) !=
len(p.path) {
t.Errorf("Missing or incorrect local update during truncate: %v",
config.observer.localChange)
} else if ctx.Value(tCtxID) != config.observer.ctx.Value(tCtxID) {
t.Errorf("Wrong context value passed in local notify: %v",
config.observer.ctx.Value(tCtxID))
} else if !bytes.Equal(data, newBlock1.Contents) {
t.Errorf("Wrote bad contents: %v", newBlock1.Contents)
} else if len(newPBlock.IPtrs) != 1 {
t.Errorf("Wrong number of indirect pointers: %d", len(newPBlock.IPtrs))
} else if rmd.UnrefBytes() != 0+5+6 {
// The fileid and both blocks were all modified and marked dirty
t.Errorf("Truncated block not correctly unref'd, unrefBytes = %d",
rmd.UnrefBytes())
}
checkBlockCache(t, config, id, []kbfsblock.ID{rootID, fileID, id1, id2},
map[BlockPointer]BranchName{
fileNode.BlockPointer: p.Branch,
fileBlock.IPtrs[0].BlockPointer: p.Branch,
})
}
func TestKBFSOpsTruncateBiggerSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
uid, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(42)
fileID := kbfsblock.FakeID(43)
rootBlock := NewDirBlock().(*DirBlock)
rootBlock.Children["f"] = DirEntry{
BlockInfo: BlockInfo{
BlockPointer: makeBP(fileID, rmd, config, uid),
EncodedSize: 1,
},
EntryInfo: EntryInfo{
Type: File,
},
}
fileBlock := NewFileBlock().(*FileBlock)
fileBlock.Contents = []byte{1, 2, 3, 4, 5}
node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
fileNode := pathNode{makeBP(fileID, rmd, config, uid), "f"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
config.mockBsplit.EXPECT().CopyUntilSplit(
gomock.Any(), gomock.Any(), []byte{0, 0, 0, 0, 0}, int64(5)).
Do(func(block *FileBlock, lb bool, data []byte, off int64) {
block.Contents = append(block.Contents, data...)
}).Return(int64(5))
data := []byte{1, 2, 3, 4, 5, 0, 0, 0, 0, 0}
if err := config.KBFSOps().Truncate(ctx, n, 10); err != nil {
t.Errorf("Got error on truncate: %+v", err)
}
newFileBlock := getFileBlockFromCache(t, config, id, fileNode.BlockPointer,
p.Branch)
if len(ops.nodeCache.PathFromNode(config.observer.localChange).path) !=
len(p.path) {
t.Errorf("Missing or incorrect local update during truncate: %v",
config.observer.localChange)
} else if ctx.Value(tCtxID) != config.observer.ctx.Value(tCtxID) {
t.Errorf("Wrong context value passed in local notify: %v",
config.observer.ctx.Value(tCtxID))
} else if !bytes.Equal(data, newFileBlock.Contents) {
t.Errorf("Wrote bad contents: %v", data)
}
checkBlockCache(t, config, id, []kbfsblock.ID{rootID, fileID},
map[BlockPointer]BranchName{
fileNode.BlockPointer: p.Branch,
})
// A truncate past the end of the file actually translates into a
// write for the difference
checkSyncOpInCache(t, config.Codec(), ops, fileNode.BlockPointer,
[]WriteRange{{Off: 5, Len: 5}})
}
func TestSetExFailNoSuchName(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
u, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(42)
rmd.data.Dir.ID = rootID
aID := kbfsblock.FakeID(43)
rootBlock := NewDirBlock().(*DirBlock)
node := pathNode{makeBP(rootID, rmd, config, u), "p"}
aNode := pathNode{makeBP(aID, rmd, config, u), "a"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, aNode}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
expectedErr := NoSuchNameError{p.tailName()}
// chmod a+x a
if err := config.KBFSOps().SetEx(ctx, n, true); err == nil {
t.Errorf("Got no expected error on setex")
} else if err != expectedErr {
t.Errorf("Got unexpected error on setex: %+v", err)
}
}
// Other SetEx failure cases are all the same as any other block sync
func TestSetMtimeNull(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
u, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(42)
aID := kbfsblock.FakeID(43)
rootBlock := NewDirBlock().(*DirBlock)
oldMtime := time.Now().UnixNano()
rootBlock.Children["a"] = DirEntry{
BlockInfo: makeBIFromID(aID, u),
EntryInfo: EntryInfo{
Type: File,
Mtime: oldMtime,
},
}
node := pathNode{makeBP(rootID, rmd, config, u), "p"}
aNode := pathNode{makeBP(aID, rmd, config, u), "a"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, aNode}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
if err := config.KBFSOps().SetMtime(ctx, n, nil); err != nil {
t.Errorf("Got unexpected error on null setmtime: %+v", err)
}
newP := ops.nodeCache.PathFromNode(n)
if rootBlock.Children["a"].Mtime != oldMtime {
t.Errorf("a has wrong mtime: %v", rootBlock.Children["a"].Mtime)
} else if newP.path[0].ID != p.path[0].ID {
t.Errorf("Got back a changed path for null setmtime test: %v", newP)
}
checkBlockCache(t, config, id, nil, nil)
}
func TestMtimeFailNoSuchName(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
u, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(42)
rmd.data.Dir.ID = rootID
aID := kbfsblock.FakeID(43)
rootBlock := NewDirBlock().(*DirBlock)
node := pathNode{makeBP(rootID, rmd, config, u), "p"}
aNode := pathNode{makeBP(aID, rmd, config, u), "a"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, aNode}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
expectedErr := NoSuchNameError{p.tailName()}
newMtime := time.Now()
if err := config.KBFSOps().SetMtime(ctx, n, &newMtime); err == nil {
t.Errorf("Got no expected error on setmtime")
} else if err != expectedErr {
t.Errorf("Got unexpected error on setmtime: %+v", err)
}
}
func getOrCreateSyncInfo(
ops *folderBranchOps, lState *lockState, de DirEntry) (*syncInfo, error) {
ops.blocks.blockLock.Lock(lState)
defer ops.blocks.blockLock.Unlock(lState)
return ops.blocks.getOrCreateSyncInfoLocked(lState, de)
}
func makeBlockStateDirty(config Config, kmd KeyMetadata, p path,
ptr BlockPointer) {
ops := getOps(config, kmd.TlfID())
lState := makeFBOLockState()
ops.blocks.blockLock.Lock(lState)
defer ops.blocks.blockLock.Unlock(lState)
df := ops.blocks.getOrCreateDirtyFileLocked(lState, p)
df.setBlockDirty(ptr)
}
// SetMtime failure cases are all the same as any other block sync
func TestSyncCleanSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
u, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(42)
rmd.data.Dir.ID = rootID
aID := kbfsblock.FakeID(43)
node := pathNode{makeBP(rootID, rmd, config, u), "p"}
aNode := pathNode{makeBP(aID, rmd, config, u), "a"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, aNode}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
// fsync a
if err := config.KBFSOps().SyncAll(ctx, n.GetFolderBranch()); err != nil {
t.Errorf("Got unexpected error on sync: %+v", err)
}
newP := ops.nodeCache.PathFromNode(n)
if len(newP.path) != len(p.path) {
// should be the exact same path back
t.Errorf("Got a different length path back: %v", newP)
} else {
for i, n := range newP.path {
if n != p.path[i] {
t.Errorf("Node %d differed: %v", i, n)
}
}
}
checkBlockCache(t, config, id, nil, nil)
}
func expectSyncDirtyBlock(config *ConfigMock, kmd KeyMetadata,
p path, ptr BlockPointer, block *FileBlock, splitAt int64,
padSize int, opsLockHeld bool) *gomock.Call {
branch := MasterBranch
if config.mockDirtyBcache != nil {
config.mockDirtyBcache.EXPECT().IsDirty(gomock.Any(), ptrMatcher{ptr},
branch).AnyTimes().Return(true)
config.mockDirtyBcache.EXPECT().Get(gomock.Any(), ptrMatcher{ptr},
branch).AnyTimes().Return(block, nil)
} else {
config.DirtyBlockCache().Put(p.Tlf, ptr, branch, block)
}
if !opsLockHeld {
makeBlockStateDirty(config, kmd, p, ptr)
}
c1 := config.mockBsplit.EXPECT().CheckSplit(block).Return(splitAt)
newID := kbfsblock.FakeIDAdd(ptr.ID, 100)
// Ideally, we'd use the size of block.Contents at the time
// that Ready() is called, but GoMock isn't expressive enough
// for that.
newEncBuf := make([]byte, len(block.Contents)+padSize)
readyBlockData := ReadyBlockData{
buf: newEncBuf,
}
c2 := config.mockBops.EXPECT().Ready(gomock.Any(), kmdMatcher{kmd}, block).
After(c1).Return(newID, len(block.Contents), readyBlockData, nil)
newPtr := BlockPointer{ID: newID}
if config.mockBcache != nil {
config.mockBcache.EXPECT().Put(ptrMatcher{newPtr}, kmd.TlfID(), block, PermanentEntry).Return(nil)
config.mockBcache.EXPECT().DeletePermanent(newID).Return(nil)
} else {
// Nothing to do, since the cache entry is added and
// removed.
}
config.mockBserv.EXPECT().Put(gomock.Any(), kmd.TlfID(), newID,
gomock.Any(), gomock.Any(), gomock.Any()).Return(nil)
return c2
}
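// Note on the gomock limitation mentioned above: newer gomock versions offer
// DoAndReturn, which computes return values at call time, so the encoded
// buffer could be sized from block.Contents as it is when Ready() actually
// runs. A hypothetical variant of that expectation (parameter types assumed
// from the call site; requires a gomock version with DoAndReturn):
//
//	config.mockBops.EXPECT().Ready(gomock.Any(), kmdMatcher{kmd}, block).
//		After(c1).
//		DoAndReturn(func(_ context.Context, _ KeyMetadata, b *FileBlock) (
//			kbfsblock.ID, int, ReadyBlockData, error) {
//			buf := make([]byte, len(b.Contents)+padSize)
//			return newID, len(b.Contents), ReadyBlockData{buf: buf}, nil
//		})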
func putAndCleanAnyBlock(config *ConfigMock, p path) {
config.mockBcache.EXPECT().Put(gomock.Any(), gomock.Any(), gomock.Any(), TransientEntry).
Do(func(ptr BlockPointer, tlf tlf.ID, block Block, lifetime BlockCacheLifetime) {
config.mockDirtyBcache.EXPECT().
Get(gomock.Any(), ptrMatcher{BlockPointer{ID: ptr.ID}},
p.Branch).AnyTimes().Return(nil, NoSuchBlockError{ptr.ID})
config.mockBcache.EXPECT().
Get(ptrMatcher{BlockPointer{ID: ptr.ID}}).
AnyTimes().Return(block, nil)
}).AnyTimes().Return(nil)
config.mockDirtyBcache.EXPECT().Delete(gomock.Any(), gomock.Any(),
p.Branch).AnyTimes().Return(nil)
}
func TestKBFSOpsStatRootSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
id, h, rmd := createNewRMD(t, config, "alice", false)
ops := getOps(config, id)
ops.head = makeImmutableRMDForTest(t, config, rmd, fakeMdID(1))
ops.headStatus = headTrusted
u := h.FirstResolvedWriter()
rootID := kbfsblock.FakeID(42)
node := pathNode{makeBP(rootID, rmd, config, u), "p"}
p := path{FolderBranch{Tlf: id}, []pathNode{node}}
n := nodeFromPath(t, ops, p)
_, err := config.KBFSOps().Stat(ctx, n)
if err != nil {
t.Errorf("Error on Stat: %+v", err)
}
}
func TestKBFSOpsFailingRootOps(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
id, h, rmd := createNewRMD(t, config, "alice", false)
ops := getOps(config, id)
ops.head = makeImmutableRMDForTest(t, config, rmd, fakeMdID(1))
ops.headStatus = headTrusted
u := h.FirstResolvedWriter()
rootID := kbfsblock.FakeID(42)
rmd.data.Dir.BlockPointer = makeBP(rootID, rmd, config, u)
node := pathNode{rmd.data.Dir.BlockPointer, "p"}
p := path{FolderBranch{Tlf: id}, []pathNode{node}}
n := nodeFromPath(t, ops, p)
// TODO: Make sure Read, Write, and Truncate fail also with
// InvalidPathError{}.
err := config.KBFSOps().SetEx(ctx, n, true)
if _, ok := err.(InvalidParentPathError); !ok {
t.Errorf("Unexpected error on SetEx: %+v", err)
}
err = config.KBFSOps().SetMtime(ctx, n, &time.Time{})
if _, ok := err.(InvalidParentPathError); !ok {
t.Errorf("Unexpected error on SetMtime: %+v", err)
}
// TODO: Sync succeeds, but it should fail. Fix this!
}
type testBGObserver struct {
c chan<- struct{}
}
func (t *testBGObserver) LocalChange(ctx context.Context, node Node,
write WriteRange) {
// ignore
}
func (t *testBGObserver) BatchChanges(ctx context.Context,
changes []NodeChange) {
t.c <- struct{}{}
}
func (t *testBGObserver) TlfHandleChange(ctx context.Context,
newHandle *TlfHandle) {
	// ignore
}
// Tests that the background flusher will sync a dirty file if the
// application does not.
func TestKBFSOpsBackgroundFlush(t *testing.T) {
config, _, ctx, cancel := kbfsOpsInitNoMocks(t, "alice", "bob")
defer kbfsTestShutdownNoMocks(t, config, ctx, cancel)
config.noBGFlush = true
// create a file.
rootNode := GetRootNodeOrBust(ctx, t, config, "alice,bob", false)
kbfsOps := config.KBFSOps()
nodeA, _, err := kbfsOps.CreateFile(ctx, rootNode, "a", false, NoExcl)
if err != nil {
t.Fatalf("Couldn't create file: %+v", err)
}
ops := getOps(config, rootNode.GetFolderBranch().Tlf)
oldPtr := ops.nodeCache.PathFromNode(nodeA).tailPointer()
staller := NewNaïveStaller(config)
staller.StallMDOp(StallableMDAfterPut, 1, false)
// start the background flusher
config.SetBGFlushPeriod(1 * time.Millisecond)
go ops.backgroundFlusher()
// Wait for the stall to know the background work is done.
staller.WaitForStallMDOp(StallableMDAfterPut)
staller.UnstallOneMDOp(StallableMDAfterPut)
// Do our own SyncAll now to ensure we wait for the bg flusher to
// finish.
err = kbfsOps.SyncAll(ctx, rootNode.GetFolderBranch())
if err != nil {
t.Fatalf("Couldn't sync all: %+v", err)
}
newPtr := ops.nodeCache.PathFromNode(nodeA).tailPointer()
if oldPtr == newPtr {
t.Fatalf("Background sync didn't update pointers")
}
}
func TestKBFSOpsWriteRenameStat(t *testing.T) {
config, _, ctx, cancel := kbfsOpsInitNoMocks(t, "test_user")
// TODO: Use kbfsTestShutdownNoMocks.
defer kbfsTestShutdownNoMocksNoCheck(t, config, ctx, cancel)
// create a file.
rootNode := GetRootNodeOrBust(ctx, t, config, "test_user", false)
kbfsOps := config.KBFSOps()
fileNode, _, err := kbfsOps.CreateFile(ctx, rootNode, "a", false, NoExcl)
if err != nil {
t.Fatalf("Couldn't create file: %+v", err)
}
// Write to it.
data := []byte{1}
err = kbfsOps.Write(ctx, fileNode, data, 0)
if err != nil {
t.Fatalf("Couldn't write to file: %+v", err)
}
// Stat it.
ei, err := kbfsOps.Stat(ctx, fileNode)
if err != nil {
t.Fatalf("Couldn't stat file: %+v", err)
}
if ei.Size != 1 {
t.Errorf("Stat size %d unexpectedly not 1", ei.Size)
}
// Rename it.
err = kbfsOps.Rename(ctx, rootNode, "a", rootNode, "b")
if err != nil {
t.Fatalf("Couldn't rename; %+v", err)
}
// Stat it again.
newEi, err := kbfsOps.Stat(ctx, fileNode)
if err != nil {
t.Fatalf("Couldn't stat file: %+v", err)
}
// CTime is allowed to change after a rename, but nothing else.
if ei.Type != newEi.Type || ei.Size != newEi.Size ||
ei.Mtime != newEi.Mtime {
t.Errorf("Entry info unexpectedly changed from %+v to %+v", ei, newEi)
}
}
func TestKBFSOpsWriteRenameGetDirChildren(t *testing.T) {
config, _, ctx, cancel := kbfsOpsInitNoMocks(t, "test_user")
// TODO: Use kbfsTestShutdownNoMocks.
defer kbfsTestShutdownNoMocksNoCheck(t, config, ctx, cancel)
// create a file.
rootNode := GetRootNodeOrBust(ctx, t, config, "test_user", false)
kbfsOps := config.KBFSOps()
fileNode, _, err := kbfsOps.CreateFile(ctx, rootNode, "a", false, NoExcl)
if err != nil {
t.Fatalf("Couldn't create file: %+v", err)
}
// Write to it.
data := []byte{1}
err = kbfsOps.Write(ctx, fileNode, data, 0)
if err != nil {
t.Fatalf("Couldn't write to file: %+v", err)
}
// Stat it.
ei, err := kbfsOps.Stat(ctx, fileNode)
if err != nil {
t.Fatalf("Couldn't stat file: %+v", err)
}
if ei.Size != 1 {
t.Errorf("Stat size %d unexpectedly not 1", ei.Size)
}
// Rename it.
err = kbfsOps.Rename(ctx, rootNode, "a", rootNode, "b")
if err != nil {
t.Fatalf("Couldn't rename; %+v", err)
}
// Get the stats via GetDirChildren.
eis, err := kbfsOps.GetDirChildren(ctx, rootNode)
if err != nil {
t.Fatalf("Couldn't stat file: %+v", err)
}
// CTime is allowed to change after a rename, but nothing else.
if newEi := eis["b"]; ei.Type != newEi.Type || ei.Size != newEi.Size ||
ei.Mtime != newEi.Mtime {
t.Errorf("Entry info unexpectedly changed from %+v to %+v",
ei, eis["b"])
}
}
func TestKBFSOpsCreateFileWithArchivedBlock(t *testing.T) {
config, _, ctx, cancel := kbfsOpsInitNoMocks(t, "test_user")
defer kbfsTestShutdownNoMocks(t, config, ctx, cancel)
// create a file.
rootNode := GetRootNodeOrBust(ctx, t, config, "test_user", false)
kbfsOps := config.KBFSOps()
_, _, err := kbfsOps.CreateFile(ctx, rootNode, "a", false, NoExcl)
if err != nil {
t.Fatalf("Couldn't create file: %+v", err)
}
// Remove the file, which will archive the block
err = kbfsOps.RemoveEntry(ctx, rootNode, "a")
if err != nil {
t.Fatalf("Couldn't remove file: %+v", err)
}
// Wait for the archiving to finish
err = kbfsOps.SyncFromServerForTesting(ctx, rootNode.GetFolderBranch())
if err != nil {
t.Fatalf("Couldn't sync from server")
}
// Create a second file, which will use the same initial block ID
// from the cache, even though it's been archived, and will be
// forced to try again.
_, _, err = kbfsOps.CreateFile(ctx, rootNode, "b", false, NoExcl)
if err != nil {
t.Fatalf("Couldn't create second file: %+v", err)
}
}
func TestKBFSOpsMultiBlockSyncWithArchivedBlock(t *testing.T) {
config, _, ctx, cancel := kbfsOpsInitNoMocks(t, "test_user")
defer kbfsTestShutdownNoMocks(t, config, ctx, cancel)
// Make the blocks small, with multiple levels of indirection, but
// make the unembedded size large, so we don't create thousands of
// unembedded block change blocks.
blockSize := int64(5)
bsplit := &BlockSplitterSimple{blockSize, 2, 100 * 1024}
config.SetBlockSplitter(bsplit)
// create a file.
rootNode := GetRootNodeOrBust(ctx, t, config, "test_user", false)
kbfsOps := config.KBFSOps()
fileNode, _, err := kbfsOps.CreateFile(ctx, rootNode, "a", false, NoExcl)
if err != nil {
t.Fatalf("Couldn't create file: %+v", err)
}
// Write a few blocks
data := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
err = kbfsOps.Write(ctx, fileNode, data, 0)
if err != nil {
t.Fatalf("Couldn't write file: %+v", err)
}
err = kbfsOps.SyncAll(ctx, fileNode.GetFolderBranch())
if err != nil {
t.Fatalf("Couldn't sync file: %+v", err)
}
// Now overwrite those blocks to archive them
newData := []byte{11, 12, 13, 14, 15, 16, 17, 18, 19, 20}
err = kbfsOps.Write(ctx, fileNode, newData, 0)
if err != nil {
t.Fatalf("Couldn't write file: %+v", err)
}
err = kbfsOps.SyncAll(ctx, fileNode.GetFolderBranch())
if err != nil {
t.Fatalf("Couldn't sync file: %+v", err)
}
// Wait for the archiving to finish
err = kbfsOps.SyncFromServerForTesting(ctx, rootNode.GetFolderBranch())
if err != nil {
t.Fatalf("Couldn't sync from server")
}
// Now write the original first block, which has been archived,
// and make sure it works.
err = kbfsOps.Write(ctx, fileNode, data[0:blockSize], 0)
if err != nil {
t.Fatalf("Couldn't write file: %+v", err)
}
err = kbfsOps.SyncAll(ctx, fileNode.GetFolderBranch())
if err != nil {
t.Fatalf("Couldn't sync file: %+v", err)
}
}
type corruptBlockServer struct {
BlockServer
}
func (cbs corruptBlockServer) Get(
ctx context.Context, tlfID tlf.ID, id kbfsblock.ID, context kbfsblock.Context) (
[]byte, kbfscrypto.BlockCryptKeyServerHalf, error) {
data, keyServerHalf, err := cbs.BlockServer.Get(ctx, tlfID, id, context)
if err != nil {
return nil, kbfscrypto.BlockCryptKeyServerHalf{}, err
}
return append(data, 0), keyServerHalf, nil
}
func TestKBFSOpsFailToReadUnverifiableBlock(t *testing.T) {
config, _, ctx, cancel := kbfsOpsInitNoMocks(t, "test_user")
defer kbfsTestShutdownNoMocks(t, config, ctx, cancel)
config.SetBlockServer(&corruptBlockServer{
BlockServer: config.BlockServer(),
})
// create a file.
rootNode := GetRootNodeOrBust(ctx, t, config, "test_user", false)
kbfsOps := config.KBFSOps()
_, _, err := kbfsOps.CreateFile(ctx, rootNode, "a", false, NoExcl)
if err != nil {
t.Fatalf("Couldn't create file: %+v", err)
}
// Read using a different "device"
config2 := ConfigAsUser(config, "test_user")
defer CheckConfigAndShutdown(ctx, t, config2)
// Shutdown the mdserver explicitly before the state checker tries to run
defer config2.MDServer().Shutdown()
rootNode2 := GetRootNodeOrBust(ctx, t, config2, "test_user", false)
// Lookup the file, which should fail on block ID verification
kbfsOps2 := config2.KBFSOps()
_, _, err = kbfsOps2.Lookup(ctx, rootNode2, "a")
if _, ok := errors.Cause(err).(kbfshash.HashMismatchError); !ok {
t.Fatalf("Could unexpectedly lookup the file: %+v", err)
}
}
// Test that the size of a single empty block doesn't change. If this
// test ever fails, consult max or strib before merging.
func TestKBFSOpsEmptyTlfSize(t *testing.T) {
config, _, ctx, cancel := kbfsOpsInitNoMocks(t, "test_user")
defer kbfsTestShutdownNoMocks(t, config, ctx, cancel)
// Create a TLF.
rootNode := GetRootNodeOrBust(ctx, t, config, "test_user", false)
status, _, err := config.KBFSOps().FolderStatus(ctx,
rootNode.GetFolderBranch())
if err != nil {
t.Fatalf("Couldn't get folder status: %+v", err)
}
if status.DiskUsage != 313 {
t.Fatalf("Disk usage of an empty TLF is no longer 313. " +
"Talk to max or strib about why this matters.")
}
}
type cryptoFixedTlf struct {
Crypto
tlf tlf.ID
}
func (c cryptoFixedTlf) MakeRandomTlfID(isPublic bool) (tlf.ID, error) {
return c.tlf, nil
}
// TestKBFSOpsMaliciousMDServerRange tries to trick KBFSOps into
// accepting bad MDs.
func TestKBFSOpsMaliciousMDServerRange(t *testing.T) {
config1, _, ctx, cancel := kbfsOpsInitNoMocks(t, "alice", "mallory")
// TODO: Use kbfsTestShutdownNoMocks.
defer kbfsTestShutdownNoMocksNoCheck(t, config1, ctx, cancel)
// Create alice's TLF.
rootNode1 := GetRootNodeOrBust(ctx, t, config1, "alice", false)
fb1 := rootNode1.GetFolderBranch()
kbfsOps1 := config1.KBFSOps()
_, _, err := kbfsOps1.CreateFile(ctx, rootNode1, "dummy.txt", false, NoExcl)
require.NoError(t, err)
err = kbfsOps1.SyncAll(ctx, rootNode1.GetFolderBranch())
require.NoError(t, err)
// Create mallory's fake TLF using the same TLF ID as alice's.
config2 := ConfigAsUser(config1, "mallory")
crypto2 := cryptoFixedTlf{config2.Crypto(), fb1.Tlf}
config2.SetCrypto(crypto2)
mdserver2, err := NewMDServerMemory(mdServerLocalConfigAdapter{config2})
require.NoError(t, err)
config2.MDServer().Shutdown()
config2.SetMDServer(mdserver2)
config2.SetMDCache(NewMDCacheStandard(1))
rootNode2 := GetRootNodeOrBust(ctx, t, config2, "alice,mallory", false)
require.Equal(t, fb1.Tlf, rootNode2.GetFolderBranch().Tlf)
kbfsOps2 := config2.KBFSOps()
// Add some operations to get mallory's TLF to have a higher
// MetadataVersion.
_, _, err = kbfsOps2.CreateFile(
ctx, rootNode2, "dummy.txt", false, NoExcl)
require.NoError(t, err)
err = kbfsOps2.SyncAll(ctx, rootNode2.GetFolderBranch())
require.NoError(t, err)
err = kbfsOps2.RemoveEntry(ctx, rootNode2, "dummy.txt")
require.NoError(t, err)
err = kbfsOps2.SyncAll(ctx, rootNode2.GetFolderBranch())
require.NoError(t, err)
// Now route alice's TLF to mallory's MD server.
config1.SetMDServer(mdserver2.copy(mdServerLocalConfigAdapter{config1}))
// Simulate the server triggering alice to update.
config1.SetKeyCache(NewKeyCacheStandard(1))
err = kbfsOps1.SyncFromServerForTesting(ctx, fb1)
// TODO: We can actually fake out the PrevRoot pointer, too
// and then we'll be caught by the handle check. But when we
// have MDOps do the handle check, that'll trigger first.
require.IsType(t, MDPrevRootMismatch{}, err)
}
// TODO: Test malicious mdserver and rekey flow against wrong
// TLFs being introduced upon rekey.
// Test that if GetTLFCryptKeys fails to create a TLF, the second
// attempt will also fail with the same error. Regression test for
// KBFS-1929.
func TestGetTLFCryptKeysAfterFirstError(t *testing.T) {
config, _, ctx, cancel := kbfsOpsInitNoMocks(t, "alice")
// TODO: Use kbfsTestShutdownNoMocks.
defer kbfsTestShutdownNoMocksNoCheck(t, config, ctx, cancel)
createErr := errors.New("Cannot create this TLF")
mdserver := &shimMDServer{
MDServer: config.MDServer(),
nextErr: createErr,
}
config.SetMDServer(mdserver)
h := parseTlfHandleOrBust(t, config, "alice", false)
_, _, err := config.KBFSOps().GetTLFCryptKeys(ctx, h)
if err != createErr {
t.Fatalf("Got unexpected error when creating TLF: %+v", err)
}
// Reset the error.
mdserver.nextErr = createErr
// Should get the same error, otherwise something's wrong.
_, _, err = config.KBFSOps().GetTLFCryptKeys(ctx, h)
if err != createErr {
t.Fatalf("Got unexpected error when creating TLF: %+v", err)
}
}
func TestForceFastForwardOnEmptyTLF(t *testing.T) {
config, _, ctx, cancel := kbfsOpsInitNoMocks(t, "alice", "bob")
// TODO: Use kbfsTestShutdownNoMocks.
defer kbfsTestShutdownNoMocksNoCheck(t, config, ctx, cancel)
// Look up bob's public folder.
h := parseTlfHandleOrBust(t, config, "bob", true)
_, _, err := config.KBFSOps().GetOrCreateRootNode(ctx, h, MasterBranch)
if _, ok := err.(WriteAccessError); !ok {
t.Fatalf("Unexpected err reading a public TLF: %+v", err)
}
// There's only one folder at this point.
kbfsOps := config.KBFSOps().(*KBFSOpsStandard)
kbfsOps.opsLock.RLock()
var ops *folderBranchOps
for _, fbo := range kbfsOps.ops {
ops = fbo
break
}
kbfsOps.opsLock.RUnlock()
// FastForward shouldn't do anything, since the TLF hasn't been
// cleared yet.
config.KBFSOps().ForceFastForward(ctx)
err = ops.forcedFastForwards.Wait(ctx)
if err != nil {
t.Fatalf("Couldn't wait for fast forward: %+v", err)
}
}
// Regression test for KBFS-2161.
func TestDirtyPathsAfterRemoveDir(t *testing.T) {
config, _, ctx, cancel := kbfsOpsInitNoMocks(t, "test_user")
defer kbfsTestShutdownNoMocks(t, config, ctx, cancel)
rootNode := GetRootNodeOrBust(ctx, t, config, "test_user", false)
kbfsOps := config.KBFSOps()
// Create a/b/c.
nodeA, _, err := kbfsOps.CreateDir(ctx, rootNode, "a")
require.NoError(t, err)
err = kbfsOps.SyncAll(ctx, rootNode.GetFolderBranch())
require.NoError(t, err)
nodeB, _, err := kbfsOps.CreateDir(ctx, nodeA, "b")
require.NoError(t, err)
err = kbfsOps.SyncAll(ctx, rootNode.GetFolderBranch())
require.NoError(t, err)
nodeC, _, err := kbfsOps.CreateFile(ctx, nodeB, "c", false, NoExcl)
require.NoError(t, err)
err = kbfsOps.SyncAll(ctx, rootNode.GetFolderBranch())
require.NoError(t, err)
// Remove node c from the block cache and the server, to guarantee
// it's not needed during the removal.
ops := getOps(config, rootNode.GetFolderBranch().Tlf)
ptrC := ops.nodeCache.PathFromNode(nodeC).tailPointer()
err = config.BlockCache().DeleteTransient(
ptrC, rootNode.GetFolderBranch().Tlf)
require.NoError(t, err)
// Remove c.
err = kbfsOps.RemoveEntry(ctx, nodeB, "c")
require.NoError(t, err)
// Now a/b should be dirty.
status, _, err := kbfsOps.FolderStatus(ctx, rootNode.GetFolderBranch())
require.NoError(t, err)
require.Len(t, status.DirtyPaths, 1)
require.Equal(t, "test_user/a/b", status.DirtyPaths[0])
// Now remove b, and make sure a/b is no longer dirty.
err = kbfsOps.RemoveDir(ctx, nodeA, "b")
require.NoError(t, err)
status, _, err = kbfsOps.FolderStatus(ctx, rootNode.GetFolderBranch())
require.NoError(t, err)
require.Len(t, status.DirtyPaths, 1)
require.Equal(t, "test_user/a", status.DirtyPaths[0])
// Also make sure we can no longer create anything in the removed
// directory.
_, _, err = kbfsOps.CreateDir(ctx, nodeB, "d")
require.IsType(t, UnsupportedOpInUnlinkedDirError{}, errors.Cause(err))
err = kbfsOps.SyncAll(ctx, rootNode.GetFolderBranch())
require.NoError(t, err)
status, _, err = kbfsOps.FolderStatus(ctx, rootNode.GetFolderBranch())
require.NoError(t, err)
require.Len(t, status.DirtyPaths, 0)
// If the block made it back into the cache, we have a problem.
// It shouldn't be needed for removal.
_, err = config.BlockCache().Get(ptrC)
require.NotNil(t, err)
}
| 1 | 16,761 | Removed now-unneeded param. | keybase-kbfs | go |
@@ -107,6 +107,7 @@ VIEW DATA STRUCTURES
go-filecoin show - Get human-readable representations of filecoin objects
NETWORK COMMANDS
+ go-filecoin bitswap - Explore libp2p bitswap
go-filecoin bootstrap - Interact with bootstrap addresses
go-filecoin dht - Interact with the dht
go-filecoin id - Show info about the network peers | 1 | package commands
import (
"context"
"fmt"
"net"
"net/url"
"os"
"path/filepath"
"syscall"
"github.com/ipfs/go-ipfs-cmdkit"
"github.com/ipfs/go-ipfs-cmds"
"github.com/ipfs/go-ipfs-cmds/cli"
cmdhttp "github.com/ipfs/go-ipfs-cmds/http"
"github.com/mitchellh/go-homedir"
ma "github.com/multiformats/go-multiaddr"
"github.com/multiformats/go-multiaddr-net"
"github.com/pkg/errors"
"github.com/filecoin-project/go-filecoin/repo"
"github.com/filecoin-project/go-filecoin/types"
)
const (
// OptionAPI is the name of the option for specifying the api port.
OptionAPI = "cmdapiaddr"
// OptionRepoDir is the name of the option for specifying the directory of the repo.
OptionRepoDir = "repodir"
// APIPrefix is the prefix for the http version of the api.
APIPrefix = "/api"
	// OfflineMode, when set, tells the daemon not to connect this Filecoin node to the network
OfflineMode = "offline"
// ELStdout tells the daemon to write event logs to stdout.
ELStdout = "elstdout"
// AutoSealIntervalSeconds configures the daemon to check for and seal any staged sectors on an interval.
AutoSealIntervalSeconds = "auto-seal-interval-seconds"
// SwarmAddress is the multiaddr for this Filecoin node
SwarmAddress = "swarmlisten"
// SwarmPublicRelayAddress is a public address that the filecoin node
// will listen on if it is operating as a relay. We use this to specify
// the public ip:port of a relay node that is sitting behind a static
// NAT mapping.
SwarmPublicRelayAddress = "swarmrelaypublic"
// BlockTime is the duration string of the block time the daemon will
// run with. TODO: this should eventually be more explicitly grouped
// with testing as we won't be able to set blocktime in production.
BlockTime = "block-time"
	// PeerKeyFile is the path of the file containing the key to use for the new node's libp2p identity
PeerKeyFile = "peerkeyfile"
	// WithMiner when set, creates a custom genesis block with a pre-generated miner account; requires running the daemon in dev mode (--dev)
WithMiner = "with-miner"
	// DefaultAddress when set, sets the daemon's default address to the provided address
DefaultAddress = "default-address"
// GenesisFile is the path of file containing archive of genesis block DAG data
GenesisFile = "genesisfile"
// DevnetTest populates config bootstrap addrs with the dns multiaddrs of the test devnet and other test devnet specific bootstrap parameters
DevnetTest = "devnet-test"
// DevnetNightly populates config bootstrap addrs with the dns multiaddrs of the nightly devnet and other nightly devnet specific bootstrap parameters
DevnetNightly = "devnet-nightly"
// DevnetUser populates config bootstrap addrs with the dns multiaddrs of the user devnet and other user devnet specific bootstrap parameters
DevnetUser = "devnet-user"
	// IsRelay when set causes the daemon to provide libp2p relay
// services allowing other filecoin nodes behind NATs to talk directly.
IsRelay = "is-relay"
)
// command object for the local cli
var rootCmd = &cmds.Command{
Helptext: cmdkit.HelpText{
Tagline: "A decentralized storage network",
Subcommands: `
START RUNNING FILECOIN
go-filecoin init - Initialize a filecoin repo
go-filecoin config <key> [<value>] - Get and set filecoin config values
go-filecoin daemon - Start a long-running daemon process
go-filecoin wallet - Manage your filecoin wallets
go-filecoin address - Interact with addresses
STORE AND RETRIEVE DATA
go-filecoin client - Make deals, store data, retrieve data
go-filecoin retrieval-client - Manage retrieval client operations
MINE
go-filecoin miner - Manage a single miner actor
go-filecoin mining - Manage all mining operations for a node
VIEW DATA STRUCTURES
go-filecoin chain - Inspect the filecoin blockchain
go-filecoin dag - Interact with IPLD DAG objects
go-filecoin show - Get human-readable representations of filecoin objects
NETWORK COMMANDS
go-filecoin bootstrap - Interact with bootstrap addresses
go-filecoin dht - Interact with the dht
go-filecoin id - Show info about the network peers
go-filecoin ping <peer ID>... - Send echo request packets to p2p network members
go-filecoin swarm - Interact with the swarm
go-filecoin stats - Monitor statistics on your network usage
ACTOR COMMANDS
go-filecoin actor - Interact with actors. Actors are built-in smart contracts.
go-filecoin paych - Payment channel operations
MESSAGE COMMANDS
go-filecoin message - Manage messages
go-filecoin mpool - Manage the message pool
TOOL COMMANDS
go-filecoin log - Interact with the daemon event log output.
go-filecoin version - Show go-filecoin version information
`,
},
Options: []cmdkit.Option{
cmdkit.StringOption(OptionAPI, "set the api port to use"),
cmdkit.StringOption(OptionRepoDir, "set the directory of the repo, defaults to ~/.filecoin"),
cmds.OptionEncodingType,
cmdkit.BoolOption("help", "Show the full command help text."),
cmdkit.BoolOption("h", "Show a short version of the command help text."),
},
Subcommands: make(map[string]*cmds.Command),
}
// command object for the daemon
var rootCmdDaemon = &cmds.Command{
Subcommands: make(map[string]*cmds.Command),
}
// all top level commands, not available to daemon
var rootSubcmdsLocal = map[string]*cmds.Command{
"daemon": daemonCmd,
"init": initCmd,
}
// all top level commands, available on daemon. set during init() to avoid configuration loops.
var rootSubcmdsDaemon = map[string]*cmds.Command{
"actor": actorCmd,
"address": addrsCmd,
"bootstrap": bootstrapCmd,
"chain": chainCmd,
"config": configCmd,
"client": clientCmd,
"dag": dagCmd,
"dht": dhtCmd,
"id": idCmd,
"log": logCmd,
"message": msgCmd,
"miner": minerCmd,
"mining": miningCmd,
"mpool": mpoolCmd,
"outbox": outboxCmd,
"paych": paymentChannelCmd,
"ping": pingCmd,
"retrieval-client": retrievalClientCmd,
"show": showCmd,
"stats": statsCmd,
"swarm": swarmCmd,
"version": versionCmd,
"wallet": walletCmd,
}
func init() {
for k, v := range rootSubcmdsLocal {
rootCmd.Subcommands[k] = v
}
for k, v := range rootSubcmdsDaemon {
rootCmd.Subcommands[k] = v
rootCmdDaemon.Subcommands[k] = v
}
}
// Run processes the arguments and stdin
func Run(args []string, stdin, stdout, stderr *os.File) (int, error) {
err := cli.Run(context.Background(), rootCmd, args, stdin, stdout, stderr, buildEnv, makeExecutor)
if err == nil {
return 0, nil
}
if exerr, ok := err.(cli.ExitError); ok {
return int(exerr), nil
}
return 1, err
}
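// A hypothetical caller, to illustrate the (exit code, error) contract Run
// exposes to package main (names and import path assumed; the real main
// lives elsewhere in the repo):
//
//	func main() {
//		code, err := commands.Run(os.Args, os.Stdin, os.Stdout, os.Stderr)
//		if err != nil {
//			fmt.Fprintln(os.Stderr, err)
//		}
//		os.Exit(code)
//	}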
func buildEnv(ctx context.Context, req *cmds.Request) (cmds.Environment, error) {
return &Env{ctx: ctx}, nil
}
type executor struct {
api string
exec cmds.Executor
}
func (e *executor) Execute(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error {
if e.api == "" {
return e.exec.Execute(req, re, env)
}
client := cmdhttp.NewClient(e.api, cmdhttp.ClientWithAPIPrefix(APIPrefix))
res, err := client.Send(req)
if err != nil {
if isConnectionRefused(err) {
return cmdkit.Errorf(cmdkit.ErrFatal, "Connection Refused. Is the daemon running?")
}
return cmdkit.Errorf(cmdkit.ErrFatal, err.Error())
}
// copy received result into cli emitter
err = cmds.Copy(re, res)
if err != nil {
return cmdkit.Errorf(cmdkit.ErrFatal|cmdkit.ErrNormal, err.Error())
}
return nil
}
func makeExecutor(req *cmds.Request, env interface{}) (cmds.Executor, error) {
isDaemonRequired := requiresDaemon(req)
var api string
if isDaemonRequired {
var err error
api, err = getAPIAddress(req)
if err != nil {
return nil, err
}
}
if api == "" && isDaemonRequired {
return nil, ErrMissingDaemon
}
return &executor{
api: api,
exec: cmds.NewExecutor(rootCmd),
}, nil
}
func getAPIAddress(req *cmds.Request) (string, error) {
var rawAddr string
	// Environment variables take second-highest precedence.
if envapi := os.Getenv("FIL_API"); envapi != "" {
rawAddr = envapi
}
	// The command-line flag takes highest precedence, so it is checked last and overrides the env var.
if apiAddress, ok := req.Options[OptionAPI].(string); ok && apiAddress != "" {
rawAddr = apiAddress
}
// we will read the api file if no other option is given.
if len(rawAddr) == 0 {
repoDir, _ := req.Options[OptionRepoDir].(string)
repoDir = repo.GetRepoDir(repoDir)
rawPath := filepath.Join(filepath.Clean(repoDir), repo.APIFile)
apiFilePath, err := homedir.Expand(rawPath)
if err != nil {
return "", errors.Wrap(err, fmt.Sprintf("can't resolve local repo path %s", rawPath))
}
rawAddr, err = repo.APIAddrFromFile(apiFilePath)
if err != nil {
return "", errors.Wrap(err, "can't find API endpoint address in environment, command-line, or local repo (is the daemon running?)")
}
}
maddr, err := ma.NewMultiaddr(rawAddr)
if err != nil {
return "", errors.Wrap(err, fmt.Sprintf("unable to convert API endpoint address %s to a multiaddr", rawAddr))
}
_, host, err := manet.DialArgs(maddr)
if err != nil {
return "", errors.Wrap(err, fmt.Sprintf("unable to dial API endpoint address %s", maddr))
}
return host, nil
}
func requiresDaemon(req *cmds.Request) bool {
if req.Command == daemonCmd {
return false
}
if req.Command == initCmd {
return false
}
return true
}
func isConnectionRefused(err error) bool {
urlErr, ok := err.(*url.Error)
if !ok {
return false
}
opErr, ok := urlErr.Err.(*net.OpError)
if !ok {
return false
}
syscallErr, ok := opErr.Err.(*os.SyscallError)
if !ok {
return false
}
return syscallErr.Err == syscall.ECONNREFUSED
}
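// Sketch: on Go 1.13+ the manual unwrapping above collapses to one errors.Is
// call, since *url.Error, *net.OpError and *os.SyscallError all implement
// Unwrap (this assumes the standard-library "errors" package rather than the
// github.com/pkg/errors package imported by this file):
//
//	func isConnectionRefused(err error) bool {
//		return errors.Is(err, syscall.ECONNREFUSED)
//	}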
var priceOption = cmdkit.StringOption("gas-price", "Price (FIL e.g. 0.00013) to pay for each GasUnits consumed mining this message")
var limitOption = cmdkit.Uint64Option("gas-limit", "Maximum number of GasUnits this message is allowed to consume")
var previewOption = cmdkit.BoolOption("preview", "Preview the Gas cost of this command without actually executing it")
func parseGasOptions(req *cmds.Request) (types.AttoFIL, types.GasUnits, bool, error) {
priceOption := req.Options["gas-price"]
if priceOption == nil {
return types.AttoFIL{}, types.NewGasUnits(0), false, errors.New("price option is required")
}
price, ok := types.NewAttoFILFromFILString(priceOption.(string))
if !ok {
return types.AttoFIL{}, types.NewGasUnits(0), false, errors.New("invalid gas price (specify FIL as a decimal number)")
}
limitOption := req.Options["gas-limit"]
if limitOption == nil {
return types.AttoFIL{}, types.NewGasUnits(0), false, errors.New("limit option is required")
}
gasLimitInt, ok := limitOption.(uint64)
if !ok {
msg := fmt.Sprintf("invalid gas limit: %s", limitOption)
return types.AttoFIL{}, types.NewGasUnits(0), false, errors.New(msg)
}
preview, _ := req.Options["preview"].(bool)
return *price, types.NewGasUnits(gasLimitInt), preview, nil
}
| 1 | 18,342 | (NON-blocking, this can be tracked in follow up issue) @anorth @mishmosh is the toplevel getting too crowded? Should we have a `network` grandparent command, or maybe a `stats` command? | filecoin-project-venus | go |
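One way to address the reviewer's question above about the crowded top level, sketched in the same go-ipfs-cmds style the file already uses (the `network` name and its member list are illustrative, not from the source; `bitswapCmd` is assumed from the bitswap subcommand this patch documents):

var networkCmd = &cmds.Command{
	Helptext: cmdkit.HelpText{
		Tagline: "Inspect and manage the p2p network",
	},
	Subcommands: map[string]*cmds.Command{
		"bitswap":   bitswapCmd,
		"bootstrap": bootstrapCmd,
		"dht":       dhtCmd,
		"id":        idCmd,
		"ping":      pingCmd,
		"stats":     statsCmd,
		"swarm":     swarmCmd,
	},
}

Registering networkCmd in rootSubcmdsDaemon would then replace the individual entries, at the cost of one extra level of nesting for users.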
@@ -85,7 +85,7 @@ storiesOf( 'Analytics Module', module )
title={ __( 'Top acquisition sources over the last 28 days', 'google-site-kit' ) }
headerCtaLink="https://analytics.google.com"
headerCtaLabel={ __( 'See full stats in Analytics', 'google-site-kit' ) }
- footerCtaLabel={ __( 'Analytics', 'google-site-kit' ) }
+ footerCtaLabel={ _x( 'Analytics', 'Service name', 'google-site-kit' ) }
footerCtaLink="https://analytics.google.com"
>
<div className="mdc-layout-grid"> | 1 | /**
* External dependencies
*/
import { storiesOf } from '@storybook/react';
/**
* WordPress dependencies
*/
import { __ } from '@wordpress/i18n';
import Layout from 'GoogleComponents/layout/layout';
import AnalyticsDashboardWidgetOverview from 'GoogleModules/analytics/dashboard/dashboard-widget-overview';
import AnalyticsDashboardWidgetSiteStats from 'GoogleModules/analytics/dashboard/dashboard-widget-sitestats';
import DashboardAcquisitionPieChart from 'GoogleModules/analytics/dashboard/dashboard-widget-acquisition-piechart';
import AnalyticsDashboardWidgetTopAcquisitionSources from 'GoogleModules/analytics/dashboard/dashboard-widget-top-acquisition-sources-table';
/**
* Internal dependencies
*/
import { googlesitekit as analyticsData } from '../.storybook/data/wp-admin-admin.php-page=googlesitekit-module-analytics-googlesitekit';
storiesOf( 'Analytics Module', module )
.add( 'Audience Overview Chart', () => {
window.googlesitekit = analyticsData;
const selectedStats = [
0,
];
const series = {
0: {
color: '#4285f4',
targetAxisIndex: 0,
},
1: {
color: '#4285f4',
targetAxisIndex: 0,
lineDashStyle: [
3,
3,
],
lineWidth: 1,
},
};
const vAxes = null;
// Load the datacache with data.
setTimeout( () => {
wp.hooks.doAction(
'googlesitekit.moduleLoaded',
'Single'
);
}, 250 );
return (
<Layout
header
title={ __( 'Audience overview for the last 28 days', 'google-site-kit' ) }
headerCtaLabel={ __( 'See full stats in Analytics', 'google-site-kit' ) }
headerCtaLink="http://analytics.google.com"
>
<AnalyticsDashboardWidgetOverview
selectedStats={ selectedStats }
handleDataError={ () => {} }
/>
<AnalyticsDashboardWidgetSiteStats
selectedStats={ selectedStats }
series={ series }
vAxes={ vAxes }
/>
</Layout>
);
},
{ options: { readySelector: '.googlesitekit-line-chart > div[style="position: relative;"]' } } )
.add( 'Top Acquisition Pie Chart', () => {
window.googlesitekit = analyticsData;
// Load the datacache with data.
setTimeout( () => {
wp.hooks.doAction(
'googlesitekit.moduleLoaded',
'Single'
);
}, 250 );
return (
<Layout
header
footer
title={ __( 'Top acquisition sources over the last 28 days', 'google-site-kit' ) }
headerCtaLink="https://analytics.google.com"
headerCtaLabel={ __( 'See full stats in Analytics', 'google-site-kit' ) }
footerCtaLabel={ __( 'Analytics', 'google-site-kit' ) }
footerCtaLink="https://analytics.google.com"
>
<div className="mdc-layout-grid">
<div className="mdc-layout-grid__inner">
<div className="
mdc-layout-grid__cell
mdc-layout-grid__cell--span-4-desktop
mdc-layout-grid__cell--span-8-tablet
mdc-layout-grid__cell--span-4-phone
">
<DashboardAcquisitionPieChart />
</div>
<div className="
mdc-layout-grid__cell
mdc-layout-grid__cell--span-8-desktop
mdc-layout-grid__cell--span-8-tablet
mdc-layout-grid__cell--span-4-phone
">
<AnalyticsDashboardWidgetTopAcquisitionSources />
</div>
</div>
</div>
</Layout>
);
},
{ options: { readySelector: '.googlesitekit-line-chart > div[style="position: relative;"]' } } );
| 1 | 25,370 | The `_x` function needs to be imported at the top of the file (in addition to `__`) | google-site-kit-wp | js |
@@ -78,8 +78,9 @@ namespace pwiz.SkylineTestFunctional
RunUI(() =>
{
propDialog.SetQValueTo(0.003f);
+ propDialog.OkDialog();
});
- OkDialog(propDialog, propDialog.OkDialog);
+ WaitForClosedForm(propDialog);
WaitForCondition(() => (DetectionsGraphController.Settings.QValueCutoff == 0.003f));
AssertDataCorrect(pane, 0, 0.003f);
| 1 | /*
* Original author: Rita Chupalov <ritach .at. uw.edu>,
* MacCoss Lab, Department of Genome Sciences, UW
*
* Copyright 2020 University of Washington - Seattle, WA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
using System;
using System.Globalization;
using System.Linq;
using Microsoft.VisualStudio.TestTools.UnitTesting;
using pwiz.Common.Collections;
using pwiz.SkylineTestUtil;
using pwiz.Skyline.Controls.Graphs;
using pwiz.Skyline.Model;
using pwiz.Skyline.Properties;
using pwiz.Skyline.Util.Extensions;
namespace pwiz.SkylineTestFunctional
{
[TestClass]
public class DetectionsPlotTest : AbstractFunctionalTestEx
{
private static readonly int[][] REF_DATA =
{
new[] { 114, 113, 113, 112, 112, 113}, //q = 0.003, Peptides
new[] { 123, 122, 122, 121, 121, 121}, //q = 0.003, Precursors
new[] { 111, 109, 110, 110, 109, 110 }, //q = 0.001, Peptides
new[] { 120, 118, 119, 119, 117, 117 }, //q = 0.001, Precursors
new[] { 110, 108, 109, 109, 108, 109 }, //q = 0.001, Peptides, after update
new[] { 119, 117, 118, 118, 116, 116 } //q = 0.001, Precursors, after update
};
[TestMethod]
public void TestDetectionsPlot()
{
TestFilesZip = @"TestFunctional/DetectionsPlotTest.zip";
RunFunctionalTest();
}
protected override void DoTest()
{
OpenDocument(TestFilesDir.GetTestPath(@"DIA-TTOF-tutorial.sky"));
WaitForDocumentLoaded();
RunUI(() => { SkylineWindow.ShowDetectionsReplicateComparisonGraph(); });
WaitForGraphs();
GraphSummary graph = SkylineWindow.DetectionsPlot;
var toolbar = graph.Toolbar as DetectionsToolbar;
Assert.IsNotNull(toolbar);
RunUI(() => { toolbar.CbLevel.SelectedItem = DetectionsGraphController.TargetType.PRECURSOR; });
WaitForGraphs();
DetectionsPlotPane pane;
Assert.IsTrue(graph.TryGetGraphPane(out pane));
Assert.IsTrue(pane.HasToolbar);
//use properties dialog to update the q-value
var propDialog = ShowDialog<DetectionToolbarProperties>(() =>
{
toolbar.pbProperties_Click(graph.GraphControl, new EventArgs());
});
//verify data correct for 2 q-values
RunUI(() =>
{
propDialog.SetQValueTo(0.003f);
});
OkDialog(propDialog, propDialog.OkDialog);
WaitForCondition(() => (DetectionsGraphController.Settings.QValueCutoff == 0.003f));
AssertDataCorrect(pane, 0, 0.003f);
//use properties dialog to update the q-value
propDialog = ShowDialog<DetectionToolbarProperties>(() =>
{
toolbar.pbProperties_Click(graph.GraphControl, new EventArgs());
});
RunUI(() =>
{
propDialog.SetQValueTo(0.001f);
});
OkDialog(propDialog, propDialog.OkDialog);
WaitForCondition(() => (DetectionsGraphController.Settings.QValueCutoff == 0.001f));
AssertDataCorrect(pane, 2, 0.001f);
//verify the number of the bars on the plot
RunUI(() =>
{
Assert.IsTrue(
pane.CurveList[0].IsBar && pane.CurveList[0].Points.Count == REF_DATA[0].Length);
});
string[] tipText =
{
Resources.DetectionPlotPane_Tooltip_Replicate + TextUtil.SEPARATOR_TSV_STR + @"2_SW-B",
string.Format(Resources.DetectionPlotPane_Tooltip_Count, DetectionsGraphController.TargetType.PRECURSOR) +
TextUtil.SEPARATOR_TSV_STR + 118.ToString( CultureInfo.CurrentCulture),
Resources.DetectionPlotPane_Tooltip_CumulativeCount + TextUtil.SEPARATOR_TSV_STR +
123.ToString( CultureInfo.CurrentCulture),
Resources.DetectionPlotPane_Tooltip_AllCount + TextUtil.SEPARATOR_TSV_STR +
115.ToString( CultureInfo.CurrentCulture),
Resources.DetectionPlotPane_Tooltip_QMedian + TextUtil.SEPARATOR_TSV_STR +
(6.0d).ToString(@"F1",CultureInfo.CurrentCulture)
};
RunUI(() =>
{
Assert.IsNotNull(pane.ToolTip);
pane.PopulateTooltip(1);
//verify the tooltip text
CollectionAssert.AreEqual(tipText, pane.ToolTip.TipLines);
});
//test the data correct after a doc change (delete peptide)
RunUI(() =>
{
SkylineWindow.SelectedPath = SkylineWindow.Document.GetPathTo((int)SrmDocument.Level.Molecules, 12);
SkylineWindow.EditDelete();
});
WaitForGraphs();
WaitForConditionUI(() => DetectionPlotData.GetDataCache().Datas.Any((dat) =>
ReferenceEquals(SkylineWindow.DocumentUI, dat.Document) &&
DetectionsGraphController.Settings.QValueCutoff == dat.QValueCutoff),
"Cache is not updated on document change.");
//verify that the cache is purged after the document update
RunUI(() =>
{
Assert.IsTrue(DetectionPlotData.GetDataCache().Datas.All((dat) =>
ReferenceEquals(SkylineWindow.DocumentUI, dat.Document)));
});
AssertDataCorrect(pane, 4, 0.001f);
RunUI(() => { SkylineWindow.ShowDetectionsHistogramGraph(); });
WaitForGraphs();
DetectionsHistogramPane paneHistogram;
var graphHistogram = SkylineWindow.DetectionsPlot;
Assert.IsTrue(graphHistogram.TryGetGraphPane(out paneHistogram), "Cannot get histogram pane.");
//display and hide tooltip
string[] histogramTipText =
{
Resources.DetectionHistogramPane_Tooltip_ReplicateCount + TextUtil.SEPARATOR_TSV_STR +
5.ToString( CultureInfo.CurrentCulture),
String.Format(Resources.DetectionHistogramPane_Tooltip_Count, DetectionsGraphController.TargetType.PRECURSOR) +
TextUtil.SEPARATOR_TSV_STR + 102.ToString( CultureInfo.CurrentCulture),
};
RunUI(() =>
{
Assert.IsNotNull(paneHistogram.ToolTip, "No tooltip found.");
paneHistogram.PopulateTooltip(5);
//verify the tooltip text
CollectionAssert.AreEqual(histogramTipText, paneHistogram.ToolTip.TipLines);
});
RunUI(() =>
{
graph.Close();
graphHistogram.Close();
});
WaitForGraphs();
}
private void AssertDataCorrect(DetectionsPlotPane pane, int refIndex, float qValue, bool record = false)
{
DetectionPlotData data = null;
WaitForConditionUI(() => (data = pane.CurrentData) != null
&& data.QValueCutoff == qValue
&& DetectionPlotData.GetDataCache().Status == DetectionPlotData.DetectionDataCache.CacheStatus.idle,
() => $"Retrieving data for qValue {qValue}, refIndex {refIndex} took too long.");
WaitForGraphs();
Assert.IsTrue(data.IsValid);
if (record)
{
Console.WriteLine(@"Peptides");
data.GetTargetData(DetectionsGraphController.TargetType.PEPTIDE).TargetsCount
.ForEach((cnt) => { Console.Write($@"{cnt}, "); });
Console.WriteLine(@"\nPrecursors");
data.GetTargetData(DetectionsGraphController.TargetType.PRECURSOR).TargetsCount
.ForEach((cnt) => { Console.Write($@"{cnt}, "); });
}
Assert.IsTrue(
REF_DATA[refIndex].SequenceEqual(
data.GetTargetData(DetectionsGraphController.TargetType.PEPTIDE).TargetsCount));
Assert.IsTrue(
REF_DATA[refIndex + 1].SequenceEqual(
data.GetTargetData(DetectionsGraphController.TargetType.PRECURSOR).TargetsCount));
}
}
}
| 1 | 13,747 | This is functionally equivalent to the code it replaces. | ProteoWizard-pwiz | .cs |
@@ -26,6 +26,8 @@ import (
type ENI struct {
// ID is the id of eni
ID string `json:"ec2Id"`
+ // ENIType is the type of ENI, valid value: "default", "vlan"
+ ENIType string `json:",omitempty"`
// IPV4Addresses is the ipv4 address associated with the eni
IPV4Addresses []*ENIIPV4Address
// IPV6Addresses is the ipv6 address associated with the eni | 1 | // Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
package eni
import (
"fmt"
"strings"
"github.com/aws/amazon-ecs-agent/agent/acs/model/ecsacs"
"github.com/aws/aws-sdk-go/aws"
"github.com/pkg/errors"
)
// ENI contains information of the eni
type ENI struct {
// ID is the id of eni
ID string `json:"ec2Id"`
// IPV4Addresses is the ipv4 address associated with the eni
IPV4Addresses []*ENIIPV4Address
// IPV6Addresses is the ipv6 address associated with the eni
IPV6Addresses []*ENIIPV6Address
// MacAddress is the mac address of the eni
MacAddress string
// DomainNameServers specifies the nameserver IP addresses for
// the eni
DomainNameServers []string `json:",omitempty"`
// DomainNameSearchList specifies the search list for the domain
// name lookup, for the eni
DomainNameSearchList []string `json:",omitempty"`
// PrivateDNSName is the dns name assigned by the vpc to this eni
PrivateDNSName string `json:",omitempty"`
// SubnetGatewayIPV4Address is the address to the subnet gateway for
// the eni
SubnetGatewayIPV4Address string `json:",omitempty"`
}
// GetIPV4Addresses returns a list of ipv4 addresses allocated to the ENI
func (eni *ENI) GetIPV4Addresses() []string {
var addresses []string
for _, addr := range eni.IPV4Addresses {
addresses = append(addresses, addr.Address)
}
return addresses
}
// GetIPV6Addresses returns a list of ipv6 addresses allocated to the ENI
func (eni *ENI) GetIPV6Addresses() []string {
var addresses []string
for _, addr := range eni.IPV6Addresses {
addresses = append(addresses, addr.Address)
}
return addresses
}
// GetHostname returns the hostname assigned to the ENI
func (eni *ENI) GetHostname() string {
return eni.PrivateDNSName
}
// GetSubnetGatewayIPV4Address returns the subnet IPv4 gateway address assigned
// to the ENI
func (eni *ENI) GetSubnetGatewayIPV4Address() string {
return eni.SubnetGatewayIPV4Address
}
// String returns a human readable version of the ENI object
func (eni *ENI) String() string {
var ipv4Addresses []string
for _, addr := range eni.IPV4Addresses {
ipv4Addresses = append(ipv4Addresses, addr.Address)
}
var ipv6Addresses []string
for _, addr := range eni.IPV6Addresses {
ipv6Addresses = append(ipv6Addresses, addr.Address)
}
return fmt.Sprintf(
"eni id:%s, mac: %s, hostname: %s, ipv4addresses: [%s], ipv6addresses: [%s], dns: [%s], dns search: [%s], gateway ipv4: [%s]",
eni.ID, eni.MacAddress, eni.GetHostname(), strings.Join(ipv4Addresses, ","), strings.Join(ipv6Addresses, ","),
strings.Join(eni.DomainNameServers, ","), strings.Join(eni.DomainNameSearchList, ","), eni.SubnetGatewayIPV4Address)
}
// ENIIPV4Address is the ipv4 information of the eni
type ENIIPV4Address struct {
// Primary indicates whether the ip address is primary
Primary bool
// Address is the ipv4 address associated with eni
Address string
}
// ENIIPV6Address is the ipv6 information of the eni
type ENIIPV6Address struct {
// Address is the ipv6 address associated with eni
Address string
}
// ENIFromACS validates the information from acs message and create the ENI object
func ENIFromACS(acsenis []*ecsacs.ElasticNetworkInterface) (*ENI, error) {
err := ValidateTaskENI(acsenis)
if err != nil {
return nil, err
}
var ipv4 []*ENIIPV4Address
var ipv6 []*ENIIPV6Address
// Read ipv4 address information of the eni
for _, ec2Ipv4 := range acsenis[0].Ipv4Addresses {
ipv4 = append(ipv4, &ENIIPV4Address{
Primary: aws.BoolValue(ec2Ipv4.Primary),
Address: aws.StringValue(ec2Ipv4.PrivateAddress),
})
}
// Read ipv6 address information of the eni
for _, ec2Ipv6 := range acsenis[0].Ipv6Addresses {
ipv6 = append(ipv6, &ENIIPV6Address{
Address: aws.StringValue(ec2Ipv6.Address),
})
}
eni := &ENI{
ID: aws.StringValue(acsenis[0].Ec2Id),
IPV4Addresses: ipv4,
IPV6Addresses: ipv6,
MacAddress: aws.StringValue(acsenis[0].MacAddress),
PrivateDNSName: aws.StringValue(acsenis[0].PrivateDnsName),
SubnetGatewayIPV4Address: aws.StringValue(acsenis[0].SubnetGatewayIpv4Address),
}
for _, nameserverIP := range acsenis[0].DomainNameServers {
eni.DomainNameServers = append(eni.DomainNameServers, aws.StringValue(nameserverIP))
}
for _, nameserverDomain := range acsenis[0].DomainName {
eni.DomainNameSearchList = append(eni.DomainNameSearchList, aws.StringValue(nameserverDomain))
}
return eni, nil
}
// ValidateTaskENI validates the eni informaiton sent from acs
func ValidateTaskENI(acsenis []*ecsacs.ElasticNetworkInterface) error {
// Only one eni should be associated with the task
// Only one ipv4 should be associated with the eni
// No more than one ipv6 should be associated with the eni
if len(acsenis) != 1 {
return errors.Errorf("eni message validation: more than one ENIs in the message(%d)", len(acsenis))
} else if len(acsenis[0].Ipv4Addresses) != 1 {
return errors.Errorf("eni message validation: more than one ipv4 addresses in the message(%d)", len(acsenis[0].Ipv4Addresses))
} else if len(acsenis[0].Ipv6Addresses) > 1 {
return errors.Errorf("eni message validation: more than one ipv6 addresses in the message(%d)", len(acsenis[0].Ipv6Addresses))
}
if acsenis[0].MacAddress == nil {
return errors.Errorf("eni message validation: empty eni mac address in the message")
}
if acsenis[0].Ec2Id == nil {
return errors.Errorf("eni message validation: empty eni id in the message")
}
return nil
}
| 1 | 22,273 | can you change the field name here to "InterfaceAssociationProtocol"? same for the Config struct in agent/ecscni/types.go. i think it's better to keep the field name consistent between agent and acs payload | aws-amazon-ecs-agent | go |
@@ -1,5 +1,5 @@
/**
- * core/widgets data store: widget tests.
+ * Widgets data store: widget tests.
*
* Site Kit by Google, Copyright 2020 Google LLC
* | 1 | /**
* core/widgets data store: widget tests.
*
* Site Kit by Google, Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Internal dependencies
*/
import {
createTestRegistry,
unsubscribeFromAll,
muteConsole,
} from '../../../../../tests/js/utils';
import { STORE_NAME } from './constants';
describe( 'core/widgets Widget areas', () => {
let registry;
let store;
beforeEach( () => {
registry = createTestRegistry();
store = registry.stores[ STORE_NAME ].store;
} );
afterEach( () => {
unsubscribeFromAll( registry );
} );
describe( 'actions', () => {
describe( 'assignWidgetArea', () => {
it( 'should implicitly create a context when assigning a widget area, if one does not exist', () => {
// Assign this widget area to the testarea context.
registry.dispatch( STORE_NAME ).assignWidgetArea( 'header', 'testarea' );
const { contextAssignments } = store.getState();
expect( contextAssignments.testarea ).toEqual( [ 'header' ] );
} );
it( 'should re-use a context if one is already created', () => {
registry.dispatch( STORE_NAME ).assignWidgetArea( 'header', 'testarea' );
registry.dispatch( STORE_NAME ).assignWidgetArea( 'footer', 'testarea' );
const { contextAssignments } = store.getState();
expect( contextAssignments.testarea ).toEqual( [ 'header', 'footer' ] );
} );
it( 'should assign a registered widget area to a context', () => {
// Register the widget area.
const slug = 'header';
const settings = {
priority: 10,
title: 'Your Site',
subtitle: 'Learn about your site!',
icon: '/wp-content/plugins/googlesitekit/header.svg',
style: 'boxes',
};
registry.dispatch( STORE_NAME ).registerWidgetArea( slug, settings );
// Assign this widget area to the testarea context.
registry.dispatch( STORE_NAME ).assignWidgetArea( slug, 'testarea' );
// Get all assigned widget areas for the testarea context.
const testareaAreas = registry.select( STORE_NAME ).getWidgetAreas( 'testarea' );
expect( testareaAreas ).toHaveLength( 1 );
expect( testareaAreas.some( ( area ) => area.slug === slug ) ).toEqual( true );
} );
} );
describe( 'registerWidgetArea', () => {
it( 'should register a widget area', () => {
const slug = 'header';
const settings = {
priority: 10,
title: 'Your Site',
subtitle: 'Learn about your site!',
icon: '/wp-content/plugins/googlesitekit/header.svg',
style: 'boxes',
};
registry.dispatch( STORE_NAME ).registerWidgetArea( slug, settings );
const state = store.getState();
expect( registry.select( STORE_NAME ).isWidgetAreaRegistered( slug ) ).toEqual( true );
// There is no selector for unassigned widget areas, so we inspect the store directly for
// this test.
expect( state.areas ).toMatchObject( {
[ slug ]: { ...settings, slug },
} );
} );
it( 'requires a slug', () => {
expect( () => {
registry.dispatch( STORE_NAME ).registerWidgetArea( null, {} );
} ).toThrow( 'slug is required.' );
} );
it( 'requires settings', () => {
// (It will throw for the first missing param, because the settings argument is
// always defined .)
expect( () => {
registry.dispatch( STORE_NAME ).registerWidgetArea( 'my-cool-slug' );
} ).toThrow( 'settings.title is required.' );
} );
it( 'requires a title and subtitle in settings', () => {
// Mute warning about duplicate slug registrations for this test.
muteConsole( 'warn' );
expect( () => {
registry.dispatch( STORE_NAME ).registerWidgetArea( 'header', {} );
} ).toThrow( 'settings.title is required.' );
expect( () => {
registry.dispatch( STORE_NAME ).registerWidgetArea( 'header', {
title: 'Analytics Header',
} );
} ).toThrow( 'settings.subtitle is required.' );
expect( () => {
registry.dispatch( STORE_NAME ).registerWidgetArea( 'header', {
title: 'Analytics Header',
subtitle: 'Analytics tell you about visitors',
} );
} ).not.toThrow();
expect( () => {
registry.dispatch( STORE_NAME ).registerWidgetArea( 'header', {
title: 'Analytics Header',
subtitle: 'Analytics tell you about visitors',
style: 'composite',
} );
} ).not.toThrow();
} );
it( 'should register multiple widget areas', () => {
const slugOne = 'dashboard-header';
const settingsOne = {
priority: 10,
title: 'Header',
subtitle: 'Cool stuff only!',
icon: '/wp-content/plugins/googlesitekit/header.svg',
style: 'boxes',
};
const slugTwo = 'dashboard-footer';
const settingsTwo = {
priority: 12,
title: 'Footer',
subtitle: 'Less important stuff.',
icon: '/wp-content/plugins/googlesitekit/footer.svg',
style: 'boxes',
};
registry.dispatch( STORE_NAME ).registerWidgetArea( slugOne, settingsOne );
registry.dispatch( STORE_NAME ).registerWidgetArea( slugTwo, settingsTwo );
const state = store.getState();
expect( registry.select( STORE_NAME ).isWidgetAreaRegistered( slugOne ) ).toEqual( true );
expect( registry.select( STORE_NAME ).isWidgetAreaRegistered( slugTwo ) ).toEqual( true );
// There is no selector for unassigned widget areas, so we inspect the store directly for
// this test.
expect( state.areas ).toMatchObject( {
[ slugOne ]: { ...settingsOne, slug: slugOne },
[ slugTwo ]: { ...settingsTwo, slug: slugTwo },
} );
} );
it( 'should use priority: 10 as a default', () => {
const slug = 'pageviews';
const settings = {
title: 'Page Views',
subtitle: 'See all your views!',
icon: '/wp-content/plugins/googlesitekit/pageviews.svg',
style: 'boxes', // 'composite'
};
registry.dispatch( STORE_NAME ).registerWidgetArea( slug, settings );
const state = store.getState();
expect( registry.select( STORE_NAME ).isWidgetAreaRegistered( slug ) ).toEqual( true );
// There is no selector for unassigned widget areas, so we inspect the store directly for
// this test.
expect( state.areas ).toMatchObject( {
[ slug ]: { ...settings, priority: 10, slug },
} );
} );
it( 'should not overwrite an existing widget area', () => {
const slug = 'pageviews';
const settings = {
priority: 10,
title: 'Page Views',
subtitle: 'See all your views!',
icon: '/wp-content/plugins/googlesitekit/pageviews.svg',
style: 'boxes', // 'composite'
};
// We don't want other widget areas to be able to overwrite existing areas.
const differentSettings = {
priority: 10,
title: 'Mega Page Views',
subtitle: 'Subscribe for more features!',
icon: '/wp-content/plugins/googlesitekit/pageviews.svg',
style: 'composite',
};
registry.dispatch( STORE_NAME ).registerWidgetArea( slug, settings );
// Mute warning about duplicate slug since we expect it below anyway.
muteConsole( 'warn' );
// Expect console warning about duplicate slug.
const consoleWarnSpy = jest.spyOn( global.console, 'warn' );
registry.dispatch( STORE_NAME ).registerWidgetArea( slug, differentSettings );
expect( consoleWarnSpy ).toHaveBeenCalledWith( `Could not register widget area with slug "${ slug }". Widget area "${ slug }" is already registered.` );
consoleWarnSpy.mockClear();
const state = store.getState();
// Ensure the original settings are registered.
expect( state.areas ).toMatchObject( {
[ slug ]: { ...settings, slug },
} );
expect( state.areas ).not.toMatchObject( {
[ slug ]: { ...differentSettings, slug },
} );
} );
} );
} );
describe( 'selectors', () => {
describe( 'getWidgetAreas', () => {
it( 'requires a contextSlug', () => {
expect( () => {
registry.select( STORE_NAME ).getWidgetAreas();
} ).toThrow( 'contextSlug is required.' );
} );
it( 'returns all registered widget areas', () => {
// Register the widget area.
const slugOne = 'header';
const slugTwo = 'subheader';
const settings = {
priority: 10,
title: 'Your Site',
subtitle: 'Learn about your site!',
icon: '/wp-content/plugins/googlesitekit/header.svg',
style: 'boxes',
};
registry.dispatch( STORE_NAME ).registerWidgetArea( slugOne, settings );
registry.dispatch( STORE_NAME ).registerWidgetArea( slugTwo, settings );
// Assign this widget area to the testarea context.
registry.dispatch( STORE_NAME ).assignWidgetArea( slugOne, 'testarea' );
registry.dispatch( STORE_NAME ).assignWidgetArea( slugTwo, 'testarea' );
// Get all assigned widget areas for the testarea context.
const testareaAreas = registry.select( STORE_NAME ).getWidgetAreas( 'testarea' );
expect( testareaAreas ).toMatchObject( [
{ ...settings, slug: slugOne },
{ ...settings, slug: slugTwo },
] );
} );
it( 'does not return unregistered widget areas', () => {
// Assign this widget area to the testarea context.
registry.dispatch( STORE_NAME ).assignWidgetArea( 'area-one', 'testarea' );
registry.dispatch( STORE_NAME ).assignWidgetArea( 'area-two', 'testarea' );
// Get all assigned widget areas for the testarea context.
const testareaAreas = registry.select( STORE_NAME ).getWidgetAreas( 'testarea' );
expect( testareaAreas ).toHaveLength( 0 );
} );
it( 'returns widget areas that were registered after they were assigned', () => {
const slugOne = 'header';
const slugTwo = 'subheader';
// Assign this widget area to the testarea context.
registry.dispatch( STORE_NAME ).assignWidgetArea( slugOne, 'testarea' );
registry.dispatch( STORE_NAME ).assignWidgetArea( slugTwo, 'testarea' );
// Register the widget areas.
const settings = {
priority: 10,
title: 'Your Site',
subtitle: 'Learn about your site!',
icon: '/wp-content/plugins/googlesitekit/header.svg',
style: 'boxes',
};
registry.dispatch( STORE_NAME ).registerWidgetArea( slugOne, settings );
registry.dispatch( STORE_NAME ).registerWidgetArea( slugTwo, settings );
// Get all assigned widget areas for the testarea context.
const testareaAreas = registry.select( STORE_NAME ).getWidgetAreas( 'testarea' );
expect( testareaAreas ).toMatchObject( [
{ ...settings, slug: slugOne },
{ ...settings, slug: slugTwo },
] );
} );
it( 'returns the widget areas sorted by priority', () => {
// Register the widget area.
const slugLowest = 'header';
const slugMedium = 'header2';
const slugMediumTwo = 'header3';
const slugHighest = 'header4';
const settings = {
title: 'Your title',
subtitle: 'Okay!',
icon: '/wp-content/plugins/googlesitekit/header.svg',
style: 'boxes',
};
registry.dispatch( STORE_NAME ).registerWidgetArea( slugLowest, { ...settings, priority: 5 } );
registry.dispatch( STORE_NAME ).registerWidgetArea( slugMedium, { ...settings, priority: 10 } );
registry.dispatch( STORE_NAME ).registerWidgetArea( slugMediumTwo, { ...settings, priority: 10 } );
registry.dispatch( STORE_NAME ).registerWidgetArea( slugHighest, { ...settings, priority: 15 } );
// Assign this widget area to the testarea context.
registry.dispatch( STORE_NAME ).assignWidgetArea( slugLowest, 'testarea' );
registry.dispatch( STORE_NAME ).assignWidgetArea( slugMedium, 'testarea' );
registry.dispatch( STORE_NAME ).assignWidgetArea( slugMediumTwo, 'testarea' );
registry.dispatch( STORE_NAME ).assignWidgetArea( slugHighest, 'testarea' );
// Get all assigned widget areas for the testarea context.
const testareaAreas = registry.select( STORE_NAME ).getWidgetAreas( 'testarea' );
// The lowest priority appears first.
expect( testareaAreas[ 0 ] ).toMatchObject( { ...settings, slug: slugLowest } );
// Widgets assigned with the same priority should be last-in, last-out.
expect( testareaAreas[ 1 ] ).toMatchObject( { ...settings, slug: slugMedium } );
expect( testareaAreas[ 2 ] ).toMatchObject( { ...settings, slug: slugMediumTwo } );
expect( testareaAreas[ 3 ] ).toMatchObject( { ...settings, slug: slugHighest } );
} );
} );
describe( 'getWidgetArea', () => {
it( 'returns an area if the widget area is registered', () => {
registry.dispatch( STORE_NAME ).registerWidgetArea( 'TestArea', {
title: 'Test Header',
subtitle: 'Cool stuff for yoursite.com',
style: 'composite',
} );
expect( registry.select( STORE_NAME ).getWidgetArea( 'TestArea' ) ).toEqual( {
icon: undefined,
priority: 10,
title: 'Test Header',
subtitle: 'Cool stuff for yoursite.com',
style: 'composite',
slug: 'TestArea',
} );
} );
it( 'returns null if the widget area is not registered', () => {
expect( registry.select( STORE_NAME ).getWidgetArea( 'NotRealArea' ) ).toEqual( null );
} );
} );
describe( 'isWidgetAreaRegistered', () => {
it( 'returns true if the widget area is registered', () => {
registry.dispatch( STORE_NAME ).registerWidgetArea( 'TestArea', {
title: 'Test Header',
subtitle: 'Cool stuff for yoursite.com',
style: 'composite',
} );
expect( registry.select( STORE_NAME ).isWidgetAreaRegistered( 'TestArea' ) ).toEqual( true );
} );
it( 'returns false if the widget area is not registered', () => {
expect( registry.select( STORE_NAME ).isWidgetAreaRegistered( 'NotRealArea' ) ).toEqual( false );
} );
} );
} );
} );
| 1 | 32,214 | See above, same for the other cases. | google-site-kit-wp | js |
@@ -301,6 +301,19 @@ public class MetricRegistry implements MetricSet {
});
}
+ /**
+ * Return the {@link Gauge} registered under this name; or create and register
+ * a new {@link SettableGauge} if none is registered.
+ *
+ * @param name the name of the metric
+ * @return a new or pre-existing {@link SettableGauge}
+ * @since 4.2
+ */
+ @SuppressWarnings("unchecked")
+ public <T> SettableGauge<T> gauge(String name) {
+ return getOrAdd(name, MetricBuilder.GAUGES);
+ }
+
/**
* Return the {@link Gauge} registered under this name; or create and register
* a new {@link Gauge} using the provided MetricSupplier if none is registered. | 1 | package com.codahale.metrics;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.SortedMap;
import java.util.SortedSet;
import java.util.TreeMap;
import java.util.TreeSet;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.CopyOnWriteArrayList;
/**
* A registry of metric instances.
*/
public class MetricRegistry implements MetricSet {
/**
* Concatenates elements to form a dotted name, eliding any null values or empty strings.
*
* @param name the first element of the name
* @param names the remaining elements of the name
* @return {@code name} and {@code names} concatenated by periods
*/
public static String name(String name, String... names) {
final StringBuilder builder = new StringBuilder();
append(builder, name);
if (names != null) {
for (String s : names) {
append(builder, s);
}
}
return builder.toString();
}
/**
* Concatenates a class name and elements to form a dotted name, eliding any null values or
* empty strings.
*
* @param klass the first element of the name
* @param names the remaining elements of the name
* @return {@code klass} and {@code names} concatenated by periods
*/
public static String name(Class<?> klass, String... names) {
return name(klass.getName(), names);
}
private static void append(StringBuilder builder, String part) {
if (part != null && !part.isEmpty()) {
if (builder.length() > 0) {
builder.append('.');
}
builder.append(part);
}
}
private final ConcurrentMap<String, Metric> metrics;
private final List<MetricRegistryListener> listeners;
/**
* Creates a new {@link MetricRegistry}.
*/
public MetricRegistry() {
this.metrics = buildMap();
this.listeners = new CopyOnWriteArrayList<>();
}
/**
* Creates a new {@link ConcurrentMap} implementation for use inside the registry. Override this
* to create a {@link MetricRegistry} with space- or time-bounded metric lifecycles, for
* example.
*
* @return a new {@link ConcurrentMap}
*/
protected ConcurrentMap<String, Metric> buildMap() {
return new ConcurrentHashMap<>();
}
/**
* Given a {@link Metric}, registers it under the given name.
*
* @param name the name of the metric
* @param metric the metric
* @param <T> the type of the metric
* @return {@code metric}
* @throws IllegalArgumentException if the name is already registered or metric variable is null
*/
@SuppressWarnings("unchecked")
public <T extends Metric> T register(String name, T metric) throws IllegalArgumentException {
if (metric == null) {
throw new NullPointerException("metric == null");
}
if (metric instanceof MetricRegistry) {
final MetricRegistry childRegistry = (MetricRegistry)metric;
final String childName = name;
childRegistry.addListener(new MetricRegistryListener() {
@Override
public void onGaugeAdded(String name, Gauge<?> gauge) {
register(name(childName, name), gauge);
}
@Override
public void onGaugeRemoved(String name) {
remove(name(childName, name));
}
@Override
public void onCounterAdded(String name, Counter counter) {
register(name(childName, name), counter);
}
@Override
public void onCounterRemoved(String name) {
remove(name(childName, name));
}
@Override
public void onHistogramAdded(String name, Histogram histogram) {
register(name(childName, name), histogram);
}
@Override
public void onHistogramRemoved(String name) {
remove(name(childName, name));
}
@Override
public void onMeterAdded(String name, Meter meter) {
register(name(childName, name), meter);
}
@Override
public void onMeterRemoved(String name) {
remove(name(childName, name));
}
@Override
public void onTimerAdded(String name, Timer timer) {
register(name(childName, name), timer);
}
@Override
public void onTimerRemoved(String name) {
remove(name(childName, name));
}
});
} else if (metric instanceof MetricSet) {
registerAll(name, (MetricSet) metric);
} else {
final Metric existing = metrics.putIfAbsent(name, metric);
if (existing == null) {
onMetricAdded(name, metric);
} else {
throw new IllegalArgumentException("A metric named " + name + " already exists");
}
}
return metric;
}
/**
* Given a metric set, registers them.
*
* @param metrics a set of metrics
* @throws IllegalArgumentException if any of the names are already registered
*/
public void registerAll(MetricSet metrics) throws IllegalArgumentException {
registerAll(null, metrics);
}
/**
* Return the {@link Counter} registered under this name; or create and register
* a new {@link Counter} if none is registered.
*
* @param name the name of the metric
* @return a new or pre-existing {@link Counter}
*/
public Counter counter(String name) {
return getOrAdd(name, MetricBuilder.COUNTERS);
}
/**
* Return the {@link Counter} registered under this name; or create and register
* a new {@link Counter} using the provided MetricSupplier if none is registered.
*
* @param name the name of the metric
* @param supplier a MetricSupplier that can be used to manufacture a counter.
* @return a new or pre-existing {@link Counter}
*/
public Counter counter(String name, final MetricSupplier<Counter> supplier) {
return getOrAdd(name, new MetricBuilder<Counter>() {
@Override
public Counter newMetric() {
return supplier.newMetric();
}
@Override
public boolean isInstance(Metric metric) {
return Counter.class.isInstance(metric);
}
});
}
/**
* Return the {@link Histogram} registered under this name; or create and register
* a new {@link Histogram} if none is registered.
*
* @param name the name of the metric
* @return a new or pre-existing {@link Histogram}
*/
public Histogram histogram(String name) {
return getOrAdd(name, MetricBuilder.HISTOGRAMS);
}
/**
* Return the {@link Histogram} registered under this name; or create and register
* a new {@link Histogram} using the provided MetricSupplier if none is registered.
*
* @param name the name of the metric
* @param supplier a MetricSupplier that can be used to manufacture a histogram
* @return a new or pre-existing {@link Histogram}
*/
public Histogram histogram(String name, final MetricSupplier<Histogram> supplier) {
return getOrAdd(name, new MetricBuilder<Histogram>() {
@Override
public Histogram newMetric() {
return supplier.newMetric();
}
@Override
public boolean isInstance(Metric metric) {
return Histogram.class.isInstance(metric);
}
});
}
/**
* Return the {@link Meter} registered under this name; or create and register
* a new {@link Meter} if none is registered.
*
* @param name the name of the metric
* @return a new or pre-existing {@link Meter}
*/
public Meter meter(String name) {
return getOrAdd(name, MetricBuilder.METERS);
}
/**
* Return the {@link Meter} registered under this name; or create and register
* a new {@link Meter} using the provided MetricSupplier if none is registered.
*
* @param name the name of the metric
* @param supplier a MetricSupplier that can be used to manufacture a Meter
* @return a new or pre-existing {@link Meter}
*/
public Meter meter(String name, final MetricSupplier<Meter> supplier) {
return getOrAdd(name, new MetricBuilder<Meter>() {
@Override
public Meter newMetric() {
return supplier.newMetric();
}
@Override
public boolean isInstance(Metric metric) {
return Meter.class.isInstance(metric);
}
});
}
/**
* Return the {@link Timer} registered under this name; or create and register
* a new {@link Timer} if none is registered.
*
* @param name the name of the metric
* @return a new or pre-existing {@link Timer}
*/
public Timer timer(String name) {
return getOrAdd(name, MetricBuilder.TIMERS);
}
/**
* Return the {@link Timer} registered under this name; or create and register
* a new {@link Timer} using the provided MetricSupplier if none is registered.
*
* @param name the name of the metric
* @param supplier a MetricSupplier that can be used to manufacture a Timer
* @return a new or pre-existing {@link Timer}
*/
public Timer timer(String name, final MetricSupplier<Timer> supplier) {
return getOrAdd(name, new MetricBuilder<Timer>() {
@Override
public Timer newMetric() {
return supplier.newMetric();
}
@Override
public boolean isInstance(Metric metric) {
return Timer.class.isInstance(metric);
}
});
}
/**
* Return the {@link Gauge} registered under this name; or create and register
* a new {@link Gauge} using the provided MetricSupplier if none is registered.
*
* @param name the name of the metric
* @param supplier a MetricSupplier that can be used to manufacture a Gauge
* @return a new or pre-existing {@link Gauge}
*/
@SuppressWarnings("rawtypes")
public <T extends Gauge> T gauge(String name, final MetricSupplier<T> supplier) {
return getOrAdd(name, new MetricBuilder<T>() {
@Override
public T newMetric() {
return supplier.newMetric();
}
@Override
public boolean isInstance(Metric metric) {
return Gauge.class.isInstance(metric);
}
});
}
/**
* Removes the metric with the given name.
*
* @param name the name of the metric
* @return whether or not the metric was removed
*/
public boolean remove(String name) {
final Metric metric = metrics.remove(name);
if (metric != null) {
onMetricRemoved(name, metric);
return true;
}
return false;
}
/**
* Removes all metrics which match the given filter.
*
* @param filter a filter
*/
public void removeMatching(MetricFilter filter) {
for (Map.Entry<String, Metric> entry : metrics.entrySet()) {
if (filter.matches(entry.getKey(), entry.getValue())) {
remove(entry.getKey());
}
}
}
/**
* Adds a {@link MetricRegistryListener} to a collection of listeners that will be notified on
* metric creation. Listeners will be notified in the order in which they are added.
* <p>
* <b>N.B.:</b> The listener will be notified of all existing metrics when it first registers.
*
* @param listener the listener that will be notified
*/
public void addListener(MetricRegistryListener listener) {
listeners.add(listener);
for (Map.Entry<String, Metric> entry : metrics.entrySet()) {
notifyListenerOfAddedMetric(listener, entry.getValue(), entry.getKey());
}
}
/**
* Removes a {@link MetricRegistryListener} from this registry's collection of listeners.
*
* @param listener the listener that will be removed
*/
public void removeListener(MetricRegistryListener listener) {
listeners.remove(listener);
}
/**
* Returns a set of the names of all the metrics in the registry.
*
* @return the names of all the metrics
*/
public SortedSet<String> getNames() {
return Collections.unmodifiableSortedSet(new TreeSet<>(metrics.keySet()));
}
/**
* Returns a map of all the gauges in the registry and their names.
*
* @return all the gauges in the registry
*/
@SuppressWarnings("rawtypes")
public SortedMap<String, Gauge> getGauges() {
return getGauges(MetricFilter.ALL);
}
/**
* Returns a map of all the gauges in the registry and their names which match the given filter.
*
* @param filter the metric filter to match
* @return all the gauges in the registry
*/
@SuppressWarnings("rawtypes")
public SortedMap<String, Gauge> getGauges(MetricFilter filter) {
return getMetrics(Gauge.class, filter);
}
/**
* Returns a map of all the counters in the registry and their names.
*
* @return all the counters in the registry
*/
public SortedMap<String, Counter> getCounters() {
return getCounters(MetricFilter.ALL);
}
/**
* Returns a map of all the counters in the registry and their names which match the given
* filter.
*
* @param filter the metric filter to match
* @return all the counters in the registry
*/
public SortedMap<String, Counter> getCounters(MetricFilter filter) {
return getMetrics(Counter.class, filter);
}
/**
* Returns a map of all the histograms in the registry and their names.
*
* @return all the histograms in the registry
*/
public SortedMap<String, Histogram> getHistograms() {
return getHistograms(MetricFilter.ALL);
}
/**
* Returns a map of all the histograms in the registry and their names which match the given
* filter.
*
* @param filter the metric filter to match
* @return all the histograms in the registry
*/
public SortedMap<String, Histogram> getHistograms(MetricFilter filter) {
return getMetrics(Histogram.class, filter);
}
/**
* Returns a map of all the meters in the registry and their names.
*
* @return all the meters in the registry
*/
public SortedMap<String, Meter> getMeters() {
return getMeters(MetricFilter.ALL);
}
/**
* Returns a map of all the meters in the registry and their names which match the given filter.
*
* @param filter the metric filter to match
* @return all the meters in the registry
*/
public SortedMap<String, Meter> getMeters(MetricFilter filter) {
return getMetrics(Meter.class, filter);
}
/**
* Returns a map of all the timers in the registry and their names.
*
* @return all the timers in the registry
*/
public SortedMap<String, Timer> getTimers() {
return getTimers(MetricFilter.ALL);
}
/**
* Returns a map of all the timers in the registry and their names which match the given filter.
*
* @param filter the metric filter to match
* @return all the timers in the registry
*/
public SortedMap<String, Timer> getTimers(MetricFilter filter) {
return getMetrics(Timer.class, filter);
}
@SuppressWarnings("unchecked")
private <T extends Metric> T getOrAdd(String name, MetricBuilder<T> builder) {
final Metric metric = metrics.get(name);
if (builder.isInstance(metric)) {
return (T) metric;
} else if (metric == null) {
try {
return register(name, builder.newMetric());
} catch (IllegalArgumentException e) {
final Metric added = metrics.get(name);
if (builder.isInstance(added)) {
return (T) added;
}
}
}
throw new IllegalArgumentException(name + " is already used for a different type of metric");
}
@SuppressWarnings("unchecked")
private <T extends Metric> SortedMap<String, T> getMetrics(Class<T> klass, MetricFilter filter) {
final TreeMap<String, T> timers = new TreeMap<>();
for (Map.Entry<String, Metric> entry : metrics.entrySet()) {
if (klass.isInstance(entry.getValue()) && filter.matches(entry.getKey(),
entry.getValue())) {
timers.put(entry.getKey(), (T) entry.getValue());
}
}
return Collections.unmodifiableSortedMap(timers);
}
private void onMetricAdded(String name, Metric metric) {
for (MetricRegistryListener listener : listeners) {
notifyListenerOfAddedMetric(listener, metric, name);
}
}
private void notifyListenerOfAddedMetric(MetricRegistryListener listener, Metric metric, String name) {
if (metric instanceof Gauge) {
listener.onGaugeAdded(name, (Gauge<?>) metric);
} else if (metric instanceof Counter) {
listener.onCounterAdded(name, (Counter) metric);
} else if (metric instanceof Histogram) {
listener.onHistogramAdded(name, (Histogram) metric);
} else if (metric instanceof Meter) {
listener.onMeterAdded(name, (Meter) metric);
} else if (metric instanceof Timer) {
listener.onTimerAdded(name, (Timer) metric);
} else {
throw new IllegalArgumentException("Unknown metric type: " + metric.getClass());
}
}
private void onMetricRemoved(String name, Metric metric) {
for (MetricRegistryListener listener : listeners) {
notifyListenerOfRemovedMetric(name, metric, listener);
}
}
private void notifyListenerOfRemovedMetric(String name, Metric metric, MetricRegistryListener listener) {
if (metric instanceof Gauge) {
listener.onGaugeRemoved(name);
} else if (metric instanceof Counter) {
listener.onCounterRemoved(name);
} else if (metric instanceof Histogram) {
listener.onHistogramRemoved(name);
} else if (metric instanceof Meter) {
listener.onMeterRemoved(name);
} else if (metric instanceof Timer) {
listener.onTimerRemoved(name);
} else {
throw new IllegalArgumentException("Unknown metric type: " + metric.getClass());
}
}
/**
* Given a metric set, registers them with the given prefix prepended to their names.
*
* @param prefix a name prefix
* @param metrics a set of metrics
* @throws IllegalArgumentException if any of the names are already registered
*/
public void registerAll(String prefix, MetricSet metrics) throws IllegalArgumentException {
for (Map.Entry<String, Metric> entry : metrics.getMetrics().entrySet()) {
if (entry.getValue() instanceof MetricSet) {
registerAll(name(prefix, entry.getKey()), (MetricSet) entry.getValue());
} else {
register(name(prefix, entry.getKey()), entry.getValue());
}
}
}
@Override
public Map<String, Metric> getMetrics() {
return Collections.unmodifiableMap(metrics);
}
@FunctionalInterface
public interface MetricSupplier<T extends Metric> {
T newMetric();
}
/**
* A quick and easy way of capturing the notion of default metrics.
*/
private interface MetricBuilder<T extends Metric> {
MetricBuilder<Counter> COUNTERS = new MetricBuilder<Counter>() {
@Override
public Counter newMetric() {
return new Counter();
}
@Override
public boolean isInstance(Metric metric) {
return Counter.class.isInstance(metric);
}
};
MetricBuilder<Histogram> HISTOGRAMS = new MetricBuilder<Histogram>() {
@Override
public Histogram newMetric() {
return new Histogram(new ExponentiallyDecayingReservoir());
}
@Override
public boolean isInstance(Metric metric) {
return Histogram.class.isInstance(metric);
}
};
MetricBuilder<Meter> METERS = new MetricBuilder<Meter>() {
@Override
public Meter newMetric() {
return new Meter();
}
@Override
public boolean isInstance(Metric metric) {
return Meter.class.isInstance(metric);
}
};
MetricBuilder<Timer> TIMERS = new MetricBuilder<Timer>() {
@Override
public Timer newMetric() {
return new Timer();
}
@Override
public boolean isInstance(Metric metric) {
return Timer.class.isInstance(metric);
}
};
T newMetric();
boolean isInstance(Metric metric);
}
}
| 1 | 7,601 | Shouldn't line 313 be public \<T\> SettableGauge\<T\> **settable**Gauge(String name) { ? It would also be good to call it a few times and with differnet types (Long, Integer, String) in the test as well. | dropwizard-metrics | java |
@@ -287,7 +287,7 @@ def _has_bare_super_call(fundef_node):
return False
-def _safe_infer_call_result(node, caller, context=None):
+def _safe_infer_call_result(node, caller, context=None): # pylint: disable=inconsistent-return-statements
"""
Safely infer the return value of a function.
| 1 | # -*- coding: utf-8 -*-
# Copyright (c) 2006-2016 LOGILAB S.A. (Paris, FRANCE) <[email protected]>
# Copyright (c) 2012, 2014 Google, Inc.
# Copyright (c) 2013-2016 Claudiu Popa <[email protected]>
# Copyright (c) 2015 Dmitry Pribysh <[email protected]>
# Copyright (c) 2016 Moises Lopez - https://www.vauxoo.com/ <[email protected]>
# Copyright (c) 2016 Łukasz Rogalski <[email protected]>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/COPYING
"""classes checker for Python code
"""
from __future__ import generators
import collections
import sys
import six
import astroid
from astroid.bases import Generator, BUILTINS
from astroid.exceptions import InconsistentMroError, DuplicateBasesError
from astroid import decorators
from astroid import objects
from astroid.scoped_nodes import function_to_method
from pylint.interfaces import IAstroidChecker
from pylint.checkers import BaseChecker
from pylint.checkers.utils import (
PYMETHODS, SPECIAL_METHODS_PARAMS,
overrides_a_method, check_messages, is_attr_private,
is_attr_protected, node_frame_class, is_builtin_object,
decorated_with_property, unimplemented_abstract_methods,
decorated_with, class_is_abstract,
safe_infer, has_known_bases, is_iterable, is_comprehension)
from pylint.utils import get_global_option
if sys.version_info >= (3, 0):
NEXT_METHOD = '__next__'
else:
NEXT_METHOD = 'next'
INVALID_BASE_CLASSES = {'bool', 'range', 'slice', 'memoryview'}
# Dealing with useless override detection, with regard
# to parameters vs arguments
_CallSignature = collections.namedtuple(
'_CallSignature', 'args kws starred_args starred_kws')
_ParameterSignature = collections.namedtuple(
'_ParameterSignature',
'args kwonlyargs varargs kwargs',
)
def _signature_from_call(call):
kws = {}
args = []
starred_kws = []
starred_args = []
for keyword in call.keywords or []:
arg, value = keyword.arg, keyword.value
if arg is None and isinstance(value, astroid.Name):
            # Starred keyword node; we are interested only in plain names,
            # since any other expression may transform the parameter.
starred_kws.append(value.name)
elif isinstance(value, astroid.Name):
kws[arg] = value.name
else:
kws[arg] = None
for arg in call.args:
if isinstance(arg, astroid.Starred) and isinstance(arg.value, astroid.Name):
            # Positional variadic whose value is a plain name; anything else
            # may have been transformed.
starred_args.append(arg.value.name)
elif isinstance(arg, astroid.Name):
args.append(arg.name)
else:
args.append(None)
return _CallSignature(args, kws, starred_args, starred_kws)
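# A minimal illustration (not executed anywhere; names are hypothetical):
# for a call node representing ``method(a, b, *rest, key=c, **extra)`` the
# helper above yields roughly
#     _CallSignature(args=['a', 'b'], kws={'key': 'c'},
#                    starred_args=['rest'], starred_kws=['extra'])
# Arguments that are not plain names are recorded as None, since they cannot
# be matched reliably against parameter names.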
def _signature_from_arguments(arguments):
kwarg = arguments.kwarg
vararg = arguments.vararg
args = [arg.name for arg in arguments.args if arg.name != 'self']
kwonlyargs = [arg.name for arg in arguments.kwonlyargs]
return _ParameterSignature(args, kwonlyargs, vararg, kwarg)
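# Likewise (illustrative only), ``def method(self, a, *rest, b=1, **extra)``
# maps to
#     _ParameterSignature(args=['a'], kwonlyargs=['b'],
#                         varargs='rest', kwargs='extra')
# ``self`` is filtered out so bound and unbound signatures compare alike.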
def _definition_equivalent_to_call(definition, call):
'''Check if a definition signature is equivalent to a call.'''
if definition.kwargs:
same_kw_variadics = definition.kwargs in call.starred_kws
else:
same_kw_variadics = not call.starred_kws
if definition.varargs:
same_args_variadics = definition.varargs in call.starred_args
else:
same_args_variadics = not call.starred_args
same_kwonlyargs = all(kw in call.kws for kw in definition.kwonlyargs)
same_args = definition.args == call.args
no_additional_kwarg_arguments = True
if call.kws:
for keyword in call.kws:
is_arg = keyword in call.args
is_kwonly = keyword in definition.kwonlyargs
if not is_arg and not is_kwonly:
# Maybe this argument goes into **kwargs,
# or it is an extraneous argument.
# In any case, the signature is different than
# the call site, which stops our search.
no_additional_kwarg_arguments = False
break
return all((
same_args,
same_kwonlyargs,
same_args_variadics,
same_kw_variadics,
no_additional_kwarg_arguments,
))
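# Sketch of an equivalent definition/call pair, assuming a hypothetical
# ``Child`` class (this is the shape the comparison above accepts):
#     def method(self, first, *args, key=None, **kwargs):
#         return super(Child, self).method(first, *args, key=key, **kwargs)
# The positional names, the keyword-only names and both variadics line up,
# so _definition_equivalent_to_call() returns True for this pair.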
# Deal with parameter overriding between two methods.
def _positional_parameters(method):
positional = method.args.args
if method.type in ('classmethod', 'method'):
positional = positional[1:]
return positional
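# E.g. for ``def method(self, a, b)`` the ``self`` slot is dropped, so the
# positional parameters compared between parent and child are ['a', 'b'];
# static methods keep their full parameter list.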
def _has_different_parameters(original, overridden, dummy_parameter_regex):
zipped = six.moves.zip_longest(original, overridden)
for original_param, overridden_param in zipped:
params = (original_param, overridden_param)
if not all(params):
return True
names = [param.name for param in params]
if any(map(dummy_parameter_regex.match, names)):
continue
if original_param.name != overridden_param.name:
return True
return False
def _different_parameters(original, overridden, dummy_parameter_regex):
"""Determine if the two methods have different parameters
They are considered to have different parameters if:
* they have different positional parameters, including different names
    * one of the methods has variadics, while the other does not
    * they have different keyword-only parameters.
"""
original_parameters = _positional_parameters(original)
overridden_parameters = _positional_parameters(overridden)
different_positional = _has_different_parameters(
original_parameters,
overridden_parameters,
dummy_parameter_regex)
different_kwonly = _has_different_parameters(
original.args.kwonlyargs,
overridden.args.kwonlyargs,
dummy_parameter_regex)
if original.name in PYMETHODS:
# Ignore the difference for special methods. If the parameter
# numbers are different, then that is going to be caught by
# unexpected-special-method-signature.
# If the names are different, it doesn't matter, since they can't
# be used as keyword arguments anyway.
different_positional = different_kwonly = False
# Both or none should have extra variadics, otherwise the method
    # loses or gains capabilities that are not reflected in the parent method,
# leading to potential inconsistencies in the code.
different_kwarg = sum(
1 for param in (original.args.kwarg, overridden.args.kwarg)
if not param) == 1
different_vararg = sum(
1 for param in (original.args.vararg, overridden.args.vararg)
if not param) == 1
return any((
different_positional,
different_kwarg,
different_vararg,
different_kwonly
))
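# Illustrative pair this predicate flags (hypothetical classes, not checker
# output):
#     class Base(object):
#         def run(self, path): ...
#     class Child(Base):
#         def run(self, path, verbose=False): ...  # extra positional -> True
# Parameters whose names match the configured dummy pattern (e.g. ``_path``)
# are skipped when comparing names.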
def _is_invalid_base_class(cls):
return cls.name in INVALID_BASE_CLASSES and is_builtin_object(cls)
def _has_data_descriptor(cls, attr):
attributes = cls.getattr(attr)
for attribute in attributes:
try:
for inferred in attribute.infer():
if isinstance(inferred, astroid.Instance):
try:
inferred.getattr('__get__')
inferred.getattr('__set__')
except astroid.NotFoundError:
continue
else:
return True
except astroid.InferenceError:
# Can't infer, avoid emitting a false positive in this case.
return True
return False
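# A "data descriptor" here is an attribute whose inferred type defines both
# __get__ and __set__, e.g.:
#     class Descriptor(object):
#         def __get__(self, instance, owner): ...
#         def __set__(self, instance, value): ...
# Data descriptors take precedence over instance attributes, so assigning
# over such an attribute does not actually hide it.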
def _called_in_methods(func, klass, methods):
""" Check if the func was called in any of the given methods,
belonging to the *klass*. Returns True if so, False otherwise.
"""
if not isinstance(func, astroid.FunctionDef):
return False
for method in methods:
try:
            inferred = klass.getattr(method)
except astroid.NotFoundError:
continue
        for infer_method in inferred:
for call in infer_method.nodes_of_class(astroid.Call):
try:
bound = next(call.func.infer())
except (astroid.InferenceError, StopIteration):
continue
if not isinstance(bound, astroid.BoundMethod):
continue
func_obj = bound._proxied
if isinstance(func_obj, astroid.UnboundMethod):
func_obj = func_obj._proxied
if func_obj.name == func.name:
return True
return False
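# Illustration: with methods=('__init__',) this answers whether *func* is
# invoked as a bound method somewhere inside klass.__init__, e.g. a helper
# that the constructor delegates attribute definitions to.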
def _is_attribute_property(name, klass):
""" Check if the given attribute *name* is a property
in the given *klass*.
It will look for `property` calls or for functions
with the given name, decorated by `property` or `property`
subclasses.
Returns ``True`` if the name is a property in the given klass,
``False`` otherwise.
"""
try:
attributes = klass.getattr(name)
except astroid.NotFoundError:
return False
property_name = "{0}.property".format(BUILTINS)
for attr in attributes:
try:
            inferred = next(attr.infer())
except astroid.InferenceError:
continue
        if (isinstance(inferred, astroid.FunctionDef) and
                decorated_with_property(inferred)):
return True
        if inferred.pytype() == property_name:
return True
return False
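# Example of what counts as a property here (illustrative):
#     class Config(object):
#         @property
#         def path(self): ...
# _is_attribute_property('path', <ClassDef Config>) -> True. Plain
# attributes, or names whose value cannot be inferred, yield False.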
def _has_bare_super_call(fundef_node):
for call in fundef_node.nodes_of_class(astroid.Call):
func = call.func
if (isinstance(func, astroid.Name) and
func.name == 'super' and
not call.args):
return True
return False
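# "Bare" means the zero-argument Python 3 form:
#     def __init__(self):
#         super().__init__()               # bare -> True
#         super(Child, self).__init__()    # explicit arguments -> not bare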
def _safe_infer_call_result(node, caller, context=None):
"""
Safely infer the return value of a function.
Returns None if inference failed or if there is some ambiguity (more than
    one node has been inferred). Otherwise returns the inferred value.
"""
    try:
        inferred_values = node.infer_call_result(caller, context=context)
        value = next(inferred_values)
    except astroid.InferenceError:
        return  # inference failed
    except StopIteration:
        return  # no values inferred
    try:
        next(inferred_values)
        return  # there is ambiguity on the inferred node
    except astroid.InferenceError:
        return  # there is some kind of ambiguity
    except StopIteration:
        return value
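# Sketch of the intended contract (``func`` is any FunctionDef node):
#     result = _safe_infer_call_result(func, func)
#     if result is not None:
#         ...  # exactly one return value could be inferred
# Any failure or ambiguity collapses to None, so callers never see
# partially-inferred results.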
def _has_same_layout_slots(slots, assigned_value):
inferred = next(assigned_value.infer())
if isinstance(inferred, astroid.ClassDef):
other_slots = inferred.slots()
if all(first_slot and second_slot and first_slot.value == second_slot.value
for (first_slot, second_slot) in six.moves.zip_longest(slots, other_slots)):
return True
return False
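# Illustration: two classes that both declare ``__slots__ = ('a', 'b')`` are
# considered layout-compatible; zip_longest ensures that a missing or extra
# slot on either side makes the comparison fail.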
MSGS = {
'F0202': ('Unable to check methods signature (%s / %s)',
'method-check-failed',
'Used when Pylint has been unable to check methods signature '
              'compatibility for an unexpected reason. Please report this '
              'kind of issue if you can\'t make sense of it.'),
'E0202': ('An attribute defined in %s line %s hides this method',
'method-hidden',
'Used when a class defines a method which is hidden by an '
'instance attribute from an ancestor class or set by some '
'client code.'),
'E0203': ('Access to member %r before its definition line %s',
'access-member-before-definition',
'Used when an instance member is accessed before it\'s actually '
'assigned.'),
'W0201': ('Attribute %r defined outside __init__',
'attribute-defined-outside-init',
'Used when an instance attribute is defined outside the __init__ '
'method.'),
'W0212': ('Access to a protected member %s of a client class', # E0214
'protected-access',
'Used when a protected member (i.e. class member with a name '
'beginning with an underscore) is access outside the class or a '
'descendant of the class where it\'s defined.'),
'E0211': ('Method has no argument',
'no-method-argument',
'Used when a method which should have the bound instance as '
'first argument has no argument defined.'),
'E0213': ('Method should have "self" as first argument',
'no-self-argument',
              'Used when a method has a first argument different from '
              '"self". This is considered an error since this is such '
              'a common convention that you shouldn\'t break it!'),
'C0202': ('Class method %s should have %s as first argument',
'bad-classmethod-argument',
'Used when a class method has a first argument named differently '
'than the value specified in valid-classmethod-first-arg option '
'(default to "cls"), recommended to easily differentiate them '
'from regular instance methods.'),
'C0203': ('Metaclass method %s should have %s as first argument',
'bad-mcs-method-argument',
'Used when a metaclass method has a first argument named '
'differently than the value specified in valid-classmethod-first'
'-arg option (default to "cls"), recommended to easily '
'differentiate them from regular instance methods.'),
'C0204': ('Metaclass class method %s should have %s as first argument',
'bad-mcs-classmethod-argument',
'Used when a metaclass class method has a first argument named '
'differently than the value specified in valid-metaclass-'
'classmethod-first-arg option (default to "mcs"), recommended to '
'easily differentiate them from regular instance methods.'),
'W0211': ('Static method with %r as first argument',
'bad-staticmethod-argument',
'Used when a static method has "self" or a value specified in '
'valid-classmethod-first-arg option or '
'valid-metaclass-classmethod-first-arg option as first argument.'
),
'R0201': ('Method could be a function',
'no-self-use',
'Used when a method doesn\'t use its bound instance, and so could '
'be written as a function.'
),
'W0221': ('Parameters differ from %s %r method',
'arguments-differ',
'Used when a method has a different number of arguments than in '
'the implemented interface or in an overridden method.'),
'W0222': ('Signature differs from %s %r method',
'signature-differs',
'Used when a method signature is different than in the '
'implemented interface or in an overridden method.'),
'W0223': ('Method %r is abstract in class %r but is not overridden',
'abstract-method',
'Used when an abstract method (i.e. raise NotImplementedError) is '
              'not overridden in a concrete class.'
),
'W0231': ('__init__ method from base class %r is not called',
'super-init-not-called',
              'Used when an ancestor class has an __init__ method '
'which is not called by a derived class.'),
'W0232': ('Class has no __init__ method',
'no-init',
              'Used when a class has no __init__ method, and neither do '
              'its parent classes.'),
'W0233': ('__init__ method from a non direct base class %r is called',
'non-parent-init-called',
'Used when an __init__ method is called on a class which is not '
'in the direct ancestors for the analysed class.'),
'W0235': ('Useless super delegation in method %r',
'useless-super-delegation',
'Used whenever we can detect that an overridden method is useless, '
'relying on super() delegation to do the same thing as another method '
'from the MRO.'),
'E0236': ('Invalid object %r in __slots__, must contain '
'only non empty strings',
'invalid-slots-object',
'Used when an invalid (non-string) object occurs in __slots__.'),
'E0237': ('Assigning to attribute %r not defined in class slots',
'assigning-non-slot',
'Used when assigning to an attribute not defined '
'in the class slots.'),
'E0238': ('Invalid __slots__ object',
'invalid-slots',
'Used when an invalid __slots__ is found in class. '
'Only a string, an iterable or a sequence is permitted.'),
'E0239': ('Inheriting %r, which is not a class.',
'inherit-non-class',
'Used when a class inherits from something which is not a '
'class.'),
'E0240': ('Inconsistent method resolution order for class %r',
'inconsistent-mro',
'Used when a class has an inconsistent method resolution order.'),
'E0241': ('Duplicate bases for class %r',
'duplicate-bases',
'Used when a class has duplicate bases.'),
'R0202': ('Consider using a decorator instead of calling classmethod',
'no-classmethod-decorator',
'Used when a class method is defined without using the decorator '
'syntax.'),
'R0203': ('Consider using a decorator instead of calling staticmethod',
'no-staticmethod-decorator',
'Used when a static method is defined without using the decorator '
'syntax.'),
'C0205': ('Class __slots__ should be a non-string iterable',
'single-string-used-for-slots',
'Used when a class __slots__ is a simple string, rather '
'than an iterable.'),
}
class ScopeAccessMap(object):
"""Store the accessed variables per scope."""
def __init__(self):
self._scopes = collections.defaultdict(
lambda: collections.defaultdict(list)
)
def set_accessed(self, node):
"""Set the given node as accessed."""
frame = node_frame_class(node)
if frame is None:
# The node does not live in a class.
return
self._scopes[frame][node.attrname].append(node)
def accessed(self, scope):
"""Get the accessed variables for the given scope."""
return self._scopes.get(scope, {})
class ClassChecker(BaseChecker):
"""checks for :
* methods without self as first argument
* overridden methods signature
* access only to existent members via self
* attributes not defined in the __init__ method
* unreachable code
"""
__implements__ = (IAstroidChecker,)
# configuration section name
name = 'classes'
# messages
msgs = MSGS
priority = -2
# configuration options
options = (('defining-attr-methods',
{'default' : ('__init__', '__new__', 'setUp'),
'type' : 'csv',
'metavar' : '<method names>',
'help' : 'List of method names used to declare (i.e. assign) \
instance attributes.'}
),
('valid-classmethod-first-arg',
{'default' : ('cls',),
'type' : 'csv',
'metavar' : '<argument names>',
'help' : 'List of valid names for the first argument in \
a class method.'}
),
('valid-metaclass-classmethod-first-arg',
{'default' : ('mcs',),
'type' : 'csv',
'metavar' : '<argument names>',
'help' : 'List of valid names for the first argument in \
a metaclass class method.'}
),
('exclude-protected',
{
'default': (
# namedtuple public API.
'_asdict', '_fields', '_replace', '_source', '_make'),
'type': 'csv',
'metavar': '<protected access exclusions>',
'help': ('List of member names, which should be excluded '
'from the protected access warning.')}
))
def __init__(self, linter=None):
BaseChecker.__init__(self, linter)
self._accessed = ScopeAccessMap()
self._first_attrs = []
self._meth_could_be_func = None
@decorators.cachedproperty
def _dummy_rgx(self):
return get_global_option(
self, 'dummy-variables-rgx', default=None)
@decorators.cachedproperty
def _ignore_mixin(self):
return get_global_option(
self, 'ignore-mixin-members', default=True)
def visit_classdef(self, node):
"""init visit variable _accessed
"""
self._check_bases_classes(node)
# if not an exception or a metaclass
if node.type == 'class' and has_known_bases(node):
try:
node.local_attr('__init__')
except astroid.NotFoundError:
self.add_message('no-init', args=node, node=node)
self._check_slots(node)
self._check_proper_bases(node)
self._check_consistent_mro(node)
def _check_consistent_mro(self, node):
"""Detect that a class has a consistent mro or duplicate bases."""
try:
node.mro()
except InconsistentMroError:
self.add_message('inconsistent-mro', args=node.name, node=node)
except DuplicateBasesError:
self.add_message('duplicate-bases', args=node.name, node=node)
except NotImplementedError:
# Old style class, there's no mro so don't do anything.
pass
def _check_proper_bases(self, node):
"""
Detect that a class inherits something which is not
a class or a type.
"""
for base in node.bases:
ancestor = safe_infer(base)
if ancestor in (astroid.YES, None):
continue
if (isinstance(ancestor, astroid.Instance) and
ancestor.is_subtype_of('%s.type' % (BUILTINS,))):
continue
if (not isinstance(ancestor, astroid.ClassDef) or
_is_invalid_base_class(ancestor)):
self.add_message('inherit-non-class',
args=base.as_string(), node=node)
def leave_classdef(self, cnode):
"""close a class node:
check that instance attributes are defined in __init__ and check
access to existent members
"""
# check access to existent members on non metaclass classes
if self._ignore_mixin and cnode.name[-5:].lower() == 'mixin':
# We are in a mixin class. No need to try to figure out if
            # something is missing, since it is most likely that it will
            # be missing.
return
accessed = self._accessed.accessed(cnode)
if cnode.type != 'metaclass':
self._check_accessed_members(cnode, accessed)
# checks attributes are defined in an allowed method such as __init__
if not self.linter.is_message_enabled('attribute-defined-outside-init'):
return
defining_methods = self.config.defining_attr_methods
current_module = cnode.root()
for attr, nodes in six.iteritems(cnode.instance_attrs):
            # skip nodes which are not in the current module, as they may
            # screw up the output while not being worth checking
nodes = [n for n in nodes if not
isinstance(n.statement(), (astroid.Delete, astroid.AugAssign))
and n.root() is current_module]
if not nodes:
continue # error detected by typechecking
            # check if any method the attribute is defined in is a defining method
if any(node.frame().name in defining_methods
for node in nodes):
continue
# check attribute is defined in a parent's __init__
for parent in cnode.instance_attr_ancestors(attr):
attr_defined = False
                # check if any parent method the attribute is defined in is a defining method
for node in parent.instance_attrs[attr]:
if node.frame().name in defining_methods:
attr_defined = True
if attr_defined:
# we're done :)
break
else:
# check attribute is defined as a class attribute
try:
cnode.local_attr(attr)
except astroid.NotFoundError:
for node in nodes:
if node.frame().name not in defining_methods:
# If the attribute was set by a call in any
# of the defining methods, then don't emit
# the warning.
if _called_in_methods(node.frame(), cnode,
defining_methods):
continue
self.add_message('attribute-defined-outside-init',
args=attr, node=node)
def visit_functiondef(self, node):
"""check method arguments, overriding"""
# ignore actual functions
if not node.is_method():
return
self._check_useless_super_delegation(node)
klass = node.parent.frame()
self._meth_could_be_func = True
# check first argument is self if this is actually a method
self._check_first_arg_for_type(node, klass.type == 'metaclass')
if node.name == '__init__':
self._check_init(node)
return
# check signature if the method overloads inherited method
for overridden in klass.local_attr_ancestors(node.name):
# get astroid for the searched method
try:
meth_node = overridden[node.name]
except KeyError:
# we have found the method but it's not in the local
# dictionary.
# This may happen with astroid build from living objects
continue
if not isinstance(meth_node, astroid.FunctionDef):
continue
self._check_signature(node, meth_node, 'overridden', klass)
break
if node.decorators:
for decorator in node.decorators.nodes:
if isinstance(decorator, astroid.Attribute) and \
decorator.attrname in ('getter', 'setter', 'deleter'):
# attribute affectation will call this method, not hiding it
return
if isinstance(decorator, astroid.Name) and decorator.name == 'property':
# attribute affectation will either call a setter or raise
# an attribute error, anyway not hiding the function
return
# check if the method is hidden by an attribute
try:
overridden = klass.instance_attr(node.name)[0] # XXX
overridden_frame = overridden.frame()
if (isinstance(overridden_frame, astroid.FunctionDef)
and overridden_frame.type == 'method'):
overridden_frame = overridden_frame.parent.frame()
if (isinstance(overridden_frame, astroid.ClassDef)
and klass.is_subtype_of(overridden_frame.qname())):
args = (overridden.root().name, overridden.fromlineno)
self.add_message('method-hidden', args=args, node=node)
except astroid.NotFoundError:
pass
visit_asyncfunctiondef = visit_functiondef
def _check_useless_super_delegation(self, function):
        '''Check if the given function node is a useless method override
        We consider it *useless* if it uses the super() builtin, but adds
        nothing whatsoever compared to not implementing the method at all.
        If the method uses super() to delegate an operation to the rest of
        the MRO, the method called is the same as the current one, and the
        arguments passed to super() are the same as the parameters that
        were passed to this method, then the method could be removed
        altogether, letting other implementations take precedence.
'''
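        # A hypothetical delegation this check would flag:
        #
        #     class Child(Parent):
        #         def method(self, arg):
        #             return super(Child, self).method(arg)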
if not function.is_method():
return
if function.decorators:
            # Decorators change the behaviour, so this is not a plain delegation.
return
body = function.body
if len(body) != 1:
# Multiple statements, which means this overridden method
# could do multiple things we are not aware of.
return
statement = body[0]
if not isinstance(statement, (astroid.Expr, astroid.Return)):
            # Doing something other than what we are interested in.
return
call = statement.value
if not isinstance(call, astroid.Call):
return
if not isinstance(call.func, astroid.Attribute):
# Not a super() attribute access.
return
# Should be a super call.
try:
super_call = next(call.func.expr.infer())
except astroid.InferenceError:
return
else:
if not isinstance(super_call, objects.Super):
return
# The name should be the same.
if call.func.attrname != function.name:
return
# Should be a super call with the MRO pointer being the current class
# and the type being the current instance.
current_scope = function.parent.scope()
if super_call.mro_pointer != current_scope:
return
if not isinstance(super_call.type, astroid.Instance):
return
if super_call.type.name != current_scope.name:
return
# Detect if the parameters are the same as the call's arguments.
params = _signature_from_arguments(function.args)
args = _signature_from_call(call)
if _definition_equivalent_to_call(params, args):
self.add_message('useless-super-delegation', node=function,
args=(function.name, ))
def _check_slots(self, node):
if '__slots__' not in node.locals:
return
for slots in node.igetattr('__slots__'):
# check if __slots__ is a valid type
if slots is astroid.YES:
continue
if not is_iterable(slots) and not is_comprehension(slots):
self.add_message('invalid-slots', node=node)
continue
if isinstance(slots, astroid.Const):
# a string, ignore the following checks
self.add_message('single-string-used-for-slots', node=node)
continue
if not hasattr(slots, 'itered'):
# we can't obtain the values, maybe a .deque?
continue
if isinstance(slots, astroid.Dict):
values = [item[0] for item in slots.items]
else:
values = slots.itered()
if values is astroid.YES:
return
for elt in values:
try:
self._check_slots_elt(elt)
except astroid.InferenceError:
continue
def _check_slots_elt(self, elt):
for infered in elt.infer():
if infered is astroid.Uninferable:
continue
if (not isinstance(infered, astroid.Const) or
not isinstance(infered.value, six.string_types)):
self.add_message('invalid-slots-object',
args=infered.as_string(),
node=elt)
continue
if not infered.value:
self.add_message('invalid-slots-object',
args=infered.as_string(),
node=elt)
def leave_functiondef(self, node):
"""on method node, check if this method couldn't be a function
        ignore class, static and abstract methods, the initializer and
        methods overridden from a parent class.
"""
if node.is_method():
if node.args.args is not None:
self._first_attrs.pop()
if not self.linter.is_message_enabled('no-self-use'):
return
class_node = node.parent.frame()
if (self._meth_could_be_func and node.type == 'method'
and node.name not in PYMETHODS
and not (node.is_abstract() or
overrides_a_method(class_node, node.name) or
decorated_with_property(node) or
(six.PY3 and _has_bare_super_call(node)))):
self.add_message('no-self-use', node=node)
def visit_attribute(self, node):
"""check if the getattr is an access to a class member
if so, register it. Also check for access to protected
class member from outside its class (but ignore __special__
methods)
"""
# Check self
if self._uses_mandatory_method_param(node):
self._accessed.set_accessed(node)
return
if not self.linter.is_message_enabled('protected-access'):
return
self._check_protected_attribute_access(node)
def visit_assignattr(self, node):
if (isinstance(node.assign_type(), astroid.AugAssign) and
self._uses_mandatory_method_param(node)):
self._accessed.set_accessed(node)
self._check_in_slots(node)
def _check_in_slots(self, node):
""" Check that the given AssignAttr node
is defined in the class slots.
"""
infered = safe_infer(node.expr)
if infered and isinstance(infered, astroid.Instance):
klass = infered._proxied
if '__slots__' not in klass.locals or not klass.newstyle:
return
slots = klass.slots()
if slots is None:
return
# If any ancestor doesn't use slots, the slots
# defined for this class are superfluous.
if any('__slots__' not in ancestor.locals and
ancestor.name != 'object'
for ancestor in klass.ancestors()):
return
if not any(slot.value == node.attrname for slot in slots):
# If we have a '__dict__' in slots, then
# assigning any name is valid.
if not any(slot.value == '__dict__' for slot in slots):
if _is_attribute_property(node.attrname, klass):
# Properties circumvent the slots mechanism,
# so we should not emit a warning for them.
return
if (node.attrname in klass.locals
and _has_data_descriptor(klass, node.attrname)):
# Descriptors circumvent the slots mechanism as well.
return
if (node.attrname == '__class__'
and _has_same_layout_slots(slots, node.parent.value)):
return
self.add_message('assigning-non-slot',
args=(node.attrname, ), node=node)
@check_messages('protected-access', 'no-classmethod-decorator',
'no-staticmethod-decorator')
def visit_assign(self, assign_node):
self._check_classmethod_declaration(assign_node)
node = assign_node.targets[0]
if not isinstance(node, astroid.AssignAttr):
return
if self._uses_mandatory_method_param(node):
return
self._check_protected_attribute_access(node)
def _check_classmethod_declaration(self, node):
"""Checks for uses of classmethod() or staticmethod()
        when a @classmethod or @staticmethod decorator should be used instead.
A message will be emitted only if the assignment is at a class scope
and only if the classmethod's argument belongs to the class where it
is defined.
`node` is an assign node.
"""
if not isinstance(node.value, astroid.Call):
return
# check the function called is "classmethod" or "staticmethod"
func = node.value.func
if (not isinstance(func, astroid.Name) or
func.name not in ('classmethod', 'staticmethod')):
return
msg = ('no-classmethod-decorator' if func.name == 'classmethod' else
'no-staticmethod-decorator')
# assignment must be at a class scope
parent_class = node.scope()
if not isinstance(parent_class, astroid.ClassDef):
return
# Check if the arg passed to classmethod is a class member
classmeth_arg = node.value.args[0]
if not isinstance(classmeth_arg, astroid.Name):
return
method_name = classmeth_arg.name
if any(method_name == member.name
for member in parent_class.mymethods()):
self.add_message(msg, node=node.targets[0])
def _check_protected_attribute_access(self, node):
'''Given an attribute access node (set or get), check if attribute
access is legitimate. Call _check_first_attr with node before calling
this method. Valid cases are:
* self._attr in a method or cls._attr in a classmethod. Checked by
_check_first_attr.
* Klass._attr inside "Klass" class.
* Klass2._attr inside "Klass" class when Klass2 is a base class of
Klass.
'''
attrname = node.attrname
if (is_attr_protected(attrname) and
attrname not in self.config.exclude_protected):
klass = node_frame_class(node)
# XXX infer to be more safe and less dirty ??
# in classes, check we are not getting a parent method
# through the class object or through super
callee = node.expr.as_string()
# We are not in a class, no remaining valid case
if klass is None:
self.add_message('protected-access', node=node, args=attrname)
return
# If the expression begins with a call to super, that's ok.
if isinstance(node.expr, astroid.Call) and \
isinstance(node.expr.func, astroid.Name) and \
node.expr.func.name == 'super':
return
# If the expression begins with a call to type(self), that's ok.
if self._is_type_self_call(node.expr):
return
            # We are in a class; the one remaining valid case is Klass._attr
            # inside Klass
if not (callee == klass.name or callee in klass.basenames):
# Detect property assignments in the body of the class.
# This is acceptable:
#
# class A:
# b = property(lambda: self._b)
stmt = node.parent.statement()
if (isinstance(stmt, astroid.Assign)
and len(stmt.targets) == 1
and isinstance(stmt.targets[0], astroid.AssignName)):
name = stmt.targets[0].name
if _is_attribute_property(name, klass):
return
self.add_message('protected-access', node=node, args=attrname)
def _is_type_self_call(self, expr):
return (isinstance(expr, astroid.Call) and
isinstance(expr.func, astroid.Name) and
expr.func.name == 'type' and len(expr.args) == 1 and
self._is_mandatory_method_param(expr.args[0]))
def visit_name(self, node):
"""check if the name handle an access to a class member
if so, register it
"""
if self._first_attrs and (node.name == self._first_attrs[-1] or
not self._first_attrs[-1]):
self._meth_could_be_func = False
def _check_accessed_members(self, node, accessed):
"""check that accessed members are defined"""
# XXX refactor, probably much simpler now that E0201 is in type checker
excs = ('AttributeError', 'Exception', 'BaseException')
for attr, nodes in six.iteritems(accessed):
try:
# is it a class attribute ?
node.local_attr(attr)
# yes, stop here
continue
except astroid.NotFoundError:
pass
# is it an instance attribute of a parent class ?
try:
next(node.instance_attr_ancestors(attr))
# yes, stop here
continue
except StopIteration:
pass
# is it an instance attribute ?
try:
defstmts = node.instance_attr(attr)
except astroid.NotFoundError:
pass
else:
# filter out augment assignment nodes
defstmts = [stmt for stmt in defstmts if stmt not in nodes]
if not defstmts:
# only augment assignment for this node, no-member should be
# triggered by the typecheck checker
continue
# filter defstmts to only pick the first one when there are
# several assignments in the same scope
scope = defstmts[0].scope()
defstmts = [stmt for i, stmt in enumerate(defstmts)
if i == 0 or stmt.scope() is not scope]
# if there are still more than one, don't attempt to be smarter
# than we can be
if len(defstmts) == 1:
defstmt = defstmts[0]
# check that if the node is accessed in the same method as
# it's defined, it's accessed after the initial assignment
frame = defstmt.frame()
lno = defstmt.fromlineno
for _node in nodes:
if _node.frame() is frame and _node.fromlineno < lno \
and not astroid.are_exclusive(_node.statement(), defstmt, excs):
self.add_message('access-member-before-definition',
node=_node, args=(attr, lno))
def _check_first_arg_for_type(self, node, metaclass=0):
"""check the name of first argument, expect:
* 'self' for a regular method
* 'cls' for a class method or a metaclass regular method (actually
valid-classmethod-first-arg value)
* 'mcs' for a metaclass class method (actually
valid-metaclass-classmethod-first-arg)
* not one of the above for a static method
"""
# don't care about functions with unknown argument (builtins)
if node.args.args is None:
return
first_arg = node.args.args and node.argnames()[0]
self._first_attrs.append(first_arg)
first = self._first_attrs[-1]
# static method
if node.type == 'staticmethod':
if (first_arg == 'self' or
first_arg in self.config.valid_classmethod_first_arg or
first_arg in self.config.valid_metaclass_classmethod_first_arg):
self.add_message('bad-staticmethod-argument', args=first, node=node)
return
self._first_attrs[-1] = None
# class / regular method with no args
elif not node.args.args:
self.add_message('no-method-argument', node=node)
# metaclass
elif metaclass:
# metaclass __new__ or classmethod
if node.type == 'classmethod':
self._check_first_arg_config(
first,
self.config.valid_metaclass_classmethod_first_arg, node,
'bad-mcs-classmethod-argument', node.name)
# metaclass regular method
else:
self._check_first_arg_config(
first,
self.config.valid_classmethod_first_arg, node,
'bad-mcs-method-argument',
node.name)
# regular class
else:
# class method
if node.type == 'classmethod':
self._check_first_arg_config(
first,
self.config.valid_classmethod_first_arg, node,
'bad-classmethod-argument',
node.name)
# regular method without self as argument
elif first != 'self':
self.add_message('no-self-argument', node=node)
def _check_first_arg_config(self, first, config, node, message,
method_name):
if first not in config:
if len(config) == 1:
valid = repr(config[0])
else:
valid = ', '.join(repr(v) for v in config[:-1])
valid = '%s or %r' % (valid, config[-1])
self.add_message(message, args=(method_name, valid), node=node)
def _check_bases_classes(self, node):
"""check that the given class node implements abstract methods from
base classes
"""
def is_abstract(method):
return method.is_abstract(pass_is_abstract=False)
# check if this class abstract
if class_is_abstract(node):
return
methods = sorted(
unimplemented_abstract_methods(node, is_abstract).items(),
key=lambda item: item[0],
)
for name, method in methods:
owner = method.parent.frame()
if owner is node:
continue
# owner is not this class, it must be a parent class
# check that the ancestor's method is not abstract
if name in node.locals:
# it is redefined as an attribute or with a descriptor
continue
self.add_message('abstract-method', node=node,
args=(name, owner.name))
def _check_init(self, node):
"""check that the __init__ method call super or ancestors'__init__
method
"""
if (not self.linter.is_message_enabled('super-init-not-called') and
not self.linter.is_message_enabled('non-parent-init-called')):
return
klass_node = node.parent.frame()
to_call = _ancestors_to_call(klass_node)
not_called_yet = dict(to_call)
for stmt in node.nodes_of_class(astroid.Call):
expr = stmt.func
if not isinstance(expr, astroid.Attribute) \
or expr.attrname != '__init__':
continue
# skip the test if using super
if isinstance(expr.expr, astroid.Call) and \
isinstance(expr.expr.func, astroid.Name) and \
expr.expr.func.name == 'super':
return
try:
for klass in expr.expr.infer():
if klass is astroid.Uninferable:
continue
# The infered klass can be super(), which was
# assigned to a variable and the `__init__`
# was called later.
#
# base = super()
# base.__init__(...)
if (isinstance(klass, astroid.Instance) and
isinstance(klass._proxied, astroid.ClassDef) and
is_builtin_object(klass._proxied) and
klass._proxied.name == 'super'):
return
elif isinstance(klass, objects.Super):
return
try:
del not_called_yet[klass]
except KeyError:
if klass not in to_call:
self.add_message('non-parent-init-called',
node=expr, args=klass.name)
except astroid.InferenceError:
continue
for klass, method in six.iteritems(not_called_yet):
cls = node_frame_class(method)
if klass.name == 'object' or (cls and cls.name == 'object'):
continue
self.add_message('super-init-not-called', args=klass.name, node=node)
def _check_signature(self, method1, refmethod, class_type, cls):
"""check that the signature of the two given methods match
"""
if not (isinstance(method1, astroid.FunctionDef)
and isinstance(refmethod, astroid.FunctionDef)):
self.add_message('method-check-failed',
args=(method1, refmethod), node=method1)
return
instance = cls.instantiate_class()
method1 = function_to_method(method1, instance)
refmethod = function_to_method(refmethod, instance)
# Don't care about functions with unknown argument (builtins).
if method1.args.args is None or refmethod.args.args is None:
return
# Ignore private to class methods.
if is_attr_private(method1.name):
return
# Ignore setters, they have an implicit extra argument,
        # which shouldn't be taken into consideration.
if method1.decorators:
for decorator in method1.decorators.nodes:
if (isinstance(decorator, astroid.Attribute) and
decorator.attrname == 'setter'):
return
if _different_parameters(
refmethod, method1,
dummy_parameter_regex=self._dummy_rgx):
self.add_message('arguments-differ',
args=(class_type, method1.name),
node=method1)
elif len(method1.args.defaults) < len(refmethod.args.defaults):
self.add_message('signature-differs',
args=(class_type, method1.name),
node=method1)
def _uses_mandatory_method_param(self, node):
"""Check that attribute lookup name use first attribute variable name
Name is `self` for method, `cls` for classmethod and `mcs` for metaclass.
"""
return self._is_mandatory_method_param(node.expr)
def _is_mandatory_method_param(self, node):
"""Check if astroid.Name corresponds to first attribute variable name
Name is `self` for method, `cls` for classmethod and `mcs` for metaclass.
"""
return (self._first_attrs and isinstance(node, astroid.Name)
and node.name == self._first_attrs[-1])
class SpecialMethodsChecker(BaseChecker):
"""Checker which verifies that special methods
are implemented correctly.
"""
__implements__ = (IAstroidChecker, )
name = 'classes'
msgs = {
'E0301': ('__iter__ returns non-iterator',
'non-iterator-returned',
'Used when an __iter__ method returns something which is not an '
                  'iterator (i.e. has no `%s` method)' % NEXT_METHOD,
{'old_names': [('W0234', 'non-iterator-returned'),
('E0234', 'non-iterator-returned')]}),
'E0302': ('The special method %r expects %s param(s), %d %s given',
'unexpected-special-method-signature',
'Emitted when a special method was defined with an '
'invalid number of parameters. If it has too few or '
'too many, it might not work at all.',
{'old_names': [('E0235', 'bad-context-manager')]}),
'E0303': ('__len__ does not return non-negative integer',
'invalid-length-returned',
                  'Used when a __len__ method returns something which is not a '
'non-negative integer', {}),
}
priority = -2
@check_messages('unexpected-special-method-signature',
'non-iterator-returned', 'invalid-length-returned')
def visit_functiondef(self, node):
if not node.is_method():
return
if node.name == '__iter__':
self._check_iter(node)
if node.name == '__len__':
self._check_len(node)
if node.name in PYMETHODS:
self._check_unexpected_method_signature(node)
visit_asyncfunctiondef = visit_functiondef
def _check_unexpected_method_signature(self, node):
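        # For example (hypothetical), `def __exit__(self, exc)` declares one
        # mandatory parameter while three are expected, so
        # unexpected-special-method-signature is emitted.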
expected_params = SPECIAL_METHODS_PARAMS[node.name]
if expected_params is None:
# This can support a variable number of parameters.
return
if not node.args.args and not node.args.vararg:
# Method has no parameter, will be caught
# by no-method-argument.
return
if decorated_with(node, [BUILTINS + ".staticmethod"]):
            # We expect not to take self into consideration.
all_args = node.args.args
else:
all_args = node.args.args[1:]
mandatory = len(all_args) - len(node.args.defaults)
optional = len(node.args.defaults)
current_params = mandatory + optional
if isinstance(expected_params, tuple):
# The expected number of parameters can be any value from this
# tuple, although the user should implement the method
            # to take all of them into consideration.
emit = mandatory not in expected_params
expected_params = "between %d or %d" % expected_params
else:
# If the number of mandatory parameters doesn't
# suffice, the expected parameters for this
# function will be deduced from the optional
# parameters.
rest = expected_params - mandatory
if rest == 0:
emit = False
elif rest < 0:
emit = True
elif rest > 0:
emit = not ((optional - rest) >= 0 or node.args.vararg)
if emit:
verb = "was" if current_params <= 1 else "were"
self.add_message('unexpected-special-method-signature',
args=(node.name, expected_params, current_params, verb),
node=node)
@staticmethod
def _is_iterator(node):
if node is astroid.YES:
# Just ignore YES objects.
return True
if isinstance(node, Generator):
            # Generators can be iterated.
return True
if isinstance(node, astroid.Instance):
try:
node.local_attr(NEXT_METHOD)
return True
except astroid.NotFoundError:
pass
elif isinstance(node, astroid.ClassDef):
metaclass = node.metaclass()
if metaclass and isinstance(metaclass, astroid.ClassDef):
try:
metaclass.local_attr(NEXT_METHOD)
return True
except astroid.NotFoundError:
pass
return False
def _check_iter(self, node):
infered = _safe_infer_call_result(node, node)
if infered is not None:
if not self._is_iterator(infered):
self.add_message('non-iterator-returned', node=node)
def _check_len(self, node):
inferred = _safe_infer_call_result(node, node)
if not inferred:
return
if not isinstance(inferred, astroid.Const):
self.add_message('invalid-length-returned', node=node)
return
value = inferred.value
if not isinstance(value, six.integer_types) or value < 0:
self.add_message('invalid-length-returned', node=node)
def _ancestors_to_call(klass_node, method='__init__'):
"""return a dictionary where keys are the list of base classes providing
the queried method, and so that should/may be called from the method node
"""
to_call = {}
for base_node in klass_node.ancestors(recurs=False):
try:
to_call[base_node] = next(base_node.igetattr(method))
except astroid.InferenceError:
continue
return to_call
def node_method(node, method_name):
"""get astroid for <method_name> on the given class node, ensuring it
is a Function node
"""
for node_attr in node.local_attr(method_name):
        if isinstance(node_attr, astroid.FunctionDef):
return node_attr
raise astroid.NotFoundError(method_name)
def register(linter):
"""required method to auto register this checker """
linter.register_checker(ClassChecker(linter))
linter.register_checker(SpecialMethodsChecker(linter))
| 1 | 9,550 | I would prefer to just fix the occurences of this new check rather than disable them | PyCQA-pylint | py |
@@ -117,9 +117,9 @@ public class UITestUtils {
public void addHostedFeedData() throws IOException {
if (feedDataHosted) throw new IllegalStateException("addHostedFeedData was called twice on the same instance");
for (int i = 0; i < NUM_FEEDS; i++) {
- Feed feed = new Feed(0, null, "Title " + i, "http://example.com/" + i, "Description of feed " + i,
+ Feed feed = new Feed(0, null, "Title " + i, "https://news.google.com/news/rss?" + i, "Description of feed " + i,
"http://example.com/pay/feed" + i, "author " + i, "en", Feed.TYPE_RSS2, "feed" + i, null, null,
- "http://example.com/feed/src/" + i, false);
+ "https://news.google.com/news/rss?" + i, false);
// create items
List<FeedItem> items = new ArrayList<>(); | 1 | package de.test.antennapod.ui;
import android.content.Context;
import android.util.Log;
import de.danoeh.antennapod.core.event.FeedListUpdateEvent;
import de.danoeh.antennapod.core.event.QueueEvent;
import de.danoeh.antennapod.core.feed.Feed;
import de.danoeh.antennapod.core.feed.FeedItem;
import de.danoeh.antennapod.core.feed.FeedMedia;
import de.danoeh.antennapod.core.storage.PodDBAdapter;
import de.test.antennapod.util.service.download.HTTPBin;
import de.test.antennapod.util.syndication.feedgenerator.Rss2Generator;
import org.apache.commons.io.FileUtils;
import org.apache.commons.io.IOUtils;
import org.apache.commons.lang3.StringUtils;
import org.greenrobot.eventbus.EventBus;
import org.junit.Assert;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import java.util.Locale;
/**
* Utility methods for UI tests.
* Starts a web server that hosts feeds, episodes and images.
*/
public class UITestUtils {
private static final String TAG = UITestUtils.class.getSimpleName();
private static final int NUM_FEEDS = 5;
private static final int NUM_ITEMS_PER_FEED = 10;
private String testFileName = "3sec.mp3";
private boolean hostTextOnlyFeeds = false;
private final Context context;
private final HTTPBin server = new HTTPBin();
private File destDir;
private File hostedFeedDir;
private File hostedMediaDir;
public final List<Feed> hostedFeeds = new ArrayList<>();
public UITestUtils(Context context) {
this.context = context;
}
public void setup() throws IOException {
destDir = new File(context.getFilesDir(), "test/UITestUtils");
destDir.mkdirs();
hostedFeedDir = new File(destDir, "hostedFeeds");
hostedFeedDir.mkdir();
hostedMediaDir = new File(destDir, "hostedMediaDir");
hostedMediaDir.mkdir();
Assert.assertTrue(destDir.exists());
Assert.assertTrue(hostedFeedDir.exists());
Assert.assertTrue(hostedMediaDir.exists());
server.start();
}
public void tearDown() throws IOException {
FileUtils.deleteDirectory(destDir);
FileUtils.deleteDirectory(hostedMediaDir);
FileUtils.deleteDirectory(hostedFeedDir);
server.stop();
if (localFeedDataAdded) {
PodDBAdapter.deleteDatabase();
}
}
private String hostFeed(Feed feed) throws IOException {
File feedFile = new File(hostedFeedDir, feed.getTitle());
FileOutputStream out = new FileOutputStream(feedFile);
Rss2Generator generator = new Rss2Generator();
generator.writeFeed(feed, out, "UTF-8", 0);
out.close();
int id = server.serveFile(feedFile);
Assert.assertTrue(id != -1);
return String.format(Locale.US, "%s/files/%d", server.getBaseUrl(), id);
}
private String hostFile(File file) {
int id = server.serveFile(file);
Assert.assertTrue(id != -1);
return String.format(Locale.US, "%s/files/%d", server.getBaseUrl(), id);
}
private File newMediaFile(String name) throws IOException {
File mediaFile = new File(hostedMediaDir, name);
if (mediaFile.exists()) {
mediaFile.delete();
}
Assert.assertFalse(mediaFile.exists());
InputStream in = context.getAssets().open(testFileName);
Assert.assertNotNull(in);
FileOutputStream out = new FileOutputStream(mediaFile);
IOUtils.copy(in, out);
out.close();
return mediaFile;
}
private boolean feedDataHosted = false;
/**
* Adds feeds, images and episodes to the webserver for testing purposes.
*/
public void addHostedFeedData() throws IOException {
if (feedDataHosted) throw new IllegalStateException("addHostedFeedData was called twice on the same instance");
for (int i = 0; i < NUM_FEEDS; i++) {
Feed feed = new Feed(0, null, "Title " + i, "http://example.com/" + i, "Description of feed " + i,
"http://example.com/pay/feed" + i, "author " + i, "en", Feed.TYPE_RSS2, "feed" + i, null, null,
"http://example.com/feed/src/" + i, false);
// create items
List<FeedItem> items = new ArrayList<>();
for (int j = 0; j < NUM_ITEMS_PER_FEED; j++) {
FeedItem item = new FeedItem(j, "Feed " + (i+1) + ": Item " + (j+1), "item" + j,
"http://example.com/feed" + i + "/item/" + j, new Date(), FeedItem.UNPLAYED, feed);
items.add(item);
if (!hostTextOnlyFeeds) {
File mediaFile = newMediaFile("feed-" + i + "-episode-" + j + ".mp3");
item.setMedia(new FeedMedia(j, item, 0, 0, mediaFile.length(), "audio/mp3", null, hostFile(mediaFile), false, null, 0, 0));
}
}
feed.setItems(items);
feed.setDownload_url(hostFeed(feed));
hostedFeeds.add(feed);
}
feedDataHosted = true;
}
private boolean localFeedDataAdded = false;
/**
* Adds feeds, images and episodes to the local database. This method will also call addHostedFeedData if it has not
* been called yet.
*
* Adds one item of each feed to the queue and to the playback history.
*
* This method should NOT be called if the testing class wants to download the hosted feed data.
*
* @param downloadEpisodes true if episodes should also be marked as downloaded.
*/
public void addLocalFeedData(boolean downloadEpisodes) throws Exception {
if (localFeedDataAdded) {
Log.w(TAG, "addLocalFeedData was called twice on the same instance");
// might be a flaky test, this is actually not that severe
return;
}
if (!feedDataHosted) {
addHostedFeedData();
}
List<FeedItem> queue = new ArrayList<>();
for (Feed feed : hostedFeeds) {
feed.setDownloaded(true);
if (downloadEpisodes) {
for (FeedItem item : feed.getItems()) {
if (item.hasMedia()) {
FeedMedia media = item.getMedia();
int fileId = Integer.parseInt(StringUtils.substringAfter(media.getDownload_url(), "files/"));
media.setFile_url(server.accessFile(fileId).getAbsolutePath());
media.setDownloaded(true);
}
}
}
queue.add(feed.getItems().get(0));
if (feed.getItems().get(1).hasMedia()) {
feed.getItems().get(1).getMedia().setPlaybackCompletionDate(new Date());
}
}
localFeedDataAdded = true;
PodDBAdapter adapter = PodDBAdapter.getInstance();
adapter.open();
adapter.setCompleteFeed(hostedFeeds.toArray(new Feed[0]));
adapter.setQueue(queue);
adapter.close();
EventBus.getDefault().post(new FeedListUpdateEvent(hostedFeeds));
EventBus.getDefault().post(QueueEvent.setQueue(queue));
}
public void setMediaFileName(String filename) {
testFileName = filename;
}
public void setHostTextOnlyFeeds(boolean hostTextOnlyFeeds) {
this.hostTextOnlyFeeds = hostTextOnlyFeeds;
}
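    // Sketch of typical usage from a UI test (hypothetical, for illustration):
    //   UITestUtils utils = new UITestUtils(context);
    //   utils.setup();                // starts the local HTTPBin server
    //   utils.addLocalFeedData(true); // hosts feeds and fills the database
    //   // ... run UI assertions against the hosted data ...
    //   utils.tearDown();             // stops the server, deletes test files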
}
| 1 | 18,260 | The tests should be fixed in #4841, so this is no longer needed | AntennaPod-AntennaPod | java |
@@ -0,0 +1,17 @@
+<?php
+/**
+ * Copyright © Bold Brand Commerce Sp. z o.o. All rights reserved.
+ * See LICENSE.txt for license details.
+ */
+
+declare(strict_types=1);
+
+namespace Ergonode\Core\Infrastructure\Exception;
+
+class SerializationException extends SerializerException
+{
+ public function __construct(string $message, \Throwable $previous = null)
+ {
+ parent::__construct($message, $previous);
+ }
+} | 1 | 1 | 9,350 | I think it should been in `SharedKernel` module. | ergonode-backend | php |
|
@@ -317,6 +317,9 @@ type ACMEChallengeSolverHTTP01IngressTemplate struct {
// will override the in-built values.
// +optional
ACMEChallengeSolverHTTP01IngressObjectMeta `json:"metadata"`
+
+ // OverrideNginxIngressWhitelistAnnotation add description here
+ OverrideNginxIngressWhitelistAnnotation string `json:"overrideNginxIngressWhitelistAnnotation,omitempty"`
}
type ACMEChallengeSolverHTTP01IngressObjectMeta struct { | 1 | /*
Copyright 2020 The cert-manager Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
corev1 "k8s.io/api/core/v1"
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
cmmeta "github.com/jetstack/cert-manager/pkg/apis/meta/v1"
)
// ACMEIssuer contains the specification for an ACME issuer.
// This uses the RFC8555 specification to obtain certificates by completing
// 'challenges' to prove ownership of domain identifiers.
// Earlier draft versions of the ACME specification are not supported.
type ACMEIssuer struct {
// Email is the email address to be associated with the ACME account.
// This field is optional, but it is strongly recommended to be set.
// It will be used to contact you in case of issues with your account or
// certificates, including expiry notification emails.
// This field may be updated after the account is initially registered.
// +optional
Email string `json:"email,omitempty"`
// Server is the URL used to access the ACME server's 'directory' endpoint.
// For example, for Let's Encrypt's staging endpoint, you would use:
// "https://acme-staging-v02.api.letsencrypt.org/directory".
// Only ACME v2 endpoints (i.e. RFC 8555) are supported.
Server string `json:"server"`
// PreferredChain is the chain to use if the ACME server outputs multiple.
// PreferredChain is no guarantee that this one gets delivered by the ACME
// endpoint.
// For example, for Let's Encrypt's DST crosssign you would use:
// "DST Root CA X3" or "ISRG Root X1" for the newer Let's Encrypt root CA.
// This value picks the first certificate bundle in the ACME alternative
// chains that has a certificate with this value as its issuer's CN
// +optional
// +kubebuilder:validation:MaxLength=64
PreferredChain string `json:"preferredChain"`
// Enables or disables validation of the ACME server TLS certificate.
// If true, requests to the ACME server will not have their TLS certificate
// validated (i.e. insecure connections will be allowed).
// Only enable this option in development environments.
// The cert-manager system installed roots will be used to verify connections
// to the ACME server if this is false.
// Defaults to false.
// +optional
SkipTLSVerify bool `json:"skipTLSVerify,omitempty"`
// ExternalAccountBinding is a reference to a CA external account of the ACME
// server.
// If set, upon registration cert-manager will attempt to associate the given
// external account credentials with the registered ACME account.
// +optional
ExternalAccountBinding *ACMEExternalAccountBinding `json:"externalAccountBinding,omitempty"`
// PrivateKey is the name of a Kubernetes Secret resource that will be used to
// store the automatically generated ACME account private key.
// Optionally, a `key` may be specified to select a specific entry within
// the named Secret resource.
// If `key` is not specified, a default of `tls.key` will be used.
PrivateKey cmmeta.SecretKeySelector `json:"privateKeySecretRef"`
// Solvers is a list of challenge solvers that will be used to solve
// ACME challenges for the matching domains.
// Solver configurations must be provided in order to obtain certificates
// from an ACME server.
// For more information, see: https://cert-manager.io/docs/configuration/acme/
// +optional
Solvers []ACMEChallengeSolver `json:"solvers,omitempty"`
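	// A minimal sketch of a matching issuer spec fragment (hypothetical
	// values; YAML shown for illustration only):
	//
	//   solvers:
	//   - http01:
	//       ingress:
	//         class: nginx
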
// Enables or disables generating a new ACME account key.
// If true, the Issuer resource will *not* request a new account but will expect
// the account key to be supplied via an existing secret.
// If false, the cert-manager system will generate a new ACME account key
// for the Issuer.
// Defaults to false.
// +optional
DisableAccountKeyGeneration bool `json:"disableAccountKeyGeneration,omitempty"`
// Enables requesting a Not After date on certificates that matches the
// duration of the certificate. This is not supported by all ACME servers
// like Let's Encrypt. If set to true when the ACME server does not support
// it it will create an error on the Order.
// Defaults to false.
// +optional
EnableDurationFeature bool `json:"enableDurationFeature,omitempty"`
}
// ACMEExternalAccountBinding is a reference to a CA external account of the ACME
// server.
type ACMEExternalAccountBinding struct {
// keyID is the ID of the CA key that the External Account is bound to.
KeyID string `json:"keyID"`
// keySecretRef is a Secret Key Selector referencing a data item in a Kubernetes
// Secret which holds the symmetric MAC key of the External Account Binding.
// The `key` is the index string that is paired with the key data in the
// Secret and should not be confused with the key data itself, or indeed with
// the External Account Binding keyID above.
// The secret key stored in the Secret **must** be un-padded, base64 URL
// encoded data.
Key cmmeta.SecretKeySelector `json:"keySecretRef"`
// Deprecated: keyAlgorithm field exists for historical compatibility
// reasons and should not be used. The algorithm is now hardcoded to HS256
// in golang/x/crypto/acme.
// +optional
KeyAlgorithm HMACKeyAlgorithm `json:"keyAlgorithm,omitempty"`
}
// HMACKeyAlgorithm is the name of a key algorithm used for HMAC encryption
// +kubebuilder:validation:Enum=HS256;HS384;HS512
type HMACKeyAlgorithm string
const (
HS256 HMACKeyAlgorithm = "HS256"
HS384 HMACKeyAlgorithm = "HS384"
HS512 HMACKeyAlgorithm = "HS512"
)
// An ACMEChallengeSolver describes how to solve ACME challenges for the issuer it is part of.
// A selector may be provided to use different solving strategies for different DNS names.
// Only one of HTTP01 or DNS01 must be provided.
type ACMEChallengeSolver struct {
// Selector selects a set of DNSNames on the Certificate resource that
// should be solved using this challenge solver.
// If not specified, the solver will be treated as the 'default' solver
// with the lowest priority, i.e. if any other solver has a more specific
// match, it will be used instead.
// +optional
Selector *CertificateDNSNameSelector `json:"selector,omitempty"`
// Configures cert-manager to attempt to complete authorizations by
// performing the HTTP01 challenge flow.
// It is not possible to obtain certificates for wildcard domain names
// (e.g. `*.example.com`) using the HTTP01 challenge mechanism.
// +optional
HTTP01 *ACMEChallengeSolverHTTP01 `json:"http01,omitempty"`
// Configures cert-manager to attempt to complete authorizations by
// performing the DNS01 challenge flow.
// +optional
DNS01 *ACMEChallengeSolverDNS01 `json:"dns01,omitempty"`
}
// CertificateDNSNameSelector selects certificates using a label selector, and
// can optionally select individual DNS names within those certificates.
// If both MatchLabels and DNSNames are empty, this selector will match all
// certificates and DNS names within them.
type CertificateDNSNameSelector struct {
// A label selector that is used to refine the set of certificate's that
// this challenge solver will apply to.
// +optional
MatchLabels map[string]string `json:"matchLabels,omitempty"`
// List of DNSNames that this solver will be used to solve.
// If specified and a match is found, a dnsNames selector will take
// precedence over a dnsZones selector.
// If multiple solvers match with the same dnsNames value, the solver
// with the most matching labels in matchLabels will be selected.
// If neither has more matches, the solver defined earlier in the list
// will be selected.
// +optional
DNSNames []string `json:"dnsNames,omitempty"`
// List of DNSZones that this solver will be used to solve.
// The most specific DNS zone match specified here will take precedence
// over other DNS zone matches, so a solver specifying sys.example.com
// will be selected over one specifying example.com for the domain
// www.sys.example.com.
// If multiple solvers match with the same dnsZones value, the solver
// with the most matching labels in matchLabels will be selected.
// If neither has more matches, the solver defined earlier in the list
// will be selected.
// +optional
DNSZones []string `json:"dnsZones,omitempty"`
}
// ACMEChallengeSolverHTTP01 contains configuration detailing how to solve
// HTTP01 challenges within a Kubernetes cluster.
// Typically this is accomplished through creating 'routes' of some description
// that configure ingress controllers to direct traffic to 'solver pods', which
// are responsible for responding to the ACME server's HTTP requests.
// Only one of Ingress / Gateway can be specified.
type ACMEChallengeSolverHTTP01 struct {
// The ingress based HTTP01 challenge solver will solve challenges by
// creating or modifying Ingress resources in order to route requests for
// '/.well-known/acme-challenge/XYZ' to 'challenge solver' pods that are
// provisioned by cert-manager for each Challenge to be completed.
// +optional
Ingress *ACMEChallengeSolverHTTP01Ingress `json:"ingress,omitempty"`
// The Gateway API is a sig-network community API that models service networking
// in Kubernetes (https://gateway-api.sigs.k8s.io/). The Gateway solver will
// create HTTPRoutes with the specified labels in the same namespace as the challenge.
// This solver is experimental, and fields / behaviour may change in the future.
// +optional
GatewayHTTPRoute *ACMEChallengeSolverHTTP01GatewayHTTPRoute `json:"gatewayHTTPRoute,omitempty"`
}
type ACMEChallengeSolverHTTP01Ingress struct {
// Optional service type for Kubernetes solver service. Supported values
// are NodePort or ClusterIP (default).
// +optional
ServiceType corev1.ServiceType `json:"serviceType,omitempty"`
// The ingress class to use when creating Ingress resources to solve ACME
// challenges that use this challenge solver.
// Only one of 'class' or 'name' may be specified.
// +optional
Class *string `json:"class,omitempty"`
// The name of the ingress resource that should have ACME challenge solving
// routes inserted into it in order to solve HTTP01 challenges.
// This is typically used in conjunction with ingress controllers like
// ingress-gce, which maintains a 1:1 mapping between external IPs and
// ingress resources.
// +optional
Name string `json:"name,omitempty"`
// Optional pod template used to configure the ACME challenge solver pods
// used for HTTP01 challenges.
// +optional
PodTemplate *ACMEChallengeSolverHTTP01IngressPodTemplate `json:"podTemplate,omitempty"`
// Optional ingress template used to configure the ACME challenge solver
// ingress used for HTTP01 challenges.
// +optional
IngressTemplate *ACMEChallengeSolverHTTP01IngressTemplate `json:"ingressTemplate,omitempty"`
}
// The ACMEChallengeSolverHTTP01GatewayHTTPRoute solver will create HTTPRoute objects for a Gateway class
// routing to an ACME challenge solver pod.
type ACMEChallengeSolverHTTP01GatewayHTTPRoute struct {
// Optional service type for Kubernetes solver service. Supported values
// are NodePort or ClusterIP (default).
// +optional
ServiceType corev1.ServiceType `json:"serviceType,omitempty"`
// The labels that cert-manager will use when creating the temporary
// HTTPRoute needed for solving the HTTP-01 challenge. These labels
// must match the label selector of at least one Gateway.
Labels map[string]string `json:"labels,omitempty"`
}
type ACMEChallengeSolverHTTP01IngressPodTemplate struct {
// ObjectMeta overrides for the pod used to solve HTTP01 challenges.
// Only the 'labels' and 'annotations' fields may be set.
// If labels or annotations overlap with in-built values, the values here
// will override the in-built values.
// +optional
ACMEChallengeSolverHTTP01IngressPodObjectMeta `json:"metadata"`
// PodSpec defines overrides for the HTTP01 challenge solver pod.
// Only the 'priorityClassName', 'nodeSelector', 'affinity',
// 'serviceAccountName' and 'tolerations' fields are supported currently.
// All other fields will be ignored.
// +optional
Spec ACMEChallengeSolverHTTP01IngressPodSpec `json:"spec"`
}
type ACMEChallengeSolverHTTP01IngressPodObjectMeta struct {
// Annotations that should be added to the create ACME HTTP01 solver pods.
// +optional
Annotations map[string]string `json:"annotations,omitempty"`
// Labels that should be added to the created ACME HTTP01 solver pods.
// +optional
Labels map[string]string `json:"labels,omitempty"`
}
type ACMEChallengeSolverHTTP01IngressPodSpec struct {
// NodeSelector is a selector which must be true for the pod to fit on a node.
// Selector which must match a node's labels for the pod to be scheduled on that node.
// More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
// +optional
NodeSelector map[string]string `json:"nodeSelector,omitempty"`
// If specified, the pod's scheduling constraints
// +optional
Affinity *corev1.Affinity `json:"affinity,omitempty"`
// If specified, the pod's tolerations.
// +optional
Tolerations []corev1.Toleration `json:"tolerations,omitempty"`
// If specified, the pod's priorityClassName.
// +optional
PriorityClassName string `json:"priorityClassName,omitempty"`
// If specified, the pod's service account
// +optional
ServiceAccountName string `json:"serviceAccountName,omitempty"`
}
type ACMEChallengeSolverHTTP01IngressTemplate struct {
// ObjectMeta overrides for the ingress used to solve HTTP01 challenges.
// Only the 'labels' and 'annotations' fields may be set.
// If labels or annotations overlap with in-built values, the values here
// will override the in-built values.
// +optional
ACMEChallengeSolverHTTP01IngressObjectMeta `json:"metadata"`
}
type ACMEChallengeSolverHTTP01IngressObjectMeta struct {
// Annotations that should be added to the created ACME HTTP01 solver ingress.
// +optional
Annotations map[string]string `json:"annotations,omitempty"`
// Labels that should be added to the created ACME HTTP01 solver ingress.
// +optional
Labels map[string]string `json:"labels,omitempty"`
}
// Used to configure a DNS01 challenge provider to be used when solving DNS01
// challenges.
// Only one DNS provider may be configured per solver.
type ACMEChallengeSolverDNS01 struct {
// CNAMEStrategy configures how the DNS01 provider should handle CNAME
// records when found in DNS zones.
// +optional
CNAMEStrategy CNAMEStrategy `json:"cnameStrategy,omitempty"`
// Use the Akamai DNS zone management API to manage DNS01 challenge records.
// +optional
Akamai *ACMEIssuerDNS01ProviderAkamai `json:"akamai,omitempty"`
// Use the Google Cloud DNS API to manage DNS01 challenge records.
// +optional
CloudDNS *ACMEIssuerDNS01ProviderCloudDNS `json:"cloudDNS,omitempty"`
// Use the Cloudflare API to manage DNS01 challenge records.
// +optional
Cloudflare *ACMEIssuerDNS01ProviderCloudflare `json:"cloudflare,omitempty"`
// Use the AWS Route53 API to manage DNS01 challenge records.
// +optional
Route53 *ACMEIssuerDNS01ProviderRoute53 `json:"route53,omitempty"`
// Use the Microsoft Azure DNS API to manage DNS01 challenge records.
// +optional
AzureDNS *ACMEIssuerDNS01ProviderAzureDNS `json:"azureDNS,omitempty"`
// Use the DigitalOcean DNS API to manage DNS01 challenge records.
// +optional
DigitalOcean *ACMEIssuerDNS01ProviderDigitalOcean `json:"digitalocean,omitempty"`
// Use the 'ACME DNS' (https://github.com/joohoi/acme-dns) API to manage
// DNS01 challenge records.
// +optional
AcmeDNS *ACMEIssuerDNS01ProviderAcmeDNS `json:"acmeDNS,omitempty"`
// Use RFC2136 ("Dynamic Updates in the Domain Name System") (https://datatracker.ietf.org/doc/rfc2136/)
// to manage DNS01 challenge records.
// +optional
RFC2136 *ACMEIssuerDNS01ProviderRFC2136 `json:"rfc2136,omitempty"`
// Configure an external webhook based DNS01 challenge solver to manage
// DNS01 challenge records.
// +optional
Webhook *ACMEIssuerDNS01ProviderWebhook `json:"webhook,omitempty"`
}
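// Illustrative sketch (not part of the original file): the "only one DNS
// provider per solver" rule can be enforced by counting the non-nil provider
// pointers on a solver value s of this type, e.g.:
//
//	n := 0
//	for _, set := range []bool{s.Akamai != nil, s.CloudDNS != nil, s.Cloudflare != nil /* ... and the rest */} {
//		if set {
//			n++
//		}
//	}
//	// n must equal exactly 1 for the solver to be valid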
// CNAMEStrategy configures how the DNS01 provider should handle CNAME records
// when found in DNS zones.
// By default, the None strategy will be applied (i.e. do not follow CNAMEs).
// +kubebuilder:validation:Enum=None;Follow
type CNAMEStrategy string
const (
// NoneStrategy indicates that no CNAME resolution strategy should be used
// when determining which DNS zone to update during DNS01 challenges.
NoneStrategy = "None"
// FollowStrategy will cause cert-manager to recurse through CNAMEs in
// order to determine which DNS zone to update during DNS01 challenges.
// This is useful if you do not want to grant cert-manager access to your
// root DNS zone, and instead delegate the _acme-challenge.example.com
// subdomain to some other, less privileged domain.
FollowStrategy = "Follow"
)
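// Illustrative sketch (not part of the original file): the Follow strategy is
// roughly equivalent to resolving the CNAME chain before creating the TXT
// record, e.g.:
//
//	cname, err := net.LookupCNAME("_acme-challenge.example.com")
//	if err == nil && cname != "_acme-challenge.example.com." {
//		// create the DNS01 TXT record at cname rather than at the original name
//	}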
// ACMEIssuerDNS01ProviderAkamai is a structure containing the DNS
// configuration for Akamai DNS—Zone Record Management API
type ACMEIssuerDNS01ProviderAkamai struct {
ServiceConsumerDomain string `json:"serviceConsumerDomain"`
ClientToken cmmeta.SecretKeySelector `json:"clientTokenSecretRef"`
ClientSecret cmmeta.SecretKeySelector `json:"clientSecretSecretRef"`
AccessToken cmmeta.SecretKeySelector `json:"accessTokenSecretRef"`
}
// ACMEIssuerDNS01ProviderCloudDNS is a structure containing the DNS
// configuration for Google Cloud DNS
type ACMEIssuerDNS01ProviderCloudDNS struct {
// +optional
ServiceAccount *cmmeta.SecretKeySelector `json:"serviceAccountSecretRef,omitempty"`
Project string `json:"project"`
// HostedZoneName is an optional field that tells cert-manager in which
// Cloud DNS zone the challenge record has to be created.
// If left empty cert-manager will automatically choose a zone.
// +optional
HostedZoneName string `json:"hostedZoneName,omitempty"`
}
// ACMEIssuerDNS01ProviderCloudflare is a structure containing the DNS
// configuration for Cloudflare.
// One of `apiKeySecretRef` or `apiTokenSecretRef` must be provided.
type ACMEIssuerDNS01ProviderCloudflare struct {
// Email of the account, only required when using API key based authentication.
// +optional
Email string `json:"email,omitempty"`
// API key to use to authenticate with Cloudflare.
// Note: using an API token to authenticate is now the recommended method
// as it allows greater control of permissions.
// +optional
APIKey *cmmeta.SecretKeySelector `json:"apiKeySecretRef,omitempty"`
// API token used to authenticate with Cloudflare.
// +optional
APIToken *cmmeta.SecretKeySelector `json:"apiTokenSecretRef,omitempty"`
}
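// Illustrative sketch (not part of the original file, and assuming
// cmmeta.SecretKeySelector embeds a LocalObjectReference carrying a Name):
// token-based auth, the recommended form, only needs apiTokenSecretRef, e.g.:
//
//	cf := &ACMEIssuerDNS01ProviderCloudflare{
//		APIToken: &cmmeta.SecretKeySelector{
//			LocalObjectReference: cmmeta.LocalObjectReference{Name: "cloudflare-token"},
//			Key:                  "api-token",
//		},
//	}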
// ACMEIssuerDNS01ProviderDigitalOcean is a structure containing the DNS
// configuration for DigitalOcean Domains
type ACMEIssuerDNS01ProviderDigitalOcean struct {
Token cmmeta.SecretKeySelector `json:"tokenSecretRef"`
}
// ACMEIssuerDNS01ProviderRoute53 is a structure containing the Route 53
// configuration for AWS
type ACMEIssuerDNS01ProviderRoute53 struct {
// The AccessKeyID is used for authentication. If not set, we fall back to using env vars, shared credentials file or AWS Instance metadata
// see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials
// +optional
AccessKeyID string `json:"accessKeyID,omitempty"`
// The SecretAccessKey is used for authentication. If not set, we fall back to using env vars, shared credentials file or AWS Instance metadata
// https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials
// +optional
SecretAccessKey cmmeta.SecretKeySelector `json:"secretAccessKeySecretRef"`
// Role is a Role ARN which the Route53 provider will assume using either the explicit credentials AccessKeyID/SecretAccessKey
// or the inferred credentials from environment variables, shared credentials file or AWS Instance metadata
// +optional
Role string `json:"role,omitempty"`
// If set, the provider will manage only this zone in Route53 and will not do a lookup using the route53:ListHostedZonesByName API call.
// +optional
HostedZoneID string `json:"hostedZoneID,omitempty"`
// Always set the region when using AccessKeyID and SecretAccessKey
Region string `json:"region"`
}
// ACMEIssuerDNS01ProviderAzureDNS is a structure containing the
// configuration for Azure DNS
type ACMEIssuerDNS01ProviderAzureDNS struct {
// If both this and ClientSecret are left unset, MSI will be used
// +optional
ClientID string `json:"clientID,omitempty"`
// If both this and ClientID are left unset, MSI will be used
// +optional
ClientSecret *cmmeta.SecretKeySelector `json:"clientSecretSecretRef,omitempty"`
SubscriptionID string `json:"subscriptionID"`
// When specifying ClientID and ClientSecret, this field is also needed
// +optional
TenantID string `json:"tenantID,omitempty"`
ResourceGroupName string `json:"resourceGroupName"`
// +optional
HostedZoneName string `json:"hostedZoneName,omitempty"`
// +optional
Environment AzureDNSEnvironment `json:"environment,omitempty"`
}
// +kubebuilder:validation:Enum=AzurePublicCloud;AzureChinaCloud;AzureGermanCloud;AzureUSGovernmentCloud
type AzureDNSEnvironment string
const (
AzurePublicCloud AzureDNSEnvironment = "AzurePublicCloud"
AzureChinaCloud AzureDNSEnvironment = "AzureChinaCloud"
AzureGermanCloud AzureDNSEnvironment = "AzureGermanCloud"
AzureUSGovernmentCloud AzureDNSEnvironment = "AzureUSGovernmentCloud"
)
// ACMEIssuerDNS01ProviderAcmeDNS is a structure containing the
// configuration for ACME-DNS servers
type ACMEIssuerDNS01ProviderAcmeDNS struct {
Host string `json:"host"`
AccountSecret cmmeta.SecretKeySelector `json:"accountSecretRef"`
}
// ACMEIssuerDNS01ProviderRFC2136 is a structure containing the
// configuration for RFC2136 DNS
type ACMEIssuerDNS01ProviderRFC2136 struct {
// The IP address or hostname of an authoritative DNS server supporting
// RFC2136 in the form host:port. If the host is an IPv6 address it must be
// enclosed in square brackets (e.g. [2001:db8::1]); port is optional.
// This field is required.
Nameserver string `json:"nameserver"`
// The name of the secret containing the TSIG value.
// If ``tsigKeyName`` is defined, this field is required.
// +optional
TSIGSecret cmmeta.SecretKeySelector `json:"tsigSecretSecretRef,omitempty"`
// The TSIG Key name configured in the DNS.
// If ``tsigSecretSecretRef`` is defined, this field is required.
// +optional
TSIGKeyName string `json:"tsigKeyName,omitempty"`
// The TSIG Algorithm configured in the DNS supporting RFC2136. Used only
// when ``tsigSecretSecretRef`` and ``tsigKeyName`` are defined.
// Supported values are (case-insensitive): ``HMACMD5`` (default),
// ``HMACSHA1``, ``HMACSHA256`` or ``HMACSHA512``.
// +optional
TSIGAlgorithm string `json:"tsigAlgorithm,omitempty"`
}
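// Illustrative note (not part of the original file): Go's net.JoinHostPort
// produces the required bracketed form for IPv6 nameservers, e.g.
// net.JoinHostPort("2001:db8::1", "53") yields "[2001:db8::1]:53".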
// ACMEIssuerDNS01ProviderWebhook specifies configuration for a webhook DNS01
// provider, including where to POST ChallengePayload resources.
type ACMEIssuerDNS01ProviderWebhook struct {
// The API group name that should be used when POSTing ChallengePayload
// resources to the webhook apiserver.
// This should be the same as the GroupName specified in the webhook
// provider implementation.
GroupName string `json:"groupName"`
// The name of the solver to use, as defined in the webhook provider
// implementation.
// This will typically be the name of the provider, e.g. 'cloudflare'.
SolverName string `json:"solverName"`
// Additional configuration that should be passed to the webhook apiserver
// when challenges are processed.
// This can contain arbitrary JSON data.
// Secret values should not be specified in this stanza.
// If secret values are needed (e.g. credentials for a DNS service), you
// should use a SecretKeySelector to reference a Secret resource.
// For details on the schema of this field, consult the webhook provider
// implementation's documentation.
// +optional
Config *apiextensionsv1.JSON `json:"config,omitempty"`
}
type ACMEIssuerStatus struct {
// URI is the unique account identifier, which can also be used to retrieve
// account details from the CA
// +optional
URI string `json:"uri,omitempty"`
// LastRegisteredEmail is the email associated with the latest registered
// ACME account, in order to track changes made to the registered account
// associated with the Issuer
// +optional
LastRegisteredEmail string `json:"lastRegisteredEmail,omitempty"`
}
| 1 | 29,137 | Can we add some description for this field? | jetstack-cert-manager | go |
@@ -45,6 +45,7 @@ public class ProtoConverter extends Visitor<Expr, Object> {
.put(TimestampType.class, "Time")
.put(BytesType.class, "String")
.put(StringType.class, "String")
+ .put(TimeType.class, "Duration")
.build();
private final IdentityHashMap<Expression, DataType> typeMap; | 1 | /*
* Copyright 2017 PingCAP, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pingcap.tikv.expression.visitor;
import static java.util.Objects.requireNonNull;
import com.google.common.collect.ImmutableMap;
import com.pingcap.tidb.tipb.Expr;
import com.pingcap.tidb.tipb.ExprType;
import com.pingcap.tidb.tipb.FieldType;
import com.pingcap.tidb.tipb.ScalarFuncSig;
import com.pingcap.tikv.codec.Codec.IntegerCodec;
import com.pingcap.tikv.codec.CodecDataOutput;
import com.pingcap.tikv.exception.TiExpressionException;
import com.pingcap.tikv.expression.*;
import com.pingcap.tikv.expression.AggregateFunction.FunctionType;
import com.pingcap.tikv.types.*;
import com.pingcap.tikv.types.DataType.EncodeType;
import java.util.IdentityHashMap;
import java.util.Map;
public class ProtoConverter extends Visitor<Expr, Object> {
// All concrete data types should be hooked to a type name
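// These names are concatenated with operator prefixes and resolved
// reflectively, e.g. ScalarFuncSig.valueOf("EQ" + "Int") yields
// ScalarFuncSig.EQInt (see the visit methods below).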
private static final Map<Class<? extends DataType>, String> SCALAR_SIG_MAP =
ImmutableMap.<Class<? extends DataType>, String>builder()
.put(IntegerType.class, "Int")
.put(BitType.class, "Int")
.put(DecimalType.class, "Decimal")
.put(RealType.class, "Real")
.put(DateTimeType.class, "Time")
.put(DateType.class, "Time")
.put(TimestampType.class, "Time")
.put(BytesType.class, "String")
.put(StringType.class, "String")
.build();
private final IdentityHashMap<Expression, DataType> typeMap;
private final boolean validateColPosition;
public ProtoConverter(IdentityHashMap<Expression, DataType> typeMap) {
this(typeMap, true);
}
/**
* Instantiate a {@code ProtoConverter} using a typeMap.
*
* @param typeMap the type map
* @param validateColPosition whether to consider column position in this converter. By default, a
*     {@code TiDAGRequest} should check whether a {@code ColumnRef}'s position is correct in
*     its executors. This validation can be skipped by setting `validateColPosition` to false.
*/
public ProtoConverter(
IdentityHashMap<Expression, DataType> typeMap, boolean validateColPosition) {
this.typeMap = typeMap;
this.validateColPosition = validateColPosition;
}
private DataType getType(Expression expression) {
DataType type = typeMap.get(expression);
if (type == null) {
throw new TiExpressionException(String.format("Expression %s type unknown", expression));
}
return type;
}
private String getTypeSignature(Expression expression) {
DataType type = getType(expression);
String typeSignature = SCALAR_SIG_MAP.get(type.getClass());
if (typeSignature == null) {
throw new TiExpressionException(String.format("Type %s signature unknown", type));
}
return typeSignature;
}
public static Expr toProto(Expression expression) {
return toProto(expression, null);
}
public static Expr toProto(Expression expression, Object context) {
ExpressionTypeCoercer coercer = new ExpressionTypeCoercer();
coercer.infer(expression);
ProtoConverter converter = new ProtoConverter(coercer.getTypeMap());
return expression.accept(converter, context);
}
// Generates a protobuf builder with partial data encoded.
// The scalar signature is left alone.
private Expr.Builder scalaToPartialProto(Expression node, Object context) {
Expr.Builder builder = Expr.newBuilder();
// Scalar function type
builder.setTp(ExprType.ScalarFunc);
// Return type
builder.setFieldType(FieldType.newBuilder().setTp(getType(node).getTypeCode()).build());
for (Expression child : node.getChildren()) {
Expr exprProto = child.accept(this, context);
builder.addChildren(exprProto);
}
return builder;
}
@Override
protected Expr visit(LogicalBinaryExpression node, Object context) {
ScalarFuncSig protoSig;
switch (node.getCompType()) {
case AND:
protoSig = ScalarFuncSig.LogicalAnd;
break;
case OR:
protoSig = ScalarFuncSig.LogicalOr;
break;
case XOR:
protoSig = ScalarFuncSig.LogicalXor;
break;
default:
throw new TiExpressionException(
String.format("Unknown comparison type %s", node.getCompType()));
}
Expr.Builder builder = scalaToPartialProto(node, context);
builder.setSig(protoSig);
return builder.build();
}
@Override
protected Expr visit(ArithmeticBinaryExpression node, Object context) {
// assume that after type coercion, children are compatible
Expression child = node.getLeft();
String typeSignature = getTypeSignature(child);
ScalarFuncSig protoSig;
switch (node.getCompType()) {
// TODO: Add test for bitwise push down
case BIT_AND:
protoSig = ScalarFuncSig.BitAndSig;
break;
case BIT_OR:
protoSig = ScalarFuncSig.BitOrSig;
break;
case BIT_XOR:
protoSig = ScalarFuncSig.BitXorSig;
break;
case DIVIDE:
protoSig = ScalarFuncSig.valueOf("Divide" + typeSignature);
break;
case MINUS:
protoSig = ScalarFuncSig.valueOf("Minus" + typeSignature);
break;
case MULTIPLY:
protoSig = ScalarFuncSig.valueOf("Multiply" + typeSignature);
break;
case PLUS:
protoSig = ScalarFuncSig.valueOf("Plus" + typeSignature);
break;
default:
throw new TiExpressionException(
String.format("Unknown comparison type %s", node.getCompType()));
}
Expr.Builder builder = scalaToPartialProto(node, context);
builder.setSig(protoSig);
return builder.build();
}
@Override
protected Expr visit(ComparisonBinaryExpression node, Object context) {
// assume that after type coercion, children are compatible
Expression child = node.getLeft();
String typeSignature = getTypeSignature(child);
ScalarFuncSig protoSig;
switch (node.getComparisonType()) {
case EQUAL:
protoSig = ScalarFuncSig.valueOf("EQ" + typeSignature);
break;
case GREATER_EQUAL:
protoSig = ScalarFuncSig.valueOf("GE" + typeSignature);
break;
case GREATER_THAN:
protoSig = ScalarFuncSig.valueOf("GT" + typeSignature);
break;
case LESS_EQUAL:
protoSig = ScalarFuncSig.valueOf("LE" + typeSignature);
break;
case LESS_THAN:
protoSig = ScalarFuncSig.valueOf("LT" + typeSignature);
break;
case NOT_EQUAL:
protoSig = ScalarFuncSig.valueOf("NE" + typeSignature);
break;
default:
throw new TiExpressionException(
String.format("Unknown comparison type %s", node.getComparisonType()));
}
Expr.Builder builder = scalaToPartialProto(node, context);
builder.setSig(protoSig);
return builder.build();
}
@Override
protected Expr visit(StringRegExpression node, Object context) {
// assume that after type coercion, children are compatible
ScalarFuncSig protoSig;
switch (node.getRegType()) {
case STARTS_WITH:
case CONTAINS:
case ENDS_WITH:
case LIKE:
protoSig = ScalarFuncSig.LikeSig;
break;
default:
throw new TiExpressionException(String.format("Unknown reg type %s", node.getRegType()));
}
Expr.Builder builder = scalaToPartialProto(node, context);
builder.setSig(protoSig);
return builder.build();
}
@Override
@SuppressWarnings("unchecked")
protected Expr visit(ColumnRef node, Object context) {
long position = 0;
if (validateColPosition) {
requireNonNull(context, "Context of a ColumnRef should not be null");
Map<ColumnRef, Integer> colIdOffsetMap = (Map<ColumnRef, Integer>) context;
position =
requireNonNull(
colIdOffsetMap.get(node), "Required column position info is not in a valid context.");
}
Expr.Builder builder = Expr.newBuilder();
builder.setTp(ExprType.ColumnRef);
CodecDataOutput cdo = new CodecDataOutput();
// After switching to DAG request mode, expression value
// should be the index of table columns we provided in
// the first executor of a DAG request.
IntegerCodec.writeLong(cdo, position);
builder.setVal(cdo.toByteString());
return builder.build();
}
@Override
protected Expr visit(Constant node, Object context) {
Expr.Builder builder = Expr.newBuilder();
if (node.getValue() == null) {
builder.setTp(ExprType.Null);
return builder.build();
} else {
DataType type = node.getType();
builder.setTp(type.getProtoExprType());
CodecDataOutput cdo = new CodecDataOutput();
type.encode(cdo, EncodeType.PROTO, node.getValue());
builder.setVal(cdo.toByteString());
}
return builder.build();
}
@Override
protected Expr visit(AggregateFunction node, Object context) {
Expr.Builder builder = Expr.newBuilder();
FunctionType type = node.getType();
switch (type) {
case Max:
builder.setTp(ExprType.Max);
break;
case Sum:
builder.setTp(ExprType.Sum);
break;
case Min:
builder.setTp(ExprType.Min);
break;
case First:
builder.setTp(ExprType.First);
break;
case Count:
builder.setTp(ExprType.Count);
break;
}
for (Expression arg : node.getChildren()) {
Expr exprProto = arg.accept(this, context);
builder.addChildren(exprProto);
}
return builder.build();
}
@Override
protected Expr visit(IsNull node, Object context) {
String typeSignature = getTypeSignature(node.getExpression());
ScalarFuncSig protoSig = ScalarFuncSig.valueOf(typeSignature + "IsNull");
Expr.Builder builder = scalaToPartialProto(node, context);
builder.setSig(protoSig);
return builder.build();
}
@Override
protected Expr visit(Not node, Object context) {
ScalarFuncSig protoSig = ScalarFuncSig.UnaryNot;
Expr.Builder builder = scalaToPartialProto(node, context);
builder.setSig(protoSig);
return builder.build();
}
}
| 1 | 8,924 | Should its name be Time or Duration? I can see them both. | pingcap-tispark | java |
@@ -31,6 +31,14 @@ type Option interface {
applyGRPCOption(*otlpconfig.Config)
}
+func asGRPCOptions(opts []Option) []otlpconfig.GRPCOption {
+ converted := make([]otlpconfig.GRPCOption, len(opts))
+ for i, o := range opts {
+ converted[i] = otlpconfig.NewGRPCOption(o.applyGRPCOption)
+ }
+ return converted
+}
+
// RetryConfig defines configuration for retrying batches in case of export
// failure using an exponential backoff.
type RetryConfig retry.Config | 1 | // Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package otlptracegrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
import (
"fmt"
"time"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig"
"go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/retry"
)
// Option applies an option to the gRPC driver.
type Option interface {
applyGRPCOption(*otlpconfig.Config)
}
// RetryConfig defines configuration for retrying batches in case of export
// failure using an exponential backoff.
type RetryConfig retry.Config
type wrappedOption struct {
otlpconfig.GRPCOption
}
func (w wrappedOption) applyGRPCOption(cfg *otlpconfig.Config) {
w.ApplyGRPCOption(cfg)
}
// WithInsecure disables client transport security for the exporter's gRPC connection
// just like grpc.WithInsecure() https://pkg.go.dev/google.golang.org/grpc#WithInsecure
// does. Note, by default, client security is required unless WithInsecure is used.
func WithInsecure() Option {
return wrappedOption{otlpconfig.WithInsecure()}
}
// WithEndpoint allows one to set the endpoint that the exporter will
// connect to the collector on. If unset, it will instead try to
// connect to DefaultCollectorHost:DefaultCollectorPort.
func WithEndpoint(endpoint string) Option {
return wrappedOption{otlpconfig.WithEndpoint(endpoint)}
}
// WithReconnectionPeriod allows one to set the delay between connection
// attempts after failing to connect with the collector.
func WithReconnectionPeriod(rp time.Duration) Option {
return wrappedOption{otlpconfig.NewGRPCOption(func(cfg *otlpconfig.Config) {
cfg.ReconnectionPeriod = rp
})}
}
func compressorToCompression(compressor string) otlpconfig.Compression {
switch compressor {
case "gzip":
return otlpconfig.GzipCompression
}
otel.Handle(fmt.Errorf("invalid compression type: '%s', using no compression as default", compressor))
return otlpconfig.NoCompression
}
// WithCompressor will set the compressor for the gRPC client to use when sending requests.
// It is the responsibility of the caller to ensure that the compressor set has been registered
// with google.golang.org/grpc/encoding. This can be done by encoding.RegisterCompressor. Some
// compressors auto-register on import, such as gzip, which can be registered by adding
// `import _ "google.golang.org/grpc/encoding/gzip"`.
func WithCompressor(compressor string) Option {
return wrappedOption{otlpconfig.WithCompression(compressorToCompression(compressor))}
}
// WithHeaders will send the provided headers with gRPC requests.
func WithHeaders(headers map[string]string) Option {
return wrappedOption{otlpconfig.WithHeaders(headers)}
}
// WithTLSCredentials allows the connection to use TLS credentials
// when talking to the server. It takes in grpc.TransportCredentials instead
// of, say, a Certificate file or a tls.Certificate, because retrieving
// these credentials can be done in many ways, e.g. a plain file, an in-code tls.Config,
// or by certificate rotation, so it is up to the caller to decide what to use.
func WithTLSCredentials(creds credentials.TransportCredentials) Option {
return wrappedOption{otlpconfig.NewGRPCOption(func(cfg *otlpconfig.Config) {
cfg.Traces.GRPCCredentials = creds
})}
}
// WithServiceConfig defines the default gRPC service config used.
func WithServiceConfig(serviceConfig string) Option {
return wrappedOption{otlpconfig.NewGRPCOption(func(cfg *otlpconfig.Config) {
cfg.ServiceConfig = serviceConfig
})}
}
// WithDialOption opens support to any grpc.DialOption to be used. If an
// option conflicts with some other gRPC configuration specified elsewhere,
// the ones here take precedence since they are set last.
func WithDialOption(opts ...grpc.DialOption) Option {
return wrappedOption{otlpconfig.NewGRPCOption(func(cfg *otlpconfig.Config) {
cfg.DialOptions = opts
})}
}
// WithGRPCConn allows reusing existing gRPC connection when it has already been
// established for other services. When set, other dial options will be ignored.
func WithGRPCConn(conn *grpc.ClientConn) Option {
return wrappedOption{otlpconfig.NewGRPCOption(func(cfg *otlpconfig.Config) {
cfg.GRPCConn = conn
})}
}
// WithTimeout tells the driver the max waiting time for the backend to process
// each span batch. If unset, the default will be 10 seconds.
func WithTimeout(duration time.Duration) Option {
return wrappedOption{otlpconfig.WithTimeout(duration)}
}
// WithRetry configures the retry policy for transient errors that may occur
// when exporting traces. An exponential back-off algorithm is used to ensure
// endpoints are not overwhelmed with retries. If unset, the default retry
// policy will retry after 5 seconds and increase exponentially after each
// error for a total of 1 minute.
func WithRetry(settings RetryConfig) Option {
return wrappedOption{otlpconfig.WithRetry(retry.Config(settings))}
}
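// Illustrative usage sketch (not part of the original file): these options are
// typically passed to this package's exporter constructor, e.g.:
//
//	exp, err := otlptracegrpc.New(ctx,
//		otlptracegrpc.WithEndpoint("localhost:4317"),
//		otlptracegrpc.WithInsecure(),
//		otlptracegrpc.WithReconnectionPeriod(5*time.Second),
//	)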
| 1 | 16,778 | Is this used anywhere? | open-telemetry-opentelemetry-go | go |
@@ -205,6 +205,17 @@ func TestMerge(t *testing.T) {
}
}
+func TestEmpty(t *testing.T) {
+ var res *resource.Resource
+ require.Equal(t, "", res.SchemaURL())
+ require.Equal(t, "", res.String())
+ require.Equal(t, []attribute.KeyValue(nil), res.Attributes())
+
+ it := res.Iter()
+ require.Equal(t, 0, it.Len())
+ require.True(t, res.Equal(res))
+}
+
func TestDefault(t *testing.T) {
res := resource.Default()
require.False(t, res.Equal(resource.Empty())) | 1 | // Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package resource_test
import (
"context"
"encoding/json"
"errors"
"fmt"
"os"
"strings"
"testing"
"github.com/google/go-cmp/cmp"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
ottest "go.opentelemetry.io/otel/internal/internaltest"
"go.opentelemetry.io/otel/sdk/resource"
semconv "go.opentelemetry.io/otel/semconv/v1.4.0"
)
var (
kv11 = attribute.String("k1", "v11")
kv12 = attribute.String("k1", "v12")
kv21 = attribute.String("k2", "v21")
kv31 = attribute.String("k3", "v31")
kv41 = attribute.String("k4", "v41")
kv42 = attribute.String("k4", "")
)
func TestNewWithAttributes(t *testing.T) {
cases := []struct {
name string
in []attribute.KeyValue
want []attribute.KeyValue
}{
{
name: "Key with common key order1",
in: []attribute.KeyValue{kv12, kv11, kv21},
want: []attribute.KeyValue{kv11, kv21},
},
{
name: "Key with common key order2",
in: []attribute.KeyValue{kv11, kv12, kv21},
want: []attribute.KeyValue{kv12, kv21},
},
{
name: "Key with nil",
in: nil,
want: nil,
},
}
for _, c := range cases {
t.Run(fmt.Sprintf("case-%s", c.name), func(t *testing.T) {
res := resource.NewSchemaless(c.in...)
if diff := cmp.Diff(
res.Attributes(),
c.want,
cmp.AllowUnexported(attribute.Value{})); diff != "" {
t.Fatalf("unwanted result: diff %+v,", diff)
}
})
}
}
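// Note: when the same key is supplied more than once, the value given last
// wins, which is what the "common key" cases above assert.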
func TestMerge(t *testing.T) {
cases := []struct {
name string
a, b *resource.Resource
want []attribute.KeyValue
isErr bool
schemaURL string
}{
{
name: "Merge 2 nils",
a: nil,
b: nil,
want: nil,
},
{
name: "Merge with no overlap, no nil",
a: resource.NewSchemaless(kv11, kv31),
b: resource.NewSchemaless(kv21, kv41),
want: []attribute.KeyValue{kv11, kv21, kv31, kv41},
},
{
name: "Merge with no overlap, no nil, not interleaved",
a: resource.NewSchemaless(kv11, kv21),
b: resource.NewSchemaless(kv31, kv41),
want: []attribute.KeyValue{kv11, kv21, kv31, kv41},
},
{
name: "Merge with common key order1",
a: resource.NewSchemaless(kv11),
b: resource.NewSchemaless(kv12, kv21),
want: []attribute.KeyValue{kv12, kv21},
},
{
name: "Merge with common key order2",
a: resource.NewSchemaless(kv12, kv21),
b: resource.NewSchemaless(kv11),
want: []attribute.KeyValue{kv11, kv21},
},
{
name: "Merge with common key order4",
a: resource.NewSchemaless(kv11, kv21, kv41),
b: resource.NewSchemaless(kv31, kv41),
want: []attribute.KeyValue{kv11, kv21, kv31, kv41},
},
{
name: "Merge with no keys",
a: resource.NewSchemaless(),
b: resource.NewSchemaless(),
want: nil,
},
{
name: "Merge with first resource no keys",
a: resource.NewSchemaless(),
b: resource.NewSchemaless(kv21),
want: []attribute.KeyValue{kv21},
},
{
name: "Merge with second resource no keys",
a: resource.NewSchemaless(kv11),
b: resource.NewSchemaless(),
want: []attribute.KeyValue{kv11},
},
{
name: "Merge with first resource nil",
a: nil,
b: resource.NewSchemaless(kv21),
want: []attribute.KeyValue{kv21},
},
{
name: "Merge with second resource nil",
a: resource.NewSchemaless(kv11),
b: nil,
want: []attribute.KeyValue{kv11},
},
{
name: "Merge with first resource value empty string",
a: resource.NewSchemaless(kv42),
b: resource.NewSchemaless(kv41),
want: []attribute.KeyValue{kv41},
},
{
name: "Merge with second resource value empty string",
a: resource.NewSchemaless(kv41),
b: resource.NewSchemaless(kv42),
want: []attribute.KeyValue{kv42},
},
{
name: "Merge with first resource with schema",
a: resource.NewWithAttributes("https://opentelemetry.io/schemas/1.4.0", kv41),
b: resource.NewSchemaless(kv42),
want: []attribute.KeyValue{kv42},
schemaURL: "https://opentelemetry.io/schemas/1.4.0",
},
{
name: "Merge with second resource with schema",
a: resource.NewSchemaless(kv41),
b: resource.NewWithAttributes("https://opentelemetry.io/schemas/1.4.0", kv42),
want: []attribute.KeyValue{kv42},
schemaURL: "https://opentelemetry.io/schemas/1.4.0",
},
{
name: "Merge with different schemas",
a: resource.NewWithAttributes("https://opentelemetry.io/schemas/1.4.0", kv41),
b: resource.NewWithAttributes("https://opentelemetry.io/schemas/1.3.0", kv42),
want: nil,
isErr: true,
},
}
for _, c := range cases {
t.Run(fmt.Sprintf("case-%s", c.name), func(t *testing.T) {
res, err := resource.Merge(c.a, c.b)
if c.isErr {
assert.Error(t, err)
} else {
assert.NoError(t, err)
}
assert.EqualValues(t, c.schemaURL, res.SchemaURL())
if diff := cmp.Diff(
res.Attributes(),
c.want,
cmp.AllowUnexported(attribute.Value{})); diff != "" {
t.Fatalf("unwanted result: diff %+v,", diff)
}
})
}
}
func TestDefault(t *testing.T) {
res := resource.Default()
require.False(t, res.Equal(resource.Empty()))
require.True(t, res.Set().HasValue(semconv.ServiceNameKey))
serviceName, _ := res.Set().Value(semconv.ServiceNameKey)
require.True(t, strings.HasPrefix(serviceName.AsString(), "unknown_service:"))
require.Greaterf(t, len(serviceName.AsString()), len("unknown_service:"),
"default service.name should include executable name")
require.Contains(t, res.Attributes(), semconv.TelemetrySDKLanguageGo)
require.Contains(t, res.Attributes(), semconv.TelemetrySDKVersionKey.String(otel.Version()))
require.Contains(t, res.Attributes(), semconv.TelemetrySDKNameKey.String("opentelemetry"))
}
func TestString(t *testing.T) {
for _, test := range []struct {
kvs []attribute.KeyValue
want string
}{
{
kvs: nil,
want: "",
},
{
kvs: []attribute.KeyValue{},
want: "",
},
{
kvs: []attribute.KeyValue{kv11},
want: "k1=v11",
},
{
kvs: []attribute.KeyValue{kv11, kv12},
want: "k1=v12",
},
{
kvs: []attribute.KeyValue{kv11, kv21},
want: "k1=v11,k2=v21",
},
{
kvs: []attribute.KeyValue{kv21, kv11},
want: "k1=v11,k2=v21",
},
{
kvs: []attribute.KeyValue{kv11, kv21, kv31},
want: "k1=v11,k2=v21,k3=v31",
},
{
kvs: []attribute.KeyValue{kv31, kv11, kv21},
want: "k1=v11,k2=v21,k3=v31",
},
{
kvs: []attribute.KeyValue{attribute.String("A", "a"), attribute.String("B", "b")},
want: "A=a,B=b",
},
{
kvs: []attribute.KeyValue{attribute.String("A", "a,B=b")},
want: `A=a\,B\=b`,
},
{
kvs: []attribute.KeyValue{attribute.String("A", `a,B\=b`)},
want: `A=a\,B\\\=b`,
},
{
kvs: []attribute.KeyValue{attribute.String("A=a,B", `b`)},
want: `A\=a\,B=b`,
},
{
kvs: []attribute.KeyValue{attribute.String(`A=a\,B`, `b`)},
want: `A\=a\\\,B=b`,
},
{
kvs: []attribute.KeyValue{attribute.String("", "invalid")},
want: "",
},
{
kvs: []attribute.KeyValue{attribute.String("", "invalid"), attribute.String("B", "b")},
want: "B=b",
},
} {
if got := resource.NewSchemaless(test.kvs...).String(); got != test.want {
t.Errorf("Resource(%v).String() = %q, want %q", test.kvs, got, test.want)
}
}
}
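// Note: Resource.String escapes ',' and '=' inside keys and values with a
// backslash, which is why the cases above expect sequences like `\,` and `\=`.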
const envVar = "OTEL_RESOURCE_ATTRIBUTES"
func TestMarshalJSON(t *testing.T) {
r := resource.NewSchemaless(attribute.Int64("A", 1), attribute.String("C", "D"))
data, err := json.Marshal(r)
require.NoError(t, err)
require.Equal(t,
`[{"Key":"A","Value":{"Type":"INT64","Value":1}},{"Key":"C","Value":{"Type":"STRING","Value":"D"}}]`,
string(data))
}
func TestNew(t *testing.T) {
tc := []struct {
name string
envars string
detectors []resource.Detector
options []resource.Option
resourceValues map[string]string
schemaURL string
isErr bool
}{
{
name: "No Options returns empty resource",
envars: "key=value,other=attr",
options: nil,
resourceValues: map[string]string{},
},
{
name: "Nil Detectors works",
envars: "key=value,other=attr",
options: []resource.Option{
resource.WithDetectors(),
},
resourceValues: map[string]string{},
},
{
name: "Only Host",
envars: "from=here",
options: []resource.Option{
resource.WithHost(),
},
resourceValues: map[string]string{
"host.name": hostname(),
},
schemaURL: semconv.SchemaURL,
},
{
name: "Only Env",
envars: "key=value,other=attr",
options: []resource.Option{
resource.WithFromEnv(),
},
resourceValues: map[string]string{
"key": "value",
"other": "attr",
},
},
{
name: "Only TelemetrySDK",
envars: "",
options: []resource.Option{
resource.WithTelemetrySDK(),
},
resourceValues: map[string]string{
"telemetry.sdk.name": "opentelemetry",
"telemetry.sdk.language": "go",
"telemetry.sdk.version": otel.Version(),
},
schemaURL: semconv.SchemaURL,
},
{
name: "WithAttributes",
envars: "key=value,other=attr",
options: []resource.Option{
resource.WithAttributes(attribute.String("A", "B")),
},
resourceValues: map[string]string{
"A": "B",
},
},
{
name: "With schema url",
envars: "",
options: []resource.Option{
resource.WithAttributes(attribute.String("A", "B")),
resource.WithSchemaURL("https://opentelemetry.io/schemas/1.0.0"),
},
resourceValues: map[string]string{
"A": "B",
},
schemaURL: "https://opentelemetry.io/schemas/1.0.0",
},
{
name: "With conflicting schema urls",
envars: "",
options: []resource.Option{
resource.WithDetectors(
resource.StringDetector("https://opentelemetry.io/schemas/1.0.0", semconv.HostNameKey, os.Hostname),
),
resource.WithSchemaURL("https://opentelemetry.io/schemas/1.1.0"),
},
resourceValues: map[string]string{},
schemaURL: "",
isErr: true,
},
{
name: "With conflicting detector schema urls",
envars: "",
options: []resource.Option{
resource.WithDetectors(
resource.StringDetector("https://opentelemetry.io/schemas/1.0.0", semconv.HostNameKey, os.Hostname),
resource.StringDetector("https://opentelemetry.io/schemas/1.1.0", semconv.HostNameKey, func() (string, error) { return "", errors.New("fail") }),
),
resource.WithSchemaURL("https://opentelemetry.io/schemas/1.2.0"),
},
resourceValues: map[string]string{},
schemaURL: "",
isErr: true,
},
}
for _, tt := range tc {
t.Run(tt.name, func(t *testing.T) {
store, err := ottest.SetEnvVariables(map[string]string{
envVar: tt.envars,
})
require.NoError(t, err)
defer func() { require.NoError(t, store.Restore()) }()
ctx := context.Background()
res, err := resource.New(ctx, tt.options...)
if tt.isErr {
require.Error(t, err)
} else {
require.NoError(t, err)
}
require.EqualValues(t, tt.resourceValues, toMap(res))
// TODO: do we need to ensure that resource is never nil and eliminate the
// following if?
if res != nil {
assert.EqualValues(t, tt.schemaURL, res.SchemaURL())
}
})
}
}
func TestWithOSType(t *testing.T) {
mockRuntimeProviders()
t.Cleanup(restoreAttributesProviders)
ctx := context.Background()
res, err := resource.New(ctx,
resource.WithOSType(),
)
require.NoError(t, err)
require.EqualValues(t, map[string]string{
"os.type": "linux",
}, toMap(res))
}
func TestWithOSDescription(t *testing.T) {
mockRuntimeProviders()
t.Cleanup(restoreAttributesProviders)
ctx := context.Background()
res, err := resource.New(ctx,
resource.WithOSDescription(),
)
require.NoError(t, err)
require.EqualValues(t, map[string]string{
"os.description": "Test",
}, toMap(res))
}
func TestWithOS(t *testing.T) {
mockRuntimeProviders()
t.Cleanup(restoreAttributesProviders)
ctx := context.Background()
res, err := resource.New(ctx,
resource.WithOS(),
)
require.NoError(t, err)
require.EqualValues(t, map[string]string{
"os.type": "linux",
"os.description": "Test",
}, toMap(res))
}
func TestWithProcessPID(t *testing.T) {
mockProcessAttributesProvidersWithErrors()
ctx := context.Background()
res, err := resource.New(ctx,
resource.WithProcessPID(),
)
require.NoError(t, err)
require.EqualValues(t, map[string]string{
"process.pid": fmt.Sprint(fakePID),
}, toMap(res))
}
func TestWithProcessExecutableName(t *testing.T) {
mockProcessAttributesProvidersWithErrors()
ctx := context.Background()
res, err := resource.New(ctx,
resource.WithProcessExecutableName(),
)
require.NoError(t, err)
require.EqualValues(t, map[string]string{
"process.executable.name": fakeExecutableName,
}, toMap(res))
}
func TestWithProcessExecutablePath(t *testing.T) {
mockProcessAttributesProviders()
ctx := context.Background()
res, err := resource.New(ctx,
resource.WithProcessExecutablePath(),
)
require.NoError(t, err)
require.EqualValues(t, map[string]string{
"process.executable.path": fakeExecutablePath,
}, toMap(res))
}
func TestWithProcessCommandArgs(t *testing.T) {
mockProcessAttributesProvidersWithErrors()
ctx := context.Background()
res, err := resource.New(ctx,
resource.WithProcessCommandArgs(),
)
require.NoError(t, err)
require.EqualValues(t, map[string]string{
"process.command_args": fmt.Sprint(fakeCommandArgs),
}, toMap(res))
}
func TestWithProcessOwner(t *testing.T) {
mockProcessAttributesProviders()
ctx := context.Background()
res, err := resource.New(ctx,
resource.WithProcessOwner(),
)
require.NoError(t, err)
require.EqualValues(t, map[string]string{
"process.owner": fakeOwner,
}, toMap(res))
}
func TestWithProcessRuntimeName(t *testing.T) {
mockProcessAttributesProvidersWithErrors()
ctx := context.Background()
res, err := resource.New(ctx,
resource.WithProcessRuntimeName(),
)
require.NoError(t, err)
require.EqualValues(t, map[string]string{
"process.runtime.name": fakeRuntimeName,
}, toMap(res))
}
func TestWithProcessRuntimeVersion(t *testing.T) {
mockProcessAttributesProvidersWithErrors()
ctx := context.Background()
res, err := resource.New(ctx,
resource.WithProcessRuntimeVersion(),
)
require.NoError(t, err)
require.EqualValues(t, map[string]string{
"process.runtime.version": fakeRuntimeVersion,
}, toMap(res))
}
func TestWithProcessRuntimeDescription(t *testing.T) {
mockProcessAttributesProvidersWithErrors()
ctx := context.Background()
res, err := resource.New(ctx,
resource.WithProcessRuntimeDescription(),
)
require.NoError(t, err)
require.EqualValues(t, map[string]string{
"process.runtime.description": fakeRuntimeDescription,
}, toMap(res))
}
func TestWithProcess(t *testing.T) {
mockProcessAttributesProviders()
ctx := context.Background()
res, err := resource.New(ctx,
resource.WithProcess(),
)
require.NoError(t, err)
require.EqualValues(t, map[string]string{
"process.pid": fmt.Sprint(fakePID),
"process.executable.name": fakeExecutableName,
"process.executable.path": fakeExecutablePath,
"process.command_args": fmt.Sprint(fakeCommandArgs),
"process.owner": fakeOwner,
"process.runtime.name": fakeRuntimeName,
"process.runtime.version": fakeRuntimeVersion,
"process.runtime.description": fakeRuntimeDescription,
}, toMap(res))
}
func toMap(res *resource.Resource) map[string]string {
m := map[string]string{}
for _, attr := range res.Attributes() {
m[string(attr.Key)] = attr.Value.Emit()
}
return m
}
func hostname() string {
hn, err := os.Hostname()
if err != nil {
return fmt.Sprintf("hostname(%s)", err)
}
return hn
}
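// Reviewer-suggested variant (sketch): testify's assert records a failure and
// lets the test continue, while require aborts the test immediately, so checks
// like those in the new TestEmpty could equally read:
//
//	assert.Equal(t, "", res.SchemaURL())
//	assert.Equal(t, "", res.String())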
| 1 | 16,494 | I believe that you can use `assert` instead of `require` here and in the following lines | open-telemetry-opentelemetry-go | go |
@@ -177,7 +177,7 @@ func (pb *Actor) CreateChannel(vmctx exec.VMContext, target address.Address, eol
err := withPayerChannels(ctx, storage, payerAddress, func(byChannelID exec.Lookup) error {
// check to see if payment channel is duplicate
- _, err := byChannelID.Find(ctx, channelID.KeyString())
+ err := byChannelID.Find(ctx, channelID.KeyString(), &PaymentChannel{})
if err != hamt.ErrNotFound { // we expect to not find the payment channel
if err == nil {
return Errors[ErrDuplicateChannel] | 1 | package paymentbroker
import (
"context"
"github.com/ipfs/go-cid"
"github.com/ipfs/go-hamt-ipld"
cbor "github.com/ipfs/go-ipld-cbor"
"github.com/filecoin-project/go-filecoin/abi"
"github.com/filecoin-project/go-filecoin/actor"
"github.com/filecoin-project/go-filecoin/address"
"github.com/filecoin-project/go-filecoin/exec"
"github.com/filecoin-project/go-filecoin/types"
"github.com/filecoin-project/go-filecoin/vm/errors"
)
const (
// ErrNonAccountActor indicates a non-account actor attempted to create a payment channel.
ErrNonAccountActor = 33
// ErrDuplicateChannel indicates an attempt to create a payment channel with an existing id.
ErrDuplicateChannel = 34
// ErrEolTooLow indicates an attempt to lower the Eol of a payment channel.
ErrEolTooLow = 35
// ErrReclaimBeforeEol indicates an attempt to reclaim funds before the eol of the channel.
ErrReclaimBeforeEol = 36
// ErrInsufficientChannelFunds indicates an attempt to take more funds than the channel contains.
ErrInsufficientChannelFunds = 37
// ErrUnknownChannel indicates an invalid channel id.
ErrUnknownChannel = 38
// ErrWrongTarget indicates an attempt to redeem from the wrong target account.
ErrWrongTarget = 39
// ErrExpired indicates the block height has exceeded the eol.
ErrExpired = 40
// ErrAlreadyWithdrawn indicates the amount of the voucher has already been withdrawn.
ErrAlreadyWithdrawn = 41
// ErrInvalidSignature indicates the signature is invalid.
ErrInvalidSignature = 42
// ErrTooEarly indicates that the block height is too low to satisfy a voucher.
ErrTooEarly = 43
// ErrConditionInvalid indicates that the condition attached to a voucher did not execute successfully.
ErrConditionInvalid = 44
// ErrInvalidCancel indicates that the condition attached to a voucher executed successfully, so the channel can't be cancelled.
ErrInvalidCancel = 45
)
// CancelDelayBlockTime is the number of rounds given to the target to respond after the channel
// is canceled before it expires.
// TODO: what is a secure value for this? Value is arbitrary right now.
// See https://github.com/filecoin-project/go-filecoin/issues/1887
const CancelDelayBlockTime = 10000
// Errors map error codes to revert errors this actor may return.
var Errors = map[uint8]error{
ErrTooEarly: errors.NewCodedRevertError(ErrTooEarly, "block height too low to redeem voucher"),
ErrNonAccountActor: errors.NewCodedRevertError(ErrNonAccountActor, "Only account actors may create payment channels"),
ErrDuplicateChannel: errors.NewCodedRevertError(ErrDuplicateChannel, "Duplicate create channel attempt"),
ErrEolTooLow: errors.NewCodedRevertError(ErrEolTooLow, "payment channel eol may not be decreased"),
ErrReclaimBeforeEol: errors.NewCodedRevertError(ErrReclaimBeforeEol, "payment channel may not reclaimed before eol"),
ErrInsufficientChannelFunds: errors.NewCodedRevertError(ErrInsufficientChannelFunds, "voucher amount exceeds amount in channel"),
ErrUnknownChannel: errors.NewCodedRevertError(ErrUnknownChannel, "payment channel is unknown"),
ErrWrongTarget: errors.NewCodedRevertError(ErrWrongTarget, "attempt to redeem channel from wrong target account"),
ErrExpired: errors.NewCodedRevertError(ErrExpired, "block height has exceeded channel's end of life"),
ErrAlreadyWithdrawn: errors.NewCodedRevertError(ErrAlreadyWithdrawn, "update amount has already been redeemed"),
ErrInvalidSignature: errors.NewCodedRevertErrorf(ErrInvalidSignature, "signature failed to validate"),
}
func init() {
cbor.RegisterCborType(PaymentChannel{})
}
// PaymentChannel records the intent to pay funds to a target account.
type PaymentChannel struct {
// Target is the address of the account to which funds will be transferred
Target address.Address `json:"target"`
// Amount is the total amount of FIL that has been transferred to the channel from the payer
Amount types.AttoFIL `json:"amount"`
// AmountRedeemed is the amount of FIL already transferred to the target
AmountRedeemed types.AttoFIL `json:"amount_redeemed"`
// AgreedEol is the expiration for the payment channel agreed upon by the
// payer and payee upon initialization or extension
AgreedEol *types.BlockHeight `json:"agreed_eol"`
// Condition are the set of conditions for redeeming or closing the payment
// channel
Condition *types.Predicate `json:"condition"`
// Eol is the actual expiration for the payment channel which can differ from
// AgreedEol when the payment channel is in dispute
Eol *types.BlockHeight `json:"eol"`
// Redeemed is a flag indicating whether or not Redeem has been called on the
// payment channel yet. This is necessary because AmountRedeemed can still be
// zero in the event of a zero-value voucher
Redeemed bool `json:"redeemed"`
}
// Actor provides a mechanism for off chain payments.
// It allows the creation of payment channels that hold funds for a target account
// and permits that account to withdraw funds only with a voucher signed by the
// channel's creator.
type Actor struct{}
// NewActor returns a new payment broker actor.
func NewActor() *actor.Actor {
return actor.NewActor(types.PaymentBrokerActorCodeCid, types.ZeroAttoFIL)
}
// InitializeState stores the actor's initial data structure.
func (pb *Actor) InitializeState(storage exec.Storage, initializerData interface{}) error {
// pb's default state is an empty lookup, so this method is a no-op
return nil
}
// Exports returns the actor's exports.
func (pb *Actor) Exports() exec.Exports {
return paymentBrokerExports
}
var _ exec.ExecutableActor = (*Actor)(nil)
var paymentBrokerExports = exec.Exports{
"cancel": &exec.FunctionSignature{
Params: []abi.Type{abi.ChannelID},
Return: nil,
},
"close": &exec.FunctionSignature{
Params: []abi.Type{abi.Address, abi.ChannelID, abi.AttoFIL, abi.BlockHeight, abi.Predicate, abi.Bytes, abi.Parameters},
Return: nil,
},
"createChannel": &exec.FunctionSignature{
Params: []abi.Type{abi.Address, abi.BlockHeight},
Return: []abi.Type{abi.ChannelID},
},
"extend": &exec.FunctionSignature{
Params: []abi.Type{abi.ChannelID, abi.BlockHeight},
Return: nil,
},
"ls": &exec.FunctionSignature{
Params: []abi.Type{abi.Address},
Return: []abi.Type{abi.Bytes},
},
"reclaim": &exec.FunctionSignature{
Params: []abi.Type{abi.ChannelID},
Return: nil,
},
"redeem": &exec.FunctionSignature{
Params: []abi.Type{abi.Address, abi.ChannelID, abi.AttoFIL, abi.BlockHeight, abi.Predicate, abi.Bytes, abi.Parameters},
Return: nil,
},
"voucher": &exec.FunctionSignature{
Params: []abi.Type{abi.ChannelID, abi.AttoFIL, abi.BlockHeight, abi.Predicate},
Return: []abi.Type{abi.Bytes},
},
}
// CreateChannel creates a new payment channel from the caller to the target.
// The value attached to the invocation is used as the deposit, and the channel
// will expire and return all of its money to the owner after the given block height.
func (pb *Actor) CreateChannel(vmctx exec.VMContext, target address.Address, eol *types.BlockHeight) (*types.ChannelID, uint8, error) {
if err := vmctx.Charge(actor.DefaultGasCost); err != nil {
return nil, exec.ErrInsufficientGas, errors.RevertErrorWrap(err, "Insufficient gas")
}
// require that from account be an account actor to ensure nonce is a valid id
if !vmctx.IsFromAccountActor() {
return nil, errors.CodeError(Errors[ErrNonAccountActor]), Errors[ErrNonAccountActor]
}
ctx := context.Background()
storage := vmctx.Storage()
payerAddress := vmctx.Message().From
channelID := types.NewChannelID(uint64(vmctx.Message().Nonce))
err := withPayerChannels(ctx, storage, payerAddress, func(byChannelID exec.Lookup) error {
// check to see if payment channel is duplicate
_, err := byChannelID.Find(ctx, channelID.KeyString())
if err != hamt.ErrNotFound { // we expect to not find the payment channel
if err == nil {
return Errors[ErrDuplicateChannel]
}
return errors.FaultErrorWrapf(err, "Error retrieving payment channel")
}
// add payment channel and commit
err = byChannelID.Set(ctx, channelID.KeyString(), &PaymentChannel{
Target: target,
Amount: vmctx.Message().Value,
AmountRedeemed: types.NewAttoFILFromFIL(0),
AgreedEol: eol,
Eol: eol,
})
if err != nil {
return errors.FaultErrorWrap(err, "Could not set payment channel")
}
return nil
})
if err != nil {
// ensure error is properly wrapped
if !errors.IsFault(err) && !errors.ShouldRevert(err) {
return nil, 1, errors.FaultErrorWrap(err, "Error creating payment channel")
}
return nil, errors.CodeError(err), err
}
return channelID, 0, nil
}
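// Note on the patch above: Find's signature changes from returning an
// interface{} (which each caller then type-asserts to *PaymentChannel) to
// decoding into a caller-supplied value, e.g. (sketch):
//
//	var channel PaymentChannel
//	if err := byChannelID.Find(ctx, chid.KeyString(), &channel); err != nil {
//		// handle hamt.ErrNotFound vs. fault errors as before
//	}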
// Redeem is called by the target account to withdraw funds with authorization from the payer.
// This method is exactly like Close except it doesn't close the channel.
// This is useful when you want to checkpoint the value in a payment, but continue to use the
// channel afterwards. The amt represents the total funds authorized so far, so that subsequent
// calls to Update will only transfer the difference between the given amt and the greatest
// amt taken so far. A series of channel transactions might look like this:
// Payer: 2000, Target: 0, Channel: 0
// payer createChannel(1000) -> Payer: 1000, Target: 0, Channel: 1000
// target Redeem(100) -> Payer: 1000, Target: 100, Channel: 900
// target Redeem(200) -> Payer: 1000, Target: 200, Channel: 800
// target Close(500) -> Payer: 1500, Target: 500, Channel: 0
//
// If a condition is provided in the voucher:
// - The parameters provided in the condition will be combined with redeemerConditionParams
// - A message will be sent to the condition.To address using the condition.Method with the combined params
// - If the message returns an error the condition is considered to be false and the redeem will fail
func (pb *Actor) Redeem(vmctx exec.VMContext, payer address.Address, chid *types.ChannelID, amt types.AttoFIL,
validAt *types.BlockHeight, condition *types.Predicate, sig []byte, redeemerConditionParams []interface{}) (uint8, error) {
if err := vmctx.Charge(actor.DefaultGasCost); err != nil {
return exec.ErrInsufficientGas, errors.RevertErrorWrap(err, "Insufficient gas")
}
if !VerifyVoucherSignature(payer, chid, amt, validAt, condition, sig) {
return errors.CodeError(Errors[ErrInvalidSignature]), Errors[ErrInvalidSignature]
}
ctx := context.Background()
storage := vmctx.Storage()
err := withPayerChannels(ctx, storage, payer, func(byChannelID exec.Lookup) error {
chInt, err := byChannelID.Find(ctx, chid.KeyString())
if err != nil {
if err == hamt.ErrNotFound {
return Errors[ErrUnknownChannel]
}
return errors.FaultErrorWrapf(err, "Could not retrieve payment channel with ID: %s", chid)
}
channel, ok := chInt.(*PaymentChannel)
if !ok {
return errors.NewFaultError("Expected PaymentChannel from channels lookup")
}
// validate the amount can be sent to the target and send payment to that address.
err = validateAndUpdateChannel(vmctx, vmctx.Message().From, channel, amt, validAt, condition, redeemerConditionParams)
if err != nil {
return err
}
// Reset the EOL to the originally agreed upon EOL in the event that the
// channel has been cancelled.
channel.Eol = channel.AgreedEol
// Mark the payment channel as redeemed
channel.Redeemed = true
return byChannelID.Set(ctx, chid.KeyString(), channel)
})
if err != nil {
// ensure error is properly wrapped
if !errors.IsFault(err) && !errors.ShouldRevert(err) {
return 1, errors.FaultErrorWrap(err, "Error redeeming payment channel")
}
return errors.CodeError(err), err
}
return 0, nil
}
// Close first executes the logic performed in the the Update method, then returns all
// funds remaining in the channel to the payer account and deletes the channel.
//
// If a condition is provided in the voucher:
// - The parameters provided in the condition will be combined with redeemerConditionParams
// - A message will be sent to the condition.To address using the condition.Method with the combined params
// - If the message returns an error the condition is considered to be false and the redeem will fail
func (pb *Actor) Close(vmctx exec.VMContext, payer address.Address, chid *types.ChannelID, amt types.AttoFIL,
validAt *types.BlockHeight, condition *types.Predicate, sig []byte, redeemerConditionParams []interface{}) (uint8, error) {
if err := vmctx.Charge(actor.DefaultGasCost); err != nil {
return exec.ErrInsufficientGas, errors.RevertErrorWrap(err, "Insufficient gas")
}
if !VerifyVoucherSignature(payer, chid, amt, validAt, condition, sig) {
return errors.CodeError(Errors[ErrInvalidSignature]), Errors[ErrInvalidSignature]
}
ctx := context.Background()
storage := vmctx.Storage()
err := withPayerChannels(ctx, storage, payer, func(byChannelID exec.Lookup) error {
chInt, err := byChannelID.Find(ctx, chid.KeyString())
if err != nil {
if err == hamt.ErrNotFound {
return Errors[ErrUnknownChannel]
}
return errors.FaultErrorWrapf(err, "Could not retrieve payment channel with ID: %s", chid)
}
channel, ok := chInt.(*PaymentChannel)
if !ok {
return errors.NewFaultError("Expected PaymentChannel from channels lookup")
}
// validate the amount can be sent to the target and send payment to that address.
err = validateAndUpdateChannel(vmctx, vmctx.Message().From, channel, amt, validAt, condition, redeemerConditionParams)
if err != nil {
return err
}
err = byChannelID.Set(ctx, chid.KeyString(), channel)
if err != nil {
return err
}
// return funds to payer
return reclaim(ctx, vmctx, byChannelID, payer, chid, channel)
})
if err != nil {
// ensure error is properly wrapped
if !errors.IsFault(err) && !errors.ShouldRevert(err) {
return 1, errors.FaultErrorWrap(err, "Error updating or reclaiming channel")
}
return errors.CodeError(err), err
}
return 0, nil
}
// Extend can be used by the owner of a channel to add more funds to it and
// extend the Channel's lifespan.
func (pb *Actor) Extend(vmctx exec.VMContext, chid *types.ChannelID, eol *types.BlockHeight) (uint8, error) {
if err := vmctx.Charge(actor.DefaultGasCost); err != nil {
return exec.ErrInsufficientGas, errors.RevertErrorWrap(err, "Insufficient gas")
}
ctx := context.Background()
storage := vmctx.Storage()
payerAddress := vmctx.Message().From
err := withPayerChannels(ctx, storage, payerAddress, func(byChannelID exec.Lookup) error {
chInt, err := byChannelID.Find(ctx, chid.KeyString())
if err != nil {
if err == hamt.ErrNotFound {
return Errors[ErrUnknownChannel]
}
return errors.FaultErrorWrapf(err, "Could not retrieve payment channel with ID: %s", chid)
}
channel, ok := chInt.(*PaymentChannel)
if !ok {
return errors.NewFaultError("Expected PaymentChannel from channels lookup")
}
// eol can only be increased
if channel.Eol.GreaterThan(eol) {
return Errors[ErrEolTooLow]
}
// set new eol
channel.AgreedEol = eol
channel.Eol = eol
// increment the value
channel.Amount = channel.Amount.Add(vmctx.Message().Value)
return byChannelID.Set(ctx, chid.KeyString(), channel)
})
if err != nil {
// ensure error is properly wrapped
if !errors.IsFault(err) && !errors.ShouldRevert(err) {
return 1, errors.FaultErrorWrap(err, "Error extending channel")
}
return errors.CodeError(err), err
}
return 0, nil
}
// Cancel can be used to end an off chain payment early. It lowers the EOL of
// the payment channel to CancelDelayBlockTime rounds from now and allows a
// caller to reclaim their payments once the channel expires. In the time
// before the channel is closed, a target can potentially dispute the
// cancellation. Cancel will only succeed if the target has not successfully
// redeemed a voucher, or if the target has successfully redeemed the channel
// with a conditional voucher whose condition is no longer valid due to
// changes in chain state.
func (pb *Actor) Cancel(vmctx exec.VMContext, chid *types.ChannelID) (uint8, error) {
if err := vmctx.Charge(actor.DefaultGasCost); err != nil {
return exec.ErrInsufficientGas, errors.RevertErrorWrap(err, "Insufficient gas")
}
ctx := context.Background()
storage := vmctx.Storage()
payerAddress := vmctx.Message().From
err := withPayerChannels(ctx, storage, payerAddress, func(byChannelID exec.Lookup) error {
chInt, err := byChannelID.Find(ctx, chid.KeyString())
if err != nil {
if err == hamt.ErrNotFound {
return Errors[ErrUnknownChannel]
}
return errors.FaultErrorWrapf(err, "Could not retrieve payment channel with ID: %s", chid)
}
channel, ok := chInt.(*PaymentChannel)
if !ok {
return errors.NewFaultError("Expected PaymentChannel from channels lookup")
}
// Check if channel has already been redeemed and re-run condition if necessary
if channel.Redeemed {
// If it doesn't have a condition, it's valid, so throw an error
if channel.Condition == nil {
return errors.NewCodedRevertError(ErrInvalidCancel, "channel cannot be cancelled due to successful redeem")
}
// Otherwise, check the condition on the payment channel
err := checkCondition(vmctx, channel)
// If we receive no error, the condition is valid, so we fail
if err == nil {
return errors.NewCodedRevertError(ErrInvalidCancel, "channel cannot be cancelled due to successful redeem")
}
// If there's a non-revert error, we have a bigger problem, so raise the
// error
if !errors.ShouldRevert(err) {
return err
}
}
eol := vmctx.BlockHeight().Add(types.NewBlockHeight(CancelDelayBlockTime))
// eol can only be decreased
if channel.Eol.GreaterThan(eol) {
channel.Eol = eol
}
return byChannelID.Set(ctx, chid.KeyString(), channel)
})
if err != nil {
// ensure error is properly wrapped
if !errors.IsFault(err) && !errors.ShouldRevert(err) {
return 1, errors.FaultErrorWrap(err, "Error cancelling channel")
}
return errors.CodeError(err), err
}
return 0, nil
}
// Reclaim is used by the owner of a channel to reclaim unspent funds in timed
// out payment Channels they own.
func (pb *Actor) Reclaim(vmctx exec.VMContext, chid *types.ChannelID) (uint8, error) {
if err := vmctx.Charge(actor.DefaultGasCost); err != nil {
return exec.ErrInsufficientGas, errors.RevertErrorWrap(err, "Insufficient gas")
}
ctx := context.Background()
storage := vmctx.Storage()
payerAddress := vmctx.Message().From
err := withPayerChannels(ctx, storage, payerAddress, func(byChannelID exec.Lookup) error {
chInt, err := byChannelID.Find(ctx, chid.KeyString())
if err != nil {
if err == hamt.ErrNotFound {
return Errors[ErrUnknownChannel]
}
return errors.FaultErrorWrapf(err, "Could not retrieve payment channel with ID: %s", chid)
}
channel, ok := chInt.(*PaymentChannel)
if !ok {
return errors.NewFaultError("Expected PaymentChannel from channels lookup")
}
// reclaim may only be called at or after Eol
if vmctx.BlockHeight().LessThan(channel.Eol) {
return Errors[ErrReclaimBeforeEol]
}
// return funds to payer
return reclaim(ctx, vmctx, byChannelID, payerAddress, chid, channel)
})
if err != nil {
// ensure error is properly wrapped
if !errors.IsFault(err) && !errors.ShouldRevert(err) {
return 1, errors.FaultErrorWrap(err, "Error reclaiming channel")
}
return errors.CodeError(err), err
}
return 0, nil
}
// Voucher takes a channel id and amount and creates a new unsigned PaymentVoucher
// against the given channel. It also takes a block height parameter "validAt"
// enforcing that the voucher is not redeemed until the given block height.
// Voucher errors if the channel doesn't exist or contains less than the
// requested amount.
// If a condition is provided, attempts to redeem or close with the voucher will
// first send a message based on the condition and require a successful response
// for funds to be transferred.
func (pb *Actor) Voucher(vmctx exec.VMContext, chid *types.ChannelID, amount types.AttoFIL, validAt *types.BlockHeight, condition *types.Predicate) ([]byte, uint8, error) {
if err := vmctx.Charge(actor.DefaultGasCost); err != nil {
return []byte{}, exec.ErrInsufficientGas, errors.RevertErrorWrap(err, "Insufficient gas")
}
ctx := context.Background()
storage := vmctx.Storage()
payerAddress := vmctx.Message().From
var voucher types.PaymentVoucher
err := withPayerChannelsForReading(ctx, storage, payerAddress, func(byChannelID exec.Lookup) error {
var channel *PaymentChannel
chInt, err := byChannelID.Find(ctx, chid.KeyString())
if err != nil {
if err == hamt.ErrNotFound {
return Errors[ErrUnknownChannel]
}
return errors.FaultErrorWrapf(err, "Could not retrieve payment channel with ID: %s", chid)
}
channel, ok := chInt.(*PaymentChannel)
if !ok {
return errors.NewFaultError("Expected PaymentChannel from channels lookup")
}
// voucher must not exceed the total amount in the channel
if channel.Amount.LessThan(amount) {
return Errors[ErrInsufficientChannelFunds]
}
// set voucher
voucher = types.PaymentVoucher{
Channel: *chid,
Payer: vmctx.Message().From,
Target: channel.Target,
Amount: amount,
ValidAt: *validAt,
Condition: condition,
}
return nil
})
if err != nil {
// ensure error is properly wrapped
if !errors.IsFault(err) && !errors.ShouldRevert(err) {
return nil, 1, errors.FaultErrorWrap(err, "Error creating voucher")
}
return nil, errors.CodeError(err), err
}
voucherBytes, err := actor.MarshalStorage(voucher)
if err != nil {
return nil, 1, errors.FaultErrorWrap(err, "Error marshalling voucher")
}
return voucherBytes, 0, nil
}
// Ls returns all payment channels for a given payer address.
// The set of channels is returned as a cbor encoded map from string channelId to PaymentChannel.
func (pb *Actor) Ls(vmctx exec.VMContext, payer address.Address) ([]byte, uint8, error) {
if err := vmctx.Charge(actor.DefaultGasCost); err != nil {
return []byte{}, exec.ErrInsufficientGas, errors.RevertErrorWrap(err, "Insufficient gas")
}
ctx := context.Background()
storage := vmctx.Storage()
channels := map[string]*PaymentChannel{}
err := withPayerChannelsForReading(ctx, storage, payer, func(byChannelID exec.Lookup) error {
kvs, err := byChannelID.Values(ctx)
if err != nil {
return err
}
for _, kv := range kvs {
pc, ok := kv.Value.(*PaymentChannel)
if !ok {
return errors.NewFaultError("Expected PaymentChannel from channel lookup")
}
channels[kv.Key] = pc
}
return nil
})
if err != nil {
// ensure error is properly wrapped
if !errors.IsFault(err) && !errors.ShouldRevert(err) {
return nil, 1, errors.FaultErrorWrap(err, "Error listing channels")
}
return nil, errors.CodeError(err), err
}
channelsBytes, err := actor.MarshalStorage(channels)
if err != nil {
return nil, 1, errors.FaultErrorWrap(err, "Error marshalling channels")
}
return channelsBytes, 0, nil
}
func validateAndUpdateChannel(ctx exec.VMContext, target address.Address, channel *PaymentChannel, amt types.AttoFIL, validAt *types.BlockHeight, condition *types.Predicate, redeemerSuppliedParams []interface{}) error {
cacheCondition(channel, condition, redeemerSuppliedParams)
if err := checkCondition(ctx, channel); err != nil {
return err
}
if target != channel.Target {
return Errors[ErrWrongTarget]
}
if ctx.BlockHeight().LessThan(validAt) {
return Errors[ErrTooEarly]
}
if ctx.BlockHeight().GreaterEqual(channel.Eol) {
return Errors[ErrExpired]
}
if amt.GreaterThan(channel.Amount) {
return Errors[ErrInsufficientChannelFunds]
}
if amt.LessEqual(channel.AmountRedeemed) {
return Errors[ErrAlreadyWithdrawn]
}
// transfer funds to sender
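// e.g. redeeming a voucher for 10 against a channel with 4 already redeemed sends 6 now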
updateAmount := amt.Sub(channel.AmountRedeemed)
_, _, err := ctx.Send(ctx.Message().From, "", updateAmount, nil)
if err != nil {
return err
}
// update amount redeemed from this channel
channel.AmountRedeemed = amt
return nil
}
func reclaim(ctx context.Context, vmctx exec.VMContext, byChannelID exec.Lookup, payer address.Address, chid *types.ChannelID, channel *PaymentChannel) error {
amt := channel.Amount.Sub(channel.AmountRedeemed)
if amt.LessEqual(types.ZeroAttoFIL) {
return nil
}
// clean up
err := byChannelID.Delete(ctx, chid.KeyString())
if err != nil {
return err
}
// send funds
_, _, err = vmctx.Send(payer, "", amt, nil)
if err != nil {
return errors.RevertErrorWrap(err, "could not send update funds")
}
return nil
}
// separator is the separator used when concatenating channel and amount in a
// voucher signature.
const separator = 0x0
// SignVoucher creates the signature for the given combination of
// channel, amount, validAt (earliest block height for redeem) and from address.
// It does so by signing the following bytes: (channelID | 0x0 | amount | 0x0 | [condition data] | validAt)
func SignVoucher(channelID *types.ChannelID, amount types.AttoFIL, validAt *types.BlockHeight, addr address.Address, condition *types.Predicate, signer types.Signer) (types.Signature, error) {
data, err := createVoucherSignatureData(channelID, amount, validAt, condition)
if err != nil {
return nil, err
}
return signer.SignBytes(data, addr)
}
// VerifyVoucherSignature returns whether the voucher's signature is valid
func VerifyVoucherSignature(payer address.Address, chid *types.ChannelID, amt types.AttoFIL, validAt *types.BlockHeight, condition *types.Predicate, sig []byte) bool {
data, err := createVoucherSignatureData(chid, amt, validAt, condition)
// the only error is failure to encode the values
if err != nil {
return false
}
return types.IsValidSignature(data, payer, sig)
}
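// The two helpers compose as a round trip. A minimal sketch (the signer and
// addresses below are placeholders; any types.Signer implementation works):
//
//	sig, err := SignVoucher(chid, amt, validAt, payer, nil, signer)
//	if err == nil && VerifyVoucherSignature(payer, chid, amt, validAt, nil, sig) {
//	    // voucher signature checks out
//	}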
func createVoucherSignatureData(channelID *types.ChannelID, amount types.AttoFIL, validAt *types.BlockHeight, condition *types.Predicate) ([]byte, error) {
data := append(channelID.Bytes(), separator)
data = append(data, amount.Bytes()...)
data = append(data, separator)
if condition != nil {
data = append(data, condition.To.Bytes()...)
data = append(data, []byte(condition.Method)...)
encodedParams, err := abi.ToEncodedValues(condition.Params...)
if err != nil {
return []byte{}, err
}
data = append(data, encodedParams...)
}
return append(data, validAt.Bytes()...), nil
}
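// withPayerChannels loads the lookup of channels for the given payer, runs f
// against it for mutation, commits the updated channel lookup back under the
// payer's key (or deletes the payer entry when no channels remain), and then
// commits actor state.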
func withPayerChannels(ctx context.Context, storage exec.Storage, payer address.Address, f func(exec.Lookup) error) error {
stateCid, err := actor.WithLookup(ctx, storage, storage.Head(), func(byPayer exec.Lookup) error {
byChannelLookup, err := findByChannelLookup(ctx, storage, byPayer, payer)
if err != nil {
return err
}
// run inner function
err = f(byChannelLookup)
if err != nil {
return err
}
// commit channel lookup
committedCID, err := byChannelLookup.Commit(ctx)
if err != nil {
return err
}
// if all of the payer's channels are gone, delete the payer
if byChannelLookup.IsEmpty() {
return byPayer.Delete(ctx, payer.String())
}
// set payers channels into primary lookup
return byPayer.Set(ctx, payer.String(), committedCID)
})
if err != nil {
return err
}
return storage.Commit(stateCid, storage.Head())
}
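// withPayerChannelsForReading is the read-only variant: it runs f against the
// payer's channel lookup without committing any state.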
func withPayerChannelsForReading(ctx context.Context, storage exec.Storage, payer address.Address, f func(exec.Lookup) error) error {
return actor.WithLookupForReading(ctx, storage, storage.Head(), func(byPayer exec.Lookup) error {
byChannelLookup, err := findByChannelLookup(ctx, storage, byPayer, payer)
if err != nil {
return err
}
// run inner function
return f(byChannelLookup)
})
}
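// findByChannelLookup resolves the per-payer channel lookup from the top-level
// payer lookup. An unknown payer yields an empty lookup rather than an error.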
func findByChannelLookup(ctx context.Context, storage exec.Storage, byPayer exec.Lookup, payer address.Address) (exec.Lookup, error) {
byChannelID, err := byPayer.Find(ctx, payer.String())
if err != nil {
if err == hamt.ErrNotFound {
return actor.LoadLookup(ctx, storage, cid.Undef)
}
return nil, err
}
byChannelCID, ok := byChannelID.(cid.Cid)
if !ok {
return nil, errors.NewFaultError("Paymentbroker payer is not a Cid")
}
return actor.LoadTypedLookup(ctx, storage, byChannelCID, &PaymentChannel{})
}
// checkCondition combines params in the condition with the redeemerSuppliedParams, sends a message
// to the actor and method specified in the condition, and returns an error if one exists.
func checkCondition(vmctx exec.VMContext, channel *PaymentChannel) error {
if channel.Condition == nil {
return nil
}
_, _, err := vmctx.Send(channel.Condition.To, channel.Condition.Method, types.ZeroAttoFIL, channel.Condition.Params)
if err != nil {
if errors.IsFault(err) {
return err
}
return errors.NewCodedRevertErrorf(ErrConditionInvalid, "failed to validate voucher condition: %s", err)
}
return nil
}
// cacheCondition saves redeemer supplied conditions to the payment channel for
// future use
func cacheCondition(channel *PaymentChannel, condition *types.Predicate, redeemerSuppliedParams []interface{}) {
if condition == nil {
channel.Condition = nil
return
}
// If new params have been provided or we don't yet have a cached condition,
// cache the provided params and condition on the payment channel.
if !channel.Redeemed || channel.Condition == nil || len(redeemerSuppliedParams) > 0 {
newParams := condition.Params
newParams = append(newParams, redeemerSuppliedParams...)
newCachedCondition := *condition
newCachedCondition.Params = newParams
channel.Condition = &newCachedCondition
}
}
| 1 | 21,314 | In `storagemarket.go` you used `nil` for an unwanted out parameter. Do something consistent (nil seems fine if supported). | filecoin-project-venus | go |
@@ -0,0 +1,8 @@
+package trojan
+
+var (
+ Contains = contains
+ HashBytes = hashBytes
+ PadBytes = padBytesLeft
+ IsPotential = isPotential
+) | 1 | 1 | 10,937 | is this a new pattern we use in bee? interesting | ethersphere-bee | go |
|
@@ -382,7 +382,13 @@ func (f *FsS3) Mkdir() error {
if err, ok := err.(*s3.Error); ok {
if err.Code == "BucketAlreadyOwnedByYou" {
return nil
+ } else if err.Code == "BucketAlreadyExists" {
+ // Unfortunately Qstack are not reliably returning
+ // BucketAlreadyOwnedByYou, but instead BucketAlreadyExists.
+ // Carry on and wait for potential failure later.
+ return nil
}
+
}
return err
} | 1 | // S3 interface
package s3
// FIXME need to prevent anything but ListDir working for s3://
import (
"errors"
"fmt"
"io"
"net/http"
"path"
"regexp"
"strconv"
"strings"
"time"
"github.com/ncw/goamz/aws"
"github.com/ncw/goamz/s3"
"github.com/ncw/rclone/fs"
"github.com/ncw/swift"
)
// Register with Fs
func init() {
fs.Register(&fs.FsInfo{
Name: "s3",
NewFs: NewFs,
// AWS endpoints: http://docs.amazonwebservices.com/general/latest/gr/rande.html#s3_region
Options: []fs.Option{{
Name: "access_key_id",
Help: "AWS Access Key ID.",
}, {
Name: "secret_access_key",
Help: "AWS Secret Access Key (password). ",
}, {
Name: "endpoint",
Help: "Endpoint for S3 API.",
Examples: []fs.OptionExample{{
Value: "https://s3.amazonaws.com/",
Help: "The default endpoint - a good choice if you are unsure.\nUS Region, Northern Virginia or Pacific Northwest.\nLeave location constraint empty.",
}, {
Value: "https://s3-external-1.amazonaws.com",
Help: "US Region, Northern Virginia only.\nLeave location constraint empty.",
}, {
Value: "https://s3-us-west-2.amazonaws.com",
Help: "US West (Oregon) Region\nNeeds location constraint us-west-2.",
}, {
Value: "https://s3-us-west-1.amazonaws.com",
Help: "US West (Northern California) Region\nNeeds location constraint us-west-1.",
}, {
Value: "https://s3-eu-west-1.amazonaws.com",
Help: "EU (Ireland) Region Region\nNeeds location constraint EU or eu-west-1.",
}, {
Value: "https://s3-ap-southeast-1.amazonaws.com",
Help: "Asia Pacific (Singapore) Region\nNeeds location constraint ap-southeast-1.",
}, {
Value: "https://s3-ap-southeast-2.amazonaws.com",
Help: "Asia Pacific (Sydney) Region\nNeeds location constraint .",
}, {
Value: "https://s3-ap-northeast-1.amazonaws.com",
Help: "Asia Pacific (Tokyo) Region\nNeeds location constraint ap-northeast-1.",
}, {
Value: "https://s3-sa-east-1.amazonaws.com",
Help: "South America (Sao Paulo) Region\nNeeds location constraint sa-east-1.",
}},
}, {
Name: "location_constraint",
Help: "Location constraint - must be set to match the Endpoint.",
Examples: []fs.OptionExample{{
Value: "",
Help: "Empty for US Region, Northern Virginia or Pacific Northwest.",
}, {
Value: "us-west-2",
Help: "US West (Oregon) Region.",
}, {
Value: "us-west-1",
Help: "US West (Northern California) Region.",
}, {
Value: "eu-west-1",
Help: "EU (Ireland) Region.",
}, {
Value: "EU",
Help: "EU Region.",
}, {
Value: "ap-southeast-1",
Help: "Asia Pacific (Singapore) Region.",
}, {
Value: "ap-southeast-2",
Help: "Asia Pacific (Sydney) Region.",
}, {
Value: "ap-northeast-1",
Help: "Asia Pacific (Tokyo) Region.",
}, {
Value: "sa-east-1",
Help: "South America (Sao Paulo) Region.",
}},
}},
})
}
// Constants
const (
metaMtime = "X-Amz-Meta-Mtime" // the meta key to store mtime in
listChunkSize = 1024 // number of items to read at once
)
// FsS3 represents a remote s3 server
type FsS3 struct {
c *s3.S3 // the connection to the s3 server
b *s3.Bucket // the connection to the bucket
bucket string // the bucket we are working on
perm s3.ACL // permissions for new buckets / objects
root string // root of the bucket - ignore all objects above this
}
// FsObjectS3 describes a s3 object
type FsObjectS3 struct {
// Will definitely have everything but meta which may be nil
//
// List will read everything but meta - to fill that in need to call
// readMetaData
s3 *FsS3 // what this object is part of
remote string // The remote path
etag string // md5sum of the object
bytes int64 // size of the object
lastModified time.Time // Last modified
meta s3.Headers // The object metadata if known - may be nil
}
// ------------------------------------------------------------
// String converts this FsS3 to a string
func (f *FsS3) String() string {
if f.root == "" {
return fmt.Sprintf("S3 bucket %s", f.bucket)
}
return fmt.Sprintf("S3 bucket %s path %s", f.bucket, f.root)
}
// Pattern to match a s3 path
var matcher = regexp.MustCompile(`^([^/]*)(.*)$`)
// s3ParsePath parses an s3 'url' into a bucket and directory
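// For example, "bucket/path/to/dir" yields bucket "bucket" and directory
// "path/to/dir"; a bare "bucket" yields an empty directory.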
func s3ParsePath(path string) (bucket, directory string, err error) {
parts := matcher.FindStringSubmatch(path)
if parts == nil {
err = fmt.Errorf("Couldn't parse bucket out of s3 path %q", path)
} else {
bucket, directory = parts[1], parts[2]
directory = strings.Trim(directory, "/")
}
return
}
// s3Connection makes a connection to s3
func s3Connection(name string) (*s3.S3, error) {
// Make the auth
accessKeyId := fs.ConfigFile.MustValue(name, "access_key_id")
if accessKeyId == "" {
return nil, errors.New("access_key_id not found")
}
secretAccessKey := fs.ConfigFile.MustValue(name, "secret_access_key")
if secretAccessKey == "" {
return nil, errors.New("secret_access_key not found")
}
auth := aws.Auth{AccessKey: accessKeyId, SecretKey: secretAccessKey}
// FIXME look through all the regions by name and use one of them if found
// Synthesize the region
s3Endpoint := fs.ConfigFile.MustValue(name, "endpoint")
if s3Endpoint == "" {
s3Endpoint = "https://s3.amazonaws.com/"
}
region := aws.Region{
Name: "s3",
S3Endpoint: s3Endpoint,
S3LocationConstraint: false,
}
s3LocationConstraint := fs.ConfigFile.MustValue(name, "location_constraint")
if s3LocationConstraint != "" {
region.Name = s3LocationConstraint
region.S3LocationConstraint = true
}
c := s3.New(auth, region)
return c, nil
}
// NewFs constructs an FsS3 from the path, bucket:path
func NewFs(name, root string) (fs.Fs, error) {
bucket, directory, err := s3ParsePath(root)
if err != nil {
return nil, err
}
c, err := s3Connection(name)
if err != nil {
return nil, err
}
f := &FsS3{
c: c,
bucket: bucket,
b: c.Bucket(bucket),
perm: s3.Private, // FIXME need user to specify
root: directory,
}
if f.root != "" {
f.root += "/"
// Check to see if the object exists
_, err = f.b.Head(directory, nil)
if err == nil {
remote := path.Base(directory)
f.root = path.Dir(directory)
if f.root == "." {
f.root = ""
} else {
f.root += "/"
}
obj := f.NewFsObject(remote)
// return a Fs Limited to this object
return fs.NewLimited(f, obj), nil
}
}
return f, nil
}
// Return an FsObject from a path
//
// May return nil if an error occurred
func (f *FsS3) newFsObjectWithInfo(remote string, info *s3.Key) fs.Object {
o := &FsObjectS3{
s3: f,
remote: remote,
}
if info != nil {
// Set info but not meta
var err error
o.lastModified, err = time.Parse(time.RFC3339, info.LastModified)
if err != nil {
fs.Log(o, "Failed to read last modified: %s", err)
o.lastModified = time.Now()
}
o.etag = info.ETag
o.bytes = info.Size
} else {
err := o.readMetaData() // reads info and meta, returning an error
if err != nil {
// logged already FsDebug("Failed to read info: %s", err)
return nil
}
}
return o
}
// Return an FsObject from a path
//
// May return nil if an error occurred
func (f *FsS3) NewFsObject(remote string) fs.Object {
return f.newFsObjectWithInfo(remote, nil)
}
// list the objects into the function supplied
//
// If directories is set it only sends directories
func (f *FsS3) list(directories bool, fn func(string, *s3.Key)) {
delimiter := ""
if directories {
delimiter = "/"
}
marker := ""
for {
objects, err := f.b.List(f.root, delimiter, marker, listChunkSize)
if err != nil {
fs.Stats.Error()
fs.Log(f, "Couldn't read bucket %q: %s", f.bucket, err)
} else {
rootLength := len(f.root)
if directories {
for _, remote := range objects.CommonPrefixes {
if !strings.HasPrefix(remote, f.root) {
fs.Log(f, "Odd name received %q", remote)
continue
}
remote := remote[rootLength:]
if strings.HasSuffix(remote, "/") {
remote = remote[:len(remote)-1]
}
fn(remote, &s3.Key{Key: remote})
}
} else {
for i := range objects.Contents {
object := &objects.Contents[i]
if !strings.HasPrefix(object.Key, f.root) {
fs.Log(f, "Odd name received %q", object.Key)
continue
}
remote := object.Key[rootLength:]
fn(remote, object)
}
}
}
if !objects.IsTruncated {
break
}
// Use NextMarker if set, otherwise use last Key
marker = objects.NextMarker
if marker == "" {
marker = objects.Contents[len(objects.Contents)-1].Key
}
}
}
// Walk the path returning a channel of FsObjects
func (f *FsS3) List() fs.ObjectsChan {
out := make(fs.ObjectsChan, fs.Config.Checkers)
if f.bucket == "" {
// Return no objects at top level list
close(out)
fs.Stats.Error()
fs.Log(f, "Can't list objects at root - choose a bucket using lsd")
} else {
go func() {
defer close(out)
f.list(false, func(remote string, object *s3.Key) {
if fs := f.newFsObjectWithInfo(remote, object); fs != nil {
out <- fs
}
})
}()
}
return out
}
// Lists the buckets
func (f *FsS3) ListDir() fs.DirChan {
out := make(fs.DirChan, fs.Config.Checkers)
if f.bucket == "" {
// List the buckets
go func() {
defer close(out)
buckets, err := f.c.ListBuckets()
if err != nil {
fs.Stats.Error()
fs.Log(f, "Couldn't list buckets: %s", err)
} else {
for _, bucket := range buckets {
out <- &fs.Dir{
Name: bucket.Name,
When: bucket.CreationDate,
Bytes: -1,
Count: -1,
}
}
}
}()
} else {
// List the directories in the path in the bucket
go func() {
defer close(out)
f.list(true, func(remote string, object *s3.Key) {
out <- &fs.Dir{
Name: remote,
Bytes: object.Size,
Count: 0,
}
})
}()
}
return out
}
// Put the FsObject into the bucket
func (f *FsS3) Put(in io.Reader, remote string, modTime time.Time, size int64) (fs.Object, error) {
// Temporary FsObject under construction
fs := &FsObjectS3{s3: f, remote: remote}
return fs, fs.Update(in, modTime, size)
}
// Mkdir creates the bucket if it doesn't exist
func (f *FsS3) Mkdir() error {
err := f.b.PutBucket(f.perm)
if err, ok := err.(*s3.Error); ok {
if err.Code == "BucketAlreadyOwnedByYou" {
return nil
}
}
return err
}
// Rmdir deletes the bucket
//
// Returns an error if it isn't empty
func (f *FsS3) Rmdir() error {
return f.b.DelBucket()
}
// Return the precision
func (f *FsS3) Precision() time.Duration {
return time.Nanosecond
}
// ------------------------------------------------------------
// Return the parent Fs
func (o *FsObjectS3) Fs() fs.Fs {
return o.s3
}
// Return a string version
func (o *FsObjectS3) String() string {
if o == nil {
return "<nil>"
}
return o.remote
}
// Return the remote path
func (o *FsObjectS3) Remote() string {
return o.remote
}
// Md5sum returns the Md5sum of an object returning a lowercase hex string
func (o *FsObjectS3) Md5sum() (string, error) {
return strings.Trim(strings.ToLower(o.etag), `"`), nil
}
// Size returns the size of an object in bytes
func (o *FsObjectS3) Size() int64 {
return o.bytes
}
// readMetaData gets the metadata if it hasn't already been fetched
//
// if we get a 404 error then we retry a few times for eventual
// consistency reasons
//
// it also sets the info
func (o *FsObjectS3) readMetaData() (err error) {
if o.meta != nil {
return nil
}
var headers s3.Headers
// Try reading the metadata a few times (with exponential
// backoff) to get around eventual consistency on 404 error
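// (sleeps of 5ms, 10ms, 20ms, ... doubling per retry, about 5s in total)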
for tries := uint(0); tries < 10; tries++ {
headers, err = o.s3.b.Head(o.s3.root+o.remote, nil)
if s3Err, ok := err.(*s3.Error); ok {
if s3Err.StatusCode == http.StatusNotFound {
time.Sleep(5 * time.Millisecond << tries)
continue
}
}
break
}
if err != nil {
fs.Debug(o, "Failed to read info: %s", err)
return err
}
var size int64
// Ignore missing Content-Length assuming it is 0
// Some versions of ceph do this due their apache proxies
if contentLength, ok := headers["Content-Length"]; ok {
size, err = strconv.ParseInt(contentLength, 10, 64)
if err != nil {
fs.Debug(o, "Failed to read size from: %q", headers)
return err
}
}
o.etag = headers["Etag"]
o.bytes = size
o.meta = headers
if o.lastModified, err = time.Parse(http.TimeFormat, headers["Last-Modified"]); err != nil {
fs.Log(o, "Failed to read last modified from HEAD: %s", err)
o.lastModified = time.Now()
}
return nil
}
// ModTime returns the modification time of the object
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *FsObjectS3) ModTime() time.Time {
err := o.readMetaData()
if err != nil {
fs.Log(o, "Failed to read metadata: %s", err)
return time.Now()
}
// read mtime out of metadata if available
d, ok := o.meta[metaMtime]
if !ok {
// fs.Debug(o, "No metadata")
return o.lastModified
}
modTime, err := swift.FloatStringToTime(d)
if err != nil {
fs.Log(o, "Failed to read mtime from object: %s", err)
return o.lastModified
}
return modTime
}
// SetModTime sets the modification time of the remote object
func (o *FsObjectS3) SetModTime(modTime time.Time) {
err := o.readMetaData()
if err != nil {
fs.Stats.Error()
fs.Log(o, "Failed to read metadata: %s", err)
return
}
o.meta[metaMtime] = swift.TimeToFloatString(modTime)
_, err = o.s3.b.Update(o.s3.root+o.remote, o.s3.perm, o.meta)
if err != nil {
fs.Stats.Error()
fs.Log(o, "Failed to update remote mtime: %s", err)
}
}
// Is this object storable
func (o *FsObjectS3) Storable() bool {
return true
}
// Open an object for read
func (o *FsObjectS3) Open() (in io.ReadCloser, err error) {
in, err = o.s3.b.GetReader(o.s3.root + o.remote)
return
}
// Update the Object from in with modTime and size
func (o *FsObjectS3) Update(in io.Reader, modTime time.Time, size int64) error {
// Set the mtime in the headers
headers := s3.Headers{
metaMtime: swift.TimeToFloatString(modTime),
}
_, err := o.s3.b.PutReaderHeaders(o.s3.root+o.remote, in, size, fs.MimeType(o), o.s3.perm, headers)
if err != nil {
return err
}
// Read the metadata from the newly created object
o.meta = nil // wipe old metadata
err = o.readMetaData()
return err
}
// Remove an object
func (o *FsObjectS3) Remove() error {
return o.s3.b.Del(o.s3.root + o.remote)
}
// Check the interfaces are satisfied
var _ fs.Fs = &FsS3{}
var _ fs.Object = &FsObjectS3{}
| 1 | 5,414 | If you think this PR works, i'll clean this up before resubmitting. | rclone-rclone | go |
@@ -148,6 +148,10 @@ public final class Const {
public static final String PATH_CHECKSESSION = "checksession";
public static final String URL_PREFIX = "urlPrefix";
-
+
public static final String INSTANCE_PUBKEY_PRO = "publickey";
+
+ public static final String GROUPID = "io.servicecomb";
+
+ public static final String ARTIFACTID = "java-chassis";
} | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.servicecomb.serviceregistry.api;
import io.servicecomb.serviceregistry.config.ServiceRegistryConfig;
/**
* Created by on 2017/1/9.
*/
public final class Const {
private Const() {
}
public static final class REGISTRY_API {
public static final String DOMAIN_NAME = ServiceRegistryConfig.INSTANCE.getDomainName();
public static final String CURRENT_VERSION = ServiceRegistryConfig.INSTANCE.getRegistryApiVersion();
// 2017-10-21 add new implementations for v4. We can remove v3 support after a period.
public static final String VERSION_V3 = "v3";
public static final String LATEST_API_VERSION = "v4";
public static final String V4_PREFIX = String.format("/v4/%s/registry", DOMAIN_NAME);
public static final String MICROSERVICE_OPERATION_ALL;
static {
if (VERSION_V3.equals(CURRENT_VERSION)) {
MICROSERVICE_OPERATION_ALL = "/registry/v3/microservices";
} else {
MICROSERVICE_OPERATION_ALL = V4_PREFIX + "/microservices";
}
}
public static final String MICROSERVICE_OPERATION_ONE;
static {
if (VERSION_V3.equals(CURRENT_VERSION)) {
MICROSERVICE_OPERATION_ONE = "/registry/v3/microservices/%s";
} else {
MICROSERVICE_OPERATION_ONE = V4_PREFIX + "/microservices/%s";
}
}
public static final String MICROSERVICE_INSTANCE_OPERATION_ALL;
static {
if (VERSION_V3.equals(CURRENT_VERSION)) {
MICROSERVICE_INSTANCE_OPERATION_ALL = "/registry/v3/microservices/%s/instances";
} else {
MICROSERVICE_INSTANCE_OPERATION_ALL = V4_PREFIX + "/microservices/%s/instances";
}
}
public static final String MICROSERVICE_INSTANCE_OPERATION_ONE;
static {
if (VERSION_V3.equals(CURRENT_VERSION)) {
MICROSERVICE_INSTANCE_OPERATION_ONE = "/registry/v3/microservices/%s/instances/%s";
} else {
MICROSERVICE_INSTANCE_OPERATION_ONE = V4_PREFIX + "/microservices/%s/instances/%s";
}
}
public static final String MICROSERVICE_INSTANCES;
static {
if (VERSION_V3.equals(CURRENT_VERSION)) {
MICROSERVICE_INSTANCES = "/registry/v3/instances";
} else {
MICROSERVICE_INSTANCES = V4_PREFIX + "/instances";
}
}
public static final String MICROSERVICE_PROPERTIES;
static {
if (VERSION_V3.equals(CURRENT_VERSION)) {
MICROSERVICE_PROPERTIES = "/registry/v3/microservices/%s/properties";
} else {
MICROSERVICE_PROPERTIES = V4_PREFIX + "/microservices/%s/properties";
}
}
public static final String MICROSERVICE_INSTANCE_PROPERTIES;
static {
if (VERSION_V3.equals(CURRENT_VERSION)) {
MICROSERVICE_INSTANCE_PROPERTIES = "/registry/v3/microservices/%s/instances/%s/properties";
} else {
MICROSERVICE_INSTANCE_PROPERTIES = V4_PREFIX + "/microservices/%s/instances/%s/properties";
}
}
public static final String MICROSERVICE_HEARTBEAT;
static {
if (VERSION_V3.equals(CURRENT_VERSION)) {
MICROSERVICE_HEARTBEAT = "/registry/v3/microservices/%s/instances/%s/heartbeat";
} else {
MICROSERVICE_HEARTBEAT = V4_PREFIX + "/microservices/%s/instances/%s/heartbeat";
}
}
public static final String MICROSERVICE_EXISTENCE;
static {
if (VERSION_V3.equals(CURRENT_VERSION)) {
MICROSERVICE_EXISTENCE = "/registry/v3/existence";
} else {
MICROSERVICE_EXISTENCE = V4_PREFIX + "/existence";
}
}
public static final String MICROSERVICE_SCHEMA;
static {
if (VERSION_V3.equals(CURRENT_VERSION)) {
MICROSERVICE_SCHEMA = "/registry/v3/microservices/%s/schemas/%s";
} else {
MICROSERVICE_SCHEMA = V4_PREFIX + "/microservices/%s/schemas/%s";
}
}
public static final String MICROSERVICE_WATCH;
static {
if (VERSION_V3.equals(CURRENT_VERSION)) {
MICROSERVICE_WATCH = "/registry/v3/microservices/%s/watcher";
} else {
MICROSERVICE_WATCH = V4_PREFIX + "/microservices/%s/watcher";
}
}
}
public static final String REGISTRY_APP_ID = "default";
public static final String REGISTRY_SERVICE_NAME = "SERVICECENTER";
public static final String APP_SERVICE_SEPARATOR = ":";
public static final String PATH_CHECKSESSION = "checksession";
public static final String URL_PREFIX = "urlPrefix";
public static final String INSTANCE_PUBKEY_PRO = "publickey";
}
| 1 | 8,188 | are you sure you can read version by this artifactid? | apache-servicecomb-java-chassis | java |
@@ -32,10 +32,10 @@ import org.openqa.selenium.remote.tracing.HttpTracing;
import org.openqa.selenium.remote.tracing.Tracer;
import java.net.URL;
+import java.util.Objects;
import java.util.UUID;
import java.util.logging.Logger;
-import static org.openqa.selenium.net.Urls.fromUri;
import static org.openqa.selenium.remote.http.Contents.asJson;
import static org.openqa.selenium.remote.http.HttpMethod.DELETE;
import static org.openqa.selenium.remote.http.HttpMethod.GET; | 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.openqa.selenium.grid.distributor.remote;
import org.openqa.selenium.SessionNotCreatedException;
import org.openqa.selenium.grid.data.CreateSessionResponse;
import org.openqa.selenium.grid.data.DistributorStatus;
import org.openqa.selenium.grid.distributor.Distributor;
import org.openqa.selenium.grid.node.Node;
import org.openqa.selenium.grid.web.Values;
import org.openqa.selenium.internal.Require;
import org.openqa.selenium.remote.http.HttpClient;
import org.openqa.selenium.remote.http.HttpHandler;
import org.openqa.selenium.remote.http.HttpRequest;
import org.openqa.selenium.remote.http.HttpResponse;
import org.openqa.selenium.remote.tracing.HttpTracing;
import org.openqa.selenium.remote.tracing.Tracer;
import java.net.URL;
import java.util.UUID;
import java.util.logging.Logger;
import static org.openqa.selenium.net.Urls.fromUri;
import static org.openqa.selenium.remote.http.Contents.asJson;
import static org.openqa.selenium.remote.http.HttpMethod.DELETE;
import static org.openqa.selenium.remote.http.HttpMethod.GET;
import static org.openqa.selenium.remote.http.HttpMethod.POST;
public class RemoteDistributor extends Distributor {
private static final Logger LOG = Logger.getLogger("Selenium Distributor (Remote)");
private final HttpHandler client;
public RemoteDistributor(Tracer tracer, HttpClient.Factory factory, URL url) {
super(tracer, factory);
this.client = factory.createClient(url);
}
@Override
public boolean isReady() {
try {
return client.execute(new HttpRequest(GET, "/readyz")).isSuccessful();
} catch (Exception e) {
return false;
}
}
@Override
public CreateSessionResponse newSession(HttpRequest request)
throws SessionNotCreatedException {
HttpRequest upstream = new HttpRequest(POST, "/se/grid/distributor/session");
HttpTracing.inject(tracer, tracer.getCurrentContext(), upstream);
upstream.setContent(request.getContent());
HttpResponse response = client.execute(upstream);
return Values.get(response, CreateSessionResponse.class);
}
@Override
public RemoteDistributor add(Node node) {
HttpRequest request = new HttpRequest(POST, "/se/grid/distributor/node");
HttpTracing.inject(tracer, tracer.getCurrentContext(), request);
request.setContent(asJson(node.getStatus()));
HttpResponse response = client.execute(request);
Values.get(response, Void.class);
LOG.info(String.format("Added node %s.", node.getId()));
return this;
}
@Override
public void remove(UUID nodeId) {
Require.nonNull("Node ID", nodeId);
HttpRequest request = new HttpRequest(DELETE, "/se/grid/distributor/node/" + nodeId);
HttpTracing.inject(tracer, tracer.getCurrentContext(), request);
HttpResponse response = client.execute(request);
Values.get(response, Void.class);
}
@Override
public DistributorStatus getStatus() {
HttpRequest request = new HttpRequest(GET, "/se/grid/distributor/status");
HttpTracing.inject(tracer, tracer.getCurrentContext(), request);
HttpResponse response = client.execute(request);
return Values.get(response, DistributorStatus.class);
}
}
| 1 | 17,752 | We can get rid of this import then. | SeleniumHQ-selenium | js |
@@ -388,10 +388,13 @@ func (c *client) parse(buf []byte) error {
arg = buf[c.as : i-c.drop]
}
var err error
- if c.typ == CLIENT {
+ switch c.typ {
+ case CLIENT:
err = c.processSub(arg)
- } else {
+ case ROUTER:
err = c.processRemoteSub(arg)
+ case GATEWAY:
+ err = c.processGatewaySubjectSub(arg)
}
if err != nil {
return err | 1 | // Copyright 2012-2018 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"fmt"
)
type pubArg struct {
arg []byte
rcache []byte
account []byte
subject []byte
reply []byte
szb []byte
queues [][]byte
size int
}
type parseState struct {
state int
as int
drop int
pa pubArg
argBuf []byte
msgBuf []byte
scratch [MAX_CONTROL_LINE_SIZE]byte
}
// Parser constants
const (
OP_START = iota
OP_PLUS
OP_PLUS_O
OP_PLUS_OK
OP_MINUS
OP_MINUS_E
OP_MINUS_ER
OP_MINUS_ERR
OP_MINUS_ERR_SPC
MINUS_ERR_ARG
OP_C
OP_CO
OP_CON
OP_CONN
OP_CONNE
OP_CONNEC
OP_CONNECT
CONNECT_ARG
OP_P
OP_PU
OP_PUB
OP_PUB_SPC
PUB_ARG
OP_PI
OP_PIN
OP_PING
OP_PO
OP_PON
OP_PONG
MSG_PAYLOAD
MSG_END
OP_S
OP_SU
OP_SUB
OP_SUB_SPC
SUB_ARG
OP_A
OP_ASUB
OP_ASUB_SPC
ASUB_ARG
OP_AUSUB
OP_AUSUB_SPC
AUSUB_ARG
OP_R
OP_RS
OP_U
OP_UN
OP_UNS
OP_UNSU
OP_UNSUB
OP_UNSUB_SPC
UNSUB_ARG
OP_M
OP_MS
OP_MSG
OP_MSG_SPC
MSG_ARG
OP_I
OP_IN
OP_INF
OP_INFO
INFO_ARG
)
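// As an illustration of the state machine, parsing "PING\r\n" walks
// OP_START -> OP_P -> OP_PI -> OP_PIN -> OP_PING, with processPing firing on
// the trailing '\n'.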
func (c *client) parse(buf []byte) error {
var i int
var b byte
mcl := MAX_CONTROL_LINE_SIZE
if c.srv != nil {
if opts := c.srv.getOpts(); opts != nil {
mcl = opts.MaxControlLine
}
}
// Snapshot this, and reset when we receive a
// proper CONNECT if needed.
authSet := c.awaitingAuth()
// Move to loop instead of range syntax to allow jumping of i
for i = 0; i < len(buf); i++ {
b = buf[i]
switch c.state {
case OP_START:
if b != 'C' && b != 'c' && authSet {
goto authErr
}
switch b {
case 'P', 'p':
c.state = OP_P
case 'S', 's':
c.state = OP_S
case 'U', 'u':
c.state = OP_U
case 'R', 'r':
if c.typ == CLIENT {
goto parseErr
} else {
c.state = OP_R
}
case 'A', 'a':
if c.typ == CLIENT {
goto parseErr
} else {
c.state = OP_A
}
case 'C', 'c':
c.state = OP_C
case 'I', 'i':
c.state = OP_I
case '+':
c.state = OP_PLUS
case '-':
c.state = OP_MINUS
default:
goto parseErr
}
case OP_P:
switch b {
case 'U', 'u':
c.state = OP_PU
case 'I', 'i':
c.state = OP_PI
case 'O', 'o':
c.state = OP_PO
default:
goto parseErr
}
case OP_PU:
switch b {
case 'B', 'b':
c.state = OP_PUB
default:
goto parseErr
}
case OP_PUB:
switch b {
case ' ', '\t':
c.state = OP_PUB_SPC
default:
goto parseErr
}
case OP_PUB_SPC:
switch b {
case ' ', '\t':
continue
default:
c.state = PUB_ARG
c.as = i
}
case PUB_ARG:
switch b {
case '\r':
c.drop = 1
case '\n':
var arg []byte
if c.argBuf != nil {
arg = c.argBuf
c.argBuf = nil
} else {
arg = buf[c.as : i-c.drop]
}
if err := c.processPub(c.trace, arg); err != nil {
return err
}
c.drop, c.as, c.state = OP_START, i+1, MSG_PAYLOAD
// If we don't have a saved buffer then jump ahead with
// the index. If this overruns what is left we fall out
// and process split buffer.
if c.msgBuf == nil {
i = c.as + c.pa.size - LEN_CR_LF
}
default:
if c.argBuf != nil {
c.argBuf = append(c.argBuf, b)
}
}
case MSG_PAYLOAD:
if c.msgBuf != nil {
// copy as much as we can to the buffer and skip ahead.
toCopy := c.pa.size - len(c.msgBuf)
avail := len(buf) - i
if avail < toCopy {
toCopy = avail
}
if toCopy > 0 {
start := len(c.msgBuf)
// This is needed for copy to work.
c.msgBuf = c.msgBuf[:start+toCopy]
copy(c.msgBuf[start:], buf[i:i+toCopy])
// Update our index
i = (i + toCopy) - 1
} else {
// Fall back to append if needed.
c.msgBuf = append(c.msgBuf, b)
}
if len(c.msgBuf) >= c.pa.size {
c.state = MSG_END
}
} else if i-c.as >= c.pa.size {
c.state = MSG_END
}
case MSG_END:
switch b {
case '\n':
if c.msgBuf != nil {
c.msgBuf = append(c.msgBuf, b)
} else {
c.msgBuf = buf[c.as : i+1]
}
// strict check for proto
if len(c.msgBuf) != c.pa.size+LEN_CR_LF {
goto parseErr
}
c.processInboundMsg(c.msgBuf)
c.argBuf, c.msgBuf = nil, nil
c.drop, c.as, c.state = 0, i+1, OP_START
// Drop all pub args
c.pa.arg, c.pa.rcache, c.pa.account, c.pa.subject = nil, nil, nil, nil
c.pa.reply, c.pa.szb, c.pa.queues = nil, nil, nil
default:
if c.msgBuf != nil {
c.msgBuf = append(c.msgBuf, b)
}
continue
}
case OP_A:
switch b {
case '+':
c.state = OP_ASUB
case '-', 'u':
c.state = OP_AUSUB
default:
goto parseErr
}
case OP_ASUB:
switch b {
case ' ', '\t':
c.state = OP_ASUB_SPC
default:
goto parseErr
}
case OP_ASUB_SPC:
switch b {
case ' ', '\t':
continue
default:
c.state = ASUB_ARG
c.as = i
}
case ASUB_ARG:
switch b {
case '\r':
c.drop = 1
case '\n':
var arg []byte
if c.argBuf != nil {
arg = c.argBuf
c.argBuf = nil
} else {
arg = buf[c.as : i-c.drop]
}
if err := c.processAccountSub(arg); err != nil {
return err
}
c.drop, c.as, c.state = 0, i+1, OP_START
default:
if c.argBuf != nil {
c.argBuf = append(c.argBuf, b)
}
}
case OP_AUSUB:
switch b {
case ' ', '\t':
c.state = OP_AUSUB_SPC
default:
goto parseErr
}
case OP_AUSUB_SPC:
switch b {
case ' ', '\t':
continue
default:
c.state = AUSUB_ARG
c.as = i
}
case AUSUB_ARG:
switch b {
case '\r':
c.drop = 1
case '\n':
var arg []byte
if c.argBuf != nil {
arg = c.argBuf
c.argBuf = nil
} else {
arg = buf[c.as : i-c.drop]
}
c.processAccountUnsub(arg)
c.drop, c.as, c.state = 0, i+1, OP_START
default:
if c.argBuf != nil {
c.argBuf = append(c.argBuf, b)
}
}
case OP_S:
switch b {
case 'U', 'u':
c.state = OP_SU
default:
goto parseErr
}
case OP_SU:
switch b {
case 'B', 'b':
c.state = OP_SUB
default:
goto parseErr
}
case OP_SUB:
switch b {
case ' ', '\t':
c.state = OP_SUB_SPC
default:
goto parseErr
}
case OP_SUB_SPC:
switch b {
case ' ', '\t':
continue
default:
c.state = SUB_ARG
c.as = i
}
case SUB_ARG:
switch b {
case '\r':
c.drop = 1
case '\n':
var arg []byte
if c.argBuf != nil {
arg = c.argBuf
c.argBuf = nil
} else {
arg = buf[c.as : i-c.drop]
}
var err error
if c.typ == CLIENT {
err = c.processSub(arg)
} else {
err = c.processRemoteSub(arg)
}
if err != nil {
return err
}
c.drop, c.as, c.state = 0, i+1, OP_START
default:
if c.argBuf != nil {
c.argBuf = append(c.argBuf, b)
}
}
case OP_R:
switch b {
case 'S', 's':
c.state = OP_RS
case 'M', 'm':
c.state = OP_M
default:
goto parseErr
}
case OP_RS:
switch b {
case '+':
c.state = OP_SUB
case '-':
c.state = OP_UNSUB
default:
goto parseErr
}
case OP_U:
switch b {
case 'N', 'n':
c.state = OP_UN
default:
goto parseErr
}
case OP_UN:
switch b {
case 'S', 's':
c.state = OP_UNS
default:
goto parseErr
}
case OP_UNS:
switch b {
case 'U', 'u':
c.state = OP_UNSU
default:
goto parseErr
}
case OP_UNSU:
switch b {
case 'B', 'b':
c.state = OP_UNSUB
default:
goto parseErr
}
case OP_UNSUB:
switch b {
case ' ', '\t':
c.state = OP_UNSUB_SPC
default:
goto parseErr
}
case OP_UNSUB_SPC:
switch b {
case ' ', '\t':
continue
default:
c.state = UNSUB_ARG
c.as = i
}
case UNSUB_ARG:
switch b {
case '\r':
c.drop = 1
case '\n':
var arg []byte
if c.argBuf != nil {
arg = c.argBuf
c.argBuf = nil
} else {
arg = buf[c.as : i-c.drop]
}
var err error
if c.typ == CLIENT {
err = c.processUnsub(arg)
} else {
err = c.processRemoteUnsub(arg)
}
if err != nil {
return err
}
c.drop, c.as, c.state = 0, i+1, OP_START
default:
if c.argBuf != nil {
c.argBuf = append(c.argBuf, b)
}
}
case OP_PI:
switch b {
case 'N', 'n':
c.state = OP_PIN
default:
goto parseErr
}
case OP_PIN:
switch b {
case 'G', 'g':
c.state = OP_PING
default:
goto parseErr
}
case OP_PING:
switch b {
case '\n':
c.processPing()
c.drop, c.state = 0, OP_START
}
case OP_PO:
switch b {
case 'N', 'n':
c.state = OP_PON
default:
goto parseErr
}
case OP_PON:
switch b {
case 'G', 'g':
c.state = OP_PONG
default:
goto parseErr
}
case OP_PONG:
switch b {
case '\n':
c.processPong()
c.drop, c.state = 0, OP_START
}
case OP_C:
switch b {
case 'O', 'o':
c.state = OP_CO
default:
goto parseErr
}
case OP_CO:
switch b {
case 'N', 'n':
c.state = OP_CON
default:
goto parseErr
}
case OP_CON:
switch b {
case 'N', 'n':
c.state = OP_CONN
default:
goto parseErr
}
case OP_CONN:
switch b {
case 'E', 'e':
c.state = OP_CONNE
default:
goto parseErr
}
case OP_CONNE:
switch b {
case 'C', 'c':
c.state = OP_CONNEC
default:
goto parseErr
}
case OP_CONNEC:
switch b {
case 'T', 't':
c.state = OP_CONNECT
default:
goto parseErr
}
case OP_CONNECT:
switch b {
case ' ', '\t':
continue
default:
c.state = CONNECT_ARG
c.as = i
}
case CONNECT_ARG:
switch b {
case '\r':
c.drop = 1
case '\n':
var arg []byte
if c.argBuf != nil {
arg = c.argBuf
c.argBuf = nil
} else {
arg = buf[c.as : i-c.drop]
}
if err := c.processConnect(arg); err != nil {
return err
}
c.drop, c.state = 0, OP_START
// Reset notion on authSet
authSet = c.awaitingAuth()
default:
if c.argBuf != nil {
c.argBuf = append(c.argBuf, b)
}
}
case OP_M:
switch b {
case 'S', 's':
c.state = OP_MS
default:
goto parseErr
}
case OP_MS:
switch b {
case 'G', 'g':
c.state = OP_MSG
default:
goto parseErr
}
case OP_MSG:
switch b {
case ' ', '\t':
c.state = OP_MSG_SPC
default:
goto parseErr
}
case OP_MSG_SPC:
switch b {
case ' ', '\t':
continue
default:
c.state = MSG_ARG
c.as = i
}
case MSG_ARG:
switch b {
case '\r':
c.drop = 1
case '\n':
var arg []byte
if c.argBuf != nil {
arg = c.argBuf
c.argBuf = nil
} else {
arg = buf[c.as : i-c.drop]
}
if err := c.processRoutedMsgArgs(c.trace, arg); err != nil {
return err
}
c.drop, c.as, c.state = 0, i+1, MSG_PAYLOAD
// jump ahead with the index. If this overruns
// what is left we fall out and process split
// buffer.
i = c.as + c.pa.size - 1
default:
if c.argBuf != nil {
c.argBuf = append(c.argBuf, b)
}
}
case OP_I:
switch b {
case 'N', 'n':
c.state = OP_IN
default:
goto parseErr
}
case OP_IN:
switch b {
case 'F', 'f':
c.state = OP_INF
default:
goto parseErr
}
case OP_INF:
switch b {
case 'O', 'o':
c.state = OP_INFO
default:
goto parseErr
}
case OP_INFO:
switch b {
case ' ', '\t':
continue
default:
c.state = INFO_ARG
c.as = i
}
case INFO_ARG:
switch b {
case '\r':
c.drop = 1
case '\n':
var arg []byte
if c.argBuf != nil {
arg = c.argBuf
c.argBuf = nil
} else {
arg = buf[c.as : i-c.drop]
}
if err := c.processInfo(arg); err != nil {
return err
}
c.drop, c.as, c.state = 0, i+1, OP_START
default:
if c.argBuf != nil {
c.argBuf = append(c.argBuf, b)
}
}
case OP_PLUS:
switch b {
case 'O', 'o':
c.state = OP_PLUS_O
default:
goto parseErr
}
case OP_PLUS_O:
switch b {
case 'K', 'k':
c.state = OP_PLUS_OK
default:
goto parseErr
}
case OP_PLUS_OK:
switch b {
case '\n':
c.drop, c.state = 0, OP_START
}
case OP_MINUS:
switch b {
case 'E', 'e':
c.state = OP_MINUS_E
default:
goto parseErr
}
case OP_MINUS_E:
switch b {
case 'R', 'r':
c.state = OP_MINUS_ER
default:
goto parseErr
}
case OP_MINUS_ER:
switch b {
case 'R', 'r':
c.state = OP_MINUS_ERR
default:
goto parseErr
}
case OP_MINUS_ERR:
switch b {
case ' ', '\t':
c.state = OP_MINUS_ERR_SPC
default:
goto parseErr
}
case OP_MINUS_ERR_SPC:
switch b {
case ' ', '\t':
continue
default:
c.state = MINUS_ERR_ARG
c.as = i
}
case MINUS_ERR_ARG:
switch b {
case '\r':
c.drop = 1
case '\n':
var arg []byte
if c.argBuf != nil {
arg = c.argBuf
c.argBuf = nil
} else {
arg = buf[c.as : i-c.drop]
}
c.processErr(string(arg))
c.drop, c.as, c.state = 0, i+1, OP_START
default:
if c.argBuf != nil {
c.argBuf = append(c.argBuf, b)
}
}
default:
goto parseErr
}
}
// Check for split buffer scenarios for any ARG state.
if c.state == SUB_ARG || c.state == UNSUB_ARG || c.state == PUB_ARG ||
c.state == ASUB_ARG || c.state == AUSUB_ARG ||
c.state == MSG_ARG || c.state == MINUS_ERR_ARG ||
c.state == CONNECT_ARG || c.state == INFO_ARG {
// Setup a holder buffer to deal with split buffer scenario.
if c.argBuf == nil {
c.argBuf = c.scratch[:0]
c.argBuf = append(c.argBuf, buf[c.as:i-c.drop]...)
}
// Check for violations of control line length here. Note that this is not
// exact at all but the performance hit is too great to be precise, and
// catching here should prevent memory exhaustion attacks.
if len(c.argBuf) > mcl {
c.sendErr("Maximum Control Line Exceeded")
c.closeConnection(MaxControlLineExceeded)
return ErrMaxControlLine
}
}
// Check for split msg
if (c.state == MSG_PAYLOAD || c.state == MSG_END) && c.msgBuf == nil {
// We need to clone the pubArg if it is still referencing the
// read buffer and we are not able to process the msg.
if c.argBuf == nil {
// Works also for MSG_ARG, when message comes from ROUTE.
c.clonePubArg()
}
// If we will overflow the scratch buffer, just create a
// new buffer to hold the split message.
if c.pa.size > cap(c.scratch)-len(c.argBuf) {
lrem := len(buf[c.as:])
// Consider it a protocol error when the remaining payload
// is larger than the reported size for PUB. It can happen
// when processing incomplete messages from rogue clients.
if lrem > c.pa.size+LEN_CR_LF {
goto parseErr
}
c.msgBuf = make([]byte, lrem, c.pa.size+LEN_CR_LF)
copy(c.msgBuf, buf[c.as:])
} else {
c.msgBuf = c.scratch[len(c.argBuf):len(c.argBuf)]
c.msgBuf = append(c.msgBuf, (buf[c.as:])...)
}
}
return nil
authErr:
c.authViolation()
return ErrAuthentication
parseErr:
c.sendErr("Unknown Protocol Operation")
snip := protoSnippet(i, buf)
err := fmt.Errorf("%s parser ERROR, state=%d, i=%d: proto='%s...'",
c.typeString(), c.state, i, snip)
return err
}
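// protoSnippet returns a quoted snippet of the buffer starting at the parse
// position, capped at PROTO_SNIPPET_SIZE bytes, for use in parser errors.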
func protoSnippet(start int, buf []byte) string {
stop := start + PROTO_SNIPPET_SIZE
bufSize := len(buf)
if start >= bufSize {
return `""`
}
if stop > bufSize {
stop = bufSize - 1
}
return fmt.Sprintf("%q", buf[start:stop])
}
// clonePubArg is used when the split buffer scenario has the pubArg in the existing read buffer, but
// we need to hold onto it into the next read.
func (c *client) clonePubArg() {
// Just copy and re-process original arg buffer.
c.argBuf = c.scratch[:0]
c.argBuf = append(c.argBuf, c.pa.arg...)
// This is a routed msg
if c.pa.account != nil {
c.processRoutedMsgArgs(false, c.argBuf)
} else {
c.processPub(false, c.argBuf)
}
}
| 1 | 8,266 | Should we do our own vtable? | nats-io-nats-server | go |
@@ -41,7 +41,6 @@ module C2
}
config.roadie.url_options = config.action_mailer.default_url_options
- config.exceptions_app = self.routes
config.autoload_paths << Rails.root.join('lib')
config.assets.precompile << 'common/communicarts.css' | 1 | require File.expand_path('../boot', __FILE__)
require 'rails/all'
# Require the gems listed in Gemfile, including any gems
# you've limited to :test, :development, or :production.
Bundler.require(:default, Rails.env)
module C2
class Application < Rails::Application
# Settings in config/environments/* take precedence over those specified here.
# Application configuration should go into files in config/initializers
# -- all .rb files in that directory are automatically loaded.
# Set Time.zone default to the specified zone and make Active Record auto-convert to this zone.
# Run "rake -D time" for a list of tasks for finding time zone names. Default is UTC.
# config.time_zone = 'Central Time (US & Canada)'
# The default locale is :en and all translations from config/locales/*.rb,yml are auto loaded.
# config.i18n.load_path += Dir[Rails.root.join('my', 'locales', '*.{rb,yml}').to_s]
# config.i18n.default_locale = :de
# http://git.io/ETVYsQ
config.middleware.insert_before 0, Rack::Cors, logger: Rails.logger do
allow do
origins '*'
resource '*',
headers: :any,
methods: [:get, :post, :delete, :put, :options, :head],
max_age: 1728000
end
end
config.action_mailer.raise_delivery_errors = true
config.action_mailer.default_url_options = {
scheme: ENV['DEFAULT_URL_SCHEME'] || 'http',
host: ENV['HOST_URL'] || ENV['DEFAULT_URL_HOST'] || 'localhost',
port: ENV['DEFAULT_URL_PORT'] || 3000
}
config.roadie.url_options = config.action_mailer.default_url_options
config.exceptions_app = self.routes
config.autoload_paths << Rails.root.join('lib')
config.assets.precompile << 'common/communicarts.css'
end
end
| 1 | 12,799 | This is the actual fix. | 18F-C2 | rb |
@@ -173,7 +173,7 @@ const (
// cache idx expiration
defaultCacheIdxExpiration = 5 * time.Minute
// default sync interval
- defaultSyncInterval = 10 * time.Second
+ defaultSyncInterval = 60 * time.Second
// coalesceMinimum
coalesceMinimum = 16 * 1024
// maxFlushWait is maximum we will wait to gather messages to flush. | 1 | // Copyright 2019-2021 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"archive/tar"
"bytes"
"crypto/sha256"
"encoding/binary"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"hash"
"io"
"io/ioutil"
"math/rand"
"net"
"os"
"path"
"sort"
"sync"
"sync/atomic"
"time"
"github.com/klauspost/compress/s2"
"github.com/minio/highwayhash"
)
type FileStoreConfig struct {
// Where the parent directory for all storage will be located.
StoreDir string
// BlockSize is the file block size. This also represents the maximum overhead size.
BlockSize uint64
// CacheExpire is how long with no activity until we expire the cache.
CacheExpire time.Duration
// SyncInterval is how often we sync to disk in the background.
SyncInterval time.Duration
// AsyncFlush allows async flush to batch write operations.
AsyncFlush bool
}
// FileStreamInfo allows us to remember created time.
type FileStreamInfo struct {
Created time.Time
StreamConfig
}
// FileConsumerInfo is used for creating consumer stores.
type FileConsumerInfo struct {
Created time.Time
Name string
ConsumerConfig
}
type fileStore struct {
mu sync.RWMutex
state StreamState
ld *LostStreamData
scb StorageUpdateHandler
ageChk *time.Timer
syncTmr *time.Timer
cfg FileStreamInfo
fcfg FileStoreConfig
lmb *msgBlock
blks []*msgBlock
hh hash.Hash64
qch chan struct{}
cfs []*consumerFileStore
closed bool
expiring bool
fip bool
sips int
}
// Represents a message store block and its data.
type msgBlock struct {
mu sync.RWMutex
fs *fileStore
mfn string
mfd *os.File
ifn string
ifd *os.File
liwsz int64
index uint64
bytes uint64
msgs uint64
first msgId
last msgId
lwits int64
lwts int64
llts int64
lrts int64
hh hash.Hash64
cache *cache
cloads uint64
cexp time.Duration
ctmr *time.Timer
werr error
loading bool
flusher bool
dmap map[uint64]struct{}
fch chan struct{}
qch chan struct{}
lchk [8]byte
}
// Write through caching layer that is also used on loading messages.
type cache struct {
buf []byte
off int
wp int
idx []uint32
lrl uint32
fseq uint64
flush bool
}
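// The cache field names are terse; reading from their use elsewhere in the
// store: buf holds the raw block bytes, off is buf's offset into the block
// file, wp is the write position, idx records per-message offsets, lrl is the
// last record length, and fseq is the first sequence held in the cache.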
type msgId struct {
seq uint64
ts int64
}
type fileStoredMsg struct {
subj string
hdr []byte
msg []byte
seq uint64
ts int64 // nanoseconds
mb *msgBlock
off int64 // offset into block file
}
const (
// Magic is used to identify the file store files.
magic = uint8(22)
// Version
version = uint8(1)
// hdrLen
hdrLen = 2
// This is where we keep the streams.
streamsDir = "streams"
// This is where we keep the message store blocks.
msgDir = "msgs"
// This is where we temporarily move the messages dir.
purgeDir = "__msgs__"
// used to scan blk file names.
blkScan = "%d.blk"
// used to scan index file names.
indexScan = "%d.idx"
// This is where we keep state on consumers.
consumerDir = "obs"
// Index file for a consumer.
consumerState = "o.dat"
// This is where we keep state on templates.
tmplsDir = "templates"
// Maximum size of a write buffer we may consider for re-use.
maxBufReuse = 2 * 1024 * 1024
// default cache buffer expiration
defaultCacheBufferExpiration = 5 * time.Second
// cache idx expiration
defaultCacheIdxExpiration = 5 * time.Minute
// default sync interval
defaultSyncInterval = 10 * time.Second
// coalesceMinimum
coalesceMinimum = 16 * 1024
// maxFlushWait is maximum we will wait to gather messages to flush.
maxFlushWait = 8 * time.Millisecond
// Metafiles for streams and consumers.
JetStreamMetaFile = "meta.inf"
JetStreamMetaFileSum = "meta.sum"
// Default stream block size.
defaultStreamBlockSize = 64 * 1024 * 1024 // 64MB
// Default for workqueue or interest based.
defaultOtherBlockSize = 32 * 1024 * 1024 // 32MB
// max block size for now.
maxBlockSize = 2 * defaultStreamBlockSize
// FileStoreMinBlkSize is minimum size we will do for a blk size.
FileStoreMinBlkSize = 32 * 1000 // 32kb
// FileStoreMaxBlkSize is maximum size we will do for a blk size.
FileStoreMaxBlkSize = maxBlockSize
)
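// Illustrative on-disk layout for a single stream, built from the constants
// above (consumer layout is managed by consumerFileStore):
//
//   <StoreDir>/meta.inf     stream metadata (JSON)
//   <StoreDir>/meta.sum     hex checksum of meta.inf
//   <StoreDir>/msgs/1.blk   message block files (blkScan)
//   <StoreDir>/msgs/1.idx   per-block index files (indexScan)
//   <StoreDir>/obs/         consumer state (consumerDir)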
func newFileStore(fcfg FileStoreConfig, cfg StreamConfig) (*fileStore, error) {
return newFileStoreWithCreated(fcfg, cfg, time.Now())
}
func newFileStoreWithCreated(fcfg FileStoreConfig, cfg StreamConfig, created time.Time) (*fileStore, error) {
if cfg.Name == "" {
return nil, fmt.Errorf("name required")
}
if cfg.Storage != FileStorage {
return nil, fmt.Errorf("fileStore requires file storage type in config")
}
// Default values.
if fcfg.BlockSize == 0 {
fcfg.BlockSize = dynBlkSize(cfg.Retention, cfg.MaxBytes)
}
if fcfg.BlockSize > maxBlockSize {
return nil, fmt.Errorf("filestore max block size is %s", friendlyBytes(maxBlockSize))
}
if fcfg.CacheExpire == 0 {
fcfg.CacheExpire = defaultCacheBufferExpiration
}
if fcfg.SyncInterval == 0 {
fcfg.SyncInterval = defaultSyncInterval
}
// Check the directory
if stat, err := os.Stat(fcfg.StoreDir); os.IsNotExist(err) {
if err := os.MkdirAll(fcfg.StoreDir, 0755); err != nil {
return nil, fmt.Errorf("could not create storage directory - %v", err)
}
} else if stat == nil || !stat.IsDir() {
return nil, fmt.Errorf("storage directory is not a directory")
}
tmpfile, err := ioutil.TempFile(fcfg.StoreDir, "_test_")
if err != nil {
return nil, fmt.Errorf("storage directory is not writable")
}
tmpfile.Close()
os.Remove(tmpfile.Name())
fs := &fileStore{
fcfg: fcfg,
cfg: FileStreamInfo{Created: created, StreamConfig: cfg},
qch: make(chan struct{}),
}
// Set flush in place to the inverse of AsyncFlush, which by default is false.
fs.fip = !fcfg.AsyncFlush
// Check if this is a new setup.
mdir := path.Join(fcfg.StoreDir, msgDir)
odir := path.Join(fcfg.StoreDir, consumerDir)
if err := os.MkdirAll(mdir, 0755); err != nil {
return nil, fmt.Errorf("could not create message storage directory - %v", err)
}
if err := os.MkdirAll(odir, 0755); err != nil {
return nil, fmt.Errorf("could not create message storage directory - %v", err)
}
// Create highway hash for message blocks. Use sha256 of directory as key.
key := sha256.Sum256([]byte(cfg.Name))
fs.hh, err = highwayhash.New64(key[:])
if err != nil {
return nil, fmt.Errorf("could not create hash: %v", err)
}
// Recover our state.
if err := fs.recoverMsgs(); err != nil {
return nil, err
}
// Write our meta data if it does not already exist.
meta := path.Join(fcfg.StoreDir, JetStreamMetaFile)
if _, err := os.Stat(meta); err != nil && os.IsNotExist(err) {
if err := fs.writeStreamMeta(); err != nil {
return nil, err
}
}
fs.syncTmr = time.AfterFunc(fs.fcfg.SyncInterval, fs.syncBlocks)
return fs, nil
}
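// A minimal usage sketch for newFileStore (hypothetical values):
//
//   fs, err := newFileStore(
//       FileStoreConfig{StoreDir: "/tmp/jetstream"},
//       StreamConfig{Name: "ORDERS", Storage: FileStorage},
//   )
//   if err == nil {
//       defer fs.Stop()
//   }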
func (fs *fileStore) UpdateConfig(cfg *StreamConfig) error {
if fs.isClosed() {
return ErrStoreClosed
}
if cfg.Name == "" {
return fmt.Errorf("name required")
}
if cfg.Storage != FileStorage {
return fmt.Errorf("fileStore requires file storage type in config")
}
fs.mu.Lock()
new_cfg := FileStreamInfo{Created: fs.cfg.Created, StreamConfig: *cfg}
old_cfg := fs.cfg
fs.cfg = new_cfg
if err := fs.writeStreamMeta(); err != nil {
fs.cfg = old_cfg
fs.mu.Unlock()
return err
}
// Limits checks and enforcement.
fs.enforceMsgLimit()
fs.enforceBytesLimit()
// Do age timers.
if fs.ageChk == nil && fs.cfg.MaxAge != 0 {
fs.startAgeChk()
}
if fs.ageChk != nil && fs.cfg.MaxAge == 0 {
fs.ageChk.Stop()
fs.ageChk = nil
}
fs.mu.Unlock()
if cfg.MaxAge != 0 {
fs.expireMsgs()
}
return nil
}
func dynBlkSize(retention RetentionPolicy, maxBytes int64) uint64 {
if maxBytes > 0 {
blkSize := (maxBytes / 4) + 1 // (25% overhead)
// Round up to nearest 100
if m := blkSize % 100; m != 0 {
blkSize += 100 - m
}
if blkSize < FileStoreMinBlkSize {
blkSize = FileStoreMinBlkSize
}
if blkSize > FileStoreMaxBlkSize {
blkSize = FileStoreMaxBlkSize
}
return uint64(blkSize)
}
if retention == LimitsPolicy {
// TODO(dlc) - Make the blocksize relative to this if set.
return defaultStreamBlockSize
} else {
// TODO(dlc) - Make the blocksize relative to this if set.
return defaultOtherBlockSize
}
}
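// Worked example for dynBlkSize above: with MaxBytes = 1,000,000 the target is
// 1,000,000/4 + 1 = 250,001, rounded up to the nearest 100 to 250,100, which
// already sits inside [FileStoreMinBlkSize, FileStoreMaxBlkSize] and is used
// as-is. With no MaxBytes we fall back to the retention based defaults.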
// Write out meta and the checksum.
// Lock should be held.
func (fs *fileStore) writeStreamMeta() error {
meta := path.Join(fs.fcfg.StoreDir, JetStreamMetaFile)
if _, err := os.Stat(meta); err != nil && !os.IsNotExist(err) {
return err
}
b, err := json.Marshal(fs.cfg)
if err != nil {
return err
}
if err := ioutil.WriteFile(meta, b, 0644); err != nil {
return err
}
fs.hh.Reset()
fs.hh.Write(b)
checksum := hex.EncodeToString(fs.hh.Sum(nil))
sum := path.Join(fs.fcfg.StoreDir, JetStreamMetaFileSum)
if err := ioutil.WriteFile(sum, []byte(checksum), 0644); err != nil {
return err
}
return nil
}
const msgHdrSize = 22
const checksumSize = 8
// This is the max room needed for index header.
const indexHdrSize = 7*binary.MaxVarintLen64 + hdrLen + checksumSize
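// These sizes follow from the record and index formats:
//   msgHdrSize   = total_len(4) + seq(8) + ts(8) + subj_len(2) = 22
//   indexHdrSize = worst case for the 7 uvarints/varints written by
//                  writeIndexInfo (msgs, bytes, fseq, fts, lseq, lts,
//                  dmap_len) plus the magic/version header and checksum.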
func (fs *fileStore) recoverMsgBlock(fi os.FileInfo, index uint64) *msgBlock {
mb := &msgBlock{fs: fs, index: index, cexp: fs.fcfg.CacheExpire}
mdir := path.Join(fs.fcfg.StoreDir, msgDir)
mb.mfn = path.Join(mdir, fi.Name())
mb.ifn = path.Join(mdir, fmt.Sprintf(indexScan, index))
if mb.hh == nil {
key := sha256.Sum256(fs.hashKeyForBlock(index))
mb.hh, _ = highwayhash.New64(key[:])
}
// Open up the message file, but we will try to recover from the index file.
// We will check that the last checksums match.
file, err := os.Open(mb.mfn)
if err != nil {
return nil
}
defer file.Close()
// Read our index file. Use this as source of truth if possible.
if err := mb.readIndexInfo(); err == nil {
// Quick sanity check here.
// Note this only checks that the message blk file is not newer than this file.
var lchk [8]byte
file.ReadAt(lchk[:], fi.Size()-8)
if bytes.Equal(lchk[:], mb.lchk[:]) {
fs.blks = append(fs.blks, mb)
return mb
}
}
// Close here since we need to rebuild state.
file.Close()
// If we get an error rebuilding the message block state, record that with the fs itself.
if ld, err := mb.rebuildState(); err != nil && ld != nil {
fs.rebuildState(ld)
}
// Rewrite this to make sure we are sync'd.
mb.writeIndexInfo()
fs.blks = append(fs.blks, mb)
fs.lmb = mb
return mb
}
func (fs *fileStore) lostData() *LostStreamData {
fs.mu.RLock()
defer fs.mu.RUnlock()
if fs.ld == nil {
return nil
}
nld := *fs.ld
return &nld
}
func (fs *fileStore) rebuildState(ld *LostStreamData) {
if fs.ld != nil {
fs.ld.Msgs = append(fs.ld.Msgs, ld.Msgs...)
msgs := fs.ld.Msgs
sort.Slice(msgs, func(i, j int) bool { return msgs[i] < msgs[j] })
fs.ld.Bytes += ld.Bytes
} else {
fs.ld = ld
}
fs.state.Msgs, fs.state.Bytes = 0, 0
fs.state.FirstSeq, fs.state.LastSeq = 0, 0
for _, mb := range fs.blks {
mb.mu.RLock()
fs.state.Msgs += mb.msgs
fs.state.Bytes += mb.bytes
if fs.state.FirstSeq == 0 || mb.first.seq < fs.state.FirstSeq {
fs.state.FirstSeq = mb.first.seq
fs.state.FirstTime = time.Unix(0, mb.first.ts).UTC()
}
fs.state.LastSeq = mb.last.seq
fs.state.LastTime = time.Unix(0, mb.last.ts).UTC()
mb.mu.RUnlock()
}
}
func (mb *msgBlock) rebuildState() (*LostStreamData, error) {
mb.mu.Lock()
defer mb.mu.Unlock()
startLastSeq := mb.last.seq
// Clear state we need to rebuild.
mb.msgs, mb.bytes = 0, 0
mb.last.seq, mb.last.ts = 0, 0
buf, err := ioutil.ReadFile(mb.mfn)
if err != nil {
return nil, err
}
addToDmap := func(seq uint64) {
if seq == 0 {
return
}
if mb.dmap == nil {
mb.dmap = make(map[uint64]struct{})
}
mb.dmap[seq] = struct{}{}
}
var le = binary.LittleEndian
truncate := func(index uint32) {
var fd *os.File
if mb.mfd != nil {
fd = mb.mfd
} else {
// Do not shadow the outer fd here, and open read-write so Truncate works.
fd, _ = os.OpenFile(mb.mfn, os.O_RDWR, 0644)
if fd != nil {
defer fd.Close()
}
}
if fd != nil {
fd.Truncate(int64(index))
fd.Sync()
// Update our checksum from the (possibly newly opened) fd.
if index >= 8 {
var lchk [8]byte
fd.ReadAt(lchk[:], int64(index-8))
copy(mb.lchk[0:], lchk[:])
}
}
}
gatherLost := func(lb uint32) *LostStreamData {
var ld LostStreamData
for seq := mb.last.seq + 1; seq <= startLastSeq; seq++ {
ld.Msgs = append(ld.Msgs, seq)
}
ld.Bytes = uint64(lb)
return &ld
}
for index, lbuf := uint32(0), uint32(len(buf)); index < lbuf; {
// Guard against a partial header at the end of the file.
if index+msgHdrSize > lbuf {
truncate(index)
return gatherLost(lbuf - index), errBadMsg
}
hdr := buf[index : index+msgHdrSize]
rl := le.Uint32(hdr[0:])
slen := le.Uint16(hdr[20:])
// Clear any headers bit that could be set.
rl &^= hbit
dlen := int(rl) - msgHdrSize
// Do some quick sanity checks here.
if dlen < 0 || int(slen) > dlen || dlen > int(rl) {
truncate(index)
return gatherLost(lbuf - index), errBadMsg
}
if index+rl > lbuf {
truncate(index)
return gatherLost(lbuf - index), errBadMsg
}
seq := le.Uint64(hdr[4:])
ts := int64(le.Uint64(hdr[12:]))
// If the first seq we read does not match our indexed first seq, reset.
if index == 0 && seq > mb.first.seq {
mb.first.seq = seq
}
// This is an old erased message, or a new one that we can track.
if seq == 0 || seq&ebit != 0 || seq < mb.first.seq {
seq = seq &^ ebit
addToDmap(seq)
index += rl
continue
}
var deleted bool
if mb.dmap != nil {
if _, ok := mb.dmap[seq]; ok {
deleted = true
}
}
if !deleted {
if hh := mb.hh; hh != nil {
data := buf[index+msgHdrSize : index+rl]
hh.Reset()
hh.Write(hdr[4:20])
hh.Write(data[:slen])
hh.Write(data[slen : dlen-8])
checksum := hh.Sum(nil)
if !bytes.Equal(checksum, data[len(data)-8:]) {
truncate(index)
return gatherLost(lbuf - index), errBadMsg
}
}
if mb.first.seq == 0 {
mb.first.seq = seq
mb.first.ts = ts
}
mb.last.seq = seq
mb.last.ts = ts
mb.msgs++
mb.bytes += uint64(rl)
}
index += rl
}
return nil, nil
}
func (fs *fileStore) recoverMsgs() error {
fs.mu.Lock()
defer fs.mu.Unlock()
// Check for any left over purged messages.
pdir := path.Join(fs.fcfg.StoreDir, purgeDir)
if _, err := os.Stat(pdir); err == nil {
os.RemoveAll(pdir)
}
mdir := path.Join(fs.fcfg.StoreDir, msgDir)
fis, err := ioutil.ReadDir(mdir)
if err != nil {
return errNotReadable
}
// Recover all of the msg blocks.
// These can come in a random order, so account for that.
for _, fi := range fis {
var index uint64
if n, err := fmt.Sscanf(fi.Name(), blkScan, &index); err == nil && n == 1 {
if mb := fs.recoverMsgBlock(fi, index); mb != nil {
if fs.state.FirstSeq == 0 || mb.first.seq < fs.state.FirstSeq {
fs.state.FirstSeq = mb.first.seq
fs.state.FirstTime = time.Unix(0, mb.first.ts).UTC()
}
if mb.last.seq > fs.state.LastSeq {
fs.state.LastSeq = mb.last.seq
fs.state.LastTime = time.Unix(0, mb.last.ts).UTC()
}
fs.state.Msgs += mb.msgs
fs.state.Bytes += mb.bytes
}
}
}
// Now make sure to sort blks for efficient lookup later with selectMsgBlock().
if len(fs.blks) > 0 {
sort.Slice(fs.blks, func(i, j int) bool { return fs.blks[i].index < fs.blks[j].index })
fs.lmb = fs.blks[len(fs.blks)-1]
err = fs.enableLastMsgBlockForWriting()
} else {
_, err = fs.newMsgBlockForWrite()
}
if err != nil {
return err
}
// Limits checks and enforcement.
fs.enforceMsgLimit()
fs.enforceBytesLimit()
// Do age checks too, make sure to call in place.
if fs.cfg.MaxAge != 0 && fs.state.Msgs > 0 {
fs.startAgeChk()
fs.expireMsgsLocked()
}
return nil
}
// GetSeqFromTime looks for the first sequence number with a message
// whose timestamp is >= the given time.
// FIXME(dlc) - inefficient, and dumb really. Make this better.
func (fs *fileStore) GetSeqFromTime(t time.Time) uint64 {
fs.mu.RLock()
lastSeq := fs.state.LastSeq
closed := fs.closed
fs.mu.RUnlock()
if closed {
return 0
}
mb := fs.selectMsgBlockForStart(t)
if mb == nil {
return lastSeq + 1
}
mb.mu.RLock()
fseq := mb.first.seq
lseq := mb.last.seq
mb.mu.RUnlock()
// Linear search, hence the dumb part..
ts := t.UnixNano()
for seq := fseq; seq <= lseq; seq++ {
sm, _ := mb.fetchMsg(seq)
if sm != nil && sm.ts >= ts {
return sm.seq
}
}
return 0
}
// RegisterStorageUpdates registers a callback for updates to storage changes.
// It will be passed the delta in messages and bytes as signed integers, and an
// optional sequence number and subject when the update is for a single message.
func (fs *fileStore) RegisterStorageUpdates(cb StorageUpdateHandler) {
fs.mu.Lock()
fs.scb = cb
bsz := fs.state.Bytes
fs.mu.Unlock()
if cb != nil && bsz > 0 {
cb(0, int64(bsz), 0, _EMPTY_)
}
}
// Helper to get hash key for specific message block.
// Lock should be held
func (fs *fileStore) hashKeyForBlock(index uint64) []byte {
return []byte(fmt.Sprintf("%s-%d", fs.cfg.Name, index))
}
// This rolls to a new append msg block.
// Lock should be held.
func (fs *fileStore) newMsgBlockForWrite() (*msgBlock, error) {
index := uint64(1)
if fs.lmb != nil {
index = fs.lmb.index + 1
}
mb := &msgBlock{fs: fs, index: index, cexp: fs.fcfg.CacheExpire}
// Now do local hash.
key := sha256.Sum256(fs.hashKeyForBlock(index))
hh, err := highwayhash.New64(key[:])
if err != nil {
return nil, fmt.Errorf("could not create hash: %v", err)
}
mb.hh = hh
mdir := path.Join(fs.fcfg.StoreDir, msgDir)
mb.mfn = path.Join(mdir, fmt.Sprintf(blkScan, mb.index))
mfd, err := os.OpenFile(mb.mfn, os.O_CREATE|os.O_RDWR, 0644)
if err != nil {
mb.dirtyCloseWithRemove(true)
return nil, fmt.Errorf("Error creating msg block file [%q]: %v", mb.mfn, err)
}
mb.mfd = mfd
mb.ifn = path.Join(mdir, fmt.Sprintf(indexScan, mb.index))
ifd, err := os.OpenFile(mb.ifn, os.O_CREATE|os.O_RDWR, 0644)
if err != nil {
mb.dirtyCloseWithRemove(true)
return nil, fmt.Errorf("Error creating msg index file [%q]: %v", mb.mfn, err)
}
mb.ifd = ifd
// Set cache time to creation time to start.
ts := time.Now().UnixNano()
mb.llts, mb.lrts, mb.lwts = ts, ts, ts
// Remember our last sequence number.
mb.first.seq = fs.state.LastSeq + 1
mb.last.seq = fs.state.LastSeq
// If we know we will need this, go ahead and spin it up.
if !fs.fip {
mb.spinUpFlushLoop()
}
// Add to our list of blocks and mark as last.
fs.blks = append(fs.blks, mb)
fs.lmb = mb
return mb, nil
}
// Make sure we can write to the last message block.
// Lock should be held.
func (fs *fileStore) enableLastMsgBlockForWriting() error {
mb := fs.lmb
if mb == nil {
return fmt.Errorf("no last message block assigned, can not enable for writing")
}
if mb.mfd != nil {
return nil
}
mfd, err := os.OpenFile(mb.mfn, os.O_CREATE|os.O_RDWR, 0644)
if err != nil {
return fmt.Errorf("error opening msg block file [%q]: %v", mb.mfn, err)
}
mb.mfd = mfd
// Spin up our flusher loop if needed.
if !fs.fip {
mb.spinUpFlushLoop()
}
return nil
}
// Stores a raw message with expected sequence number and timestamp.
// Lock should be held.
func (fs *fileStore) storeRawMsg(subj string, hdr, msg []byte, seq uint64, ts int64) error {
if fs.closed {
return ErrStoreClosed
}
// Check if we are discarding new messages when we reach the limit.
if fs.cfg.Discard == DiscardNew {
if fs.cfg.MaxMsgs > 0 && fs.state.Msgs >= uint64(fs.cfg.MaxMsgs) {
return ErrMaxMsgs
}
if fs.cfg.MaxBytes > 0 && fs.state.Bytes+uint64(len(msg)+len(hdr)) >= uint64(fs.cfg.MaxBytes) {
return ErrMaxBytes
}
}
// Check sequence.
if seq != fs.state.LastSeq+1 {
return ErrSequenceMismatch
}
// Write msg record.
n, err := fs.writeMsgRecord(seq, ts, subj, hdr, msg)
if err != nil {
return err
}
// Adjust first if needed.
now := time.Unix(0, ts).UTC()
if fs.state.Msgs == 0 {
fs.state.FirstSeq = seq
fs.state.FirstTime = now
}
fs.state.Msgs++
fs.state.Bytes += n
fs.state.LastSeq = seq
fs.state.LastTime = now
// Limits checks and enforcement.
// If they do any deletions they will update the
// byte count on their own, so no need to compensate.
fs.enforceMsgLimit()
fs.enforceBytesLimit()
// Check if we have and need the age expiration timer running.
if fs.ageChk == nil && fs.cfg.MaxAge != 0 {
fs.startAgeChk()
}
return nil
}
// StoreRawMsg stores a raw message with expected sequence number and timestamp.
func (fs *fileStore) StoreRawMsg(subj string, hdr, msg []byte, seq uint64, ts int64) error {
fs.mu.Lock()
err := fs.storeRawMsg(subj, hdr, msg, seq, ts)
cb := fs.scb
fs.mu.Unlock()
if err == nil && cb != nil {
cb(1, int64(fileStoreMsgSize(subj, hdr, msg)), seq, subj)
}
return err
}
// StoreMsg stores a message. We hold the main filestore lock for any write operation.
func (fs *fileStore) StoreMsg(subj string, hdr, msg []byte) (uint64, int64, error) {
fs.mu.Lock()
seq, ts := fs.state.LastSeq+1, time.Now().UnixNano()
err := fs.storeRawMsg(subj, hdr, msg, seq, ts)
cb := fs.scb
fs.mu.Unlock()
if err != nil {
seq, ts = 0, 0
} else if cb != nil {
cb(1, int64(fileStoreMsgSize(subj, hdr, msg)), seq, subj)
}
return seq, ts, err
}
// skipMsg will update this message block for a skipped message.
// If we do not have any messages, just update the metadata, otherwise
// we will place an empty record marking the sequence as used. The
// sequence will be marked erased.
// fs lock should be held.
func (mb *msgBlock) skipMsg(seq uint64, now time.Time) {
if mb == nil {
return
}
var needsRecord bool
mb.mu.Lock()
// If we are empty can just do meta.
if mb.msgs == 0 {
mb.last.seq = seq
mb.last.ts = now.UnixNano()
mb.first.seq = seq + 1
mb.first.ts = now.UnixNano()
} else {
needsRecord = true
if mb.dmap == nil {
mb.dmap = make(map[uint64]struct{})
}
mb.dmap[seq] = struct{}{}
mb.msgs--
mb.bytes -= emptyRecordLen
}
mb.mu.Unlock()
if needsRecord {
mb.writeMsgRecord(emptyRecordLen, seq|ebit, _EMPTY_, nil, nil, now.UnixNano(), true)
} else {
mb.kickFlusher()
}
}
// SkipMsg will use the next sequence number but not store anything.
func (fs *fileStore) SkipMsg() uint64 {
fs.mu.Lock()
defer fs.mu.Unlock()
// Grab time.
now := time.Now().UTC()
seq := fs.state.LastSeq + 1
fs.state.LastSeq = seq
fs.state.LastTime = now
if fs.state.Msgs == 0 {
fs.state.FirstSeq = seq
fs.state.FirstTime = now
}
if seq == fs.state.FirstSeq {
fs.state.FirstSeq = seq + 1
fs.state.FirstTime = now
}
fs.lmb.skipMsg(seq, now)
return seq
}
// Lock should be held.
func (fs *fileStore) rebuildFirst() {
if len(fs.blks) == 0 {
return
}
if fmb := fs.blks[0]; fmb != nil {
fmb.removeIndexFile()
fmb.rebuildState()
fmb.writeIndexInfo()
fs.selectNextFirst()
}
}
// Will check the msg limit and drop firstSeq msg if needed.
// Lock should be held.
func (fs *fileStore) enforceMsgLimit() {
if fs.cfg.MaxMsgs <= 0 || fs.state.Msgs <= uint64(fs.cfg.MaxMsgs) {
return
}
for nmsgs := fs.state.Msgs; nmsgs > uint64(fs.cfg.MaxMsgs); nmsgs = fs.state.Msgs {
if removed, err := fs.deleteFirstMsgLocked(); err != nil || !removed {
fs.rebuildFirst()
return
}
}
}
// Will check the bytes limit and drop msgs if needed.
// Lock should be held.
func (fs *fileStore) enforceBytesLimit() {
if fs.cfg.MaxBytes <= 0 || fs.state.Bytes <= uint64(fs.cfg.MaxBytes) {
return
}
for bs := fs.state.Bytes; bs > uint64(fs.cfg.MaxBytes); bs = fs.state.Bytes {
if removed, err := fs.deleteFirstMsgLocked(); err != nil || !removed {
fs.rebuildFirst()
return
}
}
}
// Lock should be held but will be released during actual remove.
func (fs *fileStore) deleteFirstMsgLocked() (bool, error) {
fs.mu.Unlock()
defer fs.mu.Lock()
return fs.removeMsg(fs.state.FirstSeq, false)
}
// Lock should NOT be held.
func (fs *fileStore) deleteFirstMsg() (bool, error) {
fs.mu.RLock()
seq := fs.state.FirstSeq
fs.mu.RUnlock()
return fs.removeMsg(seq, false)
}
// RemoveMsg will remove the message from this store.
// Will return the number of bytes removed.
func (fs *fileStore) RemoveMsg(seq uint64) (bool, error) {
return fs.removeMsg(seq, false)
}
func (fs *fileStore) EraseMsg(seq uint64) (bool, error) {
return fs.removeMsg(seq, true)
}
// Remove a message, optionally erasing the record on disk (secure).
func (fs *fileStore) removeMsg(seq uint64, secure bool) (bool, error) {
fs.mu.Lock()
if fs.closed {
fs.mu.Unlock()
return false, ErrStoreClosed
}
if fs.sips > 0 {
fs.mu.Unlock()
return false, ErrStoreSnapshotInProgress
}
mb := fs.selectMsgBlock(seq)
if mb == nil {
var err = ErrStoreEOF
if seq <= fs.state.LastSeq {
err = ErrStoreMsgNotFound
}
fs.mu.Unlock()
return false, err
}
// If we have a callback grab the message since we need the subject.
// TODO(dlc) - This will cause whole buffer to be loaded which I was trying
// to avoid. Maybe use side cache for subjects or understand when we really need them.
// Meaning if the stream above is only a single subject no need to store, this is just
// for updating stream pending for consumers.
var sm *fileStoredMsg
if fs.scb != nil {
sm, _ = mb.fetchMsg(seq)
}
mb.mu.Lock()
// Check cache. This should be very rare.
if mb.cache == nil || mb.cache.idx == nil || (seq < mb.cache.fseq && mb.cache.off > 0) {
mb.mu.Unlock()
fs.mu.Unlock()
if err := mb.loadMsgs(); err != nil {
return false, err
}
fs.mu.Lock()
mb.mu.Lock()
}
// See if the sequence number is still relevant. Check the block first seq and the cache first seq.
if seq < mb.first.seq || seq < mb.cache.fseq || (seq-mb.cache.fseq) >= uint64(len(mb.cache.idx)) {
mb.mu.Unlock()
fs.mu.Unlock()
return false, nil
}
// Now check dmap if it is there.
if mb.dmap != nil {
if _, ok := mb.dmap[seq]; ok {
mb.mu.Unlock()
fs.mu.Unlock()
return false, nil
}
}
// Set cache timestamp for last remove.
mb.lrts = time.Now().UnixNano()
// Grab record length from idx.
slot := seq - mb.cache.fseq
ri, rl, _, _ := mb.slotInfo(int(slot))
msz := uint64(rl)
// Global stats
fs.state.Msgs--
fs.state.Bytes -= msz
// Now local mb updates.
mb.msgs--
mb.bytes -= msz
var shouldWriteIndex bool
var firstSeqNeedsUpdate bool
if secure {
mb.eraseMsg(seq, int(ri), int(rl))
}
// Optimize for FIFO case.
if seq == mb.first.seq {
mb.selectNextFirst()
if mb.isEmpty() {
fs.removeMsgBlock(mb)
firstSeqNeedsUpdate = seq == fs.state.FirstSeq
} else {
shouldWriteIndex = true
if seq == fs.state.FirstSeq {
fs.state.FirstSeq = mb.first.seq // new one.
fs.state.FirstTime = time.Unix(0, mb.first.ts).UTC()
}
}
} else {
// Out of order delete.
if mb.dmap == nil {
mb.dmap = make(map[uint64]struct{})
}
mb.dmap[seq] = struct{}{}
shouldWriteIndex = true
}
var qch, fch chan struct{}
if shouldWriteIndex {
qch = mb.qch
fch = mb.fch
}
cb := fs.scb
mb.mu.Unlock()
if secure {
mb.flushPendingMsgs()
}
// Kick outside of lock.
if shouldWriteIndex {
if !fs.fip {
if qch == nil {
mb.spinUpFlushLoop()
}
select {
case fch <- struct{}{}:
default:
}
} else {
mb.writeIndexInfo()
}
}
// If we emptied the current message block and the seq was state.FirstSeq
// then we need to jump message blocks.
if firstSeqNeedsUpdate {
fs.selectNextFirst()
}
fs.mu.Unlock()
// Storage updates.
if cb != nil {
subj := _EMPTY_
if sm != nil {
subj = sm.subj
}
delta := int64(msz)
cb(-1, -delta, seq, subj)
}
return true, nil
}
// Grab info from a slot.
// Lock should be held.
func (mb *msgBlock) slotInfo(slot int) (uint32, uint32, bool, error) {
if mb.cache == nil || slot >= len(mb.cache.idx) {
return 0, 0, false, errPartialCache
}
bi := mb.cache.idx[slot]
ri := (bi &^ hbit)
hashChecked := (bi & hbit) != 0
// Determine record length
var rl uint32
if len(mb.cache.idx) > slot+1 {
ni := mb.cache.idx[slot+1] &^ hbit
rl = ni - ri
} else {
rl = mb.cache.lrl
}
if rl < msgHdrSize {
return 0, 0, false, errBadMsg
}
return uint32(ri), rl, hashChecked, nil
}
func (fs *fileStore) isClosed() bool {
fs.mu.RLock()
closed := fs.closed
fs.mu.RUnlock()
return closed
}
// Will spin up our flush loop.
func (mb *msgBlock) spinUpFlushLoop() {
mb.mu.Lock()
// Are we already running?
if mb.flusher {
mb.mu.Unlock()
return
}
mb.flusher = true
mb.fch = make(chan struct{}, 1)
mb.qch = make(chan struct{})
fch, qch := mb.fch, mb.qch
mb.mu.Unlock()
go mb.flushLoop(fch, qch)
}
// Raw low level kicker for flush loops.
func kickFlusher(fch chan struct{}) {
if fch != nil {
select {
case fch <- struct{}{}:
default:
}
}
}
// Kick flusher for this message block.
func (mb *msgBlock) kickFlusher() {
mb.mu.RLock()
defer mb.mu.RUnlock()
kickFlusher(mb.fch)
}
func (mb *msgBlock) setInFlusher() {
mb.mu.Lock()
mb.flusher = true
mb.mu.Unlock()
}
func (mb *msgBlock) clearInFlusher() {
mb.mu.Lock()
mb.flusher = false
mb.mu.Unlock()
}
// flushLoop watches for messages, index info, or recently closed msg block updates.
func (mb *msgBlock) flushLoop(fch, qch chan struct{}) {
mb.setInFlusher()
defer mb.clearInFlusher()
// Will be used to test if we have metadata updates.
var firstSeq, lastSeq uint64
var dmapLen int
infoChanged := func() bool {
mb.mu.RLock()
defer mb.mu.RUnlock()
var changed bool
if firstSeq != mb.first.seq || lastSeq != mb.last.seq || dmapLen != len(mb.dmap) {
changed = true
firstSeq, lastSeq = mb.first.seq, mb.last.seq
dmapLen = len(mb.dmap)
}
return changed
}
for {
select {
case <-fch:
// If we have pending messages process them first.
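// We back off in doubling sleeps (1ms, 2ms, 4ms, ...) and flush once
// coalesceMinimum bytes are pending, maxFlushWait has elapsed, or the
// pending size did not grow during the last sleep.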
if waiting := mb.pendingWriteSize(); waiting != 0 {
ts := 1 * time.Millisecond
var waited time.Duration
for waiting < coalesceMinimum {
time.Sleep(ts)
select {
case <-qch:
return
default:
}
newWaiting := mb.pendingWriteSize()
if waited = waited + ts; waited > maxFlushWait || newWaiting <= waiting {
break
}
waiting = newWaiting
ts *= 2
}
mb.flushPendingMsgs()
}
if infoChanged() {
mb.writeIndexInfo()
}
case <-qch:
return
}
}
}
// Lock should be held.
func (mb *msgBlock) eraseMsg(seq uint64, ri, rl int) error {
var le = binary.LittleEndian
var hdr [msgHdrSize]byte
le.PutUint32(hdr[0:], uint32(rl))
le.PutUint64(hdr[4:], seq|ebit)
le.PutUint64(hdr[12:], 0)
le.PutUint16(hdr[20:], 0)
// Randomize record
data := make([]byte, rl-emptyRecordLen)
rand.Read(data)
// Now write to underlying buffer.
var b bytes.Buffer
b.Write(hdr[:])
b.Write(data)
// Calculate hash.
mb.hh.Reset()
mb.hh.Write(hdr[4:20])
mb.hh.Write(data)
checksum := mb.hh.Sum(nil)
// Write to msg record.
b.Write(checksum)
// Update both cache and disk.
nbytes := b.Bytes()
// Cache
if ri >= mb.cache.off {
li := ri - mb.cache.off
buf := mb.cache.buf[li : li+rl]
copy(buf, nbytes)
}
// Disk
if mb.cache.off+mb.cache.wp > ri {
mfd, err := os.OpenFile(mb.mfn, os.O_RDWR, 0644)
if err != nil {
return err
}
defer mfd.Close()
if _, err = mfd.WriteAt(nbytes, int64(ri)); err == nil {
mfd.Sync()
}
if err != nil {
return err
}
}
return nil
}
// Truncate this message block to the storedMsg.
func (mb *msgBlock) truncate(sm *fileStoredMsg) (nmsgs, nbytes uint64, err error) {
// Make sure we are loaded to process messages etc.
if err := mb.loadMsgs(); err != nil {
return 0, 0, err
}
// Calculate new eof using slot info from our new last sm.
ri, rl, _, err := mb.slotInfo(int(sm.seq - mb.cache.fseq))
if err != nil {
return 0, 0, err
}
// Calculate new eof.
eof := int64(ri + rl)
var purged, bytes uint64
mb.mu.Lock()
checkDmap := len(mb.dmap) > 0
for seq := mb.last.seq; seq > sm.seq; seq-- {
if checkDmap {
if _, ok := mb.dmap[seq]; ok {
// Delete and skip to next.
delete(mb.dmap, seq)
continue
}
}
// We should have a valid msg to calculate removal stats.
_, rl, _, err := mb.slotInfo(int(seq - mb.cache.fseq))
if err != nil {
mb.mu.Unlock()
return 0, 0, err
}
purged++
bytes += uint64(rl)
}
// Truncate our msgs and close file.
if mb.mfd != nil {
mb.mfd.Truncate(eof)
mb.mfd.Sync()
// Update our checksum.
var lchk [8]byte
mb.mfd.ReadAt(lchk[:], eof-8)
copy(mb.lchk[0:], lchk[:])
} else {
return 0, 0, fmt.Errorf("failed to truncate msg block %d, file not open", mb.index)
}
// Do local mb stat updates.
mb.msgs -= purged
mb.bytes -= bytes
// Update our last msg.
mb.last.seq = sm.seq
mb.last.ts = sm.ts
// Clear our cache.
mb.clearCacheAndOffset()
mb.mu.Unlock()
// Write our index file.
mb.writeIndexInfo()
// Load msgs again.
mb.loadMsgs()
return purged, bytes, nil
}
// Lock should be held.
func (mb *msgBlock) isEmpty() bool {
return mb.first.seq > mb.last.seq
}
// Lock should be held.
func (mb *msgBlock) selectNextFirst() {
var seq uint64
for seq = mb.first.seq + 1; seq <= mb.last.seq; seq++ {
if _, ok := mb.dmap[seq]; ok {
// We will move past this so we can delete the entry.
delete(mb.dmap, seq)
} else {
break
}
}
// Set new first sequence.
mb.first.seq = seq
// Check if we are empty..
if mb.isEmpty() {
mb.first.ts = 0
return
}
// Need to get the timestamp.
// We will try the cache direct and fallback if needed.
sm, _ := mb.cacheLookupWithLock(seq)
if sm == nil {
// Slow path, need to unlock.
mb.mu.Unlock()
sm, _ = mb.fetchMsg(seq)
mb.mu.Lock()
}
if sm != nil {
mb.first.ts = sm.ts
} else {
mb.first.ts = 0
}
}
// Select the next FirstSeq
func (fs *fileStore) selectNextFirst() {
if len(fs.blks) > 0 {
mb := fs.blks[0]
mb.mu.RLock()
fs.state.FirstSeq = mb.first.seq
fs.state.FirstTime = time.Unix(0, mb.first.ts).UTC()
mb.mu.RUnlock()
} else {
// Could not find anything, so treat like purge
fs.state.FirstSeq = fs.state.LastSeq + 1
fs.state.FirstTime = time.Time{}
}
}
// Lock should be held.
func (mb *msgBlock) resetCacheExpireTimer(td time.Duration) {
if td == 0 {
td = mb.cexp
}
if mb.ctmr == nil {
mb.ctmr = time.AfterFunc(td, mb.expireCache)
} else {
mb.ctmr.Reset(td)
}
}
// Lock should be held.
func (mb *msgBlock) startCacheExpireTimer() {
mb.resetCacheExpireTimer(0)
}
// Used when we load in a message block.
// Lock should be held.
func (mb *msgBlock) clearCacheAndOffset() {
if mb.cache != nil {
mb.cache.off = 0
mb.cache.wp = 0
}
mb.clearCache()
}
// Lock should be held.
func (mb *msgBlock) clearCache() {
if mb.ctmr != nil {
mb.ctmr.Stop()
mb.ctmr = nil
}
if mb.cache == nil {
return
}
if mb.cache.off == 0 {
mb.cache = nil
} else {
// Clear msgs and index.
mb.cache.buf = nil
mb.cache.idx = nil
mb.cache.wp = 0
}
}
// Called to possibly expire a message block cache.
func (mb *msgBlock) expireCache() {
mb.mu.Lock()
defer mb.mu.Unlock()
if mb.cache == nil {
if mb.ctmr != nil {
mb.ctmr.Stop()
mb.ctmr = nil
}
return
}
// Can't expire if we are flushing or still have pending.
if mb.cache.flush || (len(mb.cache.buf)-int(mb.cache.wp) > 0) {
mb.resetCacheExpireTimer(mb.cexp)
return
}
// Grab timestamp to compare.
tns := time.Now().UnixNano()
// For the core buffer of messages, we care about reads and writes, but not removes.
bufts := mb.llts
if mb.lwts > bufts {
bufts = mb.lwts
}
// Check for activity on the cache that would prevent us from expiring.
if tns-bufts <= int64(mb.cexp) {
mb.resetCacheExpireTimer(mb.cexp - time.Duration(tns-bufts))
return
}
// If we are here we will at least expire the core msg buffer.
// We need to capture offset in case we do a write next before a full load.
mb.cache.off += len(mb.cache.buf)
mb.cache.buf = nil
mb.cache.wp = 0
// The idx is used in removes, and will have a longer timeframe.
// See if we should also remove the idx.
if tns-mb.lrts > int64(defaultCacheIdxExpiration) {
mb.clearCache()
} else {
mb.resetCacheExpireTimer(mb.cexp)
}
}
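// Note expireCache above is two-tiered: the message buffer expires after cexp
// of read/write inactivity, while the idx (still needed for removes) is kept
// until defaultCacheIdxExpiration has passed since the last remove.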
func (fs *fileStore) startAgeChk() {
if fs.ageChk == nil && fs.cfg.MaxAge != 0 {
fs.ageChk = time.AfterFunc(fs.cfg.MaxAge, fs.expireMsgs)
}
}
// Lock should be held.
func (fs *fileStore) expireMsgsLocked() {
fs.mu.Unlock()
fs.expireMsgs()
fs.mu.Lock()
}
// Will expire msgs that are too old.
func (fs *fileStore) expireMsgs() {
// Make sure this is only running one at a time.
fs.mu.Lock()
if fs.expiring {
fs.mu.Unlock()
return
}
fs.expiring = true
fs.mu.Unlock()
defer func() {
fs.mu.Lock()
fs.expiring = false
fs.mu.Unlock()
}()
now := time.Now().UnixNano()
minAge := now - int64(fs.cfg.MaxAge)
for {
sm, _ := fs.msgForSeq(0)
if sm != nil && sm.ts <= minAge {
fs.deleteFirstMsg()
} else {
fs.mu.Lock()
if sm == nil {
if fs.ageChk != nil {
fs.ageChk.Stop()
fs.ageChk = nil
}
} else {
fireIn := time.Duration(sm.ts-now) + fs.cfg.MaxAge
if fs.ageChk != nil {
fs.ageChk.Reset(fireIn)
} else {
fs.ageChk = time.AfterFunc(fireIn, fs.expireMsgs)
}
}
fs.mu.Unlock()
return
}
}
}
// Lock should be held.
func (fs *fileStore) checkAndFlushAllBlocks() {
for _, mb := range fs.blks {
if mb.pendingWriteSize() > 0 {
mb.flushPendingMsgsAndWait()
}
mb.writeIndexInfo()
}
}
// This will check all the checksums on messages and report back any sequence numbers with errors.
func (fs *fileStore) checkMsgs() *LostStreamData {
fs.mu.Lock()
defer fs.mu.Unlock()
fs.checkAndFlushAllBlocks()
for _, mb := range fs.blks {
if ld, err := mb.rebuildState(); err != nil && ld != nil {
// Rebuild fs state too.
mb.fs.rebuildState(ld)
}
}
return fs.ld
}
// Will write the message record to the underlying message block.
// filestore lock will be held.
func (mb *msgBlock) writeMsgRecord(rl, seq uint64, subj string, mhdr, msg []byte, ts int64, flush bool) error {
mb.mu.Lock()
// Make sure we have a cache setup.
if mb.cache == nil {
mb.cache = &cache{}
// Make sure we set the proper cache offset if we have existing data.
var fi os.FileInfo
if mb.mfd != nil {
fi, _ = mb.mfd.Stat()
} else {
fi, _ = os.Stat(mb.mfn)
}
if fi != nil {
mb.cache.off = int(fi.Size())
}
mb.startCacheExpireTimer()
}
// Indexing
index := len(mb.cache.buf) + int(mb.cache.off)
// Formats
// Format with no header
// total_len(4) sequence(8) timestamp(8) subj_len(2) subj msg hash(8)
// With headers, high bit on total length will be set.
// total_len(4) sequence(8) timestamp(8) subj_len(2) subj hdr_len(4) hdr msg hash(8)
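// Worked example (no headers, hypothetical values): subj "foo", msg "bar"
// encodes as total_len=36 | seq | ts | subj_len=3 | "foo" | "bar" | hash(8),
// where 36 = msgHdrSize(22) + len(subj)(3) + len(msg)(3) + checksum(8).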
// First write header, etc.
var le = binary.LittleEndian
var hdr [msgHdrSize]byte
l := uint32(rl)
hasHeaders := len(mhdr) > 0
if hasHeaders {
l |= hbit
}
le.PutUint32(hdr[0:], l)
le.PutUint64(hdr[4:], seq)
le.PutUint64(hdr[12:], uint64(ts))
le.PutUint16(hdr[20:], uint16(len(subj)))
// Now write to underlying buffer.
mb.cache.buf = append(mb.cache.buf, hdr[:]...)
mb.cache.buf = append(mb.cache.buf, subj...)
if hasHeaders {
var hlen [4]byte
le.PutUint32(hlen[0:], uint32(len(mhdr)))
mb.cache.buf = append(mb.cache.buf, hlen[:]...)
mb.cache.buf = append(mb.cache.buf, mhdr...)
}
mb.cache.buf = append(mb.cache.buf, msg...)
// Calculate hash.
mb.hh.Reset()
mb.hh.Write(hdr[4:20])
mb.hh.Write([]byte(subj))
if hasHeaders {
mb.hh.Write(mhdr)
}
mb.hh.Write(msg)
checksum := mb.hh.Sum(nil)
// Grab last checksum
copy(mb.lchk[0:], checksum)
// Update write through cache.
// Write to msg record.
mb.cache.buf = append(mb.cache.buf, checksum...)
// Write index
mb.cache.idx = append(mb.cache.idx, uint32(index)|hbit)
mb.cache.lrl = uint32(rl)
if mb.cache.fseq == 0 {
mb.cache.fseq = seq
}
// Set cache timestamp for last store.
mb.lwts = ts
// Decide if we should write index info when flushing in place.
writeIndex := ts-mb.lwits > int64(time.Second)
// Accounting
mb.updateAccounting(seq, ts, rl)
fch, werr := mb.fch, mb.werr
mb.mu.Unlock()
// If we should be flushing in place do so here. We will also flip to flushing in place if we
// had a write error.
if flush || werr != nil {
if err := mb.flushPendingMsgs(); err != nil && err != errFlushRunning && err != errNoPending {
return err
}
if writeIndex {
mb.writeIndexInfo()
}
} else {
// Kick the flusher here.
kickFlusher(fch)
}
return nil
}
// How many bytes pending to be written for this message block.
func (mb *msgBlock) pendingWriteSize() int {
if mb == nil {
return 0
}
var pending int
mb.mu.RLock()
if mb.mfd != nil && mb.cache != nil {
pending = len(mb.cache.buf) - int(mb.cache.wp)
}
mb.mu.RUnlock()
return pending
}
// Lock should be held.
func (mb *msgBlock) clearFlushing() {
if mb.cache != nil {
mb.cache.flush = false
}
}
// Lock should be held.
func (mb *msgBlock) setFlushing() {
if mb.cache != nil {
mb.cache.flush = true
}
}
// bytesPending returns the buffer to be used for writing to the underlying file.
// This marks we are in flush and will return nil if asked again until cleared.
// Lock should be held.
func (mb *msgBlock) bytesPending() ([]byte, error) {
if mb == nil || mb.mfd == nil {
return nil, errNoPending
}
if mb.cache == nil {
return nil, errNoCache
}
if mb.cache.flush {
return nil, errFlushRunning
}
buf := mb.cache.buf[mb.cache.wp:]
if len(buf) == 0 {
return nil, errNoPending
}
return buf, nil
}
// Return the number of bytes in this message block.
func (mb *msgBlock) numBytes() uint64 {
mb.mu.RLock()
nb := mb.bytes
mb.mu.RUnlock()
return nb
}
// Update accounting on a write msg.
// Lock should be held.
func (mb *msgBlock) updateAccounting(seq uint64, ts int64, rl uint64) {
if mb.first.seq == 0 || mb.first.ts == 0 {
mb.first.seq = seq
mb.first.ts = ts
}
// Need atomics here for selectMsgBlock speed.
atomic.StoreUint64(&mb.last.seq, seq)
mb.last.ts = ts
mb.bytes += rl
mb.msgs++
}
// Lock should be held.
func (fs *fileStore) writeMsgRecord(seq uint64, ts int64, subj string, hdr, msg []byte) (uint64, error) {
var err error
// Get size for this message.
rl := fileStoreMsgSize(subj, hdr, msg)
if rl&hbit != 0 {
return 0, ErrMsgTooLarge
}
// Grab our current last message block.
mb := fs.lmb
if mb == nil || mb.numBytes()+rl > fs.fcfg.BlockSize {
if mb, err = fs.newMsgBlockForWrite(); err != nil {
return 0, err
}
}
// Ask msg block to store in write through cache.
err = mb.writeMsgRecord(rl, seq, subj, hdr, msg, ts, fs.fip)
return rl, err
}
// Sync msg and index files as needed. This is called from a timer.
func (fs *fileStore) syncBlocks() {
fs.mu.RLock()
if fs.closed {
fs.mu.RUnlock()
return
}
blks := append([]*msgBlock(nil), fs.blks...)
fs.mu.RUnlock()
for _, mb := range blks {
mb.mu.RLock()
mfd := mb.mfd
ifd := mb.ifd
liwsz := mb.liwsz
mb.mu.RUnlock()
if mfd != nil {
mfd.Sync()
}
if ifd != nil {
ifd.Truncate(liwsz)
ifd.Sync()
}
}
fs.mu.RLock()
cfs := append([]*consumerFileStore(nil), fs.cfs...)
fs.mu.RUnlock()
// Do consumers.
for _, o := range cfs {
o.syncStateFile()
}
fs.mu.Lock()
fs.syncTmr = time.AfterFunc(fs.fcfg.SyncInterval, fs.syncBlocks)
fs.mu.Unlock()
}
// Select the message block where this message should be found.
// Return nil if not in the set.
// Read lock should be held.
func (fs *fileStore) selectMsgBlock(seq uint64) *msgBlock {
// Check for out of range.
if seq < fs.state.FirstSeq || seq > fs.state.LastSeq {
return nil
}
// blks are sorted in ascending order.
// TODO(dlc) - Can be smarter here, when lots of blks maybe use binary search.
// For now this is cache friendly for small to medium numbers of blks.
for _, mb := range fs.blks {
if seq <= atomic.LoadUint64(&mb.last.seq) {
return mb
}
}
return nil
}
// Select the message block where this message should be found.
// Return nil if not in the set.
func (fs *fileStore) selectMsgBlockForStart(minTime time.Time) *msgBlock {
fs.mu.RLock()
defer fs.mu.RUnlock()
t := minTime.UnixNano()
for _, mb := range fs.blks {
mb.mu.RLock()
found := t <= mb.last.ts
mb.mu.RUnlock()
if found {
return mb
}
}
return nil
}
// Index a raw msg buffer.
// Lock should be held.
func (mb *msgBlock) indexCacheBuf(buf []byte) error {
var le = binary.LittleEndian
var fseq uint64
var idx []uint32
var index uint32
if mb.cache == nil {
// Approximation, may adjust below.
fseq = mb.first.seq
idx = make([]uint32, 0, mb.msgs)
mb.cache = &cache{}
} else {
fseq = mb.cache.fseq
idx = mb.cache.idx
if len(idx) == 0 {
idx = make([]uint32, 0, mb.msgs)
}
index = uint32(len(mb.cache.buf))
buf = append(mb.cache.buf, buf...)
}
lbuf := uint32(len(buf))
for index < lbuf {
// Guard against a partial header at the end of the buffer.
if index+msgHdrSize > lbuf {
return errBadMsg
}
hdr := buf[index : index+msgHdrSize]
rl := le.Uint32(hdr[0:])
seq := le.Uint64(hdr[4:])
slen := le.Uint16(hdr[20:])
// Clear any headers bit that could be set.
rl &^= hbit
dlen := int(rl) - msgHdrSize
// Do some quick sanity checks here.
if dlen < 0 || int(slen) > dlen || dlen > int(rl) {
// This means something is off.
// TODO(dlc) - Add into bad list?
return errBadMsg
}
// Clear erase bit.
seq = seq &^ ebit
// Adjust if we guessed wrong.
if seq != 0 && seq < fseq {
fseq = seq
}
// We defer checksum checks to individual msg cache lookups to amortize costs and
// not introduce latency for the first message from a newly loaded block.
idx = append(idx, index)
mb.cache.lrl = uint32(rl)
index += mb.cache.lrl
}
mb.cache.buf = buf
mb.cache.idx = idx
mb.cache.fseq = fseq
mb.cache.wp += int(lbuf)
return nil
}
func (mb *msgBlock) quitChan() chan struct{} {
mb.mu.RLock()
defer mb.mu.RUnlock()
return mb.qch
}
// When called directly, flushPendingMsgs could be busy already and return errFlushRunning.
// This function is called for in place flushing so we need to wait.
func (mb *msgBlock) flushPendingMsgsAndWait() error {
var err error
// If we are in flush wait for that to clear.
for err = mb.flushPendingMsgs(); err == errFlushRunning; err = mb.flushPendingMsgs() {
qch := mb.quitChan()
select {
case <-qch:
return nil
case <-time.After(time.Millisecond):
}
}
return err
}
// flushPendingMsgs writes out any messages for this message block.
func (mb *msgBlock) flushPendingMsgs() error {
// We will not hold the lock across I/O so we can add more messages
// in parallel but we allow only one flush to be running.
mb.mu.Lock()
if mb.cache == nil || mb.mfd == nil {
mb.mu.Unlock()
return nil
}
// bytesPending will return with errFlushRunning
// if we are already flushing this message block.
buf, err := mb.bytesPending()
// If we got an error back return here.
if err != nil {
mb.mu.Unlock()
return err
}
woff := int64(mb.cache.off + mb.cache.wp)
lob := len(buf)
// Only one can be flushing at a time.
mb.setFlushing()
mfd := mb.mfd
mb.mu.Unlock()
var tn int
var n int
// Append new data to the message block file.
for lbb := lob; lbb > 0; lbb = len(buf) {
n, err = mfd.WriteAt(buf, woff)
if err != nil {
mb.removeIndexFile()
mb.dirtyClose()
if ld, err := mb.rebuildState(); err != nil && ld != nil {
// Rebuild fs state too.
mb.fs.rebuildState(ld)
}
break
}
woff += int64(n)
tn += n
// Success
if n == lbb {
break
}
// Partial write..
buf = buf[n:]
}
// Write attempt complete (success or error).
// Re-acquire lock to update.
mb.mu.Lock()
defer mb.mu.Unlock()
// Clear on exit.
defer mb.clearFlushing()
// set write err to any error.
mb.werr = err
// Cache may be gone.
if mb.cache == nil || mb.mfd == nil {
return mb.werr
}
// Check for additional writes while we were writing to the disk.
moreBytes := len(mb.cache.buf) - mb.cache.wp - lob
// Decide what we want to do with the buffer in hand. If we have load interest
// we will hold onto the whole thing, otherwise empty the buffer, possibly reusing it.
if ts := time.Now().UnixNano(); ts < mb.llts || (ts-mb.llts) <= int64(mb.cexp) {
mb.cache.wp += tn
} else {
if cap(buf) <= maxBufReuse {
buf = buf[:0]
} else {
buf = nil
}
if moreBytes > 0 {
nbuf := mb.cache.buf[len(mb.cache.buf)-moreBytes:]
if moreBytes > (len(mb.cache.buf)/4*3) && cap(nbuf) <= maxBufReuse {
buf = nbuf
} else {
buf = append(buf, nbuf...)
}
}
// Update our cache offset.
mb.cache.off = int(woff)
// Reset write pointer.
mb.cache.wp = 0
// Place buffer back in the cache structure.
mb.cache.buf = buf
}
return mb.werr
}
// Lock should be held.
func (mb *msgBlock) clearLoading() {
mb.loading = false
}
// Will load msgs from disk.
func (mb *msgBlock) loadMsgs() error {
// We hold the lock here the whole time by design.
mb.mu.Lock()
defer mb.mu.Unlock()
// Check to see if we are loading already.
if mb.loading {
return nil
}
// Set loading status.
mb.loading = true
defer mb.clearLoading()
checkCache:
// Check to see if we have a full cache.
if mb.cache != nil && len(mb.cache.idx) == int(mb.msgs) && mb.cache.off == 0 && len(mb.cache.buf) > 0 {
return nil
}
mfn := mb.mfn
mb.llts = time.Now().UnixNano()
// FIXME(dlc) - We could be smarter here.
if mb.cache != nil && len(mb.cache.buf)-mb.cache.wp > 0 {
mb.mu.Unlock()
err := mb.flushPendingMsgsAndWait()
mb.mu.Lock()
if err != nil && err != errFlushRunning {
return err
}
goto checkCache
}
// Load in the whole block. We want to hold the mb lock here to avoid any changes to
// state.
buf, err := ioutil.ReadFile(mfn)
if err != nil {
return err
}
// Reset the cache since we just read everything in.
// Make sure this is cleared in case we had a partial when we started.
mb.clearCacheAndOffset()
if err := mb.indexCacheBuf(buf); err != nil {
return err
}
if len(buf) > 0 {
mb.cloads++
mb.startCacheExpireTimer()
}
return nil
}
// Fetch a message from this block, possibly reading in and caching the messages.
// We assume the block was selected and is correct, so we do not do range checks.
func (mb *msgBlock) fetchMsg(seq uint64) (*fileStoredMsg, error) {
var sm *fileStoredMsg
sm, err := mb.cacheLookup(seq)
if err == nil || (err != errNoCache && err != errPartialCache) {
return sm, err
}
// We have a cache miss here.
if err := mb.loadMsgs(); err != nil {
return nil, err
}
return mb.cacheLookup(seq)
}
var (
errNoCache = errors.New("no message cache")
errBadMsg = errors.New("malformed or corrupt message")
errDeletedMsg = errors.New("deleted message")
errPartialCache = errors.New("partial cache")
errNoPending = errors.New("message block does not have pending data")
errNotReadable = errors.New("storage directory not readable")
errFlushRunning = errors.New("flush is already running")
errCorruptState = errors.New("corrupt state file")
)
// Used to signal a message record with headers; also set on cache index
// entries to mark messages that have had their checksums checked.
const hbit = 1 << 31
// Used for marking erased messages sequences.
const ebit = 1 << 63
// Will do a lookup from the cache.
func (mb *msgBlock) cacheLookup(seq uint64) (*fileStoredMsg, error) {
// Currently grab the write lock for optional use of mb.hh. Prefer this for now
// vs read lock and promote. Also defer based on 1.14 performance.
mb.mu.Lock()
defer mb.mu.Unlock()
return mb.cacheLookupWithLock(seq)
}
// Will do a lookup from cache assuming lock is held.
func (mb *msgBlock) cacheLookupWithLock(seq uint64) (*fileStoredMsg, error) {
if mb.cache == nil || len(mb.cache.idx) == 0 {
return nil, errNoCache
}
if seq < mb.first.seq || seq < mb.cache.fseq || seq > mb.last.seq {
return nil, ErrStoreMsgNotFound
}
// If we have a delete map check it.
if mb.dmap != nil {
if _, ok := mb.dmap[seq]; ok {
return nil, errDeletedMsg
}
}
if mb.cache.off > 0 {
return nil, errPartialCache
}
bi, _, hashChecked, err := mb.slotInfo(int(seq - mb.cache.fseq))
if err != nil {
return nil, errPartialCache
}
// Update cache activity.
mb.llts = time.Now().UnixNano()
// We use the high bit to denote we have already checked the checksum.
var hh hash.Hash64
if !hashChecked {
hh = mb.hh // This will force the hash check in msgFromBuf.
mb.cache.idx[seq-mb.cache.fseq] = (bi | hbit)
}
li := int(bi) - mb.cache.off
buf := mb.cache.buf[li:]
// Parse from the raw buffer.
subj, hdr, msg, mseq, ts, err := msgFromBuf(buf, hh)
if err != nil {
return nil, err
}
if seq != mseq {
return nil, fmt.Errorf("sequence numbers for cache load did not match, %d vs %d", seq, mseq)
}
sm := &fileStoredMsg{
subj: subj,
hdr: hdr,
msg: msg,
seq: seq,
ts: ts,
mb: mb,
off: int64(bi),
}
return sm, nil
}
// Will return message for the given sequence number.
func (fs *fileStore) msgForSeq(seq uint64) (*fileStoredMsg, error) {
// TODO(dlc) - Since Store, Remove, Skip all hold the write lock on fs this will
// be stalled. Need another lock if want to happen in parallel.
fs.mu.RLock()
if fs.closed {
fs.mu.RUnlock()
return nil, ErrStoreClosed
}
// Indicates we want first msg.
if seq == 0 {
seq = fs.state.FirstSeq
}
// Make sure to snapshot here.
lseq := fs.state.LastSeq
mb := fs.selectMsgBlock(seq)
fs.mu.RUnlock()
if mb == nil {
var err = ErrStoreEOF
if seq <= lseq {
err = ErrStoreMsgNotFound
}
return nil, err
}
// TODO(dlc) - older design had a check to prefetch when we knew we were
// loading in order and getting close to end of current mb. Should add
// something like it back in.
return mb.fetchMsg(seq)
}
// Internal function to return msg parts from a raw buffer.
func msgFromBuf(buf []byte, hh hash.Hash64) (string, []byte, []byte, uint64, int64, error) {
if len(buf) < msgHdrSize {
return _EMPTY_, nil, nil, 0, 0, errBadMsg
}
var le = binary.LittleEndian
hdr := buf[:msgHdrSize]
rl := le.Uint32(hdr[0:])
hasHeaders := rl&hbit != 0
rl &^= hbit // clear header bit
dlen := int(rl) - msgHdrSize
slen := int(le.Uint16(hdr[20:]))
// Simple sanity check.
if dlen < 0 || slen > dlen || int(rl) > len(buf) {
return _EMPTY_, nil, nil, 0, 0, errBadMsg
}
data := buf[msgHdrSize : msgHdrSize+dlen]
// Do checksum tests here if requested.
if hh != nil {
hh.Reset()
hh.Write(hdr[4:20])
hh.Write(data[:slen])
if hasHeaders {
hh.Write(data[slen+4 : dlen-8])
} else {
hh.Write(data[slen : dlen-8])
}
if !bytes.Equal(hh.Sum(nil), data[len(data)-8:]) {
return _EMPTY_, nil, nil, 0, 0, errBadMsg
}
}
seq := le.Uint64(hdr[4:])
if seq&ebit != 0 {
seq = 0
}
ts := int64(le.Uint64(hdr[12:]))
// FIXME(dlc) - We need to not allow appends to the underlying buffer, so we will
// fix the capacity. This will cause a copy though in stream:internalSendLoop when
// we append CRLF but this was causing a race. Need to rethink more to avoid this copy.
end := dlen - 8
var mhdr, msg []byte
if hasHeaders {
hl := le.Uint32(data[slen:])
bi := slen + 4
li := bi + int(hl)
mhdr = data[bi:li:li]
msg = data[li:end:end]
} else {
msg = data[slen:end:end]
}
return string(data[:slen]), mhdr, msg, seq, ts, nil
}
// LoadMsg will lookup the message by sequence number and return it if found.
func (fs *fileStore) LoadMsg(seq uint64) (string, []byte, []byte, int64, error) {
sm, err := fs.msgForSeq(seq)
if sm != nil {
return sm.subj, sm.hdr, sm.msg, sm.ts, nil
}
return "", nil, nil, 0, err
}
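// A round-trip sketch (hypothetical subject and payload):
//
//   seq, _, err := fs.StoreMsg("orders.new", nil, []byte("hello"))
//   subj, hdr, msg, ts, err := fs.LoadMsg(seq)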
// State returns the current state of the stream.
func (fs *fileStore) State() StreamState {
fs.mu.RLock()
state := fs.state
state.Consumers = len(fs.cfs)
state.Deleted = nil // make sure.
for _, mb := range fs.blks {
mb.mu.Lock()
fseq := mb.first.seq
for seq := range mb.dmap {
if seq <= fseq {
delete(mb.dmap, seq)
} else {
state.Deleted = append(state.Deleted, seq)
}
}
mb.mu.Unlock()
}
fs.mu.RUnlock()
state.Lost = fs.lostData()
// Cannot be guaranteed to be sorted.
if len(state.Deleted) > 0 {
sort.Slice(state.Deleted, func(i, j int) bool {
return state.Deleted[i] < state.Deleted[j]
})
}
return state
}
const emptyRecordLen = 22 + 8
func fileStoreMsgSize(subj string, hdr, msg []byte) uint64 {
if len(hdr) == 0 {
// length of the message record (4 bytes) + seq(8) + ts(8) + subj_len(2) + subj + msg + hash(8)
return uint64(22 + len(subj) + len(msg) + 8)
}
// length of the message record (4 bytes) + seq(8) + ts(8) + subj_len(2) + subj + hdr_len(4) + hdr + msg + hash(8)
return uint64(22 + len(subj) + 4 + len(hdr) + len(msg) + 8)
}
func fileStoreMsgSizeEstimate(slen, maxPayload int) uint64 {
return uint64(emptyRecordLen + slen + 4 + maxPayload)
}
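// Example: fileStoreMsgSize("foo", nil, []byte("bar")) = 22 + 3 + 3 + 8 = 36,
// while the same message with a 10 byte header would be
// 22 + 3 + 4 + 10 + 3 + 8 = 50.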
// Write index info to the appropriate file.
func (mb *msgBlock) writeIndexInfo() error {
// HEADER: magic version msgs bytes fseq fts lseq lts checksum
var hdr [indexHdrSize]byte
// Write header
hdr[0] = magic
hdr[1] = version
mb.mu.Lock()
defer mb.mu.Unlock()
n := hdrLen
n += binary.PutUvarint(hdr[n:], mb.msgs)
n += binary.PutUvarint(hdr[n:], mb.bytes)
n += binary.PutUvarint(hdr[n:], mb.first.seq)
n += binary.PutVarint(hdr[n:], mb.first.ts)
n += binary.PutUvarint(hdr[n:], mb.last.seq)
n += binary.PutVarint(hdr[n:], mb.last.ts)
n += binary.PutUvarint(hdr[n:], uint64(len(mb.dmap)))
buf := append(hdr[:n], mb.lchk[:]...)
// Append a delete map if needed
if len(mb.dmap) > 0 {
buf = append(buf, mb.genDeleteMap()...)
}
var err error
if mb.ifd == nil {
ifd, err := os.OpenFile(mb.ifn, os.O_CREATE|os.O_RDWR, 0644)
if err != nil {
return err
}
mb.ifd = ifd
}
mb.lwits = time.Now().UnixNano()
if n, err = mb.ifd.WriteAt(buf, 0); err == nil {
mb.liwsz = int64(n)
mb.werr = nil
} else {
mb.werr = err
}
return err
}
// readIndexInfo will read in the index information for the message block.
func (mb *msgBlock) readIndexInfo() error {
buf, err := ioutil.ReadFile(mb.ifn)
if err != nil {
return err
}
if err := checkHeader(buf); err != nil {
defer os.Remove(mb.ifn)
return fmt.Errorf("bad index file")
}
bi := hdrLen
// Helpers, will set bi to -1 on error.
readSeq := func() uint64 {
if bi < 0 {
return 0
}
seq, n := binary.Uvarint(buf[bi:])
if n <= 0 {
bi = -1
return 0
}
bi += n
return seq &^ ebit
}
readCount := readSeq
readTimeStamp := func() int64 {
if bi < 0 {
return 0
}
ts, n := binary.Varint(buf[bi:])
if n <= 0 {
bi = -1
return -1
}
bi += n
return ts
}
mb.msgs = readCount()
mb.bytes = readCount()
mb.first.seq = readSeq()
mb.first.ts = readTimeStamp()
mb.last.seq = readSeq()
mb.last.ts = readTimeStamp()
dmapLen := readCount()
// Check if this is a short write index file.
if bi < 0 || bi+checksumSize > len(buf) {
defer os.Remove(mb.ifn)
return fmt.Errorf("short index file")
}
// Checksum
copy(mb.lchk[0:], buf[bi:bi+checksumSize])
bi += checksumSize
// Now check for presence of a delete map
if dmapLen > 0 {
mb.dmap = make(map[uint64]struct{}, dmapLen)
for i := 0; i < int(dmapLen); i++ {
seq := readSeq()
if seq == 0 {
break
}
mb.dmap[seq+mb.first.seq] = struct{}{}
}
}
return nil
}
func (mb *msgBlock) genDeleteMap() []byte {
if len(mb.dmap) == 0 {
return nil
}
buf := make([]byte, len(mb.dmap)*binary.MaxVarintLen64)
// We use first seq as an offset to cut down on size.
fseq, n := uint64(mb.first.seq), 0
for seq := range mb.dmap {
// This is for lazy cleanup as the first sequence moves up.
if seq <= fseq {
delete(mb.dmap, seq)
} else {
n += binary.PutUvarint(buf[n:], seq-fseq)
}
}
return buf[:n]
}
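// Example for genDeleteMap above: with first.seq = 9 and dmap = {10, 12} we
// write the uvarints for 1 and 3; readIndexInfo adds first.seq back when
// decoding (seq+mb.first.seq).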
func syncAndClose(mfd, ifd *os.File) {
if mfd != nil {
mfd.Sync()
mfd.Close()
}
if ifd != nil {
ifd.Sync()
ifd.Close()
}
}
// Will return total number of cache loads.
func (fs *fileStore) cacheLoads() uint64 {
var tl uint64
fs.mu.RLock()
for _, mb := range fs.blks {
tl += mb.cloads
}
fs.mu.RUnlock()
return tl
}
// Will return total number of cached bytes.
func (fs *fileStore) cacheSize() uint64 {
var sz uint64
fs.mu.RLock()
for _, mb := range fs.blks {
mb.mu.RLock()
if mb.cache != nil {
sz += uint64(len(mb.cache.buf))
}
mb.mu.RUnlock()
}
fs.mu.RUnlock()
return sz
}
// Will return total number of dmapEntries for all msg blocks.
func (fs *fileStore) dmapEntries() int {
var total int
fs.mu.RLock()
for _, mb := range fs.blks {
total += len(mb.dmap)
}
fs.mu.RUnlock()
return total
}
// Purge will remove all messages from this store.
// Will return the number of purged messages.
func (fs *fileStore) Purge() (uint64, error) {
fs.mu.Lock()
if fs.closed {
fs.mu.Unlock()
return 0, ErrStoreClosed
}
purged := fs.state.Msgs
rbytes := int64(fs.state.Bytes)
fs.state.FirstSeq = fs.state.LastSeq + 1
fs.state.FirstTime = time.Time{}
fs.state.Bytes = 0
fs.state.Msgs = 0
for _, mb := range fs.blks {
mb.dirtyClose()
}
fs.blks = nil
fs.lmb = nil
// Move the msgs directory out of the way, will delete out of band.
// FIXME(dlc) - These can error and we need to change api above to propagate?
mdir := path.Join(fs.fcfg.StoreDir, msgDir)
pdir := path.Join(fs.fcfg.StoreDir, purgeDir)
// If purge directory still exists then we need to wait
// in place and remove since rename would fail.
if _, err := os.Stat(pdir); err == nil {
os.RemoveAll(pdir)
}
os.Rename(mdir, pdir)
go os.RemoveAll(pdir)
// Create new one.
os.MkdirAll(mdir, 0755)
// Make sure we have a lmb to write to.
if _, err := fs.newMsgBlockForWrite(); err != nil {
fs.mu.Unlock()
return purged, err
}
fs.lmb.first.seq = fs.state.FirstSeq
fs.lmb.last.seq = fs.state.LastSeq
fs.lmb.writeIndexInfo()
cb := fs.scb
fs.mu.Unlock()
if cb != nil {
cb(-int64(purged), -rbytes, 0, _EMPTY_)
}
return purged, nil
}
// Compact will remove all messages from this store up to
// but not including the seq parameter.
// Will return the number of purged messages.
func (fs *fileStore) Compact(seq uint64) (uint64, error) {
if seq == 0 {
return fs.Purge()
}
// TODO(dlc) - We can be smarter for large compactions and drop whole msg blocks.
var purged uint64
if last := fs.lastSeq(); seq <= last {
for fseq := fs.firstSeq(); fseq < seq; fseq = fs.firstSeq() {
if found, err := fs.removeMsg(fseq, false); err != nil {
if err == ErrStoreMsgNotFound {
continue
} else if err == ErrStoreEOF {
err = nil
}
return purged, err
} else if found {
purged++
}
}
} else {
// We are compacting past the end of our range. Do purge and set sequences correctly
// such that the next message placed will have seq.
var err error
if purged, err = fs.Purge(); err != nil {
return 0, err
}
fs.resetFirst(seq)
}
return purged, nil
}
// Truncate will truncate a stream store so that seq becomes the last sequence retained. Sequence needs to be valid.
func (fs *fileStore) Truncate(seq uint64) error {
fs.mu.Lock()
if fs.closed {
fs.mu.Unlock()
return ErrStoreClosed
}
if fs.sips > 0 {
fs.mu.Unlock()
return ErrStoreSnapshotInProgress
}
nlmb := fs.selectMsgBlock(seq)
if nlmb == nil {
fs.mu.Unlock()
return ErrInvalidSequence
}
lsm, _ := nlmb.fetchMsg(seq)
if lsm == nil {
fs.mu.Unlock()
return ErrInvalidSequence
}
// Set lmb to nlmb and make sure writeable.
fs.lmb = nlmb
fs.enableLastMsgBlockForWriting()
var purged, bytes uint64
// Truncate our new last message block.
nmsgs, nbytes, err := nlmb.truncate(lsm)
if err != nil {
fs.mu.Unlock()
return err
}
// Account for the truncated msgs and bytes.
purged += nmsgs
bytes += nbytes
// Remove any left over msg blocks.
getLastMsgBlock := func() *msgBlock { return fs.blks[len(fs.blks)-1] }
for mb := getLastMsgBlock(); mb != nlmb; mb = getLastMsgBlock() {
mb.mu.Lock()
purged += mb.msgs
bytes += mb.bytes
fs.removeMsgBlock(mb)
mb.mu.Unlock()
}
// Reset last.
fs.state.LastSeq = lsm.seq
fs.state.LastTime = time.Unix(0, lsm.ts).UTC()
// Update msgs and bytes.
fs.state.Msgs -= purged
fs.state.Bytes -= bytes
cb := fs.scb
fs.mu.Unlock()
if cb != nil {
cb(-int64(purged), -int64(bytes), 0, _EMPTY_)
}
return nil
}
func (fs *fileStore) resetFirst(newFirst uint64) {
fs.mu.Lock()
fs.state.FirstSeq = newFirst
fs.state.LastSeq = newFirst - 1
fs.lmb.first.seq = fs.state.FirstSeq
fs.lmb.last.seq = fs.state.LastSeq
fs.lmb.writeIndexInfo()
fs.mu.Unlock()
}
func (fs *fileStore) firstSeq() uint64 {
fs.mu.RLock()
seq := fs.state.FirstSeq
fs.mu.RUnlock()
return seq
}
func (fs *fileStore) lastSeq() uint64 {
fs.mu.RLock()
seq := fs.state.LastSeq
fs.mu.RUnlock()
return seq
}
// Returns number of msg blks.
func (fs *fileStore) numMsgBlocks() int {
fs.mu.RLock()
defer fs.mu.RUnlock()
return len(fs.blks)
}
// Will remove our index file.
func (mb *msgBlock) removeIndexFile() {
mb.mu.RLock()
defer mb.mu.RUnlock()
if mb.ifd != nil {
mb.ifd.Close()
mb.ifd = nil
}
if mb.ifn != _EMPTY_ {
os.Remove(mb.ifn)
}
}
// Removes the msgBlock
// Both locks should be held.
func (fs *fileStore) removeMsgBlock(mb *msgBlock) {
mb.dirtyCloseWithRemove(true)
// Remove from list.
for i, omb := range fs.blks {
if mb == omb {
fs.blks = append(fs.blks[:i], fs.blks[i+1:]...)
break
}
}
// Check if this is the last message block.
if mb == fs.lmb {
fs.newMsgBlockForWrite()
}
}
// Called by purge to simply get rid of the cache and close any fds.
// Lock should not be held.
func (mb *msgBlock) dirtyClose() {
mb.mu.Lock()
mb.dirtyCloseWithRemove(false)
mb.mu.Unlock()
}
// Should be called with lock held.
func (mb *msgBlock) dirtyCloseWithRemove(remove bool) {
if mb == nil {
return
}
// Close cache
mb.clearCacheAndOffset()
// Quit our loops.
if mb.qch != nil {
close(mb.qch)
mb.qch = nil
}
if mb.mfd != nil {
mb.mfd.Close()
mb.mfd = nil
}
if mb.ifd != nil {
mb.ifd.Close()
mb.ifd = nil
}
if remove {
if mb.ifn != _EMPTY_ {
os.Remove(mb.ifn)
mb.ifn = _EMPTY_
}
if mb.mfn != _EMPTY_ {
os.Remove(mb.mfn)
mb.mfn = _EMPTY_
}
}
}
func (mb *msgBlock) close(sync bool) {
if mb == nil {
return
}
mb.mu.Lock()
defer mb.mu.Unlock()
if mb.qch == nil {
return
}
// Close cache
mb.clearCacheAndOffset()
// Quit our loops.
if mb.qch != nil {
close(mb.qch)
mb.qch = nil
}
if sync {
syncAndClose(mb.mfd, mb.ifd)
} else {
go syncAndClose(mb.mfd, mb.ifd)
}
mb.mfd = nil
mb.ifd = nil
}
func (fs *fileStore) closeAllMsgBlocks(sync bool) {
for _, mb := range fs.blks {
mb.close(sync)
}
}
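// Delete purges all messages, stops the store, and removes the store
// directory from disk.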
func (fs *fileStore) Delete() error {
if fs.isClosed() {
return ErrStoreClosed
}
fs.Purge()
if err := fs.Stop(); err != nil {
return err
}
return os.RemoveAll(fs.fcfg.StoreDir)
}
func (fs *fileStore) Stop() error {
fs.mu.Lock()
if fs.closed {
fs.mu.Unlock()
return ErrStoreClosed
}
fs.closed = true
fs.lmb = nil
fs.checkAndFlushAllBlocks()
fs.closeAllMsgBlocks(false)
if fs.syncTmr != nil {
fs.syncTmr.Stop()
fs.syncTmr = nil
}
if fs.ageChk != nil {
fs.ageChk.Stop()
fs.ageChk = nil
}
var _cfs [256]*consumerFileStore
cfs := append(_cfs[:0], fs.cfs...)
fs.cfs = nil
fs.mu.Unlock()
for _, o := range cfs {
o.Stop()
}
return nil
}
const errFile = "errors.txt"
// Stream our snapshot through S2 compression and tar.
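// Illustrative sketch (not part of the original source): reading the snapshot
// back. The archive is a tar stream wrapped in S2 compression, so a consumer
// can unwrap it with s2.NewReader and tar.NewReader; the function name
// readSnapshot here is hypothetical.
//
//	func readSnapshot(r io.Reader) error {
//		tr := tar.NewReader(s2.NewReader(r))
//		for {
//			hdr, err := tr.Next()
//			if err == io.EOF {
//				return nil // end of archive
//			}
//			if err != nil {
//				return err
//			}
//			// hdr.Name matches the names written below, e.g. the
//			// stream meta files and the msgs index/block files.
//			if _, err := io.Copy(ioutil.Discard, tr); err != nil {
//				return err
//			}
//		}
//	}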
func (fs *fileStore) streamSnapshot(w io.WriteCloser, state *StreamState, includeConsumers bool) {
defer w.Close()
enc := s2.NewWriter(w)
defer enc.Close()
tw := tar.NewWriter(enc)
defer tw.Close()
defer func() {
fs.mu.Lock()
fs.sips--
fs.mu.Unlock()
}()
modTime := time.Now().UTC()
writeFile := func(name string, buf []byte) error {
hdr := &tar.Header{
Name: name,
Mode: 0600,
ModTime: modTime,
Uname: "nats",
Gname: "nats",
Size: int64(len(buf)),
Format: tar.FormatPAX,
}
if err := tw.WriteHeader(hdr); err != nil {
return err
}
if _, err := tw.Write(buf); err != nil {
return err
}
return nil
}
writeErr := func(err string) {
writeFile(errFile, []byte(err))
}
fs.mu.Lock()
blks := fs.blks
// Write our general meta data.
if err := fs.writeStreamMeta(); err != nil {
fs.mu.Unlock()
writeErr(fmt.Sprintf("Could not write stream meta file: %v", err))
return
}
meta, err := ioutil.ReadFile(path.Join(fs.fcfg.StoreDir, JetStreamMetaFile))
if err != nil {
fs.mu.Unlock()
writeErr(fmt.Sprintf("Could not read stream meta file: %v", err))
return
}
sum, err := ioutil.ReadFile(path.Join(fs.fcfg.StoreDir, JetStreamMetaFileSum))
if err != nil {
fs.mu.Unlock()
writeErr(fmt.Sprintf("Could not read stream checksum file: %v", err))
return
}
fs.mu.Unlock()
// Meta first.
if writeFile(JetStreamMetaFile, meta) != nil {
return
}
if writeFile(JetStreamMetaFileSum, sum) != nil {
return
}
// Can't use join path here, tar only recognizes relative paths with forward slashes.
msgPre := msgDir + "/"
// Now do messages themselves.
for _, mb := range blks {
if mb.pendingWriteSize() > 0 {
mb.flushPendingMsgsAndWait()
mb.writeIndexInfo()
}
mb.mu.Lock()
buf, err := ioutil.ReadFile(mb.ifn)
if err != nil {
mb.mu.Unlock()
writeErr(fmt.Sprintf("Could not read message block [%d] meta file: %v", mb.index, err))
return
}
if writeFile(msgPre+fmt.Sprintf(indexScan, mb.index), buf) != nil {
mb.mu.Unlock()
return
}
// We could stream but don't want to hold the lock and prevent changes, so just read in and
// release the lock for now.
// TODO(dlc) - Maybe reuse buffer?
buf, err = ioutil.ReadFile(mb.mfn)
if err != nil {
mb.mu.Unlock()
writeErr(fmt.Sprintf("Could not read message block [%d]: %v", mb.index, err))
return
}
mb.mu.Unlock()
// Do this one unlocked.
if writeFile(msgPre+fmt.Sprintf(blkScan, mb.index), buf) != nil {
return
}
}
// Bail if no consumers requested.
if !includeConsumers {
return
}
// Do consumers' state last.
fs.mu.Lock()
cfs := fs.cfs
fs.mu.Unlock()
for _, o := range cfs {
o.mu.Lock()
meta, err := ioutil.ReadFile(path.Join(o.odir, JetStreamMetaFile))
if err != nil {
o.mu.Unlock()
writeErr(fmt.Sprintf("Could not read consumer meta file for %q: %v", o.name, err))
return
}
sum, err := ioutil.ReadFile(path.Join(o.odir, JetStreamMetaFileSum))
if err != nil {
o.mu.Unlock()
writeErr(fmt.Sprintf("Could not read consumer checksum file for %q: %v", o.name, err))
return
}
// We can have the running state directly encoded now.
state, err := o.encodeState()
if err != nil {
o.mu.Unlock()
writeErr(fmt.Sprintf("Could not encode consumer state for %q: %v", o.name, err))
return
}
odirPre := consumerDir + "/" + o.name
o.mu.Unlock()
// Write all the consumer files.
if writeFile(path.Join(odirPre, JetStreamMetaFile), meta) != nil {
return
}
if writeFile(path.Join(odirPre, JetStreamMetaFileSum), sum) != nil {
return
}
writeFile(path.Join(odirPre, consumerState), state)
}
}
// Create a snapshot of this stream and its consumers' state along with messages.
func (fs *fileStore) Snapshot(deadline time.Duration, checkMsgs, includeConsumers bool) (*SnapshotResult, error) {
fs.mu.Lock()
if fs.closed {
fs.mu.Unlock()
return nil, ErrStoreClosed
}
// Only allow one at a time.
if fs.sips > 0 {
fs.mu.Unlock()
return nil, ErrStoreSnapshotInProgress
}
// Mark us as snapshotting
fs.sips += 1
fs.mu.Unlock()
// We can add to our stream while snapshotting but not delete anything.
state := fs.State()
if checkMsgs {
ld := fs.checkMsgs()
if ld != nil && len(ld.Msgs) > 0 {
return nil, fmt.Errorf("snapshot check detected %d bad messages", len(ld.Msgs))
}
}
pr, pw := net.Pipe()
// Set a write deadline here to protect ourselves.
if deadline > 0 {
pw.SetWriteDeadline(time.Now().Add(deadline))
}
// Stream in a separate goroutine.
go fs.streamSnapshot(pw, &state, includeConsumers)
return &SnapshotResult{pr, state}, nil
}
// Helper to return the config.
func (fs *fileStore) fileStoreConfig() FileStoreConfig {
fs.mu.RLock()
defer fs.mu.RUnlock()
return fs.fcfg
}
////////////////////////////////////////////////////////////////////////////////
// Consumers
////////////////////////////////////////////////////////////////////////////////
type consumerFileStore struct {
mu sync.Mutex
fs *fileStore
cfg *FileConsumerInfo
name string
odir string
ifn string
ifd *os.File
lwsz int64
hh hash.Hash64
state ConsumerState
fch chan struct{}
qch chan struct{}
flusher bool
writing bool
closed bool
}
func (fs *fileStore) ConsumerStore(name string, cfg *ConsumerConfig) (ConsumerStore, error) {
if fs == nil {
return nil, fmt.Errorf("filestore is nil")
}
if fs.isClosed() {
return nil, ErrStoreClosed
}
if cfg == nil || name == "" {
return nil, fmt.Errorf("bad consumer config")
}
odir := path.Join(fs.fcfg.StoreDir, consumerDir, name)
if err := os.MkdirAll(odir, 0755); err != nil {
return nil, fmt.Errorf("could not create consumer directory - %v", err)
}
csi := &FileConsumerInfo{ConsumerConfig: *cfg}
o := &consumerFileStore{
fs: fs,
cfg: csi,
name: name,
odir: odir,
ifn: path.Join(odir, consumerState),
}
key := sha256.Sum256([]byte(fs.cfg.Name + "/" + name))
hh, err := highwayhash.New64(key[:])
if err != nil {
return nil, fmt.Errorf("could not create hash: %v", err)
}
o.hh = hh
// Write our meta data iff it does not exist.
meta := path.Join(odir, JetStreamMetaFile)
if _, err := os.Stat(meta); err != nil && os.IsNotExist(err) {
csi.Created = time.Now().UTC()
if err := o.writeConsumerMeta(); err != nil {
return nil, err
}
}
// Create channels to control our flush goroutine.
o.fch = make(chan struct{}, 1)
o.qch = make(chan struct{})
go o.flushLoop()
fs.mu.Lock()
fs.cfs = append(fs.cfs, o)
fs.mu.Unlock()
return o, nil
}
// Kick flusher for this consumer.
// Lock should be held.
func (o *consumerFileStore) kickFlusher() {
if o.fch != nil {
select {
case o.fch <- struct{}{}:
default:
}
}
}
// Set in flusher status
func (o *consumerFileStore) setInFlusher() {
o.mu.Lock()
o.flusher = true
o.mu.Unlock()
}
// Clear in flusher status
func (o *consumerFileStore) clearInFlusher() {
o.mu.Lock()
o.flusher = false
o.mu.Unlock()
}
// Report in flusher status
func (o *consumerFileStore) inFlusher() bool {
o.mu.Lock()
defer o.mu.Unlock()
return o.flusher
}
// flushLoop watches for consumer updates and the quit channel.
func (o *consumerFileStore) flushLoop() {
o.mu.Lock()
fch, qch := o.fch, o.qch
o.mu.Unlock()
o.setInFlusher()
defer o.clearInFlusher()
for {
select {
case <-fch:
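// Brief pause so rapid successive kicks coalesce into a single write.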
time.Sleep(5 * time.Millisecond)
select {
case <-qch:
return
default:
}
o.mu.Lock()
if o.closed {
o.mu.Unlock()
return
}
buf, err := o.encodeState()
o.mu.Unlock()
if err != nil {
return
}
// TODO(dlc) - if we error should start failing upwards.
o.writeState(buf)
case <-qch:
return
}
}
}
// UpdateDelivered is called whenever a new message has been delivered.
func (o *consumerFileStore) UpdateDelivered(dseq, sseq, dc uint64, ts int64) error {
o.mu.Lock()
defer o.mu.Unlock()
if dc != 1 && o.cfg.AckPolicy == AckNone {
return ErrNoAckPolicy
}
// See if we expect an ack for this.
if o.cfg.AckPolicy != AckNone {
// Need to create pending records here.
if o.state.Pending == nil {
o.state.Pending = make(map[uint64]*Pending)
}
var p *Pending
// Check for an update to a message already delivered.
if sseq <= o.state.Delivered.Stream {
if p = o.state.Pending[sseq]; p != nil {
p.Timestamp = ts
}
}
if p == nil {
// Move delivered if this is new.
o.state.Delivered.Consumer = dseq
o.state.Delivered.Stream = sseq
p = &Pending{dseq, ts}
}
if dc > 1 {
if o.state.Redelivered == nil {
o.state.Redelivered = make(map[uint64]uint64)
}
o.state.Redelivered[sseq] = dc - 1
}
o.state.Pending[sseq] = &Pending{dseq, ts}
} else {
// For AckNone just update delivered and ackfloor at the same time.
o.state.Delivered.Consumer = dseq
o.state.Delivered.Stream = sseq
o.state.AckFloor.Consumer = dseq
o.state.AckFloor.Stream = sseq
}
// Make sure we flush to disk.
o.kickFlusher()
return nil
}
// UpdateAcks is called whenever a consumer with explicit ack or ack all acks a message.
func (o *consumerFileStore) UpdateAcks(dseq, sseq uint64) error {
o.mu.Lock()
defer o.mu.Unlock()
if o.cfg.AckPolicy == AckNone {
return ErrNoAckPolicy
}
if len(o.state.Pending) == 0 {
return ErrStoreMsgNotFound
}
p := o.state.Pending[sseq]
if p == nil {
return ErrStoreMsgNotFound
}
// Delete from our state.
delete(o.state.Pending, sseq)
if len(o.state.Redelivered) > 0 {
delete(o.state.Redelivered, sseq)
if len(o.state.Redelivered) == 0 {
o.state.Redelivered = nil
}
}
if len(o.state.Pending) == 0 {
o.state.Pending = nil
o.state.AckFloor.Consumer = o.state.Delivered.Consumer
o.state.AckFloor.Stream = o.state.Delivered.Stream
} else if o.state.AckFloor.Consumer == dseq-1 {
notFirst := o.state.AckFloor.Consumer != 0
o.state.AckFloor.Consumer = dseq
o.state.AckFloor.Stream = sseq
// Close the gap if needed.
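// Scan forward for the next still-pending stream sequence; the ack floor
// advances to just below that first outstanding entry.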
if notFirst && o.state.Delivered.Consumer > dseq {
for ss := sseq + 1; ss < o.state.Delivered.Stream; ss++ {
if p, ok := o.state.Pending[ss]; ok {
if p.Sequence > 0 {
o.state.AckFloor.Consumer = p.Sequence - 1
o.state.AckFloor.Stream = ss - 1
}
break
}
}
}
}
o.kickFlusher()
return nil
}
const seqsHdrSize = 6*binary.MaxVarintLen64 + hdrLen
// Encode our consumer state, version 2.
// Lock should be held.
func (o *consumerFileStore) encodeState() ([]byte, error) {
if o.closed {
return nil, ErrStoreClosed
}
return encodeConsumerState(&o.state), nil
}
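// The version 2 wire layout written below is, in order: [magic][version],
// uvarints for the ack floor (consumer, stream) and delivered (consumer,
// stream) sequences, then len(pending) followed (when non-zero) by a base
// timestamp (now rounded to seconds) and per-entry sequences delta-encoded
// against the ack floors with second-resolution timestamp deltas, and finally
// len(redelivered) followed by per-entry sequence deltas and delivery counts.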
func encodeConsumerState(state *ConsumerState) []byte {
var hdr [seqsHdrSize]byte
var buf []byte
maxSize := seqsHdrSize
if lp := len(state.Pending); lp > 0 {
maxSize += lp*(3*binary.MaxVarintLen64) + binary.MaxVarintLen64
}
if lr := len(state.Redelivered); lr > 0 {
maxSize += lr*(2*binary.MaxVarintLen64) + binary.MaxVarintLen64
}
if maxSize == seqsHdrSize {
buf = hdr[:seqsHdrSize]
} else {
buf = make([]byte, maxSize)
}
now := time.Now()
// Write header
buf[0] = magic
buf[1] = 2
n := hdrLen
n += binary.PutUvarint(buf[n:], state.AckFloor.Consumer)
n += binary.PutUvarint(buf[n:], state.AckFloor.Stream)
n += binary.PutUvarint(buf[n:], state.Delivered.Consumer)
n += binary.PutUvarint(buf[n:], state.Delivered.Stream)
n += binary.PutUvarint(buf[n:], uint64(len(state.Pending)))
asflr := state.AckFloor.Stream
adflr := state.AckFloor.Consumer
// These sections are optional, but we always write their lengths; this avoids an inline truncate.
if len(state.Pending) > 0 {
// To save space we use now, rounded to seconds, as the base timestamp.
mints := now.Round(time.Second).Unix()
// Write the base timestamp.
n += binary.PutVarint(buf[n:], mints)
for k, v := range state.Pending {
n += binary.PutUvarint(buf[n:], k-asflr)
n += binary.PutUvarint(buf[n:], v.Sequence-adflr)
// Downsample to seconds to save on space.
// Subsecond resolution not needed for recovery etc.
ts := v.Timestamp / 1_000_000_000
n += binary.PutVarint(buf[n:], mints-ts)
}
}
// We always write the redelivered len.
n += binary.PutUvarint(buf[n:], uint64(len(state.Redelivered)))
// We expect these to be small.
if len(state.Redelivered) > 0 {
for k, v := range state.Redelivered {
n += binary.PutUvarint(buf[n:], k-asflr)
n += binary.PutUvarint(buf[n:], v)
}
}
return buf[:n]
}
func (o *consumerFileStore) Update(state *ConsumerState) error {
// Sanity checks.
if state.AckFloor.Consumer > state.Delivered.Consumer {
return fmt.Errorf("bad ack floor for consumer")
}
if state.AckFloor.Stream > state.Delivered.Stream {
return fmt.Errorf("bad ack floor for stream")
}
// Copy to our state.
var pending map[uint64]*Pending
var redelivered map[uint64]uint64
if len(state.Pending) > 0 {
pending = make(map[uint64]*Pending, len(state.Pending))
for seq, p := range state.Pending {
pending[seq] = &Pending{p.Sequence, p.Timestamp}
}
for seq := range pending {
if seq <= state.AckFloor.Stream || seq > state.Delivered.Stream {
return fmt.Errorf("bad pending entry, sequence [%d] out of range", seq)
}
}
}
if len(state.Redelivered) > 0 {
redelivered = make(map[uint64]uint64, len(state.Redelivered))
for seq, dc := range state.Redelivered {
redelivered[seq] = dc
}
}
// Replace our state.
o.mu.Lock()
o.state.Delivered = state.Delivered
o.state.AckFloor = state.AckFloor
o.state.Pending = pending
o.state.Redelivered = redelivered
o.mu.Unlock()
o.kickFlusher()
return nil
}
func (o *consumerFileStore) writeState(buf []byte) error {
// Check if we have the index file open.
o.mu.Lock()
if o.writing || len(buf) == 0 {
o.mu.Unlock()
return nil
}
if err := o.ensureStateFileOpen(); err != nil {
o.mu.Unlock()
return err
}
o.writing = true
ifd := o.ifd
o.mu.Unlock()
n, err := ifd.WriteAt(buf, 0)
o.mu.Lock()
if err == nil {
o.lwsz = int64(n)
}
o.writing = false
o.mu.Unlock()
return err
}
// Will update the config. Only used when recovering ephemerals.
func (o *consumerFileStore) updateConfig(cfg ConsumerConfig) error {
o.mu.Lock()
defer o.mu.Unlock()
o.cfg = &FileConsumerInfo{ConsumerConfig: cfg}
return o.writeConsumerMeta()
}
// Write out the consumer meta data, i.e. state.
// Lock should be held.
func (cfs *consumerFileStore) writeConsumerMeta() error {
meta := path.Join(cfs.odir, JetStreamMetaFile)
if _, err := os.Stat(meta); (err != nil && !os.IsNotExist(err)) || err == nil {
return err
}
b, err := json.Marshal(cfs.cfg)
if err != nil {
return err
}
if err := ioutil.WriteFile(meta, b, 0644); err != nil {
return err
}
cfs.hh.Reset()
cfs.hh.Write(b)
checksum := hex.EncodeToString(cfs.hh.Sum(nil))
sum := path.Join(cfs.odir, JetStreamMetaFileSum)
if err := ioutil.WriteFile(sum, []byte(checksum), 0644); err != nil {
return err
}
return nil
}
func (o *consumerFileStore) syncStateFile() {
// FIXME(dlc) - Hold last error?
o.mu.Lock()
if o.ifd != nil {
o.ifd.Sync()
o.ifd.Truncate(o.lwsz)
}
o.mu.Unlock()
}
// Lock should be held.
func (o *consumerFileStore) ensureStateFileOpen() error {
if o.ifd == nil {
ifd, err := os.OpenFile(o.ifn, os.O_CREATE|os.O_RDWR, 0644)
if err != nil {
return err
}
o.ifd = ifd
}
return nil
}
// Make sure the header is correct.
func checkHeader(hdr []byte) error {
if hdr == nil || len(hdr) < 2 || hdr[0] != magic || hdr[1] != version {
return errCorruptState
}
return nil
}
func checkConsumerHeader(hdr []byte) (uint8, error) {
if hdr == nil || len(hdr) < 2 || hdr[0] != magic {
return 0, errCorruptState
}
version := hdr[1]
switch version {
case 1, 2:
return version, nil
}
return 0, fmt.Errorf("unsupported version: %d", version)
}
func (o *consumerFileStore) copyPending() map[uint64]*Pending {
pending := make(map[uint64]*Pending, len(o.state.Pending))
for seq, p := range o.state.Pending {
pending[seq] = &Pending{p.Sequence, p.Timestamp}
}
return pending
}
func (o *consumerFileStore) copyRedelivered() map[uint64]uint64 {
redelivered := make(map[uint64]uint64, len(o.state.Redelivered))
for seq, dc := range o.state.Redelivered {
redelivered[seq] = dc
}
return redelivered
}
// State retrieves the state from the state file.
// This is not expected to be called in high performance code, only on startup.
func (o *consumerFileStore) State() (*ConsumerState, error) {
o.mu.Lock()
defer o.mu.Unlock()
var state *ConsumerState
// See if we have a running state or if we need to read in from disk.
if o.state.Delivered.Consumer != 0 {
state = &ConsumerState{}
state.Delivered = o.state.Delivered
state.AckFloor = o.state.AckFloor
if len(o.state.Pending) > 0 {
state.Pending = o.copyPending()
}
if len(o.state.Redelivered) > 0 {
state.Redelivered = o.copyRedelivered()
}
return state, nil
}
// Read the state in from disk.
buf, err := ioutil.ReadFile(o.ifn)
if err != nil && !os.IsNotExist(err) {
return nil, err
}
if len(buf) == 0 {
return state, nil
}
state, err = decodeConsumerState(buf)
if err != nil {
return nil, err
}
// Copy this state into our own.
o.state.Delivered = state.Delivered
o.state.AckFloor = state.AckFloor
if len(state.Pending) > 0 {
o.state.Pending = make(map[uint64]*Pending, len(state.Pending))
for seq, p := range state.Pending {
o.state.Pending[seq] = &Pending{p.Sequence, p.Timestamp}
}
}
if len(state.Redelivered) > 0 {
o.state.Redelivered = make(map[uint64]uint64, len(state.Redelivered))
for seq, dc := range state.Redelivered {
o.state.Redelivered[seq] = dc
}
}
return state, nil
}
func decodeConsumerState(buf []byte) (*ConsumerState, error) {
version, err := checkConsumerHeader(buf)
if err != nil {
return nil, err
}
bi := hdrLen
// Helpers, will set i to -1 on error.
readSeq := func() uint64 {
if bi < 0 {
return 0
}
seq, n := binary.Uvarint(buf[bi:])
if n <= 0 {
bi = -1
return 0
}
bi += n
return seq
}
readTimeStamp := func() int64 {
if bi < 0 {
return 0
}
ts, n := binary.Varint(buf[bi:])
if n <= 0 {
bi = -1
return -1
}
bi += n
return ts
}
// Just for clarity below.
readLen := readSeq
readCount := readSeq
state := &ConsumerState{}
state.AckFloor.Consumer = readSeq()
state.AckFloor.Stream = readSeq()
state.Delivered.Consumer = readSeq()
state.Delivered.Stream = readSeq()
if bi == -1 {
return nil, errCorruptState
}
if version == 1 {
// Adjust back. Version 1 also stored delivered as next to be delivered,
// so adjust that back down here.
state.Delivered.Consumer += state.AckFloor.Consumer - 1
state.Delivered.Stream += state.AckFloor.Stream - 1
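// E.g. with AckFloor.Consumer == 5 and a stored value of 3,
// Delivered.Consumer becomes 3 + 5 - 1 = 7.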
}
// Next come the pending entries, if any.
if numPending := readLen(); numPending > 0 {
mints := readTimeStamp()
state.Pending = make(map[uint64]*Pending, numPending)
for i := 0; i < int(numPending); i++ {
sseq := readSeq()
var dseq uint64
if version == 2 {
dseq = readSeq()
}
ts := readTimeStamp()
if sseq == 0 || ts == -1 {
return nil, errCorruptState
}
// Adjust seq back.
sseq += state.AckFloor.Stream
if version == 2 {
dseq += state.AckFloor.Consumer
}
// Adjust the timestamp back.
if version == 1 {
ts = (ts + mints) * int64(time.Second)
} else {
ts = (mints - ts) * int64(time.Second)
}
// Store in pending.
state.Pending[sseq] = &Pending{dseq, ts}
}
}
// We have redelivered entries here.
if numRedelivered := readLen(); numRedelivered > 0 {
state.Redelivered = make(map[uint64]uint64, numRedelivered)
for i := 0; i < int(numRedelivered); i++ {
seq := readSeq()
n := readCount()
if seq == 0 || n == 0 {
return nil, errCorruptState
}
state.Redelivered[seq] = n
}
}
return state, nil
}
// Stop the processing of the consumer's state.
func (o *consumerFileStore) Stop() error {
o.mu.Lock()
if o.closed {
o.mu.Unlock()
return nil
}
if o.qch != nil {
close(o.qch)
o.qch = nil
}
err := o.ensureStateFileOpen()
ifd := o.ifd
if err == nil {
var buf []byte
// Make sure to write this out.
if buf, err = o.encodeState(); err == nil {
_, err = ifd.WriteAt(buf, 0)
}
}
o.ifd, o.odir = nil, _EMPTY_
fs := o.fs
o.closed = true
o.kickFlusher()
o.mu.Unlock()
if ifd != nil {
ifd.Close()
}
fs.removeConsumer(o)
return err
}
// Delete the consumer.
func (o *consumerFileStore) Delete() error {
o.mu.Lock()
if o.closed {
o.mu.Unlock()
return nil
}
if o.qch != nil {
close(o.qch)
o.qch = nil
}
if o.ifd != nil {
o.ifd.Close()
o.ifd = nil
}
var err error
if o.odir != _EMPTY_ {
err = os.RemoveAll(o.odir)
}
o.ifd, o.odir = nil, _EMPTY_
o.closed = true
fs := o.fs
o.mu.Unlock()
fs.removeConsumer(o)
return err
}
func (fs *fileStore) removeConsumer(cfs *consumerFileStore) {
fs.mu.Lock()
defer fs.mu.Unlock()
for i, o := range fs.cfs {
if o == cfs {
fs.cfs = append(fs.cfs[:i], fs.cfs[i+1:]...)
break
}
}
}
////////////////////////////////////////////////////////////////////////////////
// Templates
////////////////////////////////////////////////////////////////////////////////
type templateFileStore struct {
dir string
hh hash.Hash64
}
func newTemplateFileStore(storeDir string) *templateFileStore {
tdir := path.Join(storeDir, tmplsDir)
key := sha256.Sum256([]byte("templates"))
hh, err := highwayhash.New64(key[:])
if err != nil {
return nil
}
return &templateFileStore{dir: tdir, hh: hh}
}
func (ts *templateFileStore) Store(t *streamTemplate) error {
dir := path.Join(ts.dir, t.Name)
if err := os.MkdirAll(dir, 0755); err != nil {
return fmt.Errorf("could not create templates storage directory for %q- %v", t.Name, err)
}
meta := path.Join(dir, JetStreamMetaFile)
if _, err := os.Stat(meta); (err != nil && !os.IsNotExist(err)) || err == nil {
return err
}
t.mu.Lock()
b, err := json.Marshal(t)
t.mu.Unlock()
if err != nil {
return err
}
if err := ioutil.WriteFile(meta, b, 0644); err != nil {
return err
}
// FIXME(dlc) - Do checksum
ts.hh.Reset()
ts.hh.Write(b)
checksum := hex.EncodeToString(ts.hh.Sum(nil))
sum := path.Join(dir, JetStreamMetaFileSum)
if err := ioutil.WriteFile(sum, []byte(checksum), 0644); err != nil {
return err
}
return nil
}
func (ts *templateFileStore) Delete(t *streamTemplate) error {
return os.RemoveAll(path.Join(ts.dir, t.Name))
}
| 1 | 12,638 | If sync has really a negative impact, this is just moving the issue from 10sec to 60sec. Wonder if you should not expose the (auto)sync params so users can decide. | nats-io-nats-server | go |
@@ -341,11 +341,12 @@ type Decoder interface {
// (0, false).
MapLen() (int, bool)
- // If MapLen returned true, the DecodeMap will be called. It should iterate over
- // the fields of the value being decoded, invoking the callback on each with the
- // field name and a Decoder for the field value. If the callback returns false,
- // DecodeMap should return immediately.
- DecodeMap(func(string, Decoder) bool)
+ // DecodeMap iterates over the fields of the value being decoded, invoke the
+ // callback on each with field name, a Decoder for the field value, and a bool
+ // to indicate whether or not to use exact match for the field names. It should
+ // be called when MapLen returns true or decoding a struct. If the callback
+ // returns false, DecodeMap should return immediately.
+ DecodeMap(func(string, Decoder, bool) bool)
// AsInterface should decode the value into the Go value that best represents it.
AsInterface() (interface{}, error) | 1 | // Copyright 2019 The Go Cloud Development Kit Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// TODO(jba): support struct tags.
// TODO(jba): for efficiency, enable encoding of only a subset of field paths.
package driver
import (
"encoding"
"fmt"
"reflect"
"strconv"
"github.com/golang/protobuf/proto"
"gocloud.dev/docstore/internal/fields"
"gocloud.dev/internal/gcerr"
)
var (
binaryMarshalerType = reflect.TypeOf((*encoding.BinaryMarshaler)(nil)).Elem()
binaryUnmarshalerType = reflect.TypeOf((*encoding.BinaryUnmarshaler)(nil)).Elem()
textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem()
textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
protoMessageType = reflect.TypeOf((*proto.Message)(nil)).Elem()
)
// An Encoder encodes Go values in some other form (e.g. JSON, protocol buffers).
// The encoding protocol is designed to avoid losing type information by passing
// values using interface{}. An Encoder is responsible for storing the value
// it is encoding.
//
// Because all providers must support the same set of values, the encoding methods
// (with the exception of EncodeSpecial) do not return errors. EncodeSpecial is special
// because it is an escape hatch for arbitrary structs, not all of which may be
// encodable.
type Encoder interface {
// These methods all encode and store a single Go value.
EncodeNil()
EncodeBool(bool)
EncodeString(string)
EncodeInt(int64)
EncodeUint(uint64)
EncodeFloat(float64)
EncodeBytes([]byte)
// EncodeList is called when a slice or array is encountered (except for a
// []byte, which is handled by EncodeBytes). Its argument is the length of the
// slice or array. The encoding algorithm will call the returned Encoder that
// many times to encode the successive values of the list. After each such call,
// ListIndex will be called with the index of the element just encoded.
//
// For example, []string{"a", "b"} will result in these calls:
// enc2 := enc.EncodeList(2)
// enc2.EncodeString("a")
// enc2.ListIndex(0)
// enc2.EncodeString("b")
// enc2.ListIndex(1)
EncodeList(n int) Encoder
ListIndex(i int)
// EncodeMap is called when a map is encountered. Its argument is the number of
// fields in the map. The encoding algorithm will call the returned Encoder that
// many times to encode the successive values of the map. After each such call,
// MapKey will be called with the key of the element just encoded.
//
// For example, map[string]int{"A": 1, "B": 2} will result in these calls:
// enc2 := enc.EncodeMap(2)
// enc2.EncodeInt(1)
// enc2.MapKey("A")
// enc2.EncodeInt(2)
// enc2.MapKey("B")
//
// EncodeMap is also called for structs. The map then consists of the exported
// fields of the struct. For struct{A, B int}{1, 2}, if EncodeSpecial returns
// false, the same sequence of calls as above will occur.
EncodeMap(n int) Encoder
MapKey(string)
// If the encoder wants to encode a value in a special way it should do so here
// and return true along with any error from the encoding. Otherwise, it should
// return false.
EncodeSpecial(v reflect.Value) (bool, error)
}
// Encode encodes the value using the given Encoder. It traverses the value,
// iterating over arrays, slices, maps and the exported fields of structs. If it
// encounters a non-nil pointer, it encodes the value that it points to.
// Encode treats a few interfaces specially:
//
// If the value implements encoding.BinaryMarshaler, Encode invokes MarshalBinary
// on it and encodes the resulting byte slice.
//
// If the value implements encoding.TextMarshaler, Encode invokes MarshalText on it
// and encodes the resulting string.
//
// If the value implements proto.Message, Encode invokes proto.Marshal on it and encodes
// the resulting byte slice. Here proto is the package "github.com/golang/protobuf/proto".
//
// Not every map key type can be encoded. Only strings, integers (signed or
// unsigned), and types that implement encoding.TextMarshaler are permitted as map
// keys. These restrictions match exactly those of the encoding/json package.
func Encode(v reflect.Value, e Encoder) error {
return wrap(encode(v, e), gcerr.InvalidArgument)
}
func encode(v reflect.Value, enc Encoder) error {
if !v.IsValid() {
enc.EncodeNil()
return nil
}
done, err := enc.EncodeSpecial(v)
if done {
return err
}
if v.Type().Implements(binaryMarshalerType) {
bytes, err := v.Interface().(encoding.BinaryMarshaler).MarshalBinary()
if err != nil {
return err
}
enc.EncodeBytes(bytes)
return nil
}
if v.Type().Implements(protoMessageType) {
if v.IsNil() {
enc.EncodeNil()
} else {
bytes, err := proto.Marshal(v.Interface().(proto.Message))
if err != nil {
return err
}
enc.EncodeBytes(bytes)
}
return nil
}
if v.Type().Implements(textMarshalerType) {
bytes, err := v.Interface().(encoding.TextMarshaler).MarshalText()
if err != nil {
return err
}
enc.EncodeString(string(bytes))
return nil
}
switch v.Kind() {
case reflect.Bool:
enc.EncodeBool(v.Bool())
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
enc.EncodeInt(v.Int())
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
enc.EncodeUint(v.Uint())
case reflect.Float32, reflect.Float64:
enc.EncodeFloat(v.Float())
case reflect.String:
enc.EncodeString(v.String())
case reflect.Slice:
if v.IsNil() {
enc.EncodeNil()
return nil
}
fallthrough
case reflect.Array:
return encodeList(v, enc)
case reflect.Map:
return encodeMap(v, enc)
case reflect.Ptr:
if v.IsNil() {
enc.EncodeNil()
return nil
}
return encode(v.Elem(), enc)
case reflect.Interface:
if v.IsNil() {
enc.EncodeNil()
return nil
}
return encode(v.Elem(), enc)
case reflect.Struct:
fields, err := fieldCache.Fields(v.Type())
if err != nil {
return err
}
return encodeStructWithFields(v, fields, enc)
default:
return gcerr.Newf(gcerr.InvalidArgument, nil, "cannot encode type %s", v.Type())
}
return nil
}
// Encode an array or non-nil slice.
func encodeList(v reflect.Value, enc Encoder) error {
// Byte slices encode specially.
if v.Type().Kind() == reflect.Slice && v.Type().Elem().Kind() == reflect.Uint8 {
enc.EncodeBytes(v.Bytes())
return nil
}
n := v.Len()
enc2 := enc.EncodeList(n)
for i := 0; i < n; i++ {
if err := encode(v.Index(i), enc2); err != nil {
return err
}
enc2.ListIndex(i)
}
return nil
}
// Encode a map.
func encodeMap(v reflect.Value, enc Encoder) error {
if v.IsNil() {
enc.EncodeNil()
return nil
}
keys := v.MapKeys()
enc2 := enc.EncodeMap(len(keys))
for _, k := range keys {
sk, err := stringifyMapKey(k)
if err != nil {
return err
}
if err := encode(v.MapIndex(k), enc2); err != nil {
return err
}
enc2.MapKey(sk)
}
return nil
}
// k is the key of a map. Encode it as a string.
// Only strings, integers (signed or unsigned), and types that implement
// encoding.TextMarshaler are supported.
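// For example, the key 3 of a map[int]string is encoded as the string "3".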
func stringifyMapKey(k reflect.Value) (string, error) {
// This is basically reflectWithString.resolve, from encoding/json/encode.go.
if k.Kind() == reflect.String {
return k.String(), nil
}
if tm, ok := k.Interface().(encoding.TextMarshaler); ok {
b, err := tm.MarshalText()
if err != nil {
return "", err
}
return string(b), nil
}
switch k.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return strconv.FormatInt(k.Int(), 10), nil
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return strconv.FormatUint(k.Uint(), 10), nil
default:
return "", gcerr.Newf(gcerr.InvalidArgument, nil, "cannot encode key %v of type %s", k, k.Type())
}
}
func encodeStructWithFields(v reflect.Value, fields fields.List, e Encoder) error {
e2 := e.EncodeMap(len(fields))
for _, f := range fields {
fv, ok := fieldByIndex(v, f.Index)
if !ok {
// if !ok, then f is a field in an embedded pointer to struct, and that embedded pointer
// is nil in v. In other words, the field exists in the struct type, but not this particular
// struct value. So we just ignore it.
continue
}
if f.ParsedTag.(tagOptions).omitEmpty && IsEmptyValue(fv) {
continue
}
if err := encode(fv, e2); err != nil {
return err
}
e2.MapKey(f.Name)
}
return nil
}
// fieldByIndex retrieves the field of v at the given index if present.
// v must be a struct. index must refer to a valid field of v's type.
// The second return value is false if there is a nil embedded pointer
// along the path denoted by index.
//
// From encoding/json/encode.go.
func fieldByIndex(v reflect.Value, index []int) (reflect.Value, bool) {
for _, i := range index {
if v.Kind() == reflect.Ptr {
if v.IsNil() {
return reflect.Value{}, false
}
v = v.Elem()
}
v = v.Field(i)
}
return v, true
}
////////////////////////////////////////////////////////////////
// TODO(jba): consider a fast path: if we are decoding into a struct, assume the same struct
// was used to encode. Then we can build a map from field names to functions, where each
// function avoids all the tests of Decode and contains just the code for setting the field.
// TODO(jba): provide a way to override the check on missing fields.
// A Decoder decodes data that was produced by Encode back into Go values.
// Each Decoder instance is responsible for decoding one value.
type Decoder interface {
// The AsXXX methods each report whether the value being decoded can be represented as
// a particular Go type. If so, the method should return the value as that type, and true;
// otherwise it should return the zero value and false.
AsString() (string, bool)
AsInt() (int64, bool)
AsUint() (uint64, bool)
AsFloat() (float64, bool)
AsBytes() ([]byte, bool)
AsBool() (bool, bool)
AsNull() bool
// ListLen should return the length of the value being decoded and true, if the
// value can be decoded into a slice or array. Otherwise, ListLen should return
// (0, false).
ListLen() (int, bool)
// If ListLen returned true, then DecodeList will be called. It should iterate
// over the value being decoded in sequence from index 0, invoking the callback
// for each element with the element's index and a Decoder for the element.
// If the callback returns false, DecodeList should return immediately.
DecodeList(func(int, Decoder) bool)
// MapLen should return the number of fields of the value being decoded and true,
// if the value can be decoded into a map or struct. Otherwise, it should return
// (0, false).
MapLen() (int, bool)
// If MapLen returned true, the DecodeMap will be called. It should iterate over
// the fields of the value being decoded, invoking the callback on each with the
// field name and a Decoder for the field value. If the callback returns false,
// DecodeMap should return immediately.
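// For example, decoding the map {"A": 1, "B": 2} results in two callback
// invocations, in unspecified order, roughly:
//   callback("A", <Decoder yielding 1>)
//   callback("B", <Decoder yielding 2>)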
DecodeMap(func(string, Decoder) bool)
// AsInterface should decode the value into the Go value that best represents it.
AsInterface() (interface{}, error)
// If the decoder wants to decode a value in a special way it should do so here
// and return true, the decoded value, and any error from the decoding.
// Otherwise, it should return false.
AsSpecial(reflect.Value) (bool, interface{}, error)
// String should return a human-readable representation of the Decoder, for error messages.
String() string
}
// Decode decodes the value held in the Decoder d into v.
// Decode creates slices, maps and pointer elements as needed.
// It treats values that implement encoding.BinaryUnmarshaler, encoding.TextUnmarshaler
// and proto.Message specially; see Encode.
func Decode(v reflect.Value, d Decoder) error {
return wrap(decode(v, d), gcerr.InvalidArgument)
}
func decode(v reflect.Value, d Decoder) error {
if !v.CanSet() {
return fmt.Errorf("while decoding: cannot set %+v", v)
}
// A Null value sets anything nullable to nil.
// If the value isn't nullable, we keep going.
// TODO(jba): should we treat decoding a null into a non-nullable as an error, or
// ignore it like encoding/json does?
if d.AsNull() {
switch v.Kind() {
case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice:
v.Set(reflect.Zero(v.Type()))
return nil
}
}
if done, val, err := d.AsSpecial(v); done {
if err != nil {
return err
}
v.Set(reflect.ValueOf(val))
return nil
}
// Handle implemented interfaces first.
if reflect.PtrTo(v.Type()).Implements(binaryUnmarshalerType) {
if b, ok := d.AsBytes(); ok {
return v.Addr().Interface().(encoding.BinaryUnmarshaler).UnmarshalBinary(b)
}
return decodingError(v, d)
}
if reflect.PtrTo(v.Type()).Implements(protoMessageType) {
if b, ok := d.AsBytes(); ok {
return proto.Unmarshal(b, v.Addr().Interface().(proto.Message))
}
return decodingError(v, d)
}
if reflect.PtrTo(v.Type()).Implements(textUnmarshalerType) {
if s, ok := d.AsString(); ok {
return v.Addr().Interface().(encoding.TextUnmarshaler).UnmarshalText([]byte(s))
}
return decodingError(v, d)
}
switch v.Kind() {
case reflect.Bool:
if b, ok := d.AsBool(); ok {
v.SetBool(b)
return nil
}
case reflect.String:
if s, ok := d.AsString(); ok {
v.SetString(s)
return nil
}
case reflect.Float32, reflect.Float64:
if f, ok := d.AsFloat(); ok {
v.SetFloat(f)
return nil
}
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
i, ok := d.AsInt()
if !ok {
// Accept a floating-point number with integral value.
f, ok := d.AsFloat()
if !ok {
return decodingError(v, d)
}
i = int64(f)
if float64(i) != f {
return gcerr.Newf(gcerr.InvalidArgument, nil, "float %f does not fit into %s", f, v.Type())
}
}
if v.OverflowInt(i) {
return overflowError(i, v.Type())
}
v.SetInt(i)
return nil
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
u, ok := d.AsUint()
if !ok {
// Accept a floating-point number with integral value.
f, ok := d.AsFloat()
if !ok {
return decodingError(v, d)
}
u = uint64(f)
if float64(u) != f {
return gcerr.Newf(gcerr.InvalidArgument, nil, "float %f does not fit into %s", f, v.Type())
}
}
if v.OverflowUint(u) {
return overflowError(u, v.Type())
}
v.SetUint(u)
return nil
case reflect.Slice, reflect.Array:
return decodeList(v, d)
case reflect.Map:
return decodeMap(v, d)
case reflect.Ptr:
// If the pointer is nil, set it to a zero value.
if v.IsNil() {
v.Set(reflect.New(v.Type().Elem()))
}
return decode(v.Elem(), d)
case reflect.Struct:
return decodeStruct(v, d)
case reflect.Interface:
if v.NumMethod() == 0 { // empty interface
// If v holds a pointer, set the pointer.
if !v.IsNil() && v.Elem().Kind() == reflect.Ptr {
return decode(v.Elem(), d)
}
// Otherwise, create a fresh value.
x, err := d.AsInterface()
if err != nil {
return err
}
v.Set(reflect.ValueOf(x))
return nil
}
// Any other kind of interface cannot be decoded; fall through to the error below.
}
return decodingError(v, d)
}
func decodeList(v reflect.Value, d Decoder) error {
// If we're decoding into a byte slice or array, and the decoded value
// supports that, then do the decoding.
if v.Type().Elem().Kind() == reflect.Uint8 {
if b, ok := d.AsBytes(); ok {
v.SetBytes(b)
return nil
}
// Fall through to decode the []byte as an ordinary slice.
}
dlen, ok := d.ListLen()
if !ok {
return decodingError(v, d)
}
err := prepareLength(v, dlen)
if err != nil {
return err
}
d.DecodeList(func(i int, vd Decoder) bool {
if err != nil || i >= dlen {
return false
}
err = decode(v.Index(i), vd)
return err == nil
})
return err
}
// v must be a slice or array. We want it to be of length wantLen. Prepare it as
// necessary (details described in the code below).
// If an array is too short, return an error. This behavior differs from
// encoding/json, which just populates a short array with whatever it can and drops
// the rest. That can lose data.
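// For example, decoding a 3-element list into a slice of length 5 truncates
// the slice to 3; into a [5]int array, the last two elements are zeroed; and
// into a [2]int array, an error is returned.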
func prepareLength(v reflect.Value, wantLen int) error {
vLen := v.Len()
if v.Kind() == reflect.Slice {
// Construct a slice of the right size, avoiding allocation if possible.
switch {
case vLen < wantLen: // v too short
if v.Cap() >= wantLen { // extend its length if there's room
v.SetLen(wantLen)
} else { // else make a new one
v.Set(reflect.MakeSlice(v.Type(), wantLen, wantLen))
}
case vLen > wantLen: // v too long; truncate it
v.SetLen(wantLen)
}
} else { // array
switch {
case vLen < wantLen: // v too short
return gcerr.Newf(gcerr.InvalidArgument, nil, "array length %d is too short for incoming list of length %d",
vLen, wantLen)
case vLen > wantLen: // v too long; set extra elements to zero
z := reflect.Zero(v.Type().Elem())
for i := wantLen; i < vLen; i++ {
v.Index(i).Set(z)
}
}
}
return nil
}
// Since a map value is not settable via reflection, this function always creates a
// new element for each corresponding map key. Existing values of v are overwritten.
// This happens even if the map value is something like a pointer to a struct, where
// we could in theory populate the existing struct value instead of discarding it.
// This behavior matches encoding/json.
func decodeMap(v reflect.Value, d Decoder) error {
mapLen, ok := d.MapLen()
if !ok {
return decodingError(v, d)
}
t := v.Type()
if v.IsNil() {
v.Set(reflect.MakeMapWithSize(t, mapLen))
}
et := t.Elem()
var err error
kt := v.Type().Key()
d.DecodeMap(func(key string, vd Decoder) bool {
if err != nil {
return false
}
el := reflect.New(et).Elem()
err = decode(el, vd)
if err != nil {
return false
}
vk, e := unstringifyMapKey(key, kt)
if e != nil {
err = e
return false
}
v.SetMapIndex(vk, el)
return err == nil
})
return err
}
// Given a map key encoded as a string, and the type of the map key, convert the key
// into the type.
// For example, if we are decoding the key "3" for a map[int]interface{}, then key is "3"
// and keyType is the reflect.Type for int.
func unstringifyMapKey(key string, keyType reflect.Type) (reflect.Value, error) {
// This code is mostly from the middle of decodeState.object in encoding/json/decode.go.
// Except for literalStore, which I don't understand.
// TODO(jba): understand literalStore.
switch {
case keyType.Kind() == reflect.String:
return reflect.ValueOf(key).Convert(keyType), nil
case reflect.PtrTo(keyType).Implements(textUnmarshalerType):
tu := reflect.New(keyType)
if err := tu.Interface().(encoding.TextUnmarshaler).UnmarshalText([]byte(key)); err != nil {
return reflect.Value{}, err
}
return tu.Elem(), nil
case keyType.Kind() == reflect.Interface && keyType.NumMethod() == 0:
// TODO: remove this case? encoding/json doesn't support it.
return reflect.ValueOf(key), nil
default:
switch keyType.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
n, err := strconv.ParseInt(key, 10, 64)
if err != nil {
return reflect.Value{}, err
}
if reflect.Zero(keyType).OverflowInt(n) {
return reflect.Value{}, overflowError(n, keyType)
}
return reflect.ValueOf(n).Convert(keyType), nil
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
n, err := strconv.ParseUint(key, 10, 64)
if err != nil {
return reflect.Value{}, err
}
if reflect.Zero(keyType).OverflowUint(n) {
return reflect.Value{}, overflowError(n, keyType)
}
return reflect.ValueOf(n).Convert(keyType), nil
default:
return reflect.Value{}, gcerr.Newf(gcerr.InvalidArgument, nil, "invalid key type %s", keyType)
}
}
}
func decodeStruct(v reflect.Value, d Decoder) error {
fields, err := fieldCache.Fields(v.Type())
if err != nil {
return err
}
d.DecodeMap(func(key string, d2 Decoder) bool {
if err != nil {
return false
}
f := fields.MatchFold(key)
if f == nil {
err = gcerr.Newf(gcerr.InvalidArgument, nil, "no field matching %q in %s", key, v.Type())
return false
}
fv, ok := fieldByIndexCreate(v, f.Index)
if !ok {
err = gcerr.Newf(gcerr.InvalidArgument, nil,
"setting field %q in %s: cannot create embedded pointer field of unexported type",
key, v.Type())
return false
}
err = decode(fv, d2)
return err == nil
})
return err
}
// fieldByIndexCreate retrieves the field of v at the given index if present,
// creating embedded struct pointers where necessary.
// v must be a struct. index must refer to a valid field of v's type.
// The second return value is false if there is a nil embedded pointer of unexported
// type along the path denoted by index. (We cannot create such pointers.)
func fieldByIndexCreate(v reflect.Value, index []int) (reflect.Value, bool) {
for _, i := range index {
if v.Kind() == reflect.Ptr {
if v.IsNil() {
if !v.CanSet() {
return reflect.Value{}, false
}
v.Set(reflect.New(v.Type().Elem()))
}
v = v.Elem()
}
v = v.Field(i)
}
return v, true
}
func decodingError(v reflect.Value, d Decoder) error {
return gcerr.Newf(gcerr.InvalidArgument, nil, "cannot set type %s to %v", v.Type(), d)
}
func overflowError(x interface{}, t reflect.Type) error {
return gcerr.Newf(gcerr.InvalidArgument, nil, "value %v overflows type %s", x, t)
}
func wrap(err error, code gcerr.ErrorCode) error {
if _, ok := err.(*gcerr.Error); !ok && err != nil {
err = gcerr.New(code, err, 2, err.Error())
}
return err
}
var fieldCache = fields.NewCache(parseTag, nil, nil)
// Copied from encoding/json, go 1.12.
func IsEmptyValue(v reflect.Value) bool {
switch k := v.Kind(); k {
case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
return v.Len() == 0
case reflect.Bool:
return !v.Bool()
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return v.Int() == 0
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return v.Uint() == 0
case reflect.Float32, reflect.Float64:
return v.Float() == 0
case reflect.Interface, reflect.Ptr:
return v.IsNil()
}
return false
}
// Options for struct tags.
type tagOptions struct {
omitEmpty bool // do not encode value if empty
}
// parseTag interprets docstore struct field tags.
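// For example, `docstore:"name,omitempty"` (or, absent a docstore tag,
// `json:"name,omitempty"`) maps the field to "name" and omits it when empty.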
func parseTag(t reflect.StructTag) (name string, keep bool, other interface{}, err error) {
var opts []string
if _, ok := t.Lookup("docstore"); ok {
name, keep, opts = fields.ParseStandardTag("docstore", t)
} else {
name, keep, opts = fields.ParseStandardTag("json", t)
}
tagOpts := tagOptions{}
for _, opt := range opts {
switch opt {
case "omitempty":
tagOpts.omitEmpty = true
default:
return "", false, nil, gcerr.Newf(gcerr.InvalidArgument, nil, "unknown tag option: %q", opt)
}
}
return name, keep, tagOpts, nil
}
| 1 | 18,860 | should be called => will be called | google-go-cloud | go |
@@ -133,12 +133,11 @@ func (c *roundCalculator) roundInfo(
) (roundNum uint32, roundStartTime time.Time, err error) {
lastBlockTime := time.Unix(c.chain.GenesisTimestamp(), 0)
if height > 1 {
- var lastBlock *block.Footer
- if lastBlock, err = c.chain.BlockFooterByHeight(height - 1); err != nil {
+ var lastBlock *block.Header
+ if lastBlock, err = c.chain.BlockHeaderByHeight(height - 1); err != nil {
return
}
- lastBlockCommitTime := lastBlock.CommitTime()
- lastBlockTime = lastBlockTime.Add(lastBlockCommitTime.Sub(lastBlockTime) / c.blockInterval * c.blockInterval)
+ lastBlockTime = lastBlockTime.Add(lastBlock.Timestamp().Sub(lastBlockTime) / c.blockInterval * c.blockInterval)
}
if !lastBlockTime.Before(now) {
err = errors.Errorf( | 1 | // Copyright (c) 2019 IoTeX Foundation
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package rolldpos
import (
"time"
"github.com/pkg/errors"
"github.com/iotexproject/iotex-core/action/protocol/rolldpos"
"github.com/iotexproject/iotex-core/blockchain"
"github.com/iotexproject/iotex-core/blockchain/block"
"github.com/iotexproject/iotex-core/crypto"
"github.com/iotexproject/iotex-core/endorsement"
)
type roundCalculator struct {
chain blockchain.Blockchain
blockInterval time.Duration
timeBasedRotation bool
rp *rolldpos.Protocol
candidatesByHeightFunc CandidatesByHeightFunc
}
func (c *roundCalculator) BlockInterval() time.Duration {
return c.blockInterval
}
func (c *roundCalculator) UpdateRound(round *roundCtx, height uint64, now time.Time, toleratedOvertime time.Duration) (*roundCtx, error) {
epochNum := round.EpochNum()
epochStartHeight := round.EpochStartHeight()
delegates := round.Delegates()
switch {
case height < round.Height():
return nil, errors.New("cannot update to a lower height")
case height == round.Height():
if now.Before(round.StartTime()) {
return round, nil
}
default:
if height >= round.NextEpochStartHeight() {
epochNum = c.rp.GetEpochNum(height)
epochStartHeight = c.rp.GetEpochHeight(epochNum)
var err error
if delegates, err = c.Delegates(epochStartHeight); err != nil {
return nil, err
}
}
}
roundNum, roundStartTime, err := c.roundInfo(height, now, toleratedOvertime)
if err != nil {
return nil, err
}
proposer, err := c.calculateProposer(height, roundNum, delegates)
if err != nil {
return nil, err
}
var eManager *endorsementManager
var status status
var blockInLock []byte
var proofOfLock []*endorsement.Endorsement
if height == round.Height() {
err = round.eManager.Cleanup(roundStartTime)
if err != nil {
return nil, err
}
status = round.status
blockInLock = round.blockInLock
proofOfLock = round.proofOfLock
} else {
err = round.eManager.Cleanup(time.Time{})
if err != nil {
return nil, err
}
}
eManager = round.eManager
return &roundCtx{
epochNum: epochNum,
epochStartHeight: epochStartHeight,
nextEpochStartHeight: c.rp.GetEpochHeight(epochNum + 1),
delegates: delegates,
height: height,
roundNum: roundNum,
proposer: proposer,
roundStartTime: roundStartTime,
nextRoundStartTime: roundStartTime.Add(c.blockInterval),
eManager: eManager,
status: status,
blockInLock: blockInLock,
proofOfLock: proofOfLock,
}, nil
}
func (c *roundCalculator) Proposer(height uint64, roundStartTime time.Time) string {
round, err := c.newRound(height, roundStartTime, nil, 0)
if err != nil {
return ""
}
return round.Proposer()
}
func (c *roundCalculator) IsDelegate(addr string, height uint64) bool {
delegates, err := c.Delegates(height)
if err != nil {
return false
}
for _, d := range delegates {
if addr == d {
return true
}
}
return false
}
func (c *roundCalculator) RoundInfo(
height uint64,
now time.Time,
) (roundNum uint32, roundStartTime time.Time, err error) {
return c.roundInfo(height, now, 0)
}
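// roundInfo computes the current round number and its start time from the
// time elapsed since the (block-interval-aligned) last block time.
// Illustrative trace, assuming a 10s block interval and toleratedOvertime == 0:
// with now 25s past the last block time, duration/interval yields 2, the
// remainder check decrements it to roundNum = 1, and roundStartTime is
// lastBlockTime + 20s.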
func (c *roundCalculator) roundInfo(
height uint64,
now time.Time,
toleratedOvertime time.Duration,
) (roundNum uint32, roundStartTime time.Time, err error) {
lastBlockTime := time.Unix(c.chain.GenesisTimestamp(), 0)
if height > 1 {
var lastBlock *block.Footer
if lastBlock, err = c.chain.BlockFooterByHeight(height - 1); err != nil {
return
}
lastBlockCommitTime := lastBlock.CommitTime()
lastBlockTime = lastBlockTime.Add(lastBlockCommitTime.Sub(lastBlockTime) / c.blockInterval * c.blockInterval)
}
if !lastBlockTime.Before(now) {
err = errors.Errorf(
"last block time %s is a future time, vs now %s",
lastBlockTime,
now,
)
return
}
duration := now.Sub(lastBlockTime)
if duration > c.blockInterval {
roundNum = uint32(duration / c.blockInterval)
if toleratedOvertime == 0 || duration%c.blockInterval < toleratedOvertime {
roundNum--
}
}
roundStartTime = lastBlockTime.Add(time.Duration(roundNum+1) * c.blockInterval)
return roundNum, roundStartTime, nil
}
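// Delegates returns the delegate list for the epoch containing height: the
// candidates at the epoch start height, capped at NumCandidateDelegates, are
// reordered deterministically using the epoch start height and a crypto seed,
// and the first NumDelegates of them serve.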
func (c *roundCalculator) Delegates(height uint64) ([]string, error) {
epochStartHeight := c.rp.GetEpochHeight(c.rp.GetEpochNum(height))
numDelegates := c.rp.NumDelegates()
candidates, err := c.candidatesByHeightFunc(epochStartHeight)
if err != nil {
return nil, errors.Wrapf(
err,
"failed to get candidates on height %d",
epochStartHeight,
)
}
if len(candidates) < int(numDelegates) {
return nil, errors.Errorf(
"# of candidates %d is less than from required number %d",
len(candidates),
numDelegates,
)
}
addrs := []string{}
for i, candidate := range candidates {
if uint64(i) >= c.rp.NumCandidateDelegates() {
break
}
addrs = append(addrs, candidate.Address)
}
crypto.SortCandidates(addrs, epochStartHeight, crypto.CryptoSeed)
return addrs[:numDelegates], nil
}
func (c *roundCalculator) NewRoundWithToleration(
height uint64,
now time.Time,
eManager *endorsementManager,
toleratedOvertime time.Duration,
) (round *roundCtx, err error) {
return c.newRound(height, now, eManager, toleratedOvertime)
}
func (c *roundCalculator) NewRound(
height uint64,
now time.Time,
eManager *endorsementManager,
) (round *roundCtx, err error) {
return c.newRound(height, now, eManager, 0)
}
func (c *roundCalculator) newRound(
height uint64,
now time.Time,
eManager *endorsementManager,
toleratedOvertime time.Duration,
) (round *roundCtx, err error) {
epochNum := uint64(0)
epochStartHeight := uint64(0)
var delegates []string
var roundNum uint32
var proposer string
var roundStartTime time.Time
if height != 0 {
epochNum = c.rp.GetEpochNum(height)
// Note: plain assignment so the outer epochStartHeight is set, not shadowed.
epochStartHeight = c.rp.GetEpochHeight(epochNum)
if delegates, err = c.Delegates(epochStartHeight); err != nil {
return
}
if roundNum, roundStartTime, err = c.roundInfo(height, now, toleratedOvertime); err != nil {
return
}
if proposer, err = c.calculateProposer(height, roundNum, delegates); err != nil {
return
}
}
if eManager == nil {
if eManager, err = newEndorsementManager(nil); err != nil {
return nil, err
}
}
round = &roundCtx{
epochNum: epochNum,
epochStartHeight: epochStartHeight,
nextEpochStartHeight: c.rp.GetEpochHeight(epochNum + 1),
delegates: delegates,
height: height,
roundNum: roundNum,
proposer: proposer,
eManager: eManager,
roundStartTime: roundStartTime,
nextRoundStartTime: roundStartTime.Add(c.blockInterval),
status: open,
}
eManager.SetIsMarjorityFunc(round.EndorsedByMajority)
return round, nil
}
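// calculateProposer picks the proposer deterministically: the index is the
// block height, plus the round number when time-based rotation is enabled,
// modulo the number of delegates. For example, with 3 delegates, height 10,
// round 2 and rotation enabled, delegates[(10+2)%3] == delegates[0] proposes.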
func (c *roundCalculator) calculateProposer(
height uint64,
round uint32,
delegates []string,
) (proposer string, err error) {
numDelegates := c.rp.NumDelegates()
if numDelegates != uint64(len(delegates)) {
err = errors.New("invalid delegate list")
return
}
idx := height
if c.timeBasedRotation {
idx += uint64(round)
}
proposer = delegates[idx%numDelegates]
return
}
| 1 | 18,884 | not sure whether we should do this. It may cause problem that delegates upgrade their nodes at different time, that they will have different "last block time", some use "commit time", some use "block time". Potential solution: Only use block time after berling, and then delete it in the next version after berling. Open to discussion. | iotexproject-iotex-core | go |
@@ -190,10 +190,12 @@ public class CreateCollectionCmd implements OverseerCollectionMessageHandler.Cmd
try {
replicaPositions = buildReplicaPositions(ocmh.cloudManager, clusterState, clusterState.getCollection(collectionName), message, shardNames, sessionWrapper);
} catch (Assign.AssignmentException e) {
- ZkNodeProps deleteMessage = new ZkNodeProps("name", collectionName);
- new DeleteCollectionCmd(ocmh).call(clusterState, deleteMessage, results);
+ deleteCollection(clusterState, results, collectionName);
// unwrap the exception
throw new SolrException(ErrorCode.SERVER_ERROR, e.getMessage(), e.getCause());
+ } catch (SolrException e) {
+ deleteCollection(clusterState, results, collectionName);
+ throw e;
}
if (replicaPositions.isEmpty()) { | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.cloud.api.collections;
import java.io.IOException;
import java.lang.invoke.MethodHandles;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.NoSuchElementException;
import java.util.Properties;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicReference;
import org.apache.solr.client.solrj.cloud.DistribStateManager;
import org.apache.solr.client.solrj.cloud.SolrCloudManager;
import org.apache.solr.client.solrj.cloud.autoscaling.AlreadyExistsException;
import org.apache.solr.client.solrj.cloud.autoscaling.BadVersionException;
import org.apache.solr.client.solrj.cloud.autoscaling.NotEmptyException;
import org.apache.solr.client.solrj.cloud.autoscaling.PolicyHelper;
import org.apache.solr.client.solrj.cloud.autoscaling.VersionedData;
import org.apache.solr.cloud.Overseer;
import org.apache.solr.cloud.ZkController;
import org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.ShardRequestTracker;
import org.apache.solr.cloud.overseer.ClusterStateMutator;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.SolrException.ErrorCode;
import org.apache.solr.common.cloud.Aliases;
import org.apache.solr.common.cloud.ClusterState;
import org.apache.solr.common.cloud.DocCollection;
import org.apache.solr.common.cloud.DocRouter;
import org.apache.solr.common.cloud.ImplicitDocRouter;
import org.apache.solr.common.cloud.Replica;
import org.apache.solr.common.cloud.ReplicaPosition;
import org.apache.solr.common.cloud.ZkConfigManager;
import org.apache.solr.common.cloud.ZkNodeProps;
import org.apache.solr.common.cloud.ZkStateReader;
import org.apache.solr.common.cloud.ZooKeeperException;
import org.apache.solr.common.params.CollectionAdminParams;
import org.apache.solr.common.params.CommonAdminParams;
import org.apache.solr.common.params.CoreAdminParams;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.common.util.SimpleOrderedMap;
import org.apache.solr.common.util.TimeSource;
import org.apache.solr.common.util.Utils;
import org.apache.solr.handler.admin.ConfigSetsHandlerApi;
import org.apache.solr.handler.component.ShardHandler;
import org.apache.solr.handler.component.ShardRequest;
import org.apache.solr.util.TimeOut;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.KeeperException.NoNodeException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.apache.solr.common.cloud.ZkStateReader.MAX_SHARDS_PER_NODE;
import static org.apache.solr.common.cloud.ZkStateReader.NRT_REPLICAS;
import static org.apache.solr.common.cloud.ZkStateReader.PULL_REPLICAS;
import static org.apache.solr.common.cloud.ZkStateReader.REPLICATION_FACTOR;
import static org.apache.solr.common.cloud.ZkStateReader.TLOG_REPLICAS;
import static org.apache.solr.common.params.CollectionAdminParams.ALIAS;
import static org.apache.solr.common.params.CollectionAdminParams.COLL_CONF;
import static org.apache.solr.common.params.CollectionAdminParams.COLOCATED_WITH;
import static org.apache.solr.common.params.CollectionParams.CollectionAction.ADDREPLICA;
import static org.apache.solr.common.params.CollectionParams.CollectionAction.MODIFYCOLLECTION;
import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
import static org.apache.solr.common.params.CommonAdminParams.WAIT_FOR_FINAL_STATE;
import static org.apache.solr.common.params.CommonParams.NAME;
import static org.apache.solr.common.util.StrUtils.formatString;
public class CreateCollectionCmd implements OverseerCollectionMessageHandler.Cmd {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
private final OverseerCollectionMessageHandler ocmh;
private final TimeSource timeSource;
private final DistribStateManager stateManager;
public CreateCollectionCmd(OverseerCollectionMessageHandler ocmh) {
this.ocmh = ocmh;
this.stateManager = ocmh.cloudManager.getDistribStateManager();
this.timeSource = ocmh.cloudManager.getTimeSource();
}
@Override
public void call(ClusterState clusterState, ZkNodeProps message, NamedList results) throws Exception {
if (ocmh.zkStateReader.aliasesManager != null) { // not a mock ZkStateReader
ocmh.zkStateReader.aliasesManager.update();
}
final Aliases aliases = ocmh.zkStateReader.getAliases();
final String collectionName = message.getStr(NAME);
final boolean waitForFinalState = message.getBool(WAIT_FOR_FINAL_STATE, false);
final String alias = message.getStr(ALIAS, collectionName);
log.info("Create collection {}", collectionName);
if (clusterState.hasCollection(collectionName)) {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "collection already exists: " + collectionName);
}
if (aliases.hasAlias(collectionName)) {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "collection alias already exists: " + collectionName);
}
String withCollection = message.getStr(CollectionAdminParams.WITH_COLLECTION);
String withCollectionShard = null;
if (withCollection != null) {
String realWithCollection = aliases.resolveSimpleAlias(withCollection);
if (!clusterState.hasCollection(realWithCollection)) {
throw new SolrException(ErrorCode.BAD_REQUEST, "The 'withCollection' does not exist: " + realWithCollection);
} else {
DocCollection collection = clusterState.getCollection(realWithCollection);
if (collection.getActiveSlices().size() > 1) {
throw new SolrException(ErrorCode.BAD_REQUEST, "The `withCollection` must have only one shard, found: " + collection.getActiveSlices().size());
}
withCollectionShard = collection.getActiveSlices().iterator().next().getName();
}
}
String configName = getConfigName(collectionName, message);
if (configName == null) {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "No config set found to associate with the collection.");
}
ocmh.validateConfigOrThrowSolrException(configName);
String router = message.getStr("router.name", DocRouter.DEFAULT_NAME);
// fail fast if parameters are wrong or incomplete
List<String> shardNames = populateShardNames(message, router);
checkReplicaTypes(message);
AtomicReference<PolicyHelper.SessionWrapper> sessionWrapper = new AtomicReference<>();
try {
final String async = message.getStr(ASYNC);
ZkStateReader zkStateReader = ocmh.zkStateReader;
boolean isLegacyCloud = Overseer.isLegacy(zkStateReader);
OverseerCollectionMessageHandler.createConfNode(stateManager, configName, collectionName, isLegacyCloud);
Map<String,String> collectionParams = new HashMap<>();
Map<String,Object> collectionProps = message.getProperties();
for (Map.Entry<String, Object> entry : collectionProps.entrySet()) {
String propName = entry.getKey();
if (propName.startsWith(ZkController.COLLECTION_PARAM_PREFIX)) {
collectionParams.put(propName.substring(ZkController.COLLECTION_PARAM_PREFIX.length()), (String) entry.getValue());
}
}
createCollectionZkNode(stateManager, collectionName, collectionParams);
ocmh.overseer.offerStateUpdate(Utils.toJSON(message));
// wait for a while until we see the collection
TimeOut waitUntil = new TimeOut(30, TimeUnit.SECONDS, timeSource);
boolean created = false;
while (! waitUntil.hasTimedOut()) {
waitUntil.sleep(100);
created = ocmh.cloudManager.getClusterStateProvider().getClusterState().hasCollection(collectionName);
if(created) break;
}
if (!created) {
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Could not fully create collection: " + collectionName);
}
// refresh cluster state
clusterState = ocmh.cloudManager.getClusterStateProvider().getClusterState();
List<ReplicaPosition> replicaPositions = null;
try {
replicaPositions = buildReplicaPositions(ocmh.cloudManager, clusterState, clusterState.getCollection(collectionName), message, shardNames, sessionWrapper);
} catch (Assign.AssignmentException e) {
ZkNodeProps deleteMessage = new ZkNodeProps("name", collectionName);
new DeleteCollectionCmd(ocmh).call(clusterState, deleteMessage, results);
// unwrap the exception
throw new SolrException(ErrorCode.SERVER_ERROR, e.getMessage(), e.getCause());
}
if (replicaPositions.isEmpty()) {
log.debug("Finished create command for collection: {}", collectionName);
return;
}
final ShardRequestTracker shardRequestTracker = ocmh.asyncRequestTracker(async);
log.debug(formatString("Creating SolrCores for new collection {0}, shardNames {1} , message : {2}",
collectionName, shardNames, message));
Map<String,ShardRequest> coresToCreate = new LinkedHashMap<>();
ShardHandler shardHandler = ocmh.shardHandlerFactory.getShardHandler(ocmh.overseer.getCoreContainer().getUpdateShardHandler().getDefaultHttpClient());
for (ReplicaPosition replicaPosition : replicaPositions) {
String nodeName = replicaPosition.node;
if (withCollection != null) {
// check that we have a replica of `withCollection` on this node and if not, create one
DocCollection collection = clusterState.getCollection(withCollection);
List<Replica> replicas = collection.getReplicas(nodeName);
if (replicas == null || replicas.isEmpty()) {
ZkNodeProps props = new ZkNodeProps(
Overseer.QUEUE_OPERATION, ADDREPLICA.toString(),
ZkStateReader.COLLECTION_PROP, withCollection,
ZkStateReader.SHARD_ID_PROP, withCollectionShard,
"node", nodeName,
CommonAdminParams.WAIT_FOR_FINAL_STATE, Boolean.TRUE.toString()); // set to true because we want `withCollection` to be ready after this collection is created
new AddReplicaCmd(ocmh).call(clusterState, props, results);
clusterState = zkStateReader.getClusterState(); // refresh
}
}
String coreName = Assign.buildSolrCoreName(ocmh.cloudManager.getDistribStateManager(),
ocmh.cloudManager.getClusterStateProvider().getClusterState().getCollection(collectionName),
replicaPosition.shard, replicaPosition.type, true);
log.debug(formatString("Creating core {0} as part of shard {1} of collection {2} on {3}"
, coreName, replicaPosition.shard, collectionName, nodeName));
String baseUrl = zkStateReader.getBaseUrlForNodeName(nodeName);
//in the new mode, create the replica in clusterstate prior to creating the core.
// Otherwise the core creation fails
if (!isLegacyCloud) {
ZkNodeProps props = new ZkNodeProps(
Overseer.QUEUE_OPERATION, ADDREPLICA.toString(),
ZkStateReader.COLLECTION_PROP, collectionName,
ZkStateReader.SHARD_ID_PROP, replicaPosition.shard,
ZkStateReader.CORE_NAME_PROP, coreName,
ZkStateReader.STATE_PROP, Replica.State.DOWN.toString(),
ZkStateReader.BASE_URL_PROP, baseUrl,
ZkStateReader.REPLICA_TYPE, replicaPosition.type.name(),
CommonAdminParams.WAIT_FOR_FINAL_STATE, Boolean.toString(waitForFinalState));
ocmh.overseer.offerStateUpdate(Utils.toJSON(props));
}
// Need to create new params for each request
ModifiableSolrParams params = new ModifiableSolrParams();
params.set(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.CREATE.toString());
params.set(CoreAdminParams.NAME, coreName);
params.set(COLL_CONF, configName);
params.set(CoreAdminParams.COLLECTION, collectionName);
params.set(CoreAdminParams.SHARD, replicaPosition.shard);
params.set(ZkStateReader.NUM_SHARDS_PROP, shardNames.size());
params.set(CoreAdminParams.NEW_COLLECTION, "true");
params.set(CoreAdminParams.REPLICA_TYPE, replicaPosition.type.name());
if (async != null) {
String coreAdminAsyncId = async + Math.abs(System.nanoTime());
params.add(ASYNC, coreAdminAsyncId);
shardRequestTracker.track(nodeName, coreAdminAsyncId);
}
ocmh.addPropertyParams(message, params);
ShardRequest sreq = new ShardRequest();
sreq.nodeName = nodeName;
params.set("qt", ocmh.adminPath);
sreq.purpose = 1;
sreq.shards = new String[]{baseUrl};
sreq.actualShards = sreq.shards;
sreq.params = params;
if (isLegacyCloud) {
shardHandler.submit(sreq, sreq.shards[0], sreq.params);
} else {
coresToCreate.put(coreName, sreq);
}
}
if(!isLegacyCloud) {
// wait for all replica entries to be created
Map<String, Replica> replicas = ocmh.waitToSeeReplicasInState(collectionName, coresToCreate.keySet());
for (Map.Entry<String, ShardRequest> e : coresToCreate.entrySet()) {
ShardRequest sreq = e.getValue();
sreq.params.set(CoreAdminParams.CORE_NODE_NAME, replicas.get(e.getKey()).getName());
shardHandler.submit(sreq, sreq.shards[0], sreq.params);
}
}
shardRequestTracker.processResponses(results, shardHandler, false, null, Collections.emptySet());
boolean failure = results.get("failure") != null && ((SimpleOrderedMap)results.get("failure")).size() > 0;
if (failure) {
// Let's cleanup as we hit an exception
// We shouldn't be passing 'results' here for the cleanup as the response would then contain 'success'
// element, which may be interpreted by the user as a positive ack
ocmh.cleanupCollection(collectionName, new NamedList<Object>());
log.info("Cleaned up artifacts for failed create collection for [{}]", collectionName);
throw new SolrException(ErrorCode.BAD_REQUEST, "Underlying core creation failed while creating collection: " + collectionName);
} else {
log.debug("Finished create command on all shards for collection: {}", collectionName);
// Emit a warning about production use of data driven functionality
boolean defaultConfigSetUsed = message.getStr(COLL_CONF) == null ||
message.getStr(COLL_CONF).equals(ConfigSetsHandlerApi.DEFAULT_CONFIGSET_NAME);
if (defaultConfigSetUsed) {
results.add("warning", "Using _default configset. Data driven schema functionality"
+ " is enabled by default, which is NOT RECOMMENDED for production use. To turn it off:"
+ " curl http://{host:port}/solr/" + collectionName + "/config -d '{\"set-user-property\": {\"update.autoCreateFields\":\"false\"}}'");
}
}
// modify the `withCollection` and store this new collection's name with it
if (withCollection != null) {
ZkNodeProps props = new ZkNodeProps(
Overseer.QUEUE_OPERATION, MODIFYCOLLECTION.toString(),
ZkStateReader.COLLECTION_PROP, withCollection,
CollectionAdminParams.COLOCATED_WITH, collectionName);
ocmh.overseer.offerStateUpdate(Utils.toJSON(props));
try {
zkStateReader.waitForState(withCollection, 5, TimeUnit.SECONDS, (collectionState) -> collectionName.equals(collectionState.getStr(COLOCATED_WITH)));
} catch (TimeoutException e) {
log.warn("Timed out waiting to see the " + COLOCATED_WITH + " property set on collection: " + withCollection);
// maybe the overseer queue is backed up, we don't want to fail the create request
// because of this time out, continue
}
}
// create an alias pointing to the new collection, if different from the collectionName
if (!alias.equals(collectionName)) {
ocmh.zkStateReader.aliasesManager.applyModificationAndExportToZk(a -> a.cloneWithCollectionAlias(alias, collectionName));
}
} catch (SolrException ex) {
throw ex;
} catch (Exception ex) {
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, null, ex);
} finally {
if (sessionWrapper.get() != null) sessionWrapper.get().release();
}
}
public static List<ReplicaPosition> buildReplicaPositions(SolrCloudManager cloudManager, ClusterState clusterState,
DocCollection docCollection,
ZkNodeProps message,
List<String> shardNames,
AtomicReference<PolicyHelper.SessionWrapper> sessionWrapper) throws IOException, InterruptedException, Assign.AssignmentException {
final String collectionName = message.getStr(NAME);
// look at the replication factor and see if it matches reality
// if it does not, find best nodes to create more cores
int numTlogReplicas = message.getInt(TLOG_REPLICAS, 0);
int numNrtReplicas = message.getInt(NRT_REPLICAS, message.getInt(REPLICATION_FACTOR, numTlogReplicas>0?0:1));
int numPullReplicas = message.getInt(PULL_REPLICAS, 0);
int numSlices = shardNames.size();
int maxShardsPerNode = message.getInt(MAX_SHARDS_PER_NODE, 1);
if (maxShardsPerNode == -1) maxShardsPerNode = Integer.MAX_VALUE;
// we need to look at every node and see how many cores it serves
// add our new cores to existing nodes serving the least number of cores
// but (for now) require that each core goes on a distinct node.
List<ReplicaPosition> replicaPositions;
List<String> nodeList = Assign.getLiveOrLiveAndCreateNodeSetList(clusterState.getLiveNodes(), message, OverseerCollectionMessageHandler.RANDOM);
if (nodeList.isEmpty()) {
log.warn("It is unusual to create a collection ("+collectionName+") without cores.");
replicaPositions = new ArrayList<>();
} else {
int totalNumReplicas = numNrtReplicas + numTlogReplicas + numPullReplicas;
if (totalNumReplicas > nodeList.size()) {
log.warn("Specified number of replicas of "
+ totalNumReplicas
+ " on collection "
+ collectionName
+ " is higher than the number of Solr instances currently live or live and part of your " + OverseerCollectionMessageHandler.CREATE_NODE_SET + "("
+ nodeList.size()
+ "). It's unusual to run two replica of the same slice on the same Solr-instance.");
}
int maxShardsAllowedToCreate = maxShardsPerNode == Integer.MAX_VALUE ?
Integer.MAX_VALUE :
maxShardsPerNode * nodeList.size();
int requestedShardsToCreate = numSlices * totalNumReplicas;
if (maxShardsAllowedToCreate < requestedShardsToCreate) {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Cannot create collection " + collectionName + ". Value of "
+ MAX_SHARDS_PER_NODE + " is " + maxShardsPerNode
+ ", and the number of nodes currently live or live and part of your "+OverseerCollectionMessageHandler.CREATE_NODE_SET+" is " + nodeList.size()
+ ". This allows a maximum of " + maxShardsAllowedToCreate
+ " to be created. Value of " + OverseerCollectionMessageHandler.NUM_SLICES + " is " + numSlices
+ ", value of " + NRT_REPLICAS + " is " + numNrtReplicas
+ ", value of " + TLOG_REPLICAS + " is " + numTlogReplicas
+ " and value of " + PULL_REPLICAS + " is " + numPullReplicas
+ ". This requires " + requestedShardsToCreate
+ " shards to be created (higher than the allowed number)");
}
Assign.AssignRequest assignRequest = new Assign.AssignRequestBuilder()
.forCollection(collectionName)
.forShard(shardNames)
.assignNrtReplicas(numNrtReplicas)
.assignTlogReplicas(numTlogReplicas)
.assignPullReplicas(numPullReplicas)
.onNodes(nodeList)
.build();
Assign.AssignStrategyFactory assignStrategyFactory = new Assign.AssignStrategyFactory(cloudManager);
Assign.AssignStrategy assignStrategy = assignStrategyFactory.create(clusterState, docCollection);
replicaPositions = assignStrategy.assign(cloudManager, assignRequest);
sessionWrapper.set(PolicyHelper.getLastSessionWrapper(true));
}
return replicaPositions;
}
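The capacity guard in buildReplicaPositions is simple arithmetic: a cluster of N live nodes with maxShardsPerNode M can host at most N * M cores, while the request needs numSlices * (nrt + tlog + pull) cores. A small worked sketch of that check (Python for illustration; check_capacity is a hypothetical helper, not Solr API):

def check_capacity(num_nodes, max_shards_per_node, num_slices,
                   nrt=1, tlog=0, pull=0):
    """Replicates the core-count guard in buildReplicaPositions."""
    total_replicas = nrt + tlog + pull
    requested = num_slices * total_replicas
    allowed = num_nodes * max_shards_per_node
    if allowed < requested:
        raise ValueError(
            f"need {requested} cores but only {allowed} allowed "
            f"({num_nodes} nodes x {max_shards_per_node} shards/node)")
    return requested

# 2 shards x (1 NRT + 1 TLOG) = 4 cores; 3 nodes x 1 shard/node = 3 -> rejected.
try:
    check_capacity(num_nodes=3, max_shards_per_node=1, num_slices=2, nrt=1, tlog=1)
except ValueError as e:
    print(e)
# Raising maxShardsPerNode to 2 gives 6 slots, so the same request fits.
assert check_capacity(3, 2, 2, nrt=1, tlog=1) == 4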
public static void checkReplicaTypes(ZkNodeProps message) {
int numTlogReplicas = message.getInt(TLOG_REPLICAS, 0);
int numNrtReplicas = message.getInt(NRT_REPLICAS, message.getInt(REPLICATION_FACTOR, numTlogReplicas > 0 ? 0 : 1));
if (numNrtReplicas + numTlogReplicas <= 0) {
throw new SolrException(ErrorCode.BAD_REQUEST, NRT_REPLICAS + " + " + TLOG_REPLICAS + " must be greater than 0");
}
}
public static List<String> populateShardNames(ZkNodeProps message, String router) {
List<String> shardNames = new ArrayList<>();
Integer numSlices = message.getInt(OverseerCollectionMessageHandler.NUM_SLICES, null);
if (ImplicitDocRouter.NAME.equals(router)) {
ClusterStateMutator.getShardNames(shardNames, message.getStr("shards", null));
numSlices = shardNames.size();
} else {
if (numSlices == null) {
throw new SolrException(ErrorCode.BAD_REQUEST, OverseerCollectionMessageHandler.NUM_SLICES + " is a required param (when using CompositeId router).");
}
if (numSlices <= 0) {
throw new SolrException(ErrorCode.BAD_REQUEST, OverseerCollectionMessageHandler.NUM_SLICES + " must be > 0");
}
ClusterStateMutator.getShardNames(numSlices, shardNames);
}
return shardNames;
}
String getConfigName(String coll, ZkNodeProps message) throws KeeperException, InterruptedException {
String configName = message.getStr(COLL_CONF);
if (configName == null) {
// if there is only one conf, use that
List<String> configNames = null;
try {
configNames = ocmh.zkStateReader.getZkClient().getChildren(ZkConfigManager.CONFIGS_ZKNODE, null, true);
if (configNames.contains(ConfigSetsHandlerApi.DEFAULT_CONFIGSET_NAME)) {
if (CollectionAdminParams.SYSTEM_COLL.equals(coll)) {
return coll;
} else {
String intendedConfigSetName = ConfigSetsHandlerApi.getSuffixedNameForAutoGeneratedConfigSet(coll);
copyDefaultConfigSetTo(configNames, intendedConfigSetName);
return intendedConfigSetName;
}
} else if (configNames != null && configNames.size() == 1) {
configName = configNames.get(0);
// no config set named, but there is only 1 - use it
log.info("Only one config set found in zk - using it:" + configName);
}
} catch (KeeperException.NoNodeException e) {
}
}
return "".equals(configName)? null: configName;
}
/**
* Copies the _default configset to the specified configset name (overwrites if pre-existing)
*/
private void copyDefaultConfigSetTo(List<String> configNames, String targetConfig) {
ZkConfigManager configManager = new ZkConfigManager(ocmh.zkStateReader.getZkClient());
// if a configset named collection exists, re-use it
if (configNames.contains(targetConfig)) {
log.info("There exists a configset by the same name as the collection we're trying to create: " + targetConfig +
", re-using it.");
return;
}
// Copy _default into targetConfig
try {
configManager.copyConfigDir(ConfigSetsHandlerApi.DEFAULT_CONFIGSET_NAME, targetConfig, new HashSet<>());
} catch (Exception e) {
throw new SolrException(ErrorCode.INVALID_STATE, "Error while copying _default to " + targetConfig, e);
}
}
public static void createCollectionZkNode(DistribStateManager stateManager, String collection, Map<String,String> params) {
log.debug("Check for collection zkNode:" + collection);
String collectionPath = ZkStateReader.COLLECTIONS_ZKNODE + "/" + collection;
// clean up old terms node
String termsPath = ZkStateReader.COLLECTIONS_ZKNODE + "/" + collection + "/terms";
try {
stateManager.removeRecursively(termsPath, true, true);
} catch (InterruptedException e) {
Thread.interrupted();
throw new SolrException(ErrorCode.SERVER_ERROR, "Error deleting old term nodes for collection from Zookeeper", e);
} catch (KeeperException | IOException | NotEmptyException | BadVersionException e) {
throw new SolrException(ErrorCode.SERVER_ERROR, "Error deleting old term nodes for collection from Zookeeper", e);
}
try {
if (!stateManager.hasData(collectionPath)) {
log.debug("Creating collection in ZooKeeper:" + collection);
try {
Map<String,Object> collectionProps = new HashMap<>();
if (params.size() > 0) {
collectionProps.putAll(params);
// if the config name wasn't passed in, use the default
if (!collectionProps.containsKey(ZkController.CONFIGNAME_PROP)) {
// users can create the collection node and conf link ahead of time, or this may return another option
getConfName(stateManager, collection, collectionPath, collectionProps);
}
} else if (System.getProperty("bootstrap_confdir") != null) {
String defaultConfigName = System.getProperty(ZkController.COLLECTION_PARAM_PREFIX + ZkController.CONFIGNAME_PROP, collection);
// if we are bootstrapping a collection, default the config for
// a new collection to the collection we are bootstrapping
log.info("Setting config for collection:" + collection + " to " + defaultConfigName);
Properties sysProps = System.getProperties();
for (String sprop : System.getProperties().stringPropertyNames()) {
if (sprop.startsWith(ZkController.COLLECTION_PARAM_PREFIX)) {
collectionProps.put(sprop.substring(ZkController.COLLECTION_PARAM_PREFIX.length()), sysProps.getProperty(sprop));
}
}
// if the config name wasn't passed in, use the default
if (!collectionProps.containsKey(ZkController.CONFIGNAME_PROP))
collectionProps.put(ZkController.CONFIGNAME_PROP, defaultConfigName);
} else if (Boolean.getBoolean("bootstrap_conf")) {
// the conf name should be the collection name of this core
collectionProps.put(ZkController.CONFIGNAME_PROP, collection);
} else {
getConfName(stateManager, collection, collectionPath, collectionProps);
}
collectionProps.remove(ZkStateReader.NUM_SHARDS_PROP); // we don't put numShards in the collections properties
ZkNodeProps zkProps = new ZkNodeProps(collectionProps);
stateManager.makePath(collectionPath, Utils.toJSON(zkProps), CreateMode.PERSISTENT, false);
} catch (KeeperException e) {
//TODO shouldn't the stateManager ensure this does not happen; should throw AlreadyExistsException
// it's okay if the node already exists
if (e.code() != KeeperException.Code.NODEEXISTS) {
throw e;
}
} catch (AlreadyExistsException e) {
// it's okay if the node already exists
}
} else {
log.debug("Collection zkNode exists");
}
} catch (KeeperException e) {
// it's okay if another beats us creating the node
if (e.code() == KeeperException.Code.NODEEXISTS) {
return;
}
throw new SolrException(ErrorCode.SERVER_ERROR, "Error creating collection node in Zookeeper", e);
} catch (IOException e) {
throw new SolrException(ErrorCode.SERVER_ERROR, "Error creating collection node in Zookeeper", e);
} catch (InterruptedException e) {
Thread.interrupted();
throw new SolrException(ErrorCode.SERVER_ERROR, "Error creating collection node in Zookeeper", e);
}
}
private static void getConfName(DistribStateManager stateManager, String collection, String collectionPath, Map<String,Object> collectionProps) throws IOException,
KeeperException, InterruptedException {
// check for configName
log.debug("Looking for collection configName");
if (collectionProps.containsKey("configName")) {
log.info("configName was passed as a param {}", collectionProps.get("configName"));
return;
}
List<String> configNames = null;
int retry = 1;
int retryLimit = 6;
for (; retry < retryLimit; retry++) {
if (stateManager.hasData(collectionPath)) {
VersionedData data = stateManager.getData(collectionPath);
ZkNodeProps cProps = ZkNodeProps.load(data.getData());
if (cProps.containsKey(ZkController.CONFIGNAME_PROP)) {
break;
}
}
try {
configNames = stateManager.listData(ZkConfigManager.CONFIGS_ZKNODE);
} catch (NoSuchElementException | NoNodeException e) {
// just keep trying
}
// check if there's a config set with the same name as the collection
if (configNames != null && configNames.contains(collection)) {
log.info(
"Could not find explicit collection configName, but found config name matching collection name - using that set.");
collectionProps.put(ZkController.CONFIGNAME_PROP, collection);
break;
}
// if _default exists, use that
if (configNames != null && configNames.contains(ConfigSetsHandlerApi.DEFAULT_CONFIGSET_NAME)) {
log.info(
"Could not find explicit collection configName, but found _default config set - using that set.");
collectionProps.put(ZkController.CONFIGNAME_PROP, ConfigSetsHandlerApi.DEFAULT_CONFIGSET_NAME);
break;
}
// if there is only one conf, use that
if (configNames != null && configNames.size() == 1) {
// no config set named, but there is only 1 - use it
log.info("Only one config set found in zk - using it:" + configNames.get(0));
collectionProps.put(ZkController.CONFIGNAME_PROP, configNames.get(0));
break;
}
log.info("Could not find collection configName - pausing for 3 seconds and trying again - try: " + retry);
Thread.sleep(3000);
}
if (retry == retryLimit) {
log.error("Could not find configName for collection " + collection);
throw new ZooKeeperException(
SolrException.ErrorCode.SERVER_ERROR,
"Could not find configName for collection " + collection + " found:" + configNames);
}
}
}
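getConfName above is a bounded retry loop: up to five attempts, each falling through a priority list (config set named after the collection, then _default, then the single available set) and pausing three seconds before looking again. A compact sketch of the same resolution order (Python for illustration; list_configs is an assumed callable standing in for the ZooKeeper listing):

import time

def resolve_config_name(collection, list_configs, retries=5, pause=3.0):
    """Resolution order mirrors getConfName: name match, _default, lone config."""
    for attempt in range(1, retries + 1):
        names = list_configs() or []
        if collection in names:
            return collection          # config set named after the collection
        if "_default" in names:
            return "_default"          # fall back to the bundled default set
        if len(names) == 1:
            return names[0]            # only one config exists - use it
        time.sleep(pause)              # maybe ZK is still settling; try again
    raise RuntimeError(f"Could not find configName for collection {collection}")

assert resolve_config_name("orders", lambda: ["orders", "_default"]) == "orders"
assert resolve_config_name("logs", lambda: ["_default"], pause=0) == "_default"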
| 1 | 31,974 | So one question I have is why is the error coming back from `buildReplicaPositions` not an `Assign.AssignmentException`? Is it because it is wrapped in a `SolrException` from the remote node? | apache-lucene-solr | java |
@@ -316,13 +316,13 @@ def test_debug_logger_object():
assert dt.options.debug.enabled is True
DT = dt.rbind([])
- assert "dt.rbind([]) {" in logger.msg
+ assert "datatable.rbind([]) {" in logger.msg
assert re.search(r"} # \d+(?:\.\d+)?(?:[eE][+-]?\d+)? s", logger.msg)
logger.msg = ""
with pytest.raises(TypeError):
dt.rbind(4)
- assert "dt.rbind(4) {" in logger.msg
+ assert "datatable.rbind(4) {" in logger.msg
assert re.search(r"} # \d+(?:\.\d+)?(?:[eE][+-]?\d+)? s \(failed\)", logger.msg)
| 1 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------------
# Copyright 2018-2021 H2O.ai
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#-------------------------------------------------------------------------------
import pytest
import re
import datatable as dt
from datatable.internal import frame_integrity_check
from tests import noop
def test_options_all():
# Update this test every time a new option is added
assert repr(dt.options).startswith("datatable.options.")
assert set(dir(dt.options)) == {
"nthreads",
"debug",
"sort",
"display",
"frame",
"fread",
"progress",
}
assert set(dir(dt.options.sort)) == {
"insert_method_threshold",
"max_chunk_length",
"max_radix_bits",
"new",
"nthreads",
"over_radix_bits",
"thread_multiplier",
}
assert set(dir(dt.options.display)) == {
"allow_unicode",
"head_nrows",
"interactive",
"max_nrows",
"max_column_width",
"tail_nrows",
"use_colors"
}
assert set(dir(dt.options.frame)) == {
"names_auto_index",
"names_auto_prefix",
}
assert set(dir(dt.options.fread)) == {
"anonymize",
"log",
"parse_dates",
"parse_times",
}
assert set(dir(dt.options.progress)) == {
"callback",
"clear_on_success",
"enabled",
"min_duration",
"updates_per_second",
"allow_interruption",
}
assert set(dir(dt.options.debug)) == {
"enabled",
"logger",
"report_args",
"arg_max_size"
}
def test_option_api():
dt.options.register_option(name="fooo", xtype=int, default=13,
doc="a dozen")
assert "fooo" in dir(dt.options)
assert dt.options.fooo == 13
assert dt.options.get("fooo") == 13
dt.options.fooo = 23
assert dt.options.fooo == 23
dt.options.set("fooo", 25)
assert dt.options.fooo == 25
del dt.options.fooo
assert dt.options.fooo == 13
dt.options.fooo = 0
dt.options.reset("fooo")
assert dt.options.fooo == 13
def test_option_bad():
with pytest.raises(AttributeError):
noop(dt.options.gooo)
with pytest.raises(TypeError) as e:
dt.options.register_option(name="gooo", xtype=str, default=3, doc="??")
assert "Default value 3 is not of type <class 'str'>" in str(e.value)
with pytest.raises(ValueError) as e:
dt.options.register_option(name=".hidden", xtype=int, default=0)
assert "Invalid option name .hidden" in str(e.value)
dt.options.register_option(name="gooo", xtype=int, default=3)
with pytest.raises(ValueError) as e:
dt.options.register_option(name="gooo", xtype=int, default=4, doc="???")
assert "Option gooo already registered" in str(e.value)
with pytest.raises(TypeError) as e:
dt.options.gooo = 2.5
assert ("Invalid value for option gooo: expected <class 'int'>, instead "
"got <class 'float'>"
in str(e.value))
def test_option_suggest():
with pytest.raises(AttributeError) as e:
dt.options.fread.log.escapeunicode = False
assert ("did you mean `fread.log.escape_unicode`?" in str(e.value))
def test_options_many():
dt.options.register_option("tmp1.alpha", 1, doc="A", xtype=int)
dt.options.register_option("tmp1.beta", 2, doc="B", xtype=int)
dt.options.register_option("tmp1.gamma", 3, doc="C", xtype=int)
dt.options.register_option("tmp1.delta.x", 4, doc="X", xtype=int)
dt.options.register_option("tmp1.delta.y", 5, doc="Y", xtype=int)
dt.options.register_option("tmp1.delta.z.zz", 6, doc="Z", xtype=int)
for _ in [1, 2]:
assert dt.options.tmp1.alpha == 1
assert dt.options.tmp1.beta == 2
assert dt.options.tmp1.gamma == 3
assert dt.options.tmp1.delta.x == 4
assert dt.options.tmp1.delta.y == 5
assert dt.options.tmp1.delta.z.zz == 6
assert set(dir(dt.options.tmp1)) == {"alpha", "beta", "gamma", "delta"}
assert set(dir(dt.options.tmp1.delta)) == {"x", "y", "z"}
dt.options.tmp1.delta.x = 0
dt.options.tmp1.delta.z.zz = 0
dt.options.tmp1.reset()
def test_options_many_bad():
dt.options.register_option("tmp2.foo.x", 4, xtype=int)
dt.options.register_option("tmp2.foo.y", 5, xtype=int)
dt.options.tmp2.foo.x = 8
assert dt.options.tmp2.foo.x == 8
assert dt.options.get("tmp2.foo.x") == 8
with pytest.raises(TypeError) as e:
dt.options.tmp2.foo = 0
assert ("Cannot assign a value to group of options tmp2.foo.*"
in str(e.value))
def test_options_context1():
with dt.options.context(**{"fread.log.anonymize": True}):
assert dt.options.fread.log.anonymize is True
assert dt.options.fread.log.anonymize is False
def test_options_context2():
with dt.options.fread.log.context(escape_unicode=True):
assert dt.options.fread.log.escape_unicode is True
assert dt.options.fread.log.escape_unicode is False
def test_options_context3():
# Check that in case of exception the value still gets restored
dt.options.register_option("zoonk", 1, xtype=int)
try:
with dt.options.context(zoonk=100):
raise RuntimeError
except RuntimeError:
pass
assert dt.options.zoonk == 1
def test_options_context4():
# Check that if context requires changing multiple parameters, that all
# their values are properly restored in the end, even if there is an
# exception during setting one of the parameters
dt.options.register_option("tmp1.odyn", 1, xtype=int)
dt.options.register_option("tmp1.dwa", 2, xtype=int)
try:
with dt.options.tmp1.context(odyn=10, dwa=None):
assert False, "Should not be able to enter this context"
except TypeError:
pass
assert dt.options.tmp1.odyn == 1
assert dt.options.tmp1.dwa == 2
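test_options_context3 and test_options_context4 pin down the contract that a context manager must restore option values even when the body raises, or when setting one of several options fails partway through. A minimal sketch of that pattern, independent of datatable's actual implementation (_options and options_context are made-up names):

from contextlib import contextmanager

_options = {"zoonk": 1, "odyn": 1, "dwa": 2}

@contextmanager
def options_context(**overrides):
    """Set options, then restore the old values even on exception."""
    saved = {}
    try:
        for name, value in overrides.items():
            if not isinstance(value, int):       # mimic type validation failing
                raise TypeError(f"{name} must be int")
            saved[name] = _options[name]
            _options[name] = value
        yield
    finally:
        for name, value in saved.items():        # undo only what was changed
            _options[name] = value

# Body raises: value still restored.
try:
    with options_context(zoonk=100):
        raise RuntimeError
except RuntimeError:
    pass
assert _options["zoonk"] == 1

# Second assignment fails during setup: the first one is still rolled back.
try:
    with options_context(odyn=10, dwa=None):
        pass
except TypeError:
    pass
assert _options == {"zoonk": 1, "odyn": 1, "dwa": 2}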
#-------------------------------------------------------------------------------
# Individual options
#-------------------------------------------------------------------------------
def test_nthreads():
from datatable.internal import get_thread_ids
nthreads0 = dt.options.nthreads
curr_threads = get_thread_ids()
assert len(curr_threads) == nthreads0
for n in [4, 1, nthreads0 + 10, nthreads0]:
dt.options.nthreads = n
new_threads = get_thread_ids()
assert len(new_threads) == n
m = min(len(curr_threads), len(new_threads))
assert curr_threads[:m] == new_threads[:m]
curr_threads = new_threads
assert dt.options.nthreads == nthreads0
def test_frame_names_auto_index():
assert dt.options.frame.names_auto_index == 0
dt.options.frame.names_auto_index = 1
f0 = dt.Frame([[1], [2], [3], [4]])
assert f0.names == ("C1", "C2", "C3", "C4")
dt.options.frame.names_auto_index = 999
f1 = dt.Frame([[1], [2], [3], [4]])
assert f1.names == ("C999", "C1000", "C1001", "C1002")
del dt.options.frame.names_auto_index
f2 = dt.Frame([[1], [2], [3], [4]])
assert f2.names == ("C0", "C1", "C2", "C3")
with pytest.raises(TypeError):
dt.options.frame.names_auto_index = "C"
def test_frame_names_auto_prefix():
assert dt.options.frame.names_auto_prefix == "C"
dt.options.frame.names_auto_prefix = "foo"
f0 = dt.Frame([[3], [3], [3]])
assert f0.names == ("foo0", "foo1", "foo2")
del dt.options.frame.names_auto_prefix
f2 = dt.Frame([[1], [2], [3], [4]])
assert f2.names == ("C0", "C1", "C2", "C3")
with pytest.raises(TypeError):
dt.options.frame.names_auto_prefix = 0
#-------------------------------------------------------------------------------
# .debug options
#-------------------------------------------------------------------------------
class SimpleLogger:
def __init__(self):
self.msg = ""
def debug(self, msg):
self.msg += msg
self.msg += "\n"
def test_debug_logger_default_without_report_args(capsys):
assert dt.options.debug.enabled is False
assert dt.options.debug.logger is None
assert dt.options.debug.report_args is False
with dt.options.debug.context(enabled=True):
assert dt.options.debug.logger is None
assert dt.options.debug.enabled is True
DT = dt.Frame(range(100000))
out, err = capsys.readouterr()
assert not err
assert re.search(r"<Frame#[\da-fA-F]+>.__init__\(\) ", out)
assert re.search(r"# \d+(?:\.\d+)?(?:[eE][+-]?\d+)? s", out)
with pytest.raises(TypeError):
dt.cbind(3)
out, err = capsys.readouterr()
assert not err
assert "datatable.cbind() {" in out
assert re.search(r"} # \d+(?:\.\d+)?(?:[eE][+-]?\d+)? s \(failed\)", out)
def test_debug_logger_default_with_report_args(capsys):
assert dt.options.debug.logger is None
with dt.options.debug.context(enabled=True, report_args=True):
assert dt.options.debug.logger is None
assert dt.options.debug.enabled is True
DT = dt.Frame(range(100000))
out, err = capsys.readouterr()
print(out)
assert not err
assert re.search(r"<Frame#[\da-fA-F]+>.__init__\(range\(0, 100000\)\)", out)
assert re.search(r"# \d+(?:\.\d+)?(?:[eE][+-]?\d+)? s", out)
with pytest.raises(TypeError):
dt.cbind(3)
out, err = capsys.readouterr()
assert not err
assert "datatable.cbind(3) {" in out
assert re.search(r"} # \d+(?:\.\d+)?(?:[eE][+-]?\d+)? s \(failed\)", out)
def test_debug_logger_object():
assert dt.options.debug.logger is None
logger = SimpleLogger()
with dt.options.debug.context(logger=logger, enabled=True, report_args=True):
assert dt.options.debug.logger is logger
assert dt.options.debug.enabled is True
DT = dt.rbind([])
assert "dt.rbind([]) {" in logger.msg
assert re.search(r"} # \d+(?:\.\d+)?(?:[eE][+-]?\d+)? s", logger.msg)
logger.msg = ""
with pytest.raises(TypeError):
dt.rbind(4)
assert "dt.rbind(4) {" in logger.msg
assert re.search(r"} # \d+(?:\.\d+)?(?:[eE][+-]?\d+)? s \(failed\)", logger.msg)
def test_debug_logger_invalid_object():
msg = r"Logger should be an object having a method \.debug\(self, msg\)"
with pytest.raises(TypeError, match=msg):
dt.options.debug.logger = "default"
with pytest.raises(TypeError, match=msg):
dt.options.debug.logger = False
class A: pass
with pytest.raises(TypeError, match=msg):
dt.options.debug.logger = A()
class B:
debug = True
with pytest.raises(TypeError, match=msg):
dt.options.debug.logger = B()
def test_debug_arg_max_size():
logger = SimpleLogger()
with dt.options.debug.context(logger=logger, enabled=True, report_args=True):
assert dt.options.debug.arg_max_size == 100
with dt.options.debug.context(arg_max_size=0):
assert dt.options.debug.arg_max_size == 10
with pytest.raises(ValueError):
dt.options.debug.arg_max_size = -1
with pytest.raises(TypeError):
dt.options.debug.arg_max_size = None
with dt.options.debug.context(arg_max_size=20):
logger.msg = ""
DT = dt.Frame(A=["abcdefghij"*100])
assert ".__init__(A=['abcdefghij...hij']) #" in logger.msg
def test_debug_logger_invalid_option():
# This test checks that invalid options do not cause a crash
# when logging is enabled
with dt.options.debug.context(enabled=True, report_args=True):
try:
dt.options.gooo0
assert False, "Did not raise AttributeError"
except AttributeError:
pass
def test_debug_logger_bad_repr():
# This test checks that logging does not crash if a repr()
# function throws an error
class A:
def __repr__(self):
raise RuntimeError("Malformed repr")
with dt.options.debug.context(enabled=True, report_args=True):
DT = dt.Frame()
try:
DT[A()]
except TypeError:
pass
def test_debug_logger_no_deadlock():
# This custom logger invokes datatable functionality, which has
# the potential of causing deadlocks or deep recursive messages.
class MyLogger:
def __init__(self):
self.frame = dt.Frame(msg=[], stype=str)
def debug(self, msg):
self.frame.nrows += 1
self.frame[-1, 0] = msg
logger = MyLogger()
with dt.options.debug.context(enabled=True, logger=logger, report_args=True):
logger.frame.nrows = 0
DT = dt.Frame(range(10))
DT.rbind(DT)
del DT[::2, :]
assert logger.frame.nrows == 4
| 1 | 13,117 | `dt` won't work for some reason? | h2oai-datatable | py |
@@ -96,7 +96,7 @@ def add_db(doctest_namespace):
doctest_namespace["db"] = db_name
[email protected](autouse=os.getenv("KOALAS_USAGE_LOGGER", None) is not None)
[email protected](autouse=os.getenv("KOALAS_USAGE_LOGGER", "") != "")
def add_caplog(caplog):
with caplog.at_level(logging.INFO, logger="databricks.koalas.usage_logger"):
yield | 1 | #
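The fixture change hinges on a subtlety of os.getenv: with an explicit default, the variable being unset and being set to the empty string collapse to the same value, which the != "" form handles uniformly, while the old is not None form treats an empty-but-set variable as enabled. A quick stdlib demonstration:

import os

os.environ.pop("KOALAS_USAGE_LOGGER", None)       # unset
assert os.getenv("KOALAS_USAGE_LOGGER", None) is None
assert os.getenv("KOALAS_USAGE_LOGGER", "") == ""

os.environ["KOALAS_USAGE_LOGGER"] = ""            # set but empty
# The old check treats this as "enabled" (value is not None) ...
assert os.getenv("KOALAS_USAGE_LOGGER", None) is not None
# ... while the new check treats it as "disabled".
assert os.getenv("KOALAS_USAGE_LOGGER", "") == ""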
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
import numpy
import tempfile
import atexit
import os
import shutil
import uuid
import logging
from distutils.version import LooseVersion
import pandas as pd
import pyarrow as pa
import matplotlib.pyplot as plt
from pyspark import __version__
from databricks import koalas as ks
from databricks.koalas import utils
shared_conf = {"spark.sql.shuffle.partitions": "4"}
# Initialize Spark session that should be used in doctests or unittests.
# Delta requires Spark 2.4.2+. See
# https://github.com/delta-io/delta#compatibility-with-apache-spark-versions.
if LooseVersion(__version__) >= LooseVersion("3.0.0"):
shared_conf["spark.jars.packages"] = "io.delta:delta-core_2.12:0.7.0"
session = utils.default_session(shared_conf)
elif LooseVersion(__version__) >= LooseVersion("2.4.2"):
shared_conf["spark.jars.packages"] = "io.delta:delta-core_2.11:0.6.1"
session = utils.default_session(shared_conf)
else:
session = utils.default_session(shared_conf)
if os.getenv("DEFAULT_INDEX_TYPE", "") != "":
ks.options.compute.default_index_type = os.getenv("DEFAULT_INDEX_TYPE")
@pytest.fixture(scope="session", autouse=True)
def session_termination():
yield
# Share one session across all the tests. Repeating starting and stopping sessions and contexts
# seems causing a memory leak for an unknown reason in PySpark.
session.stop()
@pytest.fixture(autouse=True)
def add_ks(doctest_namespace):
doctest_namespace["ks"] = ks
@pytest.fixture(autouse=True)
def add_pd(doctest_namespace):
if os.getenv("PANDAS_VERSION", None) is not None:
assert pd.__version__ == os.getenv("PANDAS_VERSION")
doctest_namespace["pd"] = pd
@pytest.fixture(autouse=True)
def add_pa(doctest_namespace):
if os.getenv("PYARROW_VERSION", None) is not None:
assert pa.__version__ == os.getenv("PYARROW_VERSION")
doctest_namespace["pa"] = pa
@pytest.fixture(autouse=True)
def add_np(doctest_namespace):
doctest_namespace["np"] = numpy
@pytest.fixture(autouse=True)
def add_path(doctest_namespace):
path = tempfile.mkdtemp()
atexit.register(lambda: shutil.rmtree(path, ignore_errors=True))
doctest_namespace["path"] = path
@pytest.fixture(autouse=True)
def add_db(doctest_namespace):
db_name = "db%s" % str(uuid.uuid4()).replace("-", "")
session.sql("CREATE DATABASE %s" % db_name)
atexit.register(lambda: session.sql("DROP DATABASE IF EXISTS %s CASCADE" % db_name))
doctest_namespace["db"] = db_name
@pytest.fixture(autouse=os.getenv("KOALAS_USAGE_LOGGER", None) is not None)
def add_caplog(caplog):
with caplog.at_level(logging.INFO, logger="databricks.koalas.usage_logger"):
yield
@pytest.fixture(autouse=True)
def close_figs():
yield
plt.close("all")
| 1 | 16,201 | Is this because "KOALAS_USAGE_LOGGER" can be `None` ?? | databricks-koalas | py |
@@ -0,0 +1,16 @@
+using Nethermind.Int256;
+
+namespace Nethermind.JsonRpc.Modules.Eth
+{
+ public static class GasPriceConfig
+ {
+ public const int NoHeadBlockChangeErrorCode = 7; //Error code used in debug mode when the head block is not changed
+ public const int PercentileOfSortedTxs = 60; //Percentile of sortedTxList indexes to choose as gas price
+ public const int DefaultBlocksLimit = 20; //Limit for how many blocks we check txs in to add to sortedTxList
+ public const int DefaultGasPrice = 1; //Tx price added to sortedTxList for a block that has 0 txs (now adds 1 tx)
+ public const int SoftTxLimit = 40; //If a block adds only 1 tx price, the block is not counted towards BlocksLimit if sortedTxList.Count() + blocksLeftToCheck <= SoftTxLimit (after price is added to sortedTxList)
+ public const int DefaultBaseFee = 200; //Minimum price a transaction sender has to pay
+ public const int TxLimitFromABlock = 3; //Maximum number of tx we can add to sortedTxList from one block
+ public static readonly UInt256 _maxGasPrice = 500; //Maximum gas price we can return
+ }
+} | 1 | 1 | 25,626 | I like it but maybe the better name will be EthGasPriceConstants or EthGasPriceEstimatorConstants? but Constants not Config | NethermindEth-nethermind | .cs |
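The constants above parameterize a percentile-of-recent-transactions estimator: collect tx gas prices from up to DefaultBlocksLimit recent blocks (at most TxLimitFromABlock per block, with DefaultGasPrice standing in for empty blocks), sort them, take the value at the 60th percentile, and clamp to the 500 maximum. A rough sketch of that calculation (Python for illustration only; the per-block sampling rule is an assumption, not Nethermind's actual implementation):

PERCENTILE = 60       # PercentileOfSortedTxs
DEFAULT_PRICE = 1     # DefaultGasPrice, used for blocks with no transactions
MAX_PRICE = 500       # _maxGasPrice cap

def estimate_gas_price(blocks, tx_limit_per_block=3):
    """Percentile estimator over gas prices sampled from recent blocks."""
    prices = []
    for block_tx_prices in blocks:
        if not block_tx_prices:
            prices.append(DEFAULT_PRICE)              # empty block -> one price
        else:
            prices.extend(sorted(block_tx_prices)[:tx_limit_per_block])
    prices.sort()
    idx = (len(prices) - 1) * PERCENTILE // 100       # 60th-percentile index
    return min(prices[idx], MAX_PRICE)

# Three blocks: one empty, two with transactions.
blocks = [[], [30, 10, 20, 90], [50, 40]]
# Sampled: [1] + [10, 20, 30] + [40, 50] -> sorted [1, 10, 20, 30, 40, 50]
# idx = 5 * 60 // 100 = 3 -> estimate 30
assert estimate_gas_price(blocks) == 30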
|
@@ -38,7 +38,9 @@ import (
func TestNew(t *testing.T) {
ctx, _ := SetupFakeContext(t)
- c := NewController(ctx, configmap.NewStaticWatcher(
+ ctor := NewConstructor(&dataresidency.StoreSingleton{})
+
+ c := ctor(ctx, configmap.NewStaticWatcher(
&corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: logging.ConfigMapName(), | 1 | /*
Copyright 2020 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package broker
import (
"testing"
"github.com/google/knative-gcp/pkg/apis/configs/dataresidency"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"knative.dev/pkg/configmap"
"knative.dev/pkg/logging"
"knative.dev/pkg/metrics"
. "knative.dev/pkg/reconciler/testing"
"knative.dev/pkg/system"
tracingconfig "knative.dev/pkg/tracing/config"
// Fake injection informers
_ "github.com/google/knative-gcp/pkg/client/injection/informers/broker/v1beta1/broker/fake"
_ "github.com/google/knative-gcp/pkg/client/injection/informers/intevents/v1alpha1/brokercell/fake"
)
func TestNew(t *testing.T) {
ctx, _ := SetupFakeContext(t)
c := NewController(ctx, configmap.NewStaticWatcher(
&corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: logging.ConfigMapName(),
Namespace: system.Namespace(),
},
Data: map[string]string{},
},
&corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: metrics.ConfigMapName(),
Namespace: system.Namespace(),
},
Data: map[string]string{},
},
&corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: tracingconfig.ConfigName,
Namespace: system.Namespace(),
},
Data: map[string]string{},
},
&corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: dataresidency.ConfigMapName(),
Namespace: system.Namespace(),
},
Data: map[string]string{},
},
))
if c == nil {
t.Fatal("Expected NewController to return a non-nil value")
}
}
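The patch at the top of this record swaps a direct NewController for a two-stage constructor: NewConstructor closes over the dataresidency store singleton and returns the actual controller factory, so the dependency is injected once at wiring time while the returned ctor keeps the signature the framework expects. The shape of that pattern, in a Python sketch with hypothetical names:

def new_constructor(store):
    """Stage 1: capture the dependency; return the real constructor."""
    def ctor(ctx, watcher):
        # Stage 2: same signature the framework expects, store already bound.
        return {"ctx": ctx, "watcher": watcher, "store": store}
    return ctor

ctor = new_constructor(store={"regions": ["us-east1"]})
controller = ctor(ctx="fake-ctx", watcher="static-watcher")
assert controller["store"]["regions"] == ["us-east1"]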
| 1 | 18,031 | I would inline the constructor as well | google-knative-gcp | go |
@@ -107,10 +107,10 @@ TEST(Scanner, Basic) {
}; \
GraphScanner lexer; \
lexer.setReadBuffer(input); \
- nebula::GraphParser::semantic_type dumyyylval; \
- nebula::GraphParser::location_type dumyyyloc; \
+ nebula::GraphParser::semantic_type dummyyylval; \
+ nebula::GraphParser::location_type dummyyyloc; \
try { \
- auto token = lexer.yylex(&dumyyylval, &dumyyyloc); \
+ auto token = lexer.yylex(&dummyyylval, &dummyyyloc); \
if (token != 0) { \
return AssertionFailure() << "Lexical error should've " \
<< "happened for `" << STR << "'"; \ | 1 | /* Copyright (c) 2018 vesoft inc. All rights reserved.
*
* This source code is licensed under Apache 2.0 License.
*/
#include <gtest/gtest.h>
#include <sstream>
#include <utility>
#include <vector>
#include "common/base/Base.h"
#include "parser/GraphParser.hpp"
#include "parser/GraphScanner.h"
using testing::AssertionFailure;
using testing::AssertionResult;
using testing::AssertionSuccess;
namespace nebula {
using semantic_type = nebula::GraphParser::semantic_type;
static auto checkSemanticValue(const char *expected, semantic_type *sv) {
auto actual = *sv->strval;
delete sv->strval;
if (expected != actual) {
return AssertionFailure() << "Semantic value not match, "
<< "expected: " << expected << ", actual: " << actual;
}
return AssertionSuccess();
}
static auto checkSemanticValue(bool expected, semantic_type *sv) {
auto actual = sv->boolval;
if (expected != actual) {
return AssertionFailure() << "Semantic value not match, "
<< "expected: " << expected << ", actual: " << actual;
}
return AssertionSuccess();
}
template <typename T>
static std::enable_if_t<std::is_integral<T>::value, AssertionResult> checkSemanticValue(
T expected, semantic_type *sv) {
auto actual = static_cast<T>(sv->intval);
if (expected != actual) {
return AssertionFailure() << "Semantic value not match, "
<< "expected: " << expected << ", actual: " << actual;
}
return AssertionSuccess();
}
template <typename T>
static std::enable_if_t<std::is_floating_point<T>::value, AssertionResult> checkSemanticValue(
T expected, semantic_type *sv) {
auto actual = static_cast<T>(sv->doubleval);
if (expected != actual) {
return AssertionFailure() << "Semantic value not match, "
<< "expected: " << expected << ", actual: " << actual;
}
return AssertionSuccess();
}
TEST(Scanner, Basic) {
using TokenType = nebula::GraphParser::token_type;
using Validator = std::function<::testing::AssertionResult()>;
nebula::GraphParser::semantic_type yylval;
nebula::GraphParser::location_type yyloc;
GraphScanner scanner;
std::string stream;
#define CHECK_SEMANTIC_TYPE(STR, TYPE) \
(stream += " ", stream += STR, [&]() { \
auto actual = scanner.yylex(&yylval, &yyloc); \
if (actual != TYPE) { \
return AssertionFailure() << "Token type not match for `" << STR \
<< "', expected: " << static_cast<int>(TYPE) \
<< ", actual: " << static_cast<int>(actual); \
} else { \
return AssertionSuccess(); \
} \
})
#define CHECK_SEMANTIC_VALUE(STR, TYPE, value) \
(stream += " ", stream += STR, [&]() { \
auto actual = scanner.yylex(&yylval, &yyloc); \
if (actual != TYPE) { \
return AssertionFailure() << "Token type not match for `" << STR \
<< "', expected: " << static_cast<int>(TYPE) \
<< ", actual: " << static_cast<int>(actual); \
} else { \
return checkSemanticValue(value, &yylval); \
} \
})
#define CHECK_LEXICAL_ERROR(STR) \
([]() { \
auto input = [](char *buf, int) -> int { \
static bool first = true; \
if (!first) { \
return 0; \
} \
first = false; \
auto size = ::strlen(STR); \
::memcpy(buf, STR, size); \
return size; \
}; \
GraphScanner lexer; \
lexer.setReadBuffer(input); \
nebula::GraphParser::semantic_type dumyyylval; \
nebula::GraphParser::location_type dumyyyloc; \
try { \
auto token = lexer.yylex(&dumyyylval, &dumyyyloc); \
if (token != 0) { \
return AssertionFailure() << "Lexical error should've " \
<< "happened for `" << STR << "'"; \
} else { \
return AssertionSuccess(); \
} \
} catch (const std::exception &e) { \
LOG(INFO) << e.what() << STR; \
return AssertionSuccess(); \
} \
})
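The macros above build a table of closures, each feeding one token through the shared scanner and checking the token type and, where relevant, the semantic value; CHECK_LEXICAL_ERROR additionally spins up a fresh scanner over a one-shot read buffer so a lexical failure cannot poison the shared stream. The same table-driven shape, sketched in Python for brevity (tokenize is a toy stand-in for the generated lexer's yylex, not nebula code):

def tokenize(text):
    symbols = {".": "DOT", ",": "COMMA", "<=": "LE", "->": "R_ARROW"}
    if text in symbols:
        return symbols[text], None
    if text.isdigit():
        return "INTEGER", int(text)
    raise ValueError(f"lexical error near {text!r}")

# Each case pairs an input with the expected (token_type, semantic_value).
cases = [
    ("<=", "LE", None),
    ("->", "R_ARROW", None),
    ("42", "INTEGER", 42),
]
for text, want_type, want_value in cases:
    got_type, got_value = tokenize(text)
    assert got_type == want_type, f"{text}: {got_type} != {want_type}"
    assert got_value == want_value

# And the error path, mirroring CHECK_LEXICAL_ERROR:
try:
    tokenize("`")
    raise AssertionError("lexical error should have happened for `")
except ValueError:
    pass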
std::vector<Validator> validators = {
CHECK_SEMANTIC_TYPE(".", TokenType::DOT),
CHECK_SEMANTIC_TYPE("..", TokenType::DOT_DOT),
CHECK_SEMANTIC_TYPE(",", TokenType::COMMA),
CHECK_SEMANTIC_TYPE(":", TokenType::COLON),
CHECK_SEMANTIC_TYPE(";", TokenType::SEMICOLON),
CHECK_SEMANTIC_TYPE("+", TokenType::PLUS),
CHECK_SEMANTIC_TYPE("-", TokenType::MINUS),
CHECK_SEMANTIC_TYPE("*", TokenType::STAR),
CHECK_SEMANTIC_TYPE("/", TokenType::DIV),
CHECK_SEMANTIC_TYPE("%", TokenType::MOD),
CHECK_SEMANTIC_TYPE("!", TokenType::NOT),
CHECK_SEMANTIC_TYPE("@", TokenType::AT),
CHECK_SEMANTIC_TYPE("?", TokenType::QM),
CHECK_SEMANTIC_TYPE("<", TokenType::LT),
CHECK_SEMANTIC_TYPE("<=", TokenType::LE),
CHECK_SEMANTIC_TYPE(">", TokenType::GT),
CHECK_SEMANTIC_TYPE(">=", TokenType::GE),
CHECK_SEMANTIC_TYPE("==", TokenType::EQ),
CHECK_SEMANTIC_TYPE("!=", TokenType::NE),
CHECK_SEMANTIC_TYPE("<>", TokenType::NE),
CHECK_SEMANTIC_TYPE("=~", TokenType::REG),
CHECK_SEMANTIC_TYPE("|", TokenType::PIPE),
CHECK_SEMANTIC_TYPE("=", TokenType::ASSIGN),
CHECK_SEMANTIC_TYPE("(", TokenType::L_PAREN),
CHECK_SEMANTIC_TYPE(")", TokenType::R_PAREN),
CHECK_SEMANTIC_TYPE("[", TokenType::L_BRACKET),
CHECK_SEMANTIC_TYPE("]", TokenType::R_BRACKET),
CHECK_SEMANTIC_TYPE("{", TokenType::L_BRACE),
CHECK_SEMANTIC_TYPE("}", TokenType::R_BRACE),
CHECK_SEMANTIC_TYPE("<-", TokenType::L_ARROW),
CHECK_SEMANTIC_TYPE("->", TokenType::R_ARROW),
CHECK_SEMANTIC_TYPE("$-", TokenType::INPUT_REF),
CHECK_SEMANTIC_TYPE("$^", TokenType::SRC_REF),
CHECK_SEMANTIC_TYPE("$$", TokenType::DST_REF),
CHECK_SEMANTIC_TYPE("GO", TokenType::KW_GO),
CHECK_SEMANTIC_TYPE("go", TokenType::KW_GO),
CHECK_SEMANTIC_TYPE("AS", TokenType::KW_AS),
CHECK_SEMANTIC_TYPE("as", TokenType::KW_AS),
CHECK_SEMANTIC_TYPE("TO", TokenType::KW_TO),
CHECK_SEMANTIC_TYPE("to", TokenType::KW_TO),
CHECK_SEMANTIC_TYPE("USE", TokenType::KW_USE),
CHECK_SEMANTIC_TYPE("use", TokenType::KW_USE),
CHECK_SEMANTIC_TYPE("SET", TokenType::KW_SET),
CHECK_SEMANTIC_TYPE("set", TokenType::KW_SET),
CHECK_SEMANTIC_TYPE("FROM", TokenType::KW_FROM),
CHECK_SEMANTIC_TYPE("from", TokenType::KW_FROM),
CHECK_SEMANTIC_TYPE("WHERE", TokenType::KW_WHERE),
CHECK_SEMANTIC_TYPE("where", TokenType::KW_WHERE),
CHECK_SEMANTIC_TYPE("MATCH", TokenType::KW_MATCH),
CHECK_SEMANTIC_TYPE("match", TokenType::KW_MATCH),
CHECK_SEMANTIC_TYPE("INSERT", TokenType::KW_INSERT),
CHECK_SEMANTIC_TYPE("insert", TokenType::KW_INSERT),
CHECK_SEMANTIC_TYPE("VALUE", TokenType::KW_VALUES),
CHECK_SEMANTIC_TYPE("value", TokenType::KW_VALUES),
CHECK_SEMANTIC_TYPE("VALUES", TokenType::KW_VALUES),
CHECK_SEMANTIC_TYPE("values", TokenType::KW_VALUES),
CHECK_SEMANTIC_TYPE("YIELD", TokenType::KW_YIELD),
CHECK_SEMANTIC_TYPE("yield", TokenType::KW_YIELD),
CHECK_SEMANTIC_TYPE("RETURN", TokenType::KW_RETURN),
CHECK_SEMANTIC_TYPE("return", TokenType::KW_RETURN),
CHECK_SEMANTIC_TYPE("VERTEX", TokenType::KW_VERTEX),
CHECK_SEMANTIC_TYPE("vertex", TokenType::KW_VERTEX),
CHECK_SEMANTIC_TYPE("EDGE", TokenType::KW_EDGE),
CHECK_SEMANTIC_TYPE("edge", TokenType::KW_EDGE),
CHECK_SEMANTIC_TYPE("EDGES", TokenType::KW_EDGES),
CHECK_SEMANTIC_TYPE("edges", TokenType::KW_EDGES),
CHECK_SEMANTIC_TYPE("UPDATE", TokenType::KW_UPDATE),
CHECK_SEMANTIC_TYPE("update", TokenType::KW_UPDATE),
CHECK_SEMANTIC_TYPE("ALTER", TokenType::KW_ALTER),
CHECK_SEMANTIC_TYPE("alter", TokenType::KW_ALTER),
CHECK_SEMANTIC_TYPE("STEP", TokenType::KW_STEPS),
CHECK_SEMANTIC_TYPE("step", TokenType::KW_STEPS),
CHECK_SEMANTIC_TYPE("STEPS", TokenType::KW_STEPS),
CHECK_SEMANTIC_TYPE("steps", TokenType::KW_STEPS),
CHECK_SEMANTIC_TYPE("OVER", TokenType::KW_OVER),
CHECK_SEMANTIC_TYPE("over", TokenType::KW_OVER),
CHECK_SEMANTIC_TYPE("UPTO", TokenType::KW_UPTO),
CHECK_SEMANTIC_TYPE("upto", TokenType::KW_UPTO),
CHECK_SEMANTIC_TYPE("REVERSELY", TokenType::KW_REVERSELY),
CHECK_SEMANTIC_TYPE("reversely", TokenType::KW_REVERSELY),
CHECK_SEMANTIC_TYPE("SPACE", TokenType::KW_SPACE),
CHECK_SEMANTIC_TYPE("space", TokenType::KW_SPACE),
CHECK_SEMANTIC_TYPE("SPACES", TokenType::KW_SPACES),
CHECK_SEMANTIC_TYPE("spaces", TokenType::KW_SPACES),
CHECK_SEMANTIC_TYPE("PARTS", TokenType::KW_PARTS),
CHECK_SEMANTIC_TYPE("Parts", TokenType::KW_PARTS),
CHECK_SEMANTIC_TYPE("parts", TokenType::KW_PARTS),
CHECK_SEMANTIC_TYPE("DOUBLE", TokenType::KW_DOUBLE),
CHECK_SEMANTIC_TYPE("double", TokenType::KW_DOUBLE),
CHECK_SEMANTIC_TYPE("STRING", TokenType::KW_STRING),
CHECK_SEMANTIC_TYPE("string", TokenType::KW_STRING),
CHECK_SEMANTIC_TYPE("BOOL", TokenType::KW_BOOL),
CHECK_SEMANTIC_TYPE("bool", TokenType::KW_BOOL),
CHECK_SEMANTIC_TYPE("TAG", TokenType::KW_TAG),
CHECK_SEMANTIC_TYPE("tag", TokenType::KW_TAG),
CHECK_SEMANTIC_TYPE("TAGS", TokenType::KW_TAGS),
CHECK_SEMANTIC_TYPE("tags", TokenType::KW_TAGS),
CHECK_SEMANTIC_TYPE("UNION", TokenType::KW_UNION),
CHECK_SEMANTIC_TYPE("union", TokenType::KW_UNION),
CHECK_SEMANTIC_TYPE("INTERSECT", TokenType::KW_INTERSECT),
CHECK_SEMANTIC_TYPE("intersect", TokenType::KW_INTERSECT),
CHECK_SEMANTIC_TYPE("MINUS", TokenType::KW_MINUS),
CHECK_SEMANTIC_TYPE("minus", TokenType::KW_MINUS),
CHECK_SEMANTIC_TYPE("SHOW", TokenType::KW_SHOW),
CHECK_SEMANTIC_TYPE("show", TokenType::KW_SHOW),
CHECK_SEMANTIC_TYPE("Show", TokenType::KW_SHOW),
CHECK_SEMANTIC_TYPE("ADD", TokenType::KW_ADD),
CHECK_SEMANTIC_TYPE("add", TokenType::KW_ADD),
CHECK_SEMANTIC_TYPE("Add", TokenType::KW_ADD),
CHECK_SEMANTIC_TYPE("HOST", TokenType::KW_HOST),
CHECK_SEMANTIC_TYPE("host", TokenType::KW_HOST),
CHECK_SEMANTIC_TYPE("Host", TokenType::KW_HOST),
CHECK_SEMANTIC_TYPE("HOSTS", TokenType::KW_HOSTS),
CHECK_SEMANTIC_TYPE("hosts", TokenType::KW_HOSTS),
CHECK_SEMANTIC_TYPE("Hosts", TokenType::KW_HOSTS),
CHECK_SEMANTIC_TYPE("TIMESTAMP", TokenType::KW_TIMESTAMP),
CHECK_SEMANTIC_TYPE("timestamp", TokenType::KW_TIMESTAMP),
CHECK_SEMANTIC_TYPE("Timestamp", TokenType::KW_TIMESTAMP),
CHECK_SEMANTIC_TYPE("DELETE", TokenType::KW_DELETE),
CHECK_SEMANTIC_TYPE("delete", TokenType::KW_DELETE),
CHECK_SEMANTIC_TYPE("Delete", TokenType::KW_DELETE),
CHECK_SEMANTIC_TYPE("FIND", TokenType::KW_FIND),
CHECK_SEMANTIC_TYPE("find", TokenType::KW_FIND),
CHECK_SEMANTIC_TYPE("Find", TokenType::KW_FIND),
CHECK_SEMANTIC_TYPE("CREATE", TokenType::KW_CREATE),
CHECK_SEMANTIC_TYPE("create", TokenType::KW_CREATE),
CHECK_SEMANTIC_TYPE("Create", TokenType::KW_CREATE),
CHECK_SEMANTIC_TYPE("PARTITION_NUM", TokenType::KW_PARTITION_NUM),
CHECK_SEMANTIC_TYPE("partition_num", TokenType::KW_PARTITION_NUM),
CHECK_SEMANTIC_TYPE("Partition_num", TokenType::KW_PARTITION_NUM),
CHECK_SEMANTIC_TYPE("REPLICA_FACTOR", TokenType::KW_REPLICA_FACTOR),
CHECK_SEMANTIC_TYPE("replica_factor", TokenType::KW_REPLICA_FACTOR),
CHECK_SEMANTIC_TYPE("Replica_factor", TokenType::KW_REPLICA_FACTOR),
CHECK_SEMANTIC_TYPE("DROP", TokenType::KW_DROP),
CHECK_SEMANTIC_TYPE("drop", TokenType::KW_DROP),
CHECK_SEMANTIC_TYPE("Drop", TokenType::KW_DROP),
CHECK_SEMANTIC_TYPE("DESC", TokenType::KW_DESC),
CHECK_SEMANTIC_TYPE("desc", TokenType::KW_DESC),
CHECK_SEMANTIC_TYPE("Desc", TokenType::KW_DESC),
CHECK_SEMANTIC_TYPE("DESCRIBE", TokenType::KW_DESCRIBE),
CHECK_SEMANTIC_TYPE("describe", TokenType::KW_DESCRIBE),
CHECK_SEMANTIC_TYPE("Describe", TokenType::KW_DESCRIBE),
CHECK_SEMANTIC_TYPE("REMOVE", TokenType::KW_REMOVE),
CHECK_SEMANTIC_TYPE("remove", TokenType::KW_REMOVE),
CHECK_SEMANTIC_TYPE("Remove", TokenType::KW_REMOVE),
CHECK_SEMANTIC_TYPE("IF", TokenType::KW_IF),
CHECK_SEMANTIC_TYPE("If", TokenType::KW_IF),
CHECK_SEMANTIC_TYPE("if", TokenType::KW_IF),
CHECK_SEMANTIC_TYPE("NOT", TokenType::KW_NOT),
CHECK_SEMANTIC_TYPE("Not", TokenType::KW_NOT),
CHECK_SEMANTIC_TYPE("not", TokenType::KW_NOT),
CHECK_SEMANTIC_TYPE("OR", TokenType::KW_OR),
CHECK_SEMANTIC_TYPE("Or", TokenType::KW_OR),
CHECK_SEMANTIC_TYPE("or", TokenType::KW_OR),
CHECK_SEMANTIC_TYPE("AND", TokenType::KW_AND),
CHECK_SEMANTIC_TYPE("And", TokenType::KW_AND),
CHECK_SEMANTIC_TYPE("and", TokenType::KW_AND),
CHECK_SEMANTIC_TYPE("XOR", TokenType::KW_XOR),
CHECK_SEMANTIC_TYPE("Xor", TokenType::KW_XOR),
CHECK_SEMANTIC_TYPE("xor", TokenType::KW_XOR),
CHECK_SEMANTIC_TYPE("EXISTS", TokenType::KW_EXISTS),
CHECK_SEMANTIC_TYPE("Exists", TokenType::KW_EXISTS),
CHECK_SEMANTIC_TYPE("exists", TokenType::KW_EXISTS),
CHECK_SEMANTIC_TYPE("WITH", TokenType::KW_WITH),
CHECK_SEMANTIC_TYPE("With", TokenType::KW_WITH),
CHECK_SEMANTIC_TYPE("with", TokenType::KW_WITH),
CHECK_SEMANTIC_TYPE("USER", TokenType::KW_USER),
CHECK_SEMANTIC_TYPE("User", TokenType::KW_USER),
CHECK_SEMANTIC_TYPE("user", TokenType::KW_USER),
CHECK_SEMANTIC_TYPE("USERS", TokenType::KW_USERS),
CHECK_SEMANTIC_TYPE("Users", TokenType::KW_USERS),
CHECK_SEMANTIC_TYPE("users", TokenType::KW_USERS),
CHECK_SEMANTIC_TYPE("PASSWORD", TokenType::KW_PASSWORD),
CHECK_SEMANTIC_TYPE("Password", TokenType::KW_PASSWORD),
CHECK_SEMANTIC_TYPE("password", TokenType::KW_PASSWORD),
CHECK_SEMANTIC_TYPE("CHANGE", TokenType::KW_CHANGE),
CHECK_SEMANTIC_TYPE("Change", TokenType::KW_CHANGE),
CHECK_SEMANTIC_TYPE("change", TokenType::KW_CHANGE),
CHECK_SEMANTIC_TYPE("ROLE", TokenType::KW_ROLE),
CHECK_SEMANTIC_TYPE("Role", TokenType::KW_ROLE),
CHECK_SEMANTIC_TYPE("role", TokenType::KW_ROLE),
CHECK_SEMANTIC_TYPE("GOD", TokenType::KW_GOD),
CHECK_SEMANTIC_TYPE("God", TokenType::KW_GOD),
CHECK_SEMANTIC_TYPE("god", TokenType::KW_GOD),
CHECK_SEMANTIC_TYPE("ADMIN", TokenType::KW_ADMIN),
CHECK_SEMANTIC_TYPE("Admin", TokenType::KW_ADMIN),
CHECK_SEMANTIC_TYPE("admin", TokenType::KW_ADMIN),
CHECK_SEMANTIC_TYPE("GUEST", TokenType::KW_GUEST),
CHECK_SEMANTIC_TYPE("Guest", TokenType::KW_GUEST),
CHECK_SEMANTIC_TYPE("guest", TokenType::KW_GUEST),
CHECK_SEMANTIC_TYPE("GRANT", TokenType::KW_GRANT),
CHECK_SEMANTIC_TYPE("Grant", TokenType::KW_GRANT),
CHECK_SEMANTIC_TYPE("grant", TokenType::KW_GRANT),
CHECK_SEMANTIC_TYPE("REVOKE", TokenType::KW_REVOKE),
CHECK_SEMANTIC_TYPE("Revoke", TokenType::KW_REVOKE),
CHECK_SEMANTIC_TYPE("revoke", TokenType::KW_REVOKE),
CHECK_SEMANTIC_TYPE("ON", TokenType::KW_ON),
CHECK_SEMANTIC_TYPE("On", TokenType::KW_ON),
CHECK_SEMANTIC_TYPE("on", TokenType::KW_ON),
CHECK_SEMANTIC_TYPE("ROLES", TokenType::KW_ROLES),
CHECK_SEMANTIC_TYPE("Roles", TokenType::KW_ROLES),
CHECK_SEMANTIC_TYPE("BY", TokenType::KW_BY),
CHECK_SEMANTIC_TYPE("By", TokenType::KW_BY),
CHECK_SEMANTIC_TYPE("by", TokenType::KW_BY),
CHECK_SEMANTIC_TYPE("IN", TokenType::KW_IN),
CHECK_SEMANTIC_TYPE("In", TokenType::KW_IN),
CHECK_SEMANTIC_TYPE("TTL_DURATION", TokenType::KW_TTL_DURATION),
CHECK_SEMANTIC_TYPE("ttl_duration", TokenType::KW_TTL_DURATION),
CHECK_SEMANTIC_TYPE("Ttl_duration", TokenType::KW_TTL_DURATION),
CHECK_SEMANTIC_TYPE("TTL_COL", TokenType::KW_TTL_COL),
CHECK_SEMANTIC_TYPE("ttl_col", TokenType::KW_TTL_COL),
CHECK_SEMANTIC_TYPE("Ttl_col", TokenType::KW_TTL_COL),
CHECK_SEMANTIC_TYPE("DOWNLOAD", TokenType::KW_DOWNLOAD),
CHECK_SEMANTIC_TYPE("download", TokenType::KW_DOWNLOAD),
CHECK_SEMANTIC_TYPE("Download", TokenType::KW_DOWNLOAD),
CHECK_SEMANTIC_TYPE("HDFS", TokenType::KW_HDFS),
CHECK_SEMANTIC_TYPE("Hdfs", TokenType::KW_HDFS),
CHECK_SEMANTIC_TYPE("hdfs", TokenType::KW_HDFS),
CHECK_SEMANTIC_TYPE("ORDER", TokenType::KW_ORDER),
CHECK_SEMANTIC_TYPE("Order", TokenType::KW_ORDER),
CHECK_SEMANTIC_TYPE("order", TokenType::KW_ORDER),
CHECK_SEMANTIC_TYPE("ASC", TokenType::KW_ASC),
CHECK_SEMANTIC_TYPE("Asc", TokenType::KW_ASC),
CHECK_SEMANTIC_TYPE("asc", TokenType::KW_ASC),
CHECK_SEMANTIC_TYPE("DISTINCT", TokenType::KW_DISTINCT),
CHECK_SEMANTIC_TYPE("Distinct", TokenType::KW_DISTINCT),
CHECK_SEMANTIC_TYPE("distinct", TokenType::KW_DISTINCT),
CHECK_SEMANTIC_TYPE("DEFAULT", TokenType::KW_DEFAULT),
CHECK_SEMANTIC_TYPE("Default", TokenType::KW_DEFAULT),
CHECK_SEMANTIC_TYPE("default", TokenType::KW_DEFAULT),
CHECK_SEMANTIC_TYPE("INGEST", TokenType::KW_INGEST),
CHECK_SEMANTIC_TYPE("Ingest", TokenType::KW_INGEST),
CHECK_SEMANTIC_TYPE("ingest", TokenType::KW_INGEST),
CHECK_SEMANTIC_TYPE("CONFIGS", TokenType::KW_CONFIGS),
CHECK_SEMANTIC_TYPE("configs", TokenType::KW_CONFIGS),
CHECK_SEMANTIC_TYPE("Configs", TokenType::KW_CONFIGS),
CHECK_SEMANTIC_TYPE("ALL", TokenType::KW_ALL),
CHECK_SEMANTIC_TYPE("all", TokenType::KW_ALL),
CHECK_SEMANTIC_TYPE("BALANCE", TokenType::KW_BALANCE),
CHECK_SEMANTIC_TYPE("Balance", TokenType::KW_BALANCE),
CHECK_SEMANTIC_TYPE("balance", TokenType::KW_BALANCE),
CHECK_SEMANTIC_TYPE("LEADER", TokenType::KW_LEADER),
CHECK_SEMANTIC_TYPE("Leader", TokenType::KW_LEADER),
CHECK_SEMANTIC_TYPE("leader", TokenType::KW_LEADER),
CHECK_SEMANTIC_TYPE("RESET", TokenType::KW_RESET),
CHECK_SEMANTIC_TYPE("reset", TokenType::KW_RESET),
CHECK_SEMANTIC_TYPE("Reset", TokenType::KW_RESET),
CHECK_SEMANTIC_TYPE("PLAN", TokenType::KW_PLAN),
CHECK_SEMANTIC_TYPE("plan", TokenType::KW_PLAN),
CHECK_SEMANTIC_TYPE("Plan", TokenType::KW_PLAN),
CHECK_SEMANTIC_TYPE("FETCH", TokenType::KW_FETCH),
CHECK_SEMANTIC_TYPE("Fetch", TokenType::KW_FETCH),
CHECK_SEMANTIC_TYPE("fetch", TokenType::KW_FETCH),
CHECK_SEMANTIC_TYPE("UUID", TokenType::KW_UUID),
CHECK_SEMANTIC_TYPE("Uuid", TokenType::KW_UUID),
CHECK_SEMANTIC_TYPE("uuid", TokenType::KW_UUID),
CHECK_SEMANTIC_TYPE("OF", TokenType::KW_OF),
CHECK_SEMANTIC_TYPE("Of", TokenType::KW_OF),
CHECK_SEMANTIC_TYPE("of", TokenType::KW_OF),
CHECK_SEMANTIC_TYPE("LIMIT", TokenType::KW_LIMIT),
CHECK_SEMANTIC_TYPE("limit", TokenType::KW_LIMIT),
CHECK_SEMANTIC_TYPE("OFFSET", TokenType::KW_OFFSET),
CHECK_SEMANTIC_TYPE("offset", TokenType::KW_OFFSET),
CHECK_SEMANTIC_TYPE("SNAPSHOT", TokenType::KW_SNAPSHOT),
CHECK_SEMANTIC_TYPE("Snapshot", TokenType::KW_SNAPSHOT),
CHECK_SEMANTIC_TYPE("snapshot", TokenType::KW_SNAPSHOT),
CHECK_SEMANTIC_TYPE("SNAPSHOTS", TokenType::KW_SNAPSHOTS),
CHECK_SEMANTIC_TYPE("Snapshots", TokenType::KW_SNAPSHOTS),
CHECK_SEMANTIC_TYPE("snapshots", TokenType::KW_SNAPSHOTS),
CHECK_SEMANTIC_TYPE("SHORTEST", TokenType::KW_SHORTEST),
CHECK_SEMANTIC_TYPE("Shortest", TokenType::KW_SHORTEST),
CHECK_SEMANTIC_TYPE("shortest", TokenType::KW_SHORTEST),
CHECK_SEMANTIC_TYPE("SUBGRAPH", TokenType::KW_SUBGRAPH),
CHECK_SEMANTIC_TYPE("Subgraph", TokenType::KW_SUBGRAPH),
CHECK_SEMANTIC_TYPE("subgraph", TokenType::KW_SUBGRAPH),
CHECK_SEMANTIC_TYPE("CONTAINS", TokenType::KW_CONTAINS),
CHECK_SEMANTIC_TYPE("Contains", TokenType::KW_CONTAINS),
CHECK_SEMANTIC_TYPE("contains", TokenType::KW_CONTAINS),
CHECK_SEMANTIC_TYPE("STARTS", TokenType::KW_STARTS),
CHECK_SEMANTIC_TYPE("Starts", TokenType::KW_STARTS),
CHECK_SEMANTIC_TYPE("starts", TokenType::KW_STARTS),
CHECK_SEMANTIC_TYPE("ENDS", TokenType::KW_ENDS),
CHECK_SEMANTIC_TYPE("Ends", TokenType::KW_ENDS),
CHECK_SEMANTIC_TYPE("ends", TokenType::KW_ENDS),
CHECK_SEMANTIC_TYPE("STARTS WITH", TokenType::KW_STARTS_WITH),
CHECK_SEMANTIC_TYPE("Starts with", TokenType::KW_STARTS_WITH),
CHECK_SEMANTIC_TYPE("starts with", TokenType::KW_STARTS_WITH),
CHECK_SEMANTIC_TYPE("ENDS WITH", TokenType::KW_ENDS_WITH),
CHECK_SEMANTIC_TYPE("Ends with", TokenType::KW_ENDS_WITH),
CHECK_SEMANTIC_TYPE("ends with", TokenType::KW_ENDS_WITH),
CHECK_SEMANTIC_TYPE("OUT", TokenType::KW_OUT),
CHECK_SEMANTIC_TYPE("Out", TokenType::KW_OUT),
CHECK_SEMANTIC_TYPE("out", TokenType::KW_OUT),
CHECK_SEMANTIC_TYPE("BOTH", TokenType::KW_BOTH),
CHECK_SEMANTIC_TYPE("Both", TokenType::KW_BOTH),
CHECK_SEMANTIC_TYPE("both", TokenType::KW_BOTH),
CHECK_SEMANTIC_TYPE("GRAPH", TokenType::KW_GRAPH),
CHECK_SEMANTIC_TYPE("Graph", TokenType::KW_GRAPH),
CHECK_SEMANTIC_TYPE("graph", TokenType::KW_GRAPH),
CHECK_SEMANTIC_TYPE("META", TokenType::KW_META),
CHECK_SEMANTIC_TYPE("Meta", TokenType::KW_META),
CHECK_SEMANTIC_TYPE("meta", TokenType::KW_META),
CHECK_SEMANTIC_TYPE("STORAGE", TokenType::KW_STORAGE),
CHECK_SEMANTIC_TYPE("Storage", TokenType::KW_STORAGE),
CHECK_SEMANTIC_TYPE("storage", TokenType::KW_STORAGE),
CHECK_SEMANTIC_TYPE("UNWIND", TokenType::KW_UNWIND),
CHECK_SEMANTIC_TYPE("unwind", TokenType::KW_UNWIND),
CHECK_SEMANTIC_TYPE("SKIP", TokenType::KW_SKIP),
CHECK_SEMANTIC_TYPE("skip", TokenType::KW_SKIP),
CHECK_SEMANTIC_TYPE("OPTIONAL", TokenType::KW_OPTIONAL),
CHECK_SEMANTIC_TYPE("optional", TokenType::KW_OPTIONAL),
CHECK_SEMANTIC_TYPE("DATE", TokenType::KW_DATE),
CHECK_SEMANTIC_TYPE("date", TokenType::KW_DATE),
CHECK_SEMANTIC_TYPE("TIME", TokenType::KW_TIME),
CHECK_SEMANTIC_TYPE("time", TokenType::KW_TIME),
CHECK_SEMANTIC_TYPE("DATETIME", TokenType::KW_DATETIME),
CHECK_SEMANTIC_TYPE("datetime", TokenType::KW_DATETIME),
CHECK_SEMANTIC_TYPE("GROUP", TokenType::KW_GROUP),
CHECK_SEMANTIC_TYPE("Group", TokenType::KW_GROUP),
CHECK_SEMANTIC_TYPE("group", TokenType::KW_GROUP),
CHECK_SEMANTIC_TYPE("GROUPS", TokenType::KW_GROUPS),
CHECK_SEMANTIC_TYPE("Groups", TokenType::KW_GROUPS),
CHECK_SEMANTIC_TYPE("groups", TokenType::KW_GROUPS),
CHECK_SEMANTIC_TYPE("ZONE", TokenType::KW_ZONE),
CHECK_SEMANTIC_TYPE("Zone", TokenType::KW_ZONE),
CHECK_SEMANTIC_TYPE("zone", TokenType::KW_ZONE),
CHECK_SEMANTIC_TYPE("ZONES", TokenType::KW_ZONES),
CHECK_SEMANTIC_TYPE("Zones", TokenType::KW_ZONES),
CHECK_SEMANTIC_TYPE("zones", TokenType::KW_ZONES),
CHECK_SEMANTIC_TYPE("INTO", TokenType::KW_INTO),
CHECK_SEMANTIC_TYPE("Into", TokenType::KW_INTO),
CHECK_SEMANTIC_TYPE("into", TokenType::KW_INTO),
CHECK_SEMANTIC_TYPE("STATS", TokenType::KW_STATS),
CHECK_SEMANTIC_TYPE("Stats", TokenType::KW_STATS),
CHECK_SEMANTIC_TYPE("stats", TokenType::KW_STATS),
CHECK_SEMANTIC_TYPE("ANY", TokenType::KW_ANY),
CHECK_SEMANTIC_TYPE("any", TokenType::KW_ANY),
CHECK_SEMANTIC_TYPE("SINGLE", TokenType::KW_SINGLE),
CHECK_SEMANTIC_TYPE("single", TokenType::KW_SINGLE),
CHECK_SEMANTIC_TYPE("NONE", TokenType::KW_NONE),
CHECK_SEMANTIC_TYPE("none", TokenType::KW_NONE),
CHECK_SEMANTIC_TYPE("CASE", TokenType::KW_CASE),
CHECK_SEMANTIC_TYPE("case", TokenType::KW_CASE),
CHECK_SEMANTIC_TYPE("WHEN", TokenType::KW_WHEN),
CHECK_SEMANTIC_TYPE("when", TokenType::KW_WHEN),
CHECK_SEMANTIC_TYPE("THEN", TokenType::KW_THEN),
CHECK_SEMANTIC_TYPE("then", TokenType::KW_THEN),
CHECK_SEMANTIC_TYPE("ELSE", TokenType::KW_ELSE),
CHECK_SEMANTIC_TYPE("else", TokenType::KW_ELSE),
CHECK_SEMANTIC_TYPE("END", TokenType::KW_END),
CHECK_SEMANTIC_TYPE("end", TokenType::KW_END),
CHECK_SEMANTIC_TYPE("REDUCE", TokenType::KW_REDUCE),
CHECK_SEMANTIC_TYPE("reduce", TokenType::KW_REDUCE),
CHECK_SEMANTIC_TYPE("SESSIONS", TokenType::KW_SESSIONS),
CHECK_SEMANTIC_TYPE("Sessions", TokenType::KW_SESSIONS),
CHECK_SEMANTIC_TYPE("sessions", TokenType::KW_SESSIONS),
CHECK_SEMANTIC_TYPE("SESSION", TokenType::KW_SESSION),
CHECK_SEMANTIC_TYPE("Session", TokenType::KW_SESSION),
CHECK_SEMANTIC_TYPE("session", TokenType::KW_SESSION),
CHECK_SEMANTIC_TYPE("QUERY", TokenType::KW_QUERY),
CHECK_SEMANTIC_TYPE("Query", TokenType::KW_QUERY),
CHECK_SEMANTIC_TYPE("query", TokenType::KW_QUERY),
CHECK_SEMANTIC_TYPE("QUERIES", TokenType::KW_QUERIES),
CHECK_SEMANTIC_TYPE("Queries", TokenType::KW_QUERIES),
CHECK_SEMANTIC_TYPE("queries", TokenType::KW_QUERIES),
CHECK_SEMANTIC_TYPE("KILL", TokenType::KW_KILL),
CHECK_SEMANTIC_TYPE("Kill", TokenType::KW_KILL),
CHECK_SEMANTIC_TYPE("kill", TokenType::KW_KILL),
CHECK_SEMANTIC_TYPE("TOP", TokenType::KW_TOP),
CHECK_SEMANTIC_TYPE("Top", TokenType::KW_TOP),
CHECK_SEMANTIC_TYPE("top", TokenType::KW_TOP),
CHECK_SEMANTIC_TYPE("_type", TokenType::TYPE_PROP),
CHECK_SEMANTIC_TYPE("_id", TokenType::ID_PROP),
CHECK_SEMANTIC_TYPE("_src", TokenType::SRC_ID_PROP),
CHECK_SEMANTIC_TYPE("_dst", TokenType::DST_ID_PROP),
CHECK_SEMANTIC_TYPE("_rank", TokenType::RANK_PROP),
CHECK_SEMANTIC_VALUE("TRUE", TokenType::BOOL, true),
CHECK_SEMANTIC_VALUE("true", TokenType::BOOL, true),
CHECK_SEMANTIC_VALUE("FALSE", TokenType::BOOL, false),
CHECK_SEMANTIC_VALUE("false", TokenType::BOOL, false),
CHECK_SEMANTIC_VALUE("$var", TokenType::VARIABLE, "var"),
CHECK_SEMANTIC_VALUE("$var123", TokenType::VARIABLE, "var123"),
CHECK_SEMANTIC_VALUE("label", TokenType::LABEL, "label"),
CHECK_SEMANTIC_VALUE("label123", TokenType::LABEL, "label123"),
CHECK_SEMANTIC_VALUE("123", TokenType::INTEGER, 123),
CHECK_SEMANTIC_VALUE("0x123", TokenType::INTEGER, 0x123),
CHECK_SEMANTIC_VALUE("0xdeadbeef", TokenType::INTEGER, 0xdeadbeef),
CHECK_SEMANTIC_VALUE("0123", TokenType::INTEGER, 0123),
CHECK_SEMANTIC_VALUE("123.", TokenType::DOUBLE, 123.),
CHECK_SEMANTIC_VALUE(".123", TokenType::DOUBLE, 0.123),
CHECK_SEMANTIC_VALUE("123.456", TokenType::DOUBLE, 123.456),
CHECK_SEMANTIC_VALUE("0x7FFFFFFFFFFFFFFF", TokenType::INTEGER, 0x7FFFFFFFFFFFFFFFL),
CHECK_SEMANTIC_VALUE("0x007FFFFFFFFFFFFFFF", TokenType::INTEGER, 0x007FFFFFFFFFFFFFFFL),
CHECK_SEMANTIC_VALUE("9223372036854775807", TokenType::INTEGER, 9223372036854775807L),
CHECK_SEMANTIC_VALUE("00777777777777777777777", TokenType::INTEGER, 00777777777777777777777),
CHECK_LEXICAL_ERROR("9223372036854775809"),
CHECK_LEXICAL_ERROR("0x8000000000000001"),
CHECK_LEXICAL_ERROR("001000000000000000000001"),
// TODO(dutor) It's too tedious to paste an overflowed double number here,
// thus we rely on `folly::to<double>' to cover those cases for us.
CHECK_SEMANTIC_VALUE("127.0.0.1", TokenType::IPV4, "127.0.0.1"),
CHECK_SEMANTIC_VALUE("\"Hello\"", TokenType::STRING, "Hello"),
CHECK_SEMANTIC_VALUE("\"Hello\\\\\"", TokenType::STRING, "Hello\\"),
CHECK_SEMANTIC_VALUE("\"He\\nllo\"", TokenType::STRING, "He\nllo"),
CHECK_SEMANTIC_VALUE("\"He\\\nllo\"", TokenType::STRING, "He\nllo"),
CHECK_SEMANTIC_VALUE("\"\\\"Hello\\\"\"", TokenType::STRING, "\"Hello\""),
CHECK_SEMANTIC_VALUE("'Hello'", TokenType::STRING, "Hello"),
CHECK_SEMANTIC_VALUE("'\"Hello\"'", TokenType::STRING, "\"Hello\""),
CHECK_SEMANTIC_VALUE("'\\'Hello\\''", TokenType::STRING, "'Hello'"),
      // escaped normal characters
CHECK_SEMANTIC_VALUE("\"Hell\\o\"", TokenType::STRING, "Hello"),
CHECK_SEMANTIC_VALUE("\"Hell\\\\o\"", TokenType::STRING, "Hell\\o"),
CHECK_SEMANTIC_VALUE("\"Hell\\\\\\o\"", TokenType::STRING, "Hell\\o"),
CHECK_SEMANTIC_VALUE("\"\\110ello\"", TokenType::STRING, "Hello"),
CHECK_SEMANTIC_VALUE("\"\110ello\"", TokenType::STRING, "Hello"),
CHECK_SEMANTIC_VALUE("\"\110 \"", TokenType::STRING, "H "),
CHECK_SEMANTIC_VALUE("\"\\110 \"", TokenType::STRING, "H "),
CHECK_SEMANTIC_VALUE("\"\\\110 \"", TokenType::STRING, "H "),
CHECK_SEMANTIC_VALUE("\"\\\\110 \"", TokenType::STRING, "\\110 "),
CHECK_SEMANTIC_VALUE("\"\\\\\110 \"", TokenType::STRING, "\\H "),
CHECK_SEMANTIC_VALUE("\"\\\\\\110 \"", TokenType::STRING, "\\H "),
CHECK_SEMANTIC_VALUE("\"\\\\\\\110 \"", TokenType::STRING, "\\H "),
CHECK_SEMANTIC_VALUE("\"\\\\\\\\110 \"", TokenType::STRING, "\\\\110 "),
CHECK_SEMANTIC_VALUE("\"\\\\\\\\\110 \"", TokenType::STRING, "\\\\H "),
CHECK_SEMANTIC_VALUE("\"己所不欲,勿施于人\"", TokenType::STRING, "己所不欲,勿施于人"),
};
#undef CHECK_SEMANTIC_TYPE
#undef CHECK_SEMANTIC_VALUE
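    // Custom read callback for the scanner: copies `stream` into `buf` in
    // chunks; `copied` is static so the read offset persists across calls.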
auto input = [&](char *buf, int maxSize) {
static int copied = 0;
int left = stream.size() - copied;
if (left == 0) {
return 0;
}
int n = left < maxSize ? left : maxSize;
::memcpy(buf, &stream[copied], n);
copied += n;
return n;
};
scanner.setReadBuffer(input);
for (auto &item : validators) {
ASSERT_TRUE(item());
}
}
} // namespace nebula
| 1 | 31,980 | I'm not confident about this... | vesoft-inc-nebula | cpp |
@@ -34,6 +34,10 @@ import (
"github.com/jetstack/cert-manager/pkg/util/pki"
)
+var (
+ certificateRequestGvk = v1alpha1.SchemeGroupVersion.WithKind("CertificateRequest")
+)
+
func (c *Controller) Sync(ctx context.Context, cr *v1alpha1.CertificateRequest) (err error) {
c.metrics.IncrementSyncCallCount(ControllerName)
| 1 | /*
Copyright 2019 The Jetstack cert-manager contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package certificaterequests
import (
"context"
"encoding/json"
"fmt"
"reflect"
"github.com/kr/pretty"
k8sErrors "k8s.io/apimachinery/pkg/api/errors"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
apiutil "github.com/jetstack/cert-manager/pkg/api/util"
"github.com/jetstack/cert-manager/pkg/apis/certmanager"
"github.com/jetstack/cert-manager/pkg/apis/certmanager/v1alpha1"
"github.com/jetstack/cert-manager/pkg/apis/certmanager/validation"
logf "github.com/jetstack/cert-manager/pkg/logs"
"github.com/jetstack/cert-manager/pkg/util/pki"
)
func (c *Controller) Sync(ctx context.Context, cr *v1alpha1.CertificateRequest) (err error) {
c.metrics.IncrementSyncCallCount(ControllerName)
log := logf.FromContext(ctx)
dbg := log.V(logf.DebugLevel)
if !(cr.Spec.IssuerRef.Group == "" || cr.Spec.IssuerRef.Group == certmanager.GroupName) {
dbg.Info("certificate request issuerRef group does not match certmanager group so skipping processing")
return nil
}
switch apiutil.CertificateRequestReadyReason(cr) {
case v1alpha1.CertificateRequestReasonFailed:
dbg.Info("certificate request Ready condition failed so skipping processing")
return
case v1alpha1.CertificateRequestReasonIssued:
dbg.Info("certificate request Ready condition true so skipping processing")
return
}
crCopy := cr.DeepCopy()
defer func() {
if _, saveErr := c.updateCertificateRequestStatus(ctx, cr, crCopy); saveErr != nil {
err = utilerrors.NewAggregate([]error{saveErr, err})
}
}()
dbg.Info("fetching issuer object referenced by CertificateRequest")
issuerObj, err := c.helper.GetGenericIssuer(crCopy.Spec.IssuerRef, crCopy.Namespace)
if k8sErrors.IsNotFound(err) {
c.reporter.Pending(crCopy, err, "IssuerNotFound",
fmt.Sprintf("Referenced %q not found", apiutil.IssuerKind(crCopy.Spec.IssuerRef)))
return nil
}
if err != nil {
log.Error(err, "failed to get issuer")
return err
}
log = logf.WithRelatedResource(log, issuerObj)
dbg.Info("ensuring issuer type matches this controller")
issuerType, err := apiutil.NameForIssuer(issuerObj)
if err != nil {
c.reporter.Pending(crCopy, err, "IssuerTypeMissing",
"Missing issuer type")
return nil
}
// This CertificateRequest is not meant for us, ignore
if issuerType != c.issuerType {
c.log.WithValues(
logf.RelatedResourceKindKey, issuerType,
).V(5).Info("issuer reference type does not match controller resource kind, ignoring")
return nil
}
dbg.Info("validating CertificateRequest resource object")
el := validation.ValidateCertificateRequest(crCopy)
if len(el) > 0 {
c.reporter.Failed(crCopy, el.ToAggregate(), "BadConfig",
"Resource validation failed")
return nil
}
if len(crCopy.Status.Certificate) > 0 {
dbg.Info("certificate field is already set in status so skipping processing")
return nil
}
// TODO: Metrics??
dbg.Info("invoking sign function as existing certificate does not exist")
// Attempt to call the Sign function on our issuer
resp, err := c.issuer.Sign(ctx, crCopy, issuerObj)
if err != nil {
log.Error(err, "error issuing certificate request")
return err
}
// If the issuer has not returned any data we may be pending or failed. The
// underlying issuer will have set the condition of pending or failed and we
// should potentially wait for a re-sync.
if resp == nil {
return nil
}
	// Update the status with the newly returned response.
crCopy.Status.Certificate = resp.Certificate
crCopy.Status.CA = resp.CA
	// Fail the request if the returned certificate does not decode.
_, err = pki.DecodeX509CertificateBytes(crCopy.Status.Certificate)
if err != nil {
c.reporter.Failed(crCopy, err, "DecodeError", "Failed to decode returned certificate")
return nil
}
// Set condition to Ready.
c.reporter.Ready(crCopy)
return nil
}
func (c *Controller) updateCertificateRequestStatus(ctx context.Context, old, new *v1alpha1.CertificateRequest) (*v1alpha1.CertificateRequest, error) {
log := logf.FromContext(ctx, "updateStatus")
oldBytes, _ := json.Marshal(old.Status)
newBytes, _ := json.Marshal(new.Status)
if reflect.DeepEqual(oldBytes, newBytes) {
return nil, nil
}
log.V(logf.DebugLevel).Info("updating resource due to change in status", "diff", pretty.Diff(string(oldBytes), string(newBytes)))
// TODO: replace Update call with UpdateStatus. This requires a custom API
// server with the /status subresource enabled and/or subresource support
// for CRDs (https://github.com/kubernetes/kubernetes/issues/38113)
return c.cmClient.CertmanagerV1alpha1().CertificateRequests(new.Namespace).Update(new)
}
| 1 | 18,255 | There is also `v1alpha1.CertificateRequestKind`, I think? It could replace the "CertificateRequest" string literal here. | jetstack-cert-manager | go |
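A minimal sketch of the reviewer's suggestion, assuming the `v1alpha1.CertificateRequestKind` constant exists as the reviewer suspects (hypothetical until checked against the package):

var (
	// Hypothetical: reuse the exported kind constant rather than repeating
	// the "CertificateRequest" string literal, if the package defines it.
	certificateRequestGvk = v1alpha1.SchemeGroupVersion.WithKind(v1alpha1.CertificateRequestKind)
)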
@@ -49,6 +49,10 @@ func SenderJob(name string, envVars []corev1.EnvVar) *batchv1.Job {
return baseJob(name, "sender", envVars)
}
+func BrokerSenderJob(name string, envVars []corev1.EnvVar) *batchv1.Job {
+ return baseJob(name, "sender_gcpbroker", envVars)
+}
+
// baseJob will return a base Job that has imageName and envVars set for its PodTemplateSpec.
func baseJob(name, imageName string, envVars []corev1.EnvVar) *batchv1.Job {
return &batchv1.Job{ | 1 | /*
Copyright 2020 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package resources
import (
"github.com/golang/protobuf/proto"
batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
pkgTest "knative.dev/pkg/test"
)
func PubSubTargetJob(name string, envVars []corev1.EnvVar) *batchv1.Job {
return baseJob(name, "pubsub_target", envVars)
}
func StorageTargetJob(name string, envVars []corev1.EnvVar) *batchv1.Job {
return baseJob(name, "storage_target", envVars)
}
func AuditLogsTargetJob(name string, envVars []corev1.EnvVar) *batchv1.Job {
return baseJob(name, "auditlogs_target", envVars)
}
func SchedulerTargetJob(name string, envVars []corev1.EnvVar) *batchv1.Job {
return baseJob(name, "scheduler_target", envVars)
}
func TargetJob(name string, envVars []corev1.EnvVar) *batchv1.Job {
return baseJob(name, "target", envVars)
}
func SenderJob(name string, envVars []corev1.EnvVar) *batchv1.Job {
return baseJob(name, "sender", envVars)
}
// baseJob will return a base Job that has imageName and envVars set for its PodTemplateSpec.
func baseJob(name, imageName string, envVars []corev1.EnvVar) *batchv1.Job {
return &batchv1.Job{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Spec: batchv1.JobSpec{
Parallelism: proto.Int32(1),
BackoffLimit: proto.Int32(0),
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"e2etest": string(uuid.NewUUID()),
},
},
Spec: corev1.PodSpec{
Containers: []corev1.Container{{
Name: name,
Image: pkgTest.ImagePath(imageName),
ImagePullPolicy: corev1.PullAlways,
Env: envVars,
}},
RestartPolicy: corev1.RestartPolicyNever,
},
},
},
}
}
| 1 | 16,205 | Can we rename it to make it less confusing? Instead of "sender-gcpbroker", maybe rename it to "retryable-sender". | google-knative-gcp | go |
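A sketch of the rename the reviewer proposes; the identifier and image name below are hypothetical stand-ins pending the actual change (the proposed name implies retry semantics, which this sketch assumes):

// RetryableSenderJob returns a Job that sends events through the GCP broker.
// Hypothetical rename of BrokerSenderJob per the review; the
// "retryable-sender" image name assumes the test image is renamed to match.
func RetryableSenderJob(name string, envVars []corev1.EnvVar) *batchv1.Job {
	return baseJob(name, "retryable-sender", envVars)
}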
@@ -136,9 +136,8 @@ public class UnboundPredicate<T> extends Predicate<T, UnboundTerm<T>> implements
Literal<T> lit = literal().to(boundTerm.type());
if (lit == null) {
- throw new ValidationException(String.format(
- "Invalid value for conversion to type %s: %s (%s)",
- boundTerm.type(), literal().value(), literal().value().getClass().getName()));
+ throw new ValidationException("Invalid value for conversion to type %s: %s (%s)",
+ boundTerm.type(), literal().value(), literal().value().getClass().getName());
} else if (lit == Literals.aboveMax()) {
switch (op()) { | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.expressions;
import java.util.List;
import java.util.Set;
import org.apache.iceberg.exceptions.ValidationException;
import org.apache.iceberg.relocated.com.google.common.base.Joiner;
import org.apache.iceberg.relocated.com.google.common.base.Preconditions;
import org.apache.iceberg.relocated.com.google.common.collect.Iterables;
import org.apache.iceberg.relocated.com.google.common.collect.Lists;
import org.apache.iceberg.relocated.com.google.common.collect.Sets;
import org.apache.iceberg.types.Types.StructType;
import org.apache.iceberg.util.CharSequenceSet;
public class UnboundPredicate<T> extends Predicate<T, UnboundTerm<T>> implements Unbound<T, Expression> {
private static final Joiner COMMA = Joiner.on(", ");
private final List<Literal<T>> literals;
UnboundPredicate(Operation op, UnboundTerm<T> term, T value) {
this(op, term, Literals.from(value));
}
UnboundPredicate(Operation op, UnboundTerm<T> term) {
super(op, term);
this.literals = null;
}
UnboundPredicate(Operation op, UnboundTerm<T> term, Literal<T> lit) {
super(op, term);
this.literals = Lists.newArrayList(lit);
}
UnboundPredicate(Operation op, UnboundTerm<T> term, Iterable<T> values) {
super(op, term);
this.literals = Lists.newArrayList(Iterables.transform(values, Literals::from));
}
private UnboundPredicate(Operation op, UnboundTerm<T> term, List<Literal<T>> literals) {
super(op, term);
this.literals = literals;
}
@Override
public NamedReference<?> ref() {
return term().ref();
}
@Override
public Expression negate() {
return new UnboundPredicate<>(op().negate(), term(), literals);
}
public Literal<T> literal() {
Preconditions.checkArgument(op() != Operation.IN && op() != Operation.NOT_IN,
"%s predicate cannot return a literal", op());
return literals == null ? null : Iterables.getOnlyElement(literals);
}
public List<Literal<T>> literals() {
return literals;
}
/**
* Bind this UnboundPredicate, defaulting to case sensitive mode.
*
* Access modifier is package-private, to only allow use from existing tests.
*
* @param struct The {@link StructType struct type} to resolve references by name.
* @return an {@link Expression}
* @throws ValidationException if literals do not match bound references, or if comparison on expression is invalid
*/
Expression bind(StructType struct) {
return bind(struct, true);
}
/**
* Bind this UnboundPredicate.
*
* @param struct The {@link StructType struct type} to resolve references by name.
* @param caseSensitive A boolean flag to control whether the bind should enforce case sensitivity.
* @return an {@link Expression}
* @throws ValidationException if literals do not match bound references, or if comparison on expression is invalid
*/
@Override
public Expression bind(StructType struct, boolean caseSensitive) {
BoundTerm<T> bound = term().bind(struct, caseSensitive);
if (literals == null) {
return bindUnaryOperation(bound);
}
if (op() == Operation.IN || op() == Operation.NOT_IN) {
return bindInOperation(bound);
}
return bindLiteralOperation(bound);
}
private Expression bindUnaryOperation(BoundTerm<T> boundTerm) {
switch (op()) {
case IS_NULL:
if (boundTerm.ref().field().isRequired()) {
return Expressions.alwaysFalse();
}
return new BoundUnaryPredicate<>(Operation.IS_NULL, boundTerm);
case NOT_NULL:
if (boundTerm.ref().field().isRequired()) {
return Expressions.alwaysTrue();
}
return new BoundUnaryPredicate<>(Operation.NOT_NULL, boundTerm);
default:
throw new ValidationException("Operation must be IS_NULL or NOT_NULL");
}
}
private Expression bindLiteralOperation(BoundTerm<T> boundTerm) {
Literal<T> lit = literal().to(boundTerm.type());
if (lit == null) {
throw new ValidationException(String.format(
"Invalid value for conversion to type %s: %s (%s)",
boundTerm.type(), literal().value(), literal().value().getClass().getName()));
} else if (lit == Literals.aboveMax()) {
switch (op()) {
case LT:
case LT_EQ:
case NOT_EQ:
return Expressions.alwaysTrue();
case GT:
case GT_EQ:
case EQ:
return Expressions.alwaysFalse();
}
} else if (lit == Literals.belowMin()) {
switch (op()) {
case GT:
case GT_EQ:
case NOT_EQ:
return Expressions.alwaysTrue();
case LT:
case LT_EQ:
case EQ:
return Expressions.alwaysFalse();
}
}
// TODO: translate truncate(col) == value to startsWith(value)
return new BoundLiteralPredicate<>(op(), boundTerm, lit);
}
private Expression bindInOperation(BoundTerm<T> boundTerm) {
List<Literal<T>> convertedLiterals = Lists.newArrayList(Iterables.filter(
Lists.transform(literals, lit -> {
Literal<T> converted = lit.to(boundTerm.type());
ValidationException.check(converted != null,
"Invalid value for conversion to type %s: %s (%s)", boundTerm.type(), lit, lit.getClass().getName());
return converted;
}),
lit -> lit != Literals.aboveMax() && lit != Literals.belowMin()));
if (convertedLiterals.isEmpty()) {
switch (op()) {
case IN:
return Expressions.alwaysFalse();
case NOT_IN:
return Expressions.alwaysTrue();
default:
throw new ValidationException("Operation must be IN or NOT_IN");
}
}
Set<T> literalSet = setOf(convertedLiterals);
if (literalSet.size() == 1) {
switch (op()) {
case IN:
return new BoundLiteralPredicate<>(Operation.EQ, boundTerm, Iterables.get(convertedLiterals, 0));
case NOT_IN:
return new BoundLiteralPredicate<>(Operation.NOT_EQ, boundTerm, Iterables.get(convertedLiterals, 0));
default:
throw new ValidationException("Operation must be IN or NOT_IN");
}
}
return new BoundSetPredicate<>(op(), boundTerm, literalSet);
}
@Override
public String toString() {
switch (op()) {
case IS_NULL:
return "is_null(" + term() + ")";
case NOT_NULL:
return "not_null(" + term() + ")";
case LT:
return term() + " < " + literal();
case LT_EQ:
return term() + " <= " + literal();
case GT:
return term() + " > " + literal();
case GT_EQ:
return term() + " >= " + literal();
case EQ:
return term() + " == " + literal();
case NOT_EQ:
return term() + " != " + literal();
case STARTS_WITH:
return term() + " startsWith \"" + literal() + "\"";
case IN:
return term() + " in (" + COMMA.join(literals()) + ")";
case NOT_IN:
return term() + " not in (" + COMMA.join(literals()) + ")";
default:
return "Invalid predicate: operation = " + op();
}
}
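  // Builds the value set for a bound set predicate; CharSequence values are
  // collected into a CharSequenceSet so strings compare by content.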
@SuppressWarnings("unchecked")
static <T> Set<T> setOf(Iterable<Literal<T>> literals) {
Literal<T> lit = Iterables.get(literals, 0);
if (lit instanceof Literals.StringLiteral && lit.value() instanceof CharSequence) {
Iterable<T> values = Iterables.transform(literals, Literal::value);
Iterable<CharSequence> charSeqs = Iterables.transform(values, val -> (CharSequence) val);
return (Set<T>) CharSequenceSet.of(charSeqs);
} else {
return Sets.newHashSet(Iterables.transform(literals, Literal::value));
}
}
}
| 1 | 24,332 | Looking at the definition of the `literal()` function in this class, it seems possible for it to return null. I guess it's not a concern, since we would get an NPE on the `.to` call at line 136 before even reaching the part that calls `literal().value()`, but I thought I'd bring it up. Perhaps something to follow up on in another issue, or possibly I just missed the workflow that guarantees `literal()` is non-null by the time `bindLiteralOperation` is called. | apache-iceberg | java |
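A sketch of the defensive check the reviewer hints at, using the `ValidationException.check` helper already present in this file; whether `literal()` can actually be null here depends on how the predicate was constructed:

private Expression bindLiteralOperation(BoundTerm<T> boundTerm) {
  // Hypothetical guard: fail with a clear message instead of an NPE when the
  // predicate was constructed without a literal.
  ValidationException.check(literal() != null,
      "Cannot bind %s predicate: missing required literal", op());
  Literal<T> lit = literal().to(boundTerm.type());
  // ... remainder unchanged
}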
@@ -47,6 +47,19 @@ class OPFMetricsTest(unittest.TestCase):
< OPFMetricsTest.DELTA)
+ def testNRMSE(self):
+ nrmse = getModule(MetricSpec("nrmse", None, None,
+{"verbosity" : OPFMetricsTest.VERBOSITY}))
+ gt = [9, 4, 5, 6]
+ p = [0, 13, 8, 3]
+ for i in xrange(len(gt)):
+ nrmse.addInstance(gt[i], p[i])
+ target = 1.342
+
+ self.assertTrue(abs(nrmse.getMetric()["value"]-target)\
+< OPFMetricsTest.DELTA)
+
+
def testWindowedRMSE(self):
wrmse = getModule(MetricSpec("rmse", None, None,
{"verbosity": OPFMetricsTest.VERBOSITY, "window":3})) | 1 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import numpy as np
import unittest2 as unittest
from nupic.frameworks.opf.metrics import getModule, MetricSpec, MetricMulti
class OPFMetricsTest(unittest.TestCase):
DELTA = 0.01
VERBOSITY = 0
def testRMSE(self):
rmse = getModule(MetricSpec("rmse", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY}))
gt = [9, 4, 5, 6]
p = [0, 13, 8, 3]
for i in xrange(len(gt)):
rmse.addInstance(gt[i], p[i])
target = 6.71
self.assertTrue(abs(rmse.getMetric()["value"]-target)\
< OPFMetricsTest.DELTA)
def testWindowedRMSE(self):
wrmse = getModule(MetricSpec("rmse", None, None,
{"verbosity": OPFMetricsTest.VERBOSITY, "window":3}))
gt = [9, 4, 4, 100, 44]
p = [0, 13, 4, 6, 7]
for gv, pv in zip(gt, p):
wrmse.addInstance(gv, pv)
target = 58.324
self.assertTrue (abs(wrmse.getMetric()["value"]-target)\
< OPFMetricsTest.DELTA)
def testAAE(self):
aae = getModule(MetricSpec("aae", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY}))
gt = [9, 4, 5, 6]
p = [0, 13, 8, 3]
for i in xrange(len(gt)):
aae.addInstance(gt[i], p[i])
target = 6.0
self.assertTrue(abs(aae.getMetric()["value"]-target) < OPFMetricsTest.DELTA)
def testTrivialAAE(self):
trivialaae = getModule(MetricSpec("trivial", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY,"errorMetric":"aae"}))
gt = [i/4+1 for i in range(100)]
p = [i for i in range(100)]
for i in xrange(len(gt)):
trivialaae.addInstance(gt[i], p[i])
target = .25
self.assertTrue(abs(trivialaae.getMetric()["value"]-target) \
< OPFMetricsTest.DELTA)
def testTrivialAccuracy(self):
trivialaccuracy = getModule(MetricSpec("trivial", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY,"errorMetric":"acc"}))
gt = [str(i/4+1) for i in range(100)]
p = [str(i) for i in range(100)]
for i in xrange(len(gt)):
trivialaccuracy.addInstance(gt[i], p[i])
target = .75
self.assertTrue(abs(trivialaccuracy.getMetric()["value"]-target) \
< OPFMetricsTest.DELTA)
  def testTrivialAverageError(self):
    """Trivial Average Error metric test"""
trivialAveErr = getModule(MetricSpec("trivial", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY,"errorMetric":"avg_err"}))
gt = [str(i/4+1) for i in range(100)]
p = [str(i) for i in range(100)]
for i in xrange(len(gt)):
trivialAveErr.addInstance(gt[i], p[i])
target = .25
self.assertTrue(abs(trivialAveErr.getMetric()["value"]-target)\
< OPFMetricsTest.DELTA)
  def testWindowedTrivialAAE(self):
    """Windowed trivial AAE metric test"""
trivialaae = getModule(MetricSpec("trivial", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY, "window":100,"errorMetric":"aae"}))
gt = [i/4+1 for i in range(1000)]
p = [i for i in range(1000)]
for i in xrange(len(gt)):
trivialaae.addInstance(gt[i], p[i])
target = .25
self.assertTrue(abs(trivialaae.getMetric()["value"]-target) \
< OPFMetricsTest.DELTA)
def testWindowedTrivialAccuracy(self):
"""Trivial Accuracy metric test"""
trivialaccuracy = getModule(MetricSpec("trivial", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY, "window":100,"errorMetric":"acc"}))
gt = [str(i/4+1) for i in range(1000)]
p = [str(i) for i in range(1000)]
for i in xrange(len(gt)):
trivialaccuracy.addInstance(gt[i], p[i])
target = .75
self.assertTrue(abs(trivialaccuracy.getMetric()["value"]-target)\
< OPFMetricsTest.DELTA)
def testWindowedTrivialAverageError (self):
"""Trivial Average Error metric test"""
trivialAveErr = getModule(MetricSpec("trivial", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY, "window":100,"errorMetric":"avg_err"}))
gt = [str(i/4+1) for i in range(500, 1000)]
p = [str(i) for i in range(1000)]
for i in xrange(len(gt)):
trivialAveErr.addInstance(gt[i], p[i])
target = .25
self.assertTrue(abs(trivialAveErr.getMetric()["value"]-target)\
< OPFMetricsTest.DELTA)
def testMultistepAAE(self):
"""Multistep AAE metric test"""
msp = getModule(MetricSpec("multiStep", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY, "window":100, "errorMetric":"aae",
"steps": 3}))
# Make each ground truth 1 greater than the prediction
gt = [i+1 for i in range(100)]
p = [{3: {i: .7, 5: 0.3}} for i in range(100)]
for i in xrange(len(gt)):
msp.addInstance(gt[i], p[i])
target = 1
self.assertTrue(abs(msp.getMetric()["value"]-target) < OPFMetricsTest.DELTA)
def testMultistepAAEMultipleSteps(self):
"""Multistep AAE metric test, predicting 2 different step sizes"""
msp = getModule(MetricSpec("multiStep", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY, "window":100, "errorMetric":"aae",
"steps": [3,6]}))
# Make each 3 step prediction +1 over ground truth and each 6 step
# prediction +0.5 over ground truth
gt = [i for i in range(100)]
p = [{3: {i+1: .7, 5: 0.3},
6: {i+0.5: .7, 5: 0.3}} for i in range(100)]
for i in xrange(len(gt)):
msp.addInstance(gt[i], p[i])
target = 0.75 # average of +1 error and 0.5 error
self.assertTrue(abs(msp.getMetric()["value"]-target) < OPFMetricsTest.DELTA)
def testMultistepProbability(self):
"""Multistep with probabilities metric test"""
msp = getModule(MetricSpec("multiStepProbability", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY, "window":100, "errorMetric":"aae",
"steps":3}))
gt = [5 for i in range(1000)]
p = [{3: {i: .3, 5: .7}} for i in range(1000)]
for i in xrange(len(gt)):
msp.addInstance(gt[i], p[i])
#((999-5)(1000-5)/2-(899-5)(900-5)/2)*.3/100
target = 283.35
self.assertTrue(abs(msp.getMetric()["value"]-target) < OPFMetricsTest.DELTA)
def testMultistepProbabilityMultipleSteps(self):
"""Multistep with probabilities metric test, predicting 2 different step
sizes"""
msp = getModule(MetricSpec("multiStepProbability", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY, "window":100,
"errorMetric":"aae", "steps": [1,3]}))
gt = [5 for i in range(1000)]
p = [{3: {i: .3, 5: .7},
1: {5: 1.0}} for i in range(1000)]
for i in xrange(len(gt)):
msp.addInstance(gt[i], p[i])
#(((999-5)(1000-5)/2-(899-5)(900-5)/2)*.3/100) / 2
# / 2 because the 1-step prediction is 100% accurate
target = 283.35/2
self.assertTrue(abs(msp.getMetric()["value"]-target) < OPFMetricsTest.DELTA)
def testMovingMeanAbsoluteError(self):
"""Moving mean Average Absolute Error metric test"""
movingMeanAAE = getModule(MetricSpec("moving_mean", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY, "window":100, "mean_window":3,
"errorMetric":"aae"}))
gt = [i for i in range(890)]
gt.extend([2*i for i in range(110)])
p = [i for i in range(1000)]
res = []
for i in xrange(len(gt)):
movingMeanAAE.addInstance(gt[i], p[i])
res.append(movingMeanAAE.getMetric()["value"])
self.assertTrue(max(res[1:890]) == 2.0)
self.assertTrue(min(res[891:])>=4.0)
target = 4.0
self.assertTrue(abs(movingMeanAAE.getMetric()["value"]-target)\
< OPFMetricsTest.DELTA)
def testMovingMeanRMSE(self):
"""Moving mean RMSE metric test"""
movingMeanRMSE = getModule(MetricSpec("moving_mean", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY, "window":100, "mean_window":3,
"errorMetric":"rmse"}))
gt = [i for i in range(890)]
gt.extend([2*i for i in range(110)])
p = [i for i in range(1000)]
res = []
for i in xrange(len(gt)):
movingMeanRMSE.addInstance(gt[i], p[i])
res.append(movingMeanRMSE.getMetric()["value"])
self.assertTrue(max(res[1:890]) == 2.0)
self.assertTrue(min(res[891:])>=4.0)
target = 4.0
self.assertTrue(abs(movingMeanRMSE.getMetric()["value"]-target) \
< OPFMetricsTest.DELTA)
def testMovingModeAverageError(self):
"""Moving mode Average Error metric test"""
movingModeAvgErr = getModule(MetricSpec("moving_mode", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY, "window":100, "mode_window":3,
"errorMetric":"avg_err"}))
    #Should initially asymptote to .5
#Then after 900 should go to 1.0 as the predictions will always be offset
gt = [i/4 for i in range(900)]
gt.extend([2*i/4 for i in range(100)])
p = [i for i in range(1000)]
res = []
for i in xrange(len(gt)):
movingModeAvgErr.addInstance(gt[i], p[i])
res.append(movingModeAvgErr.getMetric()["value"])
#Make sure that there is no point where the average error is >.5
self.assertTrue(max(res[1:890]) == .5)
#Make sure that after the statistics switch the error goes to 1.0
self.assertTrue(min(res[891:])>=.5)
#Make sure that the statistics change is still noticeable while it is
#in the window
self.assertTrue(res[998]<1.0)
target = 1.0
self.assertTrue(abs(movingModeAvgErr.getMetric()["value"]-target)\
< OPFMetricsTest.DELTA)
def testMovingModeAccuracy(self):
"""Moving mode Accuracy metric test"""
movingModeACC = getModule(MetricSpec("moving_mode", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY, "window":100, "mode_window":3,
"errorMetric":"acc"}))
#Should initially asymptote to .5
#Then after 900 should go to 0.0 as the predictions will always be offset
gt = [i/4 for i in range(900)]
gt.extend([2*i/4 for i in range(100)])
p = [i for i in range(1000)]
res = []
for i in xrange(len(gt)):
movingModeACC.addInstance(gt[i], p[i])
res.append(movingModeACC.getMetric()["value"])
#Make sure that there is no point where the average acc is <.5
self.assertTrue(min(res[1:899]) == .5)
#Make sure that after the statistics switch the acc goes to 0.0
self.assertTrue(max(res[900:])<=.5)
#Make sure that the statistics change is still noticeable while it
#is in the window
self.assertTrue(res[998]>0.0)
target = 0.0
self.assertTrue(abs(movingModeACC.getMetric()["value"]-target)\
< OPFMetricsTest.DELTA)
def testTwoGramScalars(self):
"""Two gram scalars test"""
oneGram = getModule(MetricSpec("two_gram", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY, \
"window":100, "predictionField":"test",
"errorMetric":"acc"}))
# Sequences of 0,1,2,3,4,0,1,2,3,4,...
encodings = [np.zeros(10) for i in range(5)]
for i in range(len(encodings)):
encoding = encodings[i]
encoding[i] = 1
gt = [i%5 for i in range(1000)]
res = []
for i in xrange(len(gt)):
if i == 20:
# Make sure we don"t barf with missing values
oneGram.addInstance(np.zeros(10), prediction=None,
record={"test":None})
else:
# Feed in next groundTruth
oneGram.addInstance(encodings[i%5], prediction=None,
record={"test":gt[i]})
res.append(oneGram.getMetric()["value"])
target = 1.0
self.assertTrue(abs(oneGram.getMetric()["value"]-target)\
< OPFMetricsTest.DELTA)
def testTwoGramScalarsStepsGreaterOne(self):
"""Two gram scalars test with step size other than 1"""
oneGram = getModule(MetricSpec("two_gram", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY,\
"window":100, "predictionField":"test",
"errorMetric":"acc", "steps": 2}))
# Sequences of 0,1,2,3,4,0,1,2,3,4,...
encodings = [np.zeros(10) for i in range(5)]
for i in range(len(encodings)):
encoding = encodings[i]
encoding[i] = 1
gt = [i%5 for i in range(1000)]
res = []
for i in xrange(len(gt)):
if i == 20:
# Make sure we don"t barf with missing values
oneGram.addInstance(np.zeros(10), prediction=None,
record={"test":None})
else:
# Feed in next groundTruth
oneGram.addInstance(encodings[i%5], prediction=None,
record={"test":gt[i]})
res.append(oneGram.getMetric()["value"])
target = 1.0
self.assertTrue(abs(oneGram.getMetric()["value"]-target) \
< OPFMetricsTest.DELTA)
def testTwoGramStrings(self):
"""One gram string test"""
oneGram = getModule(MetricSpec("two_gram", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY, "window":100, "errorMetric":"acc",
"predictionField":"test"}))
# Sequences of "0", "1", "2", "3", "4", "0", "1", ...
gt = [str(i%5) for i in range(1000)]
encodings = [np.zeros(10) for i in range(5)]
for i in range(len(encodings)):
encoding = encodings[i]
encoding[i] = 1
# Make every 5th element random
newElem = 100
for i in range(5, 1000, 5):
gt[i] = str(newElem)
newElem += 20
res = []
for i in xrange(len(gt)):
if i==20:
# Make sure we don"t barf with missing values
oneGram.addInstance(np.zeros(10), prediction=None,
record={"test":None})
else:
oneGram.addInstance(encodings[i%5], prediction=None,
record={"test":gt[i]})
res.append(oneGram.getMetric()["value"])
target = .8
self.assertTrue(abs(oneGram.getMetric()["value"]-target)\
< OPFMetricsTest.DELTA)
def testWindowedAAE(self):
"""Windowed AAE"""
waae = getModule(MetricSpec("aae", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY, "window":1}))
gt = [9, 4, 5, 6]
p = [0, 13, 8, 3]
for i in xrange(len(gt)):
waae.addInstance(gt[i], p[i])
target = 3.0
self.assertTrue( abs(waae.getMetric()["value"]-target) \
< OPFMetricsTest.DELTA, "Got %s" %waae.getMetric())
def testAccuracy(self):
"""Accuracy"""
acc = getModule(MetricSpec("acc", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY}))
gt = [0, 1, 2, 3, 4, 5]
p = [0, 1, 2, 4, 5, 6]
for i in xrange(len(gt)):
acc.addInstance(gt[i], p[i])
target = 0.5
self.assertTrue(abs(acc.getMetric()["value"]-target) < OPFMetricsTest.DELTA)
def testWindowedAccuracy(self):
"""Windowed accuracy"""
acc = getModule(MetricSpec("acc", None, None, \
{"verbosity" : OPFMetricsTest.VERBOSITY, "window":2}))
gt = [0, 1, 2, 3, 4, 5]
p = [0, 1, 2, 4, 5, 6]
for i in xrange(len(gt)):
acc.addInstance(gt[i], p[i])
target = 0.0
self.assertTrue(abs(acc.getMetric()["value"]-target) < OPFMetricsTest.DELTA)
def testAverageError(self):
"""Ave Error"""
err = getModule(MetricSpec("avg_err", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY}))
gt = [1, 1, 2, 3, 4, 5]
p = [0, 1, 2, 4, 5, 6]
for i in xrange(len(gt)):
err.addInstance(gt[i], p[i])
target = (2.0/3.0)
self.assertTrue(abs(err.getMetric()["value"]-target) < OPFMetricsTest.DELTA)
def testWindowedAverageError(self):
"""Windowed Ave Error"""
err = getModule(MetricSpec("avg_err", None, None, \
{"verbosity" : OPFMetricsTest.VERBOSITY, "window":2}))
gt = [0, 1, 2, 3, 4, 5]
p = [0, 1, 2, 4, 5, 6]
for i in xrange(len(gt)):
err.addInstance(gt[i], p[i])
target = 1.0
self.assertTrue(abs(err.getMetric()["value"]-target) < OPFMetricsTest.DELTA)
def testLongWindowRMSE(self):
"""RMSE"""
rmse = getModule(MetricSpec("rmse", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY, "window":100}))
gt = [9, 4, 5, 6]
p = [0, 13, 8, 3]
for i in xrange(len(gt)):
rmse.addInstance(gt[i], p[i])
target = 6.71
self.assertTrue(abs(rmse.getMetric()["value"]-target)\
< OPFMetricsTest.DELTA)
def testCustomErrorMetric(self):
customFunc = """def getError(pred,ground,tools):
return abs(pred-ground)"""
customEM = getModule(MetricSpec("custom_error_metric", None, None,
{"customFuncSource":customFunc, "errorWindow":3}))
gt = [9, 4, 5, 6]
p = [0, 13, 8, 3]
for i in xrange(len(gt)):
aggErr = customEM.addInstance(gt[i], p[i])
target = 5.0
delta = 0.001
    # ensure that addInstance returns the aggregate error - other
# uber metrics depend on this behavior.
self.assertEqual(aggErr, customEM.getMetric()["value"])
self.assertTrue(abs(customEM.getMetric()["value"]-target) < delta)
customFunc = """def getError(pred,ground,tools):
sum = 0
for i in range(min(3,tools.getBufferLen())):
sum+=abs(tools.getPrediction(i)-tools.getGroundTruth(i))
return sum/3"""
customEM = getModule(MetricSpec("custom_error_metric", None, None,
{"customFuncSource":customFunc}))
gt = [9, 4, 5, 6]
p = [0, 13, 8, 3]
for i in xrange(len(gt)):
customEM.addInstance(gt[i], p[i])
target = 5.0
delta = 0.001
self.assertTrue(abs(customEM.getMetric()["value"]-target) < delta)
# Test custom error metric helper functions
# Test getPrediction
# Not-Windowed
storeWindow=4
failed = False
for lookBack in range(3):
customFunc = """def getError(pred,ground,tools):
return tools.getPrediction(%d)""" % lookBack
customEM = getModule(MetricSpec("custom_error_metric", None, None,
{"customFuncSource":customFunc}))
gt = [i for i in range(100)]
p = [2*i for i in range(100)]
t1 = [3*i for i in range(100)]
t2 = [str(4*i) for i in range(100)]
for i in xrange(len(gt)):
curRecord = {"pred":p[i], "ground":gt[i], "test1":t1[i], "test2":t2[i]}
if i < lookBack:
try:
customEM.addInstance(gt[i], p[i], curRecord)
failed = True
except:
self.assertTrue( not failed,
"An exception should have been generated, but wasn't")
else:
customEM.addInstance(gt[i], p[i], curRecord)
self.assertTrue( customEM.getMetric()["value"] == p[i-lookBack])
#Windowed
for lookBack in range(5):
customFunc = """def getError(pred,ground,tools):
return tools.getPrediction(%d)""" % lookBack
customEM = getModule(MetricSpec("custom_error_metric", None, None,
{"customFuncSource":customFunc,"storeWindow":storeWindow}))
gt = [i for i in range(100)]
p = [2*i for i in range(100)]
t1 = [3*i for i in range(100)]
t2 = [str(4*i) for i in range(100)]
for i in xrange(len(gt)):
curRecord = {"pred":p[i], "ground":gt[i], "test1":t1[i], "test2":t2[i]}
if lookBack>=storeWindow-1:
pass
if i < lookBack or lookBack>=storeWindow:
try:
customEM.addInstance(gt[i], p[i], curRecord)
failed = True
except:
self.assertTrue (not failed ,
"An exception should have been generated, but wasn't")
else:
customEM.addInstance(gt[i], p[i], curRecord)
self.assertTrue (customEM.getMetric()["value"] == p[i-lookBack])
#Test getGroundTruth
#Not-Windowed
for lookBack in range(3):
customFunc = """def getError(pred,ground,tools):
return tools.getGroundTruth(%d)""" % lookBack
customEM = getModule(MetricSpec("custom_error_metric", None, None,
{"customFuncSource":customFunc}))
gt = [i for i in range(100)]
p = [2*i for i in range(100)]
t1 = [3*i for i in range(100)]
t2 = [str(4*i) for i in range(100)]
for i in xrange(len(gt)):
curRecord = {"pred":p[i], "ground":gt[i], "test1":t1[i], "test2":t2[i]}
if i < lookBack:
try:
customEM.addInstance(gt[i], p[i], curRecord)
failed = True
except:
self.assertTrue( not failed ,
"An exception should have been generated, but wasn't")
else:
customEM.addInstance(gt[i], p[i], curRecord)
self.assertTrue (customEM.getMetric()["value"] == gt[i-lookBack])
#Windowed
for lookBack in range(5):
customFunc = """def getError(pred,ground,tools):
return tools.getGroundTruth(%d)""" % lookBack
customEM = getModule(MetricSpec("custom_error_metric", None, None,
{"customFuncSource":customFunc,"storeWindow":storeWindow}))
gt = [i for i in range(100)]
p = [2*i for i in range(100)]
t1 = [3*i for i in range(100)]
t2 = [str(4*i) for i in range(100)]
for i in xrange(len(gt)):
curRecord = {"pred":p[i], "ground":gt[i], "test1":t1[i], "test2":t2[i]}
if i < lookBack or lookBack>=storeWindow:
try:
customEM.addInstance(gt[i], p[i], curRecord)
failed = True
except:
self.assertTrue( not failed ,
"An exception should have been generated, but wasn't")
else:
customEM.addInstance(gt[i], p[i], curRecord)
self.assertTrue( customEM.getMetric()["value"] == gt[i-lookBack])
#Test getFieldValue
#Not-Windowed Scalar
for lookBack in range(3):
customFunc = """def getError(pred,ground,tools):
return tools.getFieldValue(%d,"test1")""" % lookBack
customEM = getModule(MetricSpec("custom_error_metric", None, None,
{"customFuncSource":customFunc}))
gt = [i for i in range(100)]
p = [2*i for i in range(100)]
t1 = [3*i for i in range(100)]
t2 = [str(4*i) for i in range(100)]
for i in xrange(len(gt)):
curRecord = {"pred":p[i], "ground":gt[i], "test1":t1[i], "test2":t2[i]}
if i < lookBack:
try:
customEM.addInstance(gt[i], p[i], curRecord)
failed = True
except:
self.assertTrue( not failed ,
"An exception should have been generated, but wasn't")
else:
customEM.addInstance(gt[i], p[i], curRecord)
self.assertTrue (customEM.getMetric()["value"] == t1[i-lookBack])
#Windowed Scalar
for lookBack in range(3):
customFunc = """def getError(pred,ground,tools):
return tools.getFieldValue(%d,"test1")""" % lookBack
customEM = getModule(MetricSpec("custom_error_metric", None, None,
{"customFuncSource":customFunc,"storeWindow":storeWindow}))
gt = [i for i in range(100)]
p = [2*i for i in range(100)]
t1 = [3*i for i in range(100)]
t2 = [str(4*i) for i in range(100)]
for i in xrange(len(gt)):
curRecord = {"pred":p[i], "ground":gt[i], "test1":t1[i], "test2":t2[i]}
if i < lookBack or lookBack>=storeWindow:
try:
customEM.addInstance(gt[i], p[i], curRecord)
failed = True
except:
self.assertTrue (not failed ,
"An exception should have been generated, but wasn't")
else:
customEM.addInstance(gt[i], p[i], curRecord)
self.assertTrue( customEM.getMetric()["value"] == t1[i-lookBack])
#Not-Windowed category
for lookBack in range(3):
customFunc = """def getError(pred,ground,tools):
return tools.getFieldValue(%d,"test1")""" % lookBack
customEM = getModule(MetricSpec("custom_error_metric", None, None,
{"customFuncSource":customFunc}))
gt = [i for i in range(100)]
p = [2*i for i in range(100)]
t1 = [3*i for i in range(100)]
t2 = [str(4*i) for i in range(100)]
for i in xrange(len(gt)):
curRecord = {"pred":p[i], "ground":gt[i], "test1":t1[i], "test2":t2[i]}
if i < lookBack:
try:
customEM.addInstance(gt[i], p[i], curRecord)
failed = True
except:
self.assertTrue( not failed ,
"An exception should have been generated, but wasn't")
else:
customEM.addInstance(gt[i], p[i], curRecord)
self.assertTrue (customEM.getMetric()["value"] == t1[i-lookBack])
#Windowed category
for lookBack in range(3):
customFunc = """def getError(pred,ground,tools):
return tools.getFieldValue(%d,"test1")""" % lookBack
customEM = getModule(MetricSpec("custom_error_metric", None, None,
{"customFuncSource":customFunc,"storeWindow":storeWindow}))
gt = [i for i in range(100)]
p = [2*i for i in range(100)]
t1 = [3*i for i in range(100)]
t2 = [str(4*i) for i in range(100)]
for i in xrange(len(gt)):
curRecord = {"pred":p[i], "ground":gt[i], "test1":t1[i], "test2":t2[i]}
if i < lookBack or lookBack>=storeWindow:
try:
customEM.addInstance(gt[i], p[i], curRecord)
failed = True
except:
self.assertTrue (not failed ,
"An exception should have been generated, but wasn't")
else:
customEM.addInstance(gt[i], p[i], curRecord)
self.assertTrue(customEM.getMetric()["value"] == t2[i-lookBack])
#Test getBufferLen
#Not-Windowed
customFunc = """def getError(pred,ground,tools):
return tools.getBufferLen()"""
customEM = getModule(MetricSpec("custom_error_metric", None, None,
{"customFuncSource":customFunc}))
gt = [i for i in range(100)]
p = [2*i for i in range(100)]
t1 = [3*i for i in range(100)]
t2 = [str(4*i) for i in range(100)]
for i in xrange(len(gt)):
curRecord = {"pred":p[i], "ground":gt[i], "test1":t1[i], "test2":t2[i]}
customEM.addInstance(gt[i], p[i], curRecord)
self.assertTrue(customEM.getMetric()["value"] == i+1)
#Windowed
customFunc = """def getError(pred,ground,tools):
return tools.getBufferLen()"""
customEM = getModule(MetricSpec("custom_error_metric", None, None,
{"customFuncSource":customFunc,"storeWindow":storeWindow}))
gt = [i for i in range(100)]
p = [2*i for i in range(100)]
t1 = [3*i for i in range(100)]
t2 = [str(4*i) for i in range(100)]
for i in xrange(len(gt)):
curRecord = {"pred":p[i], "ground":gt[i], "test1":t1[i], "test2":t2[i]}
customEM.addInstance(gt[i], p[i], curRecord)
self.assertTrue(customEM.getMetric()["value"] == min(i+1, 4))
#Test initialization edge cases
raised = False
try:
customEM = getModule(MetricSpec("custom_error_metric", None, None,
{"customFuncSource":customFunc,"errorWindow":0}))
except:
raised = True
self.assertTrue(raised, "An errorWindow of 0 should have raised an exception")
raised = False
try:
customEM = getModule(MetricSpec("custom_error_metric", None, None,
{"customFuncSource":customFunc,"storeWindow":0}))
except:
raised = True
self.assertTrue(raised, "A storeWindow of 0 should have raised an exception")
def testMultiMetric(self):
ms1 = MetricSpec(field='a', metric='trivial', inferenceElement='prediction', params={'errorMetric': 'aae', 'window': 1000, 'steps': 1})
ms2 = MetricSpec(metric='trivial', inferenceElement='prediction', field='a', params={'window': 10, 'steps': 1, 'errorMetric': 'rmse'})
metric1000 = getModule(ms1)
metric10 = getModule(ms2)
# create multi metric
multi = MetricMulti(weights=[0.2, 0.8], metrics=[metric10, metric1000])
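# the combined value should come out to 0.2 * (metric10 value) + 0.8 * (metric1000 value),
# which is what the `check` computation below verifies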
multi.verbosity = 1
print multi
# create reference metrics (must be diff from metrics above used in MultiMetric, as they keep history)
metric1000ref = getModule(ms1)
metric10ref = getModule(ms2)
gt = range(500, 1000)
p = range(500)
for i in xrange(len(gt)):
v10 = metric10ref.addInstance(gt[i], p[i])
v1000 = metric1000ref.addInstance(gt[i], p[i])
if v10 is None or v1000 is None:
check = None
else:
check = 0.2*float(v10) + 0.8*float(v1000)
metricValue = multi.addInstance(gt[i], p[i])
self.assertEqual(check, metricValue, "iter i= %s gt=%s pred=%s multi=%s sub1=%s sub2=%s" % (i, gt[i], p[i], metricValue, v10, v1000))
if __name__ == "__main__":
unittest.main()
| 1 | 19,120 | Why did you break the line? It looks like it is under 80 characters without the break and it is inside parens so no need for backslash anyway | numenta-nupic | py |
@@ -90,6 +90,19 @@ try:
except ImportError:
GRAPHVIZ_INSTALLED = False
+"""datatable"""
+try:
+ from datatable import DataTable
+ DT_INSTALLED = True
+except ImportError:
+ DT_INSTALLED = False
+
+ class DataTable(object):
+ """Dummy class for DataTable."""
+
+ pass
+
+
"""sklearn"""
try:
from sklearn.base import BaseEstimator | 1 | # coding: utf-8
# pylint: disable = C0103
"""Compatibility library."""
from __future__ import absolute_import
import inspect
import sys
import numpy as np
is_py3 = (sys.version_info[0] == 3)
"""Compatibility between Python2 and Python3"""
if is_py3:
zip_ = zip
string_type = str
numeric_types = (int, float, bool)
integer_types = (int, )
range_ = range
def argc_(func):
"""Count the number of arguments of a function."""
return len(inspect.signature(func).parameters)
def decode_string(bytestring):
"""Decode C bytestring to ordinary string."""
return bytestring.decode('utf-8')
else:
from itertools import izip as zip_
string_type = basestring
numeric_types = (int, long, float, bool)
integer_types = (int, long)
range_ = xrange
def argc_(func):
"""Count the number of arguments of a function."""
return len(inspect.getargspec(func).args)
def decode_string(bytestring):
"""Decode C bytestring to ordinary string."""
return bytestring
"""json"""
try:
import simplejson as json
except (ImportError, SyntaxError):
# simplejson does not support Python 3.2, it throws a SyntaxError
# because of u'...' Unicode literals.
import json
def json_default_with_numpy(obj):
"""Convert numpy classes to JSON serializable objects."""
if isinstance(obj, (np.integer, np.floating, np.bool_)):
return obj.item()
elif isinstance(obj, np.ndarray):
return obj.tolist()
else:
return obj
"""pandas"""
try:
from pandas import Series, DataFrame
PANDAS_INSTALLED = True
except ImportError:
PANDAS_INSTALLED = False
class Series(object):
"""Dummy class for pandas.Series."""
pass
class DataFrame(object):
"""Dummy class for pandas.DataFrame."""
pass
"""matplotlib"""
try:
import matplotlib
MATPLOTLIB_INSTALLED = True
except ImportError:
MATPLOTLIB_INSTALLED = False
"""graphviz"""
try:
import graphviz
GRAPHVIZ_INSTALLED = True
except ImportError:
GRAPHVIZ_INSTALLED = False
"""sklearn"""
try:
from sklearn.base import BaseEstimator
from sklearn.base import RegressorMixin, ClassifierMixin
from sklearn.preprocessing import LabelEncoder
from sklearn.utils.class_weight import compute_sample_weight
from sklearn.utils.multiclass import check_classification_targets
from sklearn.utils.validation import (assert_all_finite, check_X_y,
check_array, check_consistent_length)
try:
from sklearn.model_selection import StratifiedKFold, GroupKFold
from sklearn.exceptions import NotFittedError
except ImportError:
from sklearn.cross_validation import StratifiedKFold, GroupKFold
from sklearn.utils.validation import NotFittedError
SKLEARN_INSTALLED = True
_LGBMModelBase = BaseEstimator
_LGBMRegressorBase = RegressorMixin
_LGBMClassifierBase = ClassifierMixin
_LGBMLabelEncoder = LabelEncoder
LGBMNotFittedError = NotFittedError
_LGBMStratifiedKFold = StratifiedKFold
_LGBMGroupKFold = GroupKFold
_LGBMCheckXY = check_X_y
_LGBMCheckArray = check_array
_LGBMCheckConsistentLength = check_consistent_length
_LGBMAssertAllFinite = assert_all_finite
_LGBMCheckClassificationTargets = check_classification_targets
_LGBMComputeSampleWeight = compute_sample_weight
except ImportError:
SKLEARN_INSTALLED = False
_LGBMModelBase = object
_LGBMClassifierBase = object
_LGBMRegressorBase = object
_LGBMLabelEncoder = None
LGBMNotFittedError = ValueError
_LGBMStratifiedKFold = None
_LGBMGroupKFold = None
_LGBMCheckXY = None
_LGBMCheckArray = None
_LGBMCheckConsistentLength = None
_LGBMAssertAllFinite = None
_LGBMCheckClassificationTargets = None
_LGBMComputeSampleWeight = None
# DeprecationWarning is not shown by default, so let's create our own with higher level
class LGBMDeprecationWarning(UserWarning):
"""Custom deprecation warning."""
pass
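# Illustrative sketches only (not part of compat.py); `maybe_to_numpy` and its
# argument are hypothetical names used here to show how the shims above are
# meant to be consumed:
#
#   import warnings
#   warnings.warn("old_param is deprecated", LGBMDeprecationWarning)  # shown by default, unlike DeprecationWarning
#
#   payload = json.dumps({"n": np.int64(3)}, default=json_default_with_numpy)
#
#   def maybe_to_numpy(data):
#       # isinstance() stays safe even when pandas is absent, because
#       # DataFrame falls back to the dummy class defined above
#       if PANDAS_INSTALLED and isinstance(data, DataFrame):
#           return data.values
#       return np.asarray(data)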
| 1 | 19,670 | @guolinke Don't you mind to rename this variable to `DATATABLE_INSTALLED`, for the consistency with other variables (for example, there are `PANDAS_INSTALLED` but not `PD_INSTALLED`). Also, `DT` is a little bit confusing: sometimes `dt` is used for `datetime`. | microsoft-LightGBM | cpp |
@@ -37,7 +37,7 @@ module RSpec
profile.slowest_examples.each do |example|
@output.puts " #{example.full_description}"
@output.puts " #{bold(Helpers.format_seconds(example.execution_result.run_time))} " \
- "#{bold("seconds")} #{format_caller(example.location)}"
+ "#{bold('seconds')} #{format_caller(example.location)}"
end
end
| 1 | RSpec::Support.require_rspec_core "formatters/console_codes"
module RSpec
module Core
module Formatters
# @api private
# Formatter for providing profile output.
class ProfileFormatter
Formatters.register self, :dump_profile
def initialize(output)
@output = output
end
# @private
attr_reader :output
# @api public
#
# This method is invoked after the dumping the summary if profiling is
# enabled.
#
# @param profile [ProfileNotification] containing duration,
# slowest_examples and slowest_example_groups
def dump_profile(profile)
dump_profile_slowest_examples(profile)
dump_profile_slowest_example_groups(profile)
end
private
def dump_profile_slowest_examples(profile)
@output.puts "\nTop #{profile.slowest_examples.size} slowest " \
"examples (#{Helpers.format_seconds(profile.slow_duration)} " \
"seconds, #{profile.percentage}% of total time):\n"
profile.slowest_examples.each do |example|
@output.puts " #{example.full_description}"
@output.puts " #{bold(Helpers.format_seconds(example.execution_result.run_time))} " \
"#{bold("seconds")} #{format_caller(example.location)}"
end
end
def dump_profile_slowest_example_groups(profile)
return if profile.slowest_groups.empty?
@output.puts "\nTop #{profile.slowest_groups.size} slowest example groups:"
profile.slowest_groups.each do |loc, hash|
average = "#{bold(Helpers.format_seconds(hash[:average]))} #{bold("seconds")} average"
total = "#{Helpers.format_seconds(hash[:total_time])} seconds"
count = Helpers.pluralize(hash[:count], "example")
@output.puts " #{hash[:description]}"
@output.puts " #{average} (#{total} / #{count}) #{loc}"
end
end
def format_caller(caller_info)
RSpec.configuration.backtrace_formatter.backtrace_line(
caller_info.to_s.split(':in `block').first)
end
def bold(text)
ConsoleCodes.wrap(text, :bold)
end
end
end
end
end
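# Illustrative usage sketch (assumes rspec-core is loaded; the Structs below
# are hypothetical stand-ins for RSpec's real example and notification objects):
#
#   out       = StringIO.new
#   formatter = RSpec::Core::Formatters::ProfileFormatter.new(out)
#   result    = Struct.new(:run_time).new(0.25)
#   example   = Struct.new(:full_description, :execution_result, :location)
#                     .new("slow example", result, "./spec/slow_spec.rb:10")
#   profile   = Struct.new(:slowest_examples, :slow_duration, :percentage,
#                          :slowest_groups).new([example], 0.25, 50, {})
#   formatter.dump_profile(profile)
#   puts out.string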
| 1 | 15,404 | We're fine with double quotes here. Just for future reference. | rspec-rspec-core | rb |
@@ -242,9 +242,5 @@ func (c call) maxAttemptsError(err error) {
}
func getErrorName(err error) string {
- errCode := yarpcerrors.ErrorCode(err)
- if errCode == yarpcerrors.CodeOK {
- return "unknown_internal_yarpc"
- }
- return errCode.String()
+ return yarpcerrors.ErrorCode(err).String()
} | 1 | // Copyright (c) 2017 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package retry
import (
"context"
"sync"
"time"
"github.com/uber-go/tally"
"go.uber.org/yarpc/api/transport"
"go.uber.org/yarpc/internal/digester"
"go.uber.org/yarpc/internal/pally"
"go.uber.org/yarpc/yarpcerrors"
"go.uber.org/zap"
)
const (
// Sleep between pushes to Tally metrics. At some point, we may want this
// to be configurable.
_tallyPushInterval = 500 * time.Millisecond
_packageName = "yarpc-retry"
_defaultGraphSize = 128
// Retry failure "reason" labels
_unretryable = "unretryable"
_yarpcInternal = "yarpc_internal"
_noTime = "no_time"
_maxAttempts = "max_attempts"
)
type observerGraph struct {
reg *pally.Registry
logger *zap.Logger
edgesMu sync.RWMutex
edges map[string]*edge
}
func newObserverGraph(logger *zap.Logger, scope tally.Scope) (*observerGraph, context.CancelFunc) {
reg, stopPush := newRegistry(logger, scope)
return &observerGraph{
edges: make(map[string]*edge, _defaultGraphSize),
reg: reg,
logger: logger,
}, stopPush
}
func newRegistry(logger *zap.Logger, scope tally.Scope) (*pally.Registry, context.CancelFunc) {
r := pally.NewRegistry(
pally.Labeled(pally.Labels{
"component": _packageName,
}),
)
if scope == nil {
return r, func() {}
}
stop, err := r.Push(scope, _tallyPushInterval)
if err != nil {
logger.Error("Failed to start pushing metrics to Tally.", zap.Error(err))
return r, func() {}
}
return r, stop
}
func (g *observerGraph) begin(req *transport.Request) call {
return call{e: g.getOrCreateEdge(req)}
}
func (g *observerGraph) getOrCreateEdge(req *transport.Request) *edge {
d := digester.New()
d.Add(req.Caller)
d.Add(req.Service)
d.Add(string(req.Encoding))
d.Add(req.Procedure)
d.Add(req.RoutingKey)
d.Add(req.RoutingDelegate)
e := g.getOrCreateEdgeForKey(d.Digest(), req)
d.Free()
return e
}
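// Note: the digest above keys the metrics edge on (caller, service, encoding,
// procedure, routing key, routing delegate), so repeated requests with the
// same routing metadata reuse a single edge rather than allocating new counters.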
func (g *observerGraph) getOrCreateEdgeForKey(key []byte, req *transport.Request) *edge {
if e := g.getEdge(key); e != nil {
return e
}
return g.createEdge(key, req)
}
func (g *observerGraph) getEdge(key []byte) *edge {
g.edgesMu.RLock()
e := g.edges[string(key)]
g.edgesMu.RUnlock()
return e
}
func (g *observerGraph) createEdge(key []byte, req *transport.Request) *edge {
g.edgesMu.Lock()
// Since we'll rarely hit this code path, the overhead of defer is acceptable.
defer g.edgesMu.Unlock()
if e, ok := g.edges[string(key)]; ok {
// Someone beat us to the punch.
return e
}
e := newEdge(g.logger, g.reg, req)
g.edges[string(key)] = e
return e
}
func newEdge(logger *zap.Logger, reg *pally.Registry, req *transport.Request) *edge {
labels := pally.Labels{
"source": pally.ScrubLabelValue(req.Caller),
"dest": pally.ScrubLabelValue(req.Service),
"procedure": pally.ScrubLabelValue(req.Procedure),
"encoding": pally.ScrubLabelValue(string(req.Encoding)),
"routing_key": pally.ScrubLabelValue(req.RoutingKey),
"routing_delegate": pally.ScrubLabelValue(req.RoutingDelegate),
}
attempts, err := reg.NewCounter(pally.Opts{
Name: "attempts",
Help: "Total number of RPC attempts.",
ConstLabels: labels,
})
if err != nil {
logger.Error("Failed to create attempts counter.", zap.Error(err))
attempts = pally.NewNopCounter()
}
successes, err := reg.NewCounter(pally.Opts{
Name: "successes",
Help: "Number of successful attempts, including successful initial attempts.",
ConstLabels: labels,
})
if err != nil {
logger.Error("Failed to create successes counter.", zap.Error(err))
successes = pally.NewNopCounter()
}
retriesAfterError, err := reg.NewCounterVector(pally.Opts{
Name: "retries_after_error",
Help: "Total RPC retry attempts for each prior error.",
ConstLabels: labels,
VariableLabels: []string{"error"},
})
if err != nil {
logger.Error("Failed to create retry after error vector.", zap.Error(err))
retriesAfterError = pally.NewNopCounterVector()
}
failures, err := reg.NewCounterVector(pally.Opts{
Name: "retry_failures",
Help: "Number of RPC final attempt failures.",
ConstLabels: labels,
VariableLabels: []string{"reason", "error"},
})
if err != nil {
logger.Error("Failed to create retry failures reason and error vector.", zap.Error(err))
failures = pally.NewNopCounterVector()
}
return &edge{
attempts: attempts,
successes: successes,
retriesAfterError: retriesAfterError,
failures: failures,
}
}
type edge struct {
attempts pally.Counter
successes pally.Counter
// Retry counter that has the error being retried.
retriesAfterError pally.CounterVector
// Failures are hard exits from the retry loop. Failures will log the
// reason we didn't retry, and the error we just had.
failures pally.CounterVector
}
// call is carried through an outbound request with retries. It will record
// all appropriate data onto the edge.
type call struct {
e *edge
}
func (c call) attempt() {
c.e.attempts.Inc()
}
func (c call) success() {
c.e.successes.Inc()
}
func (c call) retryOnError(err error) {
if counter, err := c.e.retriesAfterError.Get(getErrorName(err)); err == nil {
counter.Inc()
}
}
func (c call) unretryableError(err error) {
if counter, err := c.e.failures.Get(_unretryable, getErrorName(err)); err == nil {
counter.Inc()
}
}
func (c call) yarpcInternalError(err error) {
if counter, err := c.e.failures.Get(_yarpcInternal, getErrorName(err)); err == nil {
counter.Inc()
}
}
func (c call) noTimeError(err error) {
if counter, err := c.e.failures.Get(_noTime, getErrorName(err)); err == nil {
counter.Inc()
}
}
func (c call) maxAttemptsError(err error) {
if counter, err := c.e.failures.Get(_maxAttempts, getErrorName(err)); err == nil {
counter.Inc()
}
}
func getErrorName(err error) string {
errCode := yarpcerrors.ErrorCode(err)
if errCode == yarpcerrors.CodeOK {
return "unknown_internal_yarpc"
}
return errCode.String()
}
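// Illustrative sketch of how a retry loop is expected to drive a call
// (doAttempt and isRetryable are hypothetical, not part of this package):
//
//	c := graph.begin(req)
//	for {
//		c.attempt()
//		err := doAttempt(ctx, req)
//		if err == nil {
//			c.success()
//			break
//		}
//		if !isRetryable(err) {
//			c.unretryableError(err)
//			break
//		}
//		c.retryOnError(err)
//	}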
| 1 | 15,267 | can we keep this around? We should make sure we can distinguish between properly wrapped errors and "unwrapped" errors | yarpc-yarpc-go | go |
@@ -14265,6 +14265,10 @@ void PIPELINE_STATE::initGraphicsPipeline(ValidationStateTracker *state_data, co
}
}
graphicsPipelineCI.initialize(pCreateInfo, uses_color_attachment, uses_depthstencil_attachment);
+ if (graphicsPipelineCI.pInputAssemblyState) {
+ topology_at_rasterizer = graphicsPipelineCI.pInputAssemblyState->topology;
+ }
+
stage_state.resize(pCreateInfo->stageCount);
for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
const VkPipelineShaderStageCreateInfo *pPSSCI = &pCreateInfo->pStages[i]; | 1 | /* Copyright (c) 2015-2019 The Khronos Group Inc.
* Copyright (c) 2015-2019 Valve Corporation
* Copyright (c) 2015-2019 LunarG, Inc.
* Copyright (C) 2015-2019 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Author: Cody Northrop <[email protected]>
* Author: Michael Lentine <[email protected]>
* Author: Tobin Ehlis <[email protected]>
* Author: Chia-I Wu <[email protected]>
* Author: Chris Forbes <[email protected]>
* Author: Mark Lobodzinski <[email protected]>
* Author: Ian Elliott <[email protected]>
* Author: Dave Houlton <[email protected]>
* Author: Dustin Graves <[email protected]>
* Author: Jeremy Hayes <[email protected]>
* Author: Jon Ashburn <[email protected]>
* Author: Karl Schultz <[email protected]>
* Author: Mark Young <[email protected]>
* Author: Mike Schuchardt <[email protected]>
* Author: Mike Weiblen <[email protected]>
* Author: Tony Barbour <[email protected]>
* Author: John Zulauf <[email protected]>
* Author: Shannon McPherson <[email protected]>
*/
// Allow use of STL min and max functions in Windows
#define NOMINMAX
#include <algorithm>
#include <array>
#include <assert.h>
#include <cmath>
#include <iostream>
#include <list>
#include <map>
#include <memory>
#include <mutex>
#include <set>
#include <sstream>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <string>
#include <valarray>
#include "vk_loader_platform.h"
#include "vk_dispatch_table_helper.h"
#include "vk_enum_string_helper.h"
#include "chassis.h"
#include "convert_to_renderpass2.h"
#include "core_validation.h"
#include "buffer_validation.h"
#include "shader_validation.h"
#include "vk_layer_utils.h"
// These functions are defined *outside* the core_validation namespace as their type
// is also defined outside that namespace
size_t PipelineLayoutCompatDef::hash() const {
hash_util::HashCombiner hc;
// The set number is integral to the CompatDef's distinctiveness
hc << set << push_constant_ranges.get();
const auto &descriptor_set_layouts = *set_layouts_id.get();
for (uint32_t i = 0; i <= set; i++) {
hc << descriptor_set_layouts[i].get();
}
return hc.Value();
}
bool PipelineLayoutCompatDef::operator==(const PipelineLayoutCompatDef &other) const {
if ((set != other.set) || (push_constant_ranges != other.push_constant_ranges)) {
return false;
}
if (set_layouts_id == other.set_layouts_id) {
// if it's the same set_layouts_id, then *any* subset will match
return true;
}
// They aren't exactly the same PipelineLayoutSetLayouts, so we need to check if the required subsets match
const auto &descriptor_set_layouts = *set_layouts_id.get();
assert(set < descriptor_set_layouts.size());
const auto &other_ds_layouts = *other.set_layouts_id.get();
assert(set < other_ds_layouts.size());
for (uint32_t i = 0; i <= set; i++) {
if (descriptor_set_layouts[i] != other_ds_layouts[i]) {
return false;
}
}
return true;
}
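// Example: pipeline layouts created with set layouts {A, B, C} and {A, B, D}
// produce CompatDefs that compare equal for set 0 and set 1 (identical
// prefixes), but not for set 2.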
using std::max;
using std::string;
using std::stringstream;
using std::unique_ptr;
using std::unordered_map;
using std::unordered_set;
using std::vector;
// WSI Image Objects bypass usual Image Object creation methods. A special Memory
// Object value will be used to identify them internally.
static const VkDeviceMemory MEMTRACKER_SWAP_CHAIN_IMAGE_KEY = (VkDeviceMemory)(-1);
// 2nd special memory handle used to flag object as unbound from memory
static const VkDeviceMemory MEMORY_UNBOUND = VkDeviceMemory(~((uint64_t)(0)) - 1);
// Get the global maps of pending releases
const GlobalQFOTransferBarrierMap<VkImageMemoryBarrier> &CoreChecks::GetGlobalQFOReleaseBarrierMap(
const QFOTransferBarrier<VkImageMemoryBarrier>::Tag &type_tag) const {
return qfo_release_image_barrier_map;
}
const GlobalQFOTransferBarrierMap<VkBufferMemoryBarrier> &CoreChecks::GetGlobalQFOReleaseBarrierMap(
const QFOTransferBarrier<VkBufferMemoryBarrier>::Tag &type_tag) const {
return qfo_release_buffer_barrier_map;
}
GlobalQFOTransferBarrierMap<VkImageMemoryBarrier> &CoreChecks::GetGlobalQFOReleaseBarrierMap(
const QFOTransferBarrier<VkImageMemoryBarrier>::Tag &type_tag) {
return qfo_release_image_barrier_map;
}
GlobalQFOTransferBarrierMap<VkBufferMemoryBarrier> &CoreChecks::GetGlobalQFOReleaseBarrierMap(
const QFOTransferBarrier<VkBufferMemoryBarrier>::Tag &type_tag) {
return qfo_release_buffer_barrier_map;
}
// Get the image viewstate for a given framebuffer attachment
IMAGE_VIEW_STATE *ValidationStateTracker::GetAttachmentImageViewState(FRAMEBUFFER_STATE *framebuffer, uint32_t index) {
assert(framebuffer && (index < framebuffer->createInfo.attachmentCount));
const VkImageView &image_view = framebuffer->createInfo.pAttachments[index];
return GetImageViewState(image_view);
}
EVENT_STATE *ValidationStateTracker::GetEventState(VkEvent event) {
auto it = eventMap.find(event);
if (it == eventMap.end()) {
return nullptr;
}
return &it->second;
}
const QUEUE_STATE *ValidationStateTracker::GetQueueState(VkQueue queue) const {
auto it = queueMap.find(queue);
if (it == queueMap.cend()) {
return nullptr;
}
return &it->second;
}
QUEUE_STATE *ValidationStateTracker::GetQueueState(VkQueue queue) {
auto it = queueMap.find(queue);
if (it == queueMap.end()) {
return nullptr;
}
return &it->second;
}
const PHYSICAL_DEVICE_STATE *ValidationStateTracker::GetPhysicalDeviceState(VkPhysicalDevice phys) const {
auto *phys_dev_map = ((physical_device_map.size() > 0) ? &physical_device_map : &instance_state->physical_device_map);
auto it = phys_dev_map->find(phys);
if (it == phys_dev_map->end()) {
return nullptr;
}
return &it->second;
}
PHYSICAL_DEVICE_STATE *ValidationStateTracker::GetPhysicalDeviceState(VkPhysicalDevice phys) {
auto *phys_dev_map = ((physical_device_map.size() > 0) ? &physical_device_map : &instance_state->physical_device_map);
auto it = phys_dev_map->find(phys);
if (it == phys_dev_map->end()) {
return nullptr;
}
return &it->second;
}
PHYSICAL_DEVICE_STATE *ValidationStateTracker::GetPhysicalDeviceState() { return physical_device_state; }
const PHYSICAL_DEVICE_STATE *ValidationStateTracker::GetPhysicalDeviceState() const { return physical_device_state; }
// Return ptr to memory binding for given handle of specified type
template <typename State, typename Result>
static Result GetObjectMemBindingImpl(State state, const VulkanTypedHandle &typed_handle) {
switch (typed_handle.type) {
case kVulkanObjectTypeImage:
return state->GetImageState(typed_handle.Cast<VkImage>());
case kVulkanObjectTypeBuffer:
return state->GetBufferState(typed_handle.Cast<VkBuffer>());
case kVulkanObjectTypeAccelerationStructureNV:
return state->GetAccelerationStructureState(typed_handle.Cast<VkAccelerationStructureNV>());
default:
break;
}
return nullptr;
}
const BINDABLE *ValidationStateTracker::GetObjectMemBinding(const VulkanTypedHandle &typed_handle) const {
return GetObjectMemBindingImpl<const ValidationStateTracker *, const BINDABLE *>(this, typed_handle);
}
BINDABLE *ValidationStateTracker::GetObjectMemBinding(const VulkanTypedHandle &typed_handle) {
return GetObjectMemBindingImpl<ValidationStateTracker *, BINDABLE *>(this, typed_handle);
}
ImageSubresourceLayoutMap::InitialLayoutState::InitialLayoutState(const CMD_BUFFER_STATE &cb_state,
const IMAGE_VIEW_STATE *view_state)
: image_view(VK_NULL_HANDLE), aspect_mask(0), label(cb_state.debug_label) {
if (view_state) {
image_view = view_state->image_view;
aspect_mask = view_state->create_info.subresourceRange.aspectMask;
}
}
std::string FormatDebugLabel(const char *prefix, const LoggingLabel &label) {
if (label.Empty()) return std::string();
std::string out;
string_sprintf(&out, "%sVkDebugUtilsLabel(name='%s' color=[%g, %g, %g, %g])", prefix, label.name.c_str(), label.color[0],
label.color[1], label.color[2], label.color[3]);
return out;
}
// the ImageLayoutMap implementation bakes in the number of valid aspects -- we have to choose the correct one at construction time
template <uint32_t kThreshold>
static std::unique_ptr<ImageSubresourceLayoutMap> LayoutMapFactoryByAspect(const IMAGE_STATE &image_state) {
ImageSubresourceLayoutMap *map = nullptr;
switch (image_state.full_range.aspectMask) {
case VK_IMAGE_ASPECT_COLOR_BIT:
map = new ImageSubresourceLayoutMapImpl<ColorAspectTraits, kThreshold>(image_state);
break;
case VK_IMAGE_ASPECT_DEPTH_BIT:
map = new ImageSubresourceLayoutMapImpl<DepthAspectTraits, kThreshold>(image_state);
break;
case VK_IMAGE_ASPECT_STENCIL_BIT:
map = new ImageSubresourceLayoutMapImpl<StencilAspectTraits, kThreshold>(image_state);
break;
case VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT:
map = new ImageSubresourceLayoutMapImpl<DepthStencilAspectTraits, kThreshold>(image_state);
break;
case VK_IMAGE_ASPECT_PLANE_0_BIT | VK_IMAGE_ASPECT_PLANE_1_BIT:
map = new ImageSubresourceLayoutMapImpl<Multiplane2AspectTraits, kThreshold>(image_state);
break;
case VK_IMAGE_ASPECT_PLANE_0_BIT | VK_IMAGE_ASPECT_PLANE_1_BIT | VK_IMAGE_ASPECT_PLANE_2_BIT:
map = new ImageSubresourceLayoutMapImpl<Multiplane3AspectTraits, kThreshold>(image_state);
break;
}
assert(map); // We shouldn't be able to get a null here unless the traits cases are incomplete
return std::unique_ptr<ImageSubresourceLayoutMap>(map);
}
static std::unique_ptr<ImageSubresourceLayoutMap> LayoutMapFactory(const IMAGE_STATE &image_state) {
std::unique_ptr<ImageSubresourceLayoutMap> map;
const uint32_t kAlwaysDenseLimit = 16; // About a cacheline on desktop architectures
if (image_state.full_range.layerCount <= kAlwaysDenseLimit) {
// Create a dense row map
map = LayoutMapFactoryByAspect<0>(image_state);
} else {
// Create an initially sparse row map
map = LayoutMapFactoryByAspect<kAlwaysDenseLimit>(image_state);
}
return map;
}
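// Example: an image whose full_range.layerCount is 6 (a cube map) gets a dense
// row map, while a 2048-layer array image starts with a sparse one.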
// The const variant only need the image as it is the key for the map
const ImageSubresourceLayoutMap *GetImageSubresourceLayoutMap(const CMD_BUFFER_STATE *cb_state, VkImage image) {
auto it = cb_state->image_layout_map.find(image);
if (it == cb_state->image_layout_map.cend()) {
return nullptr;
}
return it->second.get();
}
// The non-const variant only needs the image state, as the factory requires it to construct a new entry
ImageSubresourceLayoutMap *GetImageSubresourceLayoutMap(CMD_BUFFER_STATE *cb_state, const IMAGE_STATE &image_state) {
auto it = cb_state->image_layout_map.find(image_state.image);
if (it == cb_state->image_layout_map.end()) {
// Empty slot... fill it in.
auto insert_pair = cb_state->image_layout_map.insert(std::make_pair(image_state.image, LayoutMapFactory(image_state)));
assert(insert_pair.second);
ImageSubresourceLayoutMap *new_map = insert_pair.first->second.get();
assert(new_map);
return new_map;
}
return it->second.get();
}
void ValidationStateTracker::AddMemObjInfo(void *object, const VkDeviceMemory mem, const VkMemoryAllocateInfo *pAllocateInfo) {
assert(object != NULL);
auto *mem_info = new DEVICE_MEMORY_STATE(object, mem, pAllocateInfo);
memObjMap[mem] = unique_ptr<DEVICE_MEMORY_STATE>(mem_info);
auto dedicated = lvl_find_in_chain<VkMemoryDedicatedAllocateInfoKHR>(pAllocateInfo->pNext);
if (dedicated) {
mem_info->is_dedicated = true;
mem_info->dedicated_buffer = dedicated->buffer;
mem_info->dedicated_image = dedicated->image;
}
auto export_info = lvl_find_in_chain<VkExportMemoryAllocateInfo>(pAllocateInfo->pNext);
if (export_info) {
mem_info->is_export = true;
mem_info->export_handle_type_flags = export_info->handleTypes;
}
}
// Create binding link between given sampler and command buffer node
void ValidationStateTracker::AddCommandBufferBindingSampler(CMD_BUFFER_STATE *cb_node, SAMPLER_STATE *sampler_state) {
if (disabled.command_buffer_state) {
return;
}
auto inserted = cb_node->object_bindings.emplace(sampler_state->sampler, kVulkanObjectTypeSampler);
if (inserted.second) {
// Only need to complete the cross-reference if this is a new item
sampler_state->cb_bindings.insert(cb_node);
}
}
// Create binding link between given image node and command buffer node
void ValidationStateTracker::AddCommandBufferBindingImage(CMD_BUFFER_STATE *cb_node, IMAGE_STATE *image_state) {
if (disabled.command_buffer_state) {
return;
}
// Skip validation if this image was created through WSI
if (image_state->binding.mem != MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
// First update cb binding for image
auto image_inserted = cb_node->object_bindings.emplace(image_state->image, kVulkanObjectTypeImage);
if (image_inserted.second) {
// Only need to continue if this is a new item (the rest of the work would have been done previously)
image_state->cb_bindings.insert(cb_node);
// Now update CB binding in MemObj mini CB list
for (auto mem_binding : image_state->GetBoundMemory()) {
DEVICE_MEMORY_STATE *pMemInfo = GetDevMemState(mem_binding);
if (pMemInfo) {
// Now update CBInfo's Mem reference list
auto mem_inserted = cb_node->memObjs.insert(mem_binding);
if (mem_inserted.second) {
// Only need to complete the cross-reference if this is a new item
pMemInfo->cb_bindings.insert(cb_node);
}
}
}
}
}
}
// Create binding link between given image view node and its image with command buffer node
void ValidationStateTracker::AddCommandBufferBindingImageView(CMD_BUFFER_STATE *cb_node, IMAGE_VIEW_STATE *view_state) {
if (disabled.command_buffer_state) {
return;
}
// First add bindings for imageView
auto inserted = cb_node->object_bindings.emplace(view_state->image_view, kVulkanObjectTypeImageView);
if (inserted.second) {
// Only need to continue if this is a new item
view_state->cb_bindings.insert(cb_node);
auto image_state = GetImageState(view_state->create_info.image);
// Add bindings for image within imageView
if (image_state) {
AddCommandBufferBindingImage(cb_node, image_state);
}
}
}
// Create binding link between given buffer node and command buffer node
void ValidationStateTracker::AddCommandBufferBindingBuffer(CMD_BUFFER_STATE *cb_node, BUFFER_STATE *buffer_state) {
if (disabled.command_buffer_state) {
return;
}
// First update cb binding for buffer
auto buffer_inserted = cb_node->object_bindings.emplace(buffer_state->buffer, kVulkanObjectTypeBuffer);
if (buffer_inserted.second) {
// Only need to continue if this is a new item
buffer_state->cb_bindings.insert(cb_node);
// Now update CB binding in MemObj mini CB list
for (auto mem_binding : buffer_state->GetBoundMemory()) {
DEVICE_MEMORY_STATE *pMemInfo = GetDevMemState(mem_binding);
if (pMemInfo) {
// Now update CBInfo's Mem reference list
auto inserted = cb_node->memObjs.insert(mem_binding);
if (inserted.second) {
// Only need to complete the cross-reference if this is a new item
pMemInfo->cb_bindings.insert(cb_node);
}
}
}
}
}
// Create binding link between given buffer view node and its buffer with command buffer node
void ValidationStateTracker::AddCommandBufferBindingBufferView(CMD_BUFFER_STATE *cb_node, BUFFER_VIEW_STATE *view_state) {
if (disabled.command_buffer_state) {
return;
}
// First add bindings for bufferView
auto inserted = cb_node->object_bindings.emplace(view_state->buffer_view, kVulkanObjectTypeBufferView);
if (inserted.second) {
// Only need to complete the cross-reference if this is a new item
view_state->cb_bindings.insert(cb_node);
auto buffer_state = GetBufferState(view_state->create_info.buffer);
// Add bindings for buffer within bufferView
if (buffer_state) {
AddCommandBufferBindingBuffer(cb_node, buffer_state);
}
}
}
// Create binding link between given acceleration structure and command buffer node
void ValidationStateTracker::AddCommandBufferBindingAccelerationStructure(CMD_BUFFER_STATE *cb_node,
ACCELERATION_STRUCTURE_STATE *as_state) {
if (disabled.command_buffer_state) {
return;
}
auto as_inserted = cb_node->object_bindings.emplace(as_state->acceleration_structure, kVulkanObjectTypeAccelerationStructureNV);
if (as_inserted.second) {
// Only need to complete the cross-reference if this is a new item
as_state->cb_bindings.insert(cb_node);
// Now update CB binding in MemObj mini CB list
for (auto mem_binding : as_state->GetBoundMemory()) {
DEVICE_MEMORY_STATE *pMemInfo = GetDevMemState(mem_binding);
if (pMemInfo) {
// Now update CBInfo's Mem reference list
auto mem_inserted = cb_node->memObjs.insert(mem_binding);
if (mem_inserted.second) {
// Only need to complete the cross-reference if this is a new item
pMemInfo->cb_bindings.insert(cb_node);
}
}
}
}
}
// For every mem obj bound to particular CB, free bindings related to that CB
void ValidationStateTracker::ClearCmdBufAndMemReferences(CMD_BUFFER_STATE *cb_node) {
if (cb_node) {
if (cb_node->memObjs.size() > 0) {
for (auto mem : cb_node->memObjs) {
DEVICE_MEMORY_STATE *pInfo = GetDevMemState(mem);
if (pInfo) {
pInfo->cb_bindings.erase(cb_node);
}
}
cb_node->memObjs.clear();
}
}
}
// Clear a single object binding from given memory object
void ValidationStateTracker::ClearMemoryObjectBinding(const VulkanTypedHandle &typed_handle, VkDeviceMemory mem) {
DEVICE_MEMORY_STATE *mem_info = GetDevMemState(mem);
// This obj is bound to a memory object. Remove the reference to this object in that memory object's list
if (mem_info) {
mem_info->obj_bindings.erase(typed_handle);
}
}
// ClearMemoryObjectBindings clears the binding of objects to memory
// For the given object it pulls the memory bindings and makes sure that the bindings
// no longer refer to the object being cleared. This occurs when objects are destroyed.
void ValidationStateTracker::ClearMemoryObjectBindings(const VulkanTypedHandle &typed_handle) {
BINDABLE *mem_binding = GetObjectMemBinding(typed_handle);
if (mem_binding) {
if (!mem_binding->sparse) {
ClearMemoryObjectBinding(typed_handle, mem_binding->binding.mem);
} else { // Sparse, clear all bindings
for (auto &sparse_mem_binding : mem_binding->sparse_bindings) {
ClearMemoryObjectBinding(typed_handle, sparse_mem_binding.mem);
}
}
}
}
// For given mem object, verify that it is not null or UNBOUND, if it is, report error. Return skip value.
bool CoreChecks::VerifyBoundMemoryIsValid(VkDeviceMemory mem, const VulkanTypedHandle &typed_handle, const char *api_name,
const char *error_code) const {
bool result = false;
auto type_name = object_string[typed_handle.type];
if (VK_NULL_HANDLE == mem) {
result = log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, typed_handle.handle,
error_code, "%s: %s used with no memory bound. Memory should be bound by calling vkBind%sMemory().",
api_name, report_data->FormatHandle(typed_handle).c_str(), type_name + 2);
} else if (MEMORY_UNBOUND == mem) {
result = log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, typed_handle.handle,
error_code,
"%s: %s used with no memory bound and previously bound memory was freed. Memory must not be freed "
"prior to this operation.",
api_name, report_data->FormatHandle(typed_handle).c_str());
}
return result;
}
// Check to see if memory was ever bound to this image
bool CoreChecks::ValidateMemoryIsBoundToImage(const IMAGE_STATE *image_state, const char *api_name, const char *error_code) const {
bool result = false;
if (image_state->create_from_swapchain != VK_NULL_HANDLE) {
if (image_state->bind_swapchain == VK_NULL_HANDLE) {
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(image_state->image), error_code,
"%s: %s is created by %s, and the image should be bound by calling vkBindImageMemory2(), and the pNext chain "
"includes VkBindImageMemorySwapchainInfoKHR.",
api_name, report_data->FormatHandle(image_state->image).c_str(),
report_data->FormatHandle(image_state->create_from_swapchain).c_str());
}
} else if (0 == (static_cast<uint32_t>(image_state->createInfo.flags) & VK_IMAGE_CREATE_SPARSE_BINDING_BIT)) {
result = VerifyBoundMemoryIsValid(image_state->binding.mem, VulkanTypedHandle(image_state->image, kVulkanObjectTypeImage),
api_name, error_code);
}
return result;
}
// Check to see if memory was bound to this buffer
bool CoreChecks::ValidateMemoryIsBoundToBuffer(const BUFFER_STATE *buffer_state, const char *api_name,
const char *error_code) const {
bool result = false;
if (0 == (static_cast<uint32_t>(buffer_state->createInfo.flags) & VK_BUFFER_CREATE_SPARSE_BINDING_BIT)) {
result = VerifyBoundMemoryIsValid(buffer_state->binding.mem,
VulkanTypedHandle(buffer_state->buffer, kVulkanObjectTypeBuffer), api_name, error_code);
}
return result;
}
// Check to see if memory was bound to this acceleration structure
bool CoreChecks::ValidateMemoryIsBoundToAccelerationStructure(const ACCELERATION_STRUCTURE_STATE *as_state, const char *api_name,
const char *error_code) const {
return VerifyBoundMemoryIsValid(as_state->binding.mem,
VulkanTypedHandle(as_state->acceleration_structure, kVulkanObjectTypeAccelerationStructureNV),
api_name, error_code);
}
// SetMemBinding is used to establish immutable, non-sparse binding between a single image/buffer object and memory object.
// Corresponding valid usage checks are in ValidateSetMemBinding().
void ValidationStateTracker::SetMemBinding(VkDeviceMemory mem, BINDABLE *mem_binding, VkDeviceSize memory_offset,
const VulkanTypedHandle &typed_handle) {
assert(mem_binding);
mem_binding->binding.mem = mem;
mem_binding->UpdateBoundMemorySet(); // force recreation of cached set
mem_binding->binding.offset = memory_offset;
mem_binding->binding.size = mem_binding->requirements.size;
if (mem != VK_NULL_HANDLE) {
DEVICE_MEMORY_STATE *mem_info = GetDevMemState(mem);
if (mem_info) {
mem_info->obj_bindings.insert(typed_handle);
// For image objects, make sure default memory state is correctly set
// TODO : What's the best/correct way to handle this?
if (kVulkanObjectTypeImage == typed_handle.type) {
auto const image_state = reinterpret_cast<const IMAGE_STATE *>(mem_binding);
if (image_state) {
VkImageCreateInfo ici = image_state->createInfo;
if (ici.usage & (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
// TODO:: More memory state transition stuff.
}
}
}
}
}
}
// Valid usage checks for a call to SetMemBinding().
// For the NULL mem case, output a warning.
// Make sure the given object is in the global object map.
// If a previous binding existed, output a validation error;
// otherwise, add a reference from objectInfo to memoryInfo and a reference off of objInfo.
// TODO: We may need to refactor or pass in multiple valid usage statements to handle multiple valid usage conditions.
bool CoreChecks::ValidateSetMemBinding(VkDeviceMemory mem, const VulkanTypedHandle &typed_handle, const char *apiName) const {
bool skip = false;
// It's an error to bind an object to NULL memory
if (mem != VK_NULL_HANDLE) {
const BINDABLE *mem_binding = GetObjectMemBinding(typed_handle);
assert(mem_binding);
if (mem_binding->sparse) {
const char *error_code = "VUID-vkBindImageMemory-image-01045";
const char *handle_type = "IMAGE";
if (typed_handle.type == kVulkanObjectTypeBuffer) {
error_code = "VUID-vkBindBufferMemory-buffer-01030";
handle_type = "BUFFER";
} else {
assert(typed_handle.type == kVulkanObjectTypeImage);
}
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
HandleToUint64(mem), error_code,
"In %s, attempting to bind %s to %s which was created with sparse memory flags "
"(VK_%s_CREATE_SPARSE_*_BIT).",
apiName, report_data->FormatHandle(mem).c_str(), report_data->FormatHandle(typed_handle).c_str(),
handle_type);
}
const DEVICE_MEMORY_STATE *mem_info = GetDevMemState(mem);
if (mem_info) {
const DEVICE_MEMORY_STATE *prev_binding = GetDevMemState(mem_binding->binding.mem);
if (prev_binding) {
const char *error_code = "VUID-vkBindImageMemory-image-01044";
if (typed_handle.type == kVulkanObjectTypeBuffer) {
error_code = "VUID-vkBindBufferMemory-buffer-01029";
} else {
assert(typed_handle.type == kVulkanObjectTypeImage);
}
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
HandleToUint64(mem), error_code,
"In %s, attempting to bind %s to %s which has already been bound to %s.", apiName,
report_data->FormatHandle(mem).c_str(), report_data->FormatHandle(typed_handle).c_str(),
report_data->FormatHandle(prev_binding->mem).c_str());
} else if (mem_binding->binding.mem == MEMORY_UNBOUND) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
HandleToUint64(mem), kVUID_Core_MemTrack_RebindObject,
"In %s, attempting to bind %s to %s which was previous bound to memory that has "
"since been freed. Memory bindings are immutable in "
"Vulkan so this attempt to bind to new memory is not allowed.",
apiName, report_data->FormatHandle(mem).c_str(), report_data->FormatHandle(typed_handle).c_str());
}
}
}
return skip;
}
// For the NULL mem case, clear any previous binding. Otherwise:
// make sure the given object is in its object map,
// update the binding if a previous one existed,
// add a reference from objectInfo to memoryInfo,
// and add a reference off of the object's binding info.
// Returns a "skip" flag for the caller (currently always false).
bool CoreChecks::SetSparseMemBinding(MEM_BINDING binding, const VulkanTypedHandle &typed_handle) {
bool skip = VK_FALSE;
// Handle NULL case separately, just clear previous binding & decrement reference
if (binding.mem == VK_NULL_HANDLE) {
// TODO : This should cause the range of the resource to be unbound according to spec
} else {
BINDABLE *mem_binding = GetObjectMemBinding(typed_handle);
assert(mem_binding);
if (mem_binding) { // Invalid handles are reported by object tracker, but Get returns NULL for them, so avoid SEGV here
assert(mem_binding->sparse);
DEVICE_MEMORY_STATE *mem_info = GetDevMemState(binding.mem);
if (mem_info) {
mem_info->obj_bindings.insert(typed_handle);
// Need to set mem binding for this object
mem_binding->sparse_bindings.insert(binding);
mem_binding->UpdateBoundMemorySet();
}
}
}
return skip;
}
bool CoreChecks::ValidateDeviceQueueFamily(uint32_t queue_family, const char *cmd_name, const char *parameter_name,
const char *error_code, bool optional = false) const {
bool skip = false;
if (!optional && queue_family == VK_QUEUE_FAMILY_IGNORED) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
error_code,
"%s: %s is VK_QUEUE_FAMILY_IGNORED, but it is required to provide a valid queue family index value.",
cmd_name, parameter_name);
} else if (queue_family_index_map.find(queue_family) == queue_family_index_map.end()) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), error_code,
"%s: %s (= %" PRIu32
") is not one of the queue families given via VkDeviceQueueCreateInfo structures when the device was created.",
cmd_name, parameter_name, queue_family);
}
return skip;
}
bool CoreChecks::ValidateQueueFamilies(uint32_t queue_family_count, const uint32_t *queue_families, const char *cmd_name,
const char *array_parameter_name, const char *unique_error_code,
const char *valid_error_code, bool optional = false) const {
bool skip = false;
if (queue_families) {
std::unordered_set<uint32_t> set;
for (uint32_t i = 0; i < queue_family_count; ++i) {
std::string parameter_name = std::string(array_parameter_name) + "[" + std::to_string(i) + "]";
if (set.count(queue_families[i])) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), unique_error_code, "%s: %s (=%" PRIu32 ") is not unique within %s array.",
cmd_name, parameter_name.c_str(), queue_families[i], array_parameter_name);
} else {
set.insert(queue_families[i]);
skip |= ValidateDeviceQueueFamily(queue_families[i], cmd_name, parameter_name.c_str(), valid_error_code, optional);
}
}
}
return skip;
}
// Check object status for selected flag state
bool CoreChecks::ValidateStatus(const CMD_BUFFER_STATE *pNode, CBStatusFlags status_mask, VkFlags msg_flags, const char *fail_msg,
const char *msg_code) const {
if (!(pNode->status & status_mask)) {
return log_msg(report_data, msg_flags, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(pNode->commandBuffer),
msg_code, "%s: %s..", report_data->FormatHandle(pNode->commandBuffer).c_str(), fail_msg);
}
return false;
}
const RENDER_PASS_STATE *ValidationStateTracker::GetRenderPassState(VkRenderPass renderpass) const {
auto it = renderPassMap.find(renderpass);
if (it == renderPassMap.end()) {
return nullptr;
}
return it->second.get();
}
RENDER_PASS_STATE *ValidationStateTracker::GetRenderPassState(VkRenderPass renderpass) {
auto it = renderPassMap.find(renderpass);
if (it == renderPassMap.end()) {
return nullptr;
}
return it->second.get();
}
std::shared_ptr<RENDER_PASS_STATE> ValidationStateTracker::GetRenderPassStateSharedPtr(VkRenderPass renderpass) {
auto it = renderPassMap.find(renderpass);
if (it == renderPassMap.end()) {
return nullptr;
}
return it->second;
}
std::shared_ptr<cvdescriptorset::DescriptorSetLayout const> const GetDescriptorSetLayout(const ValidationStateTracker *state_data,
VkDescriptorSetLayout dsLayout) {
auto it = state_data->descriptorSetLayoutMap.find(dsLayout);
if (it == state_data->descriptorSetLayoutMap.end()) {
return nullptr;
}
return it->second;
}
// Return true if for a given PSO, the given state enum is dynamic, else return false
static bool IsDynamic(const PIPELINE_STATE *pPipeline, const VkDynamicState state) {
if (pPipeline && pPipeline->graphicsPipelineCI.pDynamicState) {
for (uint32_t i = 0; i < pPipeline->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) {
if (state == pPipeline->graphicsPipelineCI.pDynamicState->pDynamicStates[i]) return true;
}
}
return false;
}
// Validate state stored as flags at time of draw call
bool CoreChecks::ValidateDrawStateFlags(const CMD_BUFFER_STATE *pCB, const PIPELINE_STATE *pPipe, bool indexed,
const char *msg_code) const {
bool result = false;
if (pPipe->topology_at_rasterizer == VK_PRIMITIVE_TOPOLOGY_LINE_LIST ||
pPipe->topology_at_rasterizer == VK_PRIMITIVE_TOPOLOGY_LINE_STRIP) {
result |= ValidateStatus(pCB, CBSTATUS_LINE_WIDTH_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
"Dynamic line width state not set for this command buffer", msg_code);
}
if (pPipe->graphicsPipelineCI.pRasterizationState &&
(pPipe->graphicsPipelineCI.pRasterizationState->depthBiasEnable == VK_TRUE)) {
result |= ValidateStatus(pCB, CBSTATUS_DEPTH_BIAS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
"Dynamic depth bias state not set for this command buffer", msg_code);
}
if (pPipe->blendConstantsEnabled) {
result |= ValidateStatus(pCB, CBSTATUS_BLEND_CONSTANTS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
"Dynamic blend constants state not set for this command buffer", msg_code);
}
if (pPipe->graphicsPipelineCI.pDepthStencilState &&
(pPipe->graphicsPipelineCI.pDepthStencilState->depthBoundsTestEnable == VK_TRUE)) {
result |= ValidateStatus(pCB, CBSTATUS_DEPTH_BOUNDS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
"Dynamic depth bounds state not set for this command buffer", msg_code);
}
if (pPipe->graphicsPipelineCI.pDepthStencilState &&
(pPipe->graphicsPipelineCI.pDepthStencilState->stencilTestEnable == VK_TRUE)) {
result |= ValidateStatus(pCB, CBSTATUS_STENCIL_READ_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
"Dynamic stencil read mask state not set for this command buffer", msg_code);
result |= ValidateStatus(pCB, CBSTATUS_STENCIL_WRITE_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
"Dynamic stencil write mask state not set for this command buffer", msg_code);
result |= ValidateStatus(pCB, CBSTATUS_STENCIL_REFERENCE_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
"Dynamic stencil reference state not set for this command buffer", msg_code);
}
if (indexed) {
result |= ValidateStatus(pCB, CBSTATUS_INDEX_BUFFER_BOUND, VK_DEBUG_REPORT_ERROR_BIT_EXT,
"Index buffer object not bound to this command buffer when Indexed Draw attempted", msg_code);
}
return result;
}
bool CoreChecks::LogInvalidAttachmentMessage(const char *type1_string, const RENDER_PASS_STATE *rp1_state, const char *type2_string,
const RENDER_PASS_STATE *rp2_state, uint32_t primary_attach, uint32_t secondary_attach,
const char *msg, const char *caller, const char *error_code) const {
return log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
HandleToUint64(rp1_state->renderPass), error_code,
"%s: RenderPasses incompatible between %s w/ %s and %s w/ %s Attachment %u is not "
"compatible with %u: %s.",
caller, type1_string, report_data->FormatHandle(rp1_state->renderPass).c_str(), type2_string,
report_data->FormatHandle(rp2_state->renderPass).c_str(), primary_attach, secondary_attach, msg);
}
bool CoreChecks::ValidateAttachmentCompatibility(const char *type1_string, const RENDER_PASS_STATE *rp1_state,
const char *type2_string, const RENDER_PASS_STATE *rp2_state,
uint32_t primary_attach, uint32_t secondary_attach, const char *caller,
const char *error_code) const {
bool skip = false;
const auto &primaryPassCI = rp1_state->createInfo;
const auto &secondaryPassCI = rp2_state->createInfo;
if (primaryPassCI.attachmentCount <= primary_attach) {
primary_attach = VK_ATTACHMENT_UNUSED;
}
if (secondaryPassCI.attachmentCount <= secondary_attach) {
secondary_attach = VK_ATTACHMENT_UNUSED;
}
if (primary_attach == VK_ATTACHMENT_UNUSED && secondary_attach == VK_ATTACHMENT_UNUSED) {
return skip;
}
if (primary_attach == VK_ATTACHMENT_UNUSED) {
skip |= LogInvalidAttachmentMessage(type1_string, rp1_state, type2_string, rp2_state, primary_attach, secondary_attach,
"The first is unused while the second is not.", caller, error_code);
return skip;
}
if (secondary_attach == VK_ATTACHMENT_UNUSED) {
skip |= LogInvalidAttachmentMessage(type1_string, rp1_state, type2_string, rp2_state, primary_attach, secondary_attach,
"The second is unused while the first is not.", caller, error_code);
return skip;
}
if (primaryPassCI.pAttachments[primary_attach].format != secondaryPassCI.pAttachments[secondary_attach].format) {
skip |= LogInvalidAttachmentMessage(type1_string, rp1_state, type2_string, rp2_state, primary_attach, secondary_attach,
"They have different formats.", caller, error_code);
}
if (primaryPassCI.pAttachments[primary_attach].samples != secondaryPassCI.pAttachments[secondary_attach].samples) {
skip |= LogInvalidAttachmentMessage(type1_string, rp1_state, type2_string, rp2_state, primary_attach, secondary_attach,
"They have different samples.", caller, error_code);
}
if (primaryPassCI.pAttachments[primary_attach].flags != secondaryPassCI.pAttachments[secondary_attach].flags) {
skip |= LogInvalidAttachmentMessage(type1_string, rp1_state, type2_string, rp2_state, primary_attach, secondary_attach,
"They have different flags.", caller, error_code);
}
return skip;
}
bool CoreChecks::ValidateSubpassCompatibility(const char *type1_string, const RENDER_PASS_STATE *rp1_state,
const char *type2_string, const RENDER_PASS_STATE *rp2_state, const int subpass,
const char *caller, const char *error_code) const {
bool skip = false;
const auto &primary_desc = rp1_state->createInfo.pSubpasses[subpass];
const auto &secondary_desc = rp2_state->createInfo.pSubpasses[subpass];
uint32_t maxInputAttachmentCount = std::max(primary_desc.inputAttachmentCount, secondary_desc.inputAttachmentCount);
for (uint32_t i = 0; i < maxInputAttachmentCount; ++i) {
uint32_t primary_input_attach = VK_ATTACHMENT_UNUSED, secondary_input_attach = VK_ATTACHMENT_UNUSED;
if (i < primary_desc.inputAttachmentCount) {
primary_input_attach = primary_desc.pInputAttachments[i].attachment;
}
if (i < secondary_desc.inputAttachmentCount) {
secondary_input_attach = secondary_desc.pInputAttachments[i].attachment;
}
skip |= ValidateAttachmentCompatibility(type1_string, rp1_state, type2_string, rp2_state, primary_input_attach,
secondary_input_attach, caller, error_code);
}
uint32_t maxColorAttachmentCount = std::max(primary_desc.colorAttachmentCount, secondary_desc.colorAttachmentCount);
for (uint32_t i = 0; i < maxColorAttachmentCount; ++i) {
uint32_t primary_color_attach = VK_ATTACHMENT_UNUSED, secondary_color_attach = VK_ATTACHMENT_UNUSED;
if (i < primary_desc.colorAttachmentCount) {
primary_color_attach = primary_desc.pColorAttachments[i].attachment;
}
if (i < secondary_desc.colorAttachmentCount) {
secondary_color_attach = secondary_desc.pColorAttachments[i].attachment;
}
skip |= ValidateAttachmentCompatibility(type1_string, rp1_state, type2_string, rp2_state, primary_color_attach,
secondary_color_attach, caller, error_code);
if (rp1_state->createInfo.subpassCount > 1) {
uint32_t primary_resolve_attach = VK_ATTACHMENT_UNUSED, secondary_resolve_attach = VK_ATTACHMENT_UNUSED;
if (i < primary_desc.colorAttachmentCount && primary_desc.pResolveAttachments) {
primary_resolve_attach = primary_desc.pResolveAttachments[i].attachment;
}
if (i < secondary_desc.colorAttachmentCount && secondary_desc.pResolveAttachments) {
secondary_resolve_attach = secondary_desc.pResolveAttachments[i].attachment;
}
skip |= ValidateAttachmentCompatibility(type1_string, rp1_state, type2_string, rp2_state, primary_resolve_attach,
secondary_resolve_attach, caller, error_code);
}
}
uint32_t primary_depthstencil_attach = VK_ATTACHMENT_UNUSED, secondary_depthstencil_attach = VK_ATTACHMENT_UNUSED;
if (primary_desc.pDepthStencilAttachment) {
primary_depthstencil_attach = primary_desc.pDepthStencilAttachment[0].attachment;
}
if (secondary_desc.pDepthStencilAttachment) {
secondary_depthstencil_attach = secondary_desc.pDepthStencilAttachment[0].attachment;
}
skip |= ValidateAttachmentCompatibility(type1_string, rp1_state, type2_string, rp2_state, primary_depthstencil_attach,
secondary_depthstencil_attach, caller, error_code);
return skip;
}
// Verify that given renderPass CreateInfo for primary and secondary command buffers are compatible.
// This function deals directly with the CreateInfo, there are overloaded versions below that can take the renderPass handle and
// will then feed into this function
bool CoreChecks::ValidateRenderPassCompatibility(const char *type1_string, const RENDER_PASS_STATE *rp1_state,
const char *type2_string, const RENDER_PASS_STATE *rp2_state, const char *caller,
const char *error_code) const {
bool skip = false;
if (rp1_state->createInfo.subpassCount != rp2_state->createInfo.subpassCount) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
HandleToUint64(rp1_state->renderPass), error_code,
"%s: RenderPasses incompatible between %s w/ %s with a subpassCount of %u and %s w/ "
"%s with a subpassCount of %u.",
caller, type1_string, report_data->FormatHandle(rp1_state->renderPass).c_str(),
rp1_state->createInfo.subpassCount, type2_string, report_data->FormatHandle(rp2_state->renderPass).c_str(),
rp2_state->createInfo.subpassCount);
} else {
for (uint32_t i = 0; i < rp1_state->createInfo.subpassCount; ++i) {
skip |= ValidateSubpassCompatibility(type1_string, rp1_state, type2_string, rp2_state, i, caller, error_code);
}
}
return skip;
}
// For given pipeline, return number of MSAA samples, or one if MSAA disabled
static VkSampleCountFlagBits GetNumSamples(PIPELINE_STATE const *pipe) {
if (pipe->graphicsPipelineCI.pMultisampleState != NULL &&
VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO == pipe->graphicsPipelineCI.pMultisampleState->sType) {
return pipe->graphicsPipelineCI.pMultisampleState->rasterizationSamples;
}
return VK_SAMPLE_COUNT_1_BIT;
}
static void ListBits(std::ostream &s, uint32_t bits) {
for (int i = 0; i < 32 && bits; i++) {
if (bits & (1 << i)) {
s << i;
bits &= ~(1 << i);
if (bits) {
s << ",";
}
}
}
}
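// Example: ListBits(s, 0x0B) appends "0,1,3" to s.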
// Validate draw-time state related to the PSO
bool CoreChecks::ValidatePipelineDrawtimeState(const LAST_BOUND_STATE &state, const CMD_BUFFER_STATE *pCB, CMD_TYPE cmd_type,
const PIPELINE_STATE *pPipeline, const char *caller) const {
bool skip = false;
    const auto &current_vtx_bfr_binding_info = pCB->current_vertex_buffer_binding_info.vertex_buffer_bindings;
// Verify vertex binding
if (pPipeline->vertex_binding_descriptions_.size() > 0) {
for (size_t i = 0; i < pPipeline->vertex_binding_descriptions_.size(); i++) {
const auto vertex_binding = pPipeline->vertex_binding_descriptions_[i].binding;
if ((current_vtx_bfr_binding_info.size() < (vertex_binding + 1)) ||
(current_vtx_bfr_binding_info[vertex_binding].buffer == VK_NULL_HANDLE)) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(pCB->commandBuffer), kVUID_Core_DrawState_VtxIndexOutOfBounds,
"%s expects that this Command Buffer's vertex binding Index %u should be set via "
"vkCmdBindVertexBuffers. This is because VkVertexInputBindingDescription struct at "
"index " PRINTF_SIZE_T_SPECIFIER " of pVertexBindingDescriptions has a binding value of %u.",
report_data->FormatHandle(state.pipeline_state->pipeline).c_str(), vertex_binding, i, vertex_binding);
}
}
// Verify vertex attribute address alignment
for (size_t i = 0; i < pPipeline->vertex_attribute_descriptions_.size(); i++) {
const auto &attribute_description = pPipeline->vertex_attribute_descriptions_[i];
const auto vertex_binding = attribute_description.binding;
const auto attribute_offset = attribute_description.offset;
const auto attribute_format = attribute_description.format;
const auto &vertex_binding_map_it = pPipeline->vertex_binding_to_index_map_.find(vertex_binding);
if ((vertex_binding_map_it != pPipeline->vertex_binding_to_index_map_.cend()) &&
(vertex_binding < current_vtx_bfr_binding_info.size()) &&
(current_vtx_bfr_binding_info[vertex_binding].buffer != VK_NULL_HANDLE)) {
const auto vertex_buffer_stride = pPipeline->vertex_binding_descriptions_[vertex_binding_map_it->second].stride;
const auto vertex_buffer_offset = current_vtx_bfr_binding_info[vertex_binding].offset;
const auto buffer_state = GetBufferState(current_vtx_bfr_binding_info[vertex_binding].buffer);
                // Use only the memory binding offset, as the base memory should be properly aligned by the driver
const auto buffer_binding_address = buffer_state->binding.offset + vertex_buffer_offset;
// Use 1 as vertex/instance index to use buffer stride as well
const auto attrib_address = buffer_binding_address + vertex_buffer_stride + attribute_offset;
VkDeviceSize vtx_attrib_req_alignment = FormatElementSize(attribute_format);
if (FormatElementIsTexel(attribute_format)) {
vtx_attrib_req_alignment = SafeDivision(vtx_attrib_req_alignment, FormatChannelCount(attribute_format));
}
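                // Worked example (illustrative values only): for a format where FormatElementIsTexel() returns
                // true with FormatElementSize() == 12 and FormatChannelCount() == 3, the required alignment
                // becomes 12 / 3 = 4 bytes, i.e. each component must be 4-byte aligned.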
if (SafeModulo(attrib_address, vtx_attrib_req_alignment) != 0) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
HandleToUint64(current_vtx_bfr_binding_info[vertex_binding].buffer),
kVUID_Core_DrawState_InvalidVtxAttributeAlignment,
"Invalid attribAddress alignment for vertex attribute " PRINTF_SIZE_T_SPECIFIER
" from %s and vertex %s.",
i, report_data->FormatHandle(state.pipeline_state->pipeline).c_str(),
report_data->FormatHandle(current_vtx_bfr_binding_info[vertex_binding].buffer).c_str());
}
}
}
} else {
if ((!current_vtx_bfr_binding_info.empty()) && (!pCB->vertex_buffer_used)) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(pCB->commandBuffer), kVUID_Core_DrawState_VtxIndexOutOfBounds,
"Vertex buffers are bound to %s but no vertex buffers are attached to %s.",
report_data->FormatHandle(pCB->commandBuffer).c_str(),
report_data->FormatHandle(state.pipeline_state->pipeline).c_str());
}
}
// If Viewport or scissors are dynamic, verify that dynamic count matches PSO count.
// Skip check if rasterization is disabled or there is no viewport.
if ((!pPipeline->graphicsPipelineCI.pRasterizationState ||
(pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) &&
pPipeline->graphicsPipelineCI.pViewportState) {
bool dynViewport = IsDynamic(pPipeline, VK_DYNAMIC_STATE_VIEWPORT);
bool dynScissor = IsDynamic(pPipeline, VK_DYNAMIC_STATE_SCISSOR);
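        // Mask sketch (illustrative values only): with viewportCount == 3, requiredViewportsMask is
        // (1 << 3) - 1 == 0b111; if only viewports 0 and 2 were set (viewportMask == 0b101), then
        // missingViewportMask == ~0b101 & 0b111 == 0b010, flagging viewport 1 as missing.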
if (dynViewport) {
const auto requiredViewportsMask = (1 << pPipeline->graphicsPipelineCI.pViewportState->viewportCount) - 1;
const auto missingViewportMask = ~pCB->viewportMask & requiredViewportsMask;
if (missingViewportMask) {
std::stringstream ss;
ss << "Dynamic viewport(s) ";
ListBits(ss, missingViewportMask);
ss << " are used by pipeline state object, but were not provided via calls to vkCmdSetViewport().";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
kVUID_Core_DrawState_ViewportScissorMismatch, "%s", ss.str().c_str());
}
}
if (dynScissor) {
const auto requiredScissorMask = (1 << pPipeline->graphicsPipelineCI.pViewportState->scissorCount) - 1;
const auto missingScissorMask = ~pCB->scissorMask & requiredScissorMask;
if (missingScissorMask) {
std::stringstream ss;
ss << "Dynamic scissor(s) ";
ListBits(ss, missingScissorMask);
ss << " are used by pipeline state object, but were not provided via calls to vkCmdSetScissor().";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
kVUID_Core_DrawState_ViewportScissorMismatch, "%s", ss.str().c_str());
}
}
}
// Verify that any MSAA request in PSO matches sample# in bound FB
// Skip the check if rasterization is disabled.
if (!pPipeline->graphicsPipelineCI.pRasterizationState ||
(pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) {
VkSampleCountFlagBits pso_num_samples = GetNumSamples(pPipeline);
if (pCB->activeRenderPass) {
const auto render_pass_info = pCB->activeRenderPass->createInfo.ptr();
const VkSubpassDescription2KHR *subpass_desc = &render_pass_info->pSubpasses[pCB->activeSubpass];
uint32_t i;
unsigned subpass_num_samples = 0;
for (i = 0; i < subpass_desc->colorAttachmentCount; i++) {
const auto attachment = subpass_desc->pColorAttachments[i].attachment;
if (attachment != VK_ATTACHMENT_UNUSED)
subpass_num_samples |= (unsigned)render_pass_info->pAttachments[attachment].samples;
}
if (subpass_desc->pDepthStencilAttachment &&
subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
const auto attachment = subpass_desc->pDepthStencilAttachment->attachment;
subpass_num_samples |= (unsigned)render_pass_info->pAttachments[attachment].samples;
}
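            // Sample-count sketch (illustrative values only): VkSampleCountFlagBits values are single bits,
            // so OR-ing them accumulates every distinct attachment sample count. If all attachments use
            // 4 samples, subpass_num_samples == 0x4 and a PSO with pso_num_samples == 0x4 passes the subset
            // test below; a second attachment with 2 samples would set subpass_num_samples to 0x6 and fail it.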
if (!(device_extensions.vk_amd_mixed_attachment_samples || device_extensions.vk_nv_framebuffer_mixed_samples) &&
((subpass_num_samples & static_cast<unsigned>(pso_num_samples)) != subpass_num_samples)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
HandleToUint64(pPipeline->pipeline), kVUID_Core_DrawState_NumSamplesMismatch,
"Num samples mismatch! At draw-time in %s with %u samples while current %s w/ "
"%u samples!",
report_data->FormatHandle(pPipeline->pipeline).c_str(), pso_num_samples,
report_data->FormatHandle(pCB->activeRenderPass->renderPass).c_str(), subpass_num_samples);
}
} else {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
HandleToUint64(pPipeline->pipeline), kVUID_Core_DrawState_NoActiveRenderpass,
"No active render pass found at draw-time in %s!", report_data->FormatHandle(pPipeline->pipeline).c_str());
}
}
// Verify that PSO creation renderPass is compatible with active renderPass
if (pCB->activeRenderPass) {
// TODO: Move all of the error codes common across different Draws into a LUT accessed by cmd_type
// TODO: AMD extension codes are included here, but actual function entrypoints are not yet intercepted
// Error codes for renderpass and subpass mismatches
auto rp_error = "VUID-vkCmdDraw-renderPass-02684", sp_error = "VUID-vkCmdDraw-subpass-02685";
switch (cmd_type) {
case CMD_DRAWINDEXED:
rp_error = "VUID-vkCmdDrawIndexed-renderPass-02684";
sp_error = "VUID-vkCmdDrawIndexed-subpass-02685";
break;
case CMD_DRAWINDIRECT:
rp_error = "VUID-vkCmdDrawIndirect-renderPass-02684";
sp_error = "VUID-vkCmdDrawIndirect-subpass-02685";
break;
case CMD_DRAWINDIRECTCOUNTKHR:
rp_error = "VUID-vkCmdDrawIndirectCountKHR-renderPass-02684";
sp_error = "VUID-vkCmdDrawIndirectCountKHR-subpass-02685";
break;
case CMD_DRAWINDEXEDINDIRECT:
rp_error = "VUID-vkCmdDrawIndexedIndirect-renderPass-02684";
sp_error = "VUID-vkCmdDrawIndexedIndirect-subpass-02685";
break;
case CMD_DRAWINDEXEDINDIRECTCOUNTKHR:
rp_error = "VUID-vkCmdDrawIndexedIndirectCountKHR-renderPass-02684";
sp_error = "VUID-vkCmdDrawIndexedIndirectCountKHR-subpass-02685";
break;
case CMD_DRAWMESHTASKSNV:
rp_error = "VUID-vkCmdDrawMeshTasksNV-renderPass-02684";
sp_error = "VUID-vkCmdDrawMeshTasksNV-subpass-02685";
break;
case CMD_DRAWMESHTASKSINDIRECTNV:
rp_error = "VUID-vkCmdDrawMeshTasksIndirectNV-renderPass-02684";
sp_error = "VUID-vkCmdDrawMeshTasksIndirectNV-subpass-02685";
break;
case CMD_DRAWMESHTASKSINDIRECTCOUNTNV:
rp_error = "VUID-vkCmdDrawMeshTasksIndirectCountNV-renderPass-02684";
sp_error = "VUID-vkCmdDrawMeshTasksIndirectCountNV-subpass-02685";
break;
default:
assert(CMD_DRAW == cmd_type);
break;
}
if (pCB->activeRenderPass->renderPass != pPipeline->rp_state->renderPass) {
// renderPass that PSO was created with must be compatible with active renderPass that PSO is being used with
skip |= ValidateRenderPassCompatibility("active render pass", pCB->activeRenderPass, "pipeline state object",
pPipeline->rp_state.get(), caller, rp_error);
}
if (pPipeline->graphicsPipelineCI.subpass != pCB->activeSubpass) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
HandleToUint64(pPipeline->pipeline), sp_error, "Pipeline was built for subpass %u but used in subpass %u.",
pPipeline->graphicsPipelineCI.subpass, pCB->activeSubpass);
}
}
return skip;
}
// For given cvdescriptorset::DescriptorSet, verify that its Set is compatible w/ the setLayout corresponding to
// pipelineLayout[layoutIndex]
static bool VerifySetLayoutCompatibility(const cvdescriptorset::DescriptorSet *descriptor_set,
PIPELINE_LAYOUT_STATE const *pipeline_layout, const uint32_t layoutIndex,
string &errorMsg) {
auto num_sets = pipeline_layout->set_layouts.size();
if (layoutIndex >= num_sets) {
stringstream errorStr;
errorStr << "VkPipelineLayout (" << pipeline_layout->layout << ") only contains " << num_sets
<< " setLayouts corresponding to sets 0-" << num_sets - 1 << ", but you're attempting to bind set to index "
<< layoutIndex;
errorMsg = errorStr.str();
return false;
}
if (descriptor_set->IsPushDescriptor()) return true;
auto layout_node = pipeline_layout->set_layouts[layoutIndex];
return cvdescriptorset::VerifySetLayoutCompatibility(layout_node.get(), descriptor_set->GetLayout().get(), &errorMsg);
}
// Validate overall state at the time of a draw call
bool CoreChecks::ValidateCmdBufDrawState(const CMD_BUFFER_STATE *cb_node, CMD_TYPE cmd_type, const bool indexed,
const VkPipelineBindPoint bind_point, const char *function, const char *pipe_err_code,
const char *state_err_code) const {
const auto last_bound_it = cb_node->lastBound.find(bind_point);
const PIPELINE_STATE *pPipe = nullptr;
if (last_bound_it != cb_node->lastBound.cend()) {
pPipe = last_bound_it->second.pipeline_state;
}
if (nullptr == pPipe) {
return log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), pipe_err_code,
"Must not call %s on this command buffer while there is no %s pipeline bound.", function,
bind_point == VK_PIPELINE_BIND_POINT_GRAPHICS ? "Graphics" : "Compute");
}
bool result = false;
auto const &state = last_bound_it->second;
// First check flag states
if (VK_PIPELINE_BIND_POINT_GRAPHICS == bind_point) result = ValidateDrawStateFlags(cb_node, pPipe, indexed, state_err_code);
// Now complete other state checks
string errorString;
auto const &pipeline_layout = pPipe->pipeline_layout;
for (const auto &set_binding_pair : pPipe->active_slots) {
uint32_t setIndex = set_binding_pair.first;
// If valid set is not bound throw an error
if ((state.per_set.size() <= setIndex) || (!state.per_set[setIndex].bound_descriptor_set)) {
result |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), kVUID_Core_DrawState_DescriptorSetNotBound,
"%s uses set #%u but that set is not bound.", report_data->FormatHandle(pPipe->pipeline).c_str(), setIndex);
} else if (!VerifySetLayoutCompatibility(state.per_set[setIndex].bound_descriptor_set, &pipeline_layout, setIndex,
errorString)) {
// Set is bound but not compatible w/ overlapping pipeline_layout from PSO
VkDescriptorSet setHandle = state.per_set[setIndex].bound_descriptor_set->GetSet();
result |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
HandleToUint64(setHandle), kVUID_Core_DrawState_PipelineLayoutsIncompatible,
"%s bound as set #%u is not compatible with overlapping %s due to: %s",
report_data->FormatHandle(setHandle).c_str(), setIndex,
report_data->FormatHandle(pipeline_layout.layout).c_str(), errorString.c_str());
} else { // Valid set is bound and layout compatible, validate that it's updated
// Pull the set node
const cvdescriptorset::DescriptorSet *descriptor_set = state.per_set[setIndex].bound_descriptor_set;
// Validate the draw-time state for this descriptor set
std::string err_str;
if (!descriptor_set->IsPushDescriptor()) {
// For the "bindless" style resource usage with many descriptors, need to optimize command <-> descriptor
// binding validation. Take the requested binding set and prefilter it to eliminate redundant validation checks.
// Here, the currently bound pipeline determines whether an image validation check is redundant...
                // for images, the "req" portion of the binding_req is indirectly (but tightly) coupled to the pipeline.
cvdescriptorset::PrefilterBindRequestMap reduced_map(*descriptor_set, set_binding_pair.second);
const auto &binding_req_map = reduced_map.FilteredMap(*cb_node, *pPipe);
// We can skip validating the descriptor set if "nothing" has changed since the last validation.
// Same set, no image layout changes, and same "pipeline state" (binding_req_map). If there are
// any dynamic descriptors, always revalidate rather than caching the values. We currently only
// apply this optimization if IsManyDescriptors is true, to avoid the overhead of copying the
// binding_req_map which could potentially be expensive.
bool need_validate =
!reduced_map.IsManyDescriptors() ||
// Revalidate each time if the set has dynamic offsets
state.per_set[setIndex].dynamicOffsets.size() > 0 ||
// Revalidate if descriptor set (or contents) has changed
state.per_set[setIndex].validated_set != descriptor_set ||
state.per_set[setIndex].validated_set_change_count != descriptor_set->GetChangeCount() ||
(!disabled.image_layout_validation &&
state.per_set[setIndex].validated_set_image_layout_change_count != cb_node->image_layout_change_count) ||
                    // Revalidate if previous bindingReqMap doesn't include new bindingReqMap
!std::includes(state.per_set[setIndex].validated_set_binding_req_map.begin(),
state.per_set[setIndex].validated_set_binding_req_map.end(), set_binding_pair.second.begin(),
set_binding_pair.second.end());
if (need_validate) {
if (!ValidateDrawState(descriptor_set, binding_req_map, state.per_set[setIndex].dynamicOffsets, cb_node,
function, &err_str)) {
auto set = descriptor_set->GetSet();
result |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
HandleToUint64(set), kVUID_Core_DrawState_DescriptorSetNotUpdated,
"%s bound as set #%u encountered the following validation error at %s time: %s",
report_data->FormatHandle(set).c_str(), setIndex, function, err_str.c_str());
}
}
}
}
}
// Check general pipeline state that needs to be validated at drawtime
if (VK_PIPELINE_BIND_POINT_GRAPHICS == bind_point)
result |= ValidatePipelineDrawtimeState(state, cb_node, cmd_type, pPipe, function);
return result;
}
void ValidationStateTracker::UpdateDrawState(CMD_BUFFER_STATE *cb_state, const VkPipelineBindPoint bind_point) {
auto &state = cb_state->lastBound[bind_point];
PIPELINE_STATE *pPipe = state.pipeline_state;
if (VK_NULL_HANDLE != state.pipeline_layout) {
for (const auto &set_binding_pair : pPipe->active_slots) {
uint32_t setIndex = set_binding_pair.first;
// Pull the set node
cvdescriptorset::DescriptorSet *descriptor_set = state.per_set[setIndex].bound_descriptor_set;
if (!descriptor_set->IsPushDescriptor()) {
// For the "bindless" style resource usage with many descriptors, need to optimize command <-> descriptor binding
                // TODO: If recreating the reduced_map here shows up in profiling, need to find a way of sharing with the
// Validate pass. Though in the case of "many" descriptors, typically the descriptor count >> binding count
cvdescriptorset::PrefilterBindRequestMap reduced_map(*descriptor_set, set_binding_pair.second);
const auto &binding_req_map = reduced_map.FilteredMap(*cb_state, *pPipe);
if (reduced_map.IsManyDescriptors()) {
// Only update validate binding tags if we meet the "many" criteria in the Prefilter class
descriptor_set->UpdateValidationCache(*cb_state, *pPipe, binding_req_map);
}
// We can skip updating the state if "nothing" has changed since the last validation.
// See CoreChecks::ValidateCmdBufDrawState for more details.
bool need_update =
!reduced_map.IsManyDescriptors() ||
// Update if descriptor set (or contents) has changed
state.per_set[setIndex].validated_set != descriptor_set ||
state.per_set[setIndex].validated_set_change_count != descriptor_set->GetChangeCount() ||
(!disabled.image_layout_validation &&
state.per_set[setIndex].validated_set_image_layout_change_count != cb_state->image_layout_change_count) ||
                    // Update if previous bindingReqMap doesn't include new bindingReqMap
!std::includes(state.per_set[setIndex].validated_set_binding_req_map.begin(),
state.per_set[setIndex].validated_set_binding_req_map.end(), set_binding_pair.second.begin(),
set_binding_pair.second.end());
if (need_update) {
// Bind this set and its active descriptor resources to the command buffer
descriptor_set->UpdateDrawState(this, cb_state, binding_req_map);
state.per_set[setIndex].validated_set = descriptor_set;
state.per_set[setIndex].validated_set_change_count = descriptor_set->GetChangeCount();
state.per_set[setIndex].validated_set_image_layout_change_count = cb_state->image_layout_change_count;
state.per_set[setIndex].validated_set_binding_req_map =
reduced_map.IsManyDescriptors() ? set_binding_pair.second : BindingReqMap();
}
}
}
}
if (!pPipe->vertex_binding_descriptions_.empty()) {
cb_state->vertex_buffer_used = true;
}
}
bool CoreChecks::ValidatePipelineLocked(std::vector<std::unique_ptr<PIPELINE_STATE>> const &pPipelines, int pipelineIndex) const {
bool skip = false;
const PIPELINE_STATE *pPipeline = pPipelines[pipelineIndex].get();
// If create derivative bit is set, check that we've specified a base
// pipeline correctly, and that the base pipeline was created to allow
// derivatives.
if (pPipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_DERIVATIVE_BIT) {
const PIPELINE_STATE *pBasePipeline = nullptr;
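        // XOR sketch (illustrative): the condition below is true when the two "base specified" booleans are
        // equal, i.e. both a basePipelineHandle and a basePipelineIndex were supplied, or neither was.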
if (!((pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) ^
(pPipeline->graphicsPipelineCI.basePipelineIndex != -1))) {
// This check is a superset of "VUID-VkGraphicsPipelineCreateInfo-flags-00724" and
// "VUID-VkGraphicsPipelineCreateInfo-flags-00725"
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), kVUID_Core_DrawState_InvalidPipelineCreateState,
"Invalid Pipeline CreateInfo: exactly one of base pipeline index and handle must be specified");
} else if (pPipeline->graphicsPipelineCI.basePipelineIndex != -1) {
if (pPipeline->graphicsPipelineCI.basePipelineIndex >= pipelineIndex) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), "VUID-vkCreateGraphicsPipelines-flags-00720",
"Invalid Pipeline CreateInfo: base pipeline must occur earlier in array than derivative pipeline.");
} else {
pBasePipeline = pPipelines[pPipeline->graphicsPipelineCI.basePipelineIndex].get();
}
} else if (pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) {
pBasePipeline = GetPipelineState(pPipeline->graphicsPipelineCI.basePipelineHandle);
}
if (pBasePipeline && !(pBasePipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), kVUID_Core_DrawState_InvalidPipelineCreateState,
"Invalid Pipeline CreateInfo: base pipeline does not allow derivatives.");
}
}
return skip;
}
// UNLOCKED pipeline validation. DO NOT lookup objects in the CoreChecks->* maps in this function.
bool CoreChecks::ValidatePipelineUnlocked(const PIPELINE_STATE *pPipeline, uint32_t pipelineIndex) const {
bool skip = false;
// Ensure the subpass index is valid. If not, then ValidateGraphicsPipelineShaderState
// produces nonsense errors that confuse users. Other layers should already
// emit errors for renderpass being invalid.
    // Index pSubpasses only after the range check; indexing with an out-of-range subpass would read out of bounds.
    const auto &rp_create_info = pPipeline->rp_state->createInfo;
    const auto *subpass_desc = (pPipeline->graphicsPipelineCI.subpass < rp_create_info.subpassCount)
                                   ? &rp_create_info.pSubpasses[pPipeline->graphicsPipelineCI.subpass]
                                   : nullptr;
    if (subpass_desc == nullptr) {
        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
                        "VUID-VkGraphicsPipelineCreateInfo-subpass-00759",
                        "Invalid Pipeline CreateInfo State: Subpass index %u is out of range for this renderpass (0..%u).",
                        pPipeline->graphicsPipelineCI.subpass, rp_create_info.subpassCount - 1);
    }
if (pPipeline->graphicsPipelineCI.pColorBlendState != NULL) {
const safe_VkPipelineColorBlendStateCreateInfo *color_blend_state = pPipeline->graphicsPipelineCI.pColorBlendState;
        if (subpass_desc && (color_blend_state->attachmentCount != subpass_desc->colorAttachmentCount)) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
"VUID-VkGraphicsPipelineCreateInfo-attachmentCount-00746",
"vkCreateGraphicsPipelines(): %s subpass %u has colorAttachmentCount of %u which doesn't "
"match the pColorBlendState->attachmentCount of %u.",
report_data->FormatHandle(pPipeline->rp_state->renderPass).c_str(), pPipeline->graphicsPipelineCI.subpass,
subpass_desc->colorAttachmentCount, color_blend_state->attachmentCount);
}
if (!enabled_features.core.independentBlend) {
if (pPipeline->attachments.size() > 1) {
const VkPipelineColorBlendAttachmentState *const pAttachments = &pPipeline->attachments[0];
for (size_t i = 1; i < pPipeline->attachments.size(); i++) {
// Quoting the spec: "If [the independent blend] feature is not enabled, the VkPipelineColorBlendAttachmentState
// settings for all color attachments must be identical." VkPipelineColorBlendAttachmentState contains
// only attachment state, so memcmp is best suited for the comparison
if (memcmp(static_cast<const void *>(pAttachments), static_cast<const void *>(&pAttachments[i]),
sizeof(pAttachments[0]))) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), "VUID-VkPipelineColorBlendStateCreateInfo-pAttachments-00605",
"Invalid Pipeline CreateInfo: If independent blend feature not enabled, all elements of "
"pAttachments must be identical.");
break;
}
}
}
}
if (!enabled_features.core.logicOp && (pPipeline->graphicsPipelineCI.pColorBlendState->logicOpEnable != VK_FALSE)) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
"VUID-VkPipelineColorBlendStateCreateInfo-logicOpEnable-00606",
"Invalid Pipeline CreateInfo: If logic operations feature not enabled, logicOpEnable must be VK_FALSE.");
}
for (size_t i = 0; i < pPipeline->attachments.size(); i++) {
if ((pPipeline->attachments[i].srcColorBlendFactor == VK_BLEND_FACTOR_SRC1_COLOR) ||
(pPipeline->attachments[i].srcColorBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR) ||
(pPipeline->attachments[i].srcColorBlendFactor == VK_BLEND_FACTOR_SRC1_ALPHA) ||
(pPipeline->attachments[i].srcColorBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA)) {
if (!enabled_features.core.dualSrcBlend) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), "VUID-VkPipelineColorBlendAttachmentState-srcColorBlendFactor-00608",
"vkCreateGraphicsPipelines(): pPipelines[%d].pColorBlendState.pAttachments[" PRINTF_SIZE_T_SPECIFIER
"].srcColorBlendFactor uses a dual-source blend factor (%d), but this device feature is not "
"enabled.",
pipelineIndex, i, pPipeline->attachments[i].srcColorBlendFactor);
}
}
if ((pPipeline->attachments[i].dstColorBlendFactor == VK_BLEND_FACTOR_SRC1_COLOR) ||
(pPipeline->attachments[i].dstColorBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR) ||
(pPipeline->attachments[i].dstColorBlendFactor == VK_BLEND_FACTOR_SRC1_ALPHA) ||
(pPipeline->attachments[i].dstColorBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA)) {
if (!enabled_features.core.dualSrcBlend) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), "VUID-VkPipelineColorBlendAttachmentState-dstColorBlendFactor-00609",
"vkCreateGraphicsPipelines(): pPipelines[%d].pColorBlendState.pAttachments[" PRINTF_SIZE_T_SPECIFIER
"].dstColorBlendFactor uses a dual-source blend factor (%d), but this device feature is not "
"enabled.",
pipelineIndex, i, pPipeline->attachments[i].dstColorBlendFactor);
}
}
if ((pPipeline->attachments[i].srcAlphaBlendFactor == VK_BLEND_FACTOR_SRC1_COLOR) ||
(pPipeline->attachments[i].srcAlphaBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR) ||
(pPipeline->attachments[i].srcAlphaBlendFactor == VK_BLEND_FACTOR_SRC1_ALPHA) ||
(pPipeline->attachments[i].srcAlphaBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA)) {
if (!enabled_features.core.dualSrcBlend) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), "VUID-VkPipelineColorBlendAttachmentState-srcAlphaBlendFactor-00610",
"vkCreateGraphicsPipelines(): pPipelines[%d].pColorBlendState.pAttachments[" PRINTF_SIZE_T_SPECIFIER
"].srcAlphaBlendFactor uses a dual-source blend factor (%d), but this device feature is not "
"enabled.",
pipelineIndex, i, pPipeline->attachments[i].srcAlphaBlendFactor);
}
}
if ((pPipeline->attachments[i].dstAlphaBlendFactor == VK_BLEND_FACTOR_SRC1_COLOR) ||
(pPipeline->attachments[i].dstAlphaBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR) ||
(pPipeline->attachments[i].dstAlphaBlendFactor == VK_BLEND_FACTOR_SRC1_ALPHA) ||
(pPipeline->attachments[i].dstAlphaBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA)) {
if (!enabled_features.core.dualSrcBlend) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), "VUID-VkPipelineColorBlendAttachmentState-dstAlphaBlendFactor-00611",
"vkCreateGraphicsPipelines(): pPipelines[%d].pColorBlendState.pAttachments[" PRINTF_SIZE_T_SPECIFIER
"].dstAlphaBlendFactor uses a dual-source blend factor (%d), but this device feature is not "
"enabled.",
pipelineIndex, i, pPipeline->attachments[i].dstAlphaBlendFactor);
}
}
}
}
if (ValidateGraphicsPipelineShaderState(pPipeline)) {
skip = true;
}
// Each shader's stage must be unique
if (pPipeline->duplicate_shaders) {
for (uint32_t stage = VK_SHADER_STAGE_VERTEX_BIT; stage & VK_SHADER_STAGE_ALL_GRAPHICS; stage <<= 1) {
if (pPipeline->duplicate_shaders & stage) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), kVUID_Core_DrawState_InvalidPipelineCreateState,
"Invalid Pipeline CreateInfo State: Multiple shaders provided for stage %s",
string_VkShaderStageFlagBits(VkShaderStageFlagBits(stage)));
}
}
}
if (device_extensions.vk_nv_mesh_shader) {
// VS or mesh is required
if (!(pPipeline->active_shaders & (VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_MESH_BIT_NV))) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), "VUID-VkGraphicsPipelineCreateInfo-stage-02096",
"Invalid Pipeline CreateInfo State: Vertex Shader or Mesh Shader required.");
}
// Can't mix mesh and VTG
if ((pPipeline->active_shaders & (VK_SHADER_STAGE_MESH_BIT_NV | VK_SHADER_STAGE_TASK_BIT_NV)) &&
(pPipeline->active_shaders &
(VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_GEOMETRY_BIT | VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT |
VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT))) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), "VUID-VkGraphicsPipelineCreateInfo-pStages-02095",
"Invalid Pipeline CreateInfo State: Geometric shader stages must either be all mesh (mesh | task) "
"or all VTG (vertex, tess control, tess eval, geom).");
}
} else {
// VS is required
if (!(pPipeline->active_shaders & VK_SHADER_STAGE_VERTEX_BIT)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), "VUID-VkGraphicsPipelineCreateInfo-stage-00727",
"Invalid Pipeline CreateInfo State: Vertex Shader required.");
}
}
if (!enabled_features.mesh_shader.meshShader && (pPipeline->active_shaders & VK_SHADER_STAGE_MESH_BIT_NV)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
"VUID-VkPipelineShaderStageCreateInfo-stage-02091",
"Invalid Pipeline CreateInfo State: Mesh Shader not supported.");
}
if (!enabled_features.mesh_shader.taskShader && (pPipeline->active_shaders & VK_SHADER_STAGE_TASK_BIT_NV)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
"VUID-VkPipelineShaderStageCreateInfo-stage-02092",
"Invalid Pipeline CreateInfo State: Task Shader not supported.");
}
// Either both or neither TC/TE shaders should be defined
bool has_control = (pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) != 0;
bool has_eval = (pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) != 0;
if (has_control && !has_eval) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
"VUID-VkGraphicsPipelineCreateInfo-pStages-00729",
"Invalid Pipeline CreateInfo State: TE and TC shaders must be included or excluded as a pair.");
}
if (!has_control && has_eval) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
"VUID-VkGraphicsPipelineCreateInfo-pStages-00730",
"Invalid Pipeline CreateInfo State: TE and TC shaders must be included or excluded as a pair.");
}
    // Compute shaders should be specified independently of Gfx shaders
if (pPipeline->active_shaders & VK_SHADER_STAGE_COMPUTE_BIT) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
"VUID-VkGraphicsPipelineCreateInfo-stage-00728",
"Invalid Pipeline CreateInfo State: Do not specify Compute Shader for Gfx Pipeline.");
}
if ((pPipeline->active_shaders & VK_SHADER_STAGE_VERTEX_BIT) && !pPipeline->graphicsPipelineCI.pInputAssemblyState) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
"VUID-VkGraphicsPipelineCreateInfo-pStages-02098",
"Invalid Pipeline CreateInfo State: Missing pInputAssemblyState.");
}
// VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive topology is only valid for tessellation pipelines.
// Mismatching primitive topology and tessellation fails graphics pipeline creation.
if (has_control && has_eval &&
(!pPipeline->graphicsPipelineCI.pInputAssemblyState ||
pPipeline->graphicsPipelineCI.pInputAssemblyState->topology != VK_PRIMITIVE_TOPOLOGY_PATCH_LIST)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
"VUID-VkGraphicsPipelineCreateInfo-pStages-00736",
"Invalid Pipeline CreateInfo State: VK_PRIMITIVE_TOPOLOGY_PATCH_LIST must be set as IA topology for "
"tessellation pipelines.");
}
if (pPipeline->graphicsPipelineCI.pInputAssemblyState) {
if (pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_PATCH_LIST) {
if (!has_control || !has_eval) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), "VUID-VkGraphicsPipelineCreateInfo-topology-00737",
"Invalid Pipeline CreateInfo State: VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive topology is only valid "
"for tessellation pipelines.");
}
}
if ((pPipeline->graphicsPipelineCI.pInputAssemblyState->primitiveRestartEnable == VK_TRUE) &&
(pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_POINT_LIST ||
pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_LIST ||
pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST ||
pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY ||
pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY ||
pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_PATCH_LIST)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), "VUID-VkPipelineInputAssemblyStateCreateInfo-topology-00428",
"topology is %s and primitiveRestartEnable is VK_TRUE. It is invalid.",
string_VkPrimitiveTopology(pPipeline->graphicsPipelineCI.pInputAssemblyState->topology));
}
if ((enabled_features.core.geometryShader == VK_FALSE) &&
(pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY ||
pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY ||
pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY ||
pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), "VUID-VkPipelineInputAssemblyStateCreateInfo-topology-00429",
"topology is %s and geometry shaders feature is not enabled. It is invalid.",
string_VkPrimitiveTopology(pPipeline->graphicsPipelineCI.pInputAssemblyState->topology));
}
if ((enabled_features.core.tessellationShader == VK_FALSE) &&
(pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_PATCH_LIST)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), "VUID-VkPipelineInputAssemblyStateCreateInfo-topology-00430",
"topology is %s and tessellation shaders feature is not enabled. It is invalid.",
string_VkPrimitiveTopology(pPipeline->graphicsPipelineCI.pInputAssemblyState->topology));
}
}
// If a rasterization state is provided...
if (pPipeline->graphicsPipelineCI.pRasterizationState) {
if ((pPipeline->graphicsPipelineCI.pRasterizationState->depthClampEnable == VK_TRUE) &&
(!enabled_features.core.depthClamp)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), "VUID-VkPipelineRasterizationStateCreateInfo-depthClampEnable-00782",
"vkCreateGraphicsPipelines(): the depthClamp device feature is disabled: the depthClampEnable member "
"of the VkPipelineRasterizationStateCreateInfo structure must be set to VK_FALSE.");
}
if (!IsDynamic(pPipeline, VK_DYNAMIC_STATE_DEPTH_BIAS) &&
(pPipeline->graphicsPipelineCI.pRasterizationState->depthBiasClamp != 0.0) && (!enabled_features.core.depthBiasClamp)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), kVUID_Core_DrawState_InvalidFeature,
"vkCreateGraphicsPipelines(): the depthBiasClamp device feature is disabled: the depthBiasClamp member "
"of the VkPipelineRasterizationStateCreateInfo structure must be set to 0.0 unless the "
"VK_DYNAMIC_STATE_DEPTH_BIAS dynamic state is enabled");
}
// If rasterization is enabled...
if (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE) {
            if (pPipeline->graphicsPipelineCI.pMultisampleState &&
                (pPipeline->graphicsPipelineCI.pMultisampleState->alphaToOneEnable == VK_TRUE) &&
                (!enabled_features.core.alphaToOne)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), "VUID-VkPipelineMultisampleStateCreateInfo-alphaToOneEnable-00785",
"vkCreateGraphicsPipelines(): the alphaToOne device feature is disabled: the alphaToOneEnable "
"member of the VkPipelineMultisampleStateCreateInfo structure must be set to VK_FALSE.");
}
// If subpass uses a depth/stencil attachment, pDepthStencilState must be a pointer to a valid structure
if (subpass_desc && subpass_desc->pDepthStencilAttachment &&
subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
if (!pPipeline->graphicsPipelineCI.pDepthStencilState) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), "VUID-VkGraphicsPipelineCreateInfo-rasterizerDiscardEnable-00752",
"Invalid Pipeline CreateInfo State: pDepthStencilState is NULL when rasterization is enabled "
"and subpass uses a depth/stencil attachment.");
} else if ((pPipeline->graphicsPipelineCI.pDepthStencilState->depthBoundsTestEnable == VK_TRUE) &&
(!enabled_features.core.depthBounds)) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), "VUID-VkPipelineDepthStencilStateCreateInfo-depthBoundsTestEnable-00598",
"vkCreateGraphicsPipelines(): the depthBounds device feature is disabled: the "
"depthBoundsTestEnable member of the VkPipelineDepthStencilStateCreateInfo structure must be "
"set to VK_FALSE.");
}
}
// If subpass uses color attachments, pColorBlendState must be valid pointer
if (subpass_desc) {
uint32_t color_attachment_count = 0;
for (uint32_t i = 0; i < subpass_desc->colorAttachmentCount; ++i) {
if (subpass_desc->pColorAttachments[i].attachment != VK_ATTACHMENT_UNUSED) {
++color_attachment_count;
}
}
if (color_attachment_count > 0 && pPipeline->graphicsPipelineCI.pColorBlendState == nullptr) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), "VUID-VkGraphicsPipelineCreateInfo-rasterizerDiscardEnable-00753",
"Invalid Pipeline CreateInfo State: pColorBlendState is NULL when rasterization is enabled and "
"subpass uses color attachments.");
}
}
}
}
if ((pPipeline->active_shaders & VK_SHADER_STAGE_VERTEX_BIT) && !pPipeline->graphicsPipelineCI.pVertexInputState) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
"VUID-VkGraphicsPipelineCreateInfo-pStages-02097",
"Invalid Pipeline CreateInfo State: Missing pVertexInputState.");
}
auto vi = pPipeline->graphicsPipelineCI.pVertexInputState;
if (vi != NULL) {
for (uint32_t j = 0; j < vi->vertexAttributeDescriptionCount; j++) {
VkFormat format = vi->pVertexAttributeDescriptions[j].format;
// Internal call to get format info. Still goes through layers, could potentially go directly to ICD.
VkFormatProperties properties;
DispatchGetPhysicalDeviceFormatProperties(physical_device, format, &properties);
if ((properties.bufferFeatures & VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT) == 0) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), "VUID-VkVertexInputAttributeDescription-format-00623",
"vkCreateGraphicsPipelines: pCreateInfo[%d].pVertexInputState->vertexAttributeDescriptions[%d].format "
"(%s) is not a supported vertex buffer format.",
pipelineIndex, j, string_VkFormat(format));
}
}
}
    // subpass_desc may be null when the subpass index was out of range; skip these checks in that case.
    if (pPipeline->graphicsPipelineCI.pMultisampleState && subpass_desc) {
auto accumColorSamples = [subpass_desc, pPipeline](uint32_t &samples) {
for (uint32_t i = 0; i < subpass_desc->colorAttachmentCount; i++) {
const auto attachment = subpass_desc->pColorAttachments[i].attachment;
if (attachment != VK_ATTACHMENT_UNUSED) {
samples |= static_cast<uint32_t>(pPipeline->rp_state->createInfo.pAttachments[attachment].samples);
}
}
};
if (!(device_extensions.vk_amd_mixed_attachment_samples || device_extensions.vk_nv_framebuffer_mixed_samples)) {
uint32_t raster_samples = static_cast<uint32_t>(GetNumSamples(pPipeline));
uint32_t subpass_num_samples = 0;
accumColorSamples(subpass_num_samples);
if (subpass_desc->pDepthStencilAttachment &&
subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
const auto attachment = subpass_desc->pDepthStencilAttachment->attachment;
subpass_num_samples |= static_cast<uint32_t>(pPipeline->rp_state->createInfo.pAttachments[attachment].samples);
}
// subpass_num_samples is 0 when the subpass has no attachments or if all attachments are VK_ATTACHMENT_UNUSED.
// Only validate the value of subpass_num_samples if the subpass has attachments that are not VK_ATTACHMENT_UNUSED.
if (subpass_num_samples && (!IsPowerOfTwo(subpass_num_samples) || (subpass_num_samples != raster_samples))) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), "VUID-VkGraphicsPipelineCreateInfo-subpass-00757",
"vkCreateGraphicsPipelines: pCreateInfo[%d].pMultisampleState->rasterizationSamples (%u) "
"does not match the number of samples of the RenderPass color and/or depth attachment.",
pipelineIndex, raster_samples);
}
}
if (device_extensions.vk_amd_mixed_attachment_samples) {
VkSampleCountFlagBits max_sample_count = static_cast<VkSampleCountFlagBits>(0);
for (uint32_t i = 0; i < subpass_desc->colorAttachmentCount; ++i) {
if (subpass_desc->pColorAttachments[i].attachment != VK_ATTACHMENT_UNUSED) {
max_sample_count = std::max(
max_sample_count,
pPipeline->rp_state->createInfo.pAttachments[subpass_desc->pColorAttachments[i].attachment].samples);
}
}
if (subpass_desc->pDepthStencilAttachment &&
subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
max_sample_count = std::max(
max_sample_count,
pPipeline->rp_state->createInfo.pAttachments[subpass_desc->pDepthStencilAttachment->attachment].samples);
}
if ((pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE) &&
(pPipeline->graphicsPipelineCI.pMultisampleState->rasterizationSamples != max_sample_count)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), "VUID-VkGraphicsPipelineCreateInfo-subpass-01505",
"vkCreateGraphicsPipelines: pCreateInfo[%d].pMultisampleState->rasterizationSamples (%s) != max "
"attachment samples (%s) used in subpass %u.",
pipelineIndex,
string_VkSampleCountFlagBits(pPipeline->graphicsPipelineCI.pMultisampleState->rasterizationSamples),
string_VkSampleCountFlagBits(max_sample_count), pPipeline->graphicsPipelineCI.subpass);
}
}
if (device_extensions.vk_nv_framebuffer_mixed_samples) {
uint32_t raster_samples = static_cast<uint32_t>(GetNumSamples(pPipeline));
uint32_t subpass_color_samples = 0;
accumColorSamples(subpass_color_samples);
if (subpass_desc->pDepthStencilAttachment &&
subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
const auto attachment = subpass_desc->pDepthStencilAttachment->attachment;
const uint32_t subpass_depth_samples =
static_cast<uint32_t>(pPipeline->rp_state->createInfo.pAttachments[attachment].samples);
if (pPipeline->graphicsPipelineCI.pDepthStencilState) {
const bool ds_test_enabled =
(pPipeline->graphicsPipelineCI.pDepthStencilState->depthTestEnable == VK_TRUE) ||
(pPipeline->graphicsPipelineCI.pDepthStencilState->depthBoundsTestEnable == VK_TRUE) ||
(pPipeline->graphicsPipelineCI.pDepthStencilState->stencilTestEnable == VK_TRUE);
if (ds_test_enabled && (!IsPowerOfTwo(subpass_depth_samples) || (raster_samples != subpass_depth_samples))) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), "VUID-VkGraphicsPipelineCreateInfo-subpass-01411",
"vkCreateGraphicsPipelines: pCreateInfo[%d].pMultisampleState->rasterizationSamples (%u) "
"does not match the number of samples of the RenderPass depth attachment (%u).",
pipelineIndex, raster_samples, subpass_depth_samples);
}
}
}
if (IsPowerOfTwo(subpass_color_samples)) {
if (raster_samples < subpass_color_samples) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), "VUID-VkGraphicsPipelineCreateInfo-subpass-01412",
"vkCreateGraphicsPipelines: pCreateInfo[%d].pMultisampleState->rasterizationSamples (%u) "
"is not greater or equal to the number of samples of the RenderPass color attachment (%u).",
pipelineIndex, raster_samples, subpass_color_samples);
}
if (pPipeline->graphicsPipelineCI.pMultisampleState) {
if ((raster_samples > subpass_color_samples) &&
(pPipeline->graphicsPipelineCI.pMultisampleState->sampleShadingEnable == VK_TRUE)) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), "VUID-VkPipelineMultisampleStateCreateInfo-rasterizationSamples-01415",
"vkCreateGraphicsPipelines: pCreateInfo[%d].pMultisampleState->sampleShadingEnable must be "
"VK_FALSE when "
"pCreateInfo[%d].pMultisampleState->rasterizationSamples (%u) is greater than the number of "
"samples of the "
"subpass color attachment (%u).",
pipelineIndex, pipelineIndex, raster_samples, subpass_color_samples);
}
const auto *coverage_modulation_state = lvl_find_in_chain<VkPipelineCoverageModulationStateCreateInfoNV>(
pPipeline->graphicsPipelineCI.pMultisampleState->pNext);
if (coverage_modulation_state && (coverage_modulation_state->coverageModulationTableEnable == VK_TRUE)) {
if (coverage_modulation_state->coverageModulationTableCount != (raster_samples / subpass_color_samples)) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device),
"VUID-VkPipelineCoverageModulationStateCreateInfoNV-coverageModulationTableEnable-01405",
"vkCreateGraphicsPipelines: pCreateInfos[%d] VkPipelineCoverageModulationStateCreateInfoNV "
"coverageModulationTableCount of %u is invalid.",
pipelineIndex, coverage_modulation_state->coverageModulationTableCount);
}
}
}
}
}
if (device_extensions.vk_nv_fragment_coverage_to_color) {
const auto coverage_to_color_state =
lvl_find_in_chain<VkPipelineCoverageToColorStateCreateInfoNV>(pPipeline->graphicsPipelineCI.pMultisampleState);
if (coverage_to_color_state && coverage_to_color_state->coverageToColorEnable == VK_TRUE) {
bool attachment_is_valid = false;
std::string error_detail;
if (coverage_to_color_state->coverageToColorLocation < subpass_desc->colorAttachmentCount) {
const auto color_attachment_ref =
subpass_desc->pColorAttachments[coverage_to_color_state->coverageToColorLocation];
if (color_attachment_ref.attachment != VK_ATTACHMENT_UNUSED) {
const auto color_attachment = pPipeline->rp_state->createInfo.pAttachments[color_attachment_ref.attachment];
switch (color_attachment.format) {
case VK_FORMAT_R8_UINT:
case VK_FORMAT_R8_SINT:
case VK_FORMAT_R16_UINT:
case VK_FORMAT_R16_SINT:
case VK_FORMAT_R32_UINT:
case VK_FORMAT_R32_SINT:
attachment_is_valid = true;
break;
default:
string_sprintf(&error_detail, "references an attachment with an invalid format (%s).",
string_VkFormat(color_attachment.format));
break;
}
} else {
string_sprintf(&error_detail,
"references an invalid attachment. The subpass pColorAttachments[%" PRIu32
"].attachment has the value "
"VK_ATTACHMENT_UNUSED.",
coverage_to_color_state->coverageToColorLocation);
}
} else {
string_sprintf(&error_detail,
"references an non-existing attachment since the subpass colorAttachmentCount is %" PRIu32 ".",
subpass_desc->colorAttachmentCount);
}
if (!attachment_is_valid) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device),
"VUID-VkPipelineCoverageToColorStateCreateInfoNV-coverageToColorEnable-01404",
"vkCreateGraphicsPipelines: pCreateInfos[%" PRId32
"].pMultisampleState VkPipelineCoverageToColorStateCreateInfoNV "
"coverageToColorLocation = %" PRIu32 " %s",
pipelineIndex, coverage_to_color_state->coverageToColorLocation, error_detail.c_str());
}
}
}
}
return skip;
}
// The block of code below deals specifically with managing/tracking descriptor sets (DSs)
// Validate that given set is valid and that it's not being used by an in-flight CmdBuffer
// func_str is the name of the calling function
// Return false if no errors occur
// Return true if validation error occurs and callback returns true (to skip upcoming API call down the chain)
bool CoreChecks::ValidateIdleDescriptorSet(VkDescriptorSet set, const char *func_str) {
if (disabled.idle_descriptor_set) return false;
bool skip = false;
auto set_node = setMap.find(set);
if (set_node == setMap.end()) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
HandleToUint64(set), kVUID_Core_DrawState_DoubleDestroy,
"Cannot call %s() on %s that has not been allocated.", func_str, report_data->FormatHandle(set).c_str());
} else {
// TODO : This covers various error cases so should pass error enum into this function and use passed in enum here
if (set_node->second->in_use.load()) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
HandleToUint64(set), "VUID-vkFreeDescriptorSets-pDescriptorSets-00309",
"Cannot call %s() on %s that is in use by a command buffer.", func_str,
report_data->FormatHandle(set).c_str());
}
}
return skip;
}
// Remove set from setMap and delete the set
void ValidationStateTracker::FreeDescriptorSet(cvdescriptorset::DescriptorSet *descriptor_set) {
setMap.erase(descriptor_set->GetSet());
}
// Free all DS Pools including their Sets & related sub-structs
// NOTE : Calls to this function should be wrapped in a mutex
void ValidationStateTracker::DeleteDescriptorSetPools() {
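    // Note: the loop below erases while iterating; descriptorPoolMap.erase(ii) returns the iterator to the
    // next element, which keeps 'ii' valid (incrementing a just-erased iterator would be undefined behavior).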
for (auto ii = descriptorPoolMap.begin(); ii != descriptorPoolMap.end();) {
        // Remove this pool's sets from setMap and delete them
for (auto ds : ii->second->sets) {
FreeDescriptorSet(ds);
}
ii->second->sets.clear();
ii = descriptorPoolMap.erase(ii);
}
}
// If a renderpass is active, verify that the given command type is appropriate for current subpass state
bool CoreChecks::ValidateCmdSubpassState(const CMD_BUFFER_STATE *pCB, const CMD_TYPE cmd_type) const {
if (!pCB->activeRenderPass) return false;
bool skip = false;
if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS &&
(cmd_type != CMD_EXECUTECOMMANDS && cmd_type != CMD_NEXTSUBPASS && cmd_type != CMD_ENDRENDERPASS &&
cmd_type != CMD_NEXTSUBPASS2KHR && cmd_type != CMD_ENDRENDERPASS2KHR)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(pCB->commandBuffer), kVUID_Core_DrawState_InvalidCommandBuffer,
"Commands cannot be called in a subpass using secondary command buffers.");
} else if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_INLINE && cmd_type == CMD_EXECUTECOMMANDS) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(pCB->commandBuffer), kVUID_Core_DrawState_InvalidCommandBuffer,
"vkCmdExecuteCommands() cannot be called in a subpass using inline commands.");
}
return skip;
}
bool CoreChecks::ValidateCmdQueueFlags(const CMD_BUFFER_STATE *cb_node, const char *caller_name, VkQueueFlags required_flags,
const char *error_code) const {
auto pool = GetCommandPoolState(cb_node->createInfo.commandPool);
if (pool) {
VkQueueFlags queue_flags = GetPhysicalDeviceState()->queue_family_properties[pool->queueFamilyIndex].queueFlags;
if (!(required_flags & queue_flags)) {
string required_flags_string;
for (auto flag : {VK_QUEUE_TRANSFER_BIT, VK_QUEUE_GRAPHICS_BIT, VK_QUEUE_COMPUTE_BIT}) {
if (flag & required_flags) {
if (required_flags_string.size()) {
required_flags_string += " or ";
}
required_flags_string += string_VkQueueFlagBits(flag);
}
}
return log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), error_code,
"Cannot call %s on a command buffer allocated from a pool without %s capabilities..", caller_name,
required_flags_string.c_str());
}
}
return false;
}
static char const *GetCauseStr(VulkanTypedHandle obj) {
if (obj.type == kVulkanObjectTypeDescriptorSet) return "destroyed or updated";
if (obj.type == kVulkanObjectTypeCommandBuffer) return "destroyed or rerecorded";
return "destroyed";
}
bool CoreChecks::ReportInvalidCommandBuffer(const CMD_BUFFER_STATE *cb_state, const char *call_source) const {
bool skip = false;
for (auto obj : cb_state->broken_bindings) {
const char *cause_str = GetCauseStr(obj);
string VUID;
string_sprintf(&VUID, "%s-%s", kVUID_Core_DrawState_InvalidCommandBuffer, object_string[obj.type]);
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer), VUID.c_str(),
"You are adding %s to %s that is invalid because bound %s was %s.", call_source,
report_data->FormatHandle(cb_state->commandBuffer).c_str(), report_data->FormatHandle(obj).c_str(), cause_str);
}
return skip;
}
// 'commandBuffer must be in the recording state' valid usage error code for each command
// Autogenerated as part of the vk_validation_error_message.h codegen
static const std::array<const char *, CMD_RANGE_SIZE> must_be_recording_list = {{VUID_MUST_BE_RECORDING_LIST}};
// Validate the given command being added to the specified cmd buffer, flagging errors if CB is not in the recording state or if
// there's an issue with the Cmd ordering
bool CoreChecks::ValidateCmd(const CMD_BUFFER_STATE *cb_state, const CMD_TYPE cmd, const char *caller_name) const {
switch (cb_state->state) {
case CB_RECORDING:
return ValidateCmdSubpassState(cb_state, cmd);
case CB_INVALID_COMPLETE:
case CB_INVALID_INCOMPLETE:
return ReportInvalidCommandBuffer(cb_state, caller_name);
default:
assert(cmd != CMD_NONE);
const auto error = must_be_recording_list[cmd];
return log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer), error,
"You must call vkBeginCommandBuffer() before this call to %s.", caller_name);
}
}
bool CoreChecks::ValidateDeviceMaskToPhysicalDeviceCount(uint32_t deviceMask, VkDebugReportObjectTypeEXT VUID_handle_type,
uint64_t VUID_handle, const char *VUID) const {
bool skip = false;
uint32_t count = 1 << physical_device_count;
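    // Illustrative sketch (values for this example only): with physical_device_count == 2, count == 4,
    // so the only valid non-zero device masks are 0x1..0x3; any bit at index >= 2 would name a
    // nonexistent physical device and trip the check below.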
if (count <= deviceMask) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VUID_handle_type, VUID_handle, VUID,
"deviceMask(0x%" PRIx32 ") is invaild. Physical device count is %" PRIu32 ".", deviceMask,
physical_device_count);
}
return skip;
}
bool CoreChecks::ValidateDeviceMaskToZero(uint32_t deviceMask, VkDebugReportObjectTypeEXT VUID_handle_type, uint64_t VUID_handle,
const char *VUID) const {
bool skip = false;
if (deviceMask == 0) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VUID_handle_type, VUID_handle, VUID,
"deviceMask(0x%" PRIx32 ") must be non-zero.", deviceMask);
}
return skip;
}
bool CoreChecks::ValidateDeviceMaskToCommandBuffer(const CMD_BUFFER_STATE *pCB, uint32_t deviceMask,
VkDebugReportObjectTypeEXT VUID_handle_type, uint64_t VUID_handle,
const char *VUID) const {
bool skip = false;
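    // Subset test sketch (illustrative values only): deviceMask 0b011 is a subset of an initial mask of
    // 0b111 because (0b011 & 0b111) == 0b011; deviceMask 0b101 against an initial mask of 0b011 yields
    // 0b001 != 0b101, so it is rejected.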
if ((deviceMask & pCB->initial_device_mask) != deviceMask) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VUID_handle_type, VUID_handle, VUID,
"deviceMask(0x%" PRIx32 ") is not a subset of %s initial device mask(0x%" PRIx32 ").", deviceMask,
report_data->FormatHandle(pCB->commandBuffer).c_str(), pCB->initial_device_mask);
}
return skip;
}
bool CoreChecks::ValidateDeviceMaskToRenderPass(const CMD_BUFFER_STATE *pCB, uint32_t deviceMask,
VkDebugReportObjectTypeEXT VUID_handle_type, uint64_t VUID_handle,
const char *VUID) {
bool skip = false;
if ((deviceMask & pCB->active_render_pass_device_mask) != deviceMask) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VUID_handle_type, VUID_handle, VUID,
"deviceMask(0x%" PRIx32 ") is not a subset of %s device mask(0x%" PRIx32 ").", deviceMask,
report_data->FormatHandle(pCB->activeRenderPass->renderPass).c_str(), pCB->active_render_pass_device_mask);
}
return skip;
}
// For given object struct return a ptr of BASE_NODE type for its wrapping struct
BASE_NODE *ValidationStateTracker::GetStateStructPtrFromObject(const VulkanTypedHandle &object_struct) {
BASE_NODE *base_ptr = nullptr;
switch (object_struct.type) {
case kVulkanObjectTypeDescriptorSet: {
base_ptr = GetSetNode(object_struct.Cast<VkDescriptorSet>());
break;
}
case kVulkanObjectTypeSampler: {
base_ptr = GetSamplerState(object_struct.Cast<VkSampler>());
break;
}
case kVulkanObjectTypeQueryPool: {
base_ptr = GetQueryPoolState(object_struct.Cast<VkQueryPool>());
break;
}
case kVulkanObjectTypePipeline: {
base_ptr = GetPipelineState(object_struct.Cast<VkPipeline>());
break;
}
case kVulkanObjectTypeBuffer: {
base_ptr = GetBufferState(object_struct.Cast<VkBuffer>());
break;
}
case kVulkanObjectTypeBufferView: {
base_ptr = GetBufferViewState(object_struct.Cast<VkBufferView>());
break;
}
case kVulkanObjectTypeImage: {
base_ptr = GetImageState(object_struct.Cast<VkImage>());
break;
}
case kVulkanObjectTypeImageView: {
base_ptr = GetImageViewState(object_struct.Cast<VkImageView>());
break;
}
case kVulkanObjectTypeEvent: {
base_ptr = GetEventState(object_struct.Cast<VkEvent>());
break;
}
case kVulkanObjectTypeDescriptorPool: {
base_ptr = GetDescriptorPoolState(object_struct.Cast<VkDescriptorPool>());
break;
}
case kVulkanObjectTypeCommandPool: {
base_ptr = GetCommandPoolState(object_struct.Cast<VkCommandPool>());
break;
}
case kVulkanObjectTypeFramebuffer: {
base_ptr = GetFramebufferState(object_struct.Cast<VkFramebuffer>());
break;
}
case kVulkanObjectTypeRenderPass: {
base_ptr = GetRenderPassState(object_struct.Cast<VkRenderPass>());
break;
}
case kVulkanObjectTypeDeviceMemory: {
base_ptr = GetDevMemState(object_struct.Cast<VkDeviceMemory>());
break;
}
case kVulkanObjectTypeAccelerationStructureNV: {
base_ptr = GetAccelerationStructureState(object_struct.Cast<VkAccelerationStructureNV>());
break;
}
default:
// TODO : Any other objects to be handled here?
assert(0);
break;
}
return base_ptr;
}
// Tie the VulkanTypedHandle to the cmd buffer which includes:
// Add object_binding to cmd buffer
// Add cb_binding to object
void ValidationStateTracker::AddCommandBufferBinding(std::unordered_set<CMD_BUFFER_STATE *> *cb_bindings,
const VulkanTypedHandle &obj, CMD_BUFFER_STATE *cb_node) {
if (disabled.command_buffer_state) {
return;
}
cb_bindings->insert(cb_node);
cb_node->object_bindings.insert(obj);
}
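// Note: the binding is deliberately bidirectional -- the CB's object_bindings set lets
// ResetCommandBufferState unhook the CB from every object it references, while the object's
// cb_bindings set lets destruction or update of the object invalidate every CB that recorded it.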
// For a given object, if cb_node is in that objects cb_bindings, remove cb_node
void ValidationStateTracker::RemoveCommandBufferBinding(VulkanTypedHandle const &object, CMD_BUFFER_STATE *cb_node) {
BASE_NODE *base_obj = GetStateStructPtrFromObject(object);
if (base_obj) base_obj->cb_bindings.erase(cb_node);
}
// Reset the command buffer state
// Maintain the createInfo and set state to CB_NEW, but clear all other state
void ValidationStateTracker::ResetCommandBufferState(const VkCommandBuffer cb) {
CMD_BUFFER_STATE *pCB = GetCBState(cb);
if (pCB) {
pCB->in_use.store(0);
// Reset CB state (note that createInfo is not cleared)
pCB->commandBuffer = cb;
memset(&pCB->beginInfo, 0, sizeof(VkCommandBufferBeginInfo));
memset(&pCB->inheritanceInfo, 0, sizeof(VkCommandBufferInheritanceInfo));
pCB->hasDrawCmd = false;
pCB->hasTraceRaysCmd = false;
pCB->hasDispatchCmd = false;
pCB->state = CB_NEW;
pCB->submitCount = 0;
pCB->image_layout_change_count = 1; // Start at 1. 0 is insert value for validation cache versions, s.t. new == dirty
pCB->status = 0;
pCB->static_status = 0;
pCB->viewportMask = 0;
pCB->scissorMask = 0;
for (auto &item : pCB->lastBound) {
item.second.reset();
}
memset(&pCB->activeRenderPassBeginInfo, 0, sizeof(pCB->activeRenderPassBeginInfo));
pCB->activeRenderPass = nullptr;
pCB->activeSubpassContents = VK_SUBPASS_CONTENTS_INLINE;
pCB->activeSubpass = 0;
pCB->broken_bindings.clear();
pCB->waitedEvents.clear();
pCB->events.clear();
pCB->writeEventsBeforeWait.clear();
pCB->queryToStateMap.clear();
pCB->activeQueries.clear();
pCB->startedQueries.clear();
pCB->image_layout_map.clear();
pCB->eventToStageMap.clear();
pCB->cb_vertex_buffer_binding_info.clear();
pCB->current_vertex_buffer_binding_info.vertex_buffer_bindings.clear();
pCB->vertex_buffer_used = false;
pCB->primaryCommandBuffer = VK_NULL_HANDLE;
// If secondary, invalidate any primary command buffer that may call us.
if (pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
InvalidateCommandBuffers(pCB->linkedCommandBuffers, VulkanTypedHandle(cb, kVulkanObjectTypeCommandBuffer));
}
// Remove reverse command buffer links.
for (auto pSubCB : pCB->linkedCommandBuffers) {
pSubCB->linkedCommandBuffers.erase(pCB);
}
pCB->linkedCommandBuffers.clear();
ClearCmdBufAndMemReferences(pCB);
pCB->queue_submit_functions.clear();
pCB->cmd_execute_commands_functions.clear();
pCB->eventUpdates.clear();
pCB->queryUpdates.clear();
// Remove object bindings
for (const auto &obj : pCB->object_bindings) {
RemoveCommandBufferBinding(obj, pCB);
}
pCB->object_bindings.clear();
// Remove this cmdBuffer's reference from each FrameBuffer's CB ref list
for (auto framebuffer : pCB->framebuffers) {
auto fb_state = GetFramebufferState(framebuffer);
if (fb_state) fb_state->cb_bindings.erase(pCB);
}
pCB->framebuffers.clear();
pCB->activeFramebuffer = VK_NULL_HANDLE;
memset(&pCB->index_buffer_binding, 0, sizeof(pCB->index_buffer_binding));
pCB->qfo_transfer_image_barriers.Reset();
pCB->qfo_transfer_buffer_barriers.Reset();
// Clean up the label data
ResetCmdDebugUtilsLabel(report_data, pCB->commandBuffer);
pCB->debug_label.Reset();
}
if (command_buffer_reset_callback) {
(*command_buffer_reset_callback)(cb);
}
}
CBStatusFlags MakeStaticStateMask(VkPipelineDynamicStateCreateInfo const *ds) {
// initially assume everything is static state
CBStatusFlags flags = CBSTATUS_ALL_STATE_SET;
if (ds) {
for (uint32_t i = 0; i < ds->dynamicStateCount; i++) {
switch (ds->pDynamicStates[i]) {
case VK_DYNAMIC_STATE_LINE_WIDTH:
flags &= ~CBSTATUS_LINE_WIDTH_SET;
break;
case VK_DYNAMIC_STATE_DEPTH_BIAS:
flags &= ~CBSTATUS_DEPTH_BIAS_SET;
break;
case VK_DYNAMIC_STATE_BLEND_CONSTANTS:
flags &= ~CBSTATUS_BLEND_CONSTANTS_SET;
break;
case VK_DYNAMIC_STATE_DEPTH_BOUNDS:
flags &= ~CBSTATUS_DEPTH_BOUNDS_SET;
break;
case VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK:
flags &= ~CBSTATUS_STENCIL_READ_MASK_SET;
break;
case VK_DYNAMIC_STATE_STENCIL_WRITE_MASK:
flags &= ~CBSTATUS_STENCIL_WRITE_MASK_SET;
break;
case VK_DYNAMIC_STATE_STENCIL_REFERENCE:
flags &= ~CBSTATUS_STENCIL_REFERENCE_SET;
break;
case VK_DYNAMIC_STATE_SCISSOR:
flags &= ~CBSTATUS_SCISSOR_SET;
break;
case VK_DYNAMIC_STATE_VIEWPORT:
flags &= ~CBSTATUS_VIEWPORT_SET;
break;
case VK_DYNAMIC_STATE_EXCLUSIVE_SCISSOR_NV:
flags &= ~CBSTATUS_EXCLUSIVE_SCISSOR_SET;
break;
case VK_DYNAMIC_STATE_VIEWPORT_SHADING_RATE_PALETTE_NV:
flags &= ~CBSTATUS_SHADING_RATE_PALETTE_SET;
break;
default:
break;
}
}
}
return flags;
}
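// Illustrative use of the mask above: a pipeline created with
// pDynamicStates = { VK_DYNAMIC_STATE_VIEWPORT } yields a mask with only CBSTATUS_VIEWPORT_SET
// cleared, i.e. the viewport must still be supplied via vkCmdSetViewport() before drawing, while
// all other state is baked into the pipeline and treated as already set.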
// Flags validation error if the associated call is made inside a render pass. The apiName routine should ONLY be called outside a
// render pass.
bool CoreChecks::InsideRenderPass(const CMD_BUFFER_STATE *pCB, const char *apiName, const char *msgCode) const {
bool inside = false;
if (pCB->activeRenderPass) {
inside = log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(pCB->commandBuffer), msgCode, "%s: It is invalid to issue this call inside an active %s.",
apiName, report_data->FormatHandle(pCB->activeRenderPass->renderPass).c_str());
}
return inside;
}
// Flags validation error if the associated call is made outside a render pass. The apiName
// routine should ONLY be called inside a render pass.
bool CoreChecks::OutsideRenderPass(const CMD_BUFFER_STATE *pCB, const char *apiName, const char *msgCode) const {
bool outside = false;
if (((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) && (!pCB->activeRenderPass)) ||
((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) && (!pCB->activeRenderPass) &&
!(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT))) {
outside = log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(pCB->commandBuffer), msgCode, "%s: This call must be issued inside an active render pass.",
apiName);
}
return outside;
}
void CoreChecks::InitGpuValidation() {
// Process the layer settings file.
enum CoreValidationGpuFlagBits {
CORE_VALIDATION_GPU_VALIDATION_ALL_BIT = 0x00000001,
CORE_VALIDATION_GPU_VALIDATION_RESERVE_BINDING_SLOT_BIT = 0x00000002,
};
typedef VkFlags CoreGPUFlags;
static const std::unordered_map<std::string, VkFlags> gpu_flags_option_definitions = {
{std::string("all"), CORE_VALIDATION_GPU_VALIDATION_ALL_BIT},
{std::string("reserve_binding_slot"), CORE_VALIDATION_GPU_VALIDATION_RESERVE_BINDING_SLOT_BIT},
};
std::string gpu_flags_key = "lunarg_core_validation.gpu_validation";
CoreGPUFlags gpu_flags = GetLayerOptionFlags(gpu_flags_key, gpu_flags_option_definitions, 0);
gpu_flags_key = "khronos_validation.gpu_validation";
gpu_flags |= GetLayerOptionFlags(gpu_flags_key, gpu_flags_option_definitions, 0);
if (gpu_flags & CORE_VALIDATION_GPU_VALIDATION_ALL_BIT) {
instance_state->enabled.gpu_validation = true;
}
if (gpu_flags & CORE_VALIDATION_GPU_VALIDATION_RESERVE_BINDING_SLOT_BIT) {
instance_state->enabled.gpu_validation_reserve_binding_slot = true;
}
}
void CoreChecks::PostCallRecordCreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
VkInstance *pInstance, VkResult result) {
if (VK_SUCCESS != result) return;
InitGpuValidation();
}
bool CoreChecks::ValidateQueueFamilyIndex(const PHYSICAL_DEVICE_STATE *pd_state, uint32_t requested_queue_family,
const char *err_code, const char *cmd_name, const char *queue_family_var_name) {
bool skip = false;
if (requested_queue_family >= pd_state->queue_family_known_count) {
const char *conditional_ext_cmd =
instance_extensions.vk_khr_get_physical_device_properties_2 ? " or vkGetPhysicalDeviceQueueFamilyProperties2[KHR]" : "";
const std::string count_note = (UNCALLED == pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState)
? "the pQueueFamilyPropertyCount was never obtained"
: "i.e. is not less than " + std::to_string(pd_state->queue_family_known_count);
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
HandleToUint64(pd_state->phys_device), err_code,
"%s: %s (= %" PRIu32
") is not less than any previously obtained pQueueFamilyPropertyCount from "
"vkGetPhysicalDeviceQueueFamilyProperties%s (%s).",
cmd_name, queue_family_var_name, requested_queue_family, conditional_ext_cmd, count_note.c_str());
}
return skip;
}
// Verify VkDeviceQueueCreateInfos
bool CoreChecks::ValidateDeviceQueueCreateInfos(const PHYSICAL_DEVICE_STATE *pd_state, uint32_t info_count,
const VkDeviceQueueCreateInfo *infos) {
bool skip = false;
std::unordered_set<uint32_t> queue_family_set;
for (uint32_t i = 0; i < info_count; ++i) {
const auto requested_queue_family = infos[i].queueFamilyIndex;
std::string queue_family_var_name = "pCreateInfo->pQueueCreateInfos[" + std::to_string(i) + "].queueFamilyIndex";
skip |= ValidateQueueFamilyIndex(pd_state, requested_queue_family, "VUID-VkDeviceQueueCreateInfo-queueFamilyIndex-00381",
"vkCreateDevice", queue_family_var_name.c_str());
if (queue_family_set.insert(requested_queue_family).second == false) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(pd_state->phys_device), "VUID-VkDeviceCreateInfo-queueFamilyIndex-00372",
"CreateDevice(): %s (=%" PRIu32 ") is not unique within pQueueCreateInfos.",
queue_family_var_name.c_str(), requested_queue_family);
}
// Verify that requested queue count of queue family is known to be valid at this point in time
if (requested_queue_family < pd_state->queue_family_known_count) {
const auto requested_queue_count = infos[i].queueCount;
const bool queue_family_has_props = requested_queue_family < pd_state->queue_family_properties.size();
// spec guarantees at least one queue for each queue family
const uint32_t available_queue_count =
queue_family_has_props ? pd_state->queue_family_properties[requested_queue_family].queueCount : 1;
const char *conditional_ext_cmd = instance_extensions.vk_khr_get_physical_device_properties_2
? " or vkGetPhysicalDeviceQueueFamilyProperties2[KHR]"
: "";
if (requested_queue_count > available_queue_count) {
const std::string count_note =
queue_family_has_props
? "i.e. is not less than or equal to " +
std::to_string(pd_state->queue_family_properties[requested_queue_family].queueCount)
: "the pQueueFamilyProperties[" + std::to_string(requested_queue_family) + "] was never obtained";
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
HandleToUint64(pd_state->phys_device), "VUID-VkDeviceQueueCreateInfo-queueCount-00382",
"vkCreateDevice: pCreateInfo->pQueueCreateInfos[%" PRIu32 "].queueCount (=%" PRIu32
") is not less than or equal to available queue count for this pCreateInfo->pQueueCreateInfos[%" PRIu32
"].queueFamilyIndex} (=%" PRIu32 ") obtained previously from vkGetPhysicalDeviceQueueFamilyProperties%s (%s).",
i, requested_queue_count, i, requested_queue_family, conditional_ext_cmd, count_note.c_str());
}
}
}
return skip;
}
bool CoreChecks::PreCallValidateCreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) {
bool skip = false;
auto pd_state = GetPhysicalDeviceState(gpu);
// TODO: object_tracker should perhaps do this instead
// and it does not seem to currently work anyway -- the loader just crashes before this point
if (!pd_state) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0,
kVUID_Core_DevLimit_MustQueryCount,
"Invalid call to vkCreateDevice() w/o first calling vkEnumeratePhysicalDevices().");
} else {
skip |= ValidateDeviceQueueCreateInfos(pd_state, pCreateInfo->queueCreateInfoCount, pCreateInfo->pQueueCreateInfos);
}
return skip;
}
void CoreChecks::PreCallRecordCreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkDevice *pDevice,
std::unique_ptr<safe_VkDeviceCreateInfo> &modified_create_info) {
// GPU Validation can possibly turn on device features, so give it a chance to change the create info.
if (enabled.gpu_validation) {
VkPhysicalDeviceFeatures supported_features;
DispatchGetPhysicalDeviceFeatures(gpu, &supported_features);
GpuPreCallRecordCreateDevice(gpu, modified_create_info, &supported_features);
}
}
void CoreChecks::PostCallRecordCreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkDevice *pDevice, VkResult result) {
// The state tracker sets up the device state
StateTracker::PostCallRecordCreateDevice(gpu, pCreateInfo, pAllocator, pDevice, result);
// Add the callback hooks for the functions that are either broadly or deeply used and that the ValidationStateTracker refactor
// would be messier without.
// TODO: Find a good way to do this hooklessly.
ValidationObject *device_object = GetLayerDataPtr(get_dispatch_key(*pDevice), layer_data_map);
ValidationObject *validation_data = GetValidationObject(device_object->object_dispatch, LayerObjectTypeCoreValidation);
CoreChecks *core_checks = static_cast<CoreChecks *>(validation_data);
if (enabled.gpu_validation) {
// The only CoreCheck specific init is for gpu_validation
core_checks->GpuPostCallRecordCreateDevice(&enabled, pCreateInfo);
core_checks->SetCommandBufferResetCallback(
[core_checks](VkCommandBuffer command_buffer) -> void { core_checks->GpuResetCommandBuffer(command_buffer); });
}
core_checks->SetSetImageViewInitialLayoutCallback(
[core_checks](CMD_BUFFER_STATE *cb_node, const IMAGE_VIEW_STATE &iv_state, VkImageLayout layout) -> void {
core_checks->SetImageViewInitialLayout(cb_node, iv_state, layout);
});
}
void ValidationStateTracker::PostCallRecordCreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkDevice *pDevice,
VkResult result) {
if (VK_SUCCESS != result) return;
const VkPhysicalDeviceFeatures *enabled_features_found = pCreateInfo->pEnabledFeatures;
if (nullptr == enabled_features_found) {
const auto *features2 = lvl_find_in_chain<VkPhysicalDeviceFeatures2KHR>(pCreateInfo->pNext);
if (features2) {
enabled_features_found = &(features2->features);
}
}
ValidationObject *device_object = GetLayerDataPtr(get_dispatch_key(*pDevice), layer_data_map);
ValidationObject *validation_data = GetValidationObject(device_object->object_dispatch, LayerObjectTypeCoreValidation);
ValidationStateTracker *state_tracker = static_cast<ValidationStateTracker *>(validation_data);
if (nullptr == enabled_features_found) {
state_tracker->enabled_features.core = {};
} else {
state_tracker->enabled_features.core = *enabled_features_found;
}
// Make sure that queue_family_properties are obtained for this device's physical_device, even if the app has not
// previously set them through an explicit API call.
uint32_t count;
auto pd_state = GetPhysicalDeviceState(gpu);
DispatchGetPhysicalDeviceQueueFamilyProperties(gpu, &count, nullptr);
pd_state->queue_family_properties.resize(std::max(static_cast<uint32_t>(pd_state->queue_family_properties.size()), count));
DispatchGetPhysicalDeviceQueueFamilyProperties(gpu, &count, &pd_state->queue_family_properties[0]);
// Save local link to this device's physical device state
state_tracker->physical_device_state = pd_state;
const auto *device_group_ci = lvl_find_in_chain<VkDeviceGroupDeviceCreateInfo>(pCreateInfo->pNext);
state_tracker->physical_device_count =
device_group_ci && device_group_ci->physicalDeviceCount > 0 ? device_group_ci->physicalDeviceCount : 1;
const auto *descriptor_indexing_features = lvl_find_in_chain<VkPhysicalDeviceDescriptorIndexingFeaturesEXT>(pCreateInfo->pNext);
if (descriptor_indexing_features) {
state_tracker->enabled_features.descriptor_indexing = *descriptor_indexing_features;
}
const auto *eight_bit_storage_features = lvl_find_in_chain<VkPhysicalDevice8BitStorageFeaturesKHR>(pCreateInfo->pNext);
if (eight_bit_storage_features) {
state_tracker->enabled_features.eight_bit_storage = *eight_bit_storage_features;
}
const auto *exclusive_scissor_features = lvl_find_in_chain<VkPhysicalDeviceExclusiveScissorFeaturesNV>(pCreateInfo->pNext);
if (exclusive_scissor_features) {
state_tracker->enabled_features.exclusive_scissor = *exclusive_scissor_features;
}
const auto *shading_rate_image_features = lvl_find_in_chain<VkPhysicalDeviceShadingRateImageFeaturesNV>(pCreateInfo->pNext);
if (shading_rate_image_features) {
state_tracker->enabled_features.shading_rate_image = *shading_rate_image_features;
}
const auto *mesh_shader_features = lvl_find_in_chain<VkPhysicalDeviceMeshShaderFeaturesNV>(pCreateInfo->pNext);
if (mesh_shader_features) {
state_tracker->enabled_features.mesh_shader = *mesh_shader_features;
}
const auto *inline_uniform_block_features =
lvl_find_in_chain<VkPhysicalDeviceInlineUniformBlockFeaturesEXT>(pCreateInfo->pNext);
if (inline_uniform_block_features) {
state_tracker->enabled_features.inline_uniform_block = *inline_uniform_block_features;
}
const auto *transform_feedback_features = lvl_find_in_chain<VkPhysicalDeviceTransformFeedbackFeaturesEXT>(pCreateInfo->pNext);
if (transform_feedback_features) {
state_tracker->enabled_features.transform_feedback_features = *transform_feedback_features;
}
const auto *float16_int8_features = lvl_find_in_chain<VkPhysicalDeviceFloat16Int8FeaturesKHR>(pCreateInfo->pNext);
if (float16_int8_features) {
state_tracker->enabled_features.float16_int8 = *float16_int8_features;
}
const auto *vtx_attrib_div_features = lvl_find_in_chain<VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT>(pCreateInfo->pNext);
if (vtx_attrib_div_features) {
state_tracker->enabled_features.vtx_attrib_divisor_features = *vtx_attrib_div_features;
}
const auto *uniform_buffer_standard_layout_features =
lvl_find_in_chain<VkPhysicalDeviceUniformBufferStandardLayoutFeaturesKHR>(pCreateInfo->pNext);
if (uniform_buffer_standard_layout_features) {
state_tracker->enabled_features.uniform_buffer_standard_layout = *uniform_buffer_standard_layout_features;
}
const auto *scalar_block_layout_features = lvl_find_in_chain<VkPhysicalDeviceScalarBlockLayoutFeaturesEXT>(pCreateInfo->pNext);
if (scalar_block_layout_features) {
state_tracker->enabled_features.scalar_block_layout_features = *scalar_block_layout_features;
}
const auto *buffer_address = lvl_find_in_chain<VkPhysicalDeviceBufferAddressFeaturesEXT>(pCreateInfo->pNext);
if (buffer_address) {
state_tracker->enabled_features.buffer_address = *buffer_address;
}
const auto *cooperative_matrix_features = lvl_find_in_chain<VkPhysicalDeviceCooperativeMatrixFeaturesNV>(pCreateInfo->pNext);
if (cooperative_matrix_features) {
state_tracker->enabled_features.cooperative_matrix_features = *cooperative_matrix_features;
}
const auto *float_controls_features = lvl_find_in_chain<VkPhysicalDeviceFloatControlsPropertiesKHR>(pCreateInfo->pNext);
if (float_controls_features) {
state_tracker->enabled_features.float_controls = *float_controls_features;
}
const auto *host_query_reset_features = lvl_find_in_chain<VkPhysicalDeviceHostQueryResetFeaturesEXT>(pCreateInfo->pNext);
if (host_query_reset_features) {
state_tracker->enabled_features.host_query_reset_features = *host_query_reset_features;
}
const auto *compute_shader_derivatives_features =
lvl_find_in_chain<VkPhysicalDeviceComputeShaderDerivativesFeaturesNV>(pCreateInfo->pNext);
if (compute_shader_derivatives_features) {
state_tracker->enabled_features.compute_shader_derivatives_features = *compute_shader_derivatives_features;
}
const auto *fragment_shader_barycentric_features =
lvl_find_in_chain<VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV>(pCreateInfo->pNext);
if (fragment_shader_barycentric_features) {
state_tracker->enabled_features.fragment_shader_barycentric_features = *fragment_shader_barycentric_features;
}
const auto *shader_image_footprint_features =
lvl_find_in_chain<VkPhysicalDeviceShaderImageFootprintFeaturesNV>(pCreateInfo->pNext);
if (shader_image_footprint_features) {
state_tracker->enabled_features.shader_image_footprint_features = *shader_image_footprint_features;
}
const auto *fragment_shader_interlock_features =
lvl_find_in_chain<VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT>(pCreateInfo->pNext);
if (fragment_shader_interlock_features) {
state_tracker->enabled_features.fragment_shader_interlock_features = *fragment_shader_interlock_features;
}
const auto *demote_to_helper_invocation_features =
lvl_find_in_chain<VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT>(pCreateInfo->pNext);
if (demote_to_helper_invocation_features) {
state_tracker->enabled_features.demote_to_helper_invocation_features = *demote_to_helper_invocation_features;
}
const auto *texel_buffer_alignment_features =
lvl_find_in_chain<VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT>(pCreateInfo->pNext);
if (texel_buffer_alignment_features) {
state_tracker->enabled_features.texel_buffer_alignment_features = *texel_buffer_alignment_features;
}
const auto *imageless_framebuffer_features =
lvl_find_in_chain<VkPhysicalDeviceImagelessFramebufferFeaturesKHR>(pCreateInfo->pNext);
if (imageless_framebuffer_features) {
state_tracker->enabled_features.imageless_framebuffer_features = *imageless_framebuffer_features;
}
// Store physical device properties and physical device mem limits into CoreChecks structs
DispatchGetPhysicalDeviceMemoryProperties(gpu, &state_tracker->phys_dev_mem_props);
DispatchGetPhysicalDeviceProperties(gpu, &state_tracker->phys_dev_props);
const auto &dev_ext = state_tracker->device_extensions;
auto *phys_dev_props = &state_tracker->phys_dev_ext_props;
if (dev_ext.vk_khr_push_descriptor) {
// Get the needed push_descriptor limits
VkPhysicalDevicePushDescriptorPropertiesKHR push_descriptor_prop;
GetPhysicalDeviceExtProperties(gpu, dev_ext.vk_khr_push_descriptor, &push_descriptor_prop);
phys_dev_props->max_push_descriptors = push_descriptor_prop.maxPushDescriptors;
}
GetPhysicalDeviceExtProperties(gpu, dev_ext.vk_ext_descriptor_indexing, &phys_dev_props->descriptor_indexing_props);
GetPhysicalDeviceExtProperties(gpu, dev_ext.vk_nv_shading_rate_image, &phys_dev_props->shading_rate_image_props);
GetPhysicalDeviceExtProperties(gpu, dev_ext.vk_nv_mesh_shader, &phys_dev_props->mesh_shader_props);
GetPhysicalDeviceExtProperties(gpu, dev_ext.vk_ext_inline_uniform_block, &phys_dev_props->inline_uniform_block_props);
GetPhysicalDeviceExtProperties(gpu, dev_ext.vk_ext_vertex_attribute_divisor, &phys_dev_props->vtx_attrib_divisor_props);
GetPhysicalDeviceExtProperties(gpu, dev_ext.vk_khr_depth_stencil_resolve, &phys_dev_props->depth_stencil_resolve_props);
GetPhysicalDeviceExtProperties(gpu, dev_ext.vk_ext_transform_feedback, &phys_dev_props->transform_feedback_props);
GetPhysicalDeviceExtProperties(gpu, dev_ext.vk_nv_ray_tracing, &phys_dev_props->ray_tracing_props);
GetPhysicalDeviceExtProperties(gpu, dev_ext.vk_ext_texel_buffer_alignment, &phys_dev_props->texel_buffer_alignment_props);
if (state_tracker->device_extensions.vk_nv_cooperative_matrix) {
// Get the needed cooperative_matrix properties
auto cooperative_matrix_props = lvl_init_struct<VkPhysicalDeviceCooperativeMatrixPropertiesNV>();
auto prop2 = lvl_init_struct<VkPhysicalDeviceProperties2KHR>(&cooperative_matrix_props);
instance_dispatch_table.GetPhysicalDeviceProperties2KHR(gpu, &prop2);
state_tracker->phys_dev_ext_props.cooperative_matrix_props = cooperative_matrix_props;
uint32_t numCooperativeMatrixProperties = 0;
instance_dispatch_table.GetPhysicalDeviceCooperativeMatrixPropertiesNV(gpu, &numCooperativeMatrixProperties, NULL);
state_tracker->cooperative_matrix_properties.resize(numCooperativeMatrixProperties,
lvl_init_struct<VkCooperativeMatrixPropertiesNV>());
instance_dispatch_table.GetPhysicalDeviceCooperativeMatrixPropertiesNV(gpu, &numCooperativeMatrixProperties,
state_tracker->cooperative_matrix_properties.data());
}
if (state_tracker->api_version >= VK_API_VERSION_1_1) {
// Get the needed subgroup limits
auto subgroup_prop = lvl_init_struct<VkPhysicalDeviceSubgroupProperties>();
auto prop2 = lvl_init_struct<VkPhysicalDeviceProperties2KHR>(&subgroup_prop);
instance_dispatch_table.GetPhysicalDeviceProperties2(gpu, &prop2);
state_tracker->phys_dev_ext_props.subgroup_props = subgroup_prop;
}
// Store queue family data
if ((pCreateInfo != nullptr) && (pCreateInfo->pQueueCreateInfos != nullptr)) {
for (uint32_t i = 0; i < pCreateInfo->queueCreateInfoCount; ++i) {
state_tracker->queue_family_index_map.insert(
std::make_pair(pCreateInfo->pQueueCreateInfos[i].queueFamilyIndex, pCreateInfo->pQueueCreateInfos[i].queueCount));
}
}
}
void ValidationStateTracker::PreCallRecordDestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator) {
if (!device) return;
pipelineMap.clear();
renderPassMap.clear();
// Reset all command buffers before destroying them, to unlink object_bindings.
for (auto &commandBuffer : commandBufferMap) {
ResetCommandBufferState(commandBuffer.first);
}
commandBufferMap.clear();
// This will also delete all sets in the pool & remove them from setMap
DeleteDescriptorSetPools();
// All sets should be removed
assert(setMap.empty());
descriptorSetLayoutMap.clear();
imageViewMap.clear();
imageMap.clear();
bufferViewMap.clear();
bufferMap.clear();
// Queues persist until device is destroyed
queueMap.clear();
layer_debug_utils_destroy_device(device);
}
void CoreChecks::PreCallRecordDestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator) {
if (!device) return;
if (enabled.gpu_validation) {
GpuPreCallRecordDestroyDevice();
}
imageSubresourceMap.clear();
imageLayoutMap.clear();
StateTracker::PreCallRecordDestroyDevice(device, pAllocator);
}
// For given stage mask, if Geometry shader stage is on w/o GS being enabled, report geo_error_id
// and if Tessellation Control or Evaluation shader stages are on w/o TS being enabled, report tess_error_id.
// Similarly for mesh and task shaders.
bool CoreChecks::ValidateStageMaskGsTsEnables(VkPipelineStageFlags stageMask, const char *caller, const char *geo_error_id,
const char *tess_error_id, const char *mesh_error_id,
const char *task_error_id) const {
bool skip = false;
if (!enabled_features.core.geometryShader && (stageMask & VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, geo_error_id,
"%s call includes a stageMask with VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT bit set when device does not have "
"geometryShader feature enabled.",
caller);
}
if (!enabled_features.core.tessellationShader &&
(stageMask & (VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT))) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, tess_error_id,
"%s call includes a stageMask with VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT and/or "
"VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT bit(s) set when device does not have "
"tessellationShader feature enabled.",
caller);
}
if (!enabled_features.mesh_shader.meshShader && (stageMask & VK_PIPELINE_STAGE_MESH_SHADER_BIT_NV)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, mesh_error_id,
"%s call includes a stageMask with VK_PIPELINE_STAGE_MESH_SHADER_BIT_NV bit set when device does not have "
"VkPhysicalDeviceMeshShaderFeaturesNV::meshShader feature enabled.",
caller);
}
if (!enabled_features.mesh_shader.taskShader && (stageMask & VK_PIPELINE_STAGE_TASK_SHADER_BIT_NV)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, task_error_id,
"%s call includes a stageMask with VK_PIPELINE_STAGE_TASK_SHADER_BIT_NV bit set when device does not have "
"VkPhysicalDeviceMeshShaderFeaturesNV::taskShader feature enabled.",
caller);
}
return skip;
}
// Loop through bound objects and increment their in_use counts.
void ValidationStateTracker::IncrementBoundObjects(CMD_BUFFER_STATE const *cb_node) {
for (auto obj : cb_node->object_bindings) {
auto base_obj = GetStateStructPtrFromObject(obj);
if (base_obj) {
base_obj->in_use.fetch_add(1);
}
}
}
// Track which resources are in-flight by atomically incrementing their "in_use" count
void ValidationStateTracker::IncrementResources(CMD_BUFFER_STATE *cb_node) {
cb_node->submitCount++;
cb_node->in_use.fetch_add(1);
// First Increment for all "generic" objects bound to cmd buffer, followed by special-case objects below
IncrementBoundObjects(cb_node);
// TODO : We should be able to remove the NULL look-up checks from the code below as long as
// all the corresponding cases are verified to cause CB_INVALID state and the CB_INVALID state
// should then be flagged prior to calling this function
for (auto event : cb_node->writeEventsBeforeWait) {
auto event_state = GetEventState(event);
if (event_state) event_state->write_in_use++;
}
}
// Note: This function assumes that the global lock is held by the calling thread.
// For the given queue, verify the queue state up to the given seq number.
// Currently the only check is to make sure that if there are events to be waited on prior to
// a QueryReset, make sure that all such events have been signalled.
bool CoreChecks::VerifyQueueStateToSeq(QUEUE_STATE *initial_queue, uint64_t initial_seq) {
bool skip = false;
// sequence number we want to validate up to, per queue
std::unordered_map<QUEUE_STATE *, uint64_t> target_seqs{{initial_queue, initial_seq}};
// sequence number we've completed validation for, per queue
std::unordered_map<QUEUE_STATE *, uint64_t> done_seqs;
std::vector<QUEUE_STATE *> worklist{initial_queue};
while (worklist.size()) {
auto queue = worklist.back();
worklist.pop_back();
auto target_seq = target_seqs[queue];
auto seq = std::max(done_seqs[queue], queue->seq);
auto sub_it = queue->submissions.begin() + int(seq - queue->seq); // seq >= queue->seq
for (; seq < target_seq; ++sub_it, ++seq) {
for (auto &wait : sub_it->waitSemaphores) {
auto other_queue = GetQueueState(wait.queue);
if (other_queue == queue) continue; // semaphores /always/ point backwards, so no point here.
auto other_target_seq = std::max(target_seqs[other_queue], wait.seq);
auto other_done_seq = std::max(done_seqs[other_queue], other_queue->seq);
// if this wait is for another queue, and covers new sequence
// numbers beyond what we've already validated, mark the new
// target seq and (possibly-re)add the queue to the worklist.
if (other_done_seq < other_target_seq) {
target_seqs[other_queue] = other_target_seq;
worklist.push_back(other_queue);
}
}
}
// finally mark the point we've now validated this queue to.
done_seqs[queue] = seq;
}
return skip;
}
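// Illustrative trace of the worklist above: if queue A's submission at seq 3 waits on a
// semaphore that queue B signals at seq 7, validating A up to seq 3 records target_seqs[B] = 7
// and re-queues B, so B's earlier submissions (and anything *they* wait on) are validated
// transitively before the traversal terminates.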
// When the given fence is retired, verify outstanding queue operations through the point of the fence
bool CoreChecks::VerifyQueueStateToFence(VkFence fence) {
auto fence_state = GetFenceState(fence);
if (fence_state && fence_state->scope == kSyncScopeInternal && VK_NULL_HANDLE != fence_state->signaler.first) {
return VerifyQueueStateToSeq(GetQueueState(fence_state->signaler.first), fence_state->signaler.second);
}
return false;
}
// Decrement in-use count for objects bound to command buffer
void ValidationStateTracker::DecrementBoundResources(CMD_BUFFER_STATE const *cb_node) {
BASE_NODE *base_obj = nullptr;
for (auto obj : cb_node->object_bindings) {
base_obj = GetStateStructPtrFromObject(obj);
if (base_obj) {
base_obj->in_use.fetch_sub(1);
}
}
}
void ValidationStateTracker::RetireWorkOnQueue(QUEUE_STATE *pQueue, uint64_t seq, bool switch_finished_queries) {
std::unordered_map<VkQueue, uint64_t> otherQueueSeqs;
// Roll this queue forward, one submission at a time.
while (pQueue->seq < seq) {
auto &submission = pQueue->submissions.front();
for (auto &wait : submission.waitSemaphores) {
auto pSemaphore = GetSemaphoreState(wait.semaphore);
if (pSemaphore) {
pSemaphore->in_use.fetch_sub(1);
}
auto &lastSeq = otherQueueSeqs[wait.queue];
lastSeq = std::max(lastSeq, wait.seq);
}
for (auto &semaphore : submission.signalSemaphores) {
auto pSemaphore = GetSemaphoreState(semaphore);
if (pSemaphore) {
pSemaphore->in_use.fetch_sub(1);
}
}
for (auto &semaphore : submission.externalSemaphores) {
auto pSemaphore = GetSemaphoreState(semaphore);
if (pSemaphore) {
pSemaphore->in_use.fetch_sub(1);
}
}
for (auto cb : submission.cbs) {
auto cb_node = GetCBState(cb);
if (!cb_node) {
continue;
}
// First perform decrement on general case bound objects
DecrementBoundResources(cb_node);
for (auto event : cb_node->writeEventsBeforeWait) {
auto eventNode = eventMap.find(event);
if (eventNode != eventMap.end()) {
eventNode->second.write_in_use--;
}
}
for (auto queryStatePair : cb_node->queryToStateMap) {
const QueryState newState =
((queryStatePair.second == QUERYSTATE_ENDED && switch_finished_queries) ? QUERYSTATE_AVAILABLE
: queryStatePair.second);
pQueue->queryToStateMap[queryStatePair.first] = newState;
queryToStateMap[queryStatePair.first] = newState;
}
for (auto eventStagePair : cb_node->eventToStageMap) {
eventMap[eventStagePair.first].stageMask = eventStagePair.second;
}
cb_node->in_use.fetch_sub(1);
}
auto pFence = GetFenceState(submission.fence);
if (pFence && pFence->scope == kSyncScopeInternal) {
pFence->state = FENCE_RETIRED;
}
pQueue->submissions.pop_front();
pQueue->seq++;
}
// Roll other queues forward to the highest seq we saw a wait for
for (auto qs : otherQueueSeqs) {
RetireWorkOnQueue(GetQueueState(qs.first), qs.second, switch_finished_queries);
}
}
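// Note on the recursion above: retiring a submission that waited on another queue's semaphore
// implies the signaling queue has progressed at least to the signaling seq, so otherQueueSeqs is
// replayed through RetireWorkOnQueue to roll those queues forward as well.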
// Submit a fence to a queue, delimiting previous fences and previous untracked
// work by it.
static void SubmitFence(QUEUE_STATE *pQueue, FENCE_STATE *pFence, uint64_t submitCount) {
pFence->state = FENCE_INFLIGHT;
pFence->signaler.first = pQueue->queue;
pFence->signaler.second = pQueue->seq + pQueue->submissions.size() + submitCount;
}
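// Worked example for the seq arithmetic above: with pQueue->seq == 10 (ten submissions already
// retired), two submissions still pending and submitCount == 3, the fence's signaler.second
// becomes 15 (10 + 2 + 3), i.e. the fence retires once the queue has consumed this call's final
// batch.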
bool CoreChecks::ValidateCommandBufferSimultaneousUse(const CMD_BUFFER_STATE *pCB, int current_submit_count) const {
bool skip = false;
if ((pCB->in_use.load() || current_submit_count > 1) &&
!(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
"VUID-vkQueueSubmit-pCommandBuffers-00071", "%s is already in use and is not marked for simultaneous use.",
report_data->FormatHandle(pCB->commandBuffer).c_str());
}
return skip;
}
bool CoreChecks::ValidateCommandBufferState(const CMD_BUFFER_STATE *cb_state, const char *call_source, int current_submit_count,
const char *vu_id) const {
bool skip = false;
if (disabled.command_buffer_state) return skip;
// Validate ONE_TIME_SUBMIT_BIT CB is not being submitted more than once
if ((cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT) &&
(cb_state->submitCount + current_submit_count > 1)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
kVUID_Core_DrawState_CommandBufferSingleSubmitViolation,
"%s was begun w/ VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT set, but has been submitted 0x%" PRIxLEAST64
"times.",
report_data->FormatHandle(cb_state->commandBuffer).c_str(), cb_state->submitCount + current_submit_count);
}
// Validate that cmd buffers have been updated
switch (cb_state->state) {
case CB_INVALID_INCOMPLETE:
case CB_INVALID_COMPLETE:
skip |= ReportInvalidCommandBuffer(cb_state, call_source);
break;
case CB_NEW:
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
(uint64_t)(cb_state->commandBuffer), vu_id,
"%s used in the call to %s is unrecorded and contains no commands.",
report_data->FormatHandle(cb_state->commandBuffer).c_str(), call_source);
break;
case CB_RECORDING:
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer), kVUID_Core_DrawState_NoEndCommandBuffer,
"You must call vkEndCommandBuffer() on %s before this call to %s!",
report_data->FormatHandle(cb_state->commandBuffer).c_str(), call_source);
break;
default: /* recorded */
break;
}
return skip;
}
// Check that the queue family index of 'queue' matches one of the entries in pQueueFamilyIndices
bool CoreChecks::ValidImageBufferQueue(const CMD_BUFFER_STATE *cb_node, const VulkanTypedHandle &object, VkQueue queue,
uint32_t count, const uint32_t *indices) const {
bool found = false;
bool skip = false;
auto queue_state = GetQueueState(queue);
if (queue_state) {
for (uint32_t i = 0; i < count; i++) {
if (indices[i] == queue_state->queueFamilyIndex) {
found = true;
break;
}
}
if (!found) {
skip = log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, get_debug_report_enum[object.type], object.handle,
kVUID_Core_DrawState_InvalidQueueFamily,
"vkQueueSubmit: %s contains %s which was not created allowing concurrent access to "
"this queue family %d.",
report_data->FormatHandle(cb_node->commandBuffer).c_str(), report_data->FormatHandle(object).c_str(),
queue_state->queueFamilyIndex);
}
}
return skip;
}
// Validate that queueFamilyIndices of primary command buffers match this queue
// Secondary command buffers were previously validated in vkCmdExecuteCommands().
bool CoreChecks::ValidateQueueFamilyIndices(const CMD_BUFFER_STATE *pCB, VkQueue queue) const {
bool skip = false;
auto pPool = GetCommandPoolState(pCB->createInfo.commandPool);
auto queue_state = GetQueueState(queue);
if (pPool && queue_state) {
if (pPool->queueFamilyIndex != queue_state->queueFamilyIndex) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(pCB->commandBuffer), "VUID-vkQueueSubmit-pCommandBuffers-00074",
"vkQueueSubmit: Primary %s created in queue family %d is being submitted on %s "
"from queue family %d.",
report_data->FormatHandle(pCB->commandBuffer).c_str(), pPool->queueFamilyIndex,
report_data->FormatHandle(queue).c_str(), queue_state->queueFamilyIndex);
}
// Ensure that any bound images or buffers created with SHARING_MODE_CONCURRENT have access to the current queue family
for (const auto &object : pCB->object_bindings) {
if (object.type == kVulkanObjectTypeImage) {
auto image_state = GetImageState(object.Cast<VkImage>());
if (image_state && image_state->createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) {
skip |= ValidImageBufferQueue(pCB, object, queue, image_state->createInfo.queueFamilyIndexCount,
image_state->createInfo.pQueueFamilyIndices);
}
} else if (object.type == kVulkanObjectTypeBuffer) {
auto buffer_state = GetBufferState(object.Cast<VkBuffer>());
if (buffer_state && buffer_state->createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) {
skip |= ValidImageBufferQueue(pCB, object, queue, buffer_state->createInfo.queueFamilyIndexCount,
buffer_state->createInfo.pQueueFamilyIndices);
}
}
}
}
return skip;
}
bool CoreChecks::ValidatePrimaryCommandBufferState(const CMD_BUFFER_STATE *pCB, int current_submit_count,
QFOTransferCBScoreboards<VkImageMemoryBarrier> *qfo_image_scoreboards,
QFOTransferCBScoreboards<VkBufferMemoryBarrier> *qfo_buffer_scoreboards) const {
// Track in-use for resources off of primary and any secondary CBs
bool skip = false;
// If USAGE_SIMULTANEOUS_USE_BIT not set then CB cannot already be executing on device
skip |= ValidateCommandBufferSimultaneousUse(pCB, current_submit_count);
skip |= ValidateQueuedQFOTransfers(pCB, qfo_image_scoreboards, qfo_buffer_scoreboards);
for (auto pSubCB : pCB->linkedCommandBuffers) {
skip |= ValidateQueuedQFOTransfers(pSubCB, qfo_image_scoreboards, qfo_buffer_scoreboards);
// TODO: replace with InvalidateCommandBuffers() at recording.
if ((pSubCB->primaryCommandBuffer != pCB->commandBuffer) &&
!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
"VUID-vkQueueSubmit-pCommandBuffers-00073",
"%s was submitted with secondary %s but that buffer has subsequently been bound to "
"primary %s and it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set.",
report_data->FormatHandle(pCB->commandBuffer).c_str(), report_data->FormatHandle(pSubCB->commandBuffer).c_str(),
report_data->FormatHandle(pSubCB->primaryCommandBuffer).c_str());
}
}
skip |= ValidateCommandBufferState(pCB, "vkQueueSubmit()", current_submit_count, "VUID-vkQueueSubmit-pCommandBuffers-00072");
return skip;
}
bool CoreChecks::ValidateFenceForSubmit(const FENCE_STATE *pFence) const {
bool skip = false;
if (pFence && pFence->scope == kSyncScopeInternal) {
if (pFence->state == FENCE_INFLIGHT) {
// TODO: opportunities for "VUID-vkQueueSubmit-fence-00064", "VUID-vkQueueBindSparse-fence-01114",
// "VUID-vkAcquireNextImageKHR-fence-01287"
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
HandleToUint64(pFence->fence), kVUID_Core_DrawState_InvalidFence,
"%s is already in use by another submission.", report_data->FormatHandle(pFence->fence).c_str());
        } else if (pFence->state == FENCE_RETIRED) {
// TODO: opportunities for "VUID-vkQueueSubmit-fence-00063", "VUID-vkQueueBindSparse-fence-01113",
// "VUID-vkAcquireNextImageKHR-fence-01287"
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
HandleToUint64(pFence->fence), kVUID_Core_MemTrack_FenceState,
"%s submitted in SIGNALED state. Fences must be reset before being submitted",
report_data->FormatHandle(pFence->fence).c_str());
}
}
return skip;
}
void ValidationStateTracker::PostCallRecordQueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits,
VkFence fence, VkResult result) {
uint64_t early_retire_seq = 0;
auto pQueue = GetQueueState(queue);
auto pFence = GetFenceState(fence);
if (pFence) {
if (pFence->scope == kSyncScopeInternal) {
// Mark fence in use
SubmitFence(pQueue, pFence, std::max(1u, submitCount));
if (!submitCount) {
// If no submissions, but just dropping a fence on the end of the queue,
// record an empty submission with just the fence, so we can determine
// its completion.
pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(), std::vector<SEMAPHORE_WAIT>(),
std::vector<VkSemaphore>(), std::vector<VkSemaphore>(), fence);
}
} else {
// Retire work up until this fence early, we will not see the wait that corresponds to this signal
early_retire_seq = pQueue->seq + pQueue->submissions.size();
if (!external_sync_warning) {
external_sync_warning = true;
log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT, HandleToUint64(fence),
kVUID_Core_DrawState_QueueForwardProgress,
"vkQueueSubmit(): Signaling external %s on %s will disable validation of preceding command "
"buffer lifecycle states and the in-use status of associated objects.",
report_data->FormatHandle(fence).c_str(), report_data->FormatHandle(queue).c_str());
}
}
}
// Now process each individual submit
for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
std::vector<VkCommandBuffer> cbs;
const VkSubmitInfo *submit = &pSubmits[submit_idx];
vector<SEMAPHORE_WAIT> semaphore_waits;
vector<VkSemaphore> semaphore_signals;
vector<VkSemaphore> semaphore_externals;
for (uint32_t i = 0; i < submit->waitSemaphoreCount; ++i) {
VkSemaphore semaphore = submit->pWaitSemaphores[i];
auto pSemaphore = GetSemaphoreState(semaphore);
if (pSemaphore) {
if (pSemaphore->scope == kSyncScopeInternal) {
if (pSemaphore->signaler.first != VK_NULL_HANDLE) {
semaphore_waits.push_back({semaphore, pSemaphore->signaler.first, pSemaphore->signaler.second});
pSemaphore->in_use.fetch_add(1);
}
pSemaphore->signaler.first = VK_NULL_HANDLE;
pSemaphore->signaled = false;
} else {
semaphore_externals.push_back(semaphore);
pSemaphore->in_use.fetch_add(1);
if (pSemaphore->scope == kSyncScopeExternalTemporary) {
pSemaphore->scope = kSyncScopeInternal;
}
}
}
}
for (uint32_t i = 0; i < submit->signalSemaphoreCount; ++i) {
VkSemaphore semaphore = submit->pSignalSemaphores[i];
auto pSemaphore = GetSemaphoreState(semaphore);
if (pSemaphore) {
if (pSemaphore->scope == kSyncScopeInternal) {
pSemaphore->signaler.first = queue;
pSemaphore->signaler.second = pQueue->seq + pQueue->submissions.size() + 1;
pSemaphore->signaled = true;
pSemaphore->in_use.fetch_add(1);
semaphore_signals.push_back(semaphore);
} else {
// Retire work up until this submit early, we will not see the wait that corresponds to this signal
early_retire_seq = std::max(early_retire_seq, pQueue->seq + pQueue->submissions.size() + 1);
if (!external_sync_warning) {
external_sync_warning = true;
log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
HandleToUint64(semaphore), kVUID_Core_DrawState_QueueForwardProgress,
"vkQueueSubmit(): Signaling external %s on %s will disable validation of preceding "
"command buffer lifecycle states and the in-use status of associated objects.",
report_data->FormatHandle(semaphore).c_str(), report_data->FormatHandle(queue).c_str());
}
}
}
}
for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
auto cb_node = GetCBState(submit->pCommandBuffers[i]);
if (cb_node) {
cbs.push_back(submit->pCommandBuffers[i]);
for (auto secondaryCmdBuffer : cb_node->linkedCommandBuffers) {
cbs.push_back(secondaryCmdBuffer->commandBuffer);
IncrementResources(secondaryCmdBuffer);
}
IncrementResources(cb_node);
}
}
pQueue->submissions.emplace_back(cbs, semaphore_waits, semaphore_signals, semaphore_externals,
submit_idx == submitCount - 1 ? fence : (VkFence)VK_NULL_HANDLE);
}
if (early_retire_seq) {
RetireWorkOnQueue(pQueue, early_retire_seq, true);
}
}
void CoreChecks::PostCallRecordQueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence,
VkResult result) {
StateTracker::PostCallRecordQueueSubmit(queue, submitCount, pSubmits, fence, result);
    // The triply nested loop duplicates the one in the StateTracker, but avoids the need for two additional callbacks.
for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
const VkSubmitInfo *submit = &pSubmits[submit_idx];
for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
auto cb_node = GetCBState(submit->pCommandBuffers[i]);
if (cb_node) {
for (auto secondaryCmdBuffer : cb_node->linkedCommandBuffers) {
UpdateCmdBufImageLayouts(secondaryCmdBuffer);
RecordQueuedQFOTransfers(secondaryCmdBuffer);
}
UpdateCmdBufImageLayouts(cb_node);
RecordQueuedQFOTransfers(cb_node);
}
}
}
if (enabled.gpu_validation) {
GpuPostCallQueueSubmit(queue, submitCount, pSubmits, fence);
}
}
bool CoreChecks::ValidateSemaphoresForSubmit(VkQueue queue, const VkSubmitInfo *submit,
unordered_set<VkSemaphore> *unsignaled_sema_arg,
unordered_set<VkSemaphore> *signaled_sema_arg,
unordered_set<VkSemaphore> *internal_sema_arg) const {
bool skip = false;
unordered_set<VkSemaphore> &signaled_semaphores = *signaled_sema_arg;
unordered_set<VkSemaphore> &unsignaled_semaphores = *unsignaled_sema_arg;
unordered_set<VkSemaphore> &internal_semaphores = *internal_sema_arg;
for (uint32_t i = 0; i < submit->waitSemaphoreCount; ++i) {
skip |=
ValidateStageMaskGsTsEnables(submit->pWaitDstStageMask[i], "vkQueueSubmit()",
"VUID-VkSubmitInfo-pWaitDstStageMask-00076", "VUID-VkSubmitInfo-pWaitDstStageMask-00077",
"VUID-VkSubmitInfo-pWaitDstStageMask-02089", "VUID-VkSubmitInfo-pWaitDstStageMask-02090");
VkSemaphore semaphore = submit->pWaitSemaphores[i];
const auto *pSemaphore = GetSemaphoreState(semaphore);
if (pSemaphore && (pSemaphore->scope == kSyncScopeInternal || internal_semaphores.count(semaphore))) {
if (unsignaled_semaphores.count(semaphore) || (!(signaled_semaphores.count(semaphore)) && !(pSemaphore->signaled))) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
HandleToUint64(semaphore), kVUID_Core_DrawState_QueueForwardProgress,
"%s is waiting on %s that has no way to be signaled.", report_data->FormatHandle(queue).c_str(),
report_data->FormatHandle(semaphore).c_str());
} else {
signaled_semaphores.erase(semaphore);
unsignaled_semaphores.insert(semaphore);
}
}
if (pSemaphore && pSemaphore->scope == kSyncScopeExternalTemporary) {
internal_semaphores.insert(semaphore);
}
}
for (uint32_t i = 0; i < submit->signalSemaphoreCount; ++i) {
VkSemaphore semaphore = submit->pSignalSemaphores[i];
const auto *pSemaphore = GetSemaphoreState(semaphore);
if (pSemaphore && (pSemaphore->scope == kSyncScopeInternal || internal_semaphores.count(semaphore))) {
if (signaled_semaphores.count(semaphore) || (!(unsignaled_semaphores.count(semaphore)) && pSemaphore->signaled)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
HandleToUint64(semaphore), kVUID_Core_DrawState_QueueForwardProgress,
"%s is signaling %s that was previously signaled by %s but has not since "
"been waited on by any queue.",
report_data->FormatHandle(queue).c_str(), report_data->FormatHandle(semaphore).c_str(),
report_data->FormatHandle(pSemaphore->signaler.first).c_str());
} else {
unsignaled_semaphores.erase(semaphore);
signaled_semaphores.insert(semaphore);
}
}
}
return skip;
}
bool CoreChecks::ValidateCommandBuffersForSubmit(VkQueue queue, const VkSubmitInfo *submit,
ImageSubresPairLayoutMap *localImageLayoutMap_arg,
vector<VkCommandBuffer> *current_cmds_arg) const {
bool skip = false;
ImageSubresPairLayoutMap &localImageLayoutMap = *localImageLayoutMap_arg;
    vector<VkCommandBuffer> &current_cmds = *current_cmds_arg;
QFOTransferCBScoreboards<VkImageMemoryBarrier> qfo_image_scoreboards;
QFOTransferCBScoreboards<VkBufferMemoryBarrier> qfo_buffer_scoreboards;
for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
const auto *cb_node = GetCBState(submit->pCommandBuffers[i]);
if (cb_node) {
skip |= ValidateCmdBufImageLayouts(cb_node, imageLayoutMap, &localImageLayoutMap);
current_cmds.push_back(submit->pCommandBuffers[i]);
skip |= ValidatePrimaryCommandBufferState(
cb_node, (int)std::count(current_cmds.begin(), current_cmds.end(), submit->pCommandBuffers[i]),
&qfo_image_scoreboards, &qfo_buffer_scoreboards);
skip |= ValidateQueueFamilyIndices(cb_node, queue);
// Potential early exit here as bad object state may crash in delayed function calls
if (skip) {
return true;
}
// Call submit-time functions to validate/update state
for (auto &function : cb_node->queue_submit_functions) {
skip |= function();
}
for (auto &function : cb_node->eventUpdates) {
skip |= function(queue);
}
for (auto &function : cb_node->queryUpdates) {
skip |= function(queue);
}
}
}
return skip;
}
bool CoreChecks::PreCallValidateQueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence) {
const auto *pFence = GetFenceState(fence);
bool skip = ValidateFenceForSubmit(pFence);
if (skip) {
return true;
}
unordered_set<VkSemaphore> signaled_semaphores;
unordered_set<VkSemaphore> unsignaled_semaphores;
unordered_set<VkSemaphore> internal_semaphores;
vector<VkCommandBuffer> current_cmds;
ImageSubresPairLayoutMap localImageLayoutMap;
// Now verify each individual submit
for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
const VkSubmitInfo *submit = &pSubmits[submit_idx];
skip |= ValidateSemaphoresForSubmit(queue, submit, &unsignaled_semaphores, &signaled_semaphores, &internal_semaphores);
        skip |= ValidateCommandBuffersForSubmit(queue, submit, &localImageLayoutMap, &current_cmds);
auto chained_device_group_struct = lvl_find_in_chain<VkDeviceGroupSubmitInfo>(submit->pNext);
if (chained_device_group_struct && chained_device_group_struct->commandBufferCount > 0) {
for (uint32_t i = 0; i < chained_device_group_struct->commandBufferCount; ++i) {
skip |= ValidateDeviceMaskToPhysicalDeviceCount(chained_device_group_struct->pCommandBufferDeviceMasks[i],
VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT, HandleToUint64(queue),
"VUID-VkDeviceGroupSubmitInfo-pCommandBufferDeviceMasks-00086");
}
}
}
return skip;
}
void CoreChecks::PreCallRecordQueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence) {
if (enabled.gpu_validation && device_extensions.vk_ext_descriptor_indexing) {
GpuPreCallRecordQueueSubmit(queue, submitCount, pSubmits, fence);
}
}
#ifdef VK_USE_PLATFORM_ANDROID_KHR
// Android-specific validation that uses types defined only on Android and only for NDK versions
// that support the VK_ANDROID_external_memory_android_hardware_buffer extension.
// This chunk could move into a separate core_validation_android.cpp file... ?
// clang-format off
// Map external format and usage flags to/from equivalent Vulkan flags
// (Tables as of v1.1.92)
// AHardwareBuffer Format Vulkan Format
// ====================== =============
// AHARDWAREBUFFER_FORMAT_R8G8B8A8_UNORM VK_FORMAT_R8G8B8A8_UNORM
// AHARDWAREBUFFER_FORMAT_R8G8B8X8_UNORM VK_FORMAT_R8G8B8A8_UNORM
// AHARDWAREBUFFER_FORMAT_R8G8B8_UNORM VK_FORMAT_R8G8B8_UNORM
// AHARDWAREBUFFER_FORMAT_R5G6B5_UNORM VK_FORMAT_R5G6B5_UNORM_PACK16
// AHARDWAREBUFFER_FORMAT_R16G16B16A16_FLOAT VK_FORMAT_R16G16B16A16_SFLOAT
// AHARDWAREBUFFER_FORMAT_R10G10B10A2_UNORM VK_FORMAT_A2B10G10R10_UNORM_PACK32
// AHARDWAREBUFFER_FORMAT_D16_UNORM VK_FORMAT_D16_UNORM
// AHARDWAREBUFFER_FORMAT_D24_UNORM VK_FORMAT_X8_D24_UNORM_PACK32
// AHARDWAREBUFFER_FORMAT_D24_UNORM_S8_UINT VK_FORMAT_D24_UNORM_S8_UINT
// AHARDWAREBUFFER_FORMAT_D32_FLOAT VK_FORMAT_D32_SFLOAT
// AHARDWAREBUFFER_FORMAT_D32_FLOAT_S8_UINT VK_FORMAT_D32_SFLOAT_S8_UINT
// AHARDWAREBUFFER_FORMAT_S8_UINT VK_FORMAT_S8_UINT
// The AHARDWAREBUFFER_FORMAT_* are an enum in the NDK headers, but get passed in to Vulkan
// as uint32_t. Casting the enums here avoids scattering casts around in the code.
std::map<uint32_t, VkFormat> ahb_format_map_a2v = {
{ (uint32_t)AHARDWAREBUFFER_FORMAT_R8G8B8A8_UNORM, VK_FORMAT_R8G8B8A8_UNORM },
{ (uint32_t)AHARDWAREBUFFER_FORMAT_R8G8B8X8_UNORM, VK_FORMAT_R8G8B8A8_UNORM },
{ (uint32_t)AHARDWAREBUFFER_FORMAT_R8G8B8_UNORM, VK_FORMAT_R8G8B8_UNORM },
{ (uint32_t)AHARDWAREBUFFER_FORMAT_R5G6B5_UNORM, VK_FORMAT_R5G6B5_UNORM_PACK16 },
{ (uint32_t)AHARDWAREBUFFER_FORMAT_R16G16B16A16_FLOAT, VK_FORMAT_R16G16B16A16_SFLOAT },
{ (uint32_t)AHARDWAREBUFFER_FORMAT_R10G10B10A2_UNORM, VK_FORMAT_A2B10G10R10_UNORM_PACK32 },
{ (uint32_t)AHARDWAREBUFFER_FORMAT_D16_UNORM, VK_FORMAT_D16_UNORM },
{ (uint32_t)AHARDWAREBUFFER_FORMAT_D24_UNORM, VK_FORMAT_X8_D24_UNORM_PACK32 },
{ (uint32_t)AHARDWAREBUFFER_FORMAT_D24_UNORM_S8_UINT, VK_FORMAT_D24_UNORM_S8_UINT },
{ (uint32_t)AHARDWAREBUFFER_FORMAT_D32_FLOAT, VK_FORMAT_D32_SFLOAT },
{ (uint32_t)AHARDWAREBUFFER_FORMAT_D32_FLOAT_S8_UINT, VK_FORMAT_D32_SFLOAT_S8_UINT },
{ (uint32_t)AHARDWAREBUFFER_FORMAT_S8_UINT, VK_FORMAT_S8_UINT }
};
// AHardwareBuffer Usage Vulkan Usage or Creation Flag (Intermixed - Aargh!)
// ===================== ===================================================
// None VK_IMAGE_USAGE_TRANSFER_SRC_BIT
// None VK_IMAGE_USAGE_TRANSFER_DST_BIT
// AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE VK_IMAGE_USAGE_SAMPLED_BIT
// AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT
// AHARDWAREBUFFER_USAGE_GPU_COLOR_OUTPUT VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT
// AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT
// AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE None
// AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT VK_IMAGE_CREATE_PROTECTED_BIT
// None VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT
// None VK_IMAGE_CREATE_EXTENDED_USAGE_BIT
// Same casting rationale. De-mixing the table to prevent type confusion and aliasing
std::map<uint64_t, VkImageUsageFlags> ahb_usage_map_a2v = {
{ (uint64_t)AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE, (VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT) },
{ (uint64_t)AHARDWAREBUFFER_USAGE_GPU_COLOR_OUTPUT, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT },
{ (uint64_t)AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE, 0 }, // No equivalent
};
std::map<uint64_t, VkImageCreateFlags> ahb_create_map_a2v = {
{ (uint64_t)AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP, VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT },
{ (uint64_t)AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT, VK_IMAGE_CREATE_PROTECTED_BIT },
{ (uint64_t)AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE, 0 }, // No equivalent
};
std::map<VkImageUsageFlags, uint64_t> ahb_usage_map_v2a = {
{ VK_IMAGE_USAGE_SAMPLED_BIT, (uint64_t)AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE },
{ VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT, (uint64_t)AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE },
{ VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, (uint64_t)AHARDWAREBUFFER_USAGE_GPU_COLOR_OUTPUT },
};
std::map<VkImageCreateFlags, uint64_t> ahb_create_map_v2a = {
{ VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT, (uint64_t)AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP },
{ VK_IMAGE_CREATE_PROTECTED_BIT, (uint64_t)AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT },
};
// clang-format on
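// Illustrative use of the tables above (a sketch; the real translation is inlined in the
// validation functions below):
//
//   VkImageUsageFlags usage = 0;
//   if (ahb_desc.usage & AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE)
//       usage |= ahb_usage_map_a2v[AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE];
//   VkFormat format = ahb_format_map_a2v.count(ahb_desc.format) ? ahb_format_map_a2v[ahb_desc.format]
//                                                               : VK_FORMAT_UNDEFINED;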
//
// AHB-extension new APIs
//
bool CoreChecks::PreCallValidateGetAndroidHardwareBufferPropertiesANDROID(VkDevice device, const struct AHardwareBuffer *buffer,
VkAndroidHardwareBufferPropertiesANDROID *pProperties) {
bool skip = false;
// buffer must be a valid Android hardware buffer object with at least one of the AHARDWAREBUFFER_USAGE_GPU_* usage flags.
AHardwareBuffer_Desc ahb_desc;
AHardwareBuffer_describe(buffer, &ahb_desc);
uint32_t required_flags = AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE | AHARDWAREBUFFER_USAGE_GPU_COLOR_OUTPUT |
AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP | AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE |
AHARDWAREBUFFER_USAGE_GPU_DATA_BUFFER;
if (0 == (ahb_desc.usage & required_flags)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
"VUID-vkGetAndroidHardwareBufferPropertiesANDROID-buffer-01884",
"vkGetAndroidHardwareBufferPropertiesANDROID: The AHardwareBuffer's AHardwareBuffer_Desc.usage (0x%" PRIx64
") does not have any AHARDWAREBUFFER_USAGE_GPU_* flags set.",
ahb_desc.usage);
}
return skip;
}
void CoreChecks::PostCallRecordGetAndroidHardwareBufferPropertiesANDROID(VkDevice device, const struct AHardwareBuffer *buffer,
VkAndroidHardwareBufferPropertiesANDROID *pProperties,
VkResult result) {
if (VK_SUCCESS != result) return;
auto ahb_format_props = lvl_find_in_chain<VkAndroidHardwareBufferFormatPropertiesANDROID>(pProperties->pNext);
if (ahb_format_props) {
ahb_ext_formats_set.insert(ahb_format_props->externalFormat);
}
}
bool CoreChecks::PreCallValidateGetMemoryAndroidHardwareBufferANDROID(VkDevice device,
const VkMemoryGetAndroidHardwareBufferInfoANDROID *pInfo,
struct AHardwareBuffer **pBuffer) {
bool skip = false;
const DEVICE_MEMORY_STATE *mem_info = GetDevMemState(pInfo->memory);
// VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID must have been included in
// VkExportMemoryAllocateInfoKHR::handleTypes when memory was created.
if (!mem_info->is_export ||
(0 == (mem_info->export_handle_type_flags & VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID))) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
"VUID-VkMemoryGetAndroidHardwareBufferInfoANDROID-handleTypes-01882",
"vkGetMemoryAndroidHardwareBufferANDROID: %s was not allocated for export, or the "
"export handleTypes (0x%" PRIx32
") did not contain VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID.",
report_data->FormatHandle(pInfo->memory).c_str(), mem_info->export_handle_type_flags);
}
// If the pNext chain of the VkMemoryAllocateInfo used to allocate memory included a VkMemoryDedicatedAllocateInfo
// with non-NULL image member, then that image must already be bound to memory.
if (mem_info->is_dedicated && (VK_NULL_HANDLE != mem_info->dedicated_image)) {
const auto image_state = GetImageState(mem_info->dedicated_image);
if ((nullptr == image_state) || (0 == (image_state->GetBoundMemory().count(pInfo->memory)))) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), "VUID-VkMemoryGetAndroidHardwareBufferInfoANDROID-pNext-01883",
"vkGetMemoryAndroidHardwareBufferANDROID: %s was allocated using a dedicated "
"%s, but that image is not bound to the VkDeviceMemory object.",
report_data->FormatHandle(pInfo->memory).c_str(),
report_data->FormatHandle(mem_info->dedicated_image).c_str());
}
}
return skip;
}
//
// AHB-specific validation within non-AHB APIs
//
bool CoreChecks::ValidateAllocateMemoryANDROID(const VkMemoryAllocateInfo *alloc_info) const {
bool skip = false;
auto import_ahb_info = lvl_find_in_chain<VkImportAndroidHardwareBufferInfoANDROID>(alloc_info->pNext);
auto exp_mem_alloc_info = lvl_find_in_chain<VkExportMemoryAllocateInfo>(alloc_info->pNext);
auto mem_ded_alloc_info = lvl_find_in_chain<VkMemoryDedicatedAllocateInfo>(alloc_info->pNext);
if ((import_ahb_info) && (NULL != import_ahb_info->buffer)) {
// This is an import with handleType of VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID
AHardwareBuffer_Desc ahb_desc = {};
AHardwareBuffer_describe(import_ahb_info->buffer, &ahb_desc);
// If buffer is not NULL, it must be a valid Android hardware buffer object with AHardwareBuffer_Desc::format and
// AHardwareBuffer_Desc::usage compatible with Vulkan as described in Android Hardware Buffers.
//
// BLOB & GPU_DATA_BUFFER combo specifically allowed
if ((AHARDWAREBUFFER_FORMAT_BLOB != ahb_desc.format) || (0 == (ahb_desc.usage & AHARDWAREBUFFER_USAGE_GPU_DATA_BUFFER))) {
// Otherwise, must be a combination from the AHardwareBuffer Format and Usage Equivalence tables
// Usage must have at least one bit from the table. It may have additional bits not in the table
uint64_t ahb_equiv_usage_bits = AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE | AHARDWAREBUFFER_USAGE_GPU_COLOR_OUTPUT |
AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP | AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE |
AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT;
if ((0 == (ahb_desc.usage & ahb_equiv_usage_bits)) || (0 == ahb_format_map_a2v.count(ahb_desc.format))) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), "VUID-VkImportAndroidHardwareBufferInfoANDROID-buffer-01881",
"vkAllocateMemory: The AHardwareBuffer_Desc's format ( %u ) and/or usage ( 0x%" PRIx64
" ) are not compatible with Vulkan.",
ahb_desc.format, ahb_desc.usage);
}
}
// Collect external buffer info
VkPhysicalDeviceExternalBufferInfo pdebi = {};
pdebi.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_BUFFER_INFO;
pdebi.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID;
if (AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE & ahb_desc.usage) {
pdebi.usage |= ahb_usage_map_a2v[AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE];
}
if (AHARDWAREBUFFER_USAGE_GPU_COLOR_OUTPUT & ahb_desc.usage) {
pdebi.usage |= ahb_usage_map_a2v[AHARDWAREBUFFER_USAGE_GPU_COLOR_OUTPUT];
}
VkExternalBufferProperties ext_buf_props = {};
ext_buf_props.sType = VK_STRUCTURE_TYPE_EXTERNAL_BUFFER_PROPERTIES;
DispatchGetPhysicalDeviceExternalBufferProperties(physical_device, &pdebi, &ext_buf_props);
// Collect external format info
VkPhysicalDeviceExternalImageFormatInfo pdeifi = {};
pdeifi.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO;
pdeifi.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID;
VkPhysicalDeviceImageFormatInfo2 pdifi2 = {};
pdifi2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2;
pdifi2.pNext = &pdeifi;
if (0 < ahb_format_map_a2v.count(ahb_desc.format)) pdifi2.format = ahb_format_map_a2v[ahb_desc.format];
        pdifi2.type = VK_IMAGE_TYPE_2D;           // The AHB does not convey this; 2D is the likely case
        pdifi2.tiling = VK_IMAGE_TILING_OPTIMAL;  // Likewise, optimal tiling is the representative choice
if (AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE & ahb_desc.usage) {
pdifi2.usage |= ahb_usage_map_a2v[AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE];
}
if (AHARDWAREBUFFER_USAGE_GPU_COLOR_OUTPUT & ahb_desc.usage) {
pdifi2.usage |= ahb_usage_map_a2v[AHARDWAREBUFFER_USAGE_GPU_COLOR_OUTPUT];
}
if (AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP & ahb_desc.usage) {
pdifi2.flags |= ahb_create_map_a2v[AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP];
}
if (AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT & ahb_desc.usage) {
pdifi2.flags |= ahb_create_map_a2v[AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT];
}
VkExternalImageFormatProperties ext_img_fmt_props = {};
ext_img_fmt_props.sType = VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES;
VkImageFormatProperties2 ifp2 = {};
ifp2.sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2;
ifp2.pNext = &ext_img_fmt_props;
VkResult fmt_lookup_result = GetPDImageFormatProperties2(&pdifi2, &ifp2);
// If buffer is not NULL, Android hardware buffers must be supported for import, as reported by
// VkExternalImageFormatProperties or VkExternalBufferProperties.
if (0 == (ext_buf_props.externalMemoryProperties.externalMemoryFeatures & VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT)) {
if ((VK_SUCCESS != fmt_lookup_result) || (0 == (ext_img_fmt_props.externalMemoryProperties.externalMemoryFeatures &
VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT))) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), "VUID-VkImportAndroidHardwareBufferInfoANDROID-buffer-01880",
"vkAllocateMemory: Neither the VkExternalImageFormatProperties nor the VkExternalBufferProperties "
"structs for the AHardwareBuffer include the VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT flag.");
}
}
// Retrieve buffer and format properties of the provided AHardwareBuffer
VkAndroidHardwareBufferFormatPropertiesANDROID ahb_format_props = {};
ahb_format_props.sType = VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_FORMAT_PROPERTIES_ANDROID;
VkAndroidHardwareBufferPropertiesANDROID ahb_props = {};
ahb_props.sType = VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_PROPERTIES_ANDROID;
ahb_props.pNext = &ahb_format_props;
DispatchGetAndroidHardwareBufferPropertiesANDROID(device, import_ahb_info->buffer, &ahb_props);
// allocationSize must be the size returned by vkGetAndroidHardwareBufferPropertiesANDROID for the Android hardware buffer
if (alloc_info->allocationSize != ahb_props.allocationSize) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), "VUID-VkMemoryAllocateInfo-allocationSize-02383",
"vkAllocateMemory: VkMemoryAllocateInfo struct with chained VkImportAndroidHardwareBufferInfoANDROID "
"struct, allocationSize (%" PRId64
") does not match the AHardwareBuffer's reported allocationSize (%" PRId64 ").",
alloc_info->allocationSize, ahb_props.allocationSize);
}
// memoryTypeIndex must be one of those returned by vkGetAndroidHardwareBufferPropertiesANDROID for the AHardwareBuffer
// Note: memoryTypeIndex is an index, memoryTypeBits is a bitmask
uint32_t mem_type_bitmask = 1 << alloc_info->memoryTypeIndex;
if (0 == (mem_type_bitmask & ahb_props.memoryTypeBits)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), "VUID-VkMemoryAllocateInfo-memoryTypeIndex-02385",
"vkAllocateMemory: VkMemoryAllocateInfo struct with chained VkImportAndroidHardwareBufferInfoANDROID "
"struct, memoryTypeIndex (%" PRId32
") does not correspond to a bit set in AHardwareBuffer's reported "
"memoryTypeBits bitmask (0x%" PRIx32 ").",
alloc_info->memoryTypeIndex, ahb_props.memoryTypeBits);
}
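        // Illustrative arithmetic: memoryTypeIndex 2 yields mem_type_bitmask 0b0100 (1 << 2);
        // if the AHardwareBuffer reports memoryTypeBits 0b1010 (types 1 and 3 only), the
        // check above fires because the bits do not intersect.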
// Checks for allocations without a dedicated allocation requirement
if ((nullptr == mem_ded_alloc_info) || (VK_NULL_HANDLE == mem_ded_alloc_info->image)) {
// the Android hardware buffer must have a format of AHARDWAREBUFFER_FORMAT_BLOB and a usage that includes
// AHARDWAREBUFFER_USAGE_GPU_DATA_BUFFER
if (((uint64_t)AHARDWAREBUFFER_FORMAT_BLOB != ahb_desc.format) ||
(0 == (ahb_desc.usage & AHARDWAREBUFFER_USAGE_GPU_DATA_BUFFER))) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
"VUID-VkMemoryAllocateInfo-pNext-02384",
"vkAllocateMemory: VkMemoryAllocateInfo struct with chained VkImportAndroidHardwareBufferInfoANDROID "
"struct without a dedicated allocation requirement, while the AHardwareBuffer_Desc's format ( %u ) is not "
"AHARDWAREBUFFER_FORMAT_BLOB or usage (0x%" PRIx64 ") does not include AHARDWAREBUFFER_USAGE_GPU_DATA_BUFFER.",
ahb_desc.format, ahb_desc.usage);
}
} else { // Checks specific to import with a dedicated allocation requirement
const VkImageCreateInfo *ici = &(GetImageState(mem_ded_alloc_info->image)->createInfo);
// The Android hardware buffer's usage must include at least one of AHARDWAREBUFFER_USAGE_GPU_COLOR_OUTPUT or
// AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE
if (0 == (ahb_desc.usage & (AHARDWAREBUFFER_USAGE_GPU_COLOR_OUTPUT | AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE))) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
"VUID-VkMemoryAllocateInfo-pNext-02386",
"vkAllocateMemory: VkMemoryAllocateInfo struct with chained VkImportAndroidHardwareBufferInfoANDROID and a "
"dedicated allocation requirement, while the AHardwareBuffer's usage (0x%" PRIx64
") contains neither AHARDWAREBUFFER_USAGE_GPU_COLOR_OUTPUT nor AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE.",
ahb_desc.usage);
}
// the format of image must be VK_FORMAT_UNDEFINED or the format returned by
// vkGetAndroidHardwareBufferPropertiesANDROID
if ((ici->format != ahb_format_props.format) && (VK_FORMAT_UNDEFINED != ici->format)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), "VUID-VkMemoryAllocateInfo-pNext-02387",
"vkAllocateMemory: VkMemoryAllocateInfo struct with chained "
"VkImportAndroidHardwareBufferInfoANDROID, the dedicated allocation image's "
"format (%s) is not VK_FORMAT_UNDEFINED and does not match the AHardwareBuffer's format (%s).",
string_VkFormat(ici->format), string_VkFormat(ahb_format_props.format));
}
            // The width, height, and array layer dimensions of image and the Android hardware buffer must be identical
if ((ici->extent.width != ahb_desc.width) || (ici->extent.height != ahb_desc.height) ||
(ici->arrayLayers != ahb_desc.layers)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), "VUID-VkMemoryAllocateInfo-pNext-02388",
"vkAllocateMemory: VkMemoryAllocateInfo struct with chained "
"VkImportAndroidHardwareBufferInfoANDROID, the dedicated allocation image's "
"width, height, and arrayLayers (%" PRId32 " %" PRId32 " %" PRId32
") do not match those of the AHardwareBuffer (%" PRId32 " %" PRId32 " %" PRId32 ").",
ici->extent.width, ici->extent.height, ici->arrayLayers, ahb_desc.width, ahb_desc.height,
ahb_desc.layers);
}
// If the Android hardware buffer's usage includes AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE, the image must
// have either a full mipmap chain or exactly 1 mip level.
//
// NOTE! The language of this VUID contradicts the language in the spec (1.1.93), which says "The
// AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE flag does not correspond to a Vulkan image usage or creation flag. Instead,
// its presence indicates that the Android hardware buffer contains a complete mipmap chain, and its absence indicates
// that the Android hardware buffer contains only a single mip level."
//
// TODO: This code implements the VUID's meaning, but it seems likely that the spec text is actually correct.
// Clarification requested.
if ((ahb_desc.usage & AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE) && (ici->mipLevels != 1) &&
(ici->mipLevels != FullMipChainLevels(ici->extent))) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), "VUID-VkMemoryAllocateInfo-pNext-02389",
"vkAllocateMemory: VkMemoryAllocateInfo struct with chained VkImportAndroidHardwareBufferInfoANDROID, "
"usage includes AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE but mipLevels (%" PRId32
") is neither 1 nor full mip "
"chain levels (%" PRId32 ").",
ici->mipLevels, FullMipChainLevels(ici->extent));
}
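            // Illustrative arithmetic, assuming FullMipChainLevels returns
            // floor(log2(max(width, height, depth))) + 1: a 640x480 image has a full chain of
            // floor(log2(640)) + 1 = 10 levels, so mipLevels of 1 or 10 would pass the check above.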
// each bit set in the usage of image must be listed in AHardwareBuffer Usage Equivalence, and if there is a
// corresponding AHARDWAREBUFFER_USAGE bit listed that bit must be included in the Android hardware buffer's
// AHardwareBuffer_Desc::usage
if (ici->usage &
~(VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT)) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), "VUID-VkMemoryAllocateInfo-pNext-02390",
"vkAllocateMemory: VkMemoryAllocateInfo struct with chained VkImportAndroidHardwareBufferInfoANDROID, "
"dedicated image usage bits include one or more with no AHardwareBuffer equivalent.");
}
bool illegal_usage = false;
std::vector<VkImageUsageFlags> usages = {VK_IMAGE_USAGE_SAMPLED_BIT, VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT,
VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT};
for (VkImageUsageFlags ubit : usages) {
if (ici->usage & ubit) {
uint64_t ahb_usage = ahb_usage_map_v2a[ubit];
if (0 == (ahb_usage & ahb_desc.usage)) illegal_usage = true;
}
}
if (illegal_usage) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), "VUID-VkMemoryAllocateInfo-pNext-02390",
"vkAllocateMemory: VkMemoryAllocateInfo struct with chained "
"VkImportAndroidHardwareBufferInfoANDROID, one or more AHardwareBuffer usage bits equivalent to "
"the provided image's usage bits are missing from AHardwareBuffer_Desc.usage.");
}
}
} else { // Not an import
if ((exp_mem_alloc_info) && (mem_ded_alloc_info) &&
(0 != (VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID & exp_mem_alloc_info->handleTypes)) &&
(VK_NULL_HANDLE != mem_ded_alloc_info->image)) {
// This is an Android HW Buffer export
if (0 != alloc_info->allocationSize) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), "VUID-VkMemoryAllocateInfo-pNext-01874",
"vkAllocateMemory: pNext chain indicates a dedicated Android Hardware Buffer export allocation, "
"but allocationSize is non-zero.");
}
} else {
if (0 == alloc_info->allocationSize) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
"VUID-VkMemoryAllocateInfo-pNext-01874",
"vkAllocateMemory: pNext chain does not indicate a dedicated export allocation, but allocationSize is 0.");
            }
}
}
return skip;
}
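// For reference, a typical AHB import allocation that the checks above apply to looks
// roughly like this on the application side (illustrative sketch; names are hypothetical):
//
//   VkImportAndroidHardwareBufferInfoANDROID import_info = {
//       VK_STRUCTURE_TYPE_IMPORT_ANDROID_HARDWARE_BUFFER_INFO_ANDROID, nullptr, ahb};
//   VkMemoryAllocateInfo alloc_info = {VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, &import_info,
//                                      ahb_props.allocationSize, chosen_memory_type_index};
//   vkAllocateMemory(device, &alloc_info, nullptr, &memory);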
bool CoreChecks::ValidateGetImageMemoryRequirements2ANDROID(const VkImage image) const {
bool skip = false;
const IMAGE_STATE *image_state = GetImageState(image);
if (image_state->imported_ahb && (0 == image_state->GetBoundMemory().size())) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, HandleToUint64(image),
"VUID-VkImageMemoryRequirementsInfo2-image-01897",
"vkGetImageMemoryRequirements2: Attempt to query layout from an image created with "
"VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID handleType, which has not yet been "
"bound to memory.");
}
return skip;
}
static bool ValidateGetPhysicalDeviceImageFormatProperties2ANDROID(const debug_report_data *report_data,
const VkPhysicalDeviceImageFormatInfo2 *pImageFormatInfo,
const VkImageFormatProperties2 *pImageFormatProperties) {
bool skip = false;
const VkAndroidHardwareBufferUsageANDROID *ahb_usage =
lvl_find_in_chain<VkAndroidHardwareBufferUsageANDROID>(pImageFormatProperties->pNext);
if (nullptr != ahb_usage) {
const VkPhysicalDeviceExternalImageFormatInfo *pdeifi =
lvl_find_in_chain<VkPhysicalDeviceExternalImageFormatInfo>(pImageFormatInfo->pNext);
if ((nullptr == pdeifi) || (VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID != pdeifi->handleType)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-vkGetPhysicalDeviceImageFormatProperties2-pNext-01868",
"vkGetPhysicalDeviceImageFormatProperties2: pImageFormatProperties includes a chained "
"VkAndroidHardwareBufferUsageANDROID struct, but pImageFormatInfo does not include a chained "
"VkPhysicalDeviceExternalImageFormatInfo struct with handleType "
"VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID.");
}
}
return skip;
}
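// For reference, a query that satisfies the rule above chains the structs like this
// (illustrative sketch of the application side):
//
//   VkPhysicalDeviceExternalImageFormatInfo ext_info = {
//       VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO, nullptr,
//       VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID};
//   VkPhysicalDeviceImageFormatInfo2 fmt_info = {VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2, &ext_info};
//   VkAndroidHardwareBufferUsageANDROID ahb_usage = {VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_USAGE_ANDROID};
//   VkImageFormatProperties2 props = {VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2, &ahb_usage};
//   vkGetPhysicalDeviceImageFormatProperties2(phys_dev, &fmt_info, &props);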
bool CoreChecks::ValidateCreateSamplerYcbcrConversionANDROID(const VkSamplerYcbcrConversionCreateInfo *create_info) const {
const VkExternalFormatANDROID *ext_format_android = lvl_find_in_chain<VkExternalFormatANDROID>(create_info->pNext);
if ((nullptr != ext_format_android) && (0 != ext_format_android->externalFormat)) {
if (VK_FORMAT_UNDEFINED != create_info->format) {
return log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_EXT, 0,
"VUID-VkSamplerYcbcrConversionCreateInfo-format-01904",
"vkCreateSamplerYcbcrConversion[KHR]: CreateInfo format is not VK_FORMAT_UNDEFINED while "
"there is a chained VkExternalFormatANDROID struct.");
}
} else if (VK_FORMAT_UNDEFINED == create_info->format) {
return log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_EXT, 0,
"VUID-VkSamplerYcbcrConversionCreateInfo-format-01904",
"vkCreateSamplerYcbcrConversion[KHR]: CreateInfo format is VK_FORMAT_UNDEFINED with no chained "
"VkExternalFormatANDROID struct.");
}
return false;
}
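// For reference, an external-format conversion that passes the check above looks roughly
// like this (illustrative sketch):
//
//   VkExternalFormatANDROID ext_fmt = {VK_STRUCTURE_TYPE_EXTERNAL_FORMAT_ANDROID, nullptr,
//                                      ahb_format_props.externalFormat};
//   VkSamplerYcbcrConversionCreateInfo ci = {VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_CREATE_INFO, &ext_fmt};
//   ci.format = VK_FORMAT_UNDEFINED;  // required whenever externalFormat is non-zero
//   vkCreateSamplerYcbcrConversion(device, &ci, nullptr, &conversion);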
void ValidationStateTracker::RecordCreateSamplerYcbcrConversionANDROID(const VkSamplerYcbcrConversionCreateInfo *create_info,
VkSamplerYcbcrConversion ycbcr_conversion) {
const VkExternalFormatANDROID *ext_format_android = lvl_find_in_chain<VkExternalFormatANDROID>(create_info->pNext);
if (ext_format_android && (0 != ext_format_android->externalFormat)) {
ycbcr_conversion_ahb_fmt_map.emplace(ycbcr_conversion, ext_format_android->externalFormat);
}
}
void ValidationStateTracker::RecordDestroySamplerYcbcrConversionANDROID(VkSamplerYcbcrConversion ycbcr_conversion) {
ycbcr_conversion_ahb_fmt_map.erase(ycbcr_conversion);
}
#else // !VK_USE_PLATFORM_ANDROID_KHR
bool CoreChecks::ValidateAllocateMemoryANDROID(const VkMemoryAllocateInfo *alloc_info) const { return false; }
static bool ValidateGetPhysicalDeviceImageFormatProperties2ANDROID(const debug_report_data *report_data,
const VkPhysicalDeviceImageFormatInfo2 *pImageFormatInfo,
const VkImageFormatProperties2 *pImageFormatProperties) {
return false;
}
bool CoreChecks::ValidateCreateSamplerYcbcrConversionANDROID(const VkSamplerYcbcrConversionCreateInfo *create_info) const {
return false;
}
bool CoreChecks::ValidateGetImageMemoryRequirements2ANDROID(const VkImage image) const { return false; }
void ValidationStateTracker::RecordCreateSamplerYcbcrConversionANDROID(const VkSamplerYcbcrConversionCreateInfo *create_info,
                                                                       VkSamplerYcbcrConversion ycbcr_conversion) {}
void ValidationStateTracker::RecordDestroySamplerYcbcrConversionANDROID(VkSamplerYcbcrConversion ycbcr_conversion) {}
#endif // VK_USE_PLATFORM_ANDROID_KHR
bool CoreChecks::PreCallValidateAllocateMemory(VkDevice device, const VkMemoryAllocateInfo *pAllocateInfo,
const VkAllocationCallbacks *pAllocator, VkDeviceMemory *pMemory) {
bool skip = false;
if (memObjMap.size() >= phys_dev_props.limits.maxMemoryAllocationCount) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
kVUIDUndefined, "Number of currently valid memory objects is not less than the maximum allowed (%u).",
phys_dev_props.limits.maxMemoryAllocationCount);
}
if (device_extensions.vk_android_external_memory_android_hardware_buffer) {
skip |= ValidateAllocateMemoryANDROID(pAllocateInfo);
} else {
if (0 == pAllocateInfo->allocationSize) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
"VUID-VkMemoryAllocateInfo-allocationSize-00638", "vkAllocateMemory: allocationSize is 0.");
        }
}
auto chained_flags_struct = lvl_find_in_chain<VkMemoryAllocateFlagsInfo>(pAllocateInfo->pNext);
if (chained_flags_struct && chained_flags_struct->flags == VK_MEMORY_ALLOCATE_DEVICE_MASK_BIT) {
skip |= ValidateDeviceMaskToPhysicalDeviceCount(chained_flags_struct->deviceMask, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), "VUID-VkMemoryAllocateFlagsInfo-deviceMask-00675");
skip |= ValidateDeviceMaskToZero(chained_flags_struct->deviceMask, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), "VUID-VkMemoryAllocateFlagsInfo-deviceMask-00676");
}
// TODO: VUIDs ending in 00643, 00644, 00646, 00647, 01742, 01743, 01745, 00645, 00648, 01744
return skip;
}
void ValidationStateTracker::PostCallRecordAllocateMemory(VkDevice device, const VkMemoryAllocateInfo *pAllocateInfo,
const VkAllocationCallbacks *pAllocator, VkDeviceMemory *pMemory,
VkResult result) {
if (VK_SUCCESS == result) {
AddMemObjInfo(device, *pMemory, pAllocateInfo);
}
return;
}
// For the given object node, if it is in use, flag a validation error and return the callback result; otherwise return false
bool CoreChecks::ValidateObjectNotInUse(const BASE_NODE *obj_node, const VulkanTypedHandle &obj_struct, const char *caller_name,
const char *error_code) const {
if (disabled.object_in_use) return false;
bool skip = false;
if (obj_node->in_use.load()) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, get_debug_report_enum[obj_struct.type], obj_struct.handle,
error_code, "Cannot call %s on %s that is currently in use by a command buffer.", caller_name,
report_data->FormatHandle(obj_struct).c_str());
}
return skip;
}
bool CoreChecks::PreCallValidateFreeMemory(VkDevice device, VkDeviceMemory mem, const VkAllocationCallbacks *pAllocator) {
const DEVICE_MEMORY_STATE *mem_info = GetDevMemState(mem);
const VulkanTypedHandle obj_struct(mem, kVulkanObjectTypeDeviceMemory);
bool skip = false;
if (mem_info) {
skip |= ValidateObjectNotInUse(mem_info, obj_struct, "vkFreeMemory", "VUID-vkFreeMemory-memory-00677");
for (const auto &obj : mem_info->obj_bindings) {
log_msg(report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, get_debug_report_enum[obj.type], obj.handle,
kVUID_Core_MemTrack_FreedMemRef, "%s still has a reference to %s.", report_data->FormatHandle(obj).c_str(),
report_data->FormatHandle(mem_info->mem).c_str());
}
}
return skip;
}
void ValidationStateTracker::PreCallRecordFreeMemory(VkDevice device, VkDeviceMemory mem, const VkAllocationCallbacks *pAllocator) {
if (!mem) return;
DEVICE_MEMORY_STATE *mem_info = GetDevMemState(mem);
const VulkanTypedHandle obj_struct(mem, kVulkanObjectTypeDeviceMemory);
// Clear mem binding for any bound objects
for (const auto &obj : mem_info->obj_bindings) {
BINDABLE *bindable_state = nullptr;
switch (obj.type) {
case kVulkanObjectTypeImage:
bindable_state = GetImageState(obj.Cast<VkImage>());
break;
case kVulkanObjectTypeBuffer:
bindable_state = GetBufferState(obj.Cast<VkBuffer>());
break;
case kVulkanObjectTypeAccelerationStructureNV:
bindable_state = GetAccelerationStructureState(obj.Cast<VkAccelerationStructureNV>());
break;
default:
// Should only have acceleration structure, buffer, or image objects bound to memory
assert(0);
}
assert(bindable_state);
bindable_state->binding.mem = MEMORY_UNBOUND;
bindable_state->UpdateBoundMemorySet();
}
// Any bound cmd buffers are now invalid
InvalidateCommandBuffers(mem_info->cb_bindings, obj_struct);
memObjMap.erase(mem);
}
// Validate the requested map range: the memory must not already be mapped, and the
// size of the map range must be:
//  1. Non-zero
//  2. Within the size of the memory allocation
bool CoreChecks::ValidateMapMemRange(VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
bool skip = false;
if (size == 0) {
skip =
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, HandleToUint64(mem),
kVUID_Core_MemTrack_InvalidMap, "VkMapMemory: Attempting to map memory range of size zero");
}
auto mem_element = memObjMap.find(mem);
if (mem_element != memObjMap.end()) {
auto mem_info = mem_element->second.get();
// It is an application error to call VkMapMemory on an object that is already mapped
if (mem_info->mem_range.size != 0) {
skip =
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
HandleToUint64(mem), kVUID_Core_MemTrack_InvalidMap,
"VkMapMemory: Attempting to map memory on an already-mapped %s.", report_data->FormatHandle(mem).c_str());
}
// Validate that offset + size is within object's allocationSize
if (size == VK_WHOLE_SIZE) {
if (offset >= mem_info->alloc_info.allocationSize) {
skip = log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
HandleToUint64(mem), kVUID_Core_MemTrack_InvalidMap,
"Mapping Memory from 0x%" PRIx64 " to 0x%" PRIx64
" with size of VK_WHOLE_SIZE oversteps total array size 0x%" PRIx64,
offset, mem_info->alloc_info.allocationSize, mem_info->alloc_info.allocationSize);
}
} else {
if ((offset + size) > mem_info->alloc_info.allocationSize) {
skip = log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
HandleToUint64(mem), "VUID-vkMapMemory-size-00681",
"Mapping Memory from 0x%" PRIx64 " to 0x%" PRIx64 " oversteps total array size 0x%" PRIx64 ".",
offset, size + offset, mem_info->alloc_info.allocationSize);
}
}
}
return skip;
}
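// Illustrative: for an allocation of size 1024, mapping (offset=512, size=VK_WHOLE_SIZE)
// covers bytes [512, 1024) and passes; (offset=1024, size=VK_WHOLE_SIZE) and
// (offset=512, size=768) both overstep the allocation and are flagged above.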
void CoreChecks::StoreMemRanges(VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
auto mem_info = GetDevMemState(mem);
if (mem_info) {
mem_info->mem_range.offset = offset;
mem_info->mem_range.size = size;
}
}
// Guard value for pad data
static char NoncoherentMemoryFillValue = 0xb;
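// Shadow-copy layout used below for non-coherent mappings (illustrative, assuming
// minMemoryMapAlignment = 64 and a map of offset = 100, size = 256):
//   start_offset = 100 % 64 = 36
//   malloc size  = 2 * 64 (guard pads) + 256 + 64 (alignment slack) + 36 = 484 bytes
//   shadow_copy  = base rounded up to a 64-byte boundary, plus start_offset, so that
//                  (*ppData - offset) ends up aligned to minMemoryMapAlignment as required.
// Guard bytes on both sides are filled with NoncoherentMemoryFillValue to detect
// over- or under-writes.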
void CoreChecks::InitializeAndTrackMemory(VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size, void **ppData) {
auto mem_info = GetDevMemState(mem);
if (mem_info) {
mem_info->p_driver_data = *ppData;
uint32_t index = mem_info->alloc_info.memoryTypeIndex;
if (phys_dev_mem_props.memoryTypes[index].propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) {
mem_info->shadow_copy = 0;
} else {
if (size == VK_WHOLE_SIZE) {
size = mem_info->alloc_info.allocationSize - offset;
}
mem_info->shadow_pad_size = phys_dev_props.limits.minMemoryMapAlignment;
assert(SafeModulo(mem_info->shadow_pad_size, phys_dev_props.limits.minMemoryMapAlignment) == 0);
// Ensure start of mapped region reflects hardware alignment constraints
uint64_t map_alignment = phys_dev_props.limits.minMemoryMapAlignment;
// From spec: (ppData - offset) must be aligned to at least limits::minMemoryMapAlignment.
uint64_t start_offset = offset % map_alignment;
// Data passed to driver will be wrapped by a guardband of data to detect over- or under-writes.
mem_info->shadow_copy_base =
malloc(static_cast<size_t>(2 * mem_info->shadow_pad_size + size + map_alignment + start_offset));
mem_info->shadow_copy =
reinterpret_cast<char *>((reinterpret_cast<uintptr_t>(mem_info->shadow_copy_base) + map_alignment) &
~(map_alignment - 1)) +
start_offset;
assert(SafeModulo(reinterpret_cast<uintptr_t>(mem_info->shadow_copy) + mem_info->shadow_pad_size - start_offset,
map_alignment) == 0);
memset(mem_info->shadow_copy, NoncoherentMemoryFillValue, static_cast<size_t>(2 * mem_info->shadow_pad_size + size));
*ppData = static_cast<char *>(mem_info->shadow_copy) + mem_info->shadow_pad_size;
}
}
}
void CoreChecks::RetireFence(VkFence fence) {
auto pFence = GetFenceState(fence);
if (pFence && pFence->scope == kSyncScopeInternal) {
if (pFence->signaler.first != VK_NULL_HANDLE) {
// Fence signaller is a queue -- use this as proof that prior operations on that queue have completed.
RetireWorkOnQueue(GetQueueState(pFence->signaler.first), pFence->signaler.second, true);
} else {
// Fence signaller is the WSI. We're not tracking what the WSI op actually /was/ in CV yet, but we need to mark
// the fence as retired.
pFence->state = FENCE_RETIRED;
}
}
}
bool CoreChecks::PreCallValidateWaitForFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences, VkBool32 waitAll,
uint64_t timeout) {
// Verify fence status of submitted fences
bool skip = false;
for (uint32_t i = 0; i < fenceCount; i++) {
skip |= VerifyQueueStateToFence(pFences[i]);
}
return skip;
}
void CoreChecks::PostCallRecordWaitForFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences, VkBool32 waitAll,
uint64_t timeout, VkResult result) {
if (VK_SUCCESS != result) return;
// When we know that all fences are complete we can clean/remove their CBs
if ((VK_TRUE == waitAll) || (1 == fenceCount)) {
for (uint32_t i = 0; i < fenceCount; i++) {
RetireFence(pFences[i]);
}
}
    // NOTE: The alternate case, where only some of the fences have completed, is not handled
    // here. In that case the app must call vkGetFenceStatus() to determine which fences
    // completed, at which point we clean/remove their CBs if complete.
}
void CoreChecks::PostCallRecordGetFenceStatus(VkDevice device, VkFence fence, VkResult result) {
if (VK_SUCCESS != result) return;
RetireFence(fence);
}
void CoreChecks::RecordGetDeviceQueueState(uint32_t queue_family_index, VkQueue queue) {
// Add queue to tracking set only if it is new
auto queue_is_new = queues.emplace(queue);
    if (queue_is_new.second) {
QUEUE_STATE *queue_state = &queueMap[queue];
queue_state->queue = queue;
queue_state->queueFamilyIndex = queue_family_index;
queue_state->seq = 0;
}
}
bool CoreChecks::ValidateGetDeviceQueue(uint32_t queueFamilyIndex, uint32_t queueIndex, VkQueue *pQueue, const char *valid_qfi_vuid,
const char *qfi_in_range_vuid) {
bool skip = false;
skip |= ValidateDeviceQueueFamily(queueFamilyIndex, "vkGetDeviceQueue", "queueFamilyIndex", valid_qfi_vuid);
const auto &queue_data = queue_family_index_map.find(queueFamilyIndex);
if (queue_data != queue_family_index_map.end() && queue_data->second <= queueIndex) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
qfi_in_range_vuid,
"vkGetDeviceQueue: queueIndex (=%" PRIu32
") is not less than the number of queues requested from queueFamilyIndex (=%" PRIu32
") when the device was created (i.e. is not less than %" PRIu32 ").",
queueIndex, queueFamilyIndex, queue_data->second);
}
return skip;
}
bool CoreChecks::PreCallValidateGetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex, VkQueue *pQueue) {
return ValidateGetDeviceQueue(queueFamilyIndex, queueIndex, pQueue, "VUID-vkGetDeviceQueue-queueFamilyIndex-00384",
"VUID-vkGetDeviceQueue-queueIndex-00385");
}
void CoreChecks::PostCallRecordGetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex, VkQueue *pQueue) {
RecordGetDeviceQueueState(queueFamilyIndex, *pQueue);
}
void CoreChecks::PostCallRecordGetDeviceQueue2(VkDevice device, const VkDeviceQueueInfo2 *pQueueInfo, VkQueue *pQueue) {
RecordGetDeviceQueueState(pQueueInfo->queueFamilyIndex, *pQueue);
}
bool CoreChecks::PreCallValidateQueueWaitIdle(VkQueue queue) {
QUEUE_STATE *queue_state = GetQueueState(queue);
return VerifyQueueStateToSeq(queue_state, queue_state->seq + queue_state->submissions.size());
}
void CoreChecks::PostCallRecordQueueWaitIdle(VkQueue queue, VkResult result) {
if (VK_SUCCESS != result) return;
QUEUE_STATE *queue_state = GetQueueState(queue);
RetireWorkOnQueue(queue_state, queue_state->seq + queue_state->submissions.size(), true);
}
bool CoreChecks::PreCallValidateDeviceWaitIdle(VkDevice device) {
bool skip = false;
for (auto &queue : queueMap) {
skip |= VerifyQueueStateToSeq(&queue.second, queue.second.seq + queue.second.submissions.size());
}
return skip;
}
void CoreChecks::PostCallRecordDeviceWaitIdle(VkDevice device, VkResult result) {
if (VK_SUCCESS != result) return;
for (auto &queue : queueMap) {
RetireWorkOnQueue(&queue.second, queue.second.seq + queue.second.submissions.size(), true);
}
}
bool CoreChecks::PreCallValidateDestroyFence(VkDevice device, VkFence fence, const VkAllocationCallbacks *pAllocator) {
const FENCE_STATE *fence_node = GetFenceState(fence);
bool skip = false;
if (fence_node) {
if (fence_node->scope == kSyncScopeInternal && fence_node->state == FENCE_INFLIGHT) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT, HandleToUint64(fence),
"VUID-vkDestroyFence-fence-01120", "%s is in use.", report_data->FormatHandle(fence).c_str());
}
}
return skip;
}
void ValidationStateTracker::PreCallRecordDestroyFence(VkDevice device, VkFence fence, const VkAllocationCallbacks *pAllocator) {
if (!fence) return;
fenceMap.erase(fence);
}
bool CoreChecks::PreCallValidateDestroySemaphore(VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks *pAllocator) {
const SEMAPHORE_STATE *sema_node = GetSemaphoreState(semaphore);
const VulkanTypedHandle obj_struct(semaphore, kVulkanObjectTypeSemaphore);
bool skip = false;
if (sema_node) {
skip |= ValidateObjectNotInUse(sema_node, obj_struct, "vkDestroySemaphore", "VUID-vkDestroySemaphore-semaphore-01137");
}
return skip;
}
void ValidationStateTracker::PreCallRecordDestroySemaphore(VkDevice device, VkSemaphore semaphore,
const VkAllocationCallbacks *pAllocator) {
if (!semaphore) return;
semaphoreMap.erase(semaphore);
}
bool CoreChecks::PreCallValidateDestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks *pAllocator) {
const EVENT_STATE *event_state = GetEventState(event);
const VulkanTypedHandle obj_struct(event, kVulkanObjectTypeEvent);
bool skip = false;
if (event_state) {
skip |= ValidateObjectNotInUse(event_state, obj_struct, "vkDestroyEvent", "VUID-vkDestroyEvent-event-01145");
}
return skip;
}
void ValidationStateTracker::PreCallRecordDestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks *pAllocator) {
if (!event) return;
EVENT_STATE *event_state = GetEventState(event);
const VulkanTypedHandle obj_struct(event, kVulkanObjectTypeEvent);
InvalidateCommandBuffers(event_state->cb_bindings, obj_struct);
eventMap.erase(event);
}
bool CoreChecks::PreCallValidateDestroyQueryPool(VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks *pAllocator) {
if (disabled.query_validation) return false;
const QUERY_POOL_STATE *qp_state = GetQueryPoolState(queryPool);
const VulkanTypedHandle obj_struct(queryPool, kVulkanObjectTypeQueryPool);
bool skip = false;
if (qp_state) {
skip |= ValidateObjectNotInUse(qp_state, obj_struct, "vkDestroyQueryPool", "VUID-vkDestroyQueryPool-queryPool-00793");
}
return skip;
}
void ValidationStateTracker::PreCallRecordDestroyQueryPool(VkDevice device, VkQueryPool queryPool,
const VkAllocationCallbacks *pAllocator) {
if (!queryPool) return;
QUERY_POOL_STATE *qp_state = GetQueryPoolState(queryPool);
const VulkanTypedHandle obj_struct(queryPool, kVulkanObjectTypeQueryPool);
InvalidateCommandBuffers(qp_state->cb_bindings, obj_struct);
queryPoolMap.erase(queryPool);
}
bool CoreChecks::ValidateGetQueryPoolResultsFlags(VkQueryPool queryPool, VkQueryResultFlags flags) const {
bool skip = false;
const auto query_pool_state = GetQueryPoolState(queryPool);
if (query_pool_state) {
if ((query_pool_state->createInfo.queryType == VK_QUERY_TYPE_TIMESTAMP) && (flags & VK_QUERY_RESULT_PARTIAL_BIT)) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, HandleToUint64(queryPool),
"VUID-vkGetQueryPoolResults-queryType-00818",
"%s was created with a queryType of VK_QUERY_TYPE_TIMESTAMP but flags contains VK_QUERY_RESULT_PARTIAL_BIT.",
report_data->FormatHandle(queryPool).c_str());
}
}
return skip;
}
bool CoreChecks::ValidateGetQueryPoolResultsQueries(VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount) const {
bool skip = false;
QueryObject query_obj{queryPool, 0u};
for (uint32_t i = 0; i < queryCount; ++i) {
query_obj.query = firstQuery + i;
if (queryToStateMap.count(query_obj) == 0) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT,
HandleToUint64(queryPool), kVUID_Core_DrawState_InvalidQuery,
"vkGetQueryPoolResults() on %s and query %" PRIu32 ": unknown query",
report_data->FormatHandle(queryPool).c_str(), query_obj.query);
}
}
return skip;
}
bool CoreChecks::PreCallValidateGetQueryPoolResults(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery,
uint32_t queryCount, size_t dataSize, void *pData, VkDeviceSize stride,
VkQueryResultFlags flags) {
if (disabled.query_validation) return false;
bool skip = false;
skip |= ValidateQueryPoolStride("VUID-vkGetQueryPoolResults-flags-00814", "VUID-vkGetQueryPoolResults-flags-00815", stride,
"dataSize", dataSize, flags);
skip |= ValidateGetQueryPoolResultsFlags(queryPool, flags);
skip |= ValidateGetQueryPoolResultsQueries(queryPool, firstQuery, queryCount);
return skip;
}
bool CoreChecks::ValidateInsertMemoryRange(const VulkanTypedHandle &typed_handle, const DEVICE_MEMORY_STATE *mem_info,
VkDeviceSize memoryOffset, const VkMemoryRequirements &memRequirements, bool is_linear,
const char *api_name) const {
bool skip = false;
if (memoryOffset >= mem_info->alloc_info.allocationSize) {
const char *error_code = nullptr;
if (typed_handle.type == kVulkanObjectTypeBuffer) {
error_code = "VUID-vkBindBufferMemory-memoryOffset-01031";
} else if (typed_handle.type == kVulkanObjectTypeImage) {
error_code = "VUID-vkBindImageMemory-memoryOffset-01046";
} else if (typed_handle.type == kVulkanObjectTypeAccelerationStructureNV) {
error_code = "VUID-VkBindAccelerationStructureMemoryInfoNV-memoryOffset-02451";
} else {
// Unsupported object type
assert(false);
}
skip = log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
HandleToUint64(mem_info->mem), error_code,
"In %s, attempting to bind %s to %s, memoryOffset=0x%" PRIxLEAST64
" must be less than the memory allocation size 0x%" PRIxLEAST64 ".",
api_name, report_data->FormatHandle(mem_info->mem).c_str(), report_data->FormatHandle(typed_handle).c_str(),
memoryOffset, mem_info->alloc_info.allocationSize);
}
return skip;
}
// Object with the given handle is being bound to memory with the given mem_info struct.
// Track the binding by inserting the handle into the appropriate bound-object set on mem_info.
// Note: range/aliasing validation (overlap of linear and non-linear ranges) is handled
// separately in ValidateInsertMemoryRange; this state tracker only records the association,
// so the memoryOffset, memRequirements, and is_linear parameters are currently unused here.
void ValidationStateTracker::InsertMemoryRange(const VulkanTypedHandle &typed_handle, DEVICE_MEMORY_STATE *mem_info,
VkDeviceSize memoryOffset, VkMemoryRequirements memRequirements, bool is_linear) {
if (typed_handle.type == kVulkanObjectTypeImage) {
mem_info->bound_images.insert(typed_handle.handle);
} else if (typed_handle.type == kVulkanObjectTypeBuffer) {
mem_info->bound_buffers.insert(typed_handle.handle);
} else if (typed_handle.type == kVulkanObjectTypeAccelerationStructureNV) {
mem_info->bound_acceleration_structures.insert(typed_handle.handle);
} else {
// Unsupported object type
assert(false);
}
}
bool CoreChecks::ValidateInsertImageMemoryRange(VkImage image, const DEVICE_MEMORY_STATE *mem_info, VkDeviceSize mem_offset,
const VkMemoryRequirements &mem_reqs, bool is_linear, const char *api_name) const {
return ValidateInsertMemoryRange(VulkanTypedHandle(image, kVulkanObjectTypeImage), mem_info, mem_offset, mem_reqs, is_linear,
api_name);
}
void ValidationStateTracker::InsertImageMemoryRange(VkImage image, DEVICE_MEMORY_STATE *mem_info, VkDeviceSize mem_offset,
VkMemoryRequirements mem_reqs, bool is_linear) {
InsertMemoryRange(VulkanTypedHandle(image, kVulkanObjectTypeImage), mem_info, mem_offset, mem_reqs, is_linear);
}
bool CoreChecks::ValidateInsertBufferMemoryRange(VkBuffer buffer, const DEVICE_MEMORY_STATE *mem_info, VkDeviceSize mem_offset,
const VkMemoryRequirements &mem_reqs, const char *api_name) const {
return ValidateInsertMemoryRange(VulkanTypedHandle(buffer, kVulkanObjectTypeBuffer), mem_info, mem_offset, mem_reqs, true,
api_name);
}
void ValidationStateTracker::InsertBufferMemoryRange(VkBuffer buffer, DEVICE_MEMORY_STATE *mem_info, VkDeviceSize mem_offset,
const VkMemoryRequirements &mem_reqs) {
InsertMemoryRange(VulkanTypedHandle(buffer, kVulkanObjectTypeBuffer), mem_info, mem_offset, mem_reqs, true);
}
bool CoreChecks::ValidateInsertAccelerationStructureMemoryRange(VkAccelerationStructureNV as, const DEVICE_MEMORY_STATE *mem_info,
VkDeviceSize mem_offset, const VkMemoryRequirements &mem_reqs,
const char *api_name) const {
return ValidateInsertMemoryRange(VulkanTypedHandle(as, kVulkanObjectTypeAccelerationStructureNV), mem_info, mem_offset,
mem_reqs, true, api_name);
}
void ValidationStateTracker::InsertAccelerationStructureMemoryRange(VkAccelerationStructureNV as, DEVICE_MEMORY_STATE *mem_info,
VkDeviceSize mem_offset, const VkMemoryRequirements &mem_reqs) {
InsertMemoryRange(VulkanTypedHandle(as, kVulkanObjectTypeAccelerationStructureNV), mem_info, mem_offset, mem_reqs, true);
}
// Remove the handle from the appropriate bound-object set on the given mem_info.
static void RemoveMemoryRange(uint64_t handle, DEVICE_MEMORY_STATE *mem_info, VulkanObjectType object_type) {
if (object_type == kVulkanObjectTypeImage) {
mem_info->bound_images.erase(handle);
} else if (object_type == kVulkanObjectTypeBuffer) {
mem_info->bound_buffers.erase(handle);
} else if (object_type == kVulkanObjectTypeAccelerationStructureNV) {
mem_info->bound_acceleration_structures.erase(handle);
} else {
// Unsupported object type
assert(false);
}
}
void ValidationStateTracker::RemoveBufferMemoryRange(uint64_t handle, DEVICE_MEMORY_STATE *mem_info) {
RemoveMemoryRange(handle, mem_info, kVulkanObjectTypeBuffer);
}
void ValidationStateTracker::RemoveImageMemoryRange(uint64_t handle, DEVICE_MEMORY_STATE *mem_info) {
RemoveMemoryRange(handle, mem_info, kVulkanObjectTypeImage);
}
void ValidationStateTracker::RemoveAccelerationStructureMemoryRange(uint64_t handle, DEVICE_MEMORY_STATE *mem_info) {
RemoveMemoryRange(handle, mem_info, kVulkanObjectTypeAccelerationStructureNV);
}
bool CoreChecks::ValidateMemoryTypes(const DEVICE_MEMORY_STATE *mem_info, const uint32_t memory_type_bits, const char *funcName,
const char *msgCode) const {
bool skip = false;
if (((1 << mem_info->alloc_info.memoryTypeIndex) & memory_type_bits) == 0) {
skip = log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
HandleToUint64(mem_info->mem), msgCode,
"%s(): MemoryRequirements->memoryTypeBits (0x%X) for this object type are not compatible with the memory "
"type (0x%X) of %s.",
funcName, memory_type_bits, mem_info->alloc_info.memoryTypeIndex,
report_data->FormatHandle(mem_info->mem).c_str());
}
return skip;
}
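// Illustrative: requirements with memoryTypeBits == 0x7 accept memoryTypeIndex 0, 1, or 2;
// memory allocated from memoryTypeIndex 3 (1 << 3 == 0x8) fails the check above.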
bool CoreChecks::ValidateBindBufferMemory(VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset,
const char *api_name) const {
const BUFFER_STATE *buffer_state = GetBufferState(buffer);
bool skip = false;
if (buffer_state) {
// Track objects tied to memory
uint64_t buffer_handle = HandleToUint64(buffer);
const VulkanTypedHandle obj_struct(buffer, kVulkanObjectTypeBuffer);
skip = ValidateSetMemBinding(mem, obj_struct, api_name);
if (!buffer_state->memory_requirements_checked) {
// There's not an explicit requirement in the spec to call vkGetBufferMemoryRequirements() prior to calling
// BindBufferMemory, but it's implied in that memory being bound must conform with VkMemoryRequirements from
// vkGetBufferMemoryRequirements()
skip |= log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, buffer_handle,
kVUID_Core_BindBuffer_NoMemReqQuery,
"%s: Binding memory to %s but vkGetBufferMemoryRequirements() has not been called on that buffer.",
api_name, report_data->FormatHandle(buffer).c_str());
// In the following we'll use the information we got in CreateBuffer
}
// Validate bound memory range information
const auto mem_info = GetDevMemState(mem);
if (mem_info) {
skip |= ValidateInsertBufferMemoryRange(buffer, mem_info, memoryOffset, buffer_state->requirements, api_name);
skip |= ValidateMemoryTypes(mem_info, buffer_state->requirements.memoryTypeBits, api_name,
"VUID-vkBindBufferMemory-memory-01035");
}
// Validate memory requirements alignment
if (SafeModulo(memoryOffset, buffer_state->requirements.alignment) != 0) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, buffer_handle,
"VUID-vkBindBufferMemory-memoryOffset-01036",
"%s: memoryOffset is 0x%" PRIxLEAST64
" but must be an integer multiple of the VkMemoryRequirements::alignment value 0x%" PRIxLEAST64
", returned from a call to vkGetBufferMemoryRequirements with buffer.",
api_name, memoryOffset, buffer_state->requirements.alignment);
}
if (mem_info) {
// Validate memory requirements size
if (buffer_state->requirements.size > (mem_info->alloc_info.allocationSize - memoryOffset)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, buffer_handle,
"VUID-vkBindBufferMemory-size-01037",
"%s: memory size minus memoryOffset is 0x%" PRIxLEAST64
" but must be at least as large as VkMemoryRequirements::size value 0x%" PRIxLEAST64
", returned from a call to vkGetBufferMemoryRequirements with buffer.",
api_name, mem_info->alloc_info.allocationSize - memoryOffset, buffer_state->requirements.size);
}
// Validate dedicated allocation
if (mem_info->is_dedicated && ((mem_info->dedicated_buffer != buffer) || (memoryOffset != 0))) {
// TODO: Add vkBindBufferMemory2KHR error message when added to spec.
auto validation_error = kVUIDUndefined;
if (strcmp(api_name, "vkBindBufferMemory()") == 0) {
validation_error = "VUID-vkBindBufferMemory-memory-01508";
}
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, buffer_handle,
validation_error,
"%s: for dedicated %s, VkMemoryDedicatedAllocateInfoKHR::buffer %s must be equal "
"to %s and memoryOffset 0x%" PRIxLEAST64 " must be zero.",
api_name, report_data->FormatHandle(mem).c_str(),
report_data->FormatHandle(mem_info->dedicated_buffer).c_str(),
report_data->FormatHandle(buffer).c_str(), memoryOffset);
}
}
}
return skip;
}
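// For reference, the application-side sequence these checks assume (illustrative sketch):
//
//   VkMemoryRequirements reqs;
//   vkGetBufferMemoryRequirements(device, buffer, &reqs);  // clears the NoMemReqQuery warning
//   // choose a memoryTypeIndex allowed by reqs.memoryTypeBits, allocate >= reqs.size bytes
//   vkBindBufferMemory(device, buffer, memory, offset);    // offset must be a multiple of reqs.alignment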
bool CoreChecks::PreCallValidateBindBufferMemory(VkDevice device, VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
const char *api_name = "vkBindBufferMemory()";
return ValidateBindBufferMemory(buffer, mem, memoryOffset, api_name);
}
void ValidationStateTracker::UpdateBindBufferMemoryState(VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
BUFFER_STATE *buffer_state = GetBufferState(buffer);
if (buffer_state) {
// Track bound memory range information
auto mem_info = GetDevMemState(mem);
if (mem_info) {
InsertBufferMemoryRange(buffer, mem_info, memoryOffset, buffer_state->requirements);
}
// Track objects tied to memory
SetMemBinding(mem, buffer_state, memoryOffset, VulkanTypedHandle(buffer, kVulkanObjectTypeBuffer));
}
}
void ValidationStateTracker::PostCallRecordBindBufferMemory(VkDevice device, VkBuffer buffer, VkDeviceMemory mem,
VkDeviceSize memoryOffset, VkResult result) {
if (VK_SUCCESS != result) return;
UpdateBindBufferMemoryState(buffer, mem, memoryOffset);
}
bool CoreChecks::PreCallValidateBindBufferMemory2(VkDevice device, uint32_t bindInfoCount,
const VkBindBufferMemoryInfoKHR *pBindInfos) {
char api_name[64];
bool skip = false;
for (uint32_t i = 0; i < bindInfoCount; i++) {
        snprintf(api_name, sizeof(api_name), "vkBindBufferMemory2() pBindInfos[%u]", i);
skip |= ValidateBindBufferMemory(pBindInfos[i].buffer, pBindInfos[i].memory, pBindInfos[i].memoryOffset, api_name);
}
return skip;
}
bool CoreChecks::PreCallValidateBindBufferMemory2KHR(VkDevice device, uint32_t bindInfoCount,
const VkBindBufferMemoryInfoKHR *pBindInfos) {
char api_name[64];
bool skip = false;
for (uint32_t i = 0; i < bindInfoCount; i++) {
        snprintf(api_name, sizeof(api_name), "vkBindBufferMemory2KHR() pBindInfos[%u]", i);
skip |= ValidateBindBufferMemory(pBindInfos[i].buffer, pBindInfos[i].memory, pBindInfos[i].memoryOffset, api_name);
}
return skip;
}
void ValidationStateTracker::PostCallRecordBindBufferMemory2(VkDevice device, uint32_t bindInfoCount,
const VkBindBufferMemoryInfoKHR *pBindInfos, VkResult result) {
for (uint32_t i = 0; i < bindInfoCount; i++) {
UpdateBindBufferMemoryState(pBindInfos[i].buffer, pBindInfos[i].memory, pBindInfos[i].memoryOffset);
}
}
void ValidationStateTracker::PostCallRecordBindBufferMemory2KHR(VkDevice device, uint32_t bindInfoCount,
const VkBindBufferMemoryInfoKHR *pBindInfos, VkResult result) {
for (uint32_t i = 0; i < bindInfoCount; i++) {
UpdateBindBufferMemoryState(pBindInfos[i].buffer, pBindInfos[i].memory, pBindInfos[i].memoryOffset);
}
}
void CoreChecks::RecordGetBufferMemoryRequirementsState(VkBuffer buffer, VkMemoryRequirements *pMemoryRequirements) {
BUFFER_STATE *buffer_state = GetBufferState(buffer);
if (buffer_state) {
buffer_state->requirements = *pMemoryRequirements;
buffer_state->memory_requirements_checked = true;
}
}
void CoreChecks::PostCallRecordGetBufferMemoryRequirements(VkDevice device, VkBuffer buffer,
VkMemoryRequirements *pMemoryRequirements) {
RecordGetBufferMemoryRequirementsState(buffer, pMemoryRequirements);
}
void CoreChecks::PostCallRecordGetBufferMemoryRequirements2(VkDevice device, const VkBufferMemoryRequirementsInfo2KHR *pInfo,
VkMemoryRequirements2KHR *pMemoryRequirements) {
RecordGetBufferMemoryRequirementsState(pInfo->buffer, &pMemoryRequirements->memoryRequirements);
}
void CoreChecks::PostCallRecordGetBufferMemoryRequirements2KHR(VkDevice device, const VkBufferMemoryRequirementsInfo2KHR *pInfo,
VkMemoryRequirements2KHR *pMemoryRequirements) {
RecordGetBufferMemoryRequirementsState(pInfo->buffer, &pMemoryRequirements->memoryRequirements);
}
bool CoreChecks::ValidateGetImageMemoryRequirements2(const VkImageMemoryRequirementsInfo2 *pInfo) {
bool skip = false;
if (device_extensions.vk_android_external_memory_android_hardware_buffer) {
skip |= ValidateGetImageMemoryRequirements2ANDROID(pInfo->image);
}
return skip;
}
bool CoreChecks::PreCallValidateGetImageMemoryRequirements2(VkDevice device, const VkImageMemoryRequirementsInfo2 *pInfo,
VkMemoryRequirements2 *pMemoryRequirements) {
return ValidateGetImageMemoryRequirements2(pInfo);
}
bool CoreChecks::PreCallValidateGetImageMemoryRequirements2KHR(VkDevice device, const VkImageMemoryRequirementsInfo2 *pInfo,
VkMemoryRequirements2 *pMemoryRequirements) {
return ValidateGetImageMemoryRequirements2(pInfo);
}
void CoreChecks::RecordGetImageMemoryRequirementsState(VkImage image, VkMemoryRequirements *pMemoryRequirements) {
IMAGE_STATE *image_state = GetImageState(image);
if (image_state) {
image_state->requirements = *pMemoryRequirements;
image_state->memory_requirements_checked = true;
}
}
void CoreChecks::PostCallRecordGetImageMemoryRequirements(VkDevice device, VkImage image,
VkMemoryRequirements *pMemoryRequirements) {
RecordGetImageMemoryRequirementsState(image, pMemoryRequirements);
}
void CoreChecks::PostCallRecordGetImageMemoryRequirements2(VkDevice device, const VkImageMemoryRequirementsInfo2 *pInfo,
VkMemoryRequirements2 *pMemoryRequirements) {
RecordGetImageMemoryRequirementsState(pInfo->image, &pMemoryRequirements->memoryRequirements);
}
void CoreChecks::PostCallRecordGetImageMemoryRequirements2KHR(VkDevice device, const VkImageMemoryRequirementsInfo2 *pInfo,
VkMemoryRequirements2 *pMemoryRequirements) {
RecordGetImageMemoryRequirementsState(pInfo->image, &pMemoryRequirements->memoryRequirements);
}
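// The memory_requirements_checked flag recorded above lets the bind-time checks warn when an application binds memory it
// never sized. A minimal sketch of the expected call order, assuming `device` and `image` are valid and `memory` was
// allocated from a type permitted by reqs.memoryTypeBits:
//     VkMemoryRequirements reqs;
//     vkGetImageMemoryRequirements(device, image, &reqs);  // sets memory_requirements_checked
//     vkBindImageMemory(device, image, memory, 0);         // offset must respect reqs.alignment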
static void RecordGetImageSparseMemoryRequirementsState(IMAGE_STATE *image_state,
VkSparseImageMemoryRequirements *sparse_image_memory_requirements) {
image_state->sparse_requirements.emplace_back(*sparse_image_memory_requirements);
if (sparse_image_memory_requirements->formatProperties.aspectMask & VK_IMAGE_ASPECT_METADATA_BIT) {
image_state->sparse_metadata_required = true;
}
}
void CoreChecks::PostCallRecordGetImageSparseMemoryRequirements(VkDevice device, VkImage image,
uint32_t *pSparseMemoryRequirementCount,
VkSparseImageMemoryRequirements *pSparseMemoryRequirements) {
auto image_state = GetImageState(image);
image_state->get_sparse_reqs_called = true;
if (!pSparseMemoryRequirements) return;
for (uint32_t i = 0; i < *pSparseMemoryRequirementCount; i++) {
RecordGetImageSparseMemoryRequirementsState(image_state, &pSparseMemoryRequirements[i]);
}
}
void CoreChecks::PostCallRecordGetImageSparseMemoryRequirements2(VkDevice device,
const VkImageSparseMemoryRequirementsInfo2KHR *pInfo,
uint32_t *pSparseMemoryRequirementCount,
VkSparseImageMemoryRequirements2KHR *pSparseMemoryRequirements) {
auto image_state = GetImageState(pInfo->image);
image_state->get_sparse_reqs_called = true;
if (!pSparseMemoryRequirements) return;
for (uint32_t i = 0; i < *pSparseMemoryRequirementCount; i++) {
assert(!pSparseMemoryRequirements[i].pNext); // TODO: If an extension is ever added here we need to handle it
RecordGetImageSparseMemoryRequirementsState(image_state, &pSparseMemoryRequirements[i].memoryRequirements);
}
}
void CoreChecks::PostCallRecordGetImageSparseMemoryRequirements2KHR(
VkDevice device, const VkImageSparseMemoryRequirementsInfo2KHR *pInfo, uint32_t *pSparseMemoryRequirementCount,
VkSparseImageMemoryRequirements2KHR *pSparseMemoryRequirements) {
auto image_state = GetImageState(pInfo->image);
image_state->get_sparse_reqs_called = true;
if (!pSparseMemoryRequirements) return;
for (uint32_t i = 0; i < *pSparseMemoryRequirementCount; i++) {
assert(!pSparseMemoryRequirements[i].pNext); // TODO: If an extension is ever added here we need to handle it
RecordGetImageSparseMemoryRequirementsState(image_state, &pSparseMemoryRequirements[i].memoryRequirements);
}
}
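// The recorders above tolerate a null pSparseMemoryRequirements because sparse requirements use the standard two-call
// enumeration pattern; a sketch, assuming `device` and `image` are valid:
//     uint32_t count = 0;
//     vkGetImageSparseMemoryRequirements(device, image, &count, nullptr);
//     std::vector<VkSparseImageMemoryRequirements> reqs(count);
//     vkGetImageSparseMemoryRequirements(device, image, &count, reqs.data());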
bool CoreChecks::PreCallValidateGetPhysicalDeviceImageFormatProperties2(VkPhysicalDevice physicalDevice,
const VkPhysicalDeviceImageFormatInfo2 *pImageFormatInfo,
VkImageFormatProperties2 *pImageFormatProperties) {
// Can't wrap AHB-specific validation in a device extension check here, but no harm
bool skip = ValidateGetPhysicalDeviceImageFormatProperties2ANDROID(report_data, pImageFormatInfo, pImageFormatProperties);
return skip;
}
bool CoreChecks::PreCallValidateGetPhysicalDeviceImageFormatProperties2KHR(VkPhysicalDevice physicalDevice,
const VkPhysicalDeviceImageFormatInfo2 *pImageFormatInfo,
VkImageFormatProperties2 *pImageFormatProperties) {
// Can't wrap AHB-specific validation in a device extension check here, but no harm
bool skip = ValidateGetPhysicalDeviceImageFormatProperties2ANDROID(report_data, pImageFormatInfo, pImageFormatProperties);
return skip;
}
void ValidationStateTracker::PreCallRecordDestroyShaderModule(VkDevice device, VkShaderModule shaderModule,
const VkAllocationCallbacks *pAllocator) {
if (!shaderModule) return;
shaderModuleMap.erase(shaderModule);
}
bool CoreChecks::PreCallValidateDestroyPipeline(VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks *pAllocator) {
PIPELINE_STATE *pipeline_state = GetPipelineState(pipeline);
const VulkanTypedHandle obj_struct(pipeline, kVulkanObjectTypePipeline);
bool skip = false;
if (pipeline_state) {
skip |= ValidateObjectNotInUse(pipeline_state, obj_struct, "vkDestroyPipeline", "VUID-vkDestroyPipeline-pipeline-00765");
}
return skip;
}
void ValidationStateTracker::PreCallRecordDestroyPipeline(VkDevice device, VkPipeline pipeline,
const VkAllocationCallbacks *pAllocator) {
if (!pipeline) return;
PIPELINE_STATE *pipeline_state = GetPipelineState(pipeline);
const VulkanTypedHandle obj_struct(pipeline, kVulkanObjectTypePipeline);
// Any bound cmd buffers are now invalid
InvalidateCommandBuffers(pipeline_state->cb_bindings, obj_struct);
pipelineMap.erase(pipeline);
}
void CoreChecks::PreCallRecordDestroyPipeline(VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks *pAllocator) {
if (pipeline && enabled.gpu_validation) {
GpuPreCallRecordDestroyPipeline(pipeline);
}
StateTracker::PreCallRecordDestroyPipeline(device, pipeline, pAllocator);
}
void ValidationStateTracker::PreCallRecordDestroyPipelineLayout(VkDevice device, VkPipelineLayout pipelineLayout,
const VkAllocationCallbacks *pAllocator) {
if (!pipelineLayout) return;
pipelineLayoutMap.erase(pipelineLayout);
}
bool CoreChecks::PreCallValidateDestroySampler(VkDevice device, VkSampler sampler, const VkAllocationCallbacks *pAllocator) {
const SAMPLER_STATE *sampler_state = GetSamplerState(sampler);
const VulkanTypedHandle obj_struct(sampler, kVulkanObjectTypeSampler);
bool skip = false;
if (sampler_state) {
skip |= ValidateObjectNotInUse(sampler_state, obj_struct, "vkDestroySampler", "VUID-vkDestroySampler-sampler-01082");
}
return skip;
}
void ValidationStateTracker::PreCallRecordDestroySampler(VkDevice device, VkSampler sampler,
const VkAllocationCallbacks *pAllocator) {
if (!sampler) return;
SAMPLER_STATE *sampler_state = GetSamplerState(sampler);
const VulkanTypedHandle obj_struct(sampler, kVulkanObjectTypeSampler);
// Any bound cmd buffers are now invalid
if (sampler_state) {
InvalidateCommandBuffers(sampler_state->cb_bindings, obj_struct);
}
samplerMap.erase(sampler);
}
void ValidationStateTracker::PreCallRecordDestroyDescriptorSetLayout(VkDevice device, VkDescriptorSetLayout descriptorSetLayout,
const VkAllocationCallbacks *pAllocator) {
if (!descriptorSetLayout) return;
auto layout_it = descriptorSetLayoutMap.find(descriptorSetLayout);
if (layout_it != descriptorSetLayoutMap.end()) {
layout_it->second.get()->MarkDestroyed();
descriptorSetLayoutMap.erase(layout_it);
}
}
bool CoreChecks::PreCallValidateDestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool,
const VkAllocationCallbacks *pAllocator) {
DESCRIPTOR_POOL_STATE *desc_pool_state = GetDescriptorPoolState(descriptorPool);
const VulkanTypedHandle obj_struct(descriptorPool, kVulkanObjectTypeDescriptorPool);
bool skip = false;
if (desc_pool_state) {
skip |= ValidateObjectNotInUse(desc_pool_state, obj_struct, "vkDestroyDescriptorPool",
"VUID-vkDestroyDescriptorPool-descriptorPool-00303");
}
return skip;
}
void ValidationStateTracker::PreCallRecordDestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool,
const VkAllocationCallbacks *pAllocator) {
if (!descriptorPool) return;
DESCRIPTOR_POOL_STATE *desc_pool_state = GetDescriptorPoolState(descriptorPool);
const VulkanTypedHandle obj_struct(descriptorPool, kVulkanObjectTypeDescriptorPool);
if (desc_pool_state) {
// Any bound cmd buffers are now invalid
InvalidateCommandBuffers(desc_pool_state->cb_bindings, obj_struct);
// Free sets that were in this pool
for (auto ds : desc_pool_state->sets) {
FreeDescriptorSet(ds);
}
descriptorPoolMap.erase(descriptorPool);
}
}
// Verify the command buffer in the given cb_node has a zero in_use count, and return the skip result
// This function is only valid at a point when cmdBuffer is being reset or freed
bool CoreChecks::CheckCommandBufferInFlight(const CMD_BUFFER_STATE *cb_node, const char *action, const char *error_code) const {
bool skip = false;
if (cb_node->in_use.load()) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), error_code, "Attempt to %s %s which is in use.", action,
report_data->FormatHandle(cb_node->commandBuffer).c_str());
}
return skip;
}
// Iterate over all cmdBuffers in given commandPool and verify that each is not in use
bool CoreChecks::CheckCommandBuffersInFlight(const COMMAND_POOL_STATE *pPool, const char *action, const char *error_code) const {
bool skip = false;
for (auto cmd_buffer : pPool->commandBuffers) {
skip |= CheckCommandBufferInFlight(GetCBState(cmd_buffer), action, error_code);
}
return skip;
}
// Free all command buffers in given list, removing all references/links to them using ResetCommandBufferState
void ValidationStateTracker::FreeCommandBufferStates(COMMAND_POOL_STATE *pool_state, const uint32_t command_buffer_count,
const VkCommandBuffer *command_buffers) {
for (uint32_t i = 0; i < command_buffer_count; i++) {
auto cb_state = GetCBState(command_buffers[i]);
// Remove references to command buffer's state and delete
if (cb_state) {
// reset prior to delete, removing various references to it.
// TODO: fix this, it's insane.
ResetCommandBufferState(cb_state->commandBuffer);
// Remove the cb_state's references from COMMAND_POOL_STATEs
pool_state->commandBuffers.erase(command_buffers[i]);
// Remove the cb debug labels
EraseCmdDebugUtilsLabel(report_data, cb_state->commandBuffer);
// Remove CBState from CB map
commandBufferMap.erase(cb_state->commandBuffer);
}
}
}
bool CoreChecks::PreCallValidateFreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount,
const VkCommandBuffer *pCommandBuffers) {
bool skip = false;
for (uint32_t i = 0; i < commandBufferCount; i++) {
const auto *cb_node = GetCBState(pCommandBuffers[i]);
// Verify the command buffer is not in flight before it can be freed
if (cb_node) {
skip |= CheckCommandBufferInFlight(cb_node, "free", "VUID-vkFreeCommandBuffers-pCommandBuffers-00047");
}
}
return skip;
}
void ValidationStateTracker::PreCallRecordFreeCommandBuffers(VkDevice device, VkCommandPool commandPool,
uint32_t commandBufferCount, const VkCommandBuffer *pCommandBuffers) {
auto pPool = GetCommandPoolState(commandPool);
FreeCommandBufferStates(pPool, commandBufferCount, pCommandBuffers);
}
bool CoreChecks::PreCallValidateCreateCommandPool(VkDevice device, const VkCommandPoolCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkCommandPool *pCommandPool) {
return ValidateDeviceQueueFamily(pCreateInfo->queueFamilyIndex, "vkCreateCommandPool", "pCreateInfo->queueFamilyIndex",
"VUID-vkCreateCommandPool-queueFamilyIndex-01937");
}
void ValidationStateTracker::PostCallRecordCreateCommandPool(VkDevice device, const VkCommandPoolCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkCommandPool *pCommandPool,
VkResult result) {
if (VK_SUCCESS != result) return;
std::unique_ptr<COMMAND_POOL_STATE> cmd_pool_state(new COMMAND_POOL_STATE{});
cmd_pool_state->createFlags = pCreateInfo->flags;
cmd_pool_state->queueFamilyIndex = pCreateInfo->queueFamilyIndex;
commandPoolMap[*pCommandPool] = std::move(cmd_pool_state);
}
bool CoreChecks::PreCallValidateCreateQueryPool(VkDevice device, const VkQueryPoolCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkQueryPool *pQueryPool) {
if (disabled.query_validation) return false;
bool skip = false;
if (pCreateInfo && pCreateInfo->queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS) {
if (!enabled_features.core.pipelineStatisticsQuery) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0,
"VUID-VkQueryPoolCreateInfo-queryType-00791",
"Query pool with type VK_QUERY_TYPE_PIPELINE_STATISTICS created on a device with "
"VkDeviceCreateInfo.pEnabledFeatures.pipelineStatisticsQuery == VK_FALSE.");
}
}
return skip;
}
void ValidationStateTracker::PostCallRecordCreateQueryPool(VkDevice device, const VkQueryPoolCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkQueryPool *pQueryPool,
VkResult result) {
if (VK_SUCCESS != result) return;
std::unique_ptr<QUERY_POOL_STATE> query_pool_state(new QUERY_POOL_STATE{});
query_pool_state->createInfo = *pCreateInfo;
queryPoolMap[*pQueryPool] = std::move(query_pool_state);
QueryObject query_obj{*pQueryPool, 0u};
for (uint32_t i = 0; i < pCreateInfo->queryCount; ++i) {
query_obj.query = i;
queryToStateMap[query_obj] = QUERYSTATE_UNKNOWN;
}
}
bool CoreChecks::PreCallValidateDestroyCommandPool(VkDevice device, VkCommandPool commandPool,
const VkAllocationCallbacks *pAllocator) {
const COMMAND_POOL_STATE *cp_state = GetCommandPoolState(commandPool);
bool skip = false;
if (cp_state) {
// Verify that command buffers in pool are complete (not in-flight)
skip |= CheckCommandBuffersInFlight(cp_state, "destroy command pool with", "VUID-vkDestroyCommandPool-commandPool-00041");
}
return skip;
}
void ValidationStateTracker::PreCallRecordDestroyCommandPool(VkDevice device, VkCommandPool commandPool,
const VkAllocationCallbacks *pAllocator) {
if (!commandPool) return;
COMMAND_POOL_STATE *cp_state = GetCommandPoolState(commandPool);
// Remove cmdpool from cmdpoolmap, after freeing layer data for the command buffers
// "When a pool is destroyed, all command buffers allocated from the pool are freed."
if (cp_state) {
// Create a vector, as FreeCommandBufferStates deletes from cp_state->commandBuffers during iteration.
std::vector<VkCommandBuffer> cb_vec{cp_state->commandBuffers.begin(), cp_state->commandBuffers.end()};
FreeCommandBufferStates(cp_state, static_cast<uint32_t>(cb_vec.size()), cb_vec.data());
commandPoolMap.erase(commandPool);
}
}
bool CoreChecks::PreCallValidateResetCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags) {
const auto *command_pool_state = GetCommandPoolState(commandPool);
return CheckCommandBuffersInFlight(command_pool_state, "reset command pool with", "VUID-vkResetCommandPool-commandPool-00040");
}
void ValidationStateTracker::PostCallRecordResetCommandPool(VkDevice device, VkCommandPool commandPool,
VkCommandPoolResetFlags flags, VkResult result) {
if (VK_SUCCESS != result) return;
// Reset all of the CBs allocated from this pool
auto command_pool_state = GetCommandPoolState(commandPool);
for (auto cmdBuffer : command_pool_state->commandBuffers) {
ResetCommandBufferState(cmdBuffer);
}
}
bool CoreChecks::PreCallValidateResetFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences) {
bool skip = false;
for (uint32_t i = 0; i < fenceCount; ++i) {
auto pFence = GetFenceState(pFences[i]);
if (pFence && pFence->scope == kSyncScopeInternal && pFence->state == FENCE_INFLIGHT) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
HandleToUint64(pFences[i]), "VUID-vkResetFences-pFences-01123", "%s is in use.",
report_data->FormatHandle(pFences[i]).c_str());
}
}
return skip;
}
void CoreChecks::PostCallRecordResetFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences, VkResult result) {
for (uint32_t i = 0; i < fenceCount; ++i) {
auto pFence = GetFenceState(pFences[i]);
if (pFence) {
if (pFence->scope == kSyncScopeInternal) {
pFence->state = FENCE_UNSIGNALED;
} else if (pFence->scope == kSyncScopeExternalTemporary) {
pFence->scope = kSyncScopeInternal;
}
}
}
}
// For given cb_nodes, invalidate them and track object causing invalidation
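// A command buffer invalidated while it is still recording moves to CB_INVALID_INCOMPLETE; one invalidated after
// recording has ended moves to CB_INVALID_COMPLETE. Invalidation of a secondary command buffer is propagated to any
// primaries holding it in linkedCommandBuffers, since executing those primaries would replay the broken bindings.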
void ValidationStateTracker::InvalidateCommandBuffers(std::unordered_set<CMD_BUFFER_STATE *> const &cb_nodes,
const VulkanTypedHandle &obj) {
for (auto cb_node : cb_nodes) {
if (cb_node->state == CB_RECORDING) {
cb_node->state = CB_INVALID_INCOMPLETE;
} else if (cb_node->state == CB_RECORDED) {
cb_node->state = CB_INVALID_COMPLETE;
}
cb_node->broken_bindings.push_back(obj);
// if secondary, then propagate the invalidation to the primaries that will call us.
if (cb_node->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
InvalidateCommandBuffers(cb_node->linkedCommandBuffers, obj);
}
}
}
bool CoreChecks::PreCallValidateDestroyFramebuffer(VkDevice device, VkFramebuffer framebuffer,
const VkAllocationCallbacks *pAllocator) {
const FRAMEBUFFER_STATE *framebuffer_state = GetFramebufferState(framebuffer);
const VulkanTypedHandle obj_struct(framebuffer, kVulkanObjectTypeFramebuffer);
bool skip = false;
if (framebuffer_state) {
skip |= ValidateObjectNotInUse(framebuffer_state, obj_struct, "vkDestroyFramebuffer",
"VUID-vkDestroyFramebuffer-framebuffer-00892");
}
return skip;
}
void ValidationStateTracker::PreCallRecordDestroyFramebuffer(VkDevice device, VkFramebuffer framebuffer,
const VkAllocationCallbacks *pAllocator) {
if (!framebuffer) return;
FRAMEBUFFER_STATE *framebuffer_state = GetFramebufferState(framebuffer);
const VulkanTypedHandle obj_struct(framebuffer, kVulkanObjectTypeFramebuffer);
InvalidateCommandBuffers(framebuffer_state->cb_bindings, obj_struct);
frameBufferMap.erase(framebuffer);
}
bool CoreChecks::PreCallValidateDestroyRenderPass(VkDevice device, VkRenderPass renderPass,
const VkAllocationCallbacks *pAllocator) {
const RENDER_PASS_STATE *rp_state = GetRenderPassState(renderPass);
const VulkanTypedHandle obj_struct(renderPass, kVulkanObjectTypeRenderPass);
bool skip = false;
if (rp_state) {
skip |= ValidateObjectNotInUse(rp_state, obj_struct, "vkDestroyRenderPass", "VUID-vkDestroyRenderPass-renderPass-00873");
}
return skip;
}
void ValidationStateTracker::PreCallRecordDestroyRenderPass(VkDevice device, VkRenderPass renderPass,
const VkAllocationCallbacks *pAllocator) {
if (!renderPass) return;
RENDER_PASS_STATE *rp_state = GetRenderPassState(renderPass);
const VulkanTypedHandle obj_struct(renderPass, kVulkanObjectTypeRenderPass);
InvalidateCommandBuffers(rp_state->cb_bindings, obj_struct);
renderPassMap.erase(renderPass);
}
// Access helper functions for external modules
VkFormatProperties CoreChecks::GetPDFormatProperties(const VkFormat format) const {
VkFormatProperties format_properties;
DispatchGetPhysicalDeviceFormatProperties(physical_device, format, &format_properties);
return format_properties;
}
VkResult CoreChecks::GetPDImageFormatProperties(const VkImageCreateInfo *image_ci,
VkImageFormatProperties *pImageFormatProperties) {
return DispatchGetPhysicalDeviceImageFormatProperties(physical_device, image_ci->format, image_ci->imageType, image_ci->tiling,
image_ci->usage, image_ci->flags, pImageFormatProperties);
}
VkResult CoreChecks::GetPDImageFormatProperties2(const VkPhysicalDeviceImageFormatInfo2 *phys_dev_image_fmt_info,
VkImageFormatProperties2 *pImageFormatProperties) const {
if (!instance_extensions.vk_khr_get_physical_device_properties_2) return VK_ERROR_EXTENSION_NOT_PRESENT;
return DispatchGetPhysicalDeviceImageFormatProperties2(physical_device, phys_dev_image_fmt_info, pImageFormatProperties);
}
void ValidationStateTracker::PostCallRecordCreateFence(VkDevice device, const VkFenceCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkFence *pFence, VkResult result) {
if (VK_SUCCESS != result) return;
std::unique_ptr<FENCE_STATE> fence_state(new FENCE_STATE{});
fence_state->fence = *pFence;
fence_state->createInfo = *pCreateInfo;
fence_state->state = (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT) ? FENCE_RETIRED : FENCE_UNSIGNALED;
fenceMap[*pFence] = std::move(fence_state);
}
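// A fence created with VK_FENCE_CREATE_SIGNALED_BIT starts out retired: the tracker treats it as if a submission using
// it has already completed, which is why resetting it is legal while an in-flight fence triggers 01123 above.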
// Validation cache:
// CV is the bottommost implementor of this extension. Don't pass calls down.
// Utility function to set collective state for a pipeline
void SetPipelineState(PIPELINE_STATE *pPipe) {
// If any attachment used by this pipeline has blendEnable, set top-level blendEnable
if (pPipe->graphicsPipelineCI.pColorBlendState) {
for (size_t i = 0; i < pPipe->attachments.size(); ++i) {
if (VK_TRUE == pPipe->attachments[i].blendEnable) {
if (((pPipe->attachments[i].dstAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
(pPipe->attachments[i].dstAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
((pPipe->attachments[i].dstColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
(pPipe->attachments[i].dstColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
((pPipe->attachments[i].srcAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
(pPipe->attachments[i].srcAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
((pPipe->attachments[i].srcColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
(pPipe->attachments[i].srcColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA))) {
pPipe->blendConstantsEnabled = true;
}
}
}
}
}
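// The range comparisons in SetPipelineState assume the four constant-color blend factors are consecutive VkBlendFactor
// values (10 through 13 in vulkan_core.h); a compile-time check of that assumption:
static_assert(VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA == VK_BLEND_FACTOR_CONSTANT_COLOR + 3,
"constant-color blend factors are expected to be contiguous");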
bool CoreChecks::ValidatePipelineVertexDivisors(std::vector<std::unique_ptr<PIPELINE_STATE>> const &pipe_state_vec,
const uint32_t count, const VkGraphicsPipelineCreateInfo *pipe_cis) const {
bool skip = false;
const VkPhysicalDeviceLimits *device_limits = &phys_dev_props.limits;
for (uint32_t i = 0; i < count; i++) {
auto pvids_ci = lvl_find_in_chain<VkPipelineVertexInputDivisorStateCreateInfoEXT>(pipe_cis[i].pVertexInputState->pNext);
if (nullptr == pvids_ci) continue;
const PIPELINE_STATE *pipe_state = pipe_state_vec[i].get();
for (uint32_t j = 0; j < pvids_ci->vertexBindingDivisorCount; j++) {
const VkVertexInputBindingDivisorDescriptionEXT *vibdd = &(pvids_ci->pVertexBindingDivisors[j]);
if (vibdd->binding >= device_limits->maxVertexInputBindings) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
"VUID-VkVertexInputBindingDivisorDescriptionEXT-binding-01869",
"vkCreateGraphicsPipelines(): Pipeline[%1u] with chained VkPipelineVertexInputDivisorStateCreateInfoEXT, "
"pVertexBindingDivisors[%1u] binding index of (%1u) exceeds device maxVertexInputBindings (%1u).",
i, j, vibdd->binding, device_limits->maxVertexInputBindings);
}
if (vibdd->divisor > phys_dev_ext_props.vtx_attrib_divisor_props.maxVertexAttribDivisor) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
"VUID-VkVertexInputBindingDivisorDescriptionEXT-divisor-01870",
"vkCreateGraphicsPipelines(): Pipeline[%1u] with chained VkPipelineVertexInputDivisorStateCreateInfoEXT, "
"pVertexBindingDivisors[%1u] divisor of (%1u) exceeds extension maxVertexAttribDivisor (%1u).",
i, j, vibdd->divisor, phys_dev_ext_props.vtx_attrib_divisor_props.maxVertexAttribDivisor);
}
if ((0 == vibdd->divisor) && !enabled_features.vtx_attrib_divisor_features.vertexAttributeInstanceRateZeroDivisor) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
"VUID-VkVertexInputBindingDivisorDescriptionEXT-vertexAttributeInstanceRateZeroDivisor-02228",
"vkCreateGraphicsPipelines(): Pipeline[%1u] with chained VkPipelineVertexInputDivisorStateCreateInfoEXT, "
"pVertexBindingDivisors[%1u] divisor must not be 0 when vertexAttributeInstanceRateZeroDivisor feature is not "
"enabled.",
i, j);
}
if ((1 != vibdd->divisor) && !enabled_features.vtx_attrib_divisor_features.vertexAttributeInstanceRateDivisor) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
"VUID-VkVertexInputBindingDivisorDescriptionEXT-vertexAttributeInstanceRateDivisor-02229",
"vkCreateGraphicsPipelines(): Pipeline[%1u] with chained VkPipelineVertexInputDivisorStateCreateInfoEXT, "
"pVertexBindingDivisors[%1u] divisor (%1u) must be 1 when vertexAttributeInstanceRateDivisor feature is not "
"enabled.",
i, j, vibdd->divisor);
}
// Find the corresponding binding description and validate input rate setting
bool failed_01871 = true;
for (size_t k = 0; k < pipe_state->vertex_binding_descriptions_.size(); k++) {
if ((vibdd->binding == pipe_state->vertex_binding_descriptions_[k].binding) &&
(VK_VERTEX_INPUT_RATE_INSTANCE == pipe_state->vertex_binding_descriptions_[k].inputRate)) {
failed_01871 = false;
break;
}
}
if (failed_01871) { // Description not found, or has incorrect inputRate value
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
"VUID-VkVertexInputBindingDivisorDescriptionEXT-inputRate-01871",
"vkCreateGraphicsPipelines(): Pipeline[%1u] with chained VkPipelineVertexInputDivisorStateCreateInfoEXT, "
"pVertexBindingDivisors[%1u] specifies binding index (%1u), but that binding index's "
"VkVertexInputBindingDescription.inputRate member is not VK_VERTEX_INPUT_RATE_INSTANCE.",
i, j, vibdd->binding);
}
}
}
return skip;
}
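// Sketch of the pNext chaining this walk expects; illustrative only, with `vertex_input` standing in for the pipeline's
// VkPipelineVertexInputStateCreateInfo:
//     VkVertexInputBindingDivisorDescriptionEXT divisor{/*binding*/ 0, /*divisor*/ 1};
//     VkPipelineVertexInputDivisorStateCreateInfoEXT divisor_state{};
//     divisor_state.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT;
//     divisor_state.vertexBindingDivisorCount = 1;
//     divisor_state.pVertexBindingDivisors = &divisor;
//     vertex_input.pNext = &divisor_state;
// Binding 0 must also be declared with VK_VERTEX_INPUT_RATE_INSTANCE, or check 01871 above fires.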
bool ValidationStateTracker::PreCallValidateCreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
const VkGraphicsPipelineCreateInfo *pCreateInfos,
const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines,
void *cgpl_state_data) {
// Set up the state that CoreChecks, gpu_validation and later StateTracker Record will use.
create_graphics_pipeline_api_state *cgpl_state = reinterpret_cast<create_graphics_pipeline_api_state *>(cgpl_state_data);
cgpl_state->pCreateInfos = pCreateInfos; // GPU validation can alter this, so we have to set a default value for the Chassis
cgpl_state->pipe_state.reserve(count);
for (uint32_t i = 0; i < count; i++) {
cgpl_state->pipe_state.push_back(std::unique_ptr<PIPELINE_STATE>(new PIPELINE_STATE));
(cgpl_state->pipe_state)[i]->initGraphicsPipeline(this, &pCreateInfos[i],
GetRenderPassStateSharedPtr(pCreateInfos[i].renderPass));
(cgpl_state->pipe_state)[i]->pipeline_layout = *GetPipelineLayout(pCreateInfos[i].layout);
}
return false;
}
bool CoreChecks::PreCallValidateCreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
const VkGraphicsPipelineCreateInfo *pCreateInfos,
const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines,
void *cgpl_state_data) {
bool skip = StateTracker::PreCallValidateCreateGraphicsPipelines(device, pipelineCache, count, pCreateInfos, pAllocator,
pPipelines, cgpl_state_data);
create_graphics_pipeline_api_state *cgpl_state = reinterpret_cast<create_graphics_pipeline_api_state *>(cgpl_state_data);
for (uint32_t i = 0; i < count; i++) {
skip |= ValidatePipelineLocked(cgpl_state->pipe_state, i);
}
for (uint32_t i = 0; i < count; i++) {
skip |= ValidatePipelineUnlocked(cgpl_state->pipe_state[i].get(), i);
}
if (device_extensions.vk_ext_vertex_attribute_divisor) {
skip |= ValidatePipelineVertexDivisors(cgpl_state->pipe_state, count, pCreateInfos);
}
return skip;
}
// GPU validation may replace pCreateInfos for the down-chain call
void CoreChecks::PreCallRecordCreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
const VkGraphicsPipelineCreateInfo *pCreateInfos,
const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines,
void *cgpl_state_data) {
// GPU Validation may replace instrumented shaders with non-instrumented ones, so allow it to modify the createinfos.
if (enabled.gpu_validation) {
create_graphics_pipeline_api_state *cgpl_state = reinterpret_cast<create_graphics_pipeline_api_state *>(cgpl_state_data);
cgpl_state->gpu_create_infos = GpuPreCallRecordCreateGraphicsPipelines(pipelineCache, count, pCreateInfos, pAllocator,
pPipelines, cgpl_state->pipe_state);
cgpl_state->pCreateInfos = reinterpret_cast<VkGraphicsPipelineCreateInfo *>(cgpl_state->gpu_create_infos.data());
}
}
void ValidationStateTracker::PostCallRecordCreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
const VkGraphicsPipelineCreateInfo *pCreateInfos,
const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines,
VkResult result, void *cgpl_state_data) {
create_graphics_pipeline_api_state *cgpl_state = reinterpret_cast<create_graphics_pipeline_api_state *>(cgpl_state_data);
// This API may create pipelines regardless of the return value
for (uint32_t i = 0; i < count; i++) {
if (pPipelines[i] != VK_NULL_HANDLE) {
(cgpl_state->pipe_state)[i]->pipeline = pPipelines[i];
pipelineMap[pPipelines[i]] = std::move((cgpl_state->pipe_state)[i]);
}
}
cgpl_state->pipe_state.clear();
}
void CoreChecks::PostCallRecordCreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
const VkGraphicsPipelineCreateInfo *pCreateInfos,
const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines,
VkResult result, void *cgpl_state_data) {
StateTracker::PostCallRecordCreateGraphicsPipelines(device, pipelineCache, count, pCreateInfos, pAllocator, pPipelines, result,
cgpl_state_data);
// GPU val needs clean up regardless of result
if (enabled.gpu_validation) {
create_graphics_pipeline_api_state *cgpl_state = reinterpret_cast<create_graphics_pipeline_api_state *>(cgpl_state_data);
GpuPostCallRecordCreateGraphicsPipelines(count, pCreateInfos, pAllocator, pPipelines);
cgpl_state->gpu_create_infos.clear();
}
}
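// For reference, the chassis flow shared by the pipeline-creation entry points here: the StateTracker PreCallValidate
// builds the scratch PIPELINE_STATE vector, CoreChecks validates against it, PreCallRecord lets GPU assisted validation
// substitute instrumented create infos before the down-chain call, and PostCallRecord moves the states whose handles
// came back non-null into pipelineMap (the API may partially succeed) before clearing the scratch vector.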
bool ValidationStateTracker::PreCallValidateCreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
const VkComputePipelineCreateInfo *pCreateInfos,
const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines,
void *ccpl_state_data) {
auto *ccpl_state = reinterpret_cast<create_compute_pipeline_api_state *>(ccpl_state_data);
ccpl_state->pCreateInfos = pCreateInfos; // GPU validation can alter this, so we have to set a default value for the Chassis
ccpl_state->pipe_state.reserve(count);
for (uint32_t i = 0; i < count; i++) {
// Create and initialize internal tracking data structure
ccpl_state->pipe_state.push_back(std::unique_ptr<PIPELINE_STATE>(new PIPELINE_STATE));
ccpl_state->pipe_state.back()->initComputePipeline(this, &pCreateInfos[i]);
ccpl_state->pipe_state.back()->pipeline_layout = *GetPipelineLayout(pCreateInfos[i].layout);
}
return false;
}
bool CoreChecks::PreCallValidateCreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
const VkComputePipelineCreateInfo *pCreateInfos,
const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines,
void *ccpl_state_data) {
bool skip = StateTracker::PreCallValidateCreateComputePipelines(device, pipelineCache, count, pCreateInfos, pAllocator,
pPipelines, ccpl_state_data);
auto *ccpl_state = reinterpret_cast<create_compute_pipeline_api_state *>(ccpl_state_data);
for (uint32_t i = 0; i < count; i++) {
// TODO: Add Compute Pipeline Verification
skip |= ValidateComputePipeline(ccpl_state->pipe_state[i].get());
}
return skip;
}
void CoreChecks::PreCallRecordCreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
const VkComputePipelineCreateInfo *pCreateInfos,
const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines,
void *ccpl_state_data) {
// GPU Validation may replace instrumented shaders with non-instrumented ones, so allow it to modify the createinfos.
if (enabled.gpu_validation) {
auto *ccpl_state = reinterpret_cast<create_compute_pipeline_api_state *>(ccpl_state_data);
ccpl_state->gpu_create_infos = GpuPreCallRecordCreateComputePipelines(pipelineCache, count, pCreateInfos, pAllocator,
pPipelines, ccpl_state->pipe_state);
ccpl_state->pCreateInfos = reinterpret_cast<VkComputePipelineCreateInfo *>(ccpl_state->gpu_create_infos.data());
}
}
void ValidationStateTracker::PostCallRecordCreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
const VkComputePipelineCreateInfo *pCreateInfos,
const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines,
VkResult result, void *ccpl_state_data) {
create_compute_pipeline_api_state *ccpl_state = reinterpret_cast<create_compute_pipeline_api_state *>(ccpl_state_data);
// This API may create pipelines regardless of the return value
for (uint32_t i = 0; i < count; i++) {
if (pPipelines[i] != VK_NULL_HANDLE) {
(ccpl_state->pipe_state)[i]->pipeline = pPipelines[i];
pipelineMap[pPipelines[i]] = std::move((ccpl_state->pipe_state)[i]);
}
}
ccpl_state->pipe_state.clear();
}
void CoreChecks::PostCallRecordCreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
const VkComputePipelineCreateInfo *pCreateInfos,
const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines,
VkResult result, void *ccpl_state_data) {
StateTracker::PostCallRecordCreateComputePipelines(device, pipelineCache, count, pCreateInfos, pAllocator, pPipelines, result,
ccpl_state_data);
// GPU val needs clean up regardless of result
if (enabled.gpu_validation) {
create_compute_pipeline_api_state *ccpl_state = reinterpret_cast<create_compute_pipeline_api_state *>(ccpl_state_data);
GpuPostCallRecordCreateComputePipelines(count, pCreateInfos, pAllocator, pPipelines);
ccpl_state->gpu_create_infos.clear();
}
}
bool ValidationStateTracker::PreCallValidateCreateRayTracingPipelinesNV(VkDevice device, VkPipelineCache pipelineCache,
uint32_t count,
const VkRayTracingPipelineCreateInfoNV *pCreateInfos,
const VkAllocationCallbacks *pAllocator,
VkPipeline *pPipelines, void *crtpl_state_data) {
auto *crtpl_state = reinterpret_cast<create_ray_tracing_pipeline_api_state *>(crtpl_state_data);
crtpl_state->pipe_state.reserve(count);
for (uint32_t i = 0; i < count; i++) {
// Create and initialize internal tracking data structure
crtpl_state->pipe_state.push_back(std::unique_ptr<PIPELINE_STATE>(new PIPELINE_STATE));
crtpl_state->pipe_state.back()->initRayTracingPipelineNV(this, &pCreateInfos[i]);
crtpl_state->pipe_state.back()->pipeline_layout = *GetPipelineLayout(pCreateInfos[i].layout);
}
return false;
}
bool CoreChecks::PreCallValidateCreateRayTracingPipelinesNV(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
const VkRayTracingPipelineCreateInfoNV *pCreateInfos,
const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines,
void *crtpl_state_data) {
bool skip = StateTracker::PreCallValidateCreateRayTracingPipelinesNV(device, pipelineCache, count, pCreateInfos, pAllocator,
pPipelines, crtpl_state_data);
auto *crtpl_state = reinterpret_cast<create_ray_tracing_pipeline_api_state *>(crtpl_state_data);
for (uint32_t i = 0; i < count; i++) {
skip |= ValidateRayTracingPipelineNV(crtpl_state->pipe_state[i].get());
}
return skip;
}
void CoreChecks::PreCallRecordCreateRayTracingPipelinesNV(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
const VkRayTracingPipelineCreateInfoNV *pCreateInfos,
const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines,
void *crtpl_state_data) {
// GPU Validation may replace instrumented shaders with non-instrumented ones, so allow it to modify the createinfos.
if (enabled.gpu_validation) {
auto *crtpl_state = reinterpret_cast<create_ray_tracing_pipeline_api_state *>(crtpl_state_data);
crtpl_state->gpu_create_infos = GpuPreCallRecordCreateRayTracingPipelinesNV(pipelineCache, count, pCreateInfos, pAllocator,
pPipelines, crtpl_state->pipe_state);
crtpl_state->pCreateInfos = reinterpret_cast<VkRayTracingPipelineCreateInfoNV *>(crtpl_state->gpu_create_infos.data());
}
}
void ValidationStateTracker::PostCallRecordCreateRayTracingPipelinesNV(
VkDevice device, VkPipelineCache pipelineCache, uint32_t count, const VkRayTracingPipelineCreateInfoNV *pCreateInfos,
const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines, VkResult result, void *crtpl_state_data) {
auto *crtpl_state = reinterpret_cast<create_ray_tracing_pipeline_api_state *>(crtpl_state_data);
// This API may create pipelines regardless of the return value
for (uint32_t i = 0; i < count; i++) {
if (pPipelines[i] != VK_NULL_HANDLE) {
(crtpl_state->pipe_state)[i]->pipeline = pPipelines[i];
pipelineMap[pPipelines[i]] = std::move((crtpl_state->pipe_state)[i]);
}
}
crtpl_state->pipe_state.clear();
}
void CoreChecks::PostCallRecordCreateRayTracingPipelinesNV(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
const VkRayTracingPipelineCreateInfoNV *pCreateInfos,
const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines,
VkResult result, void *crtpl_state_data) {
StateTracker::PostCallRecordCreateRayTracingPipelinesNV(device, pipelineCache, count, pCreateInfos, pAllocator, pPipelines,
result, crtpl_state_data);
// GPU val needs clean up regardless of result
if (enabled.gpu_validation) {
auto *crtpl_state = reinterpret_cast<create_ray_tracing_pipeline_api_state *>(crtpl_state_data);
GpuPostCallRecordCreateRayTracingPipelinesNV(count, pCreateInfos, pAllocator, pPipelines);
crtpl_state->gpu_create_infos.clear();
}
}
void ValidationStateTracker::PostCallRecordCreateSampler(VkDevice device, const VkSamplerCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkSampler *pSampler,
VkResult result) {
if (VK_SUCCESS != result) return;
samplerMap[*pSampler] = std::unique_ptr<SAMPLER_STATE>(new SAMPLER_STATE(pSampler, pCreateInfo));
}
bool CoreChecks::PreCallValidateCreateDescriptorSetLayout(VkDevice device, const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkDescriptorSetLayout *pSetLayout) {
return cvdescriptorset::ValidateDescriptorSetLayoutCreateInfo(
report_data, pCreateInfo, device_extensions.vk_khr_push_descriptor, phys_dev_ext_props.max_push_descriptors,
device_extensions.vk_ext_descriptor_indexing, &enabled_features.descriptor_indexing, &enabled_features.inline_uniform_block,
&phys_dev_ext_props.inline_uniform_block_props);
}
void ValidationStateTracker::PostCallRecordCreateDescriptorSetLayout(VkDevice device,
const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkDescriptorSetLayout *pSetLayout, VkResult result) {
if (VK_SUCCESS != result) return;
descriptorSetLayoutMap[*pSetLayout] = std::make_shared<cvdescriptorset::DescriptorSetLayout>(pCreateInfo, *pSetLayout);
}
// Used by CreatePipelineLayout and CmdPushConstants.
// Note that the index argument is optional and only used by CreatePipelineLayout.
bool CoreChecks::ValidatePushConstantRange(const uint32_t offset, const uint32_t size, const char *caller_name,
uint32_t index = 0) const {
if (disabled.push_constant_range) return false;
uint32_t const maxPushConstantsSize = phys_dev_props.limits.maxPushConstantsSize;
bool skip = false;
// Check that offset + size don't exceed the max.
// Prevent arithmetic overflow here by avoiding addition and testing in this order.
if ((offset >= maxPushConstantsSize) || (size > maxPushConstantsSize - offset)) {
// This is a pain just to adapt the log message to the caller, but better to sort it out only when there is a problem.
if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
if (offset >= maxPushConstantsSize) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPushConstantRange-offset-00294",
"%s call has push constants index %u with offset %u that exceeds this device's maxPushConstantSize of %u.",
caller_name, index, offset, maxPushConstantsSize);
}
if (size > maxPushConstantsSize - offset) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPushConstantRange-size-00298",
"%s call has push constants index %u with offset %u and size %u that exceeds this device's "
"maxPushConstantSize of %u.",
caller_name, index, offset, size, maxPushConstantsSize);
}
} else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
if (offset >= maxPushConstantsSize) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-vkCmdPushConstants-offset-00370",
"%s call has push constants index %u with offset %u that exceeds this device's maxPushConstantSize of %u.",
caller_name, index, offset, maxPushConstantsSize);
}
if (size > maxPushConstantsSize - offset) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-vkCmdPushConstants-size-00371",
"%s call has push constants index %u with offset %u and size %u that exceeds this device's "
"maxPushConstantSize of %u.",
caller_name, index, offset, size, maxPushConstantsSize);
}
} else {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
kVUID_Core_DrawState_InternalError, "%s caller not supported.", caller_name);
}
}
// size needs to be non-zero and a multiple of 4.
if ((size == 0) || ((size & 0x3) != 0)) {
if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
if (size == 0) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPushConstantRange-size-00296",
"%s call has push constants index %u with size %u. Size must be greater than zero.", caller_name,
index, size);
}
if (size & 0x3) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPushConstantRange-size-00297",
"%s call has push constants index %u with size %u. Size must be a multiple of 4.", caller_name,
index, size);
}
} else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
if (size == 0) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-vkCmdPushConstants-size-arraylength",
"%s call has push constants index %u with size %u. Size must be greater than zero.", caller_name,
index, size);
}
if (size & 0x3) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-vkCmdPushConstants-size-00369",
"%s call has push constants index %u with size %u. Size must be a multiple of 4.", caller_name,
index, size);
}
} else {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
kVUID_Core_DrawState_InternalError, "%s caller not supported.", caller_name);
}
}
// offset needs to be a multiple of 4.
if ((offset & 0x3) != 0) {
if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPushConstantRange-offset-00295",
"%s call has push constants index %u with offset %u. Offset must be a multiple of 4.", caller_name,
index, offset);
} else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-vkCmdPushConstants-offset-00368",
"%s call has push constants with offset %u. Offset must be a multiple of 4.", caller_name, offset);
} else {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
kVUID_Core_DrawState_InternalError, "%s caller not supported.", caller_name);
}
}
return skip;
}
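// Worked example of the overflow-safe bound above: with maxPushConstantsSize == 128, offset == 120 and size == 16 fail
// because 16 > 128 - 120, without ever computing offset + size. An illustrative range that passes all three checks on
// any device (the spec guarantees maxPushConstantsSize >= 128):
//     VkPushConstantRange range{VK_SHADER_STAGE_VERTEX_BIT, /*offset*/ 0, /*size*/ 16};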
enum DSL_DESCRIPTOR_GROUPS {
DSL_TYPE_SAMPLERS = 0,
DSL_TYPE_UNIFORM_BUFFERS,
DSL_TYPE_STORAGE_BUFFERS,
DSL_TYPE_SAMPLED_IMAGES,
DSL_TYPE_STORAGE_IMAGES,
DSL_TYPE_INPUT_ATTACHMENTS,
DSL_TYPE_INLINE_UNIFORM_BLOCK,
DSL_NUM_DESCRIPTOR_GROUPS
};
// Used by PreCallValidateCreatePipelineLayout.
// Returns an array of size DSL_NUM_DESCRIPTOR_GROUPS of the maximum number of descriptors used in any single pipeline stage
std::valarray<uint32_t> GetDescriptorCountMaxPerStage(
const DeviceFeatures *enabled_features,
const std::vector<std::shared_ptr<cvdescriptorset::DescriptorSetLayout const>> &set_layouts, bool skip_update_after_bind) {
// Identify active pipeline stages
std::vector<VkShaderStageFlags> stage_flags = {VK_SHADER_STAGE_VERTEX_BIT, VK_SHADER_STAGE_FRAGMENT_BIT,
VK_SHADER_STAGE_COMPUTE_BIT};
if (enabled_features->core.geometryShader) {
stage_flags.push_back(VK_SHADER_STAGE_GEOMETRY_BIT);
}
if (enabled_features->core.tessellationShader) {
stage_flags.push_back(VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT);
stage_flags.push_back(VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT);
}
// Allow iteration over enum values
std::vector<DSL_DESCRIPTOR_GROUPS> dsl_groups = {
DSL_TYPE_SAMPLERS, DSL_TYPE_UNIFORM_BUFFERS, DSL_TYPE_STORAGE_BUFFERS, DSL_TYPE_SAMPLED_IMAGES,
DSL_TYPE_STORAGE_IMAGES, DSL_TYPE_INPUT_ATTACHMENTS, DSL_TYPE_INLINE_UNIFORM_BLOCK};
// Sum by layouts per stage, then pick max of stages per type
std::valarray<uint32_t> max_sum(0U, DSL_NUM_DESCRIPTOR_GROUPS); // max descriptor sum among all pipeline stages
for (auto stage : stage_flags) {
std::valarray<uint32_t> stage_sum(0U, DSL_NUM_DESCRIPTOR_GROUPS); // per-stage sums
for (auto dsl : set_layouts) {
if (skip_update_after_bind &&
(dsl->GetCreateFlags() & VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT)) {
continue;
}
for (uint32_t binding_idx = 0; binding_idx < dsl->GetBindingCount(); binding_idx++) {
const VkDescriptorSetLayoutBinding *binding = dsl->GetDescriptorSetLayoutBindingPtrFromIndex(binding_idx);
// Bindings with a descriptorCount of 0 are "reserved" and should be skipped
if (0 != (stage & binding->stageFlags) && binding->descriptorCount > 0) {
switch (binding->descriptorType) {
case VK_DESCRIPTOR_TYPE_SAMPLER:
stage_sum[DSL_TYPE_SAMPLERS] += binding->descriptorCount;
break;
case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
stage_sum[DSL_TYPE_UNIFORM_BUFFERS] += binding->descriptorCount;
break;
case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
stage_sum[DSL_TYPE_STORAGE_BUFFERS] += binding->descriptorCount;
break;
case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
stage_sum[DSL_TYPE_SAMPLED_IMAGES] += binding->descriptorCount;
break;
case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
stage_sum[DSL_TYPE_STORAGE_IMAGES] += binding->descriptorCount;
break;
case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
stage_sum[DSL_TYPE_SAMPLED_IMAGES] += binding->descriptorCount;
stage_sum[DSL_TYPE_SAMPLERS] += binding->descriptorCount;
break;
case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
stage_sum[DSL_TYPE_INPUT_ATTACHMENTS] += binding->descriptorCount;
break;
case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT:
// count one block per binding. descriptorCount is number of bytes
stage_sum[DSL_TYPE_INLINE_UNIFORM_BLOCK]++;
break;
default:
break;
}
}
}
}
for (auto type : dsl_groups) {
max_sum[type] = std::max(stage_sum[type], max_sum[type]);
}
}
return max_sum;
}
// Used by PreCallValidateCreatePipelineLayout.
// Returns a map indexed by VK_DESCRIPTOR_TYPE_* enum of the summed descriptors by type.
// Note: descriptors only count against the limit once even if used by multiple stages.
std::map<uint32_t, uint32_t> GetDescriptorSum(
const std::vector<std::shared_ptr<cvdescriptorset::DescriptorSetLayout const>> &set_layouts, bool skip_update_after_bind) {
std::map<uint32_t, uint32_t> sum_by_type;
for (auto dsl : set_layouts) {
if (skip_update_after_bind && (dsl->GetCreateFlags() & VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT)) {
continue;
}
for (uint32_t binding_idx = 0; binding_idx < dsl->GetBindingCount(); binding_idx++) {
const VkDescriptorSetLayoutBinding *binding = dsl->GetDescriptorSetLayoutBindingPtrFromIndex(binding_idx);
// Bindings with a descriptorCount of 0 are "reserved" and should be skipped
if (binding->descriptorCount > 0) {
if (binding->descriptorType == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT) {
// count one block per binding. descriptorCount is number of bytes
sum_by_type[binding->descriptorType]++;
} else {
sum_by_type[binding->descriptorType] += binding->descriptorCount;
}
}
}
}
return sum_by_type;
}
bool CoreChecks::PreCallValidateCreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkPipelineLayout *pPipelineLayout) {
bool skip = false;
// Validate layout count against device physical limit
if (pCreateInfo->setLayoutCount > phys_dev_props.limits.maxBoundDescriptorSets) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-setLayoutCount-00286",
"vkCreatePipelineLayout(): setLayoutCount (%d) exceeds physical device maxBoundDescriptorSets limit (%d).",
pCreateInfo->setLayoutCount, phys_dev_props.limits.maxBoundDescriptorSets);
}
// Validate Push Constant ranges
uint32_t i, j;
for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
skip |= ValidatePushConstantRange(pCreateInfo->pPushConstantRanges[i].offset, pCreateInfo->pPushConstantRanges[i].size,
"vkCreatePipelineLayout()", i);
if (0 == pCreateInfo->pPushConstantRanges[i].stageFlags) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPushConstantRange-stageFlags-requiredbitmask",
"vkCreatePipelineLayout() call has no stageFlags set.");
}
}
// As of 1.0.28, there is a VU that states that a stage flag cannot appear more than once in the list of push constant ranges.
for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
for (j = i + 1; j < pCreateInfo->pushConstantRangeCount; ++j) {
if (0 != (pCreateInfo->pPushConstantRanges[i].stageFlags & pCreateInfo->pPushConstantRanges[j].stageFlags)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pPushConstantRanges-00292",
"vkCreatePipelineLayout() Duplicate stage flags found in ranges %d and %d.", i, j);
}
}
}
// Early-out
if (skip) return skip;
std::vector<std::shared_ptr<cvdescriptorset::DescriptorSetLayout const>> set_layouts(pCreateInfo->setLayoutCount, nullptr);
unsigned int push_descriptor_set_count = 0;
{
for (i = 0; i < pCreateInfo->setLayoutCount; ++i) {
set_layouts[i] = GetDescriptorSetLayout(this, pCreateInfo->pSetLayouts[i]);
if (set_layouts[i]->IsPushDescriptor()) ++push_descriptor_set_count;
}
}
if (push_descriptor_set_count > 1) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00293",
"vkCreatePipelineLayout() Multiple push descriptor sets found.");
}
// Max descriptors by type, within a single pipeline stage
std::valarray<uint32_t> max_descriptors_per_stage = GetDescriptorCountMaxPerStage(&enabled_features, set_layouts, true);
// Samplers
if (max_descriptors_per_stage[DSL_TYPE_SAMPLERS] > phys_dev_props.limits.maxPerStageDescriptorSamplers) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00287",
"vkCreatePipelineLayout(): max per-stage sampler bindings count (%d) exceeds device "
"maxPerStageDescriptorSamplers limit (%d).",
max_descriptors_per_stage[DSL_TYPE_SAMPLERS], phys_dev_props.limits.maxPerStageDescriptorSamplers);
}
// Uniform buffers
if (max_descriptors_per_stage[DSL_TYPE_UNIFORM_BUFFERS] > phys_dev_props.limits.maxPerStageDescriptorUniformBuffers) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00288",
"vkCreatePipelineLayout(): max per-stage uniform buffer bindings count (%d) exceeds device "
"maxPerStageDescriptorUniformBuffers limit (%d).",
max_descriptors_per_stage[DSL_TYPE_UNIFORM_BUFFERS], phys_dev_props.limits.maxPerStageDescriptorUniformBuffers);
}
// Storage buffers
if (max_descriptors_per_stage[DSL_TYPE_STORAGE_BUFFERS] > phys_dev_props.limits.maxPerStageDescriptorStorageBuffers) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00289",
"vkCreatePipelineLayout(): max per-stage storage buffer bindings count (%d) exceeds device "
"maxPerStageDescriptorStorageBuffers limit (%d).",
max_descriptors_per_stage[DSL_TYPE_STORAGE_BUFFERS], phys_dev_props.limits.maxPerStageDescriptorStorageBuffers);
}
// Sampled images
if (max_descriptors_per_stage[DSL_TYPE_SAMPLED_IMAGES] > phys_dev_props.limits.maxPerStageDescriptorSampledImages) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00290",
"vkCreatePipelineLayout(): max per-stage sampled image bindings count (%d) exceeds device "
"maxPerStageDescriptorSampledImages limit (%d).",
max_descriptors_per_stage[DSL_TYPE_SAMPLED_IMAGES], phys_dev_props.limits.maxPerStageDescriptorSampledImages);
}
// Storage images
if (max_descriptors_per_stage[DSL_TYPE_STORAGE_IMAGES] > phys_dev_props.limits.maxPerStageDescriptorStorageImages) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00291",
"vkCreatePipelineLayout(): max per-stage storage image bindings count (%d) exceeds device "
"maxPerStageDescriptorStorageImages limit (%d).",
max_descriptors_per_stage[DSL_TYPE_STORAGE_IMAGES], phys_dev_props.limits.maxPerStageDescriptorStorageImages);
}
// Input attachments
if (max_descriptors_per_stage[DSL_TYPE_INPUT_ATTACHMENTS] > phys_dev_props.limits.maxPerStageDescriptorInputAttachments) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01676",
"vkCreatePipelineLayout(): max per-stage input attachment bindings count (%d) exceeds device "
"maxPerStageDescriptorInputAttachments limit (%d).",
max_descriptors_per_stage[DSL_TYPE_INPUT_ATTACHMENTS],
phys_dev_props.limits.maxPerStageDescriptorInputAttachments);
}
// Inline uniform blocks
if (max_descriptors_per_stage[DSL_TYPE_INLINE_UNIFORM_BLOCK] >
phys_dev_ext_props.inline_uniform_block_props.maxPerStageDescriptorInlineUniformBlocks) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-descriptorType-02214",
"vkCreatePipelineLayout(): max per-stage inline uniform block bindings count (%d) exceeds device "
"maxPerStageDescriptorInlineUniformBlocks limit (%d).",
max_descriptors_per_stage[DSL_TYPE_INLINE_UNIFORM_BLOCK],
phys_dev_ext_props.inline_uniform_block_props.maxPerStageDescriptorInlineUniformBlocks);
}
// Total descriptors by type
std::map<uint32_t, uint32_t> sum_all_stages = GetDescriptorSum(set_layouts, true);
// Samplers
uint32_t sum = sum_all_stages[VK_DESCRIPTOR_TYPE_SAMPLER] + sum_all_stages[VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER];
if (sum > phys_dev_props.limits.maxDescriptorSetSamplers) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01677",
"vkCreatePipelineLayout(): sum of sampler bindings among all stages (%d) exceeds device "
"maxDescriptorSetSamplers limit (%d).",
sum, phys_dev_props.limits.maxDescriptorSetSamplers);
}
// Uniform buffers
if (sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER] > phys_dev_props.limits.maxDescriptorSetUniformBuffers) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01678",
"vkCreatePipelineLayout(): sum of uniform buffer bindings among all stages (%d) exceeds device "
"maxDescriptorSetUniformBuffers limit (%d).",
sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER], phys_dev_props.limits.maxDescriptorSetUniformBuffers);
}
// Dynamic uniform buffers
if (sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC] > phys_dev_props.limits.maxDescriptorSetUniformBuffersDynamic) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01679",
"vkCreatePipelineLayout(): sum of dynamic uniform buffer bindings among all stages (%d) exceeds device "
"maxDescriptorSetUniformBuffersDynamic limit (%d).",
sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC],
phys_dev_props.limits.maxDescriptorSetUniformBuffersDynamic);
}
// Storage buffers
if (sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER] > phys_dev_props.limits.maxDescriptorSetStorageBuffers) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01680",
"vkCreatePipelineLayout(): sum of storage buffer bindings among all stages (%d) exceeds device "
"maxDescriptorSetStorageBuffers limit (%d).",
sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER], phys_dev_props.limits.maxDescriptorSetStorageBuffers);
}
// Dynamic storage buffers
if (sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC] > phys_dev_props.limits.maxDescriptorSetStorageBuffersDynamic) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01681",
"vkCreatePipelineLayout(): sum of dynamic storage buffer bindings among all stages (%d) exceeds device "
"maxDescriptorSetStorageBuffersDynamic limit (%d).",
sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC],
phys_dev_props.limits.maxDescriptorSetStorageBuffersDynamic);
}
// Sampled images
sum = sum_all_stages[VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE] + sum_all_stages[VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER] +
sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER];
if (sum > phys_dev_props.limits.maxDescriptorSetSampledImages) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01682",
"vkCreatePipelineLayout(): sum of sampled image bindings among all stages (%d) exceeds device "
"maxDescriptorSetSampledImages limit (%d).",
sum, phys_dev_props.limits.maxDescriptorSetSampledImages);
}
// Storage images
sum = sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_IMAGE] + sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER];
if (sum > phys_dev_props.limits.maxDescriptorSetStorageImages) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01683",
"vkCreatePipelineLayout(): sum of storage image bindings among all stages (%d) exceeds device "
"maxDescriptorSetStorageImages limit (%d).",
sum, phys_dev_props.limits.maxDescriptorSetStorageImages);
}
// Input attachments
if (sum_all_stages[VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT] > phys_dev_props.limits.maxDescriptorSetInputAttachments) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01684",
"vkCreatePipelineLayout(): sum of input attachment bindings among all stages (%d) exceeds device "
"maxDescriptorSetInputAttachments limit (%d).",
sum_all_stages[VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT], phys_dev_props.limits.maxDescriptorSetInputAttachments);
}
// Inline uniform blocks
if (sum_all_stages[VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT] >
phys_dev_ext_props.inline_uniform_block_props.maxDescriptorSetInlineUniformBlocks) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-descriptorType-02216",
"vkCreatePipelineLayout(): sum of inline uniform block bindings among all stages (%d) exceeds device "
"maxDescriptorSetInlineUniformBlocks limit (%d).",
sum_all_stages[VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT],
phys_dev_ext_props.inline_uniform_block_props.maxDescriptorSetInlineUniformBlocks);
}
if (device_extensions.vk_ext_descriptor_indexing) {
// XXX TODO: replace with correct VU messages
// Max descriptors by type, within a single pipeline stage
std::valarray<uint32_t> max_descriptors_per_stage_update_after_bind =
GetDescriptorCountMaxPerStage(&enabled_features, set_layouts, false);
// Samplers
if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_SAMPLERS] >
phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindSamplers) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-descriptorType-03022",
"vkCreatePipelineLayout(): max per-stage sampler bindings count (%d) exceeds device "
"maxPerStageDescriptorUpdateAfterBindSamplers limit (%d).",
max_descriptors_per_stage_update_after_bind[DSL_TYPE_SAMPLERS],
phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindSamplers);
}
// Uniform buffers
if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_UNIFORM_BUFFERS] >
phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindUniformBuffers) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-descriptorType-03023",
"vkCreatePipelineLayout(): max per-stage uniform buffer bindings count (%d) exceeds device "
"maxPerStageDescriptorUpdateAfterBindUniformBuffers limit (%d).",
max_descriptors_per_stage_update_after_bind[DSL_TYPE_UNIFORM_BUFFERS],
phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindUniformBuffers);
}
// Storage buffers
if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_STORAGE_BUFFERS] >
phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindStorageBuffers) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-descriptorType-03024",
"vkCreatePipelineLayout(): max per-stage storage buffer bindings count (%d) exceeds device "
"maxPerStageDescriptorUpdateAfterBindStorageBuffers limit (%d).",
max_descriptors_per_stage_update_after_bind[DSL_TYPE_STORAGE_BUFFERS],
phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindStorageBuffers);
}
// Sampled images
if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_SAMPLED_IMAGES] >
phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindSampledImages) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-descriptorType-03025",
"vkCreatePipelineLayout(): max per-stage sampled image bindings count (%d) exceeds device "
"maxPerStageDescriptorUpdateAfterBindSampledImages limit (%d).",
max_descriptors_per_stage_update_after_bind[DSL_TYPE_SAMPLED_IMAGES],
phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindSampledImages);
}
// Storage images
if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_STORAGE_IMAGES] >
phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindStorageImages) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-descriptorType-03026",
"vkCreatePipelineLayout(): max per-stage storage image bindings count (%d) exceeds device "
"maxPerStageDescriptorUpdateAfterBindStorageImages limit (%d).",
max_descriptors_per_stage_update_after_bind[DSL_TYPE_STORAGE_IMAGES],
phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindStorageImages);
}
// Input attachments
if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_INPUT_ATTACHMENTS] >
phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindInputAttachments) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-descriptorType-03027",
"vkCreatePipelineLayout(): max per-stage input attachment bindings count (%d) exceeds device "
"maxPerStageDescriptorUpdateAfterBindInputAttachments limit (%d).",
max_descriptors_per_stage_update_after_bind[DSL_TYPE_INPUT_ATTACHMENTS],
phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindInputAttachments);
}
// Inline uniform blocks
if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_INLINE_UNIFORM_BLOCK] >
phys_dev_ext_props.inline_uniform_block_props.maxPerStageDescriptorUpdateAfterBindInlineUniformBlocks) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-descriptorType-02215",
"vkCreatePipelineLayout(): max per-stage inline uniform block bindings count (%d) exceeds device "
"maxPerStageDescriptorUpdateAfterBindInlineUniformBlocks limit (%d).",
max_descriptors_per_stage_update_after_bind[DSL_TYPE_INLINE_UNIFORM_BLOCK],
phys_dev_ext_props.inline_uniform_block_props.maxPerStageDescriptorUpdateAfterBindInlineUniformBlocks);
}
        // Total descriptors by type, summed across all pipeline stages
std::map<uint32_t, uint32_t> sum_all_stages_update_after_bind = GetDescriptorSum(set_layouts, false);
// Samplers
sum = sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_SAMPLER] +
sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER];
if (sum > phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindSamplers) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03036",
"vkCreatePipelineLayout(): sum of sampler bindings among all stages (%d) exceeds device "
"maxDescriptorSetUpdateAfterBindSamplers limit (%d).",
sum, phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindSamplers);
}
// Uniform buffers
if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER] >
phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindUniformBuffers) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03037",
"vkCreatePipelineLayout(): sum of uniform buffer bindings among all stages (%d) exceeds device "
"maxDescriptorSetUpdateAfterBindUniformBuffers limit (%d).",
sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER],
phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindUniformBuffers);
}
// Dynamic uniform buffers
if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC] >
phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindUniformBuffersDynamic) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03038",
"vkCreatePipelineLayout(): sum of dynamic uniform buffer bindings among all stages (%d) exceeds device "
"maxDescriptorSetUpdateAfterBindUniformBuffersDynamic limit (%d).",
sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC],
phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindUniformBuffersDynamic);
}
// Storage buffers
if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER] >
phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindStorageBuffers) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03039",
"vkCreatePipelineLayout(): sum of storage buffer bindings among all stages (%d) exceeds device "
"maxDescriptorSetUpdateAfterBindStorageBuffers limit (%d).",
sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER],
phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindStorageBuffers);
}
// Dynamic storage buffers
if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC] >
phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindStorageBuffersDynamic) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03040",
"vkCreatePipelineLayout(): sum of dynamic storage buffer bindings among all stages (%d) exceeds device "
"maxDescriptorSetUpdateAfterBindStorageBuffersDynamic limit (%d).",
sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC],
phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindStorageBuffersDynamic);
}
// Sampled images
sum = sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE] +
sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER] +
sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER];
if (sum > phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindSampledImages) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03041",
"vkCreatePipelineLayout(): sum of sampled image bindings among all stages (%d) exceeds device "
"maxDescriptorSetUpdateAfterBindSampledImages limit (%d).",
sum, phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindSampledImages);
}
// Storage images
sum = sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_IMAGE] +
sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER];
if (sum > phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindStorageImages) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03042",
"vkCreatePipelineLayout(): sum of storage image bindings among all stages (%d) exceeds device "
"maxDescriptorSetUpdateAfterBindStorageImages limit (%d).",
sum, phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindStorageImages);
}
// Input attachments
if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT] >
phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindInputAttachments) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03043",
"vkCreatePipelineLayout(): sum of input attachment bindings among all stages (%d) exceeds device "
"maxDescriptorSetUpdateAfterBindInputAttachments limit (%d).",
sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT],
phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindInputAttachments);
}
// Inline uniform blocks
if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT] >
phys_dev_ext_props.inline_uniform_block_props.maxDescriptorSetUpdateAfterBindInlineUniformBlocks) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-descriptorType-02217",
"vkCreatePipelineLayout(): sum of inline uniform block bindings among all stages (%d) exceeds device "
"maxDescriptorSetUpdateAfterBindInlineUniformBlocks limit (%d).",
sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT],
phys_dev_ext_props.inline_uniform_block_props.maxDescriptorSetUpdateAfterBindInlineUniformBlocks);
}
}
return skip;
}
// Strict weak ordering that makes sorting repeatable; it is not useful for "memory in range" searches
struct PushConstantRangeCompare {
bool operator()(const VkPushConstantRange *lhs, const VkPushConstantRange *rhs) const {
if (lhs->offset == rhs->offset) {
if (lhs->size == rhs->size) {
// The comparison is arbitrary, but avoids false aliasing by comparing all fields.
return lhs->stageFlags < rhs->stageFlags;
}
// If the offsets are the same then sorting by the end of range is useful for validation
return lhs->size < rhs->size;
}
return lhs->offset < rhs->offset;
}
};
static PushConstantRangesDict push_constant_ranges_dict;
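// Return the canonical id for a pipeline layout's push constant ranges. The ranges are sorted first so that
// equivalent sets of ranges map to the same id regardless of declaration order; e.g. a layout declaring
// {offset 0, size 16, VERTEX} then {offset 16, size 16, FRAGMENT} and one declaring the same two ranges in
// reverse order receive the same PushConstantRangesId.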
PushConstantRangesId GetCanonicalId(const VkPipelineLayoutCreateInfo *info) {
if (!info->pPushConstantRanges) {
// Hand back the empty entry (creating as needed)...
return push_constant_ranges_dict.look_up(PushConstantRanges());
}
// Sort the input ranges to ensure equivalent ranges map to the same id
std::set<const VkPushConstantRange *, PushConstantRangeCompare> sorted;
for (uint32_t i = 0; i < info->pushConstantRangeCount; i++) {
sorted.insert(info->pPushConstantRanges + i);
}
    PushConstantRanges ranges;
    ranges.reserve(sorted.size());
for (const auto range : sorted) {
ranges.emplace_back(*range);
}
return push_constant_ranges_dict.look_up(std::move(ranges));
}
// Dictionary of the canonical form of a pipeline layout's list of descriptor set layouts
static PipelineLayoutSetLayoutsDict pipeline_layout_set_layouts_dict;
// Dictionary of canonical form of the "compatible for set" records
static PipelineLayoutCompatDict pipeline_layout_compat_dict;
static PipelineLayoutCompatId GetCanonicalId(const uint32_t set_index, const PushConstantRangesId pcr_id,
const PipelineLayoutSetLayoutsId set_layouts_id) {
return pipeline_layout_compat_dict.look_up(PipelineLayoutCompatDef(set_index, pcr_id, set_layouts_id));
}
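// When GPU-assisted validation is enabled, it is given a chance to substitute a modified create info
// (cpl_state->modified_create_info) before the down-chain call, presumably to reserve room for its own
// instrumentation bindings.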
void CoreChecks::PreCallRecordCreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkPipelineLayout *pPipelineLayout,
void *cpl_state_data) {
create_pipeline_layout_api_state *cpl_state = reinterpret_cast<create_pipeline_layout_api_state *>(cpl_state_data);
if (enabled.gpu_validation) {
GpuPreCallCreatePipelineLayout(pCreateInfo, pAllocator, pPipelineLayout, &cpl_state->new_layouts,
&cpl_state->modified_create_info);
}
}
void CoreChecks::PostCallRecordCreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkPipelineLayout *pPipelineLayout,
VkResult result) {
StateTracker::PostCallRecordCreatePipelineLayout(device, pCreateInfo, pAllocator, pPipelineLayout, result);
// Clean up GPU validation
if (enabled.gpu_validation) {
GpuPostCallCreatePipelineLayout(result);
}
}
void ValidationStateTracker::PostCallRecordCreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkPipelineLayout *pPipelineLayout, VkResult result) {
if (VK_SUCCESS != result) return;
std::unique_ptr<PIPELINE_LAYOUT_STATE> pipeline_layout_state(new PIPELINE_LAYOUT_STATE{});
pipeline_layout_state->layout = *pPipelineLayout;
pipeline_layout_state->set_layouts.resize(pCreateInfo->setLayoutCount);
PipelineLayoutSetLayoutsDef set_layouts(pCreateInfo->setLayoutCount);
for (uint32_t i = 0; i < pCreateInfo->setLayoutCount; ++i) {
pipeline_layout_state->set_layouts[i] = GetDescriptorSetLayout(this, pCreateInfo->pSetLayouts[i]);
set_layouts[i] = pipeline_layout_state->set_layouts[i]->GetLayoutId();
}
// Get canonical form IDs for the "compatible for set" contents
pipeline_layout_state->push_constant_ranges = GetCanonicalId(pCreateInfo);
auto set_layouts_id = pipeline_layout_set_layouts_dict.look_up(set_layouts);
pipeline_layout_state->compat_for_set.reserve(pCreateInfo->setLayoutCount);
    // Create table of "compatible for set N" canonical forms for trivial accept validation
for (uint32_t i = 0; i < pCreateInfo->setLayoutCount; ++i) {
pipeline_layout_state->compat_for_set.emplace_back(
GetCanonicalId(i, pipeline_layout_state->push_constant_ranges, set_layouts_id));
}
pipelineLayoutMap[*pPipelineLayout] = std::move(pipeline_layout_state);
}
void ValidationStateTracker::PostCallRecordCreateDescriptorPool(VkDevice device, const VkDescriptorPoolCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkDescriptorPool *pDescriptorPool, VkResult result) {
if (VK_SUCCESS != result) return;
descriptorPoolMap[*pDescriptorPool] =
std::unique_ptr<DESCRIPTOR_POOL_STATE>(new DESCRIPTOR_POOL_STATE(*pDescriptorPool, pCreateInfo));
}
bool CoreChecks::PreCallValidateResetDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool,
VkDescriptorPoolResetFlags flags) {
// Make sure sets being destroyed are not currently in-use
if (disabled.idle_descriptor_set) return false;
bool skip = false;
DESCRIPTOR_POOL_STATE *pPool = GetDescriptorPoolState(descriptorPool);
if (pPool != nullptr) {
for (auto ds : pPool->sets) {
if (ds && ds->in_use.load()) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
HandleToUint64(descriptorPool), "VUID-vkResetDescriptorPool-descriptorPool-00313",
"It is invalid to call vkResetDescriptorPool() with descriptor sets in use by a command buffer.");
if (skip) break;
}
}
}
return skip;
}
void ValidationStateTracker::PostCallRecordResetDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool,
VkDescriptorPoolResetFlags flags, VkResult result) {
if (VK_SUCCESS != result) return;
DESCRIPTOR_POOL_STATE *pPool = GetDescriptorPoolState(descriptorPool);
// TODO: validate flags
    // For each set allocated from this pool, clear it, remove it from setMap, and free the cvdescriptorset::DescriptorSet
for (auto ds : pPool->sets) {
FreeDescriptorSet(ds);
}
pPool->sets.clear();
// Reset available count for each type and available sets for this pool
for (auto it = pPool->availableDescriptorTypeCount.begin(); it != pPool->availableDescriptorTypeCount.end(); ++it) {
pPool->availableDescriptorTypeCount[it->first] = pPool->maxDescriptorTypeCount[it->first];
}
pPool->availableSets = pPool->maxSets;
}
// Ensure the pool contains enough descriptors and descriptor sets to satisfy
// an allocation request. Fills ads_state with the total number of descriptors of each type required,
// as well as the DescriptorSetLayout ptrs used for the later update.
bool CoreChecks::PreCallValidateAllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAllocateInfo,
VkDescriptorSet *pDescriptorSets, void *ads_state_data) {
    // Always update the shared allocate-state data (ads_state)
cvdescriptorset::AllocateDescriptorSetsData *ads_state =
reinterpret_cast<cvdescriptorset::AllocateDescriptorSetsData *>(ads_state_data);
UpdateAllocateDescriptorSetsData(pAllocateInfo, ads_state);
    // All state checks for AllocateDescriptorSets are done in a single function
return ValidateAllocateDescriptorSets(pAllocateInfo, ads_state);
}
// Allocation state was good and call down chain was made so update state based on allocating descriptor sets
void ValidationStateTracker::PostCallRecordAllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAllocateInfo,
VkDescriptorSet *pDescriptorSets, VkResult result,
void *ads_state_data) {
if (VK_SUCCESS != result) return;
// All the updates are contained in a single cvdescriptorset function
cvdescriptorset::AllocateDescriptorSetsData *ads_state =
reinterpret_cast<cvdescriptorset::AllocateDescriptorSetsData *>(ads_state_data);
PerformAllocateDescriptorSets(pAllocateInfo, pDescriptorSets, ads_state);
}
bool CoreChecks::PreCallValidateFreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t count,
const VkDescriptorSet *pDescriptorSets) {
    // First make sure that no sets being destroyed are currently in-use
    bool skip = false;
for (uint32_t i = 0; i < count; ++i) {
if (pDescriptorSets[i] != VK_NULL_HANDLE) {
skip |= ValidateIdleDescriptorSet(pDescriptorSets[i], "vkFreeDescriptorSets");
}
}
DESCRIPTOR_POOL_STATE *pool_state = GetDescriptorPoolState(descriptorPool);
if (pool_state && !(VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT & pool_state->createInfo.flags)) {
// Can't Free from a NON_FREE pool
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
HandleToUint64(descriptorPool), "VUID-vkFreeDescriptorSets-descriptorPool-00312",
"It is invalid to call vkFreeDescriptorSets() with a pool created without setting "
"VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT.");
}
return skip;
}
void ValidationStateTracker::PreCallRecordFreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t count,
const VkDescriptorSet *pDescriptorSets) {
DESCRIPTOR_POOL_STATE *pool_state = GetDescriptorPoolState(descriptorPool);
// Update available descriptor sets in pool
pool_state->availableSets += count;
// For each freed descriptor add its resources back into the pool as available and remove from pool and setMap
for (uint32_t i = 0; i < count; ++i) {
if (pDescriptorSets[i] != VK_NULL_HANDLE) {
auto descriptor_set = setMap[pDescriptorSets[i]].get();
uint32_t type_index = 0, descriptor_count = 0;
for (uint32_t j = 0; j < descriptor_set->GetBindingCount(); ++j) {
type_index = static_cast<uint32_t>(descriptor_set->GetTypeFromIndex(j));
descriptor_count = descriptor_set->GetDescriptorCountFromIndex(j);
pool_state->availableDescriptorTypeCount[type_index] += descriptor_count;
}
FreeDescriptorSet(descriptor_set);
pool_state->sets.erase(descriptor_set);
}
}
}
bool CoreChecks::PreCallValidateUpdateDescriptorSets(VkDevice device, uint32_t descriptorWriteCount,
const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
const VkCopyDescriptorSet *pDescriptorCopies) {
    // UpdateDescriptorSets is somewhat unique in that it operates on a number of DescriptorSets at once, so the
    // per-set map look-ups can't be done once up-front; they are performed individually in the functions below.
    // Only validate state here; no state updates are performed in this function. Because we don't yet have a single
    // instance, a namespace-level helper parses the params and makes calls into the specific class instances.
return ValidateUpdateDescriptorSets(descriptorWriteCount, pDescriptorWrites, descriptorCopyCount, pDescriptorCopies,
"vkUpdateDescriptorSets()");
}
void ValidationStateTracker::PreCallRecordUpdateDescriptorSets(VkDevice device, uint32_t descriptorWriteCount,
const VkWriteDescriptorSet *pDescriptorWrites,
uint32_t descriptorCopyCount,
const VkCopyDescriptorSet *pDescriptorCopies) {
cvdescriptorset::PerformUpdateDescriptorSets(this, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
pDescriptorCopies);
}
void ValidationStateTracker::PostCallRecordAllocateCommandBuffers(VkDevice device, const VkCommandBufferAllocateInfo *pCreateInfo,
VkCommandBuffer *pCommandBuffer, VkResult result) {
if (VK_SUCCESS != result) return;
auto pPool = GetCommandPoolState(pCreateInfo->commandPool);
if (pPool) {
for (uint32_t i = 0; i < pCreateInfo->commandBufferCount; i++) {
// Add command buffer to its commandPool map
pPool->commandBuffers.insert(pCommandBuffer[i]);
std::unique_ptr<CMD_BUFFER_STATE> pCB(new CMD_BUFFER_STATE{});
pCB->createInfo = *pCreateInfo;
pCB->device = device;
// Add command buffer to map
commandBufferMap[pCommandBuffer[i]] = std::move(pCB);
ResetCommandBufferState(pCommandBuffer[i]);
}
}
}
// Add bindings between the given cmd buffer & framebuffer and the framebuffer's children
void ValidationStateTracker::AddFramebufferBinding(CMD_BUFFER_STATE *cb_state, FRAMEBUFFER_STATE *fb_state) {
AddCommandBufferBinding(&fb_state->cb_bindings, VulkanTypedHandle(fb_state->framebuffer, kVulkanObjectTypeFramebuffer),
cb_state);
const uint32_t attachmentCount = fb_state->createInfo.attachmentCount;
for (uint32_t attachment = 0; attachment < attachmentCount; ++attachment) {
auto view_state = GetAttachmentImageViewState(fb_state, attachment);
if (view_state) {
AddCommandBufferBindingImageView(cb_state, view_state);
}
}
}
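// Validate vkBeginCommandBuffer(): the command buffer must not be in-flight or already recording, secondary
// buffers need inheritance info (with a compatible render pass when continuing one), and an implicit reset
// is only legal if the pool was created with the RESET_COMMAND_BUFFER bit.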
bool CoreChecks::PreCallValidateBeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo) {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
if (!cb_state) return false;
bool skip = false;
if (cb_state->in_use.load()) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkBeginCommandBuffer-commandBuffer-00049",
"Calling vkBeginCommandBuffer() on active %s before it has completed. You must check "
"command buffer fence before this call.",
report_data->FormatHandle(commandBuffer).c_str());
}
if (cb_state->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
// Secondary Command Buffer
const VkCommandBufferInheritanceInfo *pInfo = pBeginInfo->pInheritanceInfo;
if (!pInfo) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkBeginCommandBuffer-commandBuffer-00051",
"vkBeginCommandBuffer(): Secondary %s must have inheritance info.",
report_data->FormatHandle(commandBuffer).c_str());
} else {
if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
assert(pInfo->renderPass);
const auto *framebuffer = GetFramebufferState(pInfo->framebuffer);
if (framebuffer) {
if (framebuffer->createInfo.renderPass != pInfo->renderPass) {
const auto *render_pass = GetRenderPassState(pInfo->renderPass);
// renderPass that framebuffer was created with must be compatible with local renderPass
skip |= ValidateRenderPassCompatibility("framebuffer", framebuffer->rp_state.get(), "command buffer",
render_pass, "vkBeginCommandBuffer()",
"VUID-VkCommandBufferBeginInfo-flags-00055");
}
}
}
if ((pInfo->occlusionQueryEnable == VK_FALSE || enabled_features.core.occlusionQueryPrecise == VK_FALSE) &&
(pInfo->queryFlags & VK_QUERY_CONTROL_PRECISE_BIT)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkBeginCommandBuffer-commandBuffer-00052",
"vkBeginCommandBuffer(): Secondary %s must not have VK_QUERY_CONTROL_PRECISE_BIT if "
"occulusionQuery is disabled or the device does not support precise occlusion queries.",
report_data->FormatHandle(commandBuffer).c_str());
}
}
if (pInfo && pInfo->renderPass != VK_NULL_HANDLE) {
const auto *renderPass = GetRenderPassState(pInfo->renderPass);
if (renderPass) {
if (pInfo->subpass >= renderPass->createInfo.subpassCount) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-VkCommandBufferBeginInfo-flags-00054",
"vkBeginCommandBuffer(): Secondary %s must have a subpass index (%d) that is "
"less than the number of subpasses (%d).",
report_data->FormatHandle(commandBuffer).c_str(), pInfo->subpass,
renderPass->createInfo.subpassCount);
}
}
}
}
if (CB_RECORDING == cb_state->state) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkBeginCommandBuffer-commandBuffer-00049",
"vkBeginCommandBuffer(): Cannot call Begin on %s in the RECORDING state. Must first call "
"vkEndCommandBuffer().",
report_data->FormatHandle(commandBuffer).c_str());
} else if (CB_RECORDED == cb_state->state || CB_INVALID_COMPLETE == cb_state->state) {
VkCommandPool cmdPool = cb_state->createInfo.commandPool;
const auto *pPool = GetCommandPoolState(cmdPool);
if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & pPool->createFlags)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkBeginCommandBuffer-commandBuffer-00050",
"Call to vkBeginCommandBuffer() on %s attempts to implicitly reset cmdBuffer created from "
"%s that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.",
report_data->FormatHandle(commandBuffer).c_str(), report_data->FormatHandle(cmdPool).c_str());
}
}
auto chained_device_group_struct = lvl_find_in_chain<VkDeviceGroupCommandBufferBeginInfo>(pBeginInfo->pNext);
if (chained_device_group_struct) {
skip |= ValidateDeviceMaskToPhysicalDeviceCount(
chained_device_group_struct->deviceMask, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer),
"VUID-VkDeviceGroupCommandBufferBeginInfo-deviceMask-00106");
skip |=
ValidateDeviceMaskToZero(chained_device_group_struct->deviceMask, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-VkDeviceGroupCommandBufferBeginInfo-deviceMask-00107");
}
return skip;
}
void ValidationStateTracker::PreCallRecordBeginCommandBuffer(VkCommandBuffer commandBuffer,
const VkCommandBufferBeginInfo *pBeginInfo) {
CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
if (!cb_state) return;
// This implicitly resets the Cmd Buffer so make sure any fence is done and then clear memory references
ClearCmdBufAndMemReferences(cb_state);
if (cb_state->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
// Secondary Command Buffer
const VkCommandBufferInheritanceInfo *pInfo = pBeginInfo->pInheritanceInfo;
if (pInfo) {
if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
assert(pInfo->renderPass);
auto framebuffer = GetFramebufferState(pInfo->framebuffer);
if (framebuffer) {
// Connect this framebuffer and its children to this cmdBuffer
AddFramebufferBinding(cb_state, framebuffer);
}
}
}
}
if (CB_RECORDED == cb_state->state || CB_INVALID_COMPLETE == cb_state->state) {
ResetCommandBufferState(commandBuffer);
}
// Set updated state here in case implicit reset occurs above
cb_state->state = CB_RECORDING;
cb_state->beginInfo = *pBeginInfo;
if (cb_state->beginInfo.pInheritanceInfo) {
cb_state->inheritanceInfo = *(cb_state->beginInfo.pInheritanceInfo);
cb_state->beginInfo.pInheritanceInfo = &cb_state->inheritanceInfo;
        // If this is a secondary command buffer that is inheriting render pass state, update the items we should inherit.
if ((cb_state->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) &&
(cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
cb_state->activeRenderPass = GetRenderPassState(cb_state->beginInfo.pInheritanceInfo->renderPass);
cb_state->activeSubpass = cb_state->beginInfo.pInheritanceInfo->subpass;
cb_state->activeFramebuffer = cb_state->beginInfo.pInheritanceInfo->framebuffer;
cb_state->framebuffers.insert(cb_state->beginInfo.pInheritanceInfo->framebuffer);
}
}
auto chained_device_group_struct = lvl_find_in_chain<VkDeviceGroupCommandBufferBeginInfo>(pBeginInfo->pNext);
if (chained_device_group_struct) {
cb_state->initial_device_mask = chained_device_group_struct->deviceMask;
} else {
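        // Without a device group begin info, the command buffer is valid on every physical device in the group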
cb_state->initial_device_mask = (1 << physical_device_count) - 1;
}
}
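// Validate vkEndCommandBuffer(): primary buffers (and secondaries not continuing a render pass) must be
// outside an active render pass, and no queries may still be in progress.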
bool CoreChecks::PreCallValidateEndCommandBuffer(VkCommandBuffer commandBuffer) {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
if (!cb_state) return false;
bool skip = false;
if ((VK_COMMAND_BUFFER_LEVEL_PRIMARY == cb_state->createInfo.level) ||
!(cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
// This needs spec clarification to update valid usage, see comments in PR:
// https://github.com/KhronosGroup/Vulkan-ValidationLayers/issues/165
skip |= InsideRenderPass(cb_state, "vkEndCommandBuffer()", "VUID-vkEndCommandBuffer-commandBuffer-00060");
}
skip |= ValidateCmd(cb_state, CMD_ENDCOMMANDBUFFER, "vkEndCommandBuffer()");
for (auto query : cb_state->activeQueries) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkEndCommandBuffer-commandBuffer-00061",
"Ending command buffer with in progress query: %s, query %d.",
report_data->FormatHandle(query.pool).c_str(), query.query);
}
return skip;
}
void ValidationStateTracker::PostCallRecordEndCommandBuffer(VkCommandBuffer commandBuffer, VkResult result) {
CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
if (!cb_state) return;
// Cached validation is specific to a specific recording of a specific command buffer.
for (auto descriptor_set : cb_state->validated_descriptor_sets) {
descriptor_set->ClearCachedValidation(cb_state);
}
cb_state->validated_descriptor_sets.clear();
if (VK_SUCCESS == result) {
cb_state->state = CB_RECORDED;
}
}
bool CoreChecks::PreCallValidateResetCommandBuffer(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags) {
bool skip = false;
const CMD_BUFFER_STATE *pCB = GetCBState(commandBuffer);
if (!pCB) return false;
VkCommandPool cmdPool = pCB->createInfo.commandPool;
const auto *pPool = GetCommandPoolState(cmdPool);
if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & pPool->createFlags)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkResetCommandBuffer-commandBuffer-00046",
"Attempt to reset %s created from %s that does NOT have the "
"VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.",
report_data->FormatHandle(commandBuffer).c_str(), report_data->FormatHandle(cmdPool).c_str());
}
skip |= CheckCommandBufferInFlight(pCB, "reset", "VUID-vkResetCommandBuffer-commandBuffer-00045");
return skip;
}
void ValidationStateTracker::PostCallRecordResetCommandBuffer(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags,
VkResult result) {
if (VK_SUCCESS == result) {
ResetCommandBufferState(commandBuffer);
}
}
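// Map a pipeline bind point to a human-readable name for error messages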
static const char *GetPipelineTypeName(VkPipelineBindPoint pipelineBindPoint) {
switch (pipelineBindPoint) {
case VK_PIPELINE_BIND_POINT_GRAPHICS:
return "graphics";
case VK_PIPELINE_BIND_POINT_COMPUTE:
return "compute";
case VK_PIPELINE_BIND_POINT_RAY_TRACING_NV:
return "ray-tracing";
default:
return "unknown";
}
}
bool CoreChecks::PreCallValidateCmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
VkPipeline pipeline) {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdBindPipeline()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
"VUID-vkCmdBindPipeline-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_BINDPIPELINE, "vkCmdBindPipeline()");
static const std::map<VkPipelineBindPoint, std::string> bindpoint_errors = {
std::make_pair(VK_PIPELINE_BIND_POINT_GRAPHICS, "VUID-vkCmdBindPipeline-pipelineBindPoint-00777"),
std::make_pair(VK_PIPELINE_BIND_POINT_COMPUTE, "VUID-vkCmdBindPipeline-pipelineBindPoint-00778"),
std::make_pair(VK_PIPELINE_BIND_POINT_RAY_TRACING_NV, "VUID-vkCmdBindPipeline-pipelineBindPoint-02391")};
skip |= ValidatePipelineBindPoint(cb_state, pipelineBindPoint, "vkCmdBindPipeline()", bindpoint_errors);
const auto *pipeline_state = GetPipelineState(pipeline);
assert(pipeline_state);
const auto &pipeline_state_bind_point = pipeline_state->getPipelineType();
if (pipelineBindPoint != pipeline_state_bind_point) {
if (pipelineBindPoint == VK_PIPELINE_BIND_POINT_GRAPHICS) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer), "VUID-vkCmdBindPipeline-pipelineBindPoint-00779",
"Cannot bind a pipeline of type %s to the graphics pipeline bind point",
GetPipelineTypeName(pipeline_state_bind_point));
} else if (pipelineBindPoint == VK_PIPELINE_BIND_POINT_COMPUTE) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer), "VUID-vkCmdBindPipeline-pipelineBindPoint-00780",
"Cannot bind a pipeline of type %s to the compute pipeline bind point",
GetPipelineTypeName(pipeline_state_bind_point));
} else if (pipelineBindPoint == VK_PIPELINE_BIND_POINT_RAY_TRACING_NV) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer), "VUID-vkCmdBindPipeline-pipelineBindPoint-02392",
"Cannot bind a pipeline of type %s to the ray-tracing pipeline bind point",
GetPipelineTypeName(pipeline_state_bind_point));
}
}
return skip;
}
void ValidationStateTracker::PreCallRecordCmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
VkPipeline pipeline) {
CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
auto pipe_state = GetPipelineState(pipeline);
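    // For graphics pipelines, recompute which dynamic-state bits are statically baked into the new pipeline:
    // clear the previous pipeline's static bits from status, then mark the new pipeline's static state as set.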
if (VK_PIPELINE_BIND_POINT_GRAPHICS == pipelineBindPoint) {
cb_state->status &= ~cb_state->static_status;
cb_state->static_status = MakeStaticStateMask(pipe_state->graphicsPipelineCI.ptr()->pDynamicState);
cb_state->status |= cb_state->static_status;
}
cb_state->lastBound[pipelineBindPoint].pipeline_state = pipe_state;
SetPipelineState(pipe_state);
AddCommandBufferBinding(&pipe_state->cb_bindings, VulkanTypedHandle(pipeline, kVulkanObjectTypePipeline), cb_state);
}
bool CoreChecks::PreCallValidateCmdSetViewport(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount,
const VkViewport *pViewports) {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip =
ValidateCmdQueueFlags(cb_state, "vkCmdSetViewport()", VK_QUEUE_GRAPHICS_BIT, "VUID-vkCmdSetViewport-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_SETVIEWPORT, "vkCmdSetViewport()");
if (cb_state->static_status & CBSTATUS_VIEWPORT_SET) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdSetViewport-None-01221",
"vkCmdSetViewport(): pipeline was created without VK_DYNAMIC_STATE_VIEWPORT flag.");
}
return skip;
}
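// Accumulate a bit per viewport index in viewportMask so that later validation can check which
// viewports have been set.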
void ValidationStateTracker::PreCallRecordCmdSetViewport(VkCommandBuffer commandBuffer, uint32_t firstViewport,
uint32_t viewportCount, const VkViewport *pViewports) {
CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
cb_state->viewportMask |= ((1u << viewportCount) - 1u) << firstViewport;
cb_state->status |= CBSTATUS_VIEWPORT_SET;
}
bool CoreChecks::PreCallValidateCmdSetScissor(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount,
const VkRect2D *pScissors) {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip =
ValidateCmdQueueFlags(cb_state, "vkCmdSetScissor()", VK_QUEUE_GRAPHICS_BIT, "VUID-vkCmdSetScissor-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_SETSCISSOR, "vkCmdSetScissor()");
if (cb_state->static_status & CBSTATUS_SCISSOR_SET) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdSetScissor-None-00590",
"vkCmdSetScissor(): pipeline was created without VK_DYNAMIC_STATE_SCISSOR flag..");
}
return skip;
}
void ValidationStateTracker::PreCallRecordCmdSetScissor(VkCommandBuffer commandBuffer, uint32_t firstScissor,
uint32_t scissorCount, const VkRect2D *pScissors) {
CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
cb_state->scissorMask |= ((1u << scissorCount) - 1u) << firstScissor;
cb_state->status |= CBSTATUS_SCISSOR_SET;
}
bool CoreChecks::PreCallValidateCmdSetExclusiveScissorNV(VkCommandBuffer commandBuffer, uint32_t firstExclusiveScissor,
uint32_t exclusiveScissorCount, const VkRect2D *pExclusiveScissors) {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetExclusiveScissorNV()", VK_QUEUE_GRAPHICS_BIT,
"VUID-vkCmdSetExclusiveScissorNV-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_SETEXCLUSIVESCISSORNV, "vkCmdSetExclusiveScissorNV()");
if (cb_state->static_status & CBSTATUS_EXCLUSIVE_SCISSOR_SET) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdSetExclusiveScissorNV-None-02032",
"vkCmdSetExclusiveScissorNV(): pipeline was created without VK_DYNAMIC_STATE_EXCLUSIVE_SCISSOR_NV flag.");
}
if (!enabled_features.exclusive_scissor.exclusiveScissor) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdSetExclusiveScissorNV-None-02031",
"vkCmdSetExclusiveScissorNV: The exclusiveScissor feature is disabled.");
}
return skip;
}
void ValidationStateTracker::PreCallRecordCmdSetExclusiveScissorNV(VkCommandBuffer commandBuffer, uint32_t firstExclusiveScissor,
uint32_t exclusiveScissorCount,
const VkRect2D *pExclusiveScissors) {
CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
// TODO: We don't have VUIDs for validating that all exclusive scissors have been set.
// cb_state->exclusiveScissorMask |= ((1u << exclusiveScissorCount) - 1u) << firstExclusiveScissor;
cb_state->status |= CBSTATUS_EXCLUSIVE_SCISSOR_SET;
}
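// Validate vkCmdBindShadingRateImageNV(): the shadingRateImage feature must be enabled and, for a non-null
// view, the view type, format, image usage, and image layout must all meet the shading-rate-image rules.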
bool CoreChecks::PreCallValidateCmdBindShadingRateImageNV(VkCommandBuffer commandBuffer, VkImageView imageView,
VkImageLayout imageLayout) {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdBindShadingRateImageNV()", VK_QUEUE_GRAPHICS_BIT,
"VUID-vkCmdBindShadingRateImageNV-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_BINDSHADINGRATEIMAGENV, "vkCmdBindShadingRateImageNV()");
if (!enabled_features.shading_rate_image.shadingRateImage) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdBindShadingRateImageNV-None-02058",
"vkCmdBindShadingRateImageNV: The shadingRateImage feature is disabled.");
}
if (imageView != VK_NULL_HANDLE) {
        const auto view_state = GetImageViewState(imageView);
        // Don't dereference view_state until it is known to be non-null; an unrecognized imageView yields nullptr
        const auto *ivci = view_state ? &view_state->create_info : nullptr;
        if (!view_state || (ivci->viewType != VK_IMAGE_VIEW_TYPE_2D && ivci->viewType != VK_IMAGE_VIEW_TYPE_2D_ARRAY)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
HandleToUint64(imageView), "VUID-vkCmdBindShadingRateImageNV-imageView-02059",
"vkCmdBindShadingRateImageNV: If imageView is not VK_NULL_HANDLE, it must be a valid "
"VkImageView handle of type VK_IMAGE_VIEW_TYPE_2D or VK_IMAGE_VIEW_TYPE_2D_ARRAY.");
}
        if (view_state && ivci->format != VK_FORMAT_R8_UINT) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT, HandleToUint64(imageView),
"VUID-vkCmdBindShadingRateImageNV-imageView-02060",
"vkCmdBindShadingRateImageNV: If imageView is not VK_NULL_HANDLE, it must have a format of VK_FORMAT_R8_UINT.");
}
const VkImageCreateInfo *ici = view_state ? &GetImageState(view_state->create_info.image)->createInfo : nullptr;
if (ici && !(ici->usage & VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
HandleToUint64(imageView), "VUID-vkCmdBindShadingRateImageNV-imageView-02061",
"vkCmdBindShadingRateImageNV: If imageView is not VK_NULL_HANDLE, the image must have been "
"created with VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV set.");
}
if (view_state) {
const auto image_state = GetImageState(view_state->create_info.image);
bool hit_error = false;
// XXX TODO: While the VUID says "each subresource", only the base mip level is
// actually used. Since we don't have an existing convenience function to iterate
// over all mip levels, just don't bother with non-base levels.
VkImageSubresourceRange &range = view_state->create_info.subresourceRange;
VkImageSubresourceLayers subresource = {range.aspectMask, range.baseMipLevel, range.baseArrayLayer, range.layerCount};
if (image_state) {
skip |= VerifyImageLayout(cb_state, image_state, subresource, imageLayout, VK_IMAGE_LAYOUT_SHADING_RATE_OPTIMAL_NV,
"vkCmdCopyImage()", "VUID-vkCmdBindShadingRateImageNV-imageLayout-02063",
"VUID-vkCmdBindShadingRateImageNV-imageView-02062", &hit_error);
}
}
}
return skip;
}
void ValidationStateTracker::PreCallRecordCmdBindShadingRateImageNV(VkCommandBuffer commandBuffer, VkImageView imageView,
VkImageLayout imageLayout) {
CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
if (imageView != VK_NULL_HANDLE) {
auto view_state = GetImageViewState(imageView);
AddCommandBufferBindingImageView(cb_state, view_state);
}
}
bool CoreChecks::PreCallValidateCmdSetViewportShadingRatePaletteNV(VkCommandBuffer commandBuffer, uint32_t firstViewport,
uint32_t viewportCount,
const VkShadingRatePaletteNV *pShadingRatePalettes) {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetViewportShadingRatePaletteNV()", VK_QUEUE_GRAPHICS_BIT,
"VUID-vkCmdSetViewportShadingRatePaletteNV-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_SETVIEWPORTSHADINGRATEPALETTENV, "vkCmdSetViewportShadingRatePaletteNV()");
if (!enabled_features.shading_rate_image.shadingRateImage) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdSetViewportShadingRatePaletteNV-None-02064",
"vkCmdSetViewportShadingRatePaletteNV: The shadingRateImage feature is disabled.");
}
if (cb_state->static_status & CBSTATUS_SHADING_RATE_PALETTE_SET) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdSetViewportShadingRatePaletteNV-None-02065",
"vkCmdSetViewportShadingRatePaletteNV(): pipeline was created without "
"VK_DYNAMIC_STATE_VIEWPORT_SHADING_RATE_PALETTE_NV flag.");
}
for (uint32_t i = 0; i < viewportCount; ++i) {
auto *palette = &pShadingRatePalettes[i];
if (palette->shadingRatePaletteEntryCount == 0 ||
palette->shadingRatePaletteEntryCount > phys_dev_ext_props.shading_rate_image_props.shadingRatePaletteSize) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-VkShadingRatePaletteNV-shadingRatePaletteEntryCount-02071",
"vkCmdSetViewportShadingRatePaletteNV: shadingRatePaletteEntryCount must be between 1 and shadingRatePaletteSize.");
}
}
return skip;
}
void ValidationStateTracker::PreCallRecordCmdSetViewportShadingRatePaletteNV(VkCommandBuffer commandBuffer, uint32_t firstViewport,
uint32_t viewportCount,
const VkShadingRatePaletteNV *pShadingRatePalettes) {
CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
// TODO: We don't have VUIDs for validating that all shading rate palettes have been set.
// cb_state->shadingRatePaletteMask |= ((1u << viewportCount) - 1u) << firstViewport;
cb_state->status |= CBSTATUS_SHADING_RATE_PALETTE_SET;
}
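// Helpers shared by vkCreateAccelerationStructureNV() and vkCmdBuildAccelerationStructureNV() validation:
// verify that each geometry's data offsets fall within the size of the bound buffer.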
bool CoreChecks::ValidateGeometryTrianglesNV(const VkGeometryTrianglesNV &triangles, VkDebugReportObjectTypeEXT object_type,
uint64_t object_handle, const char *func_name) const {
bool skip = false;
const BUFFER_STATE *vb_state = GetBufferState(triangles.vertexData);
if (vb_state != nullptr && vb_state->binding.size <= triangles.vertexOffset) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, object_type, object_handle,
"VUID-VkGeometryTrianglesNV-vertexOffset-02428", "%s", func_name);
}
const BUFFER_STATE *ib_state = GetBufferState(triangles.indexData);
if (ib_state != nullptr && ib_state->binding.size <= triangles.indexOffset) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, object_type, object_handle,
"VUID-VkGeometryTrianglesNV-indexOffset-02431", "%s", func_name);
}
const BUFFER_STATE *td_state = GetBufferState(triangles.transformData);
if (td_state != nullptr && td_state->binding.size <= triangles.transformOffset) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, object_type, object_handle,
"VUID-VkGeometryTrianglesNV-transformOffset-02437", "%s", func_name);
}
return skip;
}
bool CoreChecks::ValidateGeometryAABBNV(const VkGeometryAABBNV &aabbs, VkDebugReportObjectTypeEXT object_type,
uint64_t object_handle, const char *func_name) const {
bool skip = false;
const BUFFER_STATE *aabb_state = GetBufferState(aabbs.aabbData);
if (aabb_state != nullptr && aabb_state->binding.size > 0 && aabb_state->binding.size <= aabbs.offset) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, object_type, object_handle,
"VUID-VkGeometryAABBNV-offset-02439", "%s", func_name);
}
return skip;
}
bool CoreChecks::ValidateGeometryNV(const VkGeometryNV &geometry, VkDebugReportObjectTypeEXT object_type, uint64_t object_handle,
const char *func_name) const {
bool skip = false;
if (geometry.geometryType == VK_GEOMETRY_TYPE_TRIANGLES_NV) {
skip = ValidateGeometryTrianglesNV(geometry.geometry.triangles, object_type, object_handle, func_name);
} else if (geometry.geometryType == VK_GEOMETRY_TYPE_AABBS_NV) {
skip = ValidateGeometryAABBNV(geometry.geometry.aabbs, object_type, object_handle, func_name);
}
return skip;
}
bool CoreChecks::PreCallValidateCreateAccelerationStructureNV(VkDevice device,
const VkAccelerationStructureCreateInfoNV *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkAccelerationStructureNV *pAccelerationStructure) {
bool skip = false;
if (pCreateInfo != nullptr && pCreateInfo->info.type == VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_NV) {
for (uint32_t i = 0; i < pCreateInfo->info.geometryCount; i++) {
skip |= ValidateGeometryNV(pCreateInfo->info.pGeometries[i], VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), "vkCreateAccelerationStructureNV():");
}
}
return skip;
}
void ValidationStateTracker::PostCallRecordCreateAccelerationStructureNV(VkDevice device,
const VkAccelerationStructureCreateInfoNV *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkAccelerationStructureNV *pAccelerationStructure,
VkResult result) {
if (VK_SUCCESS != result) return;
std::unique_ptr<ACCELERATION_STRUCTURE_STATE> as_state(new ACCELERATION_STRUCTURE_STATE(*pAccelerationStructure, pCreateInfo));
    // Query the requirements up front in case the application doesn't, to avoid having to query at bind/validation time
VkAccelerationStructureMemoryRequirementsInfoNV as_memory_requirements_info = {};
as_memory_requirements_info.sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_INFO_NV;
as_memory_requirements_info.type = VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_OBJECT_NV;
as_memory_requirements_info.accelerationStructure = as_state->acceleration_structure;
DispatchGetAccelerationStructureMemoryRequirementsNV(device, &as_memory_requirements_info, &as_state->memory_requirements);
VkAccelerationStructureMemoryRequirementsInfoNV scratch_memory_req_info = {};
scratch_memory_req_info.sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_INFO_NV;
scratch_memory_req_info.type = VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_BUILD_SCRATCH_NV;
scratch_memory_req_info.accelerationStructure = as_state->acceleration_structure;
DispatchGetAccelerationStructureMemoryRequirementsNV(device, &scratch_memory_req_info,
&as_state->build_scratch_memory_requirements);
VkAccelerationStructureMemoryRequirementsInfoNV update_memory_req_info = {};
update_memory_req_info.sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_INFO_NV;
update_memory_req_info.type = VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_UPDATE_SCRATCH_NV;
update_memory_req_info.accelerationStructure = as_state->acceleration_structure;
DispatchGetAccelerationStructureMemoryRequirementsNV(device, &update_memory_req_info,
&as_state->update_scratch_memory_requirements);
accelerationStructureMap[*pAccelerationStructure] = std::move(as_state);
}
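// Cache the requirements the application queried and remember that the query happened, so bind-time
// validation can use the values and warn when no query was made.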
void CoreChecks::PostCallRecordGetAccelerationStructureMemoryRequirementsNV(
VkDevice device, const VkAccelerationStructureMemoryRequirementsInfoNV *pInfo, VkMemoryRequirements2KHR *pMemoryRequirements) {
ACCELERATION_STRUCTURE_STATE *as_state = GetAccelerationStructureState(pInfo->accelerationStructure);
if (as_state != nullptr) {
if (pInfo->type == VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_OBJECT_NV) {
as_state->memory_requirements = *pMemoryRequirements;
as_state->memory_requirements_checked = true;
} else if (pInfo->type == VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_BUILD_SCRATCH_NV) {
as_state->build_scratch_memory_requirements = *pMemoryRequirements;
as_state->build_scratch_memory_requirements_checked = true;
} else if (pInfo->type == VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_UPDATE_SCRATCH_NV) {
as_state->update_scratch_memory_requirements = *pMemoryRequirements;
as_state->update_scratch_memory_requirements_checked = true;
}
}
}
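// Validate a single VkBindAccelerationStructureMemoryInfoNV: the structure must not already be bound,
// the requirements should have been queried, and the offset/size must satisfy alignment and allocation size.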
bool CoreChecks::ValidateBindAccelerationStructureMemoryNV(VkDevice device,
const VkBindAccelerationStructureMemoryInfoNV &info) const {
bool skip = false;
const ACCELERATION_STRUCTURE_STATE *as_state = GetAccelerationStructureState(info.accelerationStructure);
if (!as_state) {
return skip;
}
uint64_t as_handle = HandleToUint64(info.accelerationStructure);
if (!as_state->GetBoundMemory().empty()) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_ACCELERATION_STRUCTURE_NV_EXT,
as_handle, "VUID-VkBindAccelerationStructureMemoryInfoNV-accelerationStructure-02450",
"vkBindAccelerationStructureMemoryNV(): accelerationStructure must not already be backed by a memory object.");
}
if (!as_state->memory_requirements_checked) {
        // There's not an explicit requirement in the spec to call vkGetAccelerationStructureMemoryRequirementsNV() prior to
        // calling vkBindAccelerationStructureMemoryNV, but it's implied in that the memory being bound must conform with the
        // VkAccelerationStructureMemoryRequirementsInfoNV from vkGetAccelerationStructureMemoryRequirementsNV
skip |= log_msg(
            report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_ACCELERATION_STRUCTURE_NV_EXT, as_handle,
kVUID_Core_BindAccelNV_NoMemReqQuery,
"vkBindAccelerationStructureMemoryNV(): "
"Binding memory to %s but vkGetAccelerationStructureMemoryRequirementsNV() has not been called on that structure.",
report_data->FormatHandle(info.accelerationStructure).c_str());
// Use requirements gathered at create time for validation below...
}
// Validate bound memory range information
const auto mem_info = GetDevMemState(info.memory);
if (mem_info) {
skip |= ValidateInsertAccelerationStructureMemoryRange(info.accelerationStructure, mem_info, info.memoryOffset,
as_state->memory_requirements.memoryRequirements,
"vkBindAccelerationStructureMemoryNV()");
skip |= ValidateMemoryTypes(mem_info, as_state->memory_requirements.memoryRequirements.memoryTypeBits,
"vkBindAccelerationStructureMemoryNV()",
"VUID-VkBindAccelerationStructureMemoryInfoNV-memory-02593");
}
// Validate memory requirements alignment
if (SafeModulo(info.memoryOffset, as_state->memory_requirements.memoryRequirements.alignment) != 0) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_ACCELERATION_STRUCTURE_NV_EXT,
as_handle, "VUID-VkBindAccelerationStructureMemoryInfoNV-memoryOffset-02594",
"vkBindAccelerationStructureMemoryNV(): memoryOffset is 0x%" PRIxLEAST64
" but must be an integer multiple of the VkMemoryRequirements::alignment value 0x%" PRIxLEAST64
", returned from a call to vkGetAccelerationStructureMemoryRequirementsNV with accelerationStructure"
"and type of VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_OBJECT_NV.",
info.memoryOffset, as_state->memory_requirements.memoryRequirements.alignment);
}
if (mem_info) {
// Validate memory requirements size
if (as_state->memory_requirements.memoryRequirements.size > (mem_info->alloc_info.allocationSize - info.memoryOffset)) {
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_ACCELERATION_STRUCTURE_NV_EXT, as_handle,
"VUID-VkBindAccelerationStructureMemoryInfoNV-size-02595",
"vkBindAccelerationStructureMemoryNV(): memory size minus memoryOffset is 0x%" PRIxLEAST64
" but must be at least as large as VkMemoryRequirements::size value 0x%" PRIxLEAST64
", returned from a call to vkGetAccelerationStructureMemoryRequirementsNV with accelerationStructure"
"and type of VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_OBJECT_NV.",
mem_info->alloc_info.allocationSize - info.memoryOffset,
as_state->memory_requirements.memoryRequirements.size);
}
}
return skip;
}
bool CoreChecks::PreCallValidateBindAccelerationStructureMemoryNV(VkDevice device, uint32_t bindInfoCount,
const VkBindAccelerationStructureMemoryInfoNV *pBindInfos) {
bool skip = false;
for (uint32_t i = 0; i < bindInfoCount; i++) {
skip |= ValidateBindAccelerationStructureMemoryNV(device, pBindInfos[i]);
}
return skip;
}
bool CoreChecks::PreCallValidateGetAccelerationStructureHandleNV(VkDevice device, VkAccelerationStructureNV accelerationStructure,
size_t dataSize, void *pData) {
bool skip = false;
const ACCELERATION_STRUCTURE_STATE *as_state = GetAccelerationStructureState(accelerationStructure);
if (as_state != nullptr) {
// TODO: update the fake VUID below once the real one is generated.
skip = ValidateMemoryIsBoundToAccelerationStructure(
as_state, "vkGetAccelerationStructureHandleNV",
"UNASSIGNED-vkGetAccelerationStructureHandleNV-accelerationStructure-XXXX");
}
return skip;
}
void ValidationStateTracker::PostCallRecordBindAccelerationStructureMemoryNV(
VkDevice device, uint32_t bindInfoCount, const VkBindAccelerationStructureMemoryInfoNV *pBindInfos, VkResult result) {
if (VK_SUCCESS != result) return;
for (uint32_t i = 0; i < bindInfoCount; i++) {
const VkBindAccelerationStructureMemoryInfoNV &info = pBindInfos[i];
ACCELERATION_STRUCTURE_STATE *as_state = GetAccelerationStructureState(info.accelerationStructure);
if (as_state) {
// Track bound memory range information
auto mem_info = GetDevMemState(info.memory);
if (mem_info) {
InsertAccelerationStructureMemoryRange(info.accelerationStructure, mem_info, info.memoryOffset,
as_state->requirements);
}
// Track objects tied to memory
SetMemBinding(info.memory, as_state, info.memoryOffset,
VulkanTypedHandle(info.accelerationStructure, kVulkanObjectTypeAccelerationStructureNV));
}
}
}
bool CoreChecks::PreCallValidateCmdBuildAccelerationStructureNV(VkCommandBuffer commandBuffer,
const VkAccelerationStructureInfoNV *pInfo, VkBuffer instanceData,
VkDeviceSize instanceOffset, VkBool32 update,
VkAccelerationStructureNV dst, VkAccelerationStructureNV src,
VkBuffer scratch, VkDeviceSize scratchOffset) {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdBuildAccelerationStructureNV()", VK_QUEUE_COMPUTE_BIT,
"VUID-vkCmdBuildAccelerationStructureNV-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_BUILDACCELERATIONSTRUCTURENV, "vkCmdBuildAccelerationStructureNV()");
if (pInfo != nullptr && pInfo->type == VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_NV) {
for (uint32_t i = 0; i < pInfo->geometryCount; i++) {
skip |= ValidateGeometryNV(pInfo->pGeometries[i], VK_DEBUG_REPORT_OBJECT_TYPE_ACCELERATION_STRUCTURE_NV_EXT,
HandleToUint64(device), "vkCmdBuildAccelerationStructureNV():");
}
}
if (pInfo != nullptr && pInfo->geometryCount > phys_dev_ext_props.ray_tracing_props.maxGeometryCount) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdBuildAccelerationStructureNV-geometryCount-02241",
"vkCmdBuildAccelerationStructureNV(): geometryCount [%d] must be less than or equal to "
"VkPhysicalDeviceRayTracingPropertiesNV::maxGeometryCount.",
pInfo->geometryCount);
}
const ACCELERATION_STRUCTURE_STATE *dst_as_state = GetAccelerationStructureState(dst);
const ACCELERATION_STRUCTURE_STATE *src_as_state = GetAccelerationStructureState(src);
const BUFFER_STATE *scratch_buffer_state = GetBufferState(scratch);
if (dst_as_state != nullptr && pInfo != nullptr) {
if (dst_as_state->create_info.info.type != pInfo->type) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdBuildAccelerationStructureNV-dst-02488",
"vkCmdBuildAccelerationStructureNV(): create info VkAccelerationStructureInfoNV::type"
"[%s] must be identical to build info VkAccelerationStructureInfoNV::type [%s].",
string_VkAccelerationStructureTypeNV(dst_as_state->create_info.info.type),
string_VkAccelerationStructureTypeNV(pInfo->type));
}
if (dst_as_state->create_info.info.flags != pInfo->flags) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdBuildAccelerationStructureNV-dst-02488",
"vkCmdBuildAccelerationStructureNV(): create info VkAccelerationStructureInfoNV::flags"
"[0x%X] must be identical to build info VkAccelerationStructureInfoNV::flags [0x%X].",
dst_as_state->create_info.info.flags, pInfo->flags);
}
if (dst_as_state->create_info.info.instanceCount < pInfo->instanceCount) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdBuildAccelerationStructureNV-dst-02488",
"vkCmdBuildAccelerationStructureNV(): create info VkAccelerationStructureInfoNV::instanceCount "
"[%d] must be greater than or equal to build info VkAccelerationStructureInfoNV::instanceCount [%d].",
dst_as_state->create_info.info.instanceCount, pInfo->instanceCount);
}
if (dst_as_state->create_info.info.geometryCount < pInfo->geometryCount) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdBuildAccelerationStructureNV-dst-02488",
"vkCmdBuildAccelerationStructureNV(): create info VkAccelerationStructureInfoNV::geometryCount"
"[%d] must be greater than or equal to build info VkAccelerationStructureInfoNV::geometryCount [%d].",
dst_as_state->create_info.info.geometryCount, pInfo->geometryCount);
} else {
for (uint32_t i = 0; i < pInfo->geometryCount; i++) {
const VkGeometryDataNV &create_geometry_data = dst_as_state->create_info.info.pGeometries[i].geometry;
const VkGeometryDataNV &build_geometry_data = pInfo->pGeometries[i].geometry;
if (create_geometry_data.triangles.vertexCount < build_geometry_data.triangles.vertexCount) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdBuildAccelerationStructureNV-dst-02488",
"vkCmdBuildAccelerationStructureNV(): create info pGeometries[%d].geometry.triangles.vertexCount [%d]"
"must be greater than or equal to build info pGeometries[%d].geometry.triangles.vertexCount [%d].",
i, create_geometry_data.triangles.vertexCount, i, build_geometry_data.triangles.vertexCount);
break;
}
if (create_geometry_data.triangles.indexCount < build_geometry_data.triangles.indexCount) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdBuildAccelerationStructureNV-dst-02488",
"vkCmdBuildAccelerationStructureNV(): create info pGeometries[%d].geometry.triangles.indexCount [%d]"
"must be greater than or equal to build info pGeometries[%d].geometry.triangles.indexCount [%d].",
i, create_geometry_data.triangles.indexCount, i, build_geometry_data.triangles.indexCount);
break;
}
if (create_geometry_data.aabbs.numAABBs < build_geometry_data.aabbs.numAABBs) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdBuildAccelerationStructureNV-dst-02488",
"vkCmdBuildAccelerationStructureNV(): create info pGeometries[%d].geometry.aabbs.numAABBs [%d]"
"must be greater than or equal to build info pGeometries[%d].geometry.aabbs.numAABBs [%d].",
i, create_geometry_data.aabbs.numAABBs, i, build_geometry_data.aabbs.numAABBs);
break;
}
}
}
}
if (dst_as_state != nullptr) {
skip |= ValidateMemoryIsBoundToAccelerationStructure(
dst_as_state, "vkCmdBuildAccelerationStructureNV()",
"UNASSIGNED-CoreValidation-DrawState-InvalidCommandBuffer-VkAccelerationStructureNV");
}
if (update == VK_TRUE) {
if (src == VK_NULL_HANDLE) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdBuildAccelerationStructureNV-update-02489",
"vkCmdBuildAccelerationStructureNV(): If update is VK_TRUE, src must not be VK_NULL_HANDLE.");
} else {
if (src_as_state == nullptr || !src_as_state->built ||
!(src_as_state->build_info.flags & VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_NV)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdBuildAccelerationStructureNV-update-02489",
"vkCmdBuildAccelerationStructureNV(): If update is VK_TRUE, src must have been built before "
"with VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_NV set in "
"VkAccelerationStructureInfoNV::flags.");
}
}
if (dst_as_state != nullptr && !dst_as_state->update_scratch_memory_requirements_checked) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_ACCELERATION_STRUCTURE_NV_EXT,
HandleToUint64(dst), kVUID_Core_CmdBuildAccelNV_NoUpdateMemReqQuery,
"vkCmdBuildAccelerationStructureNV(): Updating %s but vkGetAccelerationStructureMemoryRequirementsNV() "
"has not been called for update scratch memory.",
report_data->FormatHandle(dst_as_state->acceleration_structure).c_str());
// Use requirements fetched at create time
}
if (scratch_buffer_state != nullptr && dst_as_state != nullptr &&
dst_as_state->update_scratch_memory_requirements.memoryRequirements.size >
(scratch_buffer_state->binding.size - (scratch_buffer_state->binding.offset + scratchOffset))) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdBuildAccelerationStructureNV-update-02492",
"vkCmdBuildAccelerationStructureNV(): If update is VK_TRUE, The size member of the "
"VkMemoryRequirements structure returned from a call to "
"vkGetAccelerationStructureMemoryRequirementsNV with "
"VkAccelerationStructureMemoryRequirementsInfoNV::accelerationStructure set to dst and "
"VkAccelerationStructureMemoryRequirementsInfoNV::type set to "
"VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_UPDATE_SCRATCH_NV must be less than "
"or equal to the size of scratch minus scratchOffset");
}
} else {
if (dst_as_state != nullptr && !dst_as_state->build_scratch_memory_requirements_checked) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_ACCELERATION_STRUCTURE_NV_EXT,
HandleToUint64(dst), kVUID_Core_CmdBuildAccelNV_NoScratchMemReqQuery,
"vkCmdBuildAccelerationStructureNV(): Assigning scratch buffer to %s but "
"vkGetAccelerationStructureMemoryRequirementsNV() has not been called for scratch memory.",
report_data->FormatHandle(dst_as_state->acceleration_structure).c_str());
// Use requirements fetched at create time
}
if (scratch_buffer_state != nullptr && dst_as_state != nullptr &&
dst_as_state->build_scratch_memory_requirements.memoryRequirements.size >
(scratch_buffer_state->binding.size - (scratch_buffer_state->binding.offset + scratchOffset))) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdBuildAccelerationStructureNV-update-02491",
"vkCmdBuildAccelerationStructureNV(): If update is VK_FALSE, The size member of the "
"VkMemoryRequirements structure returned from a call to "
"vkGetAccelerationStructureMemoryRequirementsNV with "
"VkAccelerationStructureMemoryRequirementsInfoNV::accelerationStructure set to dst and "
"VkAccelerationStructureMemoryRequirementsInfoNV::type set to "
"VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_BUILD_SCRATCH_NV must be less than "
"or equal to the size of scratch minus scratchOffset");
}
}
return skip;
}
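// Worked example for the scratch-size checks above (hypothetical numbers): with
// scratch_buffer_state->binding.size == 0x1000, binding.offset == 0x0 and scratchOffset == 0x200,
// 0xE00 bytes remain for scratch, so a build scratch requirement of 0x1000 would report
// "VUID-vkCmdBuildAccelerationStructureNV-update-02491".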
void ValidationStateTracker::PostCallRecordCmdBuildAccelerationStructureNV(
VkCommandBuffer commandBuffer, const VkAccelerationStructureInfoNV *pInfo, VkBuffer instanceData, VkDeviceSize instanceOffset,
VkBool32 update, VkAccelerationStructureNV dst, VkAccelerationStructureNV src, VkBuffer scratch, VkDeviceSize scratchOffset) {
CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
if (cb_state) {
ACCELERATION_STRUCTURE_STATE *dst_as_state = GetAccelerationStructureState(dst);
ACCELERATION_STRUCTURE_STATE *src_as_state = GetAccelerationStructureState(src);
if (dst_as_state != nullptr) {
dst_as_state->built = true;
dst_as_state->build_info.initialize(pInfo);
AddCommandBufferBindingAccelerationStructure(cb_state, dst_as_state);
}
if (src_as_state != nullptr) {
AddCommandBufferBindingAccelerationStructure(cb_state, src_as_state);
}
}
}
bool CoreChecks::PreCallValidateCmdCopyAccelerationStructureNV(VkCommandBuffer commandBuffer, VkAccelerationStructureNV dst,
VkAccelerationStructureNV src,
VkCopyAccelerationStructureModeNV mode) {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdCopyAccelerationStructureNV()", VK_QUEUE_COMPUTE_BIT,
"VUID-vkCmdCopyAccelerationStructureNV-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_COPYACCELERATIONSTRUCTURENV, "vkCmdCopyAccelerationStructureNV()");
const ACCELERATION_STRUCTURE_STATE *dst_as_state = GetAccelerationStructureState(dst);
const ACCELERATION_STRUCTURE_STATE *src_as_state = GetAccelerationStructureState(src);
if (dst_as_state != nullptr) {
skip |= ValidateMemoryIsBoundToAccelerationStructure(
dst_as_state, "vkCmdBuildAccelerationStructureNV()",
"UNASSIGNED-CoreValidation-DrawState-InvalidCommandBuffer-VkAccelerationStructureNV");
}
if (mode == VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_NV) {
if (src_as_state != nullptr &&
(!src_as_state->built || !(src_as_state->build_info.flags & VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_NV))) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdCopyAccelerationStructureNV-src-02497",
"vkCmdCopyAccelerationStructureNV(): src must have been built with "
"VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_NV if mode is "
"VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_NV.");
}
}
return skip;
}
void ValidationStateTracker::PostCallRecordCmdCopyAccelerationStructureNV(VkCommandBuffer commandBuffer,
VkAccelerationStructureNV dst,
VkAccelerationStructureNV src,
VkCopyAccelerationStructureModeNV mode) {
CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
if (cb_state) {
ACCELERATION_STRUCTURE_STATE *src_as_state = GetAccelerationStructureState(src);
ACCELERATION_STRUCTURE_STATE *dst_as_state = GetAccelerationStructureState(dst);
if (dst_as_state != nullptr && src_as_state != nullptr) {
dst_as_state->built = true;
dst_as_state->build_info = src_as_state->build_info;
AddCommandBufferBindingAccelerationStructure(cb_state, dst_as_state);
AddCommandBufferBindingAccelerationStructure(cb_state, src_as_state);
}
}
}
bool CoreChecks::PreCallValidateDestroyAccelerationStructureNV(VkDevice device, VkAccelerationStructureNV accelerationStructure,
const VkAllocationCallbacks *pAllocator) {
const ACCELERATION_STRUCTURE_STATE *as_state = GetAccelerationStructureState(accelerationStructure);
const VulkanTypedHandle obj_struct(accelerationStructure, kVulkanObjectTypeAccelerationStructureNV);
bool skip = false;
if (as_state) {
skip |= ValidateObjectNotInUse(as_state, obj_struct, "vkDestroyAccelerationStructureNV",
"VUID-vkDestroyAccelerationStructureNV-accelerationStructure-02442");
}
return skip;
}
void ValidationStateTracker::PreCallRecordDestroyAccelerationStructureNV(VkDevice device,
VkAccelerationStructureNV accelerationStructure,
const VkAllocationCallbacks *pAllocator) {
if (!accelerationStructure) return;
auto *as_state = GetAccelerationStructureState(accelerationStructure);
if (as_state) {
const VulkanTypedHandle obj_struct(accelerationStructure, kVulkanObjectTypeAccelerationStructureNV);
InvalidateCommandBuffers(as_state->cb_bindings, obj_struct);
for (auto mem_binding : as_state->GetBoundMemory()) {
auto mem_info = GetDevMemState(mem_binding);
if (mem_info) {
RemoveAccelerationStructureMemoryRange(HandleToUint64(accelerationStructure), mem_info);
}
}
ClearMemoryObjectBindings(obj_struct);
accelerationStructureMap.erase(accelerationStructure);
}
}
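// The vkCmdSet* validators below share one pattern: check queue flags, validate the command, then flag
// the call if the corresponding state was baked into the bound pipeline, which is tracked via
// cb_state->static_status. For example, a pipeline created without VK_DYNAMIC_STATE_LINE_WIDTH leaves
// CBSTATUS_LINE_WIDTH_SET in static_status, so a later vkCmdSetLineWidth() on that command buffer is an error.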
bool CoreChecks::PreCallValidateCmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth) {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetLineWidth()", VK_QUEUE_GRAPHICS_BIT,
"VUID-vkCmdSetLineWidth-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_SETLINEWIDTH, "vkCmdSetLineWidth()");
if (cb_state->static_status & CBSTATUS_LINE_WIDTH_SET) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdSetLineWidth-None-00787",
"vkCmdSetLineWidth called but pipeline was created without VK_DYNAMIC_STATE_LINE_WIDTH flag.");
}
return skip;
}
void ValidationStateTracker::PreCallRecordCmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth) {
CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
cb_state->status |= CBSTATUS_LINE_WIDTH_SET;
}
bool CoreChecks::PreCallValidateCmdSetDepthBias(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp,
float depthBiasSlopeFactor) {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetDepthBias()", VK_QUEUE_GRAPHICS_BIT,
"VUID-vkCmdSetDepthBias-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_SETDEPTHBIAS, "vkCmdSetDepthBias()");
if (cb_state->static_status & CBSTATUS_DEPTH_BIAS_SET) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdSetDepthBias-None-00789",
"vkCmdSetDepthBias(): pipeline was created without VK_DYNAMIC_STATE_DEPTH_BIAS flag..");
}
if ((depthBiasClamp != 0.0) && (!enabled_features.core.depthBiasClamp)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdSetDepthBias-depthBiasClamp-00790",
"vkCmdSetDepthBias(): the depthBiasClamp device feature is disabled: the depthBiasClamp parameter must "
"be set to 0.0.");
}
return skip;
}
void ValidationStateTracker::PreCallRecordCmdSetDepthBias(VkCommandBuffer commandBuffer, float depthBiasConstantFactor,
float depthBiasClamp, float depthBiasSlopeFactor) {
CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
cb_state->status |= CBSTATUS_DEPTH_BIAS_SET;
}
bool CoreChecks::PreCallValidateCmdSetBlendConstants(VkCommandBuffer commandBuffer, const float blendConstants[4]) {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetBlendConstants()", VK_QUEUE_GRAPHICS_BIT,
"VUID-vkCmdSetBlendConstants-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_SETBLENDCONSTANTS, "vkCmdSetBlendConstants()");
if (cb_state->static_status & CBSTATUS_BLEND_CONSTANTS_SET) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdSetBlendConstants-None-00612",
"vkCmdSetBlendConstants(): pipeline was created without VK_DYNAMIC_STATE_BLEND_CONSTANTS flag..");
}
return skip;
}
void ValidationStateTracker::PreCallRecordCmdSetBlendConstants(VkCommandBuffer commandBuffer, const float blendConstants[4]) {
CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
cb_state->status |= CBSTATUS_BLEND_CONSTANTS_SET;
}
bool CoreChecks::PreCallValidateCmdSetDepthBounds(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds) {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetDepthBounds()", VK_QUEUE_GRAPHICS_BIT,
"VUID-vkCmdSetDepthBounds-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_SETDEPTHBOUNDS, "vkCmdSetDepthBounds()");
if (cb_state->static_status & CBSTATUS_DEPTH_BOUNDS_SET) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdSetDepthBounds-None-00599",
"vkCmdSetDepthBounds(): pipeline was created without VK_DYNAMIC_STATE_DEPTH_BOUNDS flag..");
}
return skip;
}
void ValidationStateTracker::PreCallRecordCmdSetDepthBounds(VkCommandBuffer commandBuffer, float minDepthBounds,
float maxDepthBounds) {
CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
cb_state->status |= CBSTATUS_DEPTH_BOUNDS_SET;
}
bool CoreChecks::PreCallValidateCmdSetStencilCompareMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask,
uint32_t compareMask) {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetStencilCompareMask()", VK_QUEUE_GRAPHICS_BIT,
"VUID-vkCmdSetStencilCompareMask-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_SETSTENCILCOMPAREMASK, "vkCmdSetStencilCompareMask()");
if (cb_state->static_status & CBSTATUS_STENCIL_READ_MASK_SET) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdSetStencilCompareMask-None-00602",
"vkCmdSetStencilCompareMask(): pipeline was created without VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK flag..");
}
return skip;
}
void ValidationStateTracker::PreCallRecordCmdSetStencilCompareMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask,
uint32_t compareMask) {
CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
cb_state->status |= CBSTATUS_STENCIL_READ_MASK_SET;
}
bool CoreChecks::PreCallValidateCmdSetStencilWriteMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask,
uint32_t writeMask) {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetStencilWriteMask()", VK_QUEUE_GRAPHICS_BIT,
"VUID-vkCmdSetStencilWriteMask-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_SETSTENCILWRITEMASK, "vkCmdSetStencilWriteMask()");
if (cb_state->static_status & CBSTATUS_STENCIL_WRITE_MASK_SET) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdSetStencilWriteMask-None-00603",
"vkCmdSetStencilWriteMask(): pipeline was created without VK_DYNAMIC_STATE_STENCIL_WRITE_MASK flag..");
}
return skip;
}
void ValidationStateTracker::PreCallRecordCmdSetStencilWriteMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask,
uint32_t writeMask) {
CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
cb_state->status |= CBSTATUS_STENCIL_WRITE_MASK_SET;
}
bool CoreChecks::PreCallValidateCmdSetStencilReference(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask,
uint32_t reference) {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetStencilReference()", VK_QUEUE_GRAPHICS_BIT,
"VUID-vkCmdSetStencilReference-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_SETSTENCILREFERENCE, "vkCmdSetStencilReference()");
if (cb_state->static_status & CBSTATUS_STENCIL_REFERENCE_SET) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdSetStencilReference-None-00604",
"vkCmdSetStencilReference(): pipeline was created without VK_DYNAMIC_STATE_STENCIL_REFERENCE flag..");
}
return skip;
}
void ValidationStateTracker::PreCallRecordCmdSetStencilReference(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask,
uint32_t reference) {
CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
cb_state->status |= CBSTATUS_STENCIL_REFERENCE_SET;
}
// Update pipeline_layout bind points applying the "Pipeline Layout Compatibility" rules
void ValidationStateTracker::UpdateLastBoundDescriptorSets(CMD_BUFFER_STATE *cb_state, VkPipelineBindPoint pipeline_bind_point,
const PIPELINE_LAYOUT_STATE *pipeline_layout, uint32_t first_set,
uint32_t set_count,
const std::vector<cvdescriptorset::DescriptorSet *> descriptor_sets,
uint32_t dynamic_offset_count, const uint32_t *p_dynamic_offsets) {
// Defensive
assert(set_count);
if (0 == set_count) return;
assert(pipeline_layout);
if (!pipeline_layout) return;
uint32_t required_size = first_set + set_count;
const uint32_t last_binding_index = required_size - 1;
assert(last_binding_index < pipeline_layout->compat_for_set.size());
// Some useful shorthand
auto &last_bound = cb_state->lastBound[pipeline_bind_point];
auto &pipe_compat_ids = pipeline_layout->compat_for_set;
const uint32_t current_size = static_cast<uint32_t>(last_bound.per_set.size());
// We need this three times in this function, but nowhere else
auto push_descriptor_cleanup = [&last_bound](const cvdescriptorset::DescriptorSet *ds) -> bool {
if (ds && ds->IsPushDescriptor()) {
assert(ds == last_bound.push_descriptor_set.get());
last_bound.push_descriptor_set = nullptr;
return true;
}
return false;
};
// Clean up the "disturbed" before and after the range to be set
if (required_size < current_size) {
if (last_bound.per_set[last_binding_index].compat_id_for_set != pipe_compat_ids[last_binding_index]) {
// We're disturbing those after last, we'll shrink below, but first need to check for and cleanup the push_descriptor
for (auto set_idx = required_size; set_idx < current_size; ++set_idx) {
if (push_descriptor_cleanup(last_bound.per_set[set_idx].bound_descriptor_set)) break;
}
} else {
// We're not disturbing past last, so leave the upper binding data alone.
required_size = current_size;
}
}
// We resize if we need more set entries or if those past "last" are disturbed
if (required_size != current_size) {
last_bound.per_set.resize(required_size);
}
// For any previously bound sets, need to set them to "invalid" if they were disturbed by this update
for (uint32_t set_idx = 0; set_idx < first_set; ++set_idx) {
if (last_bound.per_set[set_idx].compat_id_for_set != pipe_compat_ids[set_idx]) {
push_descriptor_cleanup(last_bound.per_set[set_idx].bound_descriptor_set);
last_bound.per_set[set_idx].bound_descriptor_set = nullptr;
last_bound.per_set[set_idx].dynamicOffsets.clear();
last_bound.per_set[set_idx].compat_id_for_set = pipe_compat_ids[set_idx];
}
}
// Now update the bound sets with the input sets
const uint32_t *input_dynamic_offsets = p_dynamic_offsets; // "read" pointer for dynamic offset data
for (uint32_t input_idx = 0; input_idx < set_count; input_idx++) {
auto set_idx = input_idx + first_set; // set_idx is index within layout, input_idx is index within input descriptor sets
cvdescriptorset::DescriptorSet *descriptor_set = descriptor_sets[input_idx];
// Record binding (or push)
if (descriptor_set != last_bound.push_descriptor_set.get()) {
// Only cleanup the push descriptors if they aren't the currently used set.
push_descriptor_cleanup(last_bound.per_set[set_idx].bound_descriptor_set);
}
last_bound.per_set[set_idx].bound_descriptor_set = descriptor_set;
last_bound.per_set[set_idx].compat_id_for_set = pipe_compat_ids[set_idx]; // compat ids are canonical *per* set index
if (descriptor_set) {
auto set_dynamic_descriptor_count = descriptor_set->GetDynamicDescriptorCount();
// TODO: Add logic for tracking push_descriptor offsets (here or in caller)
if (set_dynamic_descriptor_count && input_dynamic_offsets) {
const uint32_t *end_offset = input_dynamic_offsets + set_dynamic_descriptor_count;
last_bound.per_set[set_idx].dynamicOffsets = std::vector<uint32_t>(input_dynamic_offsets, end_offset);
input_dynamic_offsets = end_offset;
assert(input_dynamic_offsets <= (p_dynamic_offsets + dynamic_offset_count));
} else {
last_bound.per_set[set_idx].dynamicOffsets.clear();
}
if (!descriptor_set->IsPushDescriptor()) {
// Can't cache validation of push_descriptors
cb_state->validated_descriptor_sets.insert(descriptor_set);
}
}
}
}
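// Illustrative scenario for the "disturbed" logic above (hypothetical counts): with sets 0..3 bound
// (current_size == 4) and a new bind of first_set == 1, set_count == 2, required_size is 3. If
// per_set[2].compat_id_for_set differs from the incoming layout's compat id, set 3 is disturbed (its
// push descriptor set, if any, is cleaned up and per_set shrinks to 3); set 0 keeps its binding only
// if its own compat id still matches, otherwise it is invalidated by the loop above.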
// Update the bound state for the bind point, including the effects of incompatible pipeline layouts
void ValidationStateTracker::PreCallRecordCmdBindDescriptorSets(VkCommandBuffer commandBuffer,
VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout,
uint32_t firstSet, uint32_t setCount,
const VkDescriptorSet *pDescriptorSets, uint32_t dynamicOffsetCount,
const uint32_t *pDynamicOffsets) {
CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
auto pipeline_layout = GetPipelineLayout(layout);
std::vector<cvdescriptorset::DescriptorSet *> descriptor_sets;
descriptor_sets.reserve(setCount);
// Resize binding arrays
uint32_t last_set_index = firstSet + setCount - 1;
if (last_set_index >= cb_state->lastBound[pipelineBindPoint].per_set.size()) {
cb_state->lastBound[pipelineBindPoint].per_set.resize(last_set_index + 1);
}
// Construct a list of the descriptors
bool found_non_null = false;
for (uint32_t i = 0; i < setCount; i++) {
cvdescriptorset::DescriptorSet *descriptor_set = GetSetNode(pDescriptorSets[i]);
descriptor_sets.emplace_back(descriptor_set);
found_non_null |= descriptor_set != nullptr;
}
if (found_non_null) { // which implies setCount > 0
UpdateLastBoundDescriptorSets(cb_state, pipelineBindPoint, pipeline_layout, firstSet, setCount, descriptor_sets,
dynamicOffsetCount, pDynamicOffsets);
cb_state->lastBound[pipelineBindPoint].pipeline_layout = layout;
}
}
static bool ValidateDynamicOffsetAlignment(const debug_report_data *report_data, const VkDescriptorSetLayoutBinding *binding,
VkDescriptorType test_type, VkDeviceSize alignment, const uint32_t *pDynamicOffsets,
const char *err_msg, const char *limit_name, uint32_t *offset_idx) {
bool skip = false;
if (binding->descriptorType == test_type) {
const auto end_idx = *offset_idx + binding->descriptorCount;
for (uint32_t current_idx = *offset_idx; current_idx < end_idx; current_idx++) {
if (SafeModulo(pDynamicOffsets[current_idx], alignment) != 0) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, err_msg,
"vkCmdBindDescriptorSets(): pDynamicOffsets[%d] is %d but must be a multiple of device limit %s 0x%" PRIxLEAST64
".",
current_idx, pDynamicOffsets[current_idx], limit_name, alignment);
}
}
*offset_idx = end_idx;
}
return skip;
}
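// Example (hypothetical limit): with minUniformBufferOffsetAlignment == 0x100, a dynamic uniform buffer
// offset of 0x180 fails the SafeModulo check above and is reported against
// "VUID-vkCmdBindDescriptorSets-pDynamicOffsets-01971", while an offset of 0x200 passes.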
bool CoreChecks::PreCallValidateCmdBindDescriptorSets(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
VkPipelineLayout layout, uint32_t firstSet, uint32_t setCount,
const VkDescriptorSet *pDescriptorSets, uint32_t dynamicOffsetCount,
const uint32_t *pDynamicOffsets) {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = false;
skip |= ValidateCmdQueueFlags(cb_state, "vkCmdBindDescriptorSets()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
"VUID-vkCmdBindDescriptorSets-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_BINDDESCRIPTORSETS, "vkCmdBindDescriptorSets()");
// Track total count of dynamic descriptor types to make sure we have an offset for each one
uint32_t total_dynamic_descriptors = 0;
string error_string = "";
const auto *pipeline_layout = GetPipelineLayout(layout);
for (uint32_t set_idx = 0; set_idx < setCount; set_idx++) {
const cvdescriptorset::DescriptorSet *descriptor_set = GetSetNode(pDescriptorSets[set_idx]);
if (descriptor_set) {
// Verify that set being bound is compatible with overlapping setLayout of pipelineLayout
if (!VerifySetLayoutCompatibility(descriptor_set, pipeline_layout, set_idx + firstSet, error_string)) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
HandleToUint64(pDescriptorSets[set_idx]), "VUID-vkCmdBindDescriptorSets-pDescriptorSets-00358",
"descriptorSet #%u being bound is not compatible with overlapping descriptorSetLayout at index %u of "
"%s due to: %s.",
set_idx, set_idx + firstSet, report_data->FormatHandle(layout).c_str(), error_string.c_str());
}
auto set_dynamic_descriptor_count = descriptor_set->GetDynamicDescriptorCount();
if (set_dynamic_descriptor_count) {
// First make sure we won't overstep bounds of pDynamicOffsets array
if ((total_dynamic_descriptors + set_dynamic_descriptor_count) > dynamicOffsetCount) {
// Test/report this here, such that we don't run past the end of pDynamicOffsets in the else clause
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
HandleToUint64(pDescriptorSets[set_idx]), "VUID-vkCmdBindDescriptorSets-dynamicOffsetCount-00359",
"descriptorSet #%u (%s) requires %u dynamicOffsets, but only %u dynamicOffsets are left in "
"pDynamicOffsets array. There must be one dynamic offset for each dynamic descriptor being bound.",
set_idx, report_data->FormatHandle(pDescriptorSets[set_idx]).c_str(),
descriptor_set->GetDynamicDescriptorCount(), (dynamicOffsetCount - total_dynamic_descriptors));
                    // Set the number found to the maximum to prevent duplicate messages, or subsequent descriptor sets from
// testing against the "short tail" we're skipping below.
total_dynamic_descriptors = dynamicOffsetCount;
} else { // Validate dynamic offsets and Dynamic Offset Minimums
uint32_t cur_dyn_offset = total_dynamic_descriptors;
const auto dsl = descriptor_set->GetLayout();
const auto binding_count = dsl->GetBindingCount();
const auto &limits = phys_dev_props.limits;
for (uint32_t binding_idx = 0; binding_idx < binding_count; binding_idx++) {
const auto *binding = dsl->GetDescriptorSetLayoutBindingPtrFromIndex(binding_idx);
skip |= ValidateDynamicOffsetAlignment(report_data, binding, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC,
limits.minUniformBufferOffsetAlignment, pDynamicOffsets,
"VUID-vkCmdBindDescriptorSets-pDynamicOffsets-01971",
"minUniformBufferOffsetAlignment", &cur_dyn_offset);
skip |= ValidateDynamicOffsetAlignment(report_data, binding, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC,
limits.minStorageBufferOffsetAlignment, pDynamicOffsets,
"VUID-vkCmdBindDescriptorSets-pDynamicOffsets-01972",
"minStorageBufferOffsetAlignment", &cur_dyn_offset);
}
// Keep running total of dynamic descriptor count to verify at the end
total_dynamic_descriptors += set_dynamic_descriptor_count;
}
}
} else {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
HandleToUint64(pDescriptorSets[set_idx]), kVUID_Core_DrawState_InvalidSet,
"Attempt to bind %s that doesn't exist!", report_data->FormatHandle(pDescriptorSets[set_idx]).c_str());
}
}
// dynamicOffsetCount must equal the total number of dynamic descriptors in the sets being bound
if (total_dynamic_descriptors != dynamicOffsetCount) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer), "VUID-vkCmdBindDescriptorSets-dynamicOffsetCount-00359",
"Attempting to bind %u descriptorSets with %u dynamic descriptors, but dynamicOffsetCount is %u. It should "
"exactly match the number of dynamic descriptors.",
setCount, total_dynamic_descriptors, dynamicOffsetCount);
}
return skip;
}
// Validates that the supplied bind point is supported for the command buffer (viz. the command pool)
// Takes a map of error codes as some of the VUIDs (e.g. vkCmdBindPipeline) are written per bindpoint
// TODO add vkCmdBindPipeline bind_point validation using this call.
bool CoreChecks::ValidatePipelineBindPoint(const CMD_BUFFER_STATE *cb_state, VkPipelineBindPoint bind_point, const char *func_name,
const std::map<VkPipelineBindPoint, std::string> &bind_errors) const {
bool skip = false;
auto pool = GetCommandPoolState(cb_state->createInfo.commandPool);
if (pool) { // The loss of a pool in a recording cmd is reported in DestroyCommandPool
static const std::map<VkPipelineBindPoint, VkQueueFlags> flag_mask = {
std::make_pair(VK_PIPELINE_BIND_POINT_GRAPHICS, static_cast<VkQueueFlags>(VK_QUEUE_GRAPHICS_BIT)),
std::make_pair(VK_PIPELINE_BIND_POINT_COMPUTE, static_cast<VkQueueFlags>(VK_QUEUE_COMPUTE_BIT)),
std::make_pair(VK_PIPELINE_BIND_POINT_RAY_TRACING_NV,
static_cast<VkQueueFlags>(VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT)),
};
const auto &qfp = GetPhysicalDeviceState()->queue_family_properties[pool->queueFamilyIndex];
if (0 == (qfp.queueFlags & flag_mask.at(bind_point))) {
const std::string &error = bind_errors.at(bind_point);
auto cb_u64 = HandleToUint64(cb_state->commandBuffer);
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, cb_u64,
error, "%s: %s was allocated from %s that does not support bindpoint %s.", func_name,
report_data->FormatHandle(cb_state->commandBuffer).c_str(),
report_data->FormatHandle(cb_state->createInfo.commandPool).c_str(),
string_VkPipelineBindPoint(bind_point));
}
}
return skip;
}
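// Example: a command buffer allocated from a pool whose queue family reports only VK_QUEUE_COMPUTE_BIT
// fails the flag_mask lookup for VK_PIPELINE_BIND_POINT_GRAPHICS, and the caller-supplied error code
// for the graphics bind point is reported.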
bool CoreChecks::PreCallValidateCmdPushDescriptorSetKHR(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
VkPipelineLayout layout, uint32_t set, uint32_t descriptorWriteCount,
const VkWriteDescriptorSet *pDescriptorWrites) {
CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
const char *func_name = "vkCmdPushDescriptorSetKHR()";
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_PUSHDESCRIPTORSETKHR, func_name);
skip |= ValidateCmdQueueFlags(cb_state, func_name, (VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT),
"VUID-vkCmdPushDescriptorSetKHR-commandBuffer-cmdpool");
static const std::map<VkPipelineBindPoint, std::string> bind_errors = {
std::make_pair(VK_PIPELINE_BIND_POINT_GRAPHICS, "VUID-vkCmdPushDescriptorSetKHR-pipelineBindPoint-00363"),
std::make_pair(VK_PIPELINE_BIND_POINT_COMPUTE, "VUID-vkCmdPushDescriptorSetKHR-pipelineBindPoint-00363"),
std::make_pair(VK_PIPELINE_BIND_POINT_RAY_TRACING_NV, "VUID-vkCmdPushDescriptorSetKHR-pipelineBindPoint-00363")};
skip |= ValidatePipelineBindPoint(cb_state, pipelineBindPoint, func_name, bind_errors);
auto layout_data = GetPipelineLayout(layout);
// Validate the set index points to a push descriptor set and is in range
if (layout_data) {
const auto &set_layouts = layout_data->set_layouts;
const auto layout_u64 = HandleToUint64(layout);
if (set < set_layouts.size()) {
const auto dsl = set_layouts[set];
if (dsl) {
if (!dsl->IsPushDescriptor()) {
skip = log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT,
layout_u64, "VUID-vkCmdPushDescriptorSetKHR-set-00365",
"%s: Set index %" PRIu32 " does not match push descriptor set layout index for %s.", func_name,
set, report_data->FormatHandle(layout).c_str());
} else {
// Create an empty proxy in order to use the existing descriptor set update validation
// TODO move the validation (like this) that doesn't need descriptor set state to the DSL object so we
// don't have to do this.
cvdescriptorset::DescriptorSet proxy_ds(VK_NULL_HANDLE, VK_NULL_HANDLE, dsl, 0, this);
skip |= ValidatePushDescriptorsUpdate(&proxy_ds, descriptorWriteCount, pDescriptorWrites, func_name);
}
}
} else {
skip = log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT, layout_u64,
"VUID-vkCmdPushDescriptorSetKHR-set-00364",
"%s: Set index %" PRIu32 " is outside of range for %s (set < %" PRIu32 ").", func_name, set,
report_data->FormatHandle(layout).c_str(), static_cast<uint32_t>(set_layouts.size()));
}
}
return skip;
}
void CoreChecks::RecordCmdPushDescriptorSetState(CMD_BUFFER_STATE *cb_state, VkPipelineBindPoint pipelineBindPoint,
VkPipelineLayout layout, uint32_t set, uint32_t descriptorWriteCount,
const VkWriteDescriptorSet *pDescriptorWrites) {
const auto &pipeline_layout = GetPipelineLayout(layout);
// Short circuit invalid updates
if (!pipeline_layout || (set >= pipeline_layout->set_layouts.size()) || !pipeline_layout->set_layouts[set] ||
!pipeline_layout->set_layouts[set]->IsPushDescriptor())
return;
// We need a descriptor set to update the bindings with, compatible with the passed layout
const auto dsl = pipeline_layout->set_layouts[set];
auto &last_bound = cb_state->lastBound[pipelineBindPoint];
auto &push_descriptor_set = last_bound.push_descriptor_set;
    // If we are disturbing the current push_descriptor_set, clear it
if (!push_descriptor_set || !CompatForSet(set, last_bound, pipeline_layout->compat_for_set)) {
last_bound.UnbindAndResetPushDescriptorSet(new cvdescriptorset::DescriptorSet(0, 0, dsl, 0, this));
}
std::vector<cvdescriptorset::DescriptorSet *> descriptor_sets = {push_descriptor_set.get()};
UpdateLastBoundDescriptorSets(cb_state, pipelineBindPoint, pipeline_layout, set, 1, descriptor_sets, 0, nullptr);
last_bound.pipeline_layout = layout;
// Now that we have either the new or extant push_descriptor set ... do the write updates against it
push_descriptor_set->PerformPushDescriptorsUpdate(descriptorWriteCount, pDescriptorWrites);
}
void CoreChecks::PreCallRecordCmdPushDescriptorSetKHR(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
VkPipelineLayout layout, uint32_t set, uint32_t descriptorWriteCount,
const VkWriteDescriptorSet *pDescriptorWrites) {
CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
RecordCmdPushDescriptorSetState(cb_state, pipelineBindPoint, layout, set, descriptorWriteCount, pDescriptorWrites);
}
static VkDeviceSize GetIndexAlignment(VkIndexType indexType) {
switch (indexType) {
case VK_INDEX_TYPE_UINT16:
return 2;
case VK_INDEX_TYPE_UINT32:
return 4;
default:
// Not a real index type. Express no alignment requirement here; we expect upper layer
// to have already picked up on the enum being nonsense.
return 1;
}
}
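// Example: VK_INDEX_TYPE_UINT16 yields an alignment of 2, so binding an index buffer at offset 3 trips
// "VUID-vkCmdBindIndexBuffer-offset-00432" below, while offset 4 is accepted.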
bool CoreChecks::PreCallValidateCmdBindIndexBuffer(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
VkIndexType indexType) {
const auto buffer_state = GetBufferState(buffer);
const auto cb_node = GetCBState(commandBuffer);
assert(buffer_state);
assert(cb_node);
bool skip =
ValidateBufferUsageFlags(buffer_state, VK_BUFFER_USAGE_INDEX_BUFFER_BIT, true, "VUID-vkCmdBindIndexBuffer-buffer-00433",
"vkCmdBindIndexBuffer()", "VK_BUFFER_USAGE_INDEX_BUFFER_BIT");
skip |= ValidateCmdQueueFlags(cb_node, "vkCmdBindIndexBuffer()", VK_QUEUE_GRAPHICS_BIT,
"VUID-vkCmdBindIndexBuffer-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_node, CMD_BINDINDEXBUFFER, "vkCmdBindIndexBuffer()");
skip |= ValidateMemoryIsBoundToBuffer(buffer_state, "vkCmdBindIndexBuffer()", "VUID-vkCmdBindIndexBuffer-buffer-00434");
const auto offset_align = GetIndexAlignment(indexType);
if (offset % offset_align) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdBindIndexBuffer-offset-00432",
"vkCmdBindIndexBuffer() offset (0x%" PRIxLEAST64 ") does not fall on alignment (%s) boundary.", offset,
string_VkIndexType(indexType));
}
return skip;
}
void ValidationStateTracker::PreCallRecordCmdBindIndexBuffer(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
VkIndexType indexType) {
auto buffer_state = GetBufferState(buffer);
auto cb_state = GetCBState(commandBuffer);
cb_state->status |= CBSTATUS_INDEX_BUFFER_BOUND;
cb_state->index_buffer_binding.buffer = buffer;
cb_state->index_buffer_binding.size = buffer_state->createInfo.size;
cb_state->index_buffer_binding.offset = offset;
cb_state->index_buffer_binding.index_type = indexType;
// Add binding for this index buffer to this commandbuffer
AddCommandBufferBindingBuffer(cb_state, buffer_state);
}
bool CoreChecks::PreCallValidateCmdBindVertexBuffers(VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount,
const VkBuffer *pBuffers, const VkDeviceSize *pOffsets) {
const auto cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdBindVertexBuffers()", VK_QUEUE_GRAPHICS_BIT,
"VUID-vkCmdBindVertexBuffers-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_BINDVERTEXBUFFERS, "vkCmdBindVertexBuffers()");
for (uint32_t i = 0; i < bindingCount; ++i) {
const auto buffer_state = GetBufferState(pBuffers[i]);
assert(buffer_state);
skip |= ValidateBufferUsageFlags(buffer_state, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT, true,
"VUID-vkCmdBindVertexBuffers-pBuffers-00627", "vkCmdBindVertexBuffers()",
"VK_BUFFER_USAGE_VERTEX_BUFFER_BIT");
skip |=
ValidateMemoryIsBoundToBuffer(buffer_state, "vkCmdBindVertexBuffers()", "VUID-vkCmdBindVertexBuffers-pBuffers-00628");
if (pOffsets[i] >= buffer_state->createInfo.size) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
HandleToUint64(buffer_state->buffer), "VUID-vkCmdBindVertexBuffers-pOffsets-00626",
"vkCmdBindVertexBuffers() offset (0x%" PRIxLEAST64 ") is beyond the end of the buffer.", pOffsets[i]);
}
}
return skip;
}
void ValidationStateTracker::PreCallRecordCmdBindVertexBuffers(VkCommandBuffer commandBuffer, uint32_t firstBinding,
uint32_t bindingCount, const VkBuffer *pBuffers,
const VkDeviceSize *pOffsets) {
auto cb_state = GetCBState(commandBuffer);
uint32_t end = firstBinding + bindingCount;
if (cb_state->current_vertex_buffer_binding_info.vertex_buffer_bindings.size() < end) {
cb_state->current_vertex_buffer_binding_info.vertex_buffer_bindings.resize(end);
}
for (uint32_t i = 0; i < bindingCount; ++i) {
auto &vertex_buffer_binding = cb_state->current_vertex_buffer_binding_info.vertex_buffer_bindings[i + firstBinding];
vertex_buffer_binding.buffer = pBuffers[i];
vertex_buffer_binding.offset = pOffsets[i];
// Add binding for this vertex buffer to this commandbuffer
AddCommandBufferBindingBuffer(cb_state, GetBufferState(pBuffers[i]));
}
}
// Validate that an image's sampleCount matches the requirement for a specific API call
bool CoreChecks::ValidateImageSampleCount(const IMAGE_STATE *image_state, VkSampleCountFlagBits sample_count, const char *location,
const std::string &msgCode) const {
bool skip = false;
if (image_state->createInfo.samples != sample_count) {
skip =
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(image_state->image), msgCode, "%s for %s was created with a sample count of %s but must be %s.",
location, report_data->FormatHandle(image_state->image).c_str(),
string_VkSampleCountFlagBits(image_state->createInfo.samples), string_VkSampleCountFlagBits(sample_count));
}
return skip;
}
bool CoreChecks::PreCallValidateCmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
VkDeviceSize dataSize, const void *pData) {
const auto cb_state = GetCBState(commandBuffer);
assert(cb_state);
const auto dst_buffer_state = GetBufferState(dstBuffer);
assert(dst_buffer_state);
bool skip = false;
skip |= ValidateMemoryIsBoundToBuffer(dst_buffer_state, "vkCmdUpdateBuffer()", "VUID-vkCmdUpdateBuffer-dstBuffer-00035");
// Validate that DST buffer has correct usage flags set
skip |=
ValidateBufferUsageFlags(dst_buffer_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, "VUID-vkCmdUpdateBuffer-dstBuffer-00034",
"vkCmdUpdateBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
skip |=
ValidateCmdQueueFlags(cb_state, "vkCmdUpdateBuffer()", VK_QUEUE_TRANSFER_BIT | VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
"VUID-vkCmdUpdateBuffer-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_UPDATEBUFFER, "vkCmdUpdateBuffer()");
skip |= InsideRenderPass(cb_state, "vkCmdUpdateBuffer()", "VUID-vkCmdUpdateBuffer-renderpass");
return skip;
}
void ValidationStateTracker::PostCallRecordCmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer,
VkDeviceSize dstOffset, VkDeviceSize dataSize, const void *pData) {
auto cb_state = GetCBState(commandBuffer);
auto dst_buffer_state = GetBufferState(dstBuffer);
// Update bindings between buffer and cmd buffer
AddCommandBufferBindingBuffer(cb_state, dst_buffer_state);
}
bool CoreChecks::SetEventStageMask(VkQueue queue, VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
CMD_BUFFER_STATE *pCB = GetCBState(commandBuffer);
if (pCB) {
pCB->eventToStageMap[event] = stageMask;
}
auto queue_data = queueMap.find(queue);
if (queue_data != queueMap.end()) {
queue_data->second.eventToStageMap[event] = stageMask;
}
return false;
}
bool CoreChecks::PreCallValidateCmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetEvent()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
"VUID-vkCmdSetEvent-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_SETEVENT, "vkCmdSetEvent()");
skip |= InsideRenderPass(cb_state, "vkCmdSetEvent()", "VUID-vkCmdSetEvent-renderpass");
skip |= ValidateStageMaskGsTsEnables(stageMask, "vkCmdSetEvent()", "VUID-vkCmdSetEvent-stageMask-01150",
"VUID-vkCmdSetEvent-stageMask-01151", "VUID-vkCmdSetEvent-stageMask-02107",
"VUID-vkCmdSetEvent-stageMask-02108");
return skip;
}
void CoreChecks::PreCallRecordCmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
auto event_state = GetEventState(event);
if (event_state) {
AddCommandBufferBinding(&event_state->cb_bindings, VulkanTypedHandle(event, kVulkanObjectTypeEvent), cb_state);
event_state->cb_bindings.insert(cb_state);
}
cb_state->events.push_back(event);
if (!cb_state->waitedEvents.count(event)) {
cb_state->writeEventsBeforeWait.push_back(event);
}
cb_state->eventUpdates.emplace_back([=](VkQueue q) { return SetEventStageMask(q, commandBuffer, event, stageMask); });
}
bool CoreChecks::PreCallValidateCmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdResetEvent()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
"VUID-vkCmdResetEvent-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_RESETEVENT, "vkCmdResetEvent()");
skip |= InsideRenderPass(cb_state, "vkCmdResetEvent()", "VUID-vkCmdResetEvent-renderpass");
skip |= ValidateStageMaskGsTsEnables(stageMask, "vkCmdResetEvent()", "VUID-vkCmdResetEvent-stageMask-01154",
"VUID-vkCmdResetEvent-stageMask-01155", "VUID-vkCmdResetEvent-stageMask-02109",
"VUID-vkCmdResetEvent-stageMask-02110");
return skip;
}
void CoreChecks::PreCallRecordCmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
auto event_state = GetEventState(event);
if (event_state) {
AddCommandBufferBinding(&event_state->cb_bindings, VulkanTypedHandle(event, kVulkanObjectTypeEvent), cb_state);
event_state->cb_bindings.insert(cb_state);
}
cb_state->events.push_back(event);
if (!cb_state->waitedEvents.count(event)) {
cb_state->writeEventsBeforeWait.push_back(event);
}
// TODO : Add check for "VUID-vkResetEvent-event-01148"
cb_state->eventUpdates.emplace_back(
[=](VkQueue q) { return SetEventStageMask(q, commandBuffer, event, VkPipelineStageFlags(0)); });
}
// Return input pipeline stage flags, expanded for individual bits if VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT is set
static VkPipelineStageFlags ExpandPipelineStageFlags(const DeviceExtensions &extensions, VkPipelineStageFlags inflags) {
if (~inflags & VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT) return inflags;
return (inflags & ~VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT) |
(VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT | VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT |
(extensions.vk_nv_mesh_shader ? (VK_PIPELINE_STAGE_TASK_SHADER_BIT_NV | VK_PIPELINE_STAGE_MESH_SHADER_BIT_NV) : 0) |
VK_PIPELINE_STAGE_VERTEX_INPUT_BIT | VK_PIPELINE_STAGE_VERTEX_SHADER_BIT |
VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT |
VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT |
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT |
(extensions.vk_ext_conditional_rendering ? VK_PIPELINE_STAGE_CONDITIONAL_RENDERING_BIT_EXT : 0) |
(extensions.vk_ext_transform_feedback ? VK_PIPELINE_STAGE_TRANSFORM_FEEDBACK_BIT_EXT : 0) |
(extensions.vk_nv_shading_rate_image ? VK_PIPELINE_STAGE_SHADING_RATE_IMAGE_BIT_NV : 0) |
(extensions.vk_ext_fragment_density_map ? VK_PIPELINE_STAGE_FRAGMENT_DENSITY_PROCESS_BIT_EXT : 0));
}
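// Example: with none of the relevant extensions enabled, passing VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT
// returns the union of the individual graphics stages from TOP_OF_PIPE through BOTTOM_OF_PIPE; a mask
// without VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT is returned unchanged.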
static bool HasNonFramebufferStagePipelineStageFlags(VkPipelineStageFlags inflags) {
return (inflags & ~(VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT |
VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT)) != 0;
}
static int GetGraphicsPipelineStageLogicalOrdinal(VkPipelineStageFlagBits flag) {
// Note that the list (and lookup) ignore invalid-for-enabled-extension condition. This should be checked elsewhere
// and would greatly complicate this intentionally simple implementation
// clang-format off
const VkPipelineStageFlagBits ordered_array[] = {
VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT,
VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
VK_PIPELINE_STAGE_VERTEX_SHADER_BIT,
VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT,
VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT,
VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT,
VK_PIPELINE_STAGE_TRANSFORM_FEEDBACK_BIT_EXT,
// Including the task/mesh shaders here is not technically correct, as they are in a
// separate logical pipeline - but it works for the case this is currently used, and
// fixing it would require significant rework and end up with the code being far more
// verbose for no practical gain.
// However, worth paying attention to this if using this function in a new way.
VK_PIPELINE_STAGE_TASK_SHADER_BIT_NV,
VK_PIPELINE_STAGE_MESH_SHADER_BIT_NV,
VK_PIPELINE_STAGE_SHADING_RATE_IMAGE_BIT_NV,
VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT,
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT
};
// clang-format on
const int ordered_array_length = sizeof(ordered_array) / sizeof(VkPipelineStageFlagBits);
for (int i = 0; i < ordered_array_length; ++i) {
if (ordered_array[i] == flag) {
return i;
}
}
return -1;
}
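// Example: VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT maps to ordinal 0 and VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT
// to the last ordinal, while a non-graphics stage such as VK_PIPELINE_STAGE_TRANSFER_BIT returns -1 and
// is ignored by the callers below.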
// The following two functions technically have O(N^2) complexity, but it's for a value of N that's largely
// stable and also rather tiny - this could definitely be rejigged to work more efficiently, but the impact
// on runtime is currently negligible, so it wouldn't gain very much.
// If we add a lot more graphics pipeline stages, this set of functions should be rewritten to accommodate.
static VkPipelineStageFlagBits GetLogicallyEarliestGraphicsPipelineStage(VkPipelineStageFlags inflags) {
VkPipelineStageFlagBits earliest_bit = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
int earliest_bit_order = GetGraphicsPipelineStageLogicalOrdinal(earliest_bit);
    // Scan every bit of the 32-bit flags value; sizeof(VkPipelineStageFlagBits) alone would only cover 4 bits.
    for (std::size_t i = 0; i < sizeof(VkPipelineStageFlags) * 8; ++i) {
        VkPipelineStageFlagBits current_flag = (VkPipelineStageFlagBits)((inflags & 0x1u) << i);
if (current_flag) {
int new_order = GetGraphicsPipelineStageLogicalOrdinal(current_flag);
if (new_order != -1 && new_order < earliest_bit_order) {
earliest_bit_order = new_order;
earliest_bit = current_flag;
}
}
inflags = inflags >> 1;
}
return earliest_bit;
}
static VkPipelineStageFlagBits GetLogicallyLatestGraphicsPipelineStage(VkPipelineStageFlags inflags) {
VkPipelineStageFlagBits latest_bit = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
int latest_bit_order = GetGraphicsPipelineStageLogicalOrdinal(latest_bit);
    // Scan every bit of the 32-bit flags value; sizeof(VkPipelineStageFlagBits) alone would only cover 4 bits.
    for (std::size_t i = 0; i < sizeof(VkPipelineStageFlags) * 8; ++i) {
        VkPipelineStageFlagBits current_flag = (VkPipelineStageFlagBits)((inflags & 0x1u) << i);
        if (current_flag) {
            int new_order = GetGraphicsPipelineStageLogicalOrdinal(current_flag);
            if (new_order != -1 && new_order > latest_bit_order) {
                latest_bit_order = new_order;
                latest_bit = current_flag;
            }
        }
inflags = inflags >> 1;
}
return latest_bit;
}
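// Example: for inflags == (VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT),
// the earliest stage is VERTEX_SHADER and the latest is FRAGMENT_SHADER. A mask containing only
// non-graphics stages falls through to the BOTTOM_OF_PIPE / TOP_OF_PIPE defaults respectively.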
// Verify image barrier image state and that the image is consistent with FB image
bool CoreChecks::ValidateImageBarrierImage(const char *funcName, CMD_BUFFER_STATE const *cb_state, VkFramebuffer framebuffer,
uint32_t active_subpass, const safe_VkSubpassDescription2KHR &sub_desc,
const VulkanTypedHandle &rp_handle, uint32_t img_index,
const VkImageMemoryBarrier &img_barrier) {
bool skip = false;
    const auto *fb_state = GetFramebufferState(framebuffer);
assert(fb_state);
const auto img_bar_image = img_barrier.image;
bool image_match = false;
bool sub_image_found = false; // Do we find a corresponding subpass description
VkImageLayout sub_image_layout = VK_IMAGE_LAYOUT_UNDEFINED;
uint32_t attach_index = 0;
// Verify that a framebuffer image matches barrier image
    const auto attachment_count = fb_state->createInfo.attachmentCount;
    for (uint32_t attachment = 0; attachment < attachment_count; ++attachment) {
auto view_state = GetAttachmentImageViewState(fb_state, attachment);
if (view_state && (img_bar_image == view_state->create_info.image)) {
image_match = true;
attach_index = attachment;
break;
}
}
if (image_match) { // Make sure subpass is referring to matching attachment
        // Check the depth/stencil attachment, the depth/stencil resolve attachment, and the color/resolve
        // attachments in turn; each check is guarded by !sub_image_found so that none is skipped
        if (sub_desc.pDepthStencilAttachment && sub_desc.pDepthStencilAttachment->attachment == attach_index) {
            sub_image_layout = sub_desc.pDepthStencilAttachment->layout;
            sub_image_found = true;
        }
        if (!sub_image_found && device_extensions.vk_khr_depth_stencil_resolve) {
            const auto *resolve = lvl_find_in_chain<VkSubpassDescriptionDepthStencilResolveKHR>(sub_desc.pNext);
            if (resolve && resolve->pDepthStencilResolveAttachment &&
                resolve->pDepthStencilResolveAttachment->attachment == attach_index) {
                sub_image_layout = resolve->pDepthStencilResolveAttachment->layout;
                sub_image_found = true;
            }
        }
        if (!sub_image_found) {
            for (uint32_t j = 0; j < sub_desc.colorAttachmentCount; ++j) {
                if (sub_desc.pColorAttachments && sub_desc.pColorAttachments[j].attachment == attach_index) {
                    sub_image_layout = sub_desc.pColorAttachments[j].layout;
                    sub_image_found = true;
                    break;
                } else if (sub_desc.pResolveAttachments && sub_desc.pResolveAttachments[j].attachment == attach_index) {
                    sub_image_layout = sub_desc.pResolveAttachments[j].layout;
                    sub_image_found = true;
                    break;
                }
            }
        }
if (!sub_image_found) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
rp_handle.handle, "VUID-vkCmdPipelineBarrier-image-02635",
"%s: Barrier pImageMemoryBarriers[%d].%s is not referenced by the VkSubpassDescription for "
"active subpass (%d) of current %s.",
funcName, img_index, report_data->FormatHandle(img_bar_image).c_str(), active_subpass,
report_data->FormatHandle(rp_handle).c_str());
}
} else { // !image_match
auto const fb_handle = HandleToUint64(fb_state->framebuffer);
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT, fb_handle,
"VUID-vkCmdPipelineBarrier-image-02635",
"%s: Barrier pImageMemoryBarriers[%d].%s does not match an image from the current %s.", funcName, img_index,
report_data->FormatHandle(img_bar_image).c_str(), report_data->FormatHandle(fb_state->framebuffer).c_str());
}
if (img_barrier.oldLayout != img_barrier.newLayout) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer), "VUID-vkCmdPipelineBarrier-oldLayout-01181",
"%s: As the Image Barrier for %s is being executed within a render pass instance, oldLayout must "
"equal newLayout yet they are %s and %s.",
funcName, report_data->FormatHandle(img_barrier.image).c_str(), string_VkImageLayout(img_barrier.oldLayout),
string_VkImageLayout(img_barrier.newLayout));
} else {
if (sub_image_found && sub_image_layout != img_barrier.oldLayout) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
rp_handle.handle, "VUID-vkCmdPipelineBarrier-oldLayout-02636",
"%s: Barrier pImageMemoryBarriers[%d].%s is referenced by the VkSubpassDescription for active "
"subpass (%d) of current %s as having layout %s, but image barrier has layout %s.",
funcName, img_index, report_data->FormatHandle(img_bar_image).c_str(), active_subpass,
report_data->FormatHandle(rp_handle).c_str(), string_VkImageLayout(sub_image_layout),
string_VkImageLayout(img_barrier.oldLayout));
}
}
return skip;
}
// Validate image barriers within a renderPass
bool CoreChecks::ValidateRenderPassImageBarriers(const char *funcName, CMD_BUFFER_STATE *cb_state, uint32_t active_subpass,
const safe_VkSubpassDescription2KHR &sub_desc, const VulkanTypedHandle &rp_handle,
const safe_VkSubpassDependency2KHR *dependencies,
const std::vector<uint32_t> &self_dependencies, uint32_t image_mem_barrier_count,
const VkImageMemoryBarrier *image_barriers) {
bool skip = false;
for (uint32_t i = 0; i < image_mem_barrier_count; ++i) {
const auto &img_barrier = image_barriers[i];
const auto &img_src_access_mask = img_barrier.srcAccessMask;
const auto &img_dst_access_mask = img_barrier.dstAccessMask;
bool access_mask_match = false;
for (const auto self_dep_index : self_dependencies) {
const auto &sub_dep = dependencies[self_dep_index];
access_mask_match = (img_src_access_mask == (sub_dep.srcAccessMask & img_src_access_mask)) &&
(img_dst_access_mask == (sub_dep.dstAccessMask & img_dst_access_mask));
if (access_mask_match) break;
}
if (!access_mask_match) {
std::stringstream self_dep_ss;
stream_join(self_dep_ss, ", ", self_dependencies);
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
rp_handle.handle, "VUID-vkCmdPipelineBarrier-pDependencies-02285",
"%s: Barrier pImageMemoryBarriers[%d].srcAccessMask(0x%X) is not a subset of VkSubpassDependency "
"srcAccessMask of subpass %d of %s. Candidate VkSubpassDependency are pDependencies entries [%s].",
funcName, i, img_src_access_mask, active_subpass, report_data->FormatHandle(rp_handle).c_str(),
self_dep_ss.str().c_str());
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
rp_handle.handle, "VUID-vkCmdPipelineBarrier-pDependencies-02285",
"%s: Barrier pImageMemoryBarriers[%d].dstAccessMask(0x%X) is not a subset of VkSubpassDependency "
"dstAccessMask of subpass %d of %s. Candidate VkSubpassDependency are pDependencies entries [%s].",
funcName, i, img_dst_access_mask, active_subpass, report_data->FormatHandle(rp_handle).c_str(),
self_dep_ss.str().c_str());
}
if (VK_QUEUE_FAMILY_IGNORED != img_barrier.srcQueueFamilyIndex ||
VK_QUEUE_FAMILY_IGNORED != img_barrier.dstQueueFamilyIndex) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
rp_handle.handle, "VUID-vkCmdPipelineBarrier-srcQueueFamilyIndex-01182",
"%s: Barrier pImageMemoryBarriers[%d].srcQueueFamilyIndex is %d and "
"pImageMemoryBarriers[%d].dstQueueFamilyIndex is %d but both must be VK_QUEUE_FAMILY_IGNORED.",
funcName, i, img_barrier.srcQueueFamilyIndex, i, img_barrier.dstQueueFamilyIndex);
}
// Secondary CBs can have null framebuffer so queue up validation in that case 'til FB is known
if (VK_NULL_HANDLE == cb_state->activeFramebuffer) {
assert(VK_COMMAND_BUFFER_LEVEL_SECONDARY == cb_state->createInfo.level);
// Secondary CB case w/o FB specified delay validation
cb_state->cmd_execute_commands_functions.emplace_back([=](CMD_BUFFER_STATE *primary_cb, VkFramebuffer fb) {
return ValidateImageBarrierImage(funcName, cb_state, fb, active_subpass, sub_desc, rp_handle, i, img_barrier);
});
} else {
skip |= ValidateImageBarrierImage(funcName, cb_state, cb_state->activeFramebuffer, active_subpass, sub_desc, rp_handle,
i, img_barrier);
}
}
return skip;
}
// Validate VUs for Pipeline Barriers that are within a renderPass
// Pre: cb_state->activeRenderPass must be a pointer to valid renderPass state
bool CoreChecks::ValidateRenderPassPipelineBarriers(const char *funcName, CMD_BUFFER_STATE *cb_state,
VkPipelineStageFlags src_stage_mask, VkPipelineStageFlags dst_stage_mask,
VkDependencyFlags dependency_flags, uint32_t mem_barrier_count,
const VkMemoryBarrier *mem_barriers, uint32_t buffer_mem_barrier_count,
const VkBufferMemoryBarrier *buffer_mem_barriers,
uint32_t image_mem_barrier_count, const VkImageMemoryBarrier *image_barriers) {
bool skip = false;
const auto rp_state = cb_state->activeRenderPass;
const auto active_subpass = cb_state->activeSubpass;
const VulkanTypedHandle rp_handle(rp_state->renderPass, kVulkanObjectTypeRenderPass);
const auto &self_dependencies = rp_state->self_dependencies[active_subpass];
const auto &dependencies = rp_state->createInfo.pDependencies;
if (self_dependencies.size() == 0) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, rp_handle.handle,
"VUID-vkCmdPipelineBarrier-pDependencies-02285",
"%s: Barriers cannot be set during subpass %d of %s with no self-dependency specified.", funcName,
active_subpass, report_data->FormatHandle(rp_handle).c_str());
} else {
// Grab ref to current subpassDescription up-front for use below
const auto &sub_desc = rp_state->createInfo.pSubpasses[active_subpass];
// Look for matching mask in any self-dependency
bool stage_mask_match = false;
for (const auto self_dep_index : self_dependencies) {
const auto &sub_dep = dependencies[self_dep_index];
const auto &sub_src_stage_mask = ExpandPipelineStageFlags(device_extensions, sub_dep.srcStageMask);
const auto &sub_dst_stage_mask = ExpandPipelineStageFlags(device_extensions, sub_dep.dstStageMask);
stage_mask_match = ((sub_src_stage_mask == VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) ||
(src_stage_mask == (sub_src_stage_mask & src_stage_mask))) &&
((sub_dst_stage_mask == VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) ||
(dst_stage_mask == (sub_dst_stage_mask & dst_stage_mask)));
if (stage_mask_match) break;
}
if (!stage_mask_match) {
std::stringstream self_dep_ss;
stream_join(self_dep_ss, ", ", self_dependencies);
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
rp_handle.handle, "VUID-vkCmdPipelineBarrier-pDependencies-02285",
"%s: Barrier srcStageMask(0x%X) is not a subset of VkSubpassDependency srcStageMask of any "
"self-dependency of subpass %d of %s for which dstStageMask is also a subset. "
"Candidate VkSubpassDependency are pDependencies entries [%s].",
funcName, src_stage_mask, active_subpass, report_data->FormatHandle(rp_handle).c_str(),
self_dep_ss.str().c_str());
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
rp_handle.handle, "VUID-vkCmdPipelineBarrier-pDependencies-02285",
"%s: Barrier dstStageMask(0x%X) is not a subset of VkSubpassDependency dstStageMask of any "
"self-dependency of subpass %d of %s for which srcStageMask is also a subset. "
"Candidate VkSubpassDependency are pDependencies entries [%s].",
funcName, dst_stage_mask, active_subpass, report_data->FormatHandle(rp_handle).c_str(),
self_dep_ss.str().c_str());
}
if (0 != buffer_mem_barrier_count) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
rp_handle.handle, "VUID-vkCmdPipelineBarrier-bufferMemoryBarrierCount-01178",
"%s: bufferMemoryBarrierCount is non-zero (%d) for subpass %d of %s.", funcName,
buffer_mem_barrier_count, active_subpass, report_data->FormatHandle(rp_handle).c_str());
}
for (uint32_t i = 0; i < mem_barrier_count; ++i) {
const auto &mb_src_access_mask = mem_barriers[i].srcAccessMask;
const auto &mb_dst_access_mask = mem_barriers[i].dstAccessMask;
bool access_mask_match = false;
for (const auto self_dep_index : self_dependencies) {
const auto &sub_dep = dependencies[self_dep_index];
access_mask_match = (mb_src_access_mask == (sub_dep.srcAccessMask & mb_src_access_mask)) &&
(mb_dst_access_mask == (sub_dep.dstAccessMask & mb_dst_access_mask));
if (access_mask_match) break;
}
if (!access_mask_match) {
std::stringstream self_dep_ss;
stream_join(self_dep_ss, ", ", self_dependencies);
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, rp_handle.handle,
"VUID-vkCmdPipelineBarrier-pDependencies-02285",
"%s: Barrier pMemoryBarriers[%d].srcAccessMask(0x%X) is not a subset of VkSubpassDependency srcAccessMask "
"for any self-dependency of subpass %d of %s for which dstAccessMask is also a subset. "
"Candidate VkSubpassDependency are pDependencies entries [%s].",
funcName, i, mb_src_access_mask, active_subpass, report_data->FormatHandle(rp_handle).c_str(),
self_dep_ss.str().c_str());
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, rp_handle.handle,
"VUID-vkCmdPipelineBarrier-pDependencies-02285",
"%s: Barrier pMemoryBarriers[%d].dstAccessMask(0x%X) is not a subset of VkSubpassDependency dstAccessMask "
"for any self-dependency of subpass %d of %s for which srcAccessMask is also a subset. "
"Candidate VkSubpassDependency are pDependencies entries [%s].",
funcName, i, mb_dst_access_mask, active_subpass, report_data->FormatHandle(rp_handle).c_str(),
self_dep_ss.str().c_str());
}
}
skip |= ValidateRenderPassImageBarriers(funcName, cb_state, active_subpass, sub_desc, rp_handle, dependencies,
self_dependencies, image_mem_barrier_count, image_barriers);
bool flag_match = false;
for (const auto self_dep_index : self_dependencies) {
const auto &sub_dep = dependencies[self_dep_index];
flag_match = sub_dep.dependencyFlags == dependency_flags;
if (flag_match) break;
}
if (!flag_match) {
std::stringstream self_dep_ss;
stream_join(self_dep_ss, ", ", self_dependencies);
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
rp_handle.handle, "VUID-vkCmdPipelineBarrier-pDependencies-02285",
"%s: dependencyFlags param (0x%X) does not equal VkSubpassDependency dependencyFlags value for any "
"self-dependency of subpass %d of %s. Candidate VkSubpassDependency are pDependencies entries [%s].",
funcName, dependency_flags, cb_state->activeSubpass, report_data->FormatHandle(rp_handle).c_str(),
self_dep_ss.str().c_str());
}
}
return skip;
}
// Array to mask individual accessMask to corresponding stageMask
// accessMask active bit position (0-27) maps to index
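// e.g. VK_ACCESS_INDEX_READ_BIT is bit 1, so AccessMaskToPipeStage[1] holds the stage(s) allowed to read indices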
const static VkPipelineStageFlags AccessMaskToPipeStage[28] = {
// VK_ACCESS_INDIRECT_COMMAND_READ_BIT = 0
VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT,
// VK_ACCESS_INDEX_READ_BIT = 1
VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
// VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT = 2
VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
// VK_ACCESS_UNIFORM_READ_BIT = 3
VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT | VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT |
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_PIPELINE_STAGE_TASK_SHADER_BIT_NV |
VK_PIPELINE_STAGE_MESH_SHADER_BIT_NV | VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_NV,
// VK_ACCESS_INPUT_ATTACHMENT_READ_BIT = 4
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
// VK_ACCESS_SHADER_READ_BIT = 5
VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT | VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT |
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_PIPELINE_STAGE_TASK_SHADER_BIT_NV |
VK_PIPELINE_STAGE_MESH_SHADER_BIT_NV | VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_NV,
// VK_ACCESS_SHADER_WRITE_BIT = 6
VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT | VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT |
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_PIPELINE_STAGE_TASK_SHADER_BIT_NV |
VK_PIPELINE_STAGE_MESH_SHADER_BIT_NV | VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_NV,
// VK_ACCESS_COLOR_ATTACHMENT_READ_BIT = 7
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
// VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT = 8
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
// VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT = 9
VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,
// VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT = 10
VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,
// VK_ACCESS_TRANSFER_READ_BIT = 11
VK_PIPELINE_STAGE_TRANSFER_BIT,
// VK_ACCESS_TRANSFER_WRITE_BIT = 12
VK_PIPELINE_STAGE_TRANSFER_BIT,
// VK_ACCESS_HOST_READ_BIT = 13
VK_PIPELINE_STAGE_HOST_BIT,
// VK_ACCESS_HOST_WRITE_BIT = 14
VK_PIPELINE_STAGE_HOST_BIT,
// VK_ACCESS_MEMORY_READ_BIT = 15
VK_ACCESS_FLAG_BITS_MAX_ENUM, // Always match
// VK_ACCESS_MEMORY_WRITE_BIT = 16
VK_ACCESS_FLAG_BITS_MAX_ENUM, // Always match
// VK_ACCESS_COMMAND_PROCESS_READ_BIT_NVX = 17
VK_PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX,
// VK_ACCESS_COMMAND_PROCESS_WRITE_BIT_NVX = 18
VK_PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX,
// VK_ACCESS_COLOR_ATTACHMENT_READ_NONCOHERENT_BIT_EXT = 19
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
// VK_ACCESS_CONDITIONAL_RENDERING_READ_BIT_EXT = 20
VK_PIPELINE_STAGE_CONDITIONAL_RENDERING_BIT_EXT,
// VK_ACCESS_ACCELERATION_STRUCTURE_READ_BIT_NV = 21
VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_NV | VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_NV,
// VK_ACCESS_ACCELERATION_STRUCTURE_WRITE_BIT_NV = 22
VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_NV,
// VK_ACCESS_SHADING_RATE_IMAGE_READ_BIT_NV = 23
VK_PIPELINE_STAGE_SHADING_RATE_IMAGE_BIT_NV,
// VK_ACCESS_FRAGMENT_DENSITY_MAP_READ_BIT_EXT = 24
VK_PIPELINE_STAGE_FRAGMENT_DENSITY_PROCESS_BIT_EXT,
// VK_ACCESS_TRANSFORM_FEEDBACK_WRITE_BIT_EXT = 25
VK_PIPELINE_STAGE_TRANSFORM_FEEDBACK_BIT_EXT,
// VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_READ_BIT_EXT = 26
VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT,
// VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_WRITE_BIT_EXT = 27
VK_PIPELINE_STAGE_TRANSFORM_FEEDBACK_BIT_EXT,
};
// Verify that all bits of access_mask are supported by the src_stage_mask
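// For example, VK_ACCESS_TRANSFER_READ_BIT is bit 11, so it is only supported when the (expanded) stage mask
// includes VK_PIPELINE_STAGE_TRANSFER_BIT; VK_PIPELINE_STAGE_ALL_COMMANDS_BIT short-circuits as always valid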
static bool ValidateAccessMaskPipelineStage(const DeviceExtensions &extensions, VkAccessFlags access_mask,
VkPipelineStageFlags stage_mask) {
    // Early out if the all-commands bit is set, or access_mask is zero
if ((stage_mask & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) || (0 == access_mask)) return true;
stage_mask = ExpandPipelineStageFlags(extensions, stage_mask);
int index = 0;
// for each of the set bits in access_mask, make sure that supporting stage mask bit(s) are set
while (access_mask) {
index = (u_ffs(access_mask) - 1);
assert(index >= 0);
        // Use an explicit compare against 0 to prevent a warning from MSVC
        if ((AccessMaskToPipeStage[index] & stage_mask) == 0) return false;  // early out
        access_mask &= ~(1u << index);  // Mask off bit that's been checked
}
return true;
}
namespace barrier_queue_families {
enum VuIndex {
kSrcOrDstMustBeIgnore,
kSpecialOrIgnoreOnly,
kSrcIgnoreRequiresDstIgnore,
kDstValidOrSpecialIfNotIgnore,
kSrcValidOrSpecialIfNotIgnore,
kSrcAndDestMustBeIgnore,
kBothIgnoreOrBothValid,
kSubmitQueueMustMatchSrcOrDst
};
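// The vu_summary and *_error_codes arrays below are parallel arrays indexed by VuIndex; keep them in sync with
// the enum above when adding entries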
static const char *vu_summary[] = {"Source or destination queue family must be ignored.",
"Source or destination queue family must be special or ignored.",
"Destination queue family must be ignored if source queue family is.",
"Destination queue family must be valid, ignored, or special.",
"Source queue family must be valid, ignored, or special.",
"Source and destination queue family must both be ignored.",
"Source and destination queue family must both be ignore or both valid.",
"Source or destination queue family must match submit queue family, if not ignored."};
static const std::string image_error_codes[] = {
"VUID-VkImageMemoryBarrier-image-01381", // kSrcOrDstMustBeIgnore
"VUID-VkImageMemoryBarrier-image-01766", // kSpecialOrIgnoreOnly
"VUID-VkImageMemoryBarrier-image-01201", // kSrcIgnoreRequiresDstIgnore
"VUID-VkImageMemoryBarrier-image-01768", // kDstValidOrSpecialIfNotIgnore
"VUID-VkImageMemoryBarrier-image-01767", // kSrcValidOrSpecialIfNotIgnore
"VUID-VkImageMemoryBarrier-image-01199", // kSrcAndDestMustBeIgnore
"VUID-VkImageMemoryBarrier-image-01200", // kBothIgnoreOrBothValid
"VUID-VkImageMemoryBarrier-image-01205", // kSubmitQueueMustMatchSrcOrDst
};
static const std::string buffer_error_codes[] = {
"VUID-VkBufferMemoryBarrier-buffer-01191", // kSrcOrDstMustBeIgnore
"VUID-VkBufferMemoryBarrier-buffer-01763", // kSpecialOrIgnoreOnly
"VUID-VkBufferMemoryBarrier-buffer-01193", // kSrcIgnoreRequiresDstIgnore
"VUID-VkBufferMemoryBarrier-buffer-01765", // kDstValidOrSpecialIfNotIgnore
"VUID-VkBufferMemoryBarrier-buffer-01764", // kSrcValidOrSpecialIfNotIgnore
"VUID-VkBufferMemoryBarrier-buffer-01190", // kSrcAndDestMustBeIgnore
"VUID-VkBufferMemoryBarrier-buffer-01192", // kBothIgnoreOrBothValid
"VUID-VkBufferMemoryBarrier-buffer-01196", // kSubmitQueueMustMatchSrcOrDst
};
class ValidatorState {
public:
ValidatorState(const CoreChecks *device_data, const char *func_name, const CMD_BUFFER_STATE *cb_state,
const VulkanTypedHandle &barrier_handle, const VkSharingMode sharing_mode, const std::string *val_codes)
: report_data_(device_data->report_data),
func_name_(func_name),
cb_handle64_(HandleToUint64(cb_state->commandBuffer)),
barrier_handle_(barrier_handle),
sharing_mode_(sharing_mode),
val_codes_(val_codes),
limit_(static_cast<uint32_t>(device_data->physical_device_state->queue_family_properties.size())),
mem_ext_(device_data->device_extensions.vk_khr_external_memory) {}
// Create a validator state from an image state... reducing the image specific to the generic version.
ValidatorState(const CoreChecks *device_data, const char *func_name, const CMD_BUFFER_STATE *cb_state,
const VkImageMemoryBarrier *barrier, const IMAGE_STATE *state)
: ValidatorState(device_data, func_name, cb_state, VulkanTypedHandle(barrier->image, kVulkanObjectTypeImage),
state->createInfo.sharingMode, image_error_codes) {}
    // Create a validator state from a buffer state... reducing the buffer specific to the generic version.
ValidatorState(const CoreChecks *device_data, const char *func_name, const CMD_BUFFER_STATE *cb_state,
const VkBufferMemoryBarrier *barrier, const BUFFER_STATE *state)
: ValidatorState(device_data, func_name, cb_state, VulkanTypedHandle(barrier->buffer, kVulkanObjectTypeBuffer),
state->createInfo.sharingMode, buffer_error_codes) {}
    // Log the message using boilerplate from the object state, and VU-specific information selected by vu_index.
    // There are one- and two-queue-family overloads; in the single-family overload, param_name names the queue
    // family parameter being reported.
bool LogMsg(VuIndex vu_index, uint32_t family, const char *param_name) const {
const std::string &val_code = val_codes_[vu_index];
const char *annotation = GetFamilyAnnotation(family);
return log_msg(report_data_, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, cb_handle64_,
val_code, "%s: Barrier using %s %s created with sharingMode %s, has %s %u%s. %s", func_name_,
GetTypeString(), report_data_->FormatHandle(barrier_handle_).c_str(), GetModeString(), param_name, family,
annotation, vu_summary[vu_index]);
}
bool LogMsg(VuIndex vu_index, uint32_t src_family, uint32_t dst_family) const {
const std::string &val_code = val_codes_[vu_index];
const char *src_annotation = GetFamilyAnnotation(src_family);
const char *dst_annotation = GetFamilyAnnotation(dst_family);
return log_msg(
report_data_, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, cb_handle64_, val_code,
"%s: Barrier using %s %s created with sharingMode %s, has srcQueueFamilyIndex %u%s and dstQueueFamilyIndex %u%s. %s",
func_name_, GetTypeString(), report_data_->FormatHandle(barrier_handle_).c_str(), GetModeString(), src_family,
src_annotation, dst_family, dst_annotation, vu_summary[vu_index]);
}
    // This VU can only be tested at submit time, thus we need a callback from the closure containing the needed
    // data. Note that the mem_barrier is copied into the closure, as the lambda's lifespan exceeds the validity
    // guarantees for the application's input.
static bool ValidateAtQueueSubmit(const VkQueue queue, const CoreChecks *device_data, uint32_t src_family, uint32_t dst_family,
const ValidatorState &val) {
auto queue_data_it = device_data->queueMap.find(queue);
if (queue_data_it == device_data->queueMap.end()) return false;
uint32_t queue_family = queue_data_it->second.queueFamilyIndex;
if ((src_family != queue_family) && (dst_family != queue_family)) {
const std::string &val_code = val.val_codes_[kSubmitQueueMustMatchSrcOrDst];
const char *src_annotation = val.GetFamilyAnnotation(src_family);
const char *dst_annotation = val.GetFamilyAnnotation(dst_family);
return log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT,
HandleToUint64(queue), val_code,
"%s: Barrier submitted to queue with family index %u, using %s %s created with sharingMode %s, has "
"srcQueueFamilyIndex %u%s and dstQueueFamilyIndex %u%s. %s",
"vkQueueSubmit", queue_family, val.GetTypeString(),
device_data->report_data->FormatHandle(val.barrier_handle_).c_str(), val.GetModeString(), src_family,
src_annotation, dst_family, dst_annotation, vu_summary[kSubmitQueueMustMatchSrcOrDst]);
}
return false;
}
// Logical helpers for semantic clarity
inline bool KhrExternalMem() const { return mem_ext_; }
inline bool IsValid(uint32_t queue_family) const { return (queue_family < limit_); }
inline bool IsValidOrSpecial(uint32_t queue_family) const {
return IsValid(queue_family) || (mem_ext_ && IsSpecial(queue_family));
}
inline bool IsIgnored(uint32_t queue_family) const { return queue_family == VK_QUEUE_FAMILY_IGNORED; }
// Helpers for LogMsg (and log_msg)
const char *GetModeString() const { return string_VkSharingMode(sharing_mode_); }
// Descriptive text for the various types of queue family index
const char *GetFamilyAnnotation(uint32_t family) const {
const char *external = " (VK_QUEUE_FAMILY_EXTERNAL_KHR)";
const char *foreign = " (VK_QUEUE_FAMILY_FOREIGN_EXT)";
const char *ignored = " (VK_QUEUE_FAMILY_IGNORED)";
const char *valid = " (VALID)";
const char *invalid = " (INVALID)";
switch (family) {
case VK_QUEUE_FAMILY_EXTERNAL_KHR:
return external;
case VK_QUEUE_FAMILY_FOREIGN_EXT:
return foreign;
case VK_QUEUE_FAMILY_IGNORED:
return ignored;
default:
if (IsValid(family)) {
return valid;
}
return invalid;
};
}
const char *GetTypeString() const { return object_string[barrier_handle_.type]; }
VkSharingMode GetSharingMode() const { return sharing_mode_; }
protected:
const debug_report_data *const report_data_;
const char *const func_name_;
const uint64_t cb_handle64_;
const VulkanTypedHandle barrier_handle_;
const VkSharingMode sharing_mode_;
const std::string *val_codes_;
const uint32_t limit_;
const bool mem_ext_;
};
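// Validate the src/dst queue family indices of a barrier against the resource's sharing mode. The set of legal
// combinations depends on whether VK_KHR_external_memory is enabled, which permits the special EXTERNAL/FOREIGN
// queue families (see the VUID tables above).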
bool Validate(const CoreChecks *device_data, const char *func_name, CMD_BUFFER_STATE *cb_state, const ValidatorState &val,
const uint32_t src_queue_family, const uint32_t dst_queue_family) {
bool skip = false;
const bool mode_concurrent = val.GetSharingMode() == VK_SHARING_MODE_CONCURRENT;
const bool src_ignored = val.IsIgnored(src_queue_family);
const bool dst_ignored = val.IsIgnored(dst_queue_family);
if (val.KhrExternalMem()) {
if (mode_concurrent) {
if (!(src_ignored || dst_ignored)) {
skip |= val.LogMsg(kSrcOrDstMustBeIgnore, src_queue_family, dst_queue_family);
}
if ((src_ignored && !(dst_ignored || IsSpecial(dst_queue_family))) ||
(dst_ignored && !(src_ignored || IsSpecial(src_queue_family)))) {
skip |= val.LogMsg(kSpecialOrIgnoreOnly, src_queue_family, dst_queue_family);
}
} else {
// VK_SHARING_MODE_EXCLUSIVE
if (src_ignored && !dst_ignored) {
skip |= val.LogMsg(kSrcIgnoreRequiresDstIgnore, src_queue_family, dst_queue_family);
}
if (!dst_ignored && !val.IsValidOrSpecial(dst_queue_family)) {
skip |= val.LogMsg(kDstValidOrSpecialIfNotIgnore, dst_queue_family, "dstQueueFamilyIndex");
}
if (!src_ignored && !val.IsValidOrSpecial(src_queue_family)) {
skip |= val.LogMsg(kSrcValidOrSpecialIfNotIgnore, src_queue_family, "srcQueueFamilyIndex");
}
}
} else {
// No memory extension
if (mode_concurrent) {
if (!src_ignored || !dst_ignored) {
skip |= val.LogMsg(kSrcAndDestMustBeIgnore, src_queue_family, dst_queue_family);
}
} else {
// VK_SHARING_MODE_EXCLUSIVE
if (!((src_ignored && dst_ignored) || (val.IsValid(src_queue_family) && val.IsValid(dst_queue_family)))) {
skip |= val.LogMsg(kBothIgnoreOrBothValid, src_queue_family, dst_queue_family);
}
}
}
if (!mode_concurrent && !src_ignored && !dst_ignored) {
// Only enqueue submit time check if it is needed. If more submit time checks are added, change the criteria
// TODO create a better named list, or rename the submit time lists to something that matches the broader usage...
// Note: if we want to create a semantic that separates state lookup, validation, and state update this should go
// to a local queue of update_state_actions or something.
cb_state->eventUpdates.emplace_back([device_data, src_queue_family, dst_queue_family, val](VkQueue queue) {
return ValidatorState::ValidateAtQueueSubmit(queue, device_data, src_queue_family, dst_queue_family, val);
});
}
return skip;
}
} // namespace barrier_queue_families
// Type specific wrapper for image barriers
bool CoreChecks::ValidateBarrierQueueFamilies(const char *func_name, CMD_BUFFER_STATE *cb_state,
const VkImageMemoryBarrier &barrier, const IMAGE_STATE *state_data) {
// State data is required
if (!state_data) {
return false;
}
// Create the validator state from the image state
barrier_queue_families::ValidatorState val(this, func_name, cb_state, &barrier, state_data);
const uint32_t src_queue_family = barrier.srcQueueFamilyIndex;
const uint32_t dst_queue_family = barrier.dstQueueFamilyIndex;
return barrier_queue_families::Validate(this, func_name, cb_state, val, src_queue_family, dst_queue_family);
}
// Type specific wrapper for buffer barriers
bool CoreChecks::ValidateBarrierQueueFamilies(const char *func_name, CMD_BUFFER_STATE *cb_state,
const VkBufferMemoryBarrier &barrier, const BUFFER_STATE *state_data) {
// State data is required
if (!state_data) {
return false;
}
// Create the validator state from the buffer state
barrier_queue_families::ValidatorState val(this, func_name, cb_state, &barrier, state_data);
const uint32_t src_queue_family = barrier.srcQueueFamilyIndex;
const uint32_t dst_queue_family = barrier.dstQueueFamilyIndex;
return barrier_queue_families::Validate(this, func_name, cb_state, val, src_queue_family, dst_queue_family);
}
bool CoreChecks::ValidateBarriers(const char *funcName, CMD_BUFFER_STATE *cb_state, VkPipelineStageFlags src_stage_mask,
VkPipelineStageFlags dst_stage_mask, uint32_t memBarrierCount,
const VkMemoryBarrier *pMemBarriers, uint32_t bufferBarrierCount,
const VkBufferMemoryBarrier *pBufferMemBarriers, uint32_t imageMemBarrierCount,
const VkImageMemoryBarrier *pImageMemBarriers) {
bool skip = false;
for (uint32_t i = 0; i < memBarrierCount; ++i) {
const auto &mem_barrier = pMemBarriers[i];
if (!ValidateAccessMaskPipelineStage(device_extensions, mem_barrier.srcAccessMask, src_stage_mask)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer), "VUID-vkCmdPipelineBarrier-pMemoryBarriers-01184",
"%s: pMemBarriers[%d].srcAccessMask (0x%X) is not supported by srcStageMask (0x%X).", funcName, i,
mem_barrier.srcAccessMask, src_stage_mask);
}
if (!ValidateAccessMaskPipelineStage(device_extensions, mem_barrier.dstAccessMask, dst_stage_mask)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer), "VUID-vkCmdPipelineBarrier-pMemoryBarriers-01185",
"%s: pMemBarriers[%d].dstAccessMask (0x%X) is not supported by dstStageMask (0x%X).", funcName, i,
mem_barrier.dstAccessMask, dst_stage_mask);
}
}
for (uint32_t i = 0; i < imageMemBarrierCount; ++i) {
const auto &mem_barrier = pImageMemBarriers[i];
if (!ValidateAccessMaskPipelineStage(device_extensions, mem_barrier.srcAccessMask, src_stage_mask)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer), "VUID-vkCmdPipelineBarrier-pMemoryBarriers-01184",
"%s: pImageMemBarriers[%d].srcAccessMask (0x%X) is not supported by srcStageMask (0x%X).", funcName, i,
mem_barrier.srcAccessMask, src_stage_mask);
}
if (!ValidateAccessMaskPipelineStage(device_extensions, mem_barrier.dstAccessMask, dst_stage_mask)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer), "VUID-vkCmdPipelineBarrier-pMemoryBarriers-01185",
"%s: pImageMemBarriers[%d].dstAccessMask (0x%X) is not supported by dstStageMask (0x%X).", funcName, i,
mem_barrier.dstAccessMask, dst_stage_mask);
}
auto image_data = GetImageState(mem_barrier.image);
skip |= ValidateBarrierQueueFamilies(funcName, cb_state, mem_barrier, image_data);
if (mem_barrier.newLayout == VK_IMAGE_LAYOUT_UNDEFINED || mem_barrier.newLayout == VK_IMAGE_LAYOUT_PREINITIALIZED) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer), "VUID-VkImageMemoryBarrier-newLayout-01198",
"%s: Image Layout cannot be transitioned to UNDEFINED or PREINITIALIZED.", funcName);
}
if (image_data) {
// There is no VUID for this, but there is blanket text:
// "Non-sparse resources must be bound completely and contiguously to a single VkDeviceMemory object before
// recording commands in a command buffer."
// TODO: Update this when VUID is defined
skip |= ValidateMemoryIsBoundToImage(image_data, funcName, kVUIDUndefined);
const auto aspect_mask = mem_barrier.subresourceRange.aspectMask;
skip |= ValidateImageAspectMask(image_data->image, image_data->createInfo.format, aspect_mask, funcName);
const std::string param_name = "pImageMemoryBarriers[" + std::to_string(i) + "].subresourceRange";
skip |= ValidateImageBarrierSubresourceRange(image_data, mem_barrier.subresourceRange, funcName, param_name.c_str());
}
}
for (uint32_t i = 0; i < bufferBarrierCount; ++i) {
const auto &mem_barrier = pBufferMemBarriers[i];
if (!ValidateAccessMaskPipelineStage(device_extensions, mem_barrier.srcAccessMask, src_stage_mask)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer), "VUID-vkCmdPipelineBarrier-pMemoryBarriers-01184",
"%s: pBufferMemBarriers[%d].srcAccessMask (0x%X) is not supported by srcStageMask (0x%X).", funcName, i,
mem_barrier.srcAccessMask, src_stage_mask);
}
if (!ValidateAccessMaskPipelineStage(device_extensions, mem_barrier.dstAccessMask, dst_stage_mask)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer), "VUID-vkCmdPipelineBarrier-pMemoryBarriers-01185",
"%s: pBufferMemBarriers[%d].dstAccessMask (0x%X) is not supported by dstStageMask (0x%X).", funcName, i,
mem_barrier.dstAccessMask, dst_stage_mask);
}
// Validate buffer barrier queue family indices
auto buffer_state = GetBufferState(mem_barrier.buffer);
skip |= ValidateBarrierQueueFamilies(funcName, cb_state, mem_barrier, buffer_state);
if (buffer_state) {
// There is no VUID for this, but there is blanket text:
// "Non-sparse resources must be bound completely and contiguously to a single VkDeviceMemory object before
// recording commands in a command buffer"
// TODO: Update this when VUID is defined
skip |= ValidateMemoryIsBoundToBuffer(buffer_state, funcName, kVUIDUndefined);
auto buffer_size = buffer_state->createInfo.size;
if (mem_barrier.offset >= buffer_size) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer), "VUID-VkBufferMemoryBarrier-offset-01187",
"%s: Buffer Barrier %s has offset 0x%" PRIx64 " which is not less than total size 0x%" PRIx64 ".",
funcName, report_data->FormatHandle(mem_barrier.buffer).c_str(), HandleToUint64(mem_barrier.offset),
HandleToUint64(buffer_size));
} else if (mem_barrier.size != VK_WHOLE_SIZE && (mem_barrier.offset + mem_barrier.size > buffer_size)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer), "VUID-VkBufferMemoryBarrier-size-01189",
"%s: Buffer Barrier %s has offset 0x%" PRIx64 " and size 0x%" PRIx64
" whose sum is greater than total size 0x%" PRIx64 ".",
funcName, report_data->FormatHandle(mem_barrier.buffer).c_str(), HandleToUint64(mem_barrier.offset),
HandleToUint64(mem_barrier.size), HandleToUint64(buffer_size));
}
}
}
skip |= ValidateBarriersQFOTransferUniqueness(funcName, cb_state, bufferBarrierCount, pBufferMemBarriers, imageMemBarrierCount,
pImageMemBarriers);
return skip;
}
bool CoreChecks::ValidateEventStageMask(VkQueue queue, CMD_BUFFER_STATE *pCB, uint32_t eventCount, size_t firstEventIndex,
VkPipelineStageFlags sourceStageMask) {
bool skip = false;
VkPipelineStageFlags stageMask = 0;
for (uint32_t i = 0; i < eventCount; ++i) {
auto event = pCB->events[firstEventIndex + i];
auto queue_data = queueMap.find(queue);
if (queue_data == queueMap.end()) return false;
auto event_data = queue_data->second.eventToStageMap.find(event);
if (event_data != queue_data->second.eventToStageMap.end()) {
stageMask |= event_data->second;
} else {
auto global_event_data = GetEventState(event);
if (!global_event_data) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
HandleToUint64(event), kVUID_Core_DrawState_InvalidEvent,
"%s cannot be waited on if it has never been set.", report_data->FormatHandle(event).c_str());
} else {
stageMask |= global_event_data->stageMask;
}
}
}
// TODO: Need to validate that host_bit is only set if set event is called
// but set event can be called at any time.
if (sourceStageMask != stageMask && sourceStageMask != (stageMask | VK_PIPELINE_STAGE_HOST_BIT)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(pCB->commandBuffer), "VUID-vkCmdWaitEvents-srcStageMask-parameter",
"Submitting cmdbuffer with call to VkCmdWaitEvents using srcStageMask 0x%X which must be the bitwise OR of "
"the stageMask parameters used in calls to vkCmdSetEvent and VK_PIPELINE_STAGE_HOST_BIT if used with "
"vkSetEvent but instead is 0x%X.",
sourceStageMask, stageMask);
}
return skip;
}
// Note that we only check bits that HAVE required queue flags -- don't-care entries are skipped
static std::unordered_map<VkPipelineStageFlags, VkQueueFlags> supported_pipeline_stages_table = {
{VK_PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT},
{VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT},
{VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, VK_QUEUE_GRAPHICS_BIT},
{VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
{VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
{VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
{VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
{VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
{VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT, VK_QUEUE_GRAPHICS_BIT},
{VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT, VK_QUEUE_GRAPHICS_BIT},
{VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_QUEUE_GRAPHICS_BIT},
{VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_QUEUE_COMPUTE_BIT},
{VK_PIPELINE_STAGE_TRANSFER_BIT, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT},
{VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, VK_QUEUE_GRAPHICS_BIT}};
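// Flat list of the stages in the table above, used to walk a stage mask bit-by-bit in a defined order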
static const VkPipelineStageFlags stage_flag_bit_array[] = {VK_PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX,
VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT,
VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
VK_PIPELINE_STAGE_VERTEX_SHADER_BIT,
VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT,
VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT,
VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT,
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT,
VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
VK_PIPELINE_STAGE_TRANSFER_BIT,
VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT};
bool CoreChecks::CheckStageMaskQueueCompatibility(VkCommandBuffer command_buffer, VkPipelineStageFlags stage_mask,
VkQueueFlags queue_flags, const char *function, const char *src_or_dest,
const char *error_code) {
bool skip = false;
// Lookup each bit in the stagemask and check for overlap between its table bits and queue_flags
for (const auto &item : stage_flag_bit_array) {
if (stage_mask & item) {
if ((supported_pipeline_stages_table[item] & queue_flags) == 0) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(command_buffer), error_code,
"%s(): %s flag %s is not compatible with the queue family properties of this command buffer.",
function, src_or_dest, string_VkPipelineStageFlagBits(static_cast<VkPipelineStageFlagBits>(item)));
}
}
}
return skip;
}
// Check if all barriers are of a given operation type.
template <typename Barrier, typename OpCheck>
bool AllTransferOp(const COMMAND_POOL_STATE *pool, OpCheck &op_check, uint32_t count, const Barrier *barriers) {
if (!pool) return false;
for (uint32_t b = 0; b < count; b++) {
if (!op_check(pool, barriers + b)) return false;
}
return true;
}
// Look at the barriers to see if they are all release or all acquire; the result impacts queue properties validation
BarrierOperationsType CoreChecks::ComputeBarrierOperationsType(CMD_BUFFER_STATE *cb_state, uint32_t buffer_barrier_count,
const VkBufferMemoryBarrier *buffer_barriers,
uint32_t image_barrier_count,
const VkImageMemoryBarrier *image_barriers) {
auto pool = GetCommandPoolState(cb_state->createInfo.commandPool);
BarrierOperationsType op_type = kGeneral;
// Look at the barrier details only if they exist
// Note: AllTransferOp returns true for count == 0
if ((buffer_barrier_count + image_barrier_count) != 0) {
if (AllTransferOp(pool, TempIsReleaseOp<VkBufferMemoryBarrier>, buffer_barrier_count, buffer_barriers) &&
AllTransferOp(pool, TempIsReleaseOp<VkImageMemoryBarrier>, image_barrier_count, image_barriers)) {
op_type = kAllRelease;
} else if (AllTransferOp(pool, IsAcquireOp<VkBufferMemoryBarrier>, buffer_barrier_count, buffer_barriers) &&
AllTransferOp(pool, IsAcquireOp<VkImageMemoryBarrier>, image_barrier_count, image_barriers)) {
op_type = kAllAcquire;
}
}
return op_type;
}
bool CoreChecks::ValidateStageMasksAgainstQueueCapabilities(CMD_BUFFER_STATE const *cb_state,
VkPipelineStageFlags source_stage_mask,
VkPipelineStageFlags dest_stage_mask,
BarrierOperationsType barrier_op_type, const char *function,
const char *error_code) {
bool skip = false;
uint32_t queue_family_index = commandPoolMap[cb_state->createInfo.commandPool].get()->queueFamilyIndex;
auto physical_device_state = GetPhysicalDeviceState();
// Any pipeline stage included in srcStageMask or dstStageMask must be supported by the capabilities of the queue family
// specified by the queueFamilyIndex member of the VkCommandPoolCreateInfo structure that was used to create the VkCommandPool
// that commandBuffer was allocated from, as specified in the table of supported pipeline stages.
if (queue_family_index < physical_device_state->queue_family_properties.size()) {
VkQueueFlags specified_queue_flags = physical_device_state->queue_family_properties[queue_family_index].queueFlags;
// Only check the source stage mask if any barriers aren't "acquire ownership"
if ((barrier_op_type != kAllAcquire) && (source_stage_mask & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) == 0) {
skip |= CheckStageMaskQueueCompatibility(cb_state->commandBuffer, source_stage_mask, specified_queue_flags, function,
"srcStageMask", error_code);
}
// Only check the dest stage mask if any barriers aren't "release ownership"
if ((barrier_op_type != kAllRelease) && (dest_stage_mask & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) == 0) {
skip |= CheckStageMaskQueueCompatibility(cb_state->commandBuffer, dest_stage_mask, specified_queue_flags, function,
"dstStageMask", error_code);
}
}
return skip;
}
bool CoreChecks::PreCallValidateCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
VkPipelineStageFlags sourceStageMask, VkPipelineStageFlags dstStageMask,
uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
auto barrier_op_type = ComputeBarrierOperationsType(cb_state, bufferMemoryBarrierCount, pBufferMemoryBarriers,
imageMemoryBarrierCount, pImageMemoryBarriers);
bool skip = ValidateStageMasksAgainstQueueCapabilities(cb_state, sourceStageMask, dstStageMask, barrier_op_type,
"vkCmdWaitEvents", "VUID-vkCmdWaitEvents-srcStageMask-01164");
skip |= ValidateStageMaskGsTsEnables(sourceStageMask, "vkCmdWaitEvents()", "VUID-vkCmdWaitEvents-srcStageMask-01159",
"VUID-vkCmdWaitEvents-srcStageMask-01161", "VUID-vkCmdWaitEvents-srcStageMask-02111",
"VUID-vkCmdWaitEvents-srcStageMask-02112");
skip |= ValidateStageMaskGsTsEnables(dstStageMask, "vkCmdWaitEvents()", "VUID-vkCmdWaitEvents-dstStageMask-01160",
"VUID-vkCmdWaitEvents-dstStageMask-01162", "VUID-vkCmdWaitEvents-dstStageMask-02113",
"VUID-vkCmdWaitEvents-dstStageMask-02114");
skip |= ValidateCmdQueueFlags(cb_state, "vkCmdWaitEvents()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
"VUID-vkCmdWaitEvents-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_WAITEVENTS, "vkCmdWaitEvents()");
skip |= ValidateBarriersToImages(cb_state, imageMemoryBarrierCount, pImageMemoryBarriers, "vkCmdWaitEvents()");
skip |= ValidateBarriers("vkCmdWaitEvents()", cb_state, sourceStageMask, dstStageMask, memoryBarrierCount, pMemoryBarriers,
bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
return skip;
}
void CoreChecks::PreCallRecordCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
VkPipelineStageFlags sourceStageMask, VkPipelineStageFlags dstStageMask,
uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
auto first_event_index = cb_state->events.size();
for (uint32_t i = 0; i < eventCount; ++i) {
auto event_state = GetEventState(pEvents[i]);
if (event_state) {
AddCommandBufferBinding(&event_state->cb_bindings, VulkanTypedHandle(pEvents[i], kVulkanObjectTypeEvent), cb_state);
event_state->cb_bindings.insert(cb_state);
}
cb_state->waitedEvents.insert(pEvents[i]);
cb_state->events.push_back(pEvents[i]);
}
cb_state->eventUpdates.emplace_back(
[=](VkQueue q) { return ValidateEventStageMask(q, cb_state, eventCount, first_event_index, sourceStageMask); });
TransitionImageLayouts(cb_state, imageMemoryBarrierCount, pImageMemoryBarriers);
if (enabled.gpu_validation) {
GpuPreCallValidateCmdWaitEvents(sourceStageMask);
}
}
void CoreChecks::PostCallRecordCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
VkPipelineStageFlags sourceStageMask, VkPipelineStageFlags dstStageMask,
uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
RecordBarriersQFOTransfers(cb_state, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount,
pImageMemoryBarriers);
}
bool CoreChecks::PreCallValidateCmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask,
VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
uint32_t bufferMemoryBarrierCount,
const VkBufferMemoryBarrier *pBufferMemoryBarriers,
uint32_t imageMemoryBarrierCount,
const VkImageMemoryBarrier *pImageMemoryBarriers) {
CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = false;
auto barrier_op_type = ComputeBarrierOperationsType(cb_state, bufferMemoryBarrierCount, pBufferMemoryBarriers,
imageMemoryBarrierCount, pImageMemoryBarriers);
skip |= ValidateStageMasksAgainstQueueCapabilities(cb_state, srcStageMask, dstStageMask, barrier_op_type,
"vkCmdPipelineBarrier", "VUID-vkCmdPipelineBarrier-srcStageMask-01183");
skip |= ValidateCmdQueueFlags(cb_state, "vkCmdPipelineBarrier()",
VK_QUEUE_TRANSFER_BIT | VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
"VUID-vkCmdPipelineBarrier-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_PIPELINEBARRIER, "vkCmdPipelineBarrier()");
skip |=
ValidateStageMaskGsTsEnables(srcStageMask, "vkCmdPipelineBarrier()", "VUID-vkCmdPipelineBarrier-srcStageMask-01168",
"VUID-vkCmdPipelineBarrier-srcStageMask-01170", "VUID-vkCmdPipelineBarrier-srcStageMask-02115",
"VUID-vkCmdPipelineBarrier-srcStageMask-02116");
skip |=
ValidateStageMaskGsTsEnables(dstStageMask, "vkCmdPipelineBarrier()", "VUID-vkCmdPipelineBarrier-dstStageMask-01169",
"VUID-vkCmdPipelineBarrier-dstStageMask-01171", "VUID-vkCmdPipelineBarrier-dstStageMask-02117",
"VUID-vkCmdPipelineBarrier-dstStageMask-02118");
if (cb_state->activeRenderPass) {
skip |= ValidateRenderPassPipelineBarriers("vkCmdPipelineBarrier()", cb_state, srcStageMask, dstStageMask, dependencyFlags,
memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
if (skip) return true; // Early return to avoid redundant errors from below calls
}
skip |= ValidateBarriersToImages(cb_state, imageMemoryBarrierCount, pImageMemoryBarriers, "vkCmdPipelineBarrier()");
skip |= ValidateBarriers("vkCmdPipelineBarrier()", cb_state, srcStageMask, dstStageMask, memoryBarrierCount, pMemoryBarriers,
bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
return skip;
}
void CoreChecks::PreCallRecordCmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask,
VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
uint32_t bufferMemoryBarrierCount,
const VkBufferMemoryBarrier *pBufferMemoryBarriers,
uint32_t imageMemoryBarrierCount,
const VkImageMemoryBarrier *pImageMemoryBarriers) {
CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
RecordBarriersQFOTransfers(cb_state, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount,
pImageMemoryBarriers);
TransitionImageLayouts(cb_state, imageMemoryBarrierCount, pImageMemoryBarriers);
}
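// Update both the command-buffer-local and the queue-local query state maps; returns false because these
// submit-time callbacks report validation errors, and a state update is never an error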
bool ValidationStateTracker::SetQueryState(VkQueue queue, VkCommandBuffer commandBuffer, QueryObject object, QueryState value) {
CMD_BUFFER_STATE *pCB = GetCBState(commandBuffer);
if (pCB) {
pCB->queryToStateMap[object] = value;
}
auto queue_data = queueMap.find(queue);
if (queue_data != queueMap.end()) {
queue_data->second.queryToStateMap[object] = value;
}
return false;
}
bool ValidationStateTracker::SetQueryStateMulti(VkQueue queue, VkCommandBuffer commandBuffer, VkQueryPool queryPool,
uint32_t firstQuery, uint32_t queryCount, QueryState value) {
CMD_BUFFER_STATE *pCB = GetCBState(commandBuffer);
auto queue_data = queueMap.find(queue);
for (uint32_t i = 0; i < queryCount; i++) {
QueryObject object = {queryPool, firstQuery + i};
if (pCB) {
pCB->queryToStateMap[object] = value;
}
if (queue_data != queueMap.end()) {
queue_data->second.queryToStateMap[object] = value;
}
}
return false;
}
bool CoreChecks::ValidateBeginQuery(const CMD_BUFFER_STATE *cb_state, const QueryObject &query_obj, VkFlags flags, CMD_TYPE cmd,
const char *cmd_name, const char *vuid_queue_flags, const char *vuid_queue_feedback,
const char *vuid_queue_occlusion, const char *vuid_precise,
const char *vuid_query_count) const {
bool skip = false;
const auto &query_pool_ci = GetQueryPoolState(query_obj.pool)->createInfo;
// There are tighter queue constraints to test for certain query pools
if (query_pool_ci.queryType == VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT) {
skip |= ValidateCmdQueueFlags(cb_state, cmd_name, VK_QUEUE_GRAPHICS_BIT, vuid_queue_feedback);
}
if (query_pool_ci.queryType == VK_QUERY_TYPE_OCCLUSION) {
skip |= ValidateCmdQueueFlags(cb_state, cmd_name, VK_QUEUE_GRAPHICS_BIT, vuid_queue_occlusion);
}
skip |= ValidateCmdQueueFlags(cb_state, cmd_name, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, vuid_queue_flags);
if (flags & VK_QUERY_CONTROL_PRECISE_BIT) {
if (!enabled_features.core.occlusionQueryPrecise) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer), vuid_precise,
"%s: VK_QUERY_CONTROL_PRECISE_BIT provided, but precise occlusion queries not enabled on the device.",
cmd_name);
}
if (query_pool_ci.queryType != VK_QUERY_TYPE_OCCLUSION) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer), vuid_precise,
"%s: VK_QUERY_CONTROL_PRECISE_BIT provided, but pool query type is not VK_QUERY_TYPE_OCCLUSION", cmd_name);
}
}
if (query_obj.query >= query_pool_ci.queryCount) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer), vuid_query_count,
"%s: Query index %" PRIu32 " must be less than query count %" PRIu32 " of %s.", cmd_name, query_obj.query,
query_pool_ci.queryCount, report_data->FormatHandle(query_obj.pool).c_str());
}
skip |= ValidateCmd(cb_state, cmd, cmd_name);
return skip;
}
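// Track the query as active and started, and enqueue the submit-time transition to QUERYSTATE_RUNNING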
void ValidationStateTracker::RecordCmdBeginQuery(CMD_BUFFER_STATE *cb_state, const QueryObject &query_obj) {
cb_state->activeQueries.insert(query_obj);
cb_state->startedQueries.insert(query_obj);
cb_state->queryUpdates.emplace_back([this, cb_state, query_obj](VkQueue q) {
SetQueryState(q, cb_state->commandBuffer, query_obj, QUERYSTATE_RUNNING);
return false;
});
AddCommandBufferBinding(&GetQueryPoolState(query_obj.pool)->cb_bindings,
VulkanTypedHandle(query_obj.pool, kVulkanObjectTypeQueryPool), cb_state);
}
bool CoreChecks::PreCallValidateCmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot, VkFlags flags) {
if (disabled.query_validation) return false;
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
QueryObject query_obj(queryPool, slot);
return ValidateBeginQuery(cb_state, query_obj, flags, CMD_BEGINQUERY, "vkCmdBeginQuery()",
"VUID-vkCmdBeginQuery-commandBuffer-cmdpool", "VUID-vkCmdBeginQuery-queryType-02327",
"VUID-vkCmdBeginQuery-queryType-00803", "VUID-vkCmdBeginQuery-queryType-00800",
"VUID-vkCmdBeginQuery-query-00802");
}
bool CoreChecks::VerifyQueryIsReset(VkQueue queue, VkCommandBuffer commandBuffer, QueryObject query_obj) const {
bool skip = false;
auto queue_data = GetQueueState(queue);
if (!queue_data) return false;
QueryState state = GetQueryState(queue_data, query_obj.pool, query_obj.query);
if (state != QUERYSTATE_RESET) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), kVUID_Core_DrawState_QueryNotReset,
"vkCmdBeginQuery(): %s and query %" PRIu32
": query not reset. "
"After query pool creation, each query must be reset before it is used. "
"Queries must also be reset between uses.",
report_data->FormatHandle(query_obj.pool).c_str(), query_obj.query);
}
return skip;
}
void ValidationStateTracker::PostCallRecordCmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot,
VkFlags flags) {
QueryObject query = {queryPool, slot};
CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
RecordCmdBeginQuery(cb_state, query);
}
void CoreChecks::EnqueueVerifyBeginQuery(VkCommandBuffer command_buffer, const QueryObject &query_obj) {
CMD_BUFFER_STATE *cb_state = GetCBState(command_buffer);
// Enqueue the submit time validation here, ahead of the submit time state update in the StateTracker's PostCallRecord
cb_state->queryUpdates.emplace_back(
[this, cb_state, query_obj](VkQueue q) { return VerifyQueryIsReset(q, cb_state->commandBuffer, query_obj); });
}
void CoreChecks::PreCallRecordCmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot, VkFlags flags) {
QueryObject query_obj = {queryPool, slot};
EnqueueVerifyBeginQuery(commandBuffer, query_obj);
}
bool CoreChecks::ValidateCmdEndQuery(const CMD_BUFFER_STATE *cb_state, const QueryObject &query_obj, CMD_TYPE cmd,
const char *cmd_name, const char *vuid_queue_flags, const char *vuid_active_queries) const {
bool skip = false;
if (!cb_state->activeQueries.count(query_obj)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer), vuid_active_queries,
"%s: Ending a query before it was started: %s, index %d.", cmd_name,
report_data->FormatHandle(query_obj.pool).c_str(), query_obj.query);
}
skip |= ValidateCmdQueueFlags(cb_state, cmd_name, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, vuid_queue_flags);
skip |= ValidateCmd(cb_state, cmd, cmd_name);
return skip;
}
bool CoreChecks::PreCallValidateCmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot) {
if (disabled.query_validation) return false;
QueryObject query_obj = {queryPool, slot};
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
return ValidateCmdEndQuery(cb_state, query_obj, CMD_ENDQUERY, "vkCmdEndQuery()", "VUID-vkCmdEndQuery-commandBuffer-cmdpool",
"VUID-vkCmdEndQuery-None-01923");
}
void ValidationStateTracker::RecordCmdEndQuery(CMD_BUFFER_STATE *cb_state, const QueryObject &query_obj) {
cb_state->activeQueries.erase(query_obj);
cb_state->queryUpdates.emplace_back(
[this, cb_state, query_obj](VkQueue q) { return SetQueryState(q, cb_state->commandBuffer, query_obj, QUERYSTATE_ENDED); });
AddCommandBufferBinding(&GetQueryPoolState(query_obj.pool)->cb_bindings,
VulkanTypedHandle(query_obj.pool, kVulkanObjectTypeQueryPool), cb_state);
}
void ValidationStateTracker::PostCallRecordCmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot) {
QueryObject query_obj = {queryPool, slot};
CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
RecordCmdEndQuery(cb_state, query_obj);
}
bool CoreChecks::PreCallValidateCmdResetQueryPool(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery,
uint32_t queryCount) {
if (disabled.query_validation) return false;
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
bool skip = InsideRenderPass(cb_state, "vkCmdResetQueryPool()", "VUID-vkCmdResetQueryPool-renderpass");
skip |= ValidateCmd(cb_state, CMD_RESETQUERYPOOL, "vkCmdResetQueryPool()");
skip |= ValidateCmdQueueFlags(cb_state, "vkCmdResetQueryPool()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
"VUID-vkCmdResetQueryPool-commandBuffer-cmdpool");
return skip;
}
void ValidationStateTracker::PostCallRecordCmdResetQueryPool(VkCommandBuffer commandBuffer, VkQueryPool queryPool,
uint32_t firstQuery, uint32_t queryCount) {
CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
cb_state->queryUpdates.emplace_back([this, commandBuffer, queryPool, firstQuery, queryCount](VkQueue q) {
return SetQueryStateMulti(q, commandBuffer, queryPool, firstQuery, queryCount, QUERYSTATE_RESET);
});
AddCommandBufferBinding(&GetQueryPoolState(queryPool)->cb_bindings, VulkanTypedHandle(queryPool, kVulkanObjectTypeQueryPool),
cb_state);
}
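// Resolve the current state of a query slot, preferring the per-queue map (which reflects submissions in
// flight on that queue) over the device-wide map.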
QueryState CoreChecks::GetQueryState(const QUEUE_STATE *queue_data, VkQueryPool queryPool, uint32_t queryIndex) const {
QueryObject query = {queryPool, queryIndex};
const std::array<const decltype(queryToStateMap) *, 2> map_list = {&queue_data->queryToStateMap, &queryToStateMap};
for (const auto map : map_list) {
auto query_data = map->find(query);
if (query_data != map->end()) {
return query_data->second;
}
}
return QUERYSTATE_UNKNOWN;
}
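// Map a query's tracked state plus the caller's result flags to the kind of result a copy can legally
// produce; anything other than QUERYRESULT_SOME_DATA is reported by ValidateQuery below.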
static QueryResultType GetQueryResultType(QueryState state, VkQueryResultFlags flags) {
switch (state) {
case QUERYSTATE_UNKNOWN:
return QUERYRESULT_UNKNOWN;
case QUERYSTATE_RESET:
case QUERYSTATE_RUNNING:
if (flags & VK_QUERY_RESULT_WAIT_BIT) {
return ((state == QUERYSTATE_RESET) ? QUERYRESULT_WAIT_ON_RESET : QUERYRESULT_WAIT_ON_RUNNING);
} else if ((flags & VK_QUERY_RESULT_PARTIAL_BIT) || (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT)) {
return QUERYRESULT_SOME_DATA;
} else {
return QUERYRESULT_NO_DATA;
}
case QUERYSTATE_ENDED:
if ((flags & VK_QUERY_RESULT_WAIT_BIT) || (flags & VK_QUERY_RESULT_PARTIAL_BIT) ||
(flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT)) {
return QUERYRESULT_SOME_DATA;
} else {
return QUERYRESULT_MAYBE_NO_DATA;
}
case QUERYSTATE_AVAILABLE:
return QUERYRESULT_SOME_DATA;
}
assert(false);
return QUERYRESULT_UNKNOWN;
}
bool CoreChecks::ValidateQuery(VkQueue queue, CMD_BUFFER_STATE *pCB, VkQueryPool queryPool, uint32_t firstQuery,
uint32_t queryCount, VkQueryResultFlags flags) const {
bool skip = false;
auto queue_data = GetQueueState(queue);
if (!queue_data) return false;
for (uint32_t i = 0; i < queryCount; i++) {
QueryState state = GetQueryState(queue_data, queryPool, firstQuery + i);
QueryResultType result_type = GetQueryResultType(state, flags);
if (result_type != QUERYRESULT_SOME_DATA) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(pCB->commandBuffer), kVUID_Core_DrawState_InvalidQuery,
"Requesting a copy from query to buffer on %s query %" PRIu32 ": %s",
report_data->FormatHandle(queryPool).c_str(), firstQuery + i, string_QueryResultType(result_type));
}
}
return skip;
}
bool CoreChecks::PreCallValidateCmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery,
uint32_t queryCount, VkBuffer dstBuffer, VkDeviceSize dstOffset,
VkDeviceSize stride, VkQueryResultFlags flags) {
if (disabled.query_validation) return false;
const auto cb_state = GetCBState(commandBuffer);
const auto dst_buff_state = GetBufferState(dstBuffer);
assert(cb_state);
assert(dst_buff_state);
bool skip = ValidateMemoryIsBoundToBuffer(dst_buff_state, "vkCmdCopyQueryPoolResults()",
"VUID-vkCmdCopyQueryPoolResults-dstBuffer-00826");
skip |= ValidateQueryPoolStride("VUID-vkCmdCopyQueryPoolResults-flags-00822", "VUID-vkCmdCopyQueryPoolResults-flags-00823",
stride, "dstOffset", dstOffset, flags);
// Validate that DST buffer has correct usage flags set
skip |= ValidateBufferUsageFlags(dst_buff_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
"VUID-vkCmdCopyQueryPoolResults-dstBuffer-00825", "vkCmdCopyQueryPoolResults()",
"VK_BUFFER_USAGE_TRANSFER_DST_BIT");
skip |= ValidateCmdQueueFlags(cb_state, "vkCmdCopyQueryPoolResults()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
"VUID-vkCmdCopyQueryPoolResults-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_COPYQUERYPOOLRESULTS, "vkCmdCopyQueryPoolResults()");
skip |= InsideRenderPass(cb_state, "vkCmdCopyQueryPoolResults()", "VUID-vkCmdCopyQueryPoolResults-renderpass");
return skip;
}
void ValidationStateTracker::PostCallRecordCmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool,
uint32_t firstQuery, uint32_t queryCount, VkBuffer dstBuffer,
VkDeviceSize dstOffset, VkDeviceSize stride,
VkQueryResultFlags flags) {
auto cb_state = GetCBState(commandBuffer);
auto dst_buff_state = GetBufferState(dstBuffer);
AddCommandBufferBindingBuffer(cb_state, dst_buff_state);
AddCommandBufferBinding(&GetQueryPoolState(queryPool)->cb_bindings, VulkanTypedHandle(queryPool, kVulkanObjectTypeQueryPool),
cb_state);
}
void CoreChecks::PreCallRecordCmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery,
uint32_t queryCount, VkBuffer dstBuffer, VkDeviceSize dstOffset,
VkDeviceSize stride, VkQueryResultFlags flags) {
auto cb_state = GetCBState(commandBuffer);
cb_state->queryUpdates.emplace_back([this, cb_state, queryPool, firstQuery, queryCount, flags](VkQueue q) {
return ValidateQuery(q, cb_state, queryPool, firstQuery, queryCount, flags);
});
}
bool CoreChecks::PreCallValidateCmdPushConstants(VkCommandBuffer commandBuffer, VkPipelineLayout layout,
VkShaderStageFlags stageFlags, uint32_t offset, uint32_t size,
const void *pValues) {
bool skip = false;
CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
skip |= ValidateCmdQueueFlags(cb_state, "vkCmdPushConstants()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
"VUID-vkCmdPushConstants-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_PUSHCONSTANTS, "vkCmdPushConstants()");
skip |= ValidatePushConstantRange(offset, size, "vkCmdPushConstants()");
if (0 == stageFlags) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdPushConstants-stageFlags-requiredbitmask",
"vkCmdPushConstants() call has no stageFlags set.");
}
// Verify the relationship between the command's stageFlags/offset/size and the pipeline layout's
// VkPushConstantRange(s): for every range that fully contains [offset, offset + size), the command's
// stageFlags must include all of that range's stageFlags, and every stage in the command's stageFlags
// must be covered by at least one such range.
if (!skip) {
const auto &ranges = *GetPipelineLayout(layout)->push_constant_ranges;
VkShaderStageFlags found_stages = 0;
for (const auto &range : ranges) {
if ((offset >= range.offset) && (offset + size <= range.offset + range.size)) {
VkShaderStageFlags matching_stages = range.stageFlags & stageFlags;
if (matching_stages != range.stageFlags) {
// "VUID-vkCmdPushConstants-offset-01796" VUID-vkCmdPushConstants-offset-01796
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdPushConstants-offset-01796",
"vkCmdPushConstants(): stageFlags (0x%" PRIx32 ", offset (%" PRIu32 "), and size (%" PRIu32
"), must contain all stages in overlapping VkPushConstantRange stageFlags (0x%" PRIx32
"), offset (%" PRIu32 "), and size (%" PRIu32 ") in %s.",
(uint32_t)stageFlags, offset, size, (uint32_t)range.stageFlags, range.offset, range.size,
report_data->FormatHandle(layout).c_str());
}
// Accumulate all stages we've found
found_stages = matching_stages | found_stages;
}
}
if (found_stages != stageFlags) {
// "VUID-vkCmdPushConstants-offset-01795" VUID-vkCmdPushConstants-offset-01795
uint32_t missing_stages = ~found_stages & stageFlags;
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdPushConstants-offset-01795",
"vkCmdPushConstants(): stageFlags = 0x%" PRIx32
", VkPushConstantRange in %s overlapping offset = %d and size = %d, do not contain "
"stageFlags 0x%" PRIx32 ".",
(uint32_t)stageFlags, report_data->FormatHandle(layout).c_str(), offset, size, missing_stages);
}
}
return skip;
}
bool CoreChecks::PreCallValidateCmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage,
VkQueryPool queryPool, uint32_t slot) {
if (disabled.query_validation) return false;
CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdWriteTimestamp()",
VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT,
"VUID-vkCmdWriteTimestamp-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_WRITETIMESTAMP, "vkCmdWriteTimestamp()");
return skip;
}
void CoreChecks::PostCallRecordCmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage,
VkQueryPool queryPool, uint32_t slot) {
CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
QueryObject query = {queryPool, slot};
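// A timestamp write behaves like an atomic begin/end: at submit time, verify the slot was reset, then
// transition it directly to the ENDED state.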
cb_state->queryUpdates.emplace_back([this, commandBuffer, query](VkQueue q) {
bool skip = false;
skip |= VerifyQueryIsReset(q, commandBuffer, query);
skip |= SetQueryState(q, commandBuffer, query, QUERYSTATE_ENDED);
return skip;
});
AddCommandBufferBinding(&GetQueryPoolState(queryPool)->cb_bindings, VulkanTypedHandle(queryPool, kVulkanObjectTypeQueryPool),
cb_state);
}
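// Verify that every attachment referenced by a subpass was created with the required usage bit, reading
// the usage either from the bound image view's image (normal framebuffers) or from the
// VkFramebufferAttachmentImageInfoKHR entries (imageless framebuffers).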
bool CoreChecks::MatchUsage(uint32_t count, const VkAttachmentReference2KHR *attachments, const VkFramebufferCreateInfo *fbci,
VkImageUsageFlagBits usage_flag, const char *error_code) const {
bool skip = false;
if (attachments) {
for (uint32_t attach = 0; attach < count; attach++) {
if (attachments[attach].attachment != VK_ATTACHMENT_UNUSED) {
// Attachment counts are verified elsewhere, but prevent an invalid access
if (attachments[attach].attachment < fbci->attachmentCount) {
if ((fbci->flags & VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT_KHR) == 0) {
const VkImageView *image_view = &fbci->pAttachments[attachments[attach].attachment];
auto view_state = GetImageViewState(*image_view);
if (view_state) {
const auto *image_state = GetImageState(view_state->create_info.image);
// Guard against a destroyed or unknown image backing this view before touching its create info.
if (image_state != nullptr) {
const VkImageCreateInfo *ici = &image_state->createInfo;
if ((ici->usage & usage_flag) == 0) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, error_code,
"vkCreateFramebuffer: Framebuffer Attachment (%d) conflicts with the image's "
"IMAGE_USAGE flags (%s).",
attachments[attach].attachment, string_VkImageUsageFlagBits(usage_flag));
}
}
}
} else {
const VkFramebufferAttachmentsCreateInfoKHR *fbaci =
lvl_find_in_chain<VkFramebufferAttachmentsCreateInfoKHR>(fbci->pNext);
if (fbaci != nullptr && fbaci->pAttachmentImageInfos != nullptr &&
fbaci->attachmentImageInfoCount > attachments[attach].attachment) {
uint32_t image_usage = fbaci->pAttachmentImageInfos[attachments[attach].attachment].usage;
if ((image_usage & usage_flag) == 0) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
0, error_code,
"vkCreateFramebuffer: Framebuffer attachment info (%d) conflicts with the image's "
"IMAGE_USAGE flags (%s).",
attachments[attach].attachment, string_VkImageUsageFlagBits(usage_flag));
}
}
}
}
}
}
}
return skip;
}
// Validate VkFramebufferCreateInfo which includes:
// 1. attachmentCount equals renderPass attachmentCount
// 2. corresponding framebuffer and renderpass attachments have matching formats
// 3. corresponding framebuffer and renderpass attachments have matching sample counts
// 4. fb attachments only have a single mip level
// 5. fb attachment dimensions are each at least as large as the fb
// 6. fb attachments use identity swizzle
// 7. fb attachments used by renderPass for color/input/ds have correct usage bit set
// 8. fb dimensions are within physical device limits
// 9. for imageless framebuffers, the attachment image infos are consistent with the renderPass
bool CoreChecks::ValidateFramebufferCreateInfo(const VkFramebufferCreateInfo *pCreateInfo) const {
bool skip = false;
const VkFramebufferAttachmentsCreateInfoKHR *pFramebufferAttachmentsCreateInfo =
lvl_find_in_chain<VkFramebufferAttachmentsCreateInfoKHR>(pCreateInfo->pNext);
if ((pCreateInfo->flags & VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT_KHR) != 0) {
if (!enabled_features.imageless_framebuffer_features.imagelessFramebuffer) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkFramebufferCreateInfo-flags-03189",
"vkCreateFramebuffer(): VkFramebufferCreateInfo flags includes VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT_KHR, "
"but the imagelessFramebuffer feature is not enabled.");
}
if (pFramebufferAttachmentsCreateInfo == nullptr) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkFramebufferCreateInfo-flags-03190",
"vkCreateFramebuffer(): VkFramebufferCreateInfo flags includes VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT_KHR, "
"but no instance of VkFramebufferAttachmentsCreateInfoKHR is present in the pNext chain.");
} else {
if (pFramebufferAttachmentsCreateInfo->attachmentImageInfoCount != 0 &&
pFramebufferAttachmentsCreateInfo->attachmentImageInfoCount != pCreateInfo->attachmentCount) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkFramebufferCreateInfo-flags-03191",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachmentCount is %u, but "
"VkFramebufferAttachmentsCreateInfoKHR attachmentImageInfoCount is %u.",
pCreateInfo->attachmentCount, pFramebufferAttachmentsCreateInfo->attachmentImageInfoCount);
}
}
}
auto rp_state = GetRenderPassState(pCreateInfo->renderPass);
if (rp_state) {
const VkRenderPassCreateInfo2KHR *rpci = rp_state->createInfo.ptr();
if (rpci->attachmentCount != pCreateInfo->attachmentCount) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
HandleToUint64(pCreateInfo->renderPass), "VUID-VkFramebufferCreateInfo-attachmentCount-00876",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachmentCount of %u does not match attachmentCount "
"of %u of %s being used to create Framebuffer.",
pCreateInfo->attachmentCount, rpci->attachmentCount,
report_data->FormatHandle(pCreateInfo->renderPass).c_str());
} else {
// attachmentCounts match, so make sure corresponding attachment details line up
if ((pCreateInfo->flags & VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT_KHR) == 0) {
const VkImageView *image_views = pCreateInfo->pAttachments;
for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
auto view_state = GetImageViewState(image_views[i]);
if (view_state == nullptr) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
HandleToUint64(image_views[i]), "VUID-VkFramebufferCreateInfo-flags-03188",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u is not a valid VkImageView.", i);
} else {
auto &ivci = view_state->create_info;
if (ivci.format != rpci->pAttachments[i].format) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
HandleToUint64(pCreateInfo->renderPass), "VUID-VkFramebufferCreateInfo-pAttachments-00880",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has format of %s that does not "
"match the format of %s used by the corresponding attachment for %s.",
i, string_VkFormat(ivci.format), string_VkFormat(rpci->pAttachments[i].format),
report_data->FormatHandle(pCreateInfo->renderPass).c_str());
}
const VkImageCreateInfo *ici = &GetImageState(ivci.image)->createInfo;
if (ici->samples != rpci->pAttachments[i].samples) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
HandleToUint64(pCreateInfo->renderPass), "VUID-VkFramebufferCreateInfo-pAttachments-00881",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has %s samples that do not "
"match the %s "
"samples used by the corresponding attachment for %s.",
i, string_VkSampleCountFlagBits(ici->samples),
string_VkSampleCountFlagBits(rpci->pAttachments[i].samples),
report_data->FormatHandle(pCreateInfo->renderPass).c_str());
}
// Verify that view only has a single mip level
if (ivci.subresourceRange.levelCount != 1) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkFramebufferCreateInfo-pAttachments-00883",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has mip levelCount of %u but "
"only a single mip level (levelCount == 1) is allowed when creating a Framebuffer.",
i, ivci.subresourceRange.levelCount);
}
const uint32_t mip_level = ivci.subresourceRange.baseMipLevel;
uint32_t mip_width = max(1u, ici->extent.width >> mip_level);
uint32_t mip_height = max(1u, ici->extent.height >> mip_level);
if ((ivci.subresourceRange.layerCount < pCreateInfo->layers) || (mip_width < pCreateInfo->width) ||
(mip_height < pCreateInfo->height)) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkFramebufferCreateInfo-pAttachments-00882",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u mip level %u has dimensions "
"smaller than the corresponding framebuffer dimensions. Here are the respective dimensions for "
"attachment #%u, framebuffer:\n"
"width: %u, %u\n"
"height: %u, %u\n"
"layerCount: %u, %u\n",
i, ivci.subresourceRange.baseMipLevel, i, mip_width, pCreateInfo->width, mip_height,
pCreateInfo->height, ivci.subresourceRange.layerCount, pCreateInfo->layers);
}
if (((ivci.components.r != VK_COMPONENT_SWIZZLE_IDENTITY) &&
(ivci.components.r != VK_COMPONENT_SWIZZLE_R)) ||
((ivci.components.g != VK_COMPONENT_SWIZZLE_IDENTITY) &&
(ivci.components.g != VK_COMPONENT_SWIZZLE_G)) ||
((ivci.components.b != VK_COMPONENT_SWIZZLE_IDENTITY) &&
(ivci.components.b != VK_COMPONENT_SWIZZLE_B)) ||
((ivci.components.a != VK_COMPONENT_SWIZZLE_IDENTITY) &&
(ivci.components.a != VK_COMPONENT_SWIZZLE_A))) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkFramebufferCreateInfo-pAttachments-00884",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has non-identy swizzle. All "
"framebuffer attachments must have been created with the identity swizzle. Here are the actual "
"swizzle values:\n"
"r swizzle = %s\n"
"g swizzle = %s\n"
"b swizzle = %s\n"
"a swizzle = %s\n",
i, string_VkComponentSwizzle(ivci.components.r), string_VkComponentSwizzle(ivci.components.g),
string_VkComponentSwizzle(ivci.components.b), string_VkComponentSwizzle(ivci.components.a));
}
}
}
} else if (pFramebufferAttachmentsCreateInfo) {
// VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT_KHR is set
for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
auto &aii = pFramebufferAttachmentsCreateInfo->pAttachmentImageInfos[i];
bool formatFound = false;
for (uint32_t j = 0; j < aii.viewFormatCount; ++j) {
if (aii.pViewFormats[j] == rpci->pAttachments[i].format) {
formatFound = true;
}
}
if (!formatFound) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
HandleToUint64(pCreateInfo->renderPass), "VUID-VkFramebufferCreateInfo-flags-03205",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment info #%u does not include "
"format %s used "
"by the corresponding attachment for renderPass (%s).",
i, string_VkFormat(rpci->pAttachments[i].format),
report_data->FormatHandle(pCreateInfo->renderPass).c_str());
}
const char *mismatchedLayersNoMultiviewVuid = device_extensions.vk_khr_multiview
? "VUID-VkFramebufferCreateInfo-renderPass-03199"
: "VUID-VkFramebufferCreateInfo-flags-03200";
if ((rpci->subpassCount == 0) || (rpci->pSubpasses[0].viewMask == 0)) {
if (aii.layerCount < pCreateInfo->layers) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
mismatchedLayersNoMultiviewVuid,
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment info #%u has only #%u layers, "
"but framebuffer has #%u layers.",
i, aii.layerCount, pCreateInfo->layers);
}
}
if (!device_extensions.vk_ext_fragment_density_map) {
if (aii.width < pCreateInfo->width) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkFramebufferCreateInfo-flags-03192",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment info #%u has a width of only #%u, "
"but framebuffer has a width of #%u.",
i, aii.width, pCreateInfo->width);
}
if (aii.height < pCreateInfo->height) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkFramebufferCreateInfo-flags-03193",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment info #%u has a height of only #%u, "
"but framebuffer has a height of #%u.",
i, aii.height, pCreateInfo->height);
}
}
}
// Validate image usage
uint32_t attachment_index = VK_ATTACHMENT_UNUSED;
for (uint32_t i = 0; i < rpci->subpassCount; ++i) {
skip |= MatchUsage(rpci->pSubpasses[i].colorAttachmentCount, rpci->pSubpasses[i].pColorAttachments, pCreateInfo,
VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, "VUID-VkFramebufferCreateInfo-flags-03201");
skip |=
MatchUsage(rpci->pSubpasses[i].colorAttachmentCount, rpci->pSubpasses[i].pResolveAttachments, pCreateInfo,
VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, "VUID-VkFramebufferCreateInfo-flags-03201");
skip |= MatchUsage(1, rpci->pSubpasses[i].pDepthStencilAttachment, pCreateInfo,
VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, "VUID-VkFramebufferCreateInfo-flags-03202");
skip |= MatchUsage(rpci->pSubpasses[i].inputAttachmentCount, rpci->pSubpasses[i].pInputAttachments, pCreateInfo,
VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT, "VUID-VkFramebufferCreateInfo-flags-03204");
const VkSubpassDescriptionDepthStencilResolveKHR *pDepthStencilResolve =
lvl_find_in_chain<VkSubpassDescriptionDepthStencilResolveKHR>(rpci->pSubpasses[i].pNext);
if (device_extensions.vk_khr_depth_stencil_resolve && pDepthStencilResolve != nullptr) {
skip |= MatchUsage(1, pDepthStencilResolve->pDepthStencilResolveAttachment, pCreateInfo,
VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, "VUID-VkFramebufferCreateInfo-flags-03203");
}
}
if (device_extensions.vk_khr_multiview) {
if ((rpci->subpassCount > 0) && (rpci->pSubpasses[0].viewMask != 0)) {
for (uint32_t i = 0; i < rpci->subpassCount; ++i) {
const VkSubpassDescriptionDepthStencilResolveKHR *pDepthStencilResolve =
lvl_find_in_chain<VkSubpassDescriptionDepthStencilResolveKHR>(rpci->pSubpasses[i].pNext);
uint32_t view_bits = rpci->pSubpasses[i].viewMask;
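// Find the index of the highest bit set in the subpass view mask; every attachment referenced by this
// subpass must provide more layers than that index.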
uint32_t highest_view_bit = 0;
for (int j = 0; j < 32; ++j) {
if (((view_bits >> j) & 1) != 0) {
highest_view_bit = j;
}
}
for (uint32_t j = 0; j < rpci->pSubpasses[i].colorAttachmentCount; ++j) {
attachment_index = rpci->pSubpasses[i].pColorAttachments[j].attachment;
if (attachment_index != VK_ATTACHMENT_UNUSED) {
uint32_t layer_count =
pFramebufferAttachmentsCreateInfo->pAttachmentImageInfos[attachment_index].layerCount;
if (layer_count <= highest_view_bit) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
HandleToUint64(pCreateInfo->renderPass),
"VUID-VkFramebufferCreateInfo-renderPass-03198",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment info %u "
"only specifies %u layers, but the view mask for subpass %u in renderPass (%s) "
"includes layer %u, with that attachment specified as a color attachment %u.",
attachment_index, layer_count, i,
report_data->FormatHandle(pCreateInfo->renderPass).c_str(), highest_view_bit, j);
}
}
if (rpci->pSubpasses[i].pResolveAttachments) {
attachment_index = rpci->pSubpasses[i].pResolveAttachments[j].attachment;
if (attachment_index != VK_ATTACHMENT_UNUSED) {
uint32_t layer_count =
pFramebufferAttachmentsCreateInfo->pAttachmentImageInfos[attachment_index].layerCount;
if (layer_count <= highest_view_bit) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
HandleToUint64(pCreateInfo->renderPass),
"VUID-VkFramebufferCreateInfo-renderPass-03198",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment info %u "
"only specifies %u layers, but the view mask for subpass %u in renderPass (%s) "
"includes layer %u, with that attachment specified as a resolve attachment %u.",
attachment_index, layer_count, i,
report_data->FormatHandle(pCreateInfo->renderPass).c_str(), highest_view_bit, j);
}
}
}
}
for (uint32_t j = 0; j < rpci->pSubpasses[i].inputAttachmentCount; ++j) {
attachment_index = rpci->pSubpasses[i].pInputAttachments[j].attachment;
if (attachment_index != VK_ATTACHMENT_UNUSED) {
uint32_t layer_count =
pFramebufferAttachmentsCreateInfo->pAttachmentImageInfos[attachment_index].layerCount;
if (layer_count <= highest_view_bit) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
HandleToUint64(pCreateInfo->renderPass),
"VUID-VkFramebufferCreateInfo-renderPass-03198",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment info %u "
"only specifies %u layers, but the view mask for subpass %u in renderPass (%s) "
"includes layer %u, with that attachment specified as an input attachment %u.",
attachment_index, layer_count, i,
report_data->FormatHandle(pCreateInfo->renderPass).c_str(), highest_view_bit, j);
}
}
}
if (rpci->pSubpasses[i].pDepthStencilAttachment != nullptr) {
attachment_index = rpci->pSubpasses[i].pDepthStencilAttachment->attachment;
if (attachment_index != VK_ATTACHMENT_UNUSED) {
uint32_t layer_count =
pFramebufferAttachmentsCreateInfo->pAttachmentImageInfos[attachment_index].layerCount;
if (layer_count <= highest_view_bit) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
HandleToUint64(pCreateInfo->renderPass),
"VUID-VkFramebufferCreateInfo-renderPass-03198",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment info %u "
"only specifies %u layers, but the view mask for subpass %u in renderPass (%s) "
"includes layer %u, with that attachment specified as a depth/stencil attachment.",
attachment_index, layer_count, i,
report_data->FormatHandle(pCreateInfo->renderPass).c_str(), highest_view_bit);
}
}
if (device_extensions.vk_khr_depth_stencil_resolve && pDepthStencilResolve != nullptr &&
pDepthStencilResolve->pDepthStencilResolveAttachment != nullptr) {
attachment_index = pDepthStencilResolve->pDepthStencilResolveAttachment->attachment;
if (attachment_index != VK_ATTACHMENT_UNUSED) {
uint32_t layer_count =
pFramebufferAttachmentsCreateInfo->pAttachmentImageInfos[attachment_index].layerCount;
if (layer_count <= highest_view_bit) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
HandleToUint64(pCreateInfo->renderPass),
"VUID-VkFramebufferCreateInfo-renderPass-03198",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment info %u "
"only specifies %u layers, but the view mask for subpass %u in renderPass (%s) "
"includes layer %u, with that attachment specified as a depth/stencil resolve "
"attachment.",
attachment_index, layer_count, i,
report_data->FormatHandle(pCreateInfo->renderPass).c_str(), highest_view_bit);
}
}
}
}
}
}
}
}
if ((pCreateInfo->flags & VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT_KHR) == 0) {
// Verify correct attachment usage flags
for (uint32_t subpass = 0; subpass < rpci->subpassCount; subpass++) {
// Verify input attachments:
skip |= MatchUsage(rpci->pSubpasses[subpass].inputAttachmentCount, rpci->pSubpasses[subpass].pInputAttachments,
pCreateInfo, VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT,
"VUID-VkFramebufferCreateInfo-pAttachments-00879");
// Verify color attachments:
skip |= MatchUsage(rpci->pSubpasses[subpass].colorAttachmentCount, rpci->pSubpasses[subpass].pColorAttachments,
pCreateInfo, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
"VUID-VkFramebufferCreateInfo-pAttachments-00877");
// Verify depth/stencil attachments:
skip |=
MatchUsage(1, rpci->pSubpasses[subpass].pDepthStencilAttachment, pCreateInfo,
VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, "VUID-VkFramebufferCreateInfo-pAttachments-02633");
}
}
}
}
// Verify FB dimensions are within physical device limits
if (pCreateInfo->width > phys_dev_props.limits.maxFramebufferWidth) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkFramebufferCreateInfo-width-00886",
"vkCreateFramebuffer(): Requested VkFramebufferCreateInfo width exceeds physical device limits. Requested "
"width: %u, device max: %u\n",
pCreateInfo->width, phys_dev_props.limits.maxFramebufferWidth);
}
if (pCreateInfo->height > phys_dev_props.limits.maxFramebufferHeight) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkFramebufferCreateInfo-height-00888",
"vkCreateFramebuffer(): Requested VkFramebufferCreateInfo height exceeds physical device limits. Requested "
"height: %u, device max: %u\n",
pCreateInfo->height, phys_dev_props.limits.maxFramebufferHeight);
}
if (pCreateInfo->layers > phys_dev_props.limits.maxFramebufferLayers) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkFramebufferCreateInfo-layers-00890",
"vkCreateFramebuffer(): Requested VkFramebufferCreateInfo layers exceeds physical device limits. Requested "
"layers: %u, device max: %u\n",
pCreateInfo->layers, phys_dev_props.limits.maxFramebufferLayers);
}
// Verify FB dimensions are greater than zero
if (pCreateInfo->width == 0) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkFramebufferCreateInfo-width-00885",
"vkCreateFramebuffer(): Requested VkFramebufferCreateInfo width must be greater than zero.");
}
if (pCreateInfo->height == 0) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkFramebufferCreateInfo-height-00887",
"vkCreateFramebuffer(): Requested VkFramebufferCreateInfo height must be greater than zero.");
}
if (pCreateInfo->layers == 0) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkFramebufferCreateInfo-layers-00889",
"vkCreateFramebuffer(): Requested VkFramebufferCreateInfo layers must be greater than zero.");
}
return skip;
}
bool CoreChecks::PreCallValidateCreateFramebuffer(VkDevice device, const VkFramebufferCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkFramebuffer *pFramebuffer) {
// TODO : Verify that renderPass FB is created with is compatible with FB
bool skip = false;
skip |= ValidateFramebufferCreateInfo(pCreateInfo);
return skip;
}
void ValidationStateTracker::PostCallRecordCreateFramebuffer(VkDevice device, const VkFramebufferCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkFramebuffer *pFramebuffer,
VkResult result) {
if (VK_SUCCESS != result) return;
// Shadow create info and store in map
std::unique_ptr<FRAMEBUFFER_STATE> fb_state(
new FRAMEBUFFER_STATE(*pFramebuffer, pCreateInfo, GetRenderPassStateSharedPtr(pCreateInfo->renderPass)));
if ((pCreateInfo->flags & VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT_KHR) == 0) {
for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
VkImageView view = pCreateInfo->pAttachments[i];
auto view_state = GetImageViewState(view);
if (!view_state) {
continue;
}
}
}
frameBufferMap[*pFramebuffer] = std::move(fb_state);
}
static bool FindDependency(const uint32_t index, const uint32_t dependent, const std::vector<DAGNode> &subpass_to_node,
std::unordered_set<uint32_t> &processed_nodes) {
// If we have already checked this node we have not found a dependency path so return false.
if (processed_nodes.count(index)) return false;
processed_nodes.insert(index);
const DAGNode &node = subpass_to_node[index];
// Look for a dependency path. If one exists return true else recurse on the previous nodes.
if (std::find(node.prev.begin(), node.prev.end(), dependent) == node.prev.end()) {
for (auto elem : node.prev) {
if (FindDependency(elem, dependent, subpass_to_node, processed_nodes)) return true;
}
} else {
return true;
}
return false;
}
bool CoreChecks::CheckDependencyExists(const uint32_t subpass, const std::vector<uint32_t> &dependent_subpasses,
const std::vector<DAGNode> &subpass_to_node, bool &skip) const {
bool result = true;
// Loop through all subpasses that share the same attachment and make sure a dependency exists
for (uint32_t k = 0; k < dependent_subpasses.size(); ++k) {
if (static_cast<uint32_t>(subpass) == dependent_subpasses[k]) continue;
const DAGNode &node = subpass_to_node[subpass];
// Check for a specified dependency between the two nodes. If one exists we are done.
auto prev_elem = std::find(node.prev.begin(), node.prev.end(), dependent_subpasses[k]);
auto next_elem = std::find(node.next.begin(), node.next.end(), dependent_subpasses[k]);
if (prev_elem == node.prev.end() && next_elem == node.next.end()) {
// If no explicit dependency exists, an implicit dependency still might. If not, report an error.
std::unordered_set<uint32_t> processed_nodes;
if (!(FindDependency(subpass, dependent_subpasses[k], subpass_to_node, processed_nodes) ||
FindDependency(dependent_subpasses[k], subpass, subpass_to_node, processed_nodes))) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
kVUID_Core_DrawState_InvalidRenderpass,
"A dependency between subpasses %d and %d must exist but one is not specified.", subpass,
dependent_subpasses[k]);
result = false;
}
}
}
return result;
}
bool CoreChecks::CheckPreserved(const VkRenderPassCreateInfo2KHR *pCreateInfo, const int index, const uint32_t attachment,
const std::vector<DAGNode> &subpass_to_node, int depth, bool &skip) const {
const DAGNode &node = subpass_to_node[index];
// If this node writes to the attachment return true as next nodes need to preserve the attachment.
const VkSubpassDescription2KHR &subpass = pCreateInfo->pSubpasses[index];
for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
if (attachment == subpass.pColorAttachments[j].attachment) return true;
}
for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
if (attachment == subpass.pInputAttachments[j].attachment) return true;
}
if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
if (attachment == subpass.pDepthStencilAttachment->attachment) return true;
}
bool result = false;
// Loop through previous nodes and see if any of them write to the attachment.
for (auto elem : node.prev) {
result |= CheckPreserved(pCreateInfo, elem, attachment, subpass_to_node, depth + 1, skip);
}
// If the attachment was written to by a previous node, then this node needs to preserve it.
if (result && depth > 0) {
bool has_preserved = false;
for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
if (subpass.pPreserveAttachments[j] == attachment) {
has_preserved = true;
break;
}
}
if (!has_preserved) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
kVUID_Core_DrawState_InvalidRenderpass,
"Attachment %d is used by a later subpass and must be preserved in subpass %d.", attachment, index);
}
}
return result;
}
template <class T>
bool IsRangeOverlapping(T offset1, T size1, T offset2, T size2) {
    // Half-open interval intersection: [offset1, offset1 + size1) overlaps [offset2, offset2 + size2).
    // Unlike a pairwise endpoint comparison, this also catches identical and fully-contained ranges.
    return (offset1 < (offset2 + size2)) && (offset2 < (offset1 + size1));
}
bool IsRegionOverlapping(VkImageSubresourceRange range1, VkImageSubresourceRange range2) {
return (IsRangeOverlapping(range1.baseMipLevel, range1.levelCount, range2.baseMipLevel, range2.levelCount) &&
IsRangeOverlapping(range1.baseArrayLayer, range1.layerCount, range2.baseArrayLayer, range2.layerCount));
}
bool CoreChecks::ValidateDependencies(FRAMEBUFFER_STATE const *framebuffer, RENDER_PASS_STATE const *renderPass) const {
bool skip = false;
auto const pFramebufferInfo = framebuffer->createInfo.ptr();
auto const pCreateInfo = renderPass->createInfo.ptr();
auto const &subpass_to_node = renderPass->subpassToNode;
std::vector<std::vector<uint32_t>> output_attachment_to_subpass(pCreateInfo->attachmentCount);
std::vector<std::vector<uint32_t>> input_attachment_to_subpass(pCreateInfo->attachmentCount);
std::vector<std::vector<uint32_t>> overlapping_attachments(pCreateInfo->attachmentCount);
// Find overlapping attachments
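// Two attachments overlap if they are the same view, the same image with intersecting subresource
// ranges, or distinct images bound to intersecting ranges of the same VkDeviceMemory.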
for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
for (uint32_t j = i + 1; j < pCreateInfo->attachmentCount; ++j) {
VkImageView viewi = pFramebufferInfo->pAttachments[i];
VkImageView viewj = pFramebufferInfo->pAttachments[j];
if (viewi == viewj) {
overlapping_attachments[i].push_back(j);
overlapping_attachments[j].push_back(i);
continue;
}
auto view_state_i = GetImageViewState(viewi);
auto view_state_j = GetImageViewState(viewj);
if (!view_state_i || !view_state_j) {
continue;
}
auto view_ci_i = view_state_i->create_info;
auto view_ci_j = view_state_j->create_info;
if (view_ci_i.image == view_ci_j.image && IsRegionOverlapping(view_ci_i.subresourceRange, view_ci_j.subresourceRange)) {
overlapping_attachments[i].push_back(j);
overlapping_attachments[j].push_back(i);
continue;
}
auto image_data_i = GetImageState(view_ci_i.image);
auto image_data_j = GetImageState(view_ci_j.image);
if (!image_data_i || !image_data_j) {
continue;
}
if (image_data_i->binding.mem == image_data_j->binding.mem &&
IsRangeOverlapping(image_data_i->binding.offset, image_data_i->binding.size, image_data_j->binding.offset,
image_data_j->binding.size)) {
overlapping_attachments[i].push_back(j);
overlapping_attachments[j].push_back(i);
}
}
}
// Find for each attachment the subpasses that use them.
std::unordered_set<uint32_t> attachmentIndices;
for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
const VkSubpassDescription2KHR &subpass = pCreateInfo->pSubpasses[i];
attachmentIndices.clear();
for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
uint32_t attachment = subpass.pInputAttachments[j].attachment;
if (attachment == VK_ATTACHMENT_UNUSED) continue;
input_attachment_to_subpass[attachment].push_back(i);
for (auto overlapping_attachment : overlapping_attachments[attachment]) {
input_attachment_to_subpass[overlapping_attachment].push_back(i);
}
}
for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
uint32_t attachment = subpass.pColorAttachments[j].attachment;
if (attachment == VK_ATTACHMENT_UNUSED) continue;
output_attachment_to_subpass[attachment].push_back(i);
for (auto overlapping_attachment : overlapping_attachments[attachment]) {
output_attachment_to_subpass[overlapping_attachment].push_back(i);
}
attachmentIndices.insert(attachment);
}
if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
output_attachment_to_subpass[attachment].push_back(i);
for (auto overlapping_attachment : overlapping_attachments[attachment]) {
output_attachment_to_subpass[overlapping_attachment].push_back(i);
}
if (attachmentIndices.count(attachment)) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
kVUID_Core_DrawState_InvalidRenderpass,
"Cannot use same attachment (%u) as both color and depth output in same subpass (%u).", attachment, i);
}
}
}
// If there is a dependency needed make sure one exists
for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
const VkSubpassDescription2KHR &subpass = pCreateInfo->pSubpasses[i];
// If the attachment is an input then all subpasses that output must have a dependency relationship
for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
uint32_t attachment = subpass.pInputAttachments[j].attachment;
if (attachment == VK_ATTACHMENT_UNUSED) continue;
CheckDependencyExists(i, output_attachment_to_subpass[attachment], subpass_to_node, skip);
}
// If the attachment is an output then all subpasses that use the attachment must have a dependency relationship
for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
uint32_t attachment = subpass.pColorAttachments[j].attachment;
if (attachment == VK_ATTACHMENT_UNUSED) continue;
CheckDependencyExists(i, output_attachment_to_subpass[attachment], subpass_to_node, skip);
CheckDependencyExists(i, input_attachment_to_subpass[attachment], subpass_to_node, skip);
}
if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
const uint32_t &attachment = subpass.pDepthStencilAttachment->attachment;
CheckDependencyExists(i, output_attachment_to_subpass[attachment], subpass_to_node, skip);
CheckDependencyExists(i, input_attachment_to_subpass[attachment], subpass_to_node, skip);
}
}
// Loop through implicit dependencies, if this pass reads make sure the attachment is preserved for all passes after it was
// written.
for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
const VkSubpassDescription2KHR &subpass = pCreateInfo->pSubpasses[i];
for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
CheckPreserved(pCreateInfo, i, subpass.pInputAttachments[j].attachment, subpass_to_node, 0, skip);
}
}
return skip;
}
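// Build the subpass DAG: a dependency between two distinct subpasses becomes a prev/next edge, a
// dependency from a subpass to itself is recorded in the per-subpass self_dependencies list, and
// dependencies involving VK_SUBPASS_EXTERNAL are not part of the graph.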
void ValidationStateTracker::RecordRenderPassDAG(RenderPassCreateVersion rp_version, const VkRenderPassCreateInfo2KHR *pCreateInfo,
RENDER_PASS_STATE *render_pass) {
auto &subpass_to_node = render_pass->subpassToNode;
subpass_to_node.resize(pCreateInfo->subpassCount);
auto &self_dependencies = render_pass->self_dependencies;
self_dependencies.resize(pCreateInfo->subpassCount);
for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
subpass_to_node[i].pass = i;
self_dependencies[i].clear();
}
for (uint32_t i = 0; i < pCreateInfo->dependencyCount; ++i) {
const VkSubpassDependency2KHR &dependency = pCreateInfo->pDependencies[i];
if ((dependency.srcSubpass != VK_SUBPASS_EXTERNAL) && (dependency.dstSubpass != VK_SUBPASS_EXTERNAL)) {
if (dependency.srcSubpass == dependency.dstSubpass) {
self_dependencies[dependency.srcSubpass].push_back(i);
} else {
subpass_to_node[dependency.dstSubpass].prev.push_back(dependency.srcSubpass);
subpass_to_node[dependency.srcSubpass].next.push_back(dependency.dstSubpass);
}
}
}
}
bool CoreChecks::ValidateRenderPassDAG(RenderPassCreateVersion rp_version, const VkRenderPassCreateInfo2KHR *pCreateInfo) const {
bool skip = false;
const char *vuid;
const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2);
for (uint32_t i = 0; i < pCreateInfo->dependencyCount; ++i) {
const VkSubpassDependency2KHR &dependency = pCreateInfo->pDependencies[i];
VkPipelineStageFlagBits latest_src_stage = GetLogicallyLatestGraphicsPipelineStage(dependency.srcStageMask);
VkPipelineStageFlagBits earliest_dst_stage = GetLogicallyEarliestGraphicsPipelineStage(dependency.dstStageMask);
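// latest_src_stage/earliest_dst_stage are only used by the self-dependency ordering check below: a
// self-dependency may not go from a logically-later source stage to a logically-earlier destination.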
// The first subpass here serves as a good proxy for "is multiview enabled" - since all view masks need to be non-zero if
// any are, which enables multiview.
if (use_rp2 && (dependency.dependencyFlags & VK_DEPENDENCY_VIEW_LOCAL_BIT) && (pCreateInfo->pSubpasses[0].viewMask == 0)) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkRenderPassCreateInfo2KHR-viewMask-03059",
"Dependency %u specifies the VK_DEPENDENCY_VIEW_LOCAL_BIT, but multiview is not enabled for this render pass.", i);
} else if (use_rp2 && !(dependency.dependencyFlags & VK_DEPENDENCY_VIEW_LOCAL_BIT) && dependency.viewOffset != 0) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkSubpassDependency2KHR-dependencyFlags-03092",
"Dependency %u specifies the VK_DEPENDENCY_VIEW_LOCAL_BIT, but also specifies a view offset of %u.", i,
dependency.viewOffset);
} else if (dependency.srcSubpass == VK_SUBPASS_EXTERNAL || dependency.dstSubpass == VK_SUBPASS_EXTERNAL) {
if (dependency.srcSubpass == dependency.dstSubpass) {
vuid = use_rp2 ? "VUID-VkSubpassDependency2KHR-srcSubpass-03085" : "VUID-VkSubpassDependency-srcSubpass-00865";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid,
"The src and dst subpasses in dependency %u are both external.", i);
} else if (dependency.dependencyFlags & VK_DEPENDENCY_VIEW_LOCAL_BIT) {
if (dependency.srcSubpass == VK_SUBPASS_EXTERNAL) {
vuid = "VUID-VkSubpassDependency-dependencyFlags-02520";
} else { // dependency.dstSubpass == VK_SUBPASS_EXTERNAL
vuid = "VUID-VkSubpassDependency-dependencyFlags-02521";
}
if (use_rp2) {
// Create render pass 2 distinguishes between source and destination external dependencies.
if (dependency.srcSubpass == VK_SUBPASS_EXTERNAL) {
vuid = "VUID-VkSubpassDependency2KHR-dependencyFlags-03090";
} else {
vuid = "VUID-VkSubpassDependency2KHR-dependencyFlags-03091";
}
}
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid,
"Dependency %u specifies an external dependency but also specifies VK_DEPENDENCY_VIEW_LOCAL_BIT.", i);
}
} else if (dependency.srcSubpass > dependency.dstSubpass) {
vuid = use_rp2 ? "VUID-VkSubpassDependency2KHR-srcSubpass-03084" : "VUID-VkSubpassDependency-srcSubpass-00864";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid,
"Dependency %u specifies a dependency from a later subpass (%u) to an earlier subpass (%u), which is "
"disallowed to prevent cyclic dependencies.",
i, dependency.srcSubpass, dependency.dstSubpass);
} else if (dependency.srcSubpass == dependency.dstSubpass) {
if (dependency.viewOffset != 0) {
vuid = use_rp2 ? kVUID_Core_DrawState_InvalidRenderpass : "VUID-VkRenderPassCreateInfo-pNext-01930";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid,
"Dependency %u specifies a self-dependency but has a non-zero view offset of %u", i,
dependency.viewOffset);
} else if ((dependency.dependencyFlags | VK_DEPENDENCY_VIEW_LOCAL_BIT) != dependency.dependencyFlags &&
pCreateInfo->pSubpasses[dependency.srcSubpass].viewMask > 1) {
vuid =
use_rp2 ? "VUID-VkRenderPassCreateInfo2KHR-pDependencies-03060" : "VUID-VkSubpassDependency-srcSubpass-00872";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid,
"Dependency %u specifies a self-dependency for subpass %u with a non-zero view mask, but does not "
"specify VK_DEPENDENCY_VIEW_LOCAL_BIT.",
i, dependency.srcSubpass);
} else if ((HasNonFramebufferStagePipelineStageFlags(dependency.srcStageMask) ||
HasNonFramebufferStagePipelineStageFlags(dependency.dstStageMask)) &&
(GetGraphicsPipelineStageLogicalOrdinal(latest_src_stage) >
GetGraphicsPipelineStageLogicalOrdinal(earliest_dst_stage))) {
vuid = use_rp2 ? "VUID-VkSubpassDependency2KHR-srcSubpass-03087" : "VUID-VkSubpassDependency-srcSubpass-00867";
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid,
"Dependency %u specifies a self-dependency from logically-later stage (%s) to a logically-earlier stage (%s).",
i, string_VkPipelineStageFlagBits(latest_src_stage), string_VkPipelineStageFlagBits(earliest_dst_stage));
}
}
}
return skip;
}
bool CoreChecks::ValidateAttachmentIndex(RenderPassCreateVersion rp_version, uint32_t attachment, uint32_t attachment_count,
const char *type) const {
bool skip = false;
const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2);
const char *const function_name = use_rp2 ? "vkCreateRenderPass2KHR()" : "vkCreateRenderPass()";
if (attachment >= attachment_count && attachment != VK_ATTACHMENT_UNUSED) {
const char *vuid =
use_rp2 ? "VUID-VkRenderPassCreateInfo2KHR-attachment-03051" : "VUID-VkRenderPassCreateInfo-attachment-00834";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid,
"%s: %s attachment %d must be less than the total number of attachments %d.", type, function_name,
attachment, attachment_count);
}
return skip;
}
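// Attachment uses are tracked as bit flags so a single attachment can accumulate multiple uses within
// one subpass; StringAttachmentType prints "(multiple)" for any combination of bits.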
enum AttachmentType {
ATTACHMENT_COLOR = 1,
ATTACHMENT_DEPTH = 2,
ATTACHMENT_INPUT = 4,
ATTACHMENT_PRESERVE = 8,
ATTACHMENT_RESOLVE = 16,
};
char const *StringAttachmentType(uint8_t type) {
switch (type) {
case ATTACHMENT_COLOR:
return "color";
case ATTACHMENT_DEPTH:
return "depth";
case ATTACHMENT_INPUT:
return "input";
case ATTACHMENT_PRESERVE:
return "preserve";
case ATTACHMENT_RESOLVE:
return "resolve";
default:
return "(multiple)";
}
}
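// Record one use of an attachment within a subpass. An attachment already used as an input may
// additionally be used as a color or depth attachment; any other combination, any resolve or preserve
// use of an already-used attachment, and any reuse with a different image layout is reported.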
bool CoreChecks::AddAttachmentUse(RenderPassCreateVersion rp_version, uint32_t subpass, std::vector<uint8_t> &attachment_uses,
std::vector<VkImageLayout> &attachment_layouts, uint32_t attachment, uint8_t new_use,
VkImageLayout new_layout) const {
if (attachment >= attachment_uses.size()) return false; /* out of range, but already reported */
bool skip = false;
auto &uses = attachment_uses[attachment];
const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2);
const char *vuid;
const char *const function_name = use_rp2 ? "vkCreateRenderPass2KHR()" : "vkCreateRenderPass()";
if (uses & new_use) {
if (attachment_layouts[attachment] != new_layout) {
vuid = use_rp2 ? "VUID-VkSubpassDescription2KHR-layout-02528" : "VUID-VkSubpassDescription-layout-02519";
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid,
"%s: subpass %u already uses attachment %u with a different image layout (%s vs %s).", function_name, subpass,
attachment, string_VkImageLayout(attachment_layouts[attachment]), string_VkImageLayout(new_layout));
}
} else if (uses & ~ATTACHMENT_INPUT || (uses && (new_use == ATTACHMENT_RESOLVE || new_use == ATTACHMENT_PRESERVE))) {
/* Note: input attachments are assumed to be done first. */
vuid = use_rp2 ? "VUID-VkSubpassDescription2KHR-pPreserveAttachments-03074"
: "VUID-VkSubpassDescription-pPreserveAttachments-00854";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid,
"%s: subpass %u uses attachment %u as both %s and %s attachment.", function_name, subpass, attachment,
StringAttachmentType(uses), StringAttachmentType(new_use));
} else {
attachment_layouts[attachment] = new_layout;
uses |= new_use;
}
return skip;
}
bool CoreChecks::ValidateRenderpassAttachmentUsage(RenderPassCreateVersion rp_version,
const VkRenderPassCreateInfo2KHR *pCreateInfo) const {
bool skip = false;
const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2);
const char *vuid;
const char *const function_name = use_rp2 ? "vkCreateRenderPass2KHR()" : "vkCreateRenderPass()";
for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
const VkSubpassDescription2KHR &subpass = pCreateInfo->pSubpasses[i];
std::vector<uint8_t> attachment_uses(pCreateInfo->attachmentCount);
std::vector<VkImageLayout> attachment_layouts(pCreateInfo->attachmentCount);
if (subpass.pipelineBindPoint != VK_PIPELINE_BIND_POINT_GRAPHICS) {
vuid = use_rp2 ? "VUID-VkSubpassDescription2KHR-pipelineBindPoint-03062"
: "VUID-VkSubpassDescription-pipelineBindPoint-00844";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid,
"%s: Pipeline bind point for subpass %d must be VK_PIPELINE_BIND_POINT_GRAPHICS.", function_name, i);
}
for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
auto const &attachment_ref = subpass.pInputAttachments[j];
if (attachment_ref.attachment != VK_ATTACHMENT_UNUSED) {
skip |= ValidateAttachmentIndex(rp_version, attachment_ref.attachment, pCreateInfo->attachmentCount, "Input");
if (attachment_ref.aspectMask & VK_IMAGE_ASPECT_METADATA_BIT) {
vuid =
use_rp2 ? kVUID_Core_DrawState_InvalidRenderpass : "VUID-VkInputAttachmentAspectReference-aspectMask-01964";
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid,
"%s: Aspect mask for input attachment reference %d in subpass %d includes VK_IMAGE_ASPECT_METADATA_BIT.",
function_name, i, j);
}
if (attachment_ref.attachment < pCreateInfo->attachmentCount) {
skip |= AddAttachmentUse(rp_version, i, attachment_uses, attachment_layouts, attachment_ref.attachment,
ATTACHMENT_INPUT, attachment_ref.layout);
vuid = use_rp2 ? kVUID_Core_DrawState_InvalidRenderpass : "VUID-VkRenderPassCreateInfo-pNext-01963";
skip |= ValidateImageAspectMask(VK_NULL_HANDLE, pCreateInfo->pAttachments[attachment_ref.attachment].format,
attachment_ref.aspectMask, function_name, vuid);
}
}
if (rp_version == RENDER_PASS_VERSION_2) {
// These are validated automatically as part of parameter validation for create renderpass 1
// as they are in a struct that only applies to input attachments - not so for v2.
// Check for 0
if (attachment_ref.aspectMask == 0) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkSubpassDescription2KHR-aspectMask-03176",
"%s: Input attachment (%d) aspect mask must not be 0.", function_name, j);
} else {
const VkImageAspectFlags valid_bits =
(VK_IMAGE_ASPECT_COLOR_BIT | VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT |
VK_IMAGE_ASPECT_METADATA_BIT | VK_IMAGE_ASPECT_PLANE_0_BIT | VK_IMAGE_ASPECT_PLANE_1_BIT |
VK_IMAGE_ASPECT_PLANE_2_BIT);
// Check for valid aspect mask bits
if (attachment_ref.aspectMask & ~valid_bits) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkSubpassDescription2KHR-aspectMask-03175",
"%s: Input attachment (%d) aspect mask (0x%" PRIx32 ")is invalid.", function_name, j,
attachment_ref.aspectMask);
}
}
}
}
for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
uint32_t attachment = subpass.pPreserveAttachments[j];
if (attachment == VK_ATTACHMENT_UNUSED) {
vuid = use_rp2 ? "VUID-VkSubpassDescription2KHR-attachment-03073" : "VUID-VkSubpassDescription-attachment-00853";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid,
"%s: Preserve attachment (%d) must not be VK_ATTACHMENT_UNUSED.", function_name, j);
} else {
skip |= ValidateAttachmentIndex(rp_version, attachment, pCreateInfo->attachmentCount, "Preserve");
if (attachment < pCreateInfo->attachmentCount) {
skip |= AddAttachmentUse(rp_version, i, attachment_uses, attachment_layouts, attachment, ATTACHMENT_PRESERVE,
VkImageLayout(0) /* preserve doesn't have any layout */);
}
}
}
bool subpass_performs_resolve = false;
for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
if (subpass.pResolveAttachments) {
auto const &attachment_ref = subpass.pResolveAttachments[j];
if (attachment_ref.attachment != VK_ATTACHMENT_UNUSED) {
skip |= ValidateAttachmentIndex(rp_version, attachment_ref.attachment, pCreateInfo->attachmentCount, "Resolve");
if (attachment_ref.attachment < pCreateInfo->attachmentCount) {
skip |= AddAttachmentUse(rp_version, i, attachment_uses, attachment_layouts, attachment_ref.attachment,
ATTACHMENT_RESOLVE, attachment_ref.layout);
subpass_performs_resolve = true;
if (pCreateInfo->pAttachments[attachment_ref.attachment].samples != VK_SAMPLE_COUNT_1_BIT) {
vuid = use_rp2 ? "VUID-VkSubpassDescription2KHR-pResolveAttachments-03067"
: "VUID-VkSubpassDescription-pResolveAttachments-00849";
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid,
"%s: Subpass %u requests multisample resolve into attachment %u, which must "
"have VK_SAMPLE_COUNT_1_BIT but has %s.",
function_name, i, attachment_ref.attachment,
string_VkSampleCountFlagBits(pCreateInfo->pAttachments[attachment_ref.attachment].samples));
}
}
}
}
}
if (subpass.pDepthStencilAttachment) {
if (subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
skip |= ValidateAttachmentIndex(rp_version, subpass.pDepthStencilAttachment->attachment,
pCreateInfo->attachmentCount, "Depth");
if (subpass.pDepthStencilAttachment->attachment < pCreateInfo->attachmentCount) {
skip |= AddAttachmentUse(rp_version, i, attachment_uses, attachment_layouts,
subpass.pDepthStencilAttachment->attachment, ATTACHMENT_DEPTH,
subpass.pDepthStencilAttachment->layout);
}
}
}
uint32_t last_sample_count_attachment = VK_ATTACHMENT_UNUSED;
for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
auto const &attachment_ref = subpass.pColorAttachments[j];
skip |= ValidateAttachmentIndex(rp_version, attachment_ref.attachment, pCreateInfo->attachmentCount, "Color");
if (attachment_ref.attachment != VK_ATTACHMENT_UNUSED && attachment_ref.attachment < pCreateInfo->attachmentCount) {
skip |= AddAttachmentUse(rp_version, i, attachment_uses, attachment_layouts, attachment_ref.attachment,
ATTACHMENT_COLOR, attachment_ref.layout);
VkSampleCountFlagBits current_sample_count = pCreateInfo->pAttachments[attachment_ref.attachment].samples;
if (last_sample_count_attachment != VK_ATTACHMENT_UNUSED) {
VkSampleCountFlagBits last_sample_count =
pCreateInfo->pAttachments[subpass.pColorAttachments[last_sample_count_attachment].attachment].samples;
if (current_sample_count != last_sample_count) {
vuid = use_rp2 ? "VUID-VkSubpassDescription2KHR-pColorAttachments-03069"
: "VUID-VkSubpassDescription-pColorAttachments-01417";
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid,
"%s: Subpass %u attempts to render to color attachments with inconsistent sample counts."
"Color attachment ref %u has sample count %s, whereas previous color attachment ref %u has "
"sample count %s.",
function_name, i, j, string_VkSampleCountFlagBits(current_sample_count),
last_sample_count_attachment, string_VkSampleCountFlagBits(last_sample_count));
}
}
last_sample_count_attachment = j;
if (subpass_performs_resolve && current_sample_count == VK_SAMPLE_COUNT_1_BIT) {
vuid = use_rp2 ? "VUID-VkSubpassDescription2KHR-pResolveAttachments-03066"
: "VUID-VkSubpassDescription-pResolveAttachments-00848";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid,
"%s: Subpass %u requests multisample resolve from attachment %u which has "
"VK_SAMPLE_COUNT_1_BIT.",
function_name, i, attachment_ref.attachment);
}
if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED &&
subpass.pDepthStencilAttachment->attachment < pCreateInfo->attachmentCount) {
const auto depth_stencil_sample_count =
pCreateInfo->pAttachments[subpass.pDepthStencilAttachment->attachment].samples;
if (device_extensions.vk_amd_mixed_attachment_samples) {
if (pCreateInfo->pAttachments[attachment_ref.attachment].samples > depth_stencil_sample_count) {
vuid = use_rp2 ? "VUID-VkSubpassDescription2KHR-pColorAttachments-03070"
: "VUID-VkSubpassDescription-pColorAttachments-01506";
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid,
"%s: Subpass %u pColorAttachments[%u] has %s which is larger than "
"depth/stencil attachment %s.",
function_name, i, j,
string_VkSampleCountFlagBits(pCreateInfo->pAttachments[attachment_ref.attachment].samples),
string_VkSampleCountFlagBits(depth_stencil_sample_count));
break;
}
}
if (!device_extensions.vk_amd_mixed_attachment_samples && !device_extensions.vk_nv_framebuffer_mixed_samples &&
current_sample_count != depth_stencil_sample_count) {
vuid = use_rp2 ? "VUID-VkSubpassDescription2KHR-pDepthStencilAttachment-03071"
: "VUID-VkSubpassDescription-pDepthStencilAttachment-01418";
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid,
"%s: Subpass %u attempts to render to use a depth/stencil attachment with sample count that differs "
"from color attachment %u."
"The depth attachment ref has sample count %s, whereas color attachment ref %u has sample count %s.",
function_name, i, j, string_VkSampleCountFlagBits(depth_stencil_sample_count), j,
string_VkSampleCountFlagBits(current_sample_count));
break;
}
}
}
if (subpass_performs_resolve && subpass.pResolveAttachments[j].attachment != VK_ATTACHMENT_UNUSED &&
subpass.pResolveAttachments[j].attachment < pCreateInfo->attachmentCount) {
if (attachment_ref.attachment == VK_ATTACHMENT_UNUSED) {
vuid = use_rp2 ? "VUID-VkSubpassDescription2KHR-pResolveAttachments-03065"
: "VUID-VkSubpassDescription-pResolveAttachments-00847";
                    skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid,
                                    "%s: Subpass %u requests multisample resolve into pResolveAttachments[%u], but the "
                                    "corresponding color attachment is VK_ATTACHMENT_UNUSED.",
                                    function_name, i, j);
} else {
const auto &color_desc = pCreateInfo->pAttachments[attachment_ref.attachment];
const auto &resolve_desc = pCreateInfo->pAttachments[subpass.pResolveAttachments[j].attachment];
if (color_desc.format != resolve_desc.format) {
vuid = use_rp2 ? "VUID-VkSubpassDescription2KHR-pResolveAttachments-03068"
: "VUID-VkSubpassDescription-pResolveAttachments-00850";
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid,
"%s: Subpass %u pColorAttachments[%u] resolves to an attachment with a "
"different format. color format: %u, resolve format: %u.",
function_name, i, j, color_desc.format, resolve_desc.format);
}
}
}
}
}
return skip;
}
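// Records whether the first reference to an attachment in this render pass reads it. Only the first
// use per attachment index is recorded; later uses never overwrite the entry.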
static void MarkAttachmentFirstUse(RENDER_PASS_STATE *render_pass, uint32_t index, bool is_read) {
if (index == VK_ATTACHMENT_UNUSED) return;
if (!render_pass->attachment_first_read.count(index)) render_pass->attachment_first_read[index] = is_read;
}
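// Shared create-time validation for render pass v1 and v2: attachment usage, the subpass dependency
// DAG, multiview view masks, correlated view masks, and per-dependency stage/access masks. Attachment
// layouts are validated last, and only if everything above passed.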
bool CoreChecks::ValidateCreateRenderPass(VkDevice device, RenderPassCreateVersion rp_version,
const VkRenderPassCreateInfo2KHR *pCreateInfo) const {
bool skip = false;
const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2);
const char *vuid;
const char *const function_name = use_rp2 ? "vkCreateRenderPass2KHR()" : "vkCreateRenderPass()";
// TODO: As part of wrapping up the mem_tracker/core_validation merge the following routine should be consolidated with
// ValidateLayouts.
skip |= ValidateRenderpassAttachmentUsage(rp_version, pCreateInfo);
skip |= ValidateRenderPassDAG(rp_version, pCreateInfo);
// Validate multiview correlation and view masks
bool viewMaskZero = false;
bool viewMaskNonZero = false;
for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
const VkSubpassDescription2KHR &subpass = pCreateInfo->pSubpasses[i];
if (subpass.viewMask != 0) {
viewMaskNonZero = true;
} else {
viewMaskZero = true;
}
if ((subpass.flags & VK_SUBPASS_DESCRIPTION_PER_VIEW_POSITION_X_ONLY_BIT_NVX) != 0 &&
(subpass.flags & VK_SUBPASS_DESCRIPTION_PER_VIEW_ATTRIBUTES_BIT_NVX) == 0) {
vuid = use_rp2 ? "VUID-VkSubpassDescription2KHR-flags-03076" : "VUID-VkSubpassDescription-flags-00856";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid,
"%s: The flags parameter of subpass description %u includes "
"VK_SUBPASS_DESCRIPTION_PER_VIEW_POSITION_X_ONLY_BIT_NVX but does not also include "
"VK_SUBPASS_DESCRIPTION_PER_VIEW_ATTRIBUTES_BIT_NVX.",
function_name, i);
}
}
if (rp_version == RENDER_PASS_VERSION_2) {
if (viewMaskNonZero && viewMaskZero) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkRenderPassCreateInfo2KHR-viewMask-03058",
"%s: Some view masks are non-zero whilst others are zero.", function_name);
}
if (viewMaskZero && pCreateInfo->correlatedViewMaskCount != 0) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkRenderPassCreateInfo2KHR-viewMask-03057",
"%s: Multiview is not enabled but correlation masks are still provided", function_name);
}
}
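    // Each view may appear in at most one correlated view mask: accumulate the union of the masks
    // seen so far and flag any bit that repeats.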
uint32_t aggregated_cvms = 0;
for (uint32_t i = 0; i < pCreateInfo->correlatedViewMaskCount; ++i) {
if (aggregated_cvms & pCreateInfo->pCorrelatedViewMasks[i]) {
vuid = use_rp2 ? "VUID-VkRenderPassCreateInfo2KHR-pCorrelatedViewMasks-03056"
: "VUID-VkRenderPassMultiviewCreateInfo-pCorrelationMasks-00841";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid,
"%s: pCorrelatedViewMasks[%u] contains a previously appearing view bit.", function_name, i);
}
aggregated_cvms |= pCreateInfo->pCorrelatedViewMasks[i];
}
for (uint32_t i = 0; i < pCreateInfo->dependencyCount; ++i) {
auto const &dependency = pCreateInfo->pDependencies[i];
if (rp_version == RENDER_PASS_VERSION_2) {
skip |= ValidateStageMaskGsTsEnables(
dependency.srcStageMask, function_name, "VUID-VkSubpassDependency2KHR-srcStageMask-03080",
"VUID-VkSubpassDependency2KHR-srcStageMask-03082", "VUID-VkSubpassDependency2KHR-srcStageMask-02103",
"VUID-VkSubpassDependency2KHR-srcStageMask-02104");
skip |= ValidateStageMaskGsTsEnables(
dependency.dstStageMask, function_name, "VUID-VkSubpassDependency2KHR-dstStageMask-03081",
"VUID-VkSubpassDependency2KHR-dstStageMask-03083", "VUID-VkSubpassDependency2KHR-dstStageMask-02105",
"VUID-VkSubpassDependency2KHR-dstStageMask-02106");
} else {
skip |= ValidateStageMaskGsTsEnables(
dependency.srcStageMask, function_name, "VUID-VkSubpassDependency-srcStageMask-00860",
"VUID-VkSubpassDependency-srcStageMask-00862", "VUID-VkSubpassDependency-srcStageMask-02099",
"VUID-VkSubpassDependency-srcStageMask-02100");
skip |= ValidateStageMaskGsTsEnables(
dependency.dstStageMask, function_name, "VUID-VkSubpassDependency-dstStageMask-00861",
"VUID-VkSubpassDependency-dstStageMask-00863", "VUID-VkSubpassDependency-dstStageMask-02101",
"VUID-VkSubpassDependency-dstStageMask-02102");
}
if (!ValidateAccessMaskPipelineStage(device_extensions, dependency.srcAccessMask, dependency.srcStageMask)) {
vuid = use_rp2 ? "VUID-VkSubpassDependency2KHR-srcAccessMask-03088" : "VUID-VkSubpassDependency-srcAccessMask-00868";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid,
"%s: pDependencies[%u].srcAccessMask (0x%" PRIx32 ") is not supported by srcStageMask (0x%" PRIx32 ").",
function_name, i, dependency.srcAccessMask, dependency.srcStageMask);
}
if (!ValidateAccessMaskPipelineStage(device_extensions, dependency.dstAccessMask, dependency.dstStageMask)) {
vuid = use_rp2 ? "VUID-VkSubpassDependency2KHR-dstAccessMask-03089" : "VUID-VkSubpassDependency-dstAccessMask-00869";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid,
"%s: pDependencies[%u].dstAccessMask (0x%" PRIx32 ") is not supported by dstStageMask (0x%" PRIx32 ").",
function_name, i, dependency.dstAccessMask, dependency.dstStageMask);
}
}
if (!skip) {
skip |= ValidateLayouts(rp_version, device, pCreateInfo);
}
return skip;
}
bool CoreChecks::PreCallValidateCreateRenderPass(VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass) {
bool skip = false;
// Handle extension structs from KHR_multiview and KHR_maintenance2 that can only be validated for RP1 (indices out of bounds)
const VkRenderPassMultiviewCreateInfo *pMultiviewInfo = lvl_find_in_chain<VkRenderPassMultiviewCreateInfo>(pCreateInfo->pNext);
if (pMultiviewInfo) {
if (pMultiviewInfo->subpassCount && pMultiviewInfo->subpassCount != pCreateInfo->subpassCount) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkRenderPassCreateInfo-pNext-01928",
"Subpass count is %u but multiview info has a subpass count of %u.", pCreateInfo->subpassCount,
pMultiviewInfo->subpassCount);
} else if (pMultiviewInfo->dependencyCount && pMultiviewInfo->dependencyCount != pCreateInfo->dependencyCount) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkRenderPassCreateInfo-pNext-01929",
"Dependency count is %u but multiview info has a dependency count of %u.", pCreateInfo->dependencyCount,
pMultiviewInfo->dependencyCount);
}
}
const VkRenderPassInputAttachmentAspectCreateInfo *pInputAttachmentAspectInfo =
lvl_find_in_chain<VkRenderPassInputAttachmentAspectCreateInfo>(pCreateInfo->pNext);
if (pInputAttachmentAspectInfo) {
for (uint32_t i = 0; i < pInputAttachmentAspectInfo->aspectReferenceCount; ++i) {
uint32_t subpass = pInputAttachmentAspectInfo->pAspectReferences[i].subpass;
uint32_t attachment = pInputAttachmentAspectInfo->pAspectReferences[i].inputAttachmentIndex;
if (subpass >= pCreateInfo->subpassCount) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkRenderPassCreateInfo-pNext-01926",
"Subpass index %u specified by input attachment aspect info %u is greater than the subpass "
"count of %u for this render pass.",
subpass, i, pCreateInfo->subpassCount);
} else if (pCreateInfo->pSubpasses && attachment >= pCreateInfo->pSubpasses[subpass].inputAttachmentCount) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkRenderPassCreateInfo-pNext-01927",
"Input attachment index %u specified by input attachment aspect info %u is greater than the "
"input attachment count of %u for this subpass.",
attachment, i, pCreateInfo->pSubpasses[subpass].inputAttachmentCount);
}
}
}
if (!skip) {
safe_VkRenderPassCreateInfo2KHR create_info_2;
ConvertVkRenderPassCreateInfoToV2KHR(pCreateInfo, &create_info_2);
skip |= ValidateCreateRenderPass(device, RENDER_PASS_VERSION_1, create_info_2.ptr());
}
return skip;
}
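// State-tracking (non-validating) record of a newly created render pass: build the subpass DAG, note
// each attachment's first use, and take ownership of the RENDER_PASS_STATE.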
void ValidationStateTracker::RecordCreateRenderPassState(RenderPassCreateVersion rp_version,
std::shared_ptr<RENDER_PASS_STATE> &render_pass,
VkRenderPass *pRenderPass) {
render_pass->renderPass = *pRenderPass;
auto create_info = render_pass->createInfo.ptr();
    RecordRenderPassDAG(rp_version, create_info, render_pass.get());
for (uint32_t i = 0; i < create_info->subpassCount; ++i) {
const VkSubpassDescription2KHR &subpass = create_info->pSubpasses[i];
for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
MarkAttachmentFirstUse(render_pass.get(), subpass.pColorAttachments[j].attachment, false);
// resolve attachments are considered to be written
if (subpass.pResolveAttachments) {
MarkAttachmentFirstUse(render_pass.get(), subpass.pResolveAttachments[j].attachment, false);
}
}
if (subpass.pDepthStencilAttachment) {
MarkAttachmentFirstUse(render_pass.get(), subpass.pDepthStencilAttachment->attachment, false);
}
for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
MarkAttachmentFirstUse(render_pass.get(), subpass.pInputAttachments[j].attachment, true);
}
}
    // render_pass is a non-const reference to the caller's shared_ptr; move from it so that move
    // assignment is invoked and the caller's pointer is left empty.
renderPassMap[*pRenderPass] = std::move(render_pass);
}
// Style note:
// Taking the shared_ptr by non-const reference and moving from it exceeds the recommended usage of
// references in the Google style guide, but it intentionally forces the caller to relinquish ownership.
// This is clearer than passing a pointer to shared_ptr and avoids the atomic increment/decrement of
// shared_ptr copy construction or assignment.
void ValidationStateTracker::PostCallRecordCreateRenderPass(VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass,
VkResult result) {
if (VK_SUCCESS != result) return;
auto render_pass_state = std::make_shared<RENDER_PASS_STATE>(pCreateInfo);
RecordCreateRenderPassState(RENDER_PASS_VERSION_1, render_pass_state, pRenderPass);
}
void ValidationStateTracker::PostCallRecordCreateRenderPass2KHR(VkDevice device, const VkRenderPassCreateInfo2KHR *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass,
VkResult result) {
if (VK_SUCCESS != result) return;
auto render_pass_state = std::make_shared<RENDER_PASS_STATE>(pCreateInfo);
RecordCreateRenderPassState(RENDER_PASS_VERSION_2, render_pass_state, pRenderPass);
}
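// Validates any VkSubpassDescriptionDepthStencilResolveKHR chained onto a subpass: the resolve modes
// must be supported (or NONE), the source depth/stencil attachment must be multisampled while the
// resolve attachment is single-sampled, and the depth/stencil component sizes and numerical types of
// the two attachments must match.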
static bool ValidateDepthStencilResolve(const debug_report_data *report_data,
const VkPhysicalDeviceDepthStencilResolvePropertiesKHR &depth_stencil_resolve_props,
const VkRenderPassCreateInfo2KHR *pCreateInfo) {
bool skip = false;
// If the pNext list of VkSubpassDescription2KHR includes a VkSubpassDescriptionDepthStencilResolveKHR structure,
// then that structure describes depth/stencil resolve operations for the subpass.
for (uint32_t i = 0; i < pCreateInfo->subpassCount; i++) {
        const VkSubpassDescription2KHR &subpass = pCreateInfo->pSubpasses[i];
const auto *resolve = lvl_find_in_chain<VkSubpassDescriptionDepthStencilResolveKHR>(subpass.pNext);
if (resolve == nullptr) {
continue;
}
if (resolve->pDepthStencilResolveAttachment != nullptr &&
resolve->pDepthStencilResolveAttachment->attachment != VK_ATTACHMENT_UNUSED) {
            if (subpass.pDepthStencilAttachment == nullptr ||
                subpass.pDepthStencilAttachment->attachment == VK_ATTACHMENT_UNUSED) {
                skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                "VUID-VkSubpassDescriptionDepthStencilResolveKHR-pDepthStencilResolveAttachment-03177",
                                "vkCreateRenderPass2KHR(): Subpass %u includes a VkSubpassDescriptionDepthStencilResolveKHR "
                                "structure with resolve attachment %u, but pDepthStencilAttachment is NULL or has "
                                "attachment=VK_ATTACHMENT_UNUSED.",
                                i, resolve->pDepthStencilResolveAttachment->attachment);
}
if (resolve->depthResolveMode == VK_RESOLVE_MODE_NONE_KHR && resolve->stencilResolveMode == VK_RESOLVE_MODE_NONE_KHR) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkSubpassDescriptionDepthStencilResolveKHR-pDepthStencilResolveAttachment-03178",
"vkCreateRenderPass2KHR(): Subpass %u includes a VkSubpassDescriptionDepthStencilResolveKHR "
"structure with resolve attachment %u, but both depth and stencil resolve modes are "
"VK_RESOLVE_MODE_NONE_KHR.",
i, resolve->pDepthStencilResolveAttachment->attachment);
}
}
        // The remaining checks dereference both the resolve attachment and the subpass depth/stencil
        // attachment, so skip them when either is absent or VK_ATTACHMENT_UNUSED to avoid invalid reads.
        if (resolve->pDepthStencilResolveAttachment == nullptr ||
            resolve->pDepthStencilResolveAttachment->attachment == VK_ATTACHMENT_UNUSED ||
            subpass.pDepthStencilAttachment == nullptr ||
            subpass.pDepthStencilAttachment->attachment == VK_ATTACHMENT_UNUSED) {
            continue;
        }
        if (pCreateInfo->pAttachments[subpass.pDepthStencilAttachment->attachment].samples == VK_SAMPLE_COUNT_1_BIT) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkSubpassDescriptionDepthStencilResolveKHR-pDepthStencilResolveAttachment-03179",
"vkCreateRenderPass2KHR(): Subpass %u includes a VkSubpassDescriptionDepthStencilResolveKHR "
"structure with resolve attachment %u. However pDepthStencilAttachment has sample count=VK_SAMPLE_COUNT_1_BIT.",
i, resolve->pDepthStencilResolveAttachment->attachment);
}
if (pCreateInfo->pAttachments[resolve->pDepthStencilResolveAttachment->attachment].samples != VK_SAMPLE_COUNT_1_BIT) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkSubpassDescriptionDepthStencilResolveKHR-pDepthStencilResolveAttachment-03180",
"vkCreateRenderPass2KHR(): Subpass %u includes a VkSubpassDescriptionDepthStencilResolveKHR "
"structure with resolve attachment %u which has sample count=VK_SAMPLE_COUNT_1_BIT.",
i, resolve->pDepthStencilResolveAttachment->attachment);
}
VkFormat pDepthStencilAttachmentFormat = pCreateInfo->pAttachments[subpass.pDepthStencilAttachment->attachment].format;
VkFormat pDepthStencilResolveAttachmentFormat =
pCreateInfo->pAttachments[resolve->pDepthStencilResolveAttachment->attachment].format;
if ((FormatDepthSize(pDepthStencilAttachmentFormat) != FormatDepthSize(pDepthStencilResolveAttachmentFormat)) ||
(FormatDepthNumericalType(pDepthStencilAttachmentFormat) !=
FormatDepthNumericalType(pDepthStencilResolveAttachmentFormat))) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkSubpassDescriptionDepthStencilResolveKHR-pDepthStencilResolveAttachment-03181",
"vkCreateRenderPass2KHR(): Subpass %u includes a VkSubpassDescriptionDepthStencilResolveKHR "
"structure with resolve attachment %u which has a depth component (size %u). The depth component "
"of pDepthStencilAttachment must have the same number of bits (currently %u) and the same numerical type.",
i, resolve->pDepthStencilResolveAttachment->attachment,
FormatDepthSize(pDepthStencilResolveAttachmentFormat), FormatDepthSize(pDepthStencilAttachmentFormat));
}
if ((FormatStencilSize(pDepthStencilAttachmentFormat) != FormatStencilSize(pDepthStencilResolveAttachmentFormat)) ||
(FormatStencilNumericalType(pDepthStencilAttachmentFormat) !=
FormatStencilNumericalType(pDepthStencilResolveAttachmentFormat))) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkSubpassDescriptionDepthStencilResolveKHR-pDepthStencilResolveAttachment-03182",
"vkCreateRenderPass2KHR(): Subpass %u includes a VkSubpassDescriptionDepthStencilResolveKHR "
"structure with resolve attachment %u which has a stencil component (size %u). The stencil component "
"of pDepthStencilAttachment must have the same number of bits (currently %u) and the same numerical type.",
i, resolve->pDepthStencilResolveAttachment->attachment,
FormatStencilSize(pDepthStencilResolveAttachmentFormat), FormatStencilSize(pDepthStencilAttachmentFormat));
}
if (!(resolve->depthResolveMode == VK_RESOLVE_MODE_NONE_KHR ||
resolve->depthResolveMode & depth_stencil_resolve_props.supportedDepthResolveModes)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkSubpassDescriptionDepthStencilResolveKHR-depthResolveMode-03183",
"vkCreateRenderPass2KHR(): Subpass %u includes a VkSubpassDescriptionDepthStencilResolveKHR "
"structure with invalid depthResolveMode=%u.",
i, resolve->depthResolveMode);
}
if (!(resolve->stencilResolveMode == VK_RESOLVE_MODE_NONE_KHR ||
resolve->stencilResolveMode & depth_stencil_resolve_props.supportedStencilResolveModes)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkSubpassDescriptionDepthStencilResolveKHR-stencilResolveMode-03184",
"vkCreateRenderPass2KHR(): Subpass %u includes a VkSubpassDescriptionDepthStencilResolveKHR "
"structure with invalid stencilResolveMode=%u.",
i, resolve->stencilResolveMode);
}
        if (FormatIsDepthAndStencil(pDepthStencilResolveAttachmentFormat) &&
            depth_stencil_resolve_props.independentResolve == VK_FALSE &&
            depth_stencil_resolve_props.independentResolveNone == VK_FALSE &&
            (resolve->depthResolveMode != resolve->stencilResolveMode)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkSubpassDescriptionDepthStencilResolveKHR-pDepthStencilResolveAttachment-03185",
"vkCreateRenderPass2KHR(): Subpass %u includes a VkSubpassDescriptionDepthStencilResolveKHR "
"structure. The values of depthResolveMode (%u) and stencilResolveMode (%u) must be identical.",
i, resolve->depthResolveMode, resolve->stencilResolveMode);
}
if (FormatIsDepthAndStencil(pDepthStencilResolveAttachmentFormat) &&
depth_stencil_resolve_props.independentResolve == VK_FALSE &&
depth_stencil_resolve_props.independentResolveNone == VK_TRUE &&
!(resolve->depthResolveMode == resolve->stencilResolveMode || resolve->depthResolveMode == VK_RESOLVE_MODE_NONE_KHR ||
resolve->stencilResolveMode == VK_RESOLVE_MODE_NONE_KHR)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkSubpassDescriptionDepthStencilResolveKHR-pDepthStencilResolveAttachment-03186",
"vkCreateRenderPass2KHR(): Subpass %u includes a VkSubpassDescriptionDepthStencilResolveKHR "
"structure. The values of depthResolveMode (%u) and stencilResolveMode (%u) must be identical, or "
"one of them must be %u.",
i, resolve->depthResolveMode, resolve->stencilResolveMode, VK_RESOLVE_MODE_NONE_KHR);
}
}
return skip;
}
bool CoreChecks::PreCallValidateCreateRenderPass2KHR(VkDevice device, const VkRenderPassCreateInfo2KHR *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass) {
bool skip = false;
if (device_extensions.vk_khr_depth_stencil_resolve) {
skip |= ValidateDepthStencilResolve(report_data, phys_dev_ext_props.depth_stencil_resolve_props, pCreateInfo);
}
safe_VkRenderPassCreateInfo2KHR create_info_2(pCreateInfo);
skip |= ValidateCreateRenderPass(device, RENDER_PASS_VERSION_2, create_info_2.ptr());
return skip;
}
bool CoreChecks::ValidatePrimaryCommandBuffer(const CMD_BUFFER_STATE *pCB, char const *cmd_name, const char *error_code) const {
bool skip = false;
if (pCB->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(pCB->commandBuffer), error_code, "Cannot execute command %s on a secondary command buffer.",
cmd_name);
}
return skip;
}
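// renderArea must lie entirely within the framebuffer: non-negative offsets, and offset + extent no
// larger than the framebuffer's width/height.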
bool CoreChecks::VerifyRenderAreaBounds(const VkRenderPassBeginInfo *pRenderPassBegin) const {
bool skip = false;
const safe_VkFramebufferCreateInfo *pFramebufferInfo = &GetFramebufferState(pRenderPassBegin->framebuffer)->createInfo;
if (pRenderPassBegin->renderArea.offset.x < 0 ||
(pRenderPassBegin->renderArea.offset.x + pRenderPassBegin->renderArea.extent.width) > pFramebufferInfo->width ||
pRenderPassBegin->renderArea.offset.y < 0 ||
(pRenderPassBegin->renderArea.offset.y + pRenderPassBegin->renderArea.extent.height) > pFramebufferInfo->height) {
skip |= static_cast<bool>(log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
kVUID_Core_DrawState_InvalidRenderArea,
"Cannot execute a render pass with renderArea not within the bound of the framebuffer. RenderArea: x %d, y %d, width "
"%d, height %d. Framebuffer: width %d, height %d.",
pRenderPassBegin->renderArea.offset.x, pRenderPassBegin->renderArea.offset.y, pRenderPassBegin->renderArea.extent.width,
pRenderPassBegin->renderArea.extent.height, pFramebufferInfo->width, pFramebufferInfo->height));
}
return skip;
}
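// For imageless framebuffers (VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT_KHR), the image views supplied via
// VkRenderPassAttachmentBeginInfoKHR at render pass begin time must match the
// VkFramebufferAttachmentImageInfoKHR entries the framebuffer was created with (flags, usage,
// dimensions, layer count, view formats) as well as the render pass attachment descriptions.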
bool CoreChecks::VerifyFramebufferAndRenderPassImageViews(const VkRenderPassBeginInfo *pRenderPassBeginInfo) const {
bool skip = false;
const VkRenderPassAttachmentBeginInfoKHR *pRenderPassAttachmentBeginInfo =
lvl_find_in_chain<VkRenderPassAttachmentBeginInfoKHR>(pRenderPassBeginInfo->pNext);
if (pRenderPassAttachmentBeginInfo && pRenderPassAttachmentBeginInfo->attachmentCount != 0) {
const safe_VkFramebufferCreateInfo *pFramebufferCreateInfo =
&GetFramebufferState(pRenderPassBeginInfo->framebuffer)->createInfo;
const VkFramebufferAttachmentsCreateInfoKHR *pFramebufferAttachmentsCreateInfo =
lvl_find_in_chain<VkFramebufferAttachmentsCreateInfoKHR>(pFramebufferCreateInfo->pNext);
if ((pFramebufferCreateInfo->flags & VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT_KHR) == 0) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
HandleToUint64(pRenderPassBeginInfo->renderPass), "VUID-VkRenderPassBeginInfo-framebuffer-03207",
"VkRenderPassBeginInfo: Image views specified at render pass begin, but framebuffer not created with "
"VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT_KHR");
} else if (pFramebufferAttachmentsCreateInfo) {
if (pFramebufferAttachmentsCreateInfo->attachmentImageInfoCount != pRenderPassAttachmentBeginInfo->attachmentCount) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
HandleToUint64(pRenderPassBeginInfo->renderPass), "VUID-VkRenderPassBeginInfo-framebuffer-03208",
"VkRenderPassBeginInfo: %u image views specified at render pass begin, but framebuffer "
"created expecting %u attachments",
pRenderPassAttachmentBeginInfo->attachmentCount,
pFramebufferAttachmentsCreateInfo->attachmentImageInfoCount);
} else {
const safe_VkRenderPassCreateInfo2KHR *pRenderPassCreateInfo =
&GetRenderPassState(pRenderPassBeginInfo->renderPass)->createInfo;
for (uint32_t i = 0; i < pRenderPassAttachmentBeginInfo->attachmentCount; ++i) {
const VkImageViewCreateInfo *pImageViewCreateInfo =
&GetImageViewState(pRenderPassAttachmentBeginInfo->pAttachments[i])->create_info;
const VkFramebufferAttachmentImageInfoKHR *pFramebufferAttachmentImageInfo =
&pFramebufferAttachmentsCreateInfo->pAttachmentImageInfos[i];
const VkImageCreateInfo *pImageCreateInfo = &GetImageState(pImageViewCreateInfo->image)->createInfo;
if (pFramebufferAttachmentImageInfo->flags != pImageCreateInfo->flags) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
HandleToUint64(pRenderPassBeginInfo->renderPass),
"VUID-VkRenderPassBeginInfo-framebuffer-03209",
"VkRenderPassBeginInfo: Image view #%u created from an image with flags set as 0x%X, "
"but image info #%u used to create the framebuffer had flags set as 0x%X",
i, pImageCreateInfo->flags, i, pFramebufferAttachmentImageInfo->flags);
}
if (pFramebufferAttachmentImageInfo->usage != pImageCreateInfo->usage) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
HandleToUint64(pRenderPassBeginInfo->renderPass),
"VUID-VkRenderPassBeginInfo-framebuffer-03210",
"VkRenderPassBeginInfo: Image view #%u created from an image with usage set as 0x%X, "
"but image info #%u used to create the framebuffer had usage set as 0x%X",
i, pImageCreateInfo->usage, i, pFramebufferAttachmentImageInfo->usage);
}
if (pFramebufferAttachmentImageInfo->width != pImageCreateInfo->extent.width) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
HandleToUint64(pRenderPassBeginInfo->renderPass),
"VUID-VkRenderPassBeginInfo-framebuffer-03211",
"VkRenderPassBeginInfo: Image view #%u created from an image with width set as %u, "
"but image info #%u used to create the framebuffer had width set as %u",
i, pImageCreateInfo->extent.width, i, pFramebufferAttachmentImageInfo->width);
}
if (pFramebufferAttachmentImageInfo->height != pImageCreateInfo->extent.height) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
HandleToUint64(pRenderPassBeginInfo->renderPass),
"VUID-VkRenderPassBeginInfo-framebuffer-03212",
"VkRenderPassBeginInfo: Image view #%u created from an image with height set as %u, "
"but image info #%u used to create the framebuffer had height set as %u",
i, pImageCreateInfo->extent.height, i, pFramebufferAttachmentImageInfo->height);
}
if (pFramebufferAttachmentImageInfo->layerCount != pImageViewCreateInfo->subresourceRange.layerCount) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
HandleToUint64(pRenderPassBeginInfo->renderPass), "VUID-VkRenderPassBeginInfo-framebuffer-03213",
"VkRenderPassBeginInfo: Image view #%u created with a subresource range with a layerCount of %u, "
"but image info #%u used to create the framebuffer had layerCount set as %u",
i, pImageViewCreateInfo->subresourceRange.layerCount, i, pFramebufferAttachmentImageInfo->layerCount);
}
const VkImageFormatListCreateInfoKHR *pImageFormatListCreateInfo =
lvl_find_in_chain<VkImageFormatListCreateInfoKHR>(pImageCreateInfo->pNext);
if (pImageFormatListCreateInfo) {
if (pImageFormatListCreateInfo->viewFormatCount != pFramebufferAttachmentImageInfo->viewFormatCount) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
HandleToUint64(pRenderPassBeginInfo->renderPass), "VUID-VkRenderPassBeginInfo-framebuffer-03214",
"VkRenderPassBeginInfo: Image view #%u created with an image with a viewFormatCount of %u, "
"but image info #%u used to create the framebuffer had viewFormatCount set as %u",
i, pImageFormatListCreateInfo->viewFormatCount, i,
pFramebufferAttachmentImageInfo->viewFormatCount);
}
for (uint32_t j = 0; j < pImageFormatListCreateInfo->viewFormatCount; ++j) {
bool formatFound = false;
for (uint32_t k = 0; k < pFramebufferAttachmentImageInfo->viewFormatCount; ++k) {
if (pImageFormatListCreateInfo->pViewFormats[j] ==
pFramebufferAttachmentImageInfo->pViewFormats[k]) {
formatFound = true;
}
}
if (!formatFound) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
HandleToUint64(pRenderPassBeginInfo->renderPass),
"VUID-VkRenderPassBeginInfo-framebuffer-03215",
"VkRenderPassBeginInfo: Image view #%u created with an image including the format "
"%s in its view format list, "
"but image info #%u used to create the framebuffer does not include this format",
i, string_VkFormat(pImageFormatListCreateInfo->pViewFormats[j]), i);
}
}
}
if (pRenderPassCreateInfo->pAttachments[i].format != pImageViewCreateInfo->format) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
HandleToUint64(pRenderPassBeginInfo->renderPass),
"VUID-VkRenderPassBeginInfo-framebuffer-03216",
"VkRenderPassBeginInfo: Image view #%u created with a format of %s, "
"but render pass attachment description #%u created with a format of %s",
i, string_VkFormat(pImageViewCreateInfo->format), i,
string_VkFormat(pRenderPassCreateInfo->pAttachments[i].format));
}
if (pRenderPassCreateInfo->pAttachments[i].samples != pImageCreateInfo->samples) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
HandleToUint64(pRenderPassBeginInfo->renderPass),
"VUID-VkRenderPassBeginInfo-framebuffer-03217",
"VkRenderPassBeginInfo: Image view #%u created with an image with %s samples, "
"but render pass attachment description #%u created with %s samples",
i, string_VkSampleCountFlagBits(pImageCreateInfo->samples), i,
string_VkSampleCountFlagBits(pRenderPassCreateInfo->pAttachments[i].samples));
}
if (pImageViewCreateInfo->subresourceRange.levelCount != 1) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
HandleToUint64(pRenderPassAttachmentBeginInfo->pAttachments[i]),
"VUID-VkRenderPassAttachmentBeginInfoKHR-pAttachments-03218",
"VkRenderPassAttachmentBeginInfo: Image view #%u created with multiple (%u) mip levels.", i,
pImageViewCreateInfo->subresourceRange.levelCount);
}
if (((pImageViewCreateInfo->components.r != VK_COMPONENT_SWIZZLE_IDENTITY) &&
(pImageViewCreateInfo->components.r != VK_COMPONENT_SWIZZLE_R)) ||
((pImageViewCreateInfo->components.g != VK_COMPONENT_SWIZZLE_IDENTITY) &&
(pImageViewCreateInfo->components.g != VK_COMPONENT_SWIZZLE_G)) ||
((pImageViewCreateInfo->components.b != VK_COMPONENT_SWIZZLE_IDENTITY) &&
(pImageViewCreateInfo->components.b != VK_COMPONENT_SWIZZLE_B)) ||
((pImageViewCreateInfo->components.a != VK_COMPONENT_SWIZZLE_IDENTITY) &&
(pImageViewCreateInfo->components.a != VK_COMPONENT_SWIZZLE_A))) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
HandleToUint64(pRenderPassAttachmentBeginInfo->pAttachments[i]),
"VUID-VkRenderPassAttachmentBeginInfoKHR-pAttachments-03219",
"VkRenderPassAttachmentBeginInfo: Image view #%u created with non-identity swizzle. All "
"framebuffer attachments must have been created with the identity swizzle. Here are the actual "
"swizzle values:\n"
"r swizzle = %s\n"
"g swizzle = %s\n"
"b swizzle = %s\n"
"a swizzle = %s\n",
i, string_VkComponentSwizzle(pImageViewCreateInfo->components.r),
string_VkComponentSwizzle(pImageViewCreateInfo->components.g),
string_VkComponentSwizzle(pImageViewCreateInfo->components.b),
string_VkComponentSwizzle(pImageViewCreateInfo->components.a));
}
}
}
}
}
return skip;
}
// If this is a stencil format, make sure the stencil[Load|Store]Op flag is checked, while if it is a depth/color attachment the
// [load|store]Op flag must be checked
// TODO: The memory valid flag in DEVICE_MEMORY_STATE should probably be split to track the validity of stencil memory separately.
template <typename T>
static bool FormatSpecificLoadAndStoreOpSettings(VkFormat format, T color_depth_op, T stencil_op, T op) {
if (color_depth_op != op && stencil_op != op) {
return false;
}
bool check_color_depth_load_op = !FormatIsStencilOnly(format);
bool check_stencil_load_op = FormatIsDepthAndStencil(format) || !check_color_depth_load_op;
return ((check_color_depth_load_op && (color_depth_op == op)) || (check_stencil_load_op && (stencil_op == op)));
}
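// Example (illustrative): for a VK_FORMAT_D24_UNORM_S8_UINT attachment,
//   FormatSpecificLoadAndStoreOpSettings(format, loadOp, stencilLoadOp, VK_ATTACHMENT_LOAD_OP_CLEAR)
// returns true if either the depth loadOp or the stencil loadOp clears. For color and depth-only
// formats only color_depth_op is considered; for stencil-only formats only stencil_op is.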
bool CoreChecks::ValidateCmdBeginRenderPass(VkCommandBuffer commandBuffer, RenderPassCreateVersion rp_version,
const VkRenderPassBeginInfo *pRenderPassBegin) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
auto render_pass_state = pRenderPassBegin ? GetRenderPassState(pRenderPassBegin->renderPass) : nullptr;
auto framebuffer = pRenderPassBegin ? GetFramebufferState(pRenderPassBegin->framebuffer) : nullptr;
bool skip = false;
const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2);
const char *vuid;
const char *const function_name = use_rp2 ? "vkCmdBeginRenderPass2KHR()" : "vkCmdBeginRenderPass()";
if (render_pass_state) {
uint32_t clear_op_size = 0; // Make sure pClearValues is at least as large as last LOAD_OP_CLEAR
// Handle extension struct from EXT_sample_locations
const VkRenderPassSampleLocationsBeginInfoEXT *pSampleLocationsBeginInfo =
lvl_find_in_chain<VkRenderPassSampleLocationsBeginInfoEXT>(pRenderPassBegin->pNext);
if (pSampleLocationsBeginInfo) {
for (uint32_t i = 0; i < pSampleLocationsBeginInfo->attachmentInitialSampleLocationsCount; ++i) {
if (pSampleLocationsBeginInfo->pAttachmentInitialSampleLocations[i].attachmentIndex >=
render_pass_state->createInfo.attachmentCount) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkAttachmentSampleLocationsEXT-attachmentIndex-01531",
"Attachment index %u specified by attachment sample locations %u is greater than the "
"attachment count of %u for the render pass being begun.",
pSampleLocationsBeginInfo->pAttachmentInitialSampleLocations[i].attachmentIndex, i,
render_pass_state->createInfo.attachmentCount);
}
}
for (uint32_t i = 0; i < pSampleLocationsBeginInfo->postSubpassSampleLocationsCount; ++i) {
if (pSampleLocationsBeginInfo->pPostSubpassSampleLocations[i].subpassIndex >=
render_pass_state->createInfo.subpassCount) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkSubpassSampleLocationsEXT-subpassIndex-01532",
"Subpass index %u specified by subpass sample locations %u is greater than the subpass count "
"of %u for the render pass being begun.",
pSampleLocationsBeginInfo->pPostSubpassSampleLocations[i].subpassIndex, i,
render_pass_state->createInfo.subpassCount);
}
}
}
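        // pClearValues is indexed by attachment number, so the required array size is the highest
        // attachment index that performs a LOAD_OP_CLEAR (in its color/depth or stencil aspect) plus one.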
for (uint32_t i = 0; i < render_pass_state->createInfo.attachmentCount; ++i) {
auto pAttachment = &render_pass_state->createInfo.pAttachments[i];
if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->loadOp, pAttachment->stencilLoadOp,
VK_ATTACHMENT_LOAD_OP_CLEAR)) {
clear_op_size = static_cast<uint32_t>(i) + 1;
}
}
if (clear_op_size > pRenderPassBegin->clearValueCount) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
HandleToUint64(render_pass_state->renderPass), "VUID-VkRenderPassBeginInfo-clearValueCount-00902",
"In %s the VkRenderPassBeginInfo struct has a clearValueCount of %u but there "
"must be at least %u entries in pClearValues array to account for the highest index attachment in "
"%s that uses VK_ATTACHMENT_LOAD_OP_CLEAR is %u. Note that the pClearValues array is indexed by "
"attachment number so even if some pClearValues entries between 0 and %u correspond to attachments "
"that aren't cleared they will be ignored.",
function_name, pRenderPassBegin->clearValueCount, clear_op_size,
report_data->FormatHandle(render_pass_state->renderPass).c_str(), clear_op_size, clear_op_size - 1);
}
skip |= VerifyFramebufferAndRenderPassImageViews(pRenderPassBegin);
skip |= VerifyRenderAreaBounds(pRenderPassBegin);
skip |= VerifyFramebufferAndRenderPassLayouts(rp_version, cb_state, pRenderPassBegin,
GetFramebufferState(pRenderPassBegin->framebuffer));
        if (framebuffer && framebuffer->rp_state->renderPass != render_pass_state->renderPass) {
skip |= ValidateRenderPassCompatibility("render pass", render_pass_state, "framebuffer", framebuffer->rp_state.get(),
function_name, "VUID-VkRenderPassBeginInfo-renderPass-00904");
}
vuid = use_rp2 ? "VUID-vkCmdBeginRenderPass2KHR-renderpass" : "VUID-vkCmdBeginRenderPass-renderpass";
skip |= InsideRenderPass(cb_state, function_name, vuid);
skip |= ValidateDependencies(framebuffer, render_pass_state);
vuid = use_rp2 ? "VUID-vkCmdBeginRenderPass2KHR-bufferlevel" : "VUID-vkCmdBeginRenderPass-bufferlevel";
skip |= ValidatePrimaryCommandBuffer(cb_state, function_name, vuid);
vuid = use_rp2 ? "VUID-vkCmdBeginRenderPass2KHR-commandBuffer-cmdpool" : "VUID-vkCmdBeginRenderPass-commandBuffer-cmdpool";
skip |= ValidateCmdQueueFlags(cb_state, function_name, VK_QUEUE_GRAPHICS_BIT, vuid);
const CMD_TYPE cmd_type = use_rp2 ? CMD_BEGINRENDERPASS2KHR : CMD_BEGINRENDERPASS;
skip |= ValidateCmd(cb_state, cmd_type, function_name);
}
    auto chained_device_group_struct =
        pRenderPassBegin ? lvl_find_in_chain<VkDeviceGroupRenderPassBeginInfo>(pRenderPassBegin->pNext) : nullptr;
if (chained_device_group_struct) {
skip |= ValidateDeviceMaskToPhysicalDeviceCount(
chained_device_group_struct->deviceMask, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
HandleToUint64(pRenderPassBegin->renderPass), "VUID-VkDeviceGroupRenderPassBeginInfo-deviceMask-00905");
skip |= ValidateDeviceMaskToZero(chained_device_group_struct->deviceMask, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
HandleToUint64(pRenderPassBegin->renderPass),
"VUID-VkDeviceGroupRenderPassBeginInfo-deviceMask-00906");
skip |= ValidateDeviceMaskToCommandBuffer(
cb_state, chained_device_group_struct->deviceMask, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
HandleToUint64(pRenderPassBegin->renderPass), "VUID-VkDeviceGroupRenderPassBeginInfo-deviceMask-00907");
if (chained_device_group_struct->deviceRenderAreaCount != 0 &&
chained_device_group_struct->deviceRenderAreaCount != physical_device_count) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
HandleToUint64(pRenderPassBegin->renderPass),
"VUID-VkDeviceGroupRenderPassBeginInfo-deviceRenderAreaCount-00908",
"deviceRenderAreaCount[%" PRIu32 "] is invaild. Physical device count is %" PRIu32 ".",
chained_device_group_struct->deviceRenderAreaCount, physical_device_count);
}
}
return skip;
}
bool CoreChecks::PreCallValidateCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
VkSubpassContents contents) {
bool skip = ValidateCmdBeginRenderPass(commandBuffer, RENDER_PASS_VERSION_1, pRenderPassBegin);
return skip;
}
bool CoreChecks::PreCallValidateCmdBeginRenderPass2KHR(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
const VkSubpassBeginInfoKHR *pSubpassBeginInfo) {
bool skip = ValidateCmdBeginRenderPass(commandBuffer, RENDER_PASS_VERSION_2, pRenderPassBegin);
return skip;
}
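// State-tracking record for vkCmdBeginRenderPass*: latch the active render pass, framebuffer, subpass
// index, and subpass contents on the command buffer, bind the framebuffer and render pass to it, and
// capture the device mask from a chained VkDeviceGroupRenderPassBeginInfo if present.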
void ValidationStateTracker::RecordCmdBeginRenderPassState(VkCommandBuffer commandBuffer,
const VkRenderPassBeginInfo *pRenderPassBegin,
const VkSubpassContents contents) {
CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
auto render_pass_state = pRenderPassBegin ? GetRenderPassState(pRenderPassBegin->renderPass) : nullptr;
auto framebuffer = pRenderPassBegin ? GetFramebufferState(pRenderPassBegin->framebuffer) : nullptr;
if (render_pass_state) {
cb_state->activeFramebuffer = pRenderPassBegin->framebuffer;
cb_state->activeRenderPass = render_pass_state;
// This is a shallow copy as that is all that is needed for now
cb_state->activeRenderPassBeginInfo = *pRenderPassBegin;
cb_state->activeSubpass = 0;
cb_state->activeSubpassContents = contents;
cb_state->framebuffers.insert(pRenderPassBegin->framebuffer);
// Connect this framebuffer and its children to this cmdBuffer
AddFramebufferBinding(cb_state, framebuffer);
// Connect this RP to cmdBuffer
AddCommandBufferBinding(&render_pass_state->cb_bindings,
VulkanTypedHandle(render_pass_state->renderPass, kVulkanObjectTypeRenderPass), cb_state);
auto chained_device_group_struct = lvl_find_in_chain<VkDeviceGroupRenderPassBeginInfo>(pRenderPassBegin->pNext);
if (chained_device_group_struct) {
cb_state->active_render_pass_device_mask = chained_device_group_struct->deviceMask;
} else {
cb_state->active_render_pass_device_mask = cb_state->initial_device_mask;
}
}
}
void CoreChecks::RecordCmdBeginRenderPassLayouts(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
const VkSubpassContents contents) {
CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
auto render_pass_state = pRenderPassBegin ? GetRenderPassState(pRenderPassBegin->renderPass) : nullptr;
auto framebuffer = pRenderPassBegin ? GetFramebufferState(pRenderPassBegin->framebuffer) : nullptr;
if (render_pass_state) {
// transition attachments to the correct layouts for beginning of renderPass and first subpass
TransitionBeginRenderPassLayouts(cb_state, render_pass_state, framebuffer);
}
}
void ValidationStateTracker::PreCallRecordCmdBeginRenderPass(VkCommandBuffer commandBuffer,
const VkRenderPassBeginInfo *pRenderPassBegin,
VkSubpassContents contents) {
RecordCmdBeginRenderPassState(commandBuffer, pRenderPassBegin, contents);
}
void CoreChecks::PreCallRecordCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
VkSubpassContents contents) {
StateTracker::PreCallRecordCmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents);
RecordCmdBeginRenderPassLayouts(commandBuffer, pRenderPassBegin, contents);
}
void ValidationStateTracker::PreCallRecordCmdBeginRenderPass2KHR(VkCommandBuffer commandBuffer,
const VkRenderPassBeginInfo *pRenderPassBegin,
const VkSubpassBeginInfoKHR *pSubpassBeginInfo) {
RecordCmdBeginRenderPassState(commandBuffer, pRenderPassBegin, pSubpassBeginInfo->contents);
}
void CoreChecks::PreCallRecordCmdBeginRenderPass2KHR(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
const VkSubpassBeginInfoKHR *pSubpassBeginInfo) {
StateTracker::PreCallRecordCmdBeginRenderPass2KHR(commandBuffer, pRenderPassBegin, pSubpassBeginInfo);
RecordCmdBeginRenderPassLayouts(commandBuffer, pRenderPassBegin, pSubpassBeginInfo->contents);
}
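// Shared validation for vkCmdNextSubpass and vkCmdNextSubpass2KHR: the command buffer must be a
// primary buffer on a graphics queue, must be recording inside a render pass instance, and must not
// already be on the final subpass.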
bool CoreChecks::ValidateCmdNextSubpass(RenderPassCreateVersion rp_version, VkCommandBuffer commandBuffer) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = false;
const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2);
const char *vuid;
const char *const function_name = use_rp2 ? "vkCmdNextSubpass2KHR()" : "vkCmdNextSubpass()";
vuid = use_rp2 ? "VUID-vkCmdNextSubpass2KHR-bufferlevel" : "VUID-vkCmdNextSubpass-bufferlevel";
skip |= ValidatePrimaryCommandBuffer(cb_state, function_name, vuid);
vuid = use_rp2 ? "VUID-vkCmdNextSubpass2KHR-commandBuffer-cmdpool" : "VUID-vkCmdNextSubpass-commandBuffer-cmdpool";
skip |= ValidateCmdQueueFlags(cb_state, function_name, VK_QUEUE_GRAPHICS_BIT, vuid);
const CMD_TYPE cmd_type = use_rp2 ? CMD_NEXTSUBPASS2KHR : CMD_NEXTSUBPASS;
skip |= ValidateCmd(cb_state, cmd_type, function_name);
vuid = use_rp2 ? "VUID-vkCmdNextSubpass2KHR-renderpass" : "VUID-vkCmdNextSubpass-renderpass";
skip |= OutsideRenderPass(cb_state, function_name, vuid);
    // Guard against a null activeRenderPass (already flagged by OutsideRenderPass above).
    if (cb_state->activeRenderPass) {
        auto subpassCount = cb_state->activeRenderPass->createInfo.subpassCount;
        if (cb_state->activeSubpass == subpassCount - 1) {
            vuid = use_rp2 ? "VUID-vkCmdNextSubpass2KHR-None-03102" : "VUID-vkCmdNextSubpass-None-00909";
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(commandBuffer), vuid, "%s: Attempted to advance beyond final subpass.",
                            function_name);
        }
    }
return skip;
}
bool CoreChecks::PreCallValidateCmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) {
return ValidateCmdNextSubpass(RENDER_PASS_VERSION_1, commandBuffer);
}
bool CoreChecks::PreCallValidateCmdNextSubpass2KHR(VkCommandBuffer commandBuffer, const VkSubpassBeginInfoKHR *pSubpassBeginInfo,
const VkSubpassEndInfoKHR *pSubpassEndInfo) {
return ValidateCmdNextSubpass(RENDER_PASS_VERSION_2, commandBuffer);
}
void ValidationStateTracker::RecordCmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) {
CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
cb_state->activeSubpass++;
cb_state->activeSubpassContents = contents;
}
void CoreChecks::RecordCmdNextSubpassLayouts(VkCommandBuffer commandBuffer, VkSubpassContents contents) {
CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
TransitionSubpassLayouts(cb_state, cb_state->activeRenderPass, cb_state->activeSubpass,
GetFramebufferState(cb_state->activeRenderPassBeginInfo.framebuffer));
}
void ValidationStateTracker::PostCallRecordCmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) {
RecordCmdNextSubpass(commandBuffer, contents);
}
void CoreChecks::PostCallRecordCmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) {
StateTracker::PostCallRecordCmdNextSubpass(commandBuffer, contents);
RecordCmdNextSubpassLayouts(commandBuffer, contents);
}
void ValidationStateTracker::PostCallRecordCmdNextSubpass2KHR(VkCommandBuffer commandBuffer,
const VkSubpassBeginInfoKHR *pSubpassBeginInfo,
const VkSubpassEndInfoKHR *pSubpassEndInfo) {
RecordCmdNextSubpass(commandBuffer, pSubpassBeginInfo->contents);
}
void CoreChecks::PostCallRecordCmdNextSubpass2KHR(VkCommandBuffer commandBuffer, const VkSubpassBeginInfoKHR *pSubpassBeginInfo,
const VkSubpassEndInfoKHR *pSubpassEndInfo) {
StateTracker::PostCallRecordCmdNextSubpass2KHR(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo);
RecordCmdNextSubpassLayouts(commandBuffer, pSubpassBeginInfo->contents);
}
bool CoreChecks::ValidateCmdEndRenderPass(RenderPassCreateVersion rp_version, VkCommandBuffer commandBuffer) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = false;
const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2);
const char *vuid;
const char *const function_name = use_rp2 ? "vkCmdEndRenderPass2KHR()" : "vkCmdEndRenderPass()";
RENDER_PASS_STATE *rp_state = cb_state->activeRenderPass;
if (rp_state) {
if (cb_state->activeSubpass != rp_state->createInfo.subpassCount - 1) {
vuid = use_rp2 ? "VUID-vkCmdEndRenderPass2KHR-None-03103" : "VUID-vkCmdEndRenderPass-None-00910";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), vuid, "%s: Called before reaching final subpass.", function_name);
}
}
vuid = use_rp2 ? "VUID-vkCmdEndRenderPass2KHR-renderpass" : "VUID-vkCmdEndRenderPass-renderpass";
skip |= OutsideRenderPass(cb_state, function_name, vuid);
vuid = use_rp2 ? "VUID-vkCmdEndRenderPass2KHR-bufferlevel" : "VUID-vkCmdEndRenderPass-bufferlevel";
skip |= ValidatePrimaryCommandBuffer(cb_state, function_name, vuid);
vuid = use_rp2 ? "VUID-vkCmdEndRenderPass2KHR-commandBuffer-cmdpool" : "VUID-vkCmdEndRenderPass-commandBuffer-cmdpool";
skip |= ValidateCmdQueueFlags(cb_state, function_name, VK_QUEUE_GRAPHICS_BIT, vuid);
const CMD_TYPE cmd_type = use_rp2 ? CMD_ENDRENDERPASS2KHR : CMD_ENDRENDERPASS;
skip |= ValidateCmd(cb_state, cmd_type, function_name);
return skip;
}
bool CoreChecks::PreCallValidateCmdEndRenderPass(VkCommandBuffer commandBuffer) {
bool skip = ValidateCmdEndRenderPass(RENDER_PASS_VERSION_1, commandBuffer);
return skip;
}
bool CoreChecks::PreCallValidateCmdEndRenderPass2KHR(VkCommandBuffer commandBuffer, const VkSubpassEndInfoKHR *pSubpassEndInfo) {
bool skip = ValidateCmdEndRenderPass(RENDER_PASS_VERSION_2, commandBuffer);
return skip;
}
void ValidationStateTracker::RecordCmdEndRenderPassState(VkCommandBuffer commandBuffer) {
CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
cb_state->activeRenderPass = nullptr;
cb_state->activeSubpass = 0;
cb_state->activeFramebuffer = VK_NULL_HANDLE;
}
void CoreChecks::RecordCmdEndRenderPassLayouts(VkCommandBuffer commandBuffer) {
CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
FRAMEBUFFER_STATE *framebuffer = GetFramebufferState(cb_state->activeFramebuffer);
TransitionFinalSubpassLayouts(cb_state, &cb_state->activeRenderPassBeginInfo, framebuffer);
}
void ValidationStateTracker::PostCallRecordCmdEndRenderPass(VkCommandBuffer commandBuffer) {
RecordCmdEndRenderPassState(commandBuffer);
}
void CoreChecks::PostCallRecordCmdEndRenderPass(VkCommandBuffer commandBuffer) {
// Record the end at the CoreLevel to ensure StateTracker cleanup doesn't step on anything we need.
RecordCmdEndRenderPassLayouts(commandBuffer);
StateTracker::PostCallRecordCmdEndRenderPass(commandBuffer);
}
void ValidationStateTracker::PostCallRecordCmdEndRenderPass2KHR(VkCommandBuffer commandBuffer,
const VkSubpassEndInfoKHR *pSubpassEndInfo) {
RecordCmdEndRenderPassState(commandBuffer);
}
void CoreChecks::PostCallRecordCmdEndRenderPass2KHR(VkCommandBuffer commandBuffer, const VkSubpassEndInfoKHR *pSubpassEndInfo) {
StateTracker::PostCallRecordCmdEndRenderPass2KHR(commandBuffer, pSubpassEndInfo);
RecordCmdEndRenderPassLayouts(commandBuffer);
}
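// When a secondary command buffer's inheritance info names a framebuffer, it must match the primary
// command buffer's currently active framebuffer, and the handle must refer to a known framebuffer.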
bool CoreChecks::ValidateFramebuffer(VkCommandBuffer primaryBuffer, const CMD_BUFFER_STATE *pCB, VkCommandBuffer secondaryBuffer,
const CMD_BUFFER_STATE *pSubCB, const char *caller) {
bool skip = false;
if (!pSubCB->beginInfo.pInheritanceInfo) {
return skip;
}
VkFramebuffer primary_fb = pCB->activeFramebuffer;
VkFramebuffer secondary_fb = pSubCB->beginInfo.pInheritanceInfo->framebuffer;
if (secondary_fb != VK_NULL_HANDLE) {
if (primary_fb != secondary_fb) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(primaryBuffer), "VUID-vkCmdExecuteCommands-pCommandBuffers-00099",
"vkCmdExecuteCommands() called w/ invalid secondary %s which has a %s"
" that is not the same as the primary command buffer's current active %s.",
report_data->FormatHandle(secondaryBuffer).c_str(), report_data->FormatHandle(secondary_fb).c_str(),
report_data->FormatHandle(primary_fb).c_str());
}
auto fb = GetFramebufferState(secondary_fb);
if (!fb) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(primaryBuffer), kVUID_Core_DrawState_InvalidSecondaryCommandBuffer,
"vkCmdExecuteCommands() called w/ invalid %s which has invalid %s.",
report_data->FormatHandle(secondaryBuffer).c_str(), report_data->FormatHandle(secondary_fb).c_str());
return skip;
}
}
return skip;
}
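// Cross-checks a secondary command buffer against the primary at vkCmdExecuteCommands() time: the
// secondary's inherited pipelineStatistics must be compatible with any active pipeline-statistics
// query, the secondary must not have started a query of a type already active on the primary, and
// both command buffers must come from pools of the same queue family.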
bool CoreChecks::ValidateSecondaryCommandBufferState(CMD_BUFFER_STATE *pCB, CMD_BUFFER_STATE *pSubCB) {
bool skip = false;
unordered_set<int> activeTypes;
if (!disabled.query_validation) {
for (auto queryObject : pCB->activeQueries) {
auto query_pool_state = GetQueryPoolState(queryObject.pool);
if (query_pool_state) {
if (query_pool_state->createInfo.queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS &&
pSubCB->beginInfo.pInheritanceInfo) {
VkQueryPipelineStatisticFlags cmdBufStatistics = pSubCB->beginInfo.pInheritanceInfo->pipelineStatistics;
if ((cmdBufStatistics & query_pool_state->createInfo.pipelineStatistics) != cmdBufStatistics) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(pCB->commandBuffer), "VUID-vkCmdExecuteCommands-commandBuffer-00104",
"vkCmdExecuteCommands() called w/ invalid %s which has invalid active %s"
". Pipeline statistics is being queried so the command buffer must have all bits set on the queryPool.",
report_data->FormatHandle(pCB->commandBuffer).c_str(),
report_data->FormatHandle(queryObject.pool).c_str());
}
}
activeTypes.insert(query_pool_state->createInfo.queryType);
}
}
for (auto queryObject : pSubCB->startedQueries) {
auto query_pool_state = GetQueryPoolState(queryObject.pool);
if (query_pool_state && activeTypes.count(query_pool_state->createInfo.queryType)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(pCB->commandBuffer), kVUID_Core_DrawState_InvalidSecondaryCommandBuffer,
"vkCmdExecuteCommands() called w/ invalid %s which has invalid active %s"
" of type %d but a query of that type has been started on secondary %s.",
report_data->FormatHandle(pCB->commandBuffer).c_str(),
report_data->FormatHandle(queryObject.pool).c_str(), query_pool_state->createInfo.queryType,
report_data->FormatHandle(pSubCB->commandBuffer).c_str());
}
}
}
auto primary_pool = GetCommandPoolState(pCB->createInfo.commandPool);
auto secondary_pool = GetCommandPoolState(pSubCB->createInfo.commandPool);
if (primary_pool && secondary_pool && (primary_pool->queueFamilyIndex != secondary_pool->queueFamilyIndex)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(pSubCB->commandBuffer), kVUID_Core_DrawState_InvalidQueueFamily,
"vkCmdExecuteCommands(): Primary %s created in queue family %d has secondary "
"%s created in queue family %d.",
report_data->FormatHandle(pCB->commandBuffer).c_str(), primary_pool->queueFamilyIndex,
report_data->FormatHandle(pSubCB->commandBuffer).c_str(), secondary_pool->queueFamilyIndex);
}
return skip;
}
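// Validate vkCmdExecuteCommands(): each element of pCommandBuffers must be a secondary command buffer whose recorded
// state (render pass continuation, simultaneous-use flags, inherited queries, and initial image layouts) is
// consistent with the primary command buffer executing it.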
bool CoreChecks::PreCallValidateCmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBuffersCount,
const VkCommandBuffer *pCommandBuffers) {
CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = false;
CMD_BUFFER_STATE *sub_cb_state = NULL;
std::unordered_set<CMD_BUFFER_STATE *> linked_command_buffers = cb_state->linkedCommandBuffers;
for (uint32_t i = 0; i < commandBuffersCount; i++) {
sub_cb_state = GetCBState(pCommandBuffers[i]);
assert(sub_cb_state);
if (VK_COMMAND_BUFFER_LEVEL_PRIMARY == sub_cb_state->createInfo.level) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(pCommandBuffers[i]), "VUID-vkCmdExecuteCommands-pCommandBuffers-00088",
"vkCmdExecuteCommands() called w/ Primary %s in element %u of pCommandBuffers array. All "
"cmd buffers in pCommandBuffers array must be secondary.",
report_data->FormatHandle(pCommandBuffers[i]).c_str(), i);
} else if (VK_COMMAND_BUFFER_LEVEL_SECONDARY == sub_cb_state->createInfo.level) {
if (sub_cb_state->beginInfo.pInheritanceInfo != nullptr) {
auto secondary_rp_state = GetRenderPassState(sub_cb_state->beginInfo.pInheritanceInfo->renderPass);
if (cb_state->activeRenderPass &&
!(sub_cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(pCommandBuffers[i]), "VUID-vkCmdExecuteCommands-pCommandBuffers-00096",
"vkCmdExecuteCommands(): Secondary %s is executed within a %s "
"instance scope, but the Secondary Command Buffer does not have the "
"VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT set in VkCommandBufferBeginInfo::flags when "
"the vkBeginCommandBuffer() was called.",
report_data->FormatHandle(pCommandBuffers[i]).c_str(),
report_data->FormatHandle(cb_state->activeRenderPass->renderPass).c_str());
} else if (!cb_state->activeRenderPass &&
(sub_cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(pCommandBuffers[i]), "VUID-vkCmdExecuteCommands-pCommandBuffers-00100",
"vkCmdExecuteCommands(): Secondary %s is executed outside a render pass "
"instance scope, but the Secondary Command Buffer does have the "
"VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT set in VkCommandBufferBeginInfo::flags when "
"the vkBeginCommandBuffer() was called.",
report_data->FormatHandle(pCommandBuffers[i]).c_str());
} else if (cb_state->activeRenderPass &&
(sub_cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
                    // Make sure the render pass is compatible with the parent command buffer's pass if the continue bit is set
if (cb_state->activeRenderPass->renderPass != secondary_rp_state->renderPass) {
skip |= ValidateRenderPassCompatibility(
"primary command buffer", cb_state->activeRenderPass, "secondary command buffer", secondary_rp_state,
"vkCmdExecuteCommands()", "VUID-vkCmdExecuteCommands-pInheritanceInfo-00098");
}
// If framebuffer for secondary CB is not NULL, then it must match active FB from primaryCB
skip |=
ValidateFramebuffer(commandBuffer, cb_state, pCommandBuffers[i], sub_cb_state, "vkCmdExecuteCommands()");
if (!sub_cb_state->cmd_execute_commands_functions.empty()) {
                        // Inherit primary's activeFramebuffer while running the validate functions
for (auto &function : sub_cb_state->cmd_execute_commands_functions) {
skip |= function(cb_state, cb_state->activeFramebuffer);
}
}
}
}
}
// TODO(mlentine): Move more logic into this method
skip |= ValidateSecondaryCommandBufferState(cb_state, sub_cb_state);
skip |= ValidateCommandBufferState(sub_cb_state, "vkCmdExecuteCommands()", 0,
"VUID-vkCmdExecuteCommands-pCommandBuffers-00089");
if (!(sub_cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
if (sub_cb_state->in_use.load() || linked_command_buffers.count(sub_cb_state)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer), "VUID-vkCmdExecuteCommands-pCommandBuffers-00090",
"Attempt to simultaneously execute %s without VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set!",
report_data->FormatHandle(cb_state->commandBuffer).c_str());
}
if (cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT) {
// Warn that non-simultaneous secondary cmd buffer renders primary non-simultaneous
skip |= log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(pCommandBuffers[i]), kVUID_Core_DrawState_InvalidCommandBufferSimultaneousUse,
"vkCmdExecuteCommands(): Secondary %s does not have "
"VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set and will cause primary "
"%s to be treated as if it does not have "
"VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set, even though it does.",
report_data->FormatHandle(pCommandBuffers[i]).c_str(),
report_data->FormatHandle(cb_state->commandBuffer).c_str());
}
}
if (!cb_state->activeQueries.empty() && !enabled_features.core.inheritedQueries) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(pCommandBuffers[i]), "VUID-vkCmdExecuteCommands-commandBuffer-00101",
"vkCmdExecuteCommands(): Secondary %s cannot be submitted with a query in flight and "
"inherited queries not supported on this device.",
report_data->FormatHandle(pCommandBuffers[i]).c_str());
}
// Validate initial layout uses vs. the primary cmd buffer state
// Novel Valid usage: "UNASSIGNED-vkCmdExecuteCommands-commandBuffer-00001"
// initial layout usage of secondary command buffers resources must match parent command buffer
const auto *const_cb_state = static_cast<const CMD_BUFFER_STATE *>(cb_state);
for (const auto &sub_layout_map_entry : sub_cb_state->image_layout_map) {
const auto image = sub_layout_map_entry.first;
const auto *image_state = GetImageState(image);
if (!image_state) continue; // Can't set layouts of a dead image
const auto *cb_subres_map = GetImageSubresourceLayoutMap(const_cb_state, image);
// Const getter can be null in which case we have nothing to check against for this image...
if (!cb_subres_map) continue;
const auto &sub_cb_subres_map = sub_layout_map_entry.second;
            // Validate the initial_uses: they must match the current state of the primary cb, or, absent a current state,
            // they must match any initial_layout.
for (auto it_init = sub_cb_subres_map->BeginInitialUse(); !it_init.AtEnd(); ++it_init) {
const auto &sub_layout = (*it_init).layout;
if (VK_IMAGE_LAYOUT_UNDEFINED == sub_layout) continue; // secondary doesn't care about current or initial
const auto &subresource = (*it_init).subresource;
// Look up the current layout (if any)
VkImageLayout cb_layout = cb_subres_map->GetSubresourceLayout(subresource);
const char *layout_type = "current";
if (cb_layout == kInvalidLayout) {
// Find initial layout (if any)
cb_layout = cb_subres_map->GetSubresourceInitialLayout(subresource);
layout_type = "initial";
}
if ((cb_layout != kInvalidLayout) && (cb_layout != sub_layout)) {
                    skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(pCommandBuffers[i]), "UNASSIGNED-vkCmdExecuteCommands-commandBuffer-00001",
"%s: Executed secondary command buffer using %s (subresource: aspectMask 0x%X array layer %u, "
"mip level %u) which expects layout %s--instead, image %s layout is %s.",
"vkCmdExecuteCommands():", report_data->FormatHandle(image).c_str(), subresource.aspectMask,
subresource.arrayLayer, subresource.mipLevel, string_VkImageLayout(sub_layout), layout_type,
string_VkImageLayout(cb_layout));
}
}
}
linked_command_buffers.insert(sub_cb_state);
}
skip |= ValidatePrimaryCommandBuffer(cb_state, "vkCmdExecuteCommands()", "VUID-vkCmdExecuteCommands-bufferlevel");
skip |= ValidateCmdQueueFlags(cb_state, "vkCmdExecuteCommands()",
VK_QUEUE_TRANSFER_BIT | VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
"VUID-vkCmdExecuteCommands-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_EXECUTECOMMANDS, "vkCmdExecuteCommands()");
return skip;
}
void CoreChecks::PreCallRecordCmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBuffersCount,
const VkCommandBuffer *pCommandBuffers) {
CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
CMD_BUFFER_STATE *sub_cb_state = NULL;
for (uint32_t i = 0; i < commandBuffersCount; i++) {
sub_cb_state = GetCBState(pCommandBuffers[i]);
assert(sub_cb_state);
if (!(sub_cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
if (cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT) {
// TODO: Because this is a state change, clearing the VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT needs to be moved
// from the validation step to the recording step
cb_state->beginInfo.flags &= ~VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT;
}
}
        // Propagate initial layout and current layout state to the primary cmd buffer
for (const auto &sub_layout_map_entry : sub_cb_state->image_layout_map) {
const auto image = sub_layout_map_entry.first;
const auto *image_state = GetImageState(image);
if (!image_state) continue; // Can't set layouts of a dead image
auto *cb_subres_map = GetImageSubresourceLayoutMap(cb_state, *image_state);
const auto *sub_cb_subres_map = sub_layout_map_entry.second.get();
assert(cb_subres_map && sub_cb_subres_map); // Non const get and map traversal should never be null
cb_subres_map->UpdateFrom(*sub_cb_subres_map);
}
sub_cb_state->primaryCommandBuffer = cb_state->commandBuffer;
cb_state->linkedCommandBuffers.insert(sub_cb_state);
sub_cb_state->linkedCommandBuffers.insert(cb_state);
for (auto &function : sub_cb_state->queryUpdates) {
cb_state->queryUpdates.push_back(function);
}
for (auto &function : sub_cb_state->queue_submit_functions) {
cb_state->queue_submit_functions.push_back(function);
}
}
}
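// vkMapMemory() is only legal on allocations from a HOST_VISIBLE memory type (VUID-vkMapMemory-memory-00682); the
// requested offset/size range is checked separately by ValidateMapMemRange().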
bool CoreChecks::PreCallValidateMapMemory(VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size,
VkFlags flags, void **ppData) {
bool skip = false;
DEVICE_MEMORY_STATE *mem_info = GetDevMemState(mem);
if (mem_info) {
if ((phys_dev_mem_props.memoryTypes[mem_info->alloc_info.memoryTypeIndex].propertyFlags &
VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) {
skip = log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
HandleToUint64(mem), "VUID-vkMapMemory-memory-00682",
"Mapping Memory without VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set: %s.",
report_data->FormatHandle(mem).c_str());
}
}
skip |= ValidateMapMemRange(mem, offset, size);
return skip;
}
void CoreChecks::PostCallRecordMapMemory(VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size, VkFlags flags,
void **ppData, VkResult result) {
if (VK_SUCCESS != result) return;
// TODO : What's the point of this range? See comment on creating new "bound_range" above, which may replace this
StoreMemRanges(mem, offset, size);
InitializeAndTrackMemory(mem, offset, size, ppData);
}
bool CoreChecks::PreCallValidateUnmapMemory(VkDevice device, VkDeviceMemory mem) {
bool skip = false;
auto mem_info = GetDevMemState(mem);
if (mem_info && !mem_info->mem_range.size) {
// Valid Usage: memory must currently be mapped
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
HandleToUint64(mem), "VUID-vkUnmapMemory-memory-00689", "Unmapping Memory without memory being mapped: %s.",
report_data->FormatHandle(mem).c_str());
}
return skip;
}
void CoreChecks::PreCallRecordUnmapMemory(VkDevice device, VkDeviceMemory mem) {
    auto mem_info = GetDevMemState(mem);
    if (!mem_info) return;  // Nothing to record for an unknown or already-destroyed allocation
    mem_info->mem_range.size = 0;
if (mem_info->shadow_copy) {
free(mem_info->shadow_copy_base);
mem_info->shadow_copy_base = 0;
mem_info->shadow_copy = 0;
}
}
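// For vkFlushMappedMemoryRanges()/vkInvalidateMappedMemoryRanges(), each VkMappedMemoryRange must lie within the
// region currently mapped on its VkDeviceMemory object (VUID-VkMappedMemoryRange-size-00685/00686).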
bool CoreChecks::ValidateMemoryIsMapped(const char *funcName, uint32_t memRangeCount, const VkMappedMemoryRange *pMemRanges) {
bool skip = false;
for (uint32_t i = 0; i < memRangeCount; ++i) {
auto mem_info = GetDevMemState(pMemRanges[i].memory);
if (mem_info) {
if (pMemRanges[i].size == VK_WHOLE_SIZE) {
if (mem_info->mem_range.offset > pMemRanges[i].offset) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
HandleToUint64(pMemRanges[i].memory), "VUID-VkMappedMemoryRange-size-00686",
"%s: Flush/Invalidate offset (" PRINTF_SIZE_T_SPECIFIER
") is less than Memory Object's offset (" PRINTF_SIZE_T_SPECIFIER ").",
funcName, static_cast<size_t>(pMemRanges[i].offset),
static_cast<size_t>(mem_info->mem_range.offset));
}
} else {
const uint64_t data_end = (mem_info->mem_range.size == VK_WHOLE_SIZE)
? mem_info->alloc_info.allocationSize
: (mem_info->mem_range.offset + mem_info->mem_range.size);
if ((mem_info->mem_range.offset > pMemRanges[i].offset) ||
(data_end < (pMemRanges[i].offset + pMemRanges[i].size))) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
HandleToUint64(pMemRanges[i].memory), "VUID-VkMappedMemoryRange-size-00685",
"%s: Flush/Invalidate size or offset (" PRINTF_SIZE_T_SPECIFIER ", " PRINTF_SIZE_T_SPECIFIER
") exceed the Memory Object's upper-bound (" PRINTF_SIZE_T_SPECIFIER ").",
funcName, static_cast<size_t>(pMemRanges[i].offset + pMemRanges[i].size),
static_cast<size_t>(pMemRanges[i].offset), static_cast<size_t>(data_end));
}
}
}
}
return skip;
}
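// When a shadow copy backs a non-coherent mapping, the pointer handed to the application is surrounded by guard
// bands of shadow_pad_size bytes filled with NoncoherentMemoryFillValue. Before flushing, verify both guard bands
// are intact (catching application under/overflow writes), then copy the application-visible bytes into the real
// driver mapping.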
bool CoreChecks::ValidateAndCopyNoncoherentMemoryToDriver(uint32_t mem_range_count, const VkMappedMemoryRange *mem_ranges) {
bool skip = false;
for (uint32_t i = 0; i < mem_range_count; ++i) {
auto mem_info = GetDevMemState(mem_ranges[i].memory);
if (mem_info) {
if (mem_info->shadow_copy) {
VkDeviceSize size = (mem_info->mem_range.size != VK_WHOLE_SIZE)
? mem_info->mem_range.size
: (mem_info->alloc_info.allocationSize - mem_info->mem_range.offset);
char *data = static_cast<char *>(mem_info->shadow_copy);
for (uint64_t j = 0; j < mem_info->shadow_pad_size; ++j) {
if (data[j] != NoncoherentMemoryFillValue) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
HandleToUint64(mem_ranges[i].memory), kVUID_Core_MemTrack_InvalidMap,
"Memory underflow was detected on %s.",
report_data->FormatHandle(mem_ranges[i].memory).c_str());
}
}
for (uint64_t j = (size + mem_info->shadow_pad_size); j < (2 * mem_info->shadow_pad_size + size); ++j) {
if (data[j] != NoncoherentMemoryFillValue) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
HandleToUint64(mem_ranges[i].memory), kVUID_Core_MemTrack_InvalidMap,
"Memory overflow was detected on %s.", report_data->FormatHandle(mem_ranges[i].memory).c_str());
}
}
memcpy(mem_info->p_driver_data, static_cast<void *>(data + mem_info->shadow_pad_size), (size_t)(size));
}
}
}
return skip;
}
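// Inverse of the flush path: after the driver invalidates non-coherent ranges, refresh the shadow copy seen by the
// application so it reflects the driver's current contents.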
void CoreChecks::CopyNoncoherentMemoryFromDriver(uint32_t mem_range_count, const VkMappedMemoryRange *mem_ranges) {
for (uint32_t i = 0; i < mem_range_count; ++i) {
auto mem_info = GetDevMemState(mem_ranges[i].memory);
if (mem_info && mem_info->shadow_copy) {
VkDeviceSize size = (mem_info->mem_range.size != VK_WHOLE_SIZE)
? mem_info->mem_range.size
: (mem_info->alloc_info.allocationSize - mem_ranges[i].offset);
char *data = static_cast<char *>(mem_info->shadow_copy);
memcpy(data + mem_info->shadow_pad_size, mem_info->p_driver_data, (size_t)(size));
}
}
}
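// Offsets and (non-VK_WHOLE_SIZE) sizes passed to flush/invalidate must be multiples of
// VkPhysicalDeviceLimits::nonCoherentAtomSize; a size that reaches the exact end of the allocation is exempt.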
bool CoreChecks::ValidateMappedMemoryRangeDeviceLimits(const char *func_name, uint32_t mem_range_count,
const VkMappedMemoryRange *mem_ranges) {
bool skip = false;
for (uint32_t i = 0; i < mem_range_count; ++i) {
uint64_t atom_size = phys_dev_props.limits.nonCoherentAtomSize;
if (SafeModulo(mem_ranges[i].offset, atom_size) != 0) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
HandleToUint64(mem_ranges->memory), "VUID-VkMappedMemoryRange-offset-00687",
"%s: Offset in pMemRanges[%d] is 0x%" PRIxLEAST64
", which is not a multiple of VkPhysicalDeviceLimits::nonCoherentAtomSize (0x%" PRIxLEAST64 ").",
func_name, i, mem_ranges[i].offset, atom_size);
}
        auto mem_info = GetDevMemState(mem_ranges[i].memory);
        // Guard the mem_info dereference; an invalid memory handle is reported by object tracking elsewhere
        if (mem_info && (mem_ranges[i].size != VK_WHOLE_SIZE) &&
            (mem_ranges[i].size + mem_ranges[i].offset != mem_info->alloc_info.allocationSize) &&
            (SafeModulo(mem_ranges[i].size, atom_size) != 0)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
HandleToUint64(mem_ranges->memory), "VUID-VkMappedMemoryRange-size-01390",
"%s: Size in pMemRanges[%d] is 0x%" PRIxLEAST64
", which is not a multiple of VkPhysicalDeviceLimits::nonCoherentAtomSize (0x%" PRIxLEAST64 ").",
func_name, i, mem_ranges[i].size, atom_size);
}
}
return skip;
}
bool CoreChecks::PreCallValidateFlushMappedMemoryRanges(VkDevice device, uint32_t memRangeCount,
const VkMappedMemoryRange *pMemRanges) {
bool skip = false;
skip |= ValidateMappedMemoryRangeDeviceLimits("vkFlushMappedMemoryRanges", memRangeCount, pMemRanges);
skip |= ValidateAndCopyNoncoherentMemoryToDriver(memRangeCount, pMemRanges);
skip |= ValidateMemoryIsMapped("vkFlushMappedMemoryRanges", memRangeCount, pMemRanges);
return skip;
}
bool CoreChecks::PreCallValidateInvalidateMappedMemoryRanges(VkDevice device, uint32_t memRangeCount,
const VkMappedMemoryRange *pMemRanges) {
bool skip = false;
skip |= ValidateMappedMemoryRangeDeviceLimits("vkInvalidateMappedMemoryRanges", memRangeCount, pMemRanges);
skip |= ValidateMemoryIsMapped("vkInvalidateMappedMemoryRanges", memRangeCount, pMemRanges);
return skip;
}
void CoreChecks::PostCallRecordInvalidateMappedMemoryRanges(VkDevice device, uint32_t memRangeCount,
const VkMappedMemoryRange *pMemRanges, VkResult result) {
if (VK_SUCCESS == result) {
// Update our shadow copy with modified driver data
CopyNoncoherentMemoryFromDriver(memRangeCount, pMemRanges);
}
}
bool CoreChecks::PreCallValidateGetDeviceMemoryCommitment(VkDevice device, VkDeviceMemory mem, VkDeviceSize *pCommittedMem) {
bool skip = false;
auto mem_info = GetDevMemState(mem);
if (mem_info) {
if ((phys_dev_mem_props.memoryTypes[mem_info->alloc_info.memoryTypeIndex].propertyFlags &
VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) == 0) {
skip = log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
HandleToUint64(mem), "VUID-vkGetDeviceMemoryCommitment-memory-00690",
"Querying commitment for memory without VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT set: %s.",
report_data->FormatHandle(mem).c_str());
}
}
return skip;
}
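// Shared validation for vkBindImageMemory() and vkBindImageMemory2[KHR](): checks the binding itself, the
// VkMemoryRequirements contract (alignment, size, memory type bits), dedicated-allocation constraints, and the
// swapchain-image rules carried in VkBindImageMemorySwapchainInfoKHR. Images with an Android external format must
// not have had vkGetImageMemoryRequirements() called on them and skip the remaining checks.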
bool CoreChecks::ValidateBindImageMemory(const VkBindImageMemoryInfo &bindInfo, const char *api_name) const {
bool skip = false;
const IMAGE_STATE *image_state = GetImageState(bindInfo.image);
if (image_state) {
// Track objects tied to memory
uint64_t image_handle = HandleToUint64(bindInfo.image);
skip = ValidateSetMemBinding(bindInfo.memory, VulkanTypedHandle(bindInfo.image, kVulkanObjectTypeImage), api_name);
#ifdef VK_USE_PLATFORM_ANDROID_KHR
if (image_state->external_format_android) {
if (image_state->memory_requirements_checked) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, image_handle,
kVUID_Core_BindImage_InvalidMemReqQuery,
"%s: Must not call vkGetImageMemoryRequirements on %s that will be bound to an external "
"Android hardware buffer.",
api_name, report_data->FormatHandle(bindInfo.image).c_str());
}
return skip;
}
#endif // VK_USE_PLATFORM_ANDROID_KHR
if (!image_state->memory_requirements_checked) {
// There's not an explicit requirement in the spec to call vkGetImageMemoryRequirements() prior to calling
// BindImageMemory but it's implied in that memory being bound must conform with VkMemoryRequirements from
// vkGetImageMemoryRequirements()
skip |= log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, image_handle,
kVUID_Core_BindImage_NoMemReqQuery,
"%s: Binding memory to %s but vkGetImageMemoryRequirements() has not been called on that image.",
api_name, report_data->FormatHandle(bindInfo.image).c_str());
            // Fall back to the requirements fetched at CreateImage time for the validation below.
}
// Validate bound memory range information
const auto mem_info = GetDevMemState(bindInfo.memory);
if (mem_info) {
skip |= ValidateInsertImageMemoryRange(bindInfo.image, mem_info, bindInfo.memoryOffset, image_state->requirements,
image_state->createInfo.tiling == VK_IMAGE_TILING_LINEAR, api_name);
skip |= ValidateMemoryTypes(mem_info, image_state->requirements.memoryTypeBits, api_name,
"VUID-vkBindImageMemory-memory-01047");
}
// Validate memory requirements alignment
if (SafeModulo(bindInfo.memoryOffset, image_state->requirements.alignment) != 0) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, image_handle,
"VUID-vkBindImageMemory-memoryOffset-01048",
"%s: memoryOffset is 0x%" PRIxLEAST64
" but must be an integer multiple of the VkMemoryRequirements::alignment value 0x%" PRIxLEAST64
", returned from a call to vkGetImageMemoryRequirements with image.",
api_name, bindInfo.memoryOffset, image_state->requirements.alignment);
}
if (mem_info) {
// Validate memory requirements size
if (image_state->requirements.size > mem_info->alloc_info.allocationSize - bindInfo.memoryOffset) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, image_handle,
"VUID-vkBindImageMemory-size-01049",
"%s: memory size minus memoryOffset is 0x%" PRIxLEAST64
" but must be at least as large as VkMemoryRequirements::size value 0x%" PRIxLEAST64
", returned from a call to vkGetImageMemoryRequirements with image.",
api_name, mem_info->alloc_info.allocationSize - bindInfo.memoryOffset, image_state->requirements.size);
}
// Validate dedicated allocation
if (mem_info->is_dedicated && ((mem_info->dedicated_image != bindInfo.image) || (bindInfo.memoryOffset != 0))) {
// TODO: Add vkBindImageMemory2KHR error message when added to spec.
auto validation_error = kVUIDUndefined;
if (strcmp(api_name, "vkBindImageMemory()") == 0) {
validation_error = "VUID-vkBindImageMemory-memory-01509";
}
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, image_handle,
validation_error,
"%s: for dedicated memory allocation %s, VkMemoryDedicatedAllocateInfoKHR:: %s must be equal "
"to %s and memoryOffset 0x%" PRIxLEAST64 " must be zero.",
api_name, report_data->FormatHandle(bindInfo.memory).c_str(),
report_data->FormatHandle(mem_info->dedicated_image).c_str(),
report_data->FormatHandle(bindInfo.image).c_str(), bindInfo.memoryOffset);
}
}
const auto swapchain_info = lvl_find_in_chain<VkBindImageMemorySwapchainInfoKHR>(bindInfo.pNext);
if (swapchain_info) {
if (bindInfo.memory != VK_NULL_HANDLE) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, image_handle,
"VUID-VkBindImageMemoryInfo-pNext-01631", "%s: %s is not VK_NULL_HANDLE.", api_name,
report_data->FormatHandle(bindInfo.memory).c_str());
}
const auto swapchain_state = GetSwapchainState(swapchain_info->swapchain);
if (swapchain_state && swapchain_state->images.size() <= swapchain_info->imageIndex) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, image_handle,
"VUID-VkBindImageMemorySwapchainInfoKHR-imageIndex-01644",
"%s: imageIndex (%i) is out of bounds of %s images (size: %i)", api_name, swapchain_info->imageIndex,
report_data->FormatHandle(swapchain_info->swapchain).c_str(), (int)swapchain_state->images.size());
}
} else {
if (image_state->create_from_swapchain) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, image_handle,
"VUID-VkBindImageMemoryInfo-image-01630",
"%s: pNext of VkBindImageMemoryInfo doesn't include VkBindImageMemorySwapchainInfoKHR.", api_name);
}
if (!mem_info) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, image_handle,
"VUID-VkBindImageMemoryInfo-pNext-01632", "%s: %s is invalid.", api_name,
report_data->FormatHandle(bindInfo.memory).c_str());
}
}
}
return skip;
}
bool CoreChecks::PreCallValidateBindImageMemory(VkDevice device, VkImage image, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
VkBindImageMemoryInfo bindInfo = {};
bindInfo.sType = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO;
bindInfo.image = image;
bindInfo.memory = mem;
bindInfo.memoryOffset = memoryOffset;
return ValidateBindImageMemory(bindInfo, "vkBindImageMemory()");
}
void ValidationStateTracker::UpdateBindImageMemoryState(const VkBindImageMemoryInfo &bindInfo) {
IMAGE_STATE *image_state = GetImageState(bindInfo.image);
if (image_state) {
const auto swapchain_info = lvl_find_in_chain<VkBindImageMemorySwapchainInfoKHR>(bindInfo.pNext);
if (swapchain_info) {
image_state->bind_swapchain = swapchain_info->swapchain;
image_state->bind_swapchain_imageIndex = swapchain_info->imageIndex;
} else {
// Track bound memory range information
auto mem_info = GetDevMemState(bindInfo.memory);
if (mem_info) {
InsertImageMemoryRange(bindInfo.image, mem_info, bindInfo.memoryOffset, image_state->requirements,
image_state->createInfo.tiling == VK_IMAGE_TILING_LINEAR);
}
// Track objects tied to memory
SetMemBinding(bindInfo.memory, image_state, bindInfo.memoryOffset,
VulkanTypedHandle(bindInfo.image, kVulkanObjectTypeImage));
}
}
}
void ValidationStateTracker::PostCallRecordBindImageMemory(VkDevice device, VkImage image, VkDeviceMemory mem,
VkDeviceSize memoryOffset, VkResult result) {
if (VK_SUCCESS != result) return;
VkBindImageMemoryInfo bindInfo = {};
bindInfo.sType = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO;
bindInfo.image = image;
bindInfo.memory = mem;
bindInfo.memoryOffset = memoryOffset;
UpdateBindImageMemoryState(bindInfo);
}
bool CoreChecks::PreCallValidateBindImageMemory2(VkDevice device, uint32_t bindInfoCount,
const VkBindImageMemoryInfoKHR *pBindInfos) {
bool skip = false;
char api_name[128];
for (uint32_t i = 0; i < bindInfoCount; i++) {
sprintf(api_name, "vkBindImageMemory2() pBindInfos[%u]", i);
skip |= ValidateBindImageMemory(pBindInfos[i], api_name);
}
return skip;
}
bool CoreChecks::PreCallValidateBindImageMemory2KHR(VkDevice device, uint32_t bindInfoCount,
const VkBindImageMemoryInfoKHR *pBindInfos) {
bool skip = false;
char api_name[128];
for (uint32_t i = 0; i < bindInfoCount; i++) {
sprintf(api_name, "vkBindImageMemory2KHR() pBindInfos[%u]", i);
skip |= ValidateBindImageMemory(pBindInfos[i], api_name);
}
return skip;
}
void ValidationStateTracker::PostCallRecordBindImageMemory2(VkDevice device, uint32_t bindInfoCount,
const VkBindImageMemoryInfoKHR *pBindInfos, VkResult result) {
if (VK_SUCCESS != result) return;
for (uint32_t i = 0; i < bindInfoCount; i++) {
UpdateBindImageMemoryState(pBindInfos[i]);
}
}
void ValidationStateTracker::PostCallRecordBindImageMemory2KHR(VkDevice device, uint32_t bindInfoCount,
const VkBindImageMemoryInfoKHR *pBindInfos, VkResult result) {
if (VK_SUCCESS != result) return;
for (uint32_t i = 0; i < bindInfoCount; i++) {
UpdateBindImageMemoryState(pBindInfos[i]);
}
}
bool CoreChecks::PreCallValidateSetEvent(VkDevice device, VkEvent event) {
bool skip = false;
auto event_state = GetEventState(event);
if (event_state) {
if (event_state->write_in_use) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
HandleToUint64(event), kVUID_Core_DrawState_QueueForwardProgress,
"Cannot call vkSetEvent() on %s that is already in use by a command buffer.",
report_data->FormatHandle(event).c_str());
}
}
return skip;
}
void CoreChecks::PreCallRecordSetEvent(VkDevice device, VkEvent event) {
auto event_state = GetEventState(event);
if (event_state) {
event_state->stageMask = VK_PIPELINE_STAGE_HOST_BIT;
}
// Host setting event is visible to all queues immediately so update stageMask for any queue that's seen this event
// TODO : For correctness this needs separate fix to verify that app doesn't make incorrect assumptions about the
// ordering of this command in relation to vkCmd[Set|Reset]Events (see GH297)
for (auto queue_data : queueMap) {
auto event_entry = queue_data.second.eventToStageMap.find(event);
if (event_entry != queue_data.second.eventToStageMap.end()) {
event_entry->second |= VK_PIPELINE_STAGE_HOST_BIT;
}
}
}
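// vkQueueBindSparse() behaves like a queue submission for synchronization: walk the wait/signal semaphore lists to
// flag waits that can never be satisfied and signals of already-signaled semaphores, then warn about sparse images
// bound without their (sparse) memory requirements having been queried, or without a required metadata binding.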
bool CoreChecks::PreCallValidateQueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo *pBindInfo,
VkFence fence) {
auto pFence = GetFenceState(fence);
bool skip = ValidateFenceForSubmit(pFence);
if (skip) {
return true;
}
unordered_set<VkSemaphore> signaled_semaphores;
unordered_set<VkSemaphore> unsignaled_semaphores;
unordered_set<VkSemaphore> internal_semaphores;
for (uint32_t bindIdx = 0; bindIdx < bindInfoCount; ++bindIdx) {
const VkBindSparseInfo &bindInfo = pBindInfo[bindIdx];
std::vector<SEMAPHORE_WAIT> semaphore_waits;
std::vector<VkSemaphore> semaphore_signals;
for (uint32_t i = 0; i < bindInfo.waitSemaphoreCount; ++i) {
VkSemaphore semaphore = bindInfo.pWaitSemaphores[i];
auto pSemaphore = GetSemaphoreState(semaphore);
if (pSemaphore && (pSemaphore->scope == kSyncScopeInternal || internal_semaphores.count(semaphore))) {
if (unsignaled_semaphores.count(semaphore) ||
(!(signaled_semaphores.count(semaphore)) && !(pSemaphore->signaled))) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
HandleToUint64(semaphore), kVUID_Core_DrawState_QueueForwardProgress,
"%s is waiting on %s that has no way to be signaled.", report_data->FormatHandle(queue).c_str(),
report_data->FormatHandle(semaphore).c_str());
} else {
signaled_semaphores.erase(semaphore);
unsignaled_semaphores.insert(semaphore);
}
}
if (pSemaphore && pSemaphore->scope == kSyncScopeExternalTemporary) {
internal_semaphores.insert(semaphore);
}
}
for (uint32_t i = 0; i < bindInfo.signalSemaphoreCount; ++i) {
VkSemaphore semaphore = bindInfo.pSignalSemaphores[i];
auto pSemaphore = GetSemaphoreState(semaphore);
if (pSemaphore && pSemaphore->scope == kSyncScopeInternal) {
if (signaled_semaphores.count(semaphore) || (!(unsignaled_semaphores.count(semaphore)) && pSemaphore->signaled)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
HandleToUint64(semaphore), kVUID_Core_DrawState_QueueForwardProgress,
"%s is signaling %s that was previously signaled by %s but has not since "
"been waited on by any queue.",
report_data->FormatHandle(queue).c_str(), report_data->FormatHandle(semaphore).c_str(),
report_data->FormatHandle(pSemaphore->signaler.first).c_str());
} else {
unsignaled_semaphores.erase(semaphore);
signaled_semaphores.insert(semaphore);
}
}
}
// Store sparse binding image_state and after binding is complete make sure that any requiring metadata have it bound
std::unordered_set<IMAGE_STATE *> sparse_images;
// If we're binding sparse image memory make sure reqs were queried and note if metadata is required and bound
for (uint32_t i = 0; i < bindInfo.imageBindCount; ++i) {
const auto &image_bind = bindInfo.pImageBinds[i];
auto image_state = GetImageState(image_bind.image);
if (!image_state)
continue; // Param/Object validation should report image_bind.image handles being invalid, so just skip here.
sparse_images.insert(image_state);
if (image_state->createInfo.flags & VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT) {
if (!image_state->get_sparse_reqs_called || image_state->sparse_requirements.empty()) {
// For now just warning if sparse image binding occurs without calling to get reqs first
return log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(image_state->image), kVUID_Core_MemTrack_InvalidState,
"vkQueueBindSparse(): Binding sparse memory to %s without first calling "
"vkGetImageSparseMemoryRequirements[2KHR]() to retrieve requirements.",
report_data->FormatHandle(image_state->image).c_str());
}
}
if (!image_state->memory_requirements_checked) {
// For now just warning if sparse image binding occurs without calling to get reqs first
return log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(image_state->image), kVUID_Core_MemTrack_InvalidState,
"vkQueueBindSparse(): Binding sparse memory to %s without first calling "
"vkGetImageMemoryRequirements() to retrieve requirements.",
report_data->FormatHandle(image_state->image).c_str());
}
}
for (uint32_t i = 0; i < bindInfo.imageOpaqueBindCount; ++i) {
const auto &image_opaque_bind = bindInfo.pImageOpaqueBinds[i];
            auto image_state = GetImageState(image_opaque_bind.image);
            if (!image_state)
                continue;  // Param/Object validation should report image_opaque_bind.image handles being invalid, so just skip here.
sparse_images.insert(image_state);
if (image_state->createInfo.flags & VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT) {
if (!image_state->get_sparse_reqs_called || image_state->sparse_requirements.empty()) {
// For now just warning if sparse image binding occurs without calling to get reqs first
return log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(image_state->image), kVUID_Core_MemTrack_InvalidState,
"vkQueueBindSparse(): Binding opaque sparse memory to %s without first calling "
"vkGetImageSparseMemoryRequirements[2KHR]() to retrieve requirements.",
report_data->FormatHandle(image_state->image).c_str());
}
}
if (!image_state->memory_requirements_checked) {
// For now just warning if sparse image binding occurs without calling to get reqs first
return log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(image_state->image), kVUID_Core_MemTrack_InvalidState,
"vkQueueBindSparse(): Binding opaque sparse memory to %s without first calling "
"vkGetImageMemoryRequirements() to retrieve requirements.",
report_data->FormatHandle(image_state->image).c_str());
}
for (uint32_t j = 0; j < image_opaque_bind.bindCount; ++j) {
if (image_opaque_bind.pBinds[j].flags & VK_SPARSE_MEMORY_BIND_METADATA_BIT) {
image_state->sparse_metadata_bound = true;
}
}
}
for (const auto &sparse_image_state : sparse_images) {
if (sparse_image_state->sparse_metadata_required && !sparse_image_state->sparse_metadata_bound) {
// Warn if sparse image binding metadata required for image with sparse binding, but metadata not bound
return log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(sparse_image_state->image), kVUID_Core_MemTrack_InvalidState,
"vkQueueBindSparse(): Binding sparse memory to %s which requires a metadata aspect but no "
"binding with VK_SPARSE_MEMORY_BIND_METADATA_BIT set was made.",
report_data->FormatHandle(sparse_image_state->image).c_str());
}
}
}
return skip;
}
void CoreChecks::PostCallRecordQueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo *pBindInfo,
VkFence fence, VkResult result) {
if (result != VK_SUCCESS) return;
uint64_t early_retire_seq = 0;
auto pFence = GetFenceState(fence);
auto pQueue = GetQueueState(queue);
if (pFence) {
if (pFence->scope == kSyncScopeInternal) {
SubmitFence(pQueue, pFence, std::max(1u, bindInfoCount));
if (!bindInfoCount) {
// No work to do, just dropping a fence in the queue by itself.
pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(), std::vector<SEMAPHORE_WAIT>(),
std::vector<VkSemaphore>(), std::vector<VkSemaphore>(), fence);
}
} else {
// Retire work up until this fence early, we will not see the wait that corresponds to this signal
early_retire_seq = pQueue->seq + pQueue->submissions.size();
if (!external_sync_warning) {
external_sync_warning = true;
log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT, HandleToUint64(fence),
kVUID_Core_DrawState_QueueForwardProgress,
"vkQueueBindSparse(): Signaling external %s on %s will disable validation of preceding command "
"buffer lifecycle states and the in-use status of associated objects.",
report_data->FormatHandle(fence).c_str(), report_data->FormatHandle(queue).c_str());
}
}
}
for (uint32_t bindIdx = 0; bindIdx < bindInfoCount; ++bindIdx) {
const VkBindSparseInfo &bindInfo = pBindInfo[bindIdx];
// Track objects tied to memory
for (uint32_t j = 0; j < bindInfo.bufferBindCount; j++) {
for (uint32_t k = 0; k < bindInfo.pBufferBinds[j].bindCount; k++) {
auto sparse_binding = bindInfo.pBufferBinds[j].pBinds[k];
SetSparseMemBinding({sparse_binding.memory, sparse_binding.memoryOffset, sparse_binding.size},
VulkanTypedHandle(bindInfo.pBufferBinds[j].buffer, kVulkanObjectTypeBuffer));
}
}
for (uint32_t j = 0; j < bindInfo.imageOpaqueBindCount; j++) {
for (uint32_t k = 0; k < bindInfo.pImageOpaqueBinds[j].bindCount; k++) {
auto sparse_binding = bindInfo.pImageOpaqueBinds[j].pBinds[k];
SetSparseMemBinding({sparse_binding.memory, sparse_binding.memoryOffset, sparse_binding.size},
VulkanTypedHandle(bindInfo.pImageOpaqueBinds[j].image, kVulkanObjectTypeImage));
}
}
for (uint32_t j = 0; j < bindInfo.imageBindCount; j++) {
for (uint32_t k = 0; k < bindInfo.pImageBinds[j].bindCount; k++) {
auto sparse_binding = bindInfo.pImageBinds[j].pBinds[k];
// TODO: This size is broken for non-opaque bindings, need to update to comprehend full sparse binding data
VkDeviceSize size = sparse_binding.extent.depth * sparse_binding.extent.height * sparse_binding.extent.width * 4;
SetSparseMemBinding({sparse_binding.memory, sparse_binding.memoryOffset, size},
VulkanTypedHandle(bindInfo.pImageBinds[j].image, kVulkanObjectTypeImage));
}
}
std::vector<SEMAPHORE_WAIT> semaphore_waits;
std::vector<VkSemaphore> semaphore_signals;
std::vector<VkSemaphore> semaphore_externals;
for (uint32_t i = 0; i < bindInfo.waitSemaphoreCount; ++i) {
VkSemaphore semaphore = bindInfo.pWaitSemaphores[i];
auto pSemaphore = GetSemaphoreState(semaphore);
if (pSemaphore) {
if (pSemaphore->scope == kSyncScopeInternal) {
if (pSemaphore->signaler.first != VK_NULL_HANDLE) {
semaphore_waits.push_back({semaphore, pSemaphore->signaler.first, pSemaphore->signaler.second});
pSemaphore->in_use.fetch_add(1);
}
pSemaphore->signaler.first = VK_NULL_HANDLE;
pSemaphore->signaled = false;
} else {
semaphore_externals.push_back(semaphore);
pSemaphore->in_use.fetch_add(1);
if (pSemaphore->scope == kSyncScopeExternalTemporary) {
pSemaphore->scope = kSyncScopeInternal;
}
}
}
}
for (uint32_t i = 0; i < bindInfo.signalSemaphoreCount; ++i) {
VkSemaphore semaphore = bindInfo.pSignalSemaphores[i];
auto pSemaphore = GetSemaphoreState(semaphore);
if (pSemaphore) {
if (pSemaphore->scope == kSyncScopeInternal) {
pSemaphore->signaler.first = queue;
pSemaphore->signaler.second = pQueue->seq + pQueue->submissions.size() + 1;
pSemaphore->signaled = true;
pSemaphore->in_use.fetch_add(1);
semaphore_signals.push_back(semaphore);
} else {
// Retire work up until this submit early, we will not see the wait that corresponds to this signal
early_retire_seq = std::max(early_retire_seq, pQueue->seq + pQueue->submissions.size() + 1);
if (!external_sync_warning) {
external_sync_warning = true;
log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
HandleToUint64(semaphore), kVUID_Core_DrawState_QueueForwardProgress,
"vkQueueBindSparse(): Signaling external %s on %s will disable validation of "
"preceding command buffer lifecycle states and the in-use status of associated objects.",
report_data->FormatHandle(semaphore).c_str(), report_data->FormatHandle(queue).c_str());
}
}
}
}
pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(), semaphore_waits, semaphore_signals, semaphore_externals,
bindIdx == bindInfoCount - 1 ? fence : (VkFence)VK_NULL_HANDLE);
}
if (early_retire_seq) {
RetireWorkOnQueue(pQueue, early_retire_seq, true);
}
}
void ValidationStateTracker::PostCallRecordCreateSemaphore(VkDevice device, const VkSemaphoreCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkSemaphore *pSemaphore,
VkResult result) {
if (VK_SUCCESS != result) return;
std::unique_ptr<SEMAPHORE_STATE> semaphore_state(new SEMAPHORE_STATE{});
semaphore_state->signaler.first = VK_NULL_HANDLE;
semaphore_state->signaler.second = 0;
semaphore_state->signaled = false;
semaphore_state->scope = kSyncScopeInternal;
semaphoreMap[*pSemaphore] = std::move(semaphore_state);
}
bool CoreChecks::ValidateImportSemaphore(VkSemaphore semaphore, const char *caller_name) {
bool skip = false;
SEMAPHORE_STATE *sema_node = GetSemaphoreState(semaphore);
if (sema_node) {
const VulkanTypedHandle obj_struct(semaphore, kVulkanObjectTypeSemaphore);
skip |= ValidateObjectNotInUse(sema_node, obj_struct, caller_name, kVUIDUndefined);
}
return skip;
}
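// Importing a payload changes how much state can be tracked: Sync FD imports and imports with
// VK_SEMAPHORE_IMPORT_TEMPORARY_BIT_KHR have temporary transference, so the semaphore becomes external only until
// its next wait; any other import makes it permanently external and its state opaque to validation.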
void CoreChecks::RecordImportSemaphoreState(VkSemaphore semaphore, VkExternalSemaphoreHandleTypeFlagBitsKHR handle_type,
VkSemaphoreImportFlagsKHR flags) {
SEMAPHORE_STATE *sema_node = GetSemaphoreState(semaphore);
if (sema_node && sema_node->scope != kSyncScopeExternalPermanent) {
if ((handle_type == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT_KHR || flags & VK_SEMAPHORE_IMPORT_TEMPORARY_BIT_KHR) &&
sema_node->scope == kSyncScopeInternal) {
sema_node->scope = kSyncScopeExternalTemporary;
} else {
sema_node->scope = kSyncScopeExternalPermanent;
}
}
}
#ifdef VK_USE_PLATFORM_WIN32_KHR
bool CoreChecks::PreCallValidateImportSemaphoreWin32HandleKHR(
VkDevice device, const VkImportSemaphoreWin32HandleInfoKHR *pImportSemaphoreWin32HandleInfo) {
return ValidateImportSemaphore(pImportSemaphoreWin32HandleInfo->semaphore, "vkImportSemaphoreWin32HandleKHR");
}
void CoreChecks::PostCallRecordImportSemaphoreWin32HandleKHR(
VkDevice device, const VkImportSemaphoreWin32HandleInfoKHR *pImportSemaphoreWin32HandleInfo, VkResult result) {
if (VK_SUCCESS != result) return;
RecordImportSemaphoreState(pImportSemaphoreWin32HandleInfo->semaphore, pImportSemaphoreWin32HandleInfo->handleType,
pImportSemaphoreWin32HandleInfo->flags);
}
#endif // VK_USE_PLATFORM_WIN32_KHR
bool CoreChecks::PreCallValidateImportSemaphoreFdKHR(VkDevice device, const VkImportSemaphoreFdInfoKHR *pImportSemaphoreFdInfo) {
return ValidateImportSemaphore(pImportSemaphoreFdInfo->semaphore, "vkImportSemaphoreFdKHR");
}
void CoreChecks::PostCallRecordImportSemaphoreFdKHR(VkDevice device, const VkImportSemaphoreFdInfoKHR *pImportSemaphoreFdInfo,
VkResult result) {
if (VK_SUCCESS != result) return;
RecordImportSemaphoreState(pImportSemaphoreFdInfo->semaphore, pImportSemaphoreFdInfo->handleType,
pImportSemaphoreFdInfo->flags);
}
void CoreChecks::RecordGetExternalSemaphoreState(VkSemaphore semaphore, VkExternalSemaphoreHandleTypeFlagBitsKHR handle_type) {
SEMAPHORE_STATE *semaphore_state = GetSemaphoreState(semaphore);
if (semaphore_state && handle_type != VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT_KHR) {
// Cannot track semaphore state once it is exported, except for Sync FD handle types which have copy transference
semaphore_state->scope = kSyncScopeExternalPermanent;
}
}
#ifdef VK_USE_PLATFORM_WIN32_KHR
void CoreChecks::PostCallRecordGetSemaphoreWin32HandleKHR(VkDevice device,
const VkSemaphoreGetWin32HandleInfoKHR *pGetWin32HandleInfo,
HANDLE *pHandle, VkResult result) {
if (VK_SUCCESS != result) return;
RecordGetExternalSemaphoreState(pGetWin32HandleInfo->semaphore, pGetWin32HandleInfo->handleType);
}
#endif
void CoreChecks::PostCallRecordGetSemaphoreFdKHR(VkDevice device, const VkSemaphoreGetFdInfoKHR *pGetFdInfo, int *pFd,
VkResult result) {
if (VK_SUCCESS != result) return;
RecordGetExternalSemaphoreState(pGetFdInfo->semaphore, pGetFdInfo->handleType);
}
bool CoreChecks::ValidateImportFence(VkFence fence, const char *caller_name) {
FENCE_STATE *fence_node = GetFenceState(fence);
bool skip = false;
if (fence_node && fence_node->scope == kSyncScopeInternal && fence_node->state == FENCE_INFLIGHT) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT, HandleToUint64(fence),
kVUIDUndefined, "Cannot call %s on %s that is currently in use.", caller_name,
report_data->FormatHandle(fence).c_str());
}
return skip;
}
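// Same transference rules as semaphores: a Sync FD or TEMPORARY-flagged import leaves the fence only temporarily
// external; any other import makes it permanently external.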
void CoreChecks::RecordImportFenceState(VkFence fence, VkExternalFenceHandleTypeFlagBitsKHR handle_type,
VkFenceImportFlagsKHR flags) {
FENCE_STATE *fence_node = GetFenceState(fence);
if (fence_node && fence_node->scope != kSyncScopeExternalPermanent) {
if ((handle_type == VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT_KHR || flags & VK_FENCE_IMPORT_TEMPORARY_BIT_KHR) &&
fence_node->scope == kSyncScopeInternal) {
fence_node->scope = kSyncScopeExternalTemporary;
} else {
fence_node->scope = kSyncScopeExternalPermanent;
}
}
}
#ifdef VK_USE_PLATFORM_WIN32_KHR
bool CoreChecks::PreCallValidateImportFenceWin32HandleKHR(VkDevice device,
const VkImportFenceWin32HandleInfoKHR *pImportFenceWin32HandleInfo) {
return ValidateImportFence(pImportFenceWin32HandleInfo->fence, "vkImportFenceWin32HandleKHR");
}
void CoreChecks::PostCallRecordImportFenceWin32HandleKHR(VkDevice device,
const VkImportFenceWin32HandleInfoKHR *pImportFenceWin32HandleInfo,
VkResult result) {
if (VK_SUCCESS != result) return;
RecordImportFenceState(pImportFenceWin32HandleInfo->fence, pImportFenceWin32HandleInfo->handleType,
pImportFenceWin32HandleInfo->flags);
}
#endif // VK_USE_PLATFORM_WIN32_KHR
bool CoreChecks::PreCallValidateImportFenceFdKHR(VkDevice device, const VkImportFenceFdInfoKHR *pImportFenceFdInfo) {
return ValidateImportFence(pImportFenceFdInfo->fence, "vkImportFenceFdKHR");
}
void CoreChecks::PostCallRecordImportFenceFdKHR(VkDevice device, const VkImportFenceFdInfoKHR *pImportFenceFdInfo,
VkResult result) {
if (VK_SUCCESS != result) return;
RecordImportFenceState(pImportFenceFdInfo->fence, pImportFenceFdInfo->handleType, pImportFenceFdInfo->flags);
}
void CoreChecks::RecordGetExternalFenceState(VkFence fence, VkExternalFenceHandleTypeFlagBitsKHR handle_type) {
FENCE_STATE *fence_state = GetFenceState(fence);
if (fence_state) {
if (handle_type != VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT_KHR) {
// Export with reference transference becomes external
fence_state->scope = kSyncScopeExternalPermanent;
} else if (fence_state->scope == kSyncScopeInternal) {
// Export with copy transference has a side effect of resetting the fence
fence_state->state = FENCE_UNSIGNALED;
}
}
}
#ifdef VK_USE_PLATFORM_WIN32_KHR
void CoreChecks::PostCallRecordGetFenceWin32HandleKHR(VkDevice device, const VkFenceGetWin32HandleInfoKHR *pGetWin32HandleInfo,
HANDLE *pHandle, VkResult result) {
if (VK_SUCCESS != result) return;
RecordGetExternalFenceState(pGetWin32HandleInfo->fence, pGetWin32HandleInfo->handleType);
}
#endif
void CoreChecks::PostCallRecordGetFenceFdKHR(VkDevice device, const VkFenceGetFdInfoKHR *pGetFdInfo, int *pFd, VkResult result) {
if (VK_SUCCESS != result) return;
RecordGetExternalFenceState(pGetFdInfo->fence, pGetFdInfo->handleType);
}
void ValidationStateTracker::PostCallRecordCreateEvent(VkDevice device, const VkEventCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkEvent *pEvent, VkResult result) {
if (VK_SUCCESS != result) return;
eventMap[*pEvent].write_in_use = 0;
eventMap[*pEvent].stageMask = VkPipelineStageFlags(0);
}
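// Validate VkSwapchainCreateInfoKHR against the queried surface capabilities (image count, extent, transform,
// composite alpha, array layers, usage, protected support) and the supported surface formats, warning when those
// capability queries were never made before swapchain creation.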
bool CoreChecks::ValidateCreateSwapchain(const char *func_name, VkSwapchainCreateInfoKHR const *pCreateInfo,
const SURFACE_STATE *surface_state, const SWAPCHAIN_NODE *old_swapchain_state) const {
// All physical devices and queue families are required to be able to present to any native window on Android; require the
// application to have established support on any other platform.
if (!instance_extensions.vk_khr_android_surface) {
auto support_predicate = [this](decltype(surface_state->gpu_queue_support)::value_type qs) -> bool {
// TODO: should restrict search only to queue families of VkDeviceQueueCreateInfos, not whole phys. device
return (qs.first.gpu == physical_device) && qs.second;
};
const auto &support = surface_state->gpu_queue_support;
bool is_supported = std::any_of(support.begin(), support.end(), support_predicate);
if (!is_supported) {
if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
"VUID-VkSwapchainCreateInfoKHR-surface-01270",
"%s: pCreateInfo->surface is not known at this time to be supported for presentation by this device. The "
"vkGetPhysicalDeviceSurfaceSupportKHR() must be called beforehand, and it must return VK_TRUE support with "
"this surface for at least one queue family of this device.",
func_name))
return true;
}
}
if (old_swapchain_state) {
if (old_swapchain_state->createInfo.surface != pCreateInfo->surface) {
if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
HandleToUint64(pCreateInfo->oldSwapchain), "VUID-VkSwapchainCreateInfoKHR-oldSwapchain-01933",
"%s: pCreateInfo->oldSwapchain's surface is not pCreateInfo->surface", func_name))
return true;
}
if (old_swapchain_state->retired) {
if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
HandleToUint64(pCreateInfo->oldSwapchain), "VUID-VkSwapchainCreateInfoKHR-oldSwapchain-01933",
"%s: pCreateInfo->oldSwapchain is retired", func_name))
return true;
}
}
if ((pCreateInfo->imageExtent.width == 0) || (pCreateInfo->imageExtent.height == 0)) {
if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
"VUID-VkSwapchainCreateInfoKHR-imageExtent-01689", "%s: pCreateInfo->imageExtent = (%d, %d) which is illegal.",
func_name, pCreateInfo->imageExtent.width, pCreateInfo->imageExtent.height))
return true;
}
auto physical_device_state = GetPhysicalDeviceState();
bool skip = false;
VkSurfaceTransformFlagBitsKHR currentTransform = physical_device_state->surfaceCapabilities.currentTransform;
if ((pCreateInfo->preTransform & currentTransform) != pCreateInfo->preTransform) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
HandleToUint64(physical_device), kVUID_Core_Swapchain_PreTransform,
"%s: pCreateInfo->preTransform (%s) doesn't match the currentTransform (%s) returned by "
"vkGetPhysicalDeviceSurfaceCapabilitiesKHR, the presentation engine will transform the image "
"content as part of the presentation operation.",
func_name, string_VkSurfaceTransformFlagBitsKHR(pCreateInfo->preTransform),
string_VkSurfaceTransformFlagBitsKHR(currentTransform));
}
if (physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState == UNCALLED) {
if (log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
HandleToUint64(physical_device), kVUID_Core_DrawState_SwapchainCreateBeforeQuery,
"%s: surface capabilities not retrieved for this physical device", func_name))
return true;
}
VkSurfaceCapabilitiesKHR capabilities{};
DispatchGetPhysicalDeviceSurfaceCapabilitiesKHR(physical_device_state->phys_device, pCreateInfo->surface, &capabilities);
// Validate pCreateInfo->minImageCount against VkSurfaceCapabilitiesKHR::{min|max}ImageCount:
if (pCreateInfo->minImageCount < capabilities.minImageCount) {
if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
"VUID-VkSwapchainCreateInfoKHR-minImageCount-01271",
"%s called with minImageCount = %d, which is outside the bounds returned by "
"vkGetPhysicalDeviceSurfaceCapabilitiesKHR() (i.e. minImageCount = %d, maxImageCount = %d).",
func_name, pCreateInfo->minImageCount, capabilities.minImageCount, capabilities.maxImageCount))
return true;
}
if ((capabilities.maxImageCount > 0) && (pCreateInfo->minImageCount > capabilities.maxImageCount)) {
if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
"VUID-VkSwapchainCreateInfoKHR-minImageCount-01272",
"%s called with minImageCount = %d, which is outside the bounds returned by "
"vkGetPhysicalDeviceSurfaceCapabilitiesKHR() (i.e. minImageCount = %d, maxImageCount = %d).",
func_name, pCreateInfo->minImageCount, capabilities.minImageCount, capabilities.maxImageCount))
return true;
}
// Validate pCreateInfo->imageExtent against VkSurfaceCapabilitiesKHR::{current|min|max}ImageExtent:
if ((pCreateInfo->imageExtent.width < capabilities.minImageExtent.width) ||
(pCreateInfo->imageExtent.width > capabilities.maxImageExtent.width) ||
(pCreateInfo->imageExtent.height < capabilities.minImageExtent.height) ||
(pCreateInfo->imageExtent.height > capabilities.maxImageExtent.height)) {
if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
"VUID-VkSwapchainCreateInfoKHR-imageExtent-01274",
"%s called with imageExtent = (%d,%d), which is outside the bounds returned by "
"vkGetPhysicalDeviceSurfaceCapabilitiesKHR(): currentExtent = (%d,%d), minImageExtent = (%d,%d), "
"maxImageExtent = (%d,%d).",
func_name, pCreateInfo->imageExtent.width, pCreateInfo->imageExtent.height, capabilities.currentExtent.width,
capabilities.currentExtent.height, capabilities.minImageExtent.width, capabilities.minImageExtent.height,
capabilities.maxImageExtent.width, capabilities.maxImageExtent.height))
return true;
}
// pCreateInfo->preTransform should have exactly one bit set, and that bit must also be set in
// VkSurfaceCapabilitiesKHR::supportedTransforms.
if (!pCreateInfo->preTransform || (pCreateInfo->preTransform & (pCreateInfo->preTransform - 1)) ||
!(pCreateInfo->preTransform & capabilities.supportedTransforms)) {
// This is an error situation; one for which we'd like to give the developer a helpful, multi-line error message. Build
// it up a little at a time, and then log it:
std::string errorString = "";
char str[1024];
// Here's the first part of the message:
sprintf(str, "%s called with a non-supported pCreateInfo->preTransform (i.e. %s). Supported values are:\n", func_name,
string_VkSurfaceTransformFlagBitsKHR(pCreateInfo->preTransform));
errorString += str;
for (int i = 0; i < 32; i++) {
// Build up the rest of the message:
if ((1 << i) & capabilities.supportedTransforms) {
const char *newStr = string_VkSurfaceTransformFlagBitsKHR((VkSurfaceTransformFlagBitsKHR)(1 << i));
sprintf(str, " %s\n", newStr);
errorString += str;
}
}
// Log the message that we've built up:
if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
"VUID-VkSwapchainCreateInfoKHR-preTransform-01279", "%s.", errorString.c_str()))
return true;
}
// pCreateInfo->compositeAlpha should have exactly one bit set, and that bit must also be set in
// VkSurfaceCapabilitiesKHR::supportedCompositeAlpha
if (!pCreateInfo->compositeAlpha || (pCreateInfo->compositeAlpha & (pCreateInfo->compositeAlpha - 1)) ||
!((pCreateInfo->compositeAlpha) & capabilities.supportedCompositeAlpha)) {
// This is an error situation; one for which we'd like to give the developer a helpful, multi-line error message. Build
// it up a little at a time, and then log it:
std::string errorString = "";
char str[1024];
// Here's the first part of the message:
sprintf(str, "%s called with a non-supported pCreateInfo->compositeAlpha (i.e. %s). Supported values are:\n", func_name,
string_VkCompositeAlphaFlagBitsKHR(pCreateInfo->compositeAlpha));
errorString += str;
for (int i = 0; i < 32; i++) {
// Build up the rest of the message:
if ((1 << i) & capabilities.supportedCompositeAlpha) {
const char *newStr = string_VkCompositeAlphaFlagBitsKHR((VkCompositeAlphaFlagBitsKHR)(1 << i));
sprintf(str, " %s\n", newStr);
errorString += str;
}
}
// Log the message that we've built up:
if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
"VUID-VkSwapchainCreateInfoKHR-compositeAlpha-01280", "%s.", errorString.c_str()))
return true;
}
// Validate pCreateInfo->imageArrayLayers against VkSurfaceCapabilitiesKHR::maxImageArrayLayers:
if (pCreateInfo->imageArrayLayers > capabilities.maxImageArrayLayers) {
if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
"VUID-VkSwapchainCreateInfoKHR-imageArrayLayers-01275",
"%s called with a non-supported imageArrayLayers (i.e. %d). Maximum value is %d.", func_name,
pCreateInfo->imageArrayLayers, capabilities.maxImageArrayLayers))
return true;
}
// Validate pCreateInfo->imageUsage against VkSurfaceCapabilitiesKHR::supportedUsageFlags:
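// Any usage bit requested but absent from supportedUsageFlags makes the masked value differ from imageUsage.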
if (pCreateInfo->imageUsage != (pCreateInfo->imageUsage & capabilities.supportedUsageFlags)) {
if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
"VUID-VkSwapchainCreateInfoKHR-imageUsage-01276",
"%s called with a non-supported pCreateInfo->imageUsage (i.e. 0x%08x). Supported flag bits are 0x%08x.",
func_name, pCreateInfo->imageUsage, capabilities.supportedUsageFlags))
return true;
}
if (device_extensions.vk_khr_surface_protected_capabilities && (pCreateInfo->flags & VK_SWAPCHAIN_CREATE_PROTECTED_BIT_KHR)) {
VkPhysicalDeviceSurfaceInfo2KHR surfaceInfo = {VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SURFACE_INFO_2_KHR};
surfaceInfo.surface = pCreateInfo->surface;
VkSurfaceProtectedCapabilitiesKHR surfaceProtectedCapabilities = {VK_STRUCTURE_TYPE_SURFACE_PROTECTED_CAPABILITIES_KHR};
VkSurfaceCapabilities2KHR surfaceCapabilities = {VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_KHR};
surfaceCapabilities.pNext = &surfaceProtectedCapabilities;
DispatchGetPhysicalDeviceSurfaceCapabilities2KHR(physical_device_state->phys_device, &surfaceInfo, &surfaceCapabilities);
if (!surfaceProtectedCapabilities.supportsProtected) {
if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
"VUID-VkSwapchainCreateInfoKHR-flags-03187",
"%s: pCreateInfo->flags contains VK_SWAPCHAIN_CREATE_PROTECTED_BIT_KHR but the surface "
"capabilities does not have VkSurfaceProtectedCapabilitiesKHR.supportsProtected set to VK_TRUE.",
func_name))
return true;
}
}
std::vector<VkSurfaceFormatKHR> surface_formats;
const auto *surface_formats_ref = &surface_formats;
// Validate pCreateInfo values with the results of vkGetPhysicalDeviceSurfaceFormatsKHR():
if (physical_device_state->vkGetPhysicalDeviceSurfaceFormatsKHRState != QUERY_DETAILS) {
if (log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
kVUID_Core_DrawState_SwapchainCreateBeforeQuery,
"%s called before getting format(s) from vkGetPhysicalDeviceSurfaceFormatsKHR().", func_name)) {
return true;
}
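// The formats were never cached on the physical-device state, so query them directly here; the format and
// color-space checks below can then still run.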
uint32_t surface_format_count = 0;
DispatchGetPhysicalDeviceSurfaceFormatsKHR(physical_device, pCreateInfo->surface, &surface_format_count, nullptr);
surface_formats.resize(surface_format_count);
DispatchGetPhysicalDeviceSurfaceFormatsKHR(physical_device, pCreateInfo->surface, &surface_format_count,
&surface_formats[0]);
} else {
surface_formats_ref = &physical_device_state->surface_formats;
}
{
// Validate pCreateInfo->imageFormat against VkSurfaceFormatKHR::format:
bool foundFormat = false;
bool foundColorSpace = false;
bool foundMatch = false;
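// Track format and color space separately so the errors below can report which half of the pair failed.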
for (auto const &format : *surface_formats_ref) {
if (pCreateInfo->imageFormat == format.format) {
// Validate pCreateInfo->imageColorSpace against VkSurfaceFormatKHR::colorSpace:
foundFormat = true;
if (pCreateInfo->imageColorSpace == format.colorSpace) {
foundMatch = true;
break;
}
} else {
if (pCreateInfo->imageColorSpace == format.colorSpace) {
foundColorSpace = true;
}
}
}
if (!foundMatch) {
if (!foundFormat) {
if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), "VUID-VkSwapchainCreateInfoKHR-imageFormat-01273",
"%s called with a non-supported pCreateInfo->imageFormat (i.e. %d).", func_name,
pCreateInfo->imageFormat))
return true;
}
if (!foundColorSpace) {
if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), "VUID-VkSwapchainCreateInfoKHR-imageFormat-01273",
"%s called with a non-supported pCreateInfo->imageColorSpace (i.e. %d).", func_name,
pCreateInfo->imageColorSpace))
return true;
}
}
}
// Validate pCreateInfo values with the results of vkGetPhysicalDeviceSurfacePresentModesKHR():
if (physical_device_state->vkGetPhysicalDeviceSurfacePresentModesKHRState != QUERY_DETAILS) {
// FIFO is required to always be supported
if (pCreateInfo->presentMode != VK_PRESENT_MODE_FIFO_KHR) {
if (log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), kVUID_Core_DrawState_SwapchainCreateBeforeQuery,
"%s called before getting present mode(s) from vkGetPhysicalDeviceSurfacePresentModesKHR().", func_name))
return true;
}
} else {
// Validate pCreateInfo->presentMode against vkGetPhysicalDeviceSurfacePresentModesKHR():
bool foundMatch = std::find(physical_device_state->present_modes.begin(), physical_device_state->present_modes.end(),
pCreateInfo->presentMode) != physical_device_state->present_modes.end();
if (!foundMatch) {
if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
"VUID-VkSwapchainCreateInfoKHR-presentMode-01281", "%s called with a non-supported presentMode (i.e. %s).",
func_name, string_VkPresentModeKHR(pCreateInfo->presentMode)))
return true;
}
}
// Validate state for shared presentable case
if (VK_PRESENT_MODE_SHARED_DEMAND_REFRESH_KHR == pCreateInfo->presentMode ||
VK_PRESENT_MODE_SHARED_CONTINUOUS_REFRESH_KHR == pCreateInfo->presentMode) {
if (!device_extensions.vk_khr_shared_presentable_image) {
if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
kVUID_Core_DrawState_ExtensionNotEnabled,
"%s called with presentMode %s which requires the VK_KHR_shared_presentable_image extension, which has not "
"been enabled.",
func_name, string_VkPresentModeKHR(pCreateInfo->presentMode)))
return true;
} else if (pCreateInfo->minImageCount != 1) {
if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
"VUID-VkSwapchainCreateInfoKHR-minImageCount-01383",
"%s called with presentMode %s, but minImageCount value is %d. For shared presentable image, minImageCount "
"must be 1.",
func_name, string_VkPresentModeKHR(pCreateInfo->presentMode), pCreateInfo->minImageCount))
return true;
}
}
if (pCreateInfo->flags & VK_SWAPCHAIN_CREATE_MUTABLE_FORMAT_BIT_KHR) {
if (!device_extensions.vk_khr_swapchain_mutable_format) {
if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
kVUID_Core_DrawState_ExtensionNotEnabled,
"%s: pCreateInfo->flags contains VK_SWAPCHAIN_CREATE_MUTABLE_FORMAT_BIT_KHR which requires the "
"VK_KHR_swapchain_mutable_format extension, which has not been enabled.",
func_name))
return true;
} else {
const auto *image_format_list = lvl_find_in_chain<VkImageFormatListCreateInfoKHR>(pCreateInfo->pNext);
if (image_format_list == nullptr) {
if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), "VUID-VkSwapchainCreateInfoKHR-flags-03168",
"%s: pCreateInfo->flags contains VK_SWAPCHAIN_CREATE_MUTABLE_FORMAT_BIT_KHR but the pNext chain of "
"pCreateInfo does not contain an instance of VkImageFormatListCreateInfoKHR.",
func_name))
return true;
} else if (image_format_list->viewFormatCount == 0) {
if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), "VUID-VkSwapchainCreateInfoKHR-flags-03168",
"%s: pCreateInfo->flags contains VK_SWAPCHAIN_CREATE_MUTABLE_FORMAT_BIT_KHR but the viewFormatCount "
"member of VkImageFormatListCreateInfoKHR in the pNext chain is zero.",
func_name))
return true;
} else {
bool found_base_format = false;
for (uint32_t i = 0; i < image_format_list->viewFormatCount; ++i) {
if (image_format_list->pViewFormats[i] == pCreateInfo->imageFormat) {
found_base_format = true;
break;
}
}
if (!found_base_format) {
if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), "VUID-VkSwapchainCreateInfoKHR-flags-03168",
"%s: pCreateInfo->flags contains VK_SWAPCHAIN_CREATE_MUTABLE_FORMAT_BIT_KHR but none of the "
"elements of the pViewFormats member of VkImageFormatListCreateInfoKHR match "
"pCreateInfo->imageFormat.",
func_name))
return true;
}
}
}
}
if ((pCreateInfo->imageSharingMode == VK_SHARING_MODE_CONCURRENT) && pCreateInfo->pQueueFamilyIndices) {
bool skip1 =
ValidateQueueFamilies(pCreateInfo->queueFamilyIndexCount, pCreateInfo->pQueueFamilyIndices, "vkCreateBuffer",
"pCreateInfo->pQueueFamilyIndices", "VUID-VkSwapchainCreateInfoKHR-imageSharingMode-01428",
"VUID-VkSwapchainCreateInfoKHR-imageSharingMode-01428", false);
if (skip1) return true;
}
return skip;
}
bool CoreChecks::PreCallValidateCreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchain) {
const auto surface_state = GetSurfaceState(pCreateInfo->surface);
const auto old_swapchain_state = GetSwapchainState(pCreateInfo->oldSwapchain);
return ValidateCreateSwapchain("vkCreateSwapchainKHR()", pCreateInfo, surface_state, old_swapchain_state);
}
void ValidationStateTracker::RecordCreateSwapchainState(VkResult result, const VkSwapchainCreateInfoKHR *pCreateInfo,
VkSwapchainKHR *pSwapchain, SURFACE_STATE *surface_state,
SWAPCHAIN_NODE *old_swapchain_state) {
if (VK_SUCCESS == result) {
auto swapchain_state = unique_ptr<SWAPCHAIN_NODE>(new SWAPCHAIN_NODE(pCreateInfo, *pSwapchain));
if (VK_PRESENT_MODE_SHARED_DEMAND_REFRESH_KHR == pCreateInfo->presentMode ||
VK_PRESENT_MODE_SHARED_CONTINUOUS_REFRESH_KHR == pCreateInfo->presentMode) {
swapchain_state->shared_presentable = true;
}
surface_state->swapchain = swapchain_state.get();
swapchainMap[*pSwapchain] = std::move(swapchain_state);
} else {
surface_state->swapchain = nullptr;
}
// Spec requires that even if CreateSwapchainKHR fails, oldSwapchain is retired
if (old_swapchain_state) {
old_swapchain_state->retired = true;
}
return;
}
void ValidationStateTracker::PostCallRecordCreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchain,
VkResult result) {
auto surface_state = GetSurfaceState(pCreateInfo->surface);
auto old_swapchain_state = GetSwapchainState(pCreateInfo->oldSwapchain);
RecordCreateSwapchainState(result, pCreateInfo, pSwapchain, surface_state, old_swapchain_state);
}
void ValidationStateTracker::PreCallRecordDestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain,
const VkAllocationCallbacks *pAllocator) {
if (!swapchain) return;
auto swapchain_data = GetSwapchainState(swapchain);
if (swapchain_data) {
for (const auto &swapchain_image : swapchain_data->images) {
ClearMemoryObjectBindings(VulkanTypedHandle(swapchain_image, kVulkanObjectTypeImage));
imageMap.erase(swapchain_image);
}
auto surface_state = GetSurfaceState(swapchain_data->createInfo.surface);
if (surface_state) {
if (surface_state->swapchain == swapchain_data) surface_state->swapchain = nullptr;
}
swapchainMap.erase(swapchain);
}
}
void CoreChecks::PreCallRecordDestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain,
const VkAllocationCallbacks *pAllocator) {
if (swapchain) {
auto swapchain_data = GetSwapchainState(swapchain);
if (swapchain_data) {
for (const auto &swapchain_image : swapchain_data->images) {
auto image_sub = imageSubresourceMap.find(swapchain_image);
if (image_sub != imageSubresourceMap.end()) {
for (auto imgsubpair : image_sub->second) {
auto image_item = imageLayoutMap.find(imgsubpair);
if (image_item != imageLayoutMap.end()) {
imageLayoutMap.erase(image_item);
}
}
imageSubresourceMap.erase(image_sub);
}
EraseQFOImageRelaseBarriers(swapchain_image);
}
}
}
StateTracker::PreCallRecordDestroySwapchainKHR(device, swapchain, pAllocator);
}
bool CoreChecks::PreCallValidateGetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pSwapchainImageCount,
VkImage *pSwapchainImages) {
auto swapchain_state = GetSwapchainState(swapchain);
bool skip = false;
if (swapchain_state && pSwapchainImages) {
// Compare the preliminary value of *pSwapchainImageCount with the value this time:
if (swapchain_state->vkGetSwapchainImagesKHRState == UNCALLED) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), kVUID_Core_Swapchain_PriorCount,
"vkGetSwapchainImagesKHR() called with non-NULL pSwapchainImageCount; but no prior positive value has "
"been seen for pSwapchainImages.");
} else if (*pSwapchainImageCount > swapchain_state->get_swapchain_image_count) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
kVUID_Core_Swapchain_InvalidCount,
"vkGetSwapchainImagesKHR() called with non-NULL pSwapchainImageCount, and with pSwapchainImages set to a "
"value (%d) that is greater than the value (%d) that was returned when pSwapchainImageCount was NULL.",
*pSwapchainImageCount, swapchain_state->get_swapchain_image_count);
}
}
return skip;
}
void CoreChecks::PostCallRecordGetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pSwapchainImageCount,
VkImage *pSwapchainImages, VkResult result) {
if ((result != VK_SUCCESS) && (result != VK_INCOMPLETE)) return;
auto swapchain_state = GetSwapchainState(swapchain);
if (*pSwapchainImageCount > swapchain_state->images.size()) swapchain_state->images.resize(*pSwapchainImageCount);
if (pSwapchainImages) {
if (swapchain_state->vkGetSwapchainImagesKHRState < QUERY_DETAILS) {
swapchain_state->vkGetSwapchainImagesKHRState = QUERY_DETAILS;
}
for (uint32_t i = 0; i < *pSwapchainImageCount; ++i) {
if (swapchain_state->images[i] != VK_NULL_HANDLE) continue; // Already retrieved this.
IMAGE_LAYOUT_STATE image_layout_node;
image_layout_node.layout = VK_IMAGE_LAYOUT_UNDEFINED;
image_layout_node.format = swapchain_state->createInfo.imageFormat;
// Add imageMap entries for each swapchain image
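// Build a VkImageCreateInfo mirroring the swapchain's creation parameters so these images flow through
// the same IMAGE_STATE bookkeeping as application-created images.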
VkImageCreateInfo image_ci = {};
image_ci.flags = 0;
image_ci.imageType = VK_IMAGE_TYPE_2D;
image_ci.format = swapchain_state->createInfo.imageFormat;
image_ci.extent.width = swapchain_state->createInfo.imageExtent.width;
image_ci.extent.height = swapchain_state->createInfo.imageExtent.height;
image_ci.extent.depth = 1;
image_ci.mipLevels = 1;
image_ci.arrayLayers = swapchain_state->createInfo.imageArrayLayers;
image_ci.samples = VK_SAMPLE_COUNT_1_BIT;
image_ci.tiling = VK_IMAGE_TILING_OPTIMAL;
image_ci.usage = swapchain_state->createInfo.imageUsage;
image_ci.sharingMode = swapchain_state->createInfo.imageSharingMode;
imageMap[pSwapchainImages[i]] = unique_ptr<IMAGE_STATE>(new IMAGE_STATE(pSwapchainImages[i], &image_ci));
auto &image_state = imageMap[pSwapchainImages[i]];
image_state->valid = false;
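// Swapchain images are not bound to application-visible VkDeviceMemory; a sentinel key marks the binding
// as owned by the presentation engine.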
image_state->binding.mem = MEMTRACKER_SWAP_CHAIN_IMAGE_KEY;
swapchain_state->images[i] = pSwapchainImages[i];
ImageSubresourcePair subpair = {pSwapchainImages[i], false, VkImageSubresource()};
imageSubresourceMap[pSwapchainImages[i]].push_back(subpair);
imageLayoutMap[subpair] = image_layout_node;
}
}
if (*pSwapchainImageCount) {
if (swapchain_state->vkGetSwapchainImagesKHRState < QUERY_COUNT) {
swapchain_state->vkGetSwapchainImagesKHRState = QUERY_COUNT;
}
swapchain_state->get_swapchain_image_count = *pSwapchainImageCount;
}
}
bool CoreChecks::PreCallValidateQueuePresentKHR(VkQueue queue, const VkPresentInfoKHR *pPresentInfo) {
bool skip = false;
auto queue_state = GetQueueState(queue);
for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) {
auto pSemaphore = GetSemaphoreState(pPresentInfo->pWaitSemaphores[i]);
if (pSemaphore && !pSemaphore->signaled) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
kVUID_Core_DrawState_QueueForwardProgress, "%s is waiting on %s that has no way to be signaled.",
report_data->FormatHandle(queue).c_str(),
report_data->FormatHandle(pPresentInfo->pWaitSemaphores[i]).c_str());
}
}
for (uint32_t i = 0; i < pPresentInfo->swapchainCount; ++i) {
auto swapchain_data = GetSwapchainState(pPresentInfo->pSwapchains[i]);
if (swapchain_data) {
if (pPresentInfo->pImageIndices[i] >= swapchain_data->images.size()) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
HandleToUint64(pPresentInfo->pSwapchains[i]), kVUID_Core_DrawState_SwapchainInvalidImage,
"vkQueuePresentKHR: Swapchain image index too large (%u). There are only %u images in this swapchain.",
pPresentInfo->pImageIndices[i], (uint32_t)swapchain_data->images.size());
} else {
auto image = swapchain_data->images[pPresentInfo->pImageIndices[i]];
auto image_state = GetImageState(image);
if (image_state->shared_presentable) {
image_state->layout_locked = true;
}
if (!image_state->acquired) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
HandleToUint64(pPresentInfo->pSwapchains[i]), kVUID_Core_DrawState_SwapchainImageNotAcquired,
"vkQueuePresentKHR: Swapchain image index %u has not been acquired.",
pPresentInfo->pImageIndices[i]);
}
vector<VkImageLayout> layouts;
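// Every layout currently tracked for this image must be presentable; with VK_KHR_shared_presentable_image
// enabled, VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR is also acceptable.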
if (FindLayouts(image, layouts)) {
for (auto layout : layouts) {
if ((layout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) && (!device_extensions.vk_khr_shared_presentable_image ||
(layout != VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR))) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT,
HandleToUint64(queue), "VUID-VkPresentInfoKHR-pImageIndices-01296",
"Images passed to present must be in layout VK_IMAGE_LAYOUT_PRESENT_SRC_KHR or "
"VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR but is in %s.",
string_VkImageLayout(layout));
}
}
}
}
// All physical devices and queue families are required to be able to present to any native window on Android; require
// the application to have established support on any other platform.
if (!instance_extensions.vk_khr_android_surface) {
auto surface_state = GetSurfaceState(swapchain_data->createInfo.surface);
auto support_it = surface_state->gpu_queue_support.find({physical_device, queue_state->queueFamilyIndex});
if (support_it == surface_state->gpu_queue_support.end()) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
HandleToUint64(pPresentInfo->pSwapchains[i]), kVUID_Core_DrawState_SwapchainUnsupportedQueue,
"vkQueuePresentKHR: Presenting image without calling vkGetPhysicalDeviceSurfaceSupportKHR");
} else if (!support_it->second) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
HandleToUint64(pPresentInfo->pSwapchains[i]), "VUID-vkQueuePresentKHR-pSwapchains-01292",
"vkQueuePresentKHR: Presenting image on queue that cannot present to this surface.");
}
}
}
}
if (pPresentInfo && pPresentInfo->pNext) {
// Verify ext struct
const auto *present_regions = lvl_find_in_chain<VkPresentRegionsKHR>(pPresentInfo->pNext);
if (present_regions) {
for (uint32_t i = 0; i < present_regions->swapchainCount; ++i) {
auto swapchain_data = GetSwapchainState(pPresentInfo->pSwapchains[i]);
assert(swapchain_data);
VkPresentRegionKHR region = present_regions->pRegions[i];
for (uint32_t j = 0; j < region.rectangleCount; ++j) {
VkRectLayerKHR rect = region.pRectangles[j];
if ((rect.offset.x + rect.extent.width) > swapchain_data->createInfo.imageExtent.width) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
HandleToUint64(pPresentInfo->pSwapchains[i]), "VUID-VkRectLayerKHR-offset-01261",
"vkQueuePresentKHR(): For VkPresentRegionKHR down pNext chain, "
"pRegion[%i].pRectangles[%i], the sum of offset.x (%i) and extent.width (%i) is greater "
"than the corresponding swapchain's imageExtent.width (%i).",
i, j, rect.offset.x, rect.extent.width, swapchain_data->createInfo.imageExtent.width);
}
if ((rect.offset.y + rect.extent.height) > swapchain_data->createInfo.imageExtent.height) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
HandleToUint64(pPresentInfo->pSwapchains[i]), "VUID-VkRectLayerKHR-offset-01261",
"vkQueuePresentKHR(): For VkPresentRegionKHR down pNext chain, "
"pRegion[%i].pRectangles[%i], the sum of offset.y (%i) and extent.height (%i) is greater "
"than the corresponding swapchain's imageExtent.height (%i).",
i, j, rect.offset.y, rect.extent.height, swapchain_data->createInfo.imageExtent.height);
}
if (rect.layer >= swapchain_data->createInfo.imageArrayLayers) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
HandleToUint64(pPresentInfo->pSwapchains[i]), "VUID-VkRectLayerKHR-layer-01262",
"vkQueuePresentKHR(): For VkPresentRegionKHR down pNext chain, pRegion[%i].pRectangles[%i], the layer "
"(%i) is not less than the corresponding swapchain's imageArrayLayers (%i).",
i, j, rect.layer, swapchain_data->createInfo.imageArrayLayers);
}
}
}
}
const auto *present_times_info = lvl_find_in_chain<VkPresentTimesInfoGOOGLE>(pPresentInfo->pNext);
if (present_times_info) {
if (pPresentInfo->swapchainCount != present_times_info->swapchainCount) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
HandleToUint64(pPresentInfo->pSwapchains[0]), "VUID-VkPresentTimesInfoGOOGLE-swapchainCount-01247",
"vkQueuePresentKHR(): VkPresentTimesInfoGOOGLE.swapchainCount is %i but pPresentInfo->swapchainCount "
"is %i. For VkPresentTimesInfoGOOGLE down pNext chain of VkPresentInfoKHR, "
"VkPresentTimesInfoGOOGLE.swapchainCount must equal VkPresentInfoKHR.swapchainCount.",
present_times_info->swapchainCount, pPresentInfo->swapchainCount);
}
}
}
return skip;
}
void CoreChecks::PostCallRecordQueuePresentKHR(VkQueue queue, const VkPresentInfoKHR *pPresentInfo, VkResult result) {
// Semaphore waits occur before error generation, if the call reached the ICD. (Confirm?)
for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) {
auto pSemaphore = GetSemaphoreState(pPresentInfo->pWaitSemaphores[i]);
if (pSemaphore) {
pSemaphore->signaler.first = VK_NULL_HANDLE;
pSemaphore->signaled = false;
}
}
for (uint32_t i = 0; i < pPresentInfo->swapchainCount; ++i) {
// Note: this is imperfect, in that we can get confused about what did or didn't succeed-- but if the app does that, it's
// confused itself just as much.
auto local_result = pPresentInfo->pResults ? pPresentInfo->pResults[i] : result;
if (local_result != VK_SUCCESS && local_result != VK_SUBOPTIMAL_KHR) continue; // this present didn't actually happen.
// Mark the image as having been released to the WSI
auto swapchain_data = GetSwapchainState(pPresentInfo->pSwapchains[i]);
if (swapchain_data && (swapchain_data->images.size() > pPresentInfo->pImageIndices[i])) {
auto image = swapchain_data->images[pPresentInfo->pImageIndices[i]];
auto image_state = GetImageState(image);
if (image_state) {
image_state->acquired = false;
}
}
}
// Note: even though presentation is directed to a queue, there is no direct ordering between QP and subsequent work, so QP (and
// its semaphore waits) /never/ participate in any completion proof.
}
bool CoreChecks::PreCallValidateCreateSharedSwapchainsKHR(VkDevice device, uint32_t swapchainCount,
const VkSwapchainCreateInfoKHR *pCreateInfos,
const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchains) {
bool skip = false;
if (pCreateInfos) {
for (uint32_t i = 0; i < swapchainCount; i++) {
const auto surface_state = GetSurfaceState(pCreateInfos[i].surface);
const auto old_swapchain_state = GetSwapchainState(pCreateInfos[i].oldSwapchain);
std::stringstream func_name;
func_name << "vkCreateSharedSwapchainsKHR[" << swapchainCount << "]()";
skip |= ValidateCreateSwapchain(func_name.str().c_str(), &pCreateInfos[i], surface_state, old_swapchain_state);
}
}
return skip;
}
void ValidationStateTracker::PostCallRecordCreateSharedSwapchainsKHR(VkDevice device, uint32_t swapchainCount,
const VkSwapchainCreateInfoKHR *pCreateInfos,
const VkAllocationCallbacks *pAllocator,
VkSwapchainKHR *pSwapchains, VkResult result) {
if (pCreateInfos) {
for (uint32_t i = 0; i < swapchainCount; i++) {
auto surface_state = GetSurfaceState(pCreateInfos[i].surface);
auto old_swapchain_state = GetSwapchainState(pCreateInfos[i].oldSwapchain);
RecordCreateSwapchainState(result, &pCreateInfos[i], &pSwapchains[i], surface_state, old_swapchain_state);
}
}
}
bool CoreChecks::ValidateAcquireNextImage(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout, VkSemaphore semaphore,
VkFence fence, uint32_t *pImageIndex, const char *func_name) const {
bool skip = false;
if (fence == VK_NULL_HANDLE && semaphore == VK_NULL_HANDLE) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
"VUID-vkAcquireNextImageKHR-semaphore-01780",
"%s: Semaphore and fence cannot both be VK_NULL_HANDLE. There would be no way to "
"determine the completion of this operation.",
func_name);
}
auto pSemaphore = GetSemaphoreState(semaphore);
if (pSemaphore && pSemaphore->scope == kSyncScopeInternal && pSemaphore->signaled) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
HandleToUint64(semaphore), "VUID-vkAcquireNextImageKHR-semaphore-01286",
"%s: Semaphore must not be currently signaled or in a wait state.", func_name);
}
auto pFence = GetFenceState(fence);
if (pFence) {
skip |= ValidateFenceForSubmit(pFence);
}
const auto swapchain_data = GetSwapchainState(swapchain);
if (swapchain_data) {
if (swapchain_data->retired) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
HandleToUint64(swapchain), "VUID-vkAcquireNextImageKHR-swapchain-01285",
"%s: This swapchain has been retired. The application can still present any images it "
"has acquired, but cannot acquire any more.",
func_name);
}
auto physical_device_state = GetPhysicalDeviceState();
if (physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState != UNCALLED) {
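// Assume the presentation engine may hold minImageCount images; the application has over-acquired once
// more than images.size() - minImageCount are outstanding at the same time.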
uint64_t acquired_images = std::count_if(swapchain_data->images.begin(), swapchain_data->images.end(),
[=](VkImage image) { return GetImageState(image)->acquired; });
if (acquired_images > swapchain_data->images.size() - physical_device_state->surfaceCapabilities.minImageCount) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
HandleToUint64(swapchain), kVUID_Core_DrawState_SwapchainTooManyImages,
"%s: Application has already acquired the maximum number of images (0x%" PRIxLEAST64 ")", func_name,
acquired_images);
}
}
if (swapchain_data->images.size() == 0) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
HandleToUint64(swapchain), kVUID_Core_DrawState_SwapchainImagesNotFound,
"%s: No images found to acquire from. Application probably did not call "
"vkGetSwapchainImagesKHR after swapchain creation.",
func_name);
}
}
return skip;
}
bool CoreChecks::PreCallValidateAcquireNextImageKHR(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout,
VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex) {
return ValidateAcquireNextImage(device, swapchain, timeout, semaphore, fence, pImageIndex, "vkAcquireNextImageKHR");
}
bool CoreChecks::PreCallValidateAcquireNextImage2KHR(VkDevice device, const VkAcquireNextImageInfoKHR *pAcquireInfo,
uint32_t *pImageIndex) {
bool skip = false;
skip |= ValidateDeviceMaskToPhysicalDeviceCount(pAcquireInfo->deviceMask, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
HandleToUint64(pAcquireInfo->swapchain),
"VUID-VkAcquireNextImageInfoKHR-deviceMask-01290");
skip |= ValidateDeviceMaskToZero(pAcquireInfo->deviceMask, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
HandleToUint64(pAcquireInfo->swapchain), "VUID-VkAcquireNextImageInfoKHR-deviceMask-01291");
skip |= ValidateAcquireNextImage(device, pAcquireInfo->swapchain, pAcquireInfo->timeout, pAcquireInfo->semaphore,
pAcquireInfo->fence, pImageIndex, "vkAcquireNextImage2KHR");
return skip;
}
void ValidationStateTracker::RecordAcquireNextImageState(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout,
VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex) {
auto pFence = GetFenceState(fence);
if (pFence && pFence->scope == kSyncScopeInternal) {
// Treat as inflight since it is valid to wait on this fence, even in cases where it is technically a temporary
// import
pFence->state = FENCE_INFLIGHT;
pFence->signaler.first = VK_NULL_HANDLE; // ANI isn't on a queue, so this can't participate in a completion proof.
}
auto pSemaphore = GetSemaphoreState(semaphore);
if (pSemaphore && pSemaphore->scope == kSyncScopeInternal) {
// Treat as signaled since it is valid to wait on this semaphore, even in cases where it is technically a
// temporary import
pSemaphore->signaled = true;
pSemaphore->signaler.first = VK_NULL_HANDLE;
}
// Mark the image as acquired.
auto swapchain_data = GetSwapchainState(swapchain);
if (swapchain_data && (swapchain_data->images.size() > *pImageIndex)) {
auto image = swapchain_data->images[*pImageIndex];
auto image_state = GetImageState(image);
if (image_state) {
image_state->acquired = true;
image_state->shared_presentable = swapchain_data->shared_presentable;
}
}
}
void ValidationStateTracker::PostCallRecordAcquireNextImageKHR(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout,
VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex,
VkResult result) {
if ((VK_SUCCESS != result) && (VK_SUBOPTIMAL_KHR != result)) return;
RecordAcquireNextImageState(device, swapchain, timeout, semaphore, fence, pImageIndex);
}
void ValidationStateTracker::PostCallRecordAcquireNextImage2KHR(VkDevice device, const VkAcquireNextImageInfoKHR *pAcquireInfo,
uint32_t *pImageIndex, VkResult result) {
if ((VK_SUCCESS != result) && (VK_SUBOPTIMAL_KHR != result)) return;
RecordAcquireNextImageState(device, pAcquireInfo->swapchain, pAcquireInfo->timeout, pAcquireInfo->semaphore,
pAcquireInfo->fence, pImageIndex);
}
void CoreChecks::PostCallRecordEnumeratePhysicalDevices(VkInstance instance, uint32_t *pPhysicalDeviceCount,
VkPhysicalDevice *pPhysicalDevices, VkResult result) {
if ((NULL != pPhysicalDevices) && (result == VK_SUCCESS || result == VK_INCOMPLETE)) {
for (uint32_t i = 0; i < *pPhysicalDeviceCount; i++) {
auto &phys_device_state = physical_device_map[pPhysicalDevices[i]];
phys_device_state.phys_device = pPhysicalDevices[i];
// Init actual features for each physical device
DispatchGetPhysicalDeviceFeatures(pPhysicalDevices[i], &phys_device_state.features2.features);
}
}
}
// Common function to handle validation for GetPhysicalDeviceQueueFamilyProperties & 2KHR version
static bool ValidateCommonGetPhysicalDeviceQueueFamilyProperties(debug_report_data *report_data,
const PHYSICAL_DEVICE_STATE *pd_state,
uint32_t requested_queue_family_property_count, bool qfp_null,
const char *caller_name) {
bool skip = false;
if (!qfp_null) {
// Verify that for each physical device, this command is called first with NULL pQueueFamilyProperties in order to get count
if (UNCALLED == pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
HandleToUint64(pd_state->phys_device), kVUID_Core_DevLimit_MissingQueryCount,
"%s is called with non-NULL pQueueFamilyProperties before obtaining pQueueFamilyPropertyCount. It is recommended "
"to first call %s with NULL pQueueFamilyProperties in order to obtain the maximal pQueueFamilyPropertyCount.",
caller_name, caller_name);
// Then verify that pCount that is passed in on second call matches what was returned
} else if (pd_state->queue_family_known_count != requested_queue_family_property_count) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
HandleToUint64(pd_state->phys_device), kVUID_Core_DevLimit_CountMismatch,
"%s is called with non-NULL pQueueFamilyProperties and pQueueFamilyPropertyCount value %" PRIu32
", but the largest previously returned pQueueFamilyPropertyCount for this physicalDevice is %" PRIu32
". It is recommended to instead receive all the properties by calling %s with pQueueFamilyPropertyCount that was "
"previously obtained by calling %s with NULL pQueueFamilyProperties.",
caller_name, requested_queue_family_property_count, pd_state->queue_family_known_count, caller_name, caller_name);
}
}
return skip;
}
bool CoreChecks::PreCallValidateGetPhysicalDeviceQueueFamilyProperties(VkPhysicalDevice physicalDevice,
uint32_t *pQueueFamilyPropertyCount,
VkQueueFamilyProperties *pQueueFamilyProperties) {
const auto physical_device_state = GetPhysicalDeviceState(physicalDevice);
assert(physical_device_state);
return ValidateCommonGetPhysicalDeviceQueueFamilyProperties(report_data, physical_device_state, *pQueueFamilyPropertyCount,
(nullptr == pQueueFamilyProperties),
"vkGetPhysicalDeviceQueueFamilyProperties()");
}
bool CoreChecks::PreCallValidateGetPhysicalDeviceQueueFamilyProperties2(VkPhysicalDevice physicalDevice,
uint32_t *pQueueFamilyPropertyCount,
VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
const auto physical_device_state = GetPhysicalDeviceState(physicalDevice);
assert(physical_device_state);
return ValidateCommonGetPhysicalDeviceQueueFamilyProperties(report_data, physical_device_state, *pQueueFamilyPropertyCount,
(nullptr == pQueueFamilyProperties),
"vkGetPhysicalDeviceQueueFamilyProperties2()");
}
bool CoreChecks::PreCallValidateGetPhysicalDeviceQueueFamilyProperties2KHR(VkPhysicalDevice physicalDevice,
uint32_t *pQueueFamilyPropertyCount,
VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
auto physical_device_state = GetPhysicalDeviceState(physicalDevice);
assert(physical_device_state);
return ValidateCommonGetPhysicalDeviceQueueFamilyProperties(report_data, physical_device_state, *pQueueFamilyPropertyCount,
(nullptr == pQueueFamilyProperties),
"vkGetPhysicalDeviceQueueFamilyProperties2KHR()");
}
// Common function to update state for GetPhysicalDeviceQueueFamilyProperties & 2KHR version
static void StateUpdateCommonGetPhysicalDeviceQueueFamilyProperties(PHYSICAL_DEVICE_STATE *pd_state, uint32_t count,
VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
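// Remember the largest count ever returned so later calls are validated against the best-known value.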
pd_state->queue_family_known_count = std::max(pd_state->queue_family_known_count, count);
if (!pQueueFamilyProperties) {
if (UNCALLED == pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState)
pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState = QUERY_COUNT;
} else { // Save queue family properties
pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState = QUERY_DETAILS;
pd_state->queue_family_properties.resize(std::max(static_cast<uint32_t>(pd_state->queue_family_properties.size()), count));
for (uint32_t i = 0; i < count; ++i) {
pd_state->queue_family_properties[i] = pQueueFamilyProperties[i].queueFamilyProperties;
}
}
}
void ValidationStateTracker::PostCallRecordGetPhysicalDeviceQueueFamilyProperties(VkPhysicalDevice physicalDevice,
uint32_t *pQueueFamilyPropertyCount,
VkQueueFamilyProperties *pQueueFamilyProperties) {
auto physical_device_state = GetPhysicalDeviceState(physicalDevice);
assert(physical_device_state);
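// The core entry point returns VkQueueFamilyProperties; wrap each element in VkQueueFamilyProperties2KHR
// so the common state updater can service both the core and the 2/2KHR paths.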
VkQueueFamilyProperties2KHR *pqfp = nullptr;
std::vector<VkQueueFamilyProperties2KHR> qfp;
qfp.resize(*pQueueFamilyPropertyCount);
if (pQueueFamilyProperties) {
for (uint32_t i = 0; i < *pQueueFamilyPropertyCount; ++i) {
qfp[i].sType = VK_STRUCTURE_TYPE_QUEUE_FAMILY_PROPERTIES_2_KHR;
qfp[i].pNext = nullptr;
qfp[i].queueFamilyProperties = pQueueFamilyProperties[i];
}
pqfp = qfp.data();
}
StateUpdateCommonGetPhysicalDeviceQueueFamilyProperties(physical_device_state, *pQueueFamilyPropertyCount, pqfp);
}
void ValidationStateTracker::PostCallRecordGetPhysicalDeviceQueueFamilyProperties2(
VkPhysicalDevice physicalDevice, uint32_t *pQueueFamilyPropertyCount, VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
auto physical_device_state = GetPhysicalDeviceState(physicalDevice);
assert(physical_device_state);
StateUpdateCommonGetPhysicalDeviceQueueFamilyProperties(physical_device_state, *pQueueFamilyPropertyCount,
pQueueFamilyProperties);
}
void ValidationStateTracker::PostCallRecordGetPhysicalDeviceQueueFamilyProperties2KHR(
VkPhysicalDevice physicalDevice, uint32_t *pQueueFamilyPropertyCount, VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
auto physical_device_state = GetPhysicalDeviceState(physicalDevice);
assert(physical_device_state);
StateUpdateCommonGetPhysicalDeviceQueueFamilyProperties(physical_device_state, *pQueueFamilyPropertyCount,
pQueueFamilyProperties);
}
bool CoreChecks::PreCallValidateDestroySurfaceKHR(VkInstance instance, VkSurfaceKHR surface,
const VkAllocationCallbacks *pAllocator) {
const auto surface_state = GetSurfaceState(surface);
bool skip = false;
if ((surface_state) && (surface_state->swapchain)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT,
HandleToUint64(instance), "VUID-vkDestroySurfaceKHR-surface-01266",
"vkDestroySurfaceKHR() called before its associated VkSwapchainKHR was destroyed.");
}
return skip;
}
void ValidationStateTracker::PreCallRecordDestroySurfaceKHR(VkInstance instance, VkSurfaceKHR surface,
const VkAllocationCallbacks *pAllocator) {
surface_map.erase(surface);
}
void ValidationStateTracker::RecordVulkanSurface(VkSurfaceKHR *pSurface) {
surface_map[*pSurface] = std::unique_ptr<SURFACE_STATE>(new SURFACE_STATE{*pSurface});
}
void ValidationStateTracker::PostCallRecordCreateDisplayPlaneSurfaceKHR(VkInstance instance,
const VkDisplaySurfaceCreateInfoKHR *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkSurfaceKHR *pSurface, VkResult result) {
if (VK_SUCCESS != result) return;
RecordVulkanSurface(pSurface);
}
#ifdef VK_USE_PLATFORM_ANDROID_KHR
void ValidationStateTracker::PostCallRecordCreateAndroidSurfaceKHR(VkInstance instance,
const VkAndroidSurfaceCreateInfoKHR *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface,
VkResult result) {
if (VK_SUCCESS != result) return;
RecordVulkanSurface(pSurface);
}
#endif // VK_USE_PLATFORM_ANDROID_KHR
#ifdef VK_USE_PLATFORM_IOS_MVK
void ValidationStateTracker::PostCallRecordCreateIOSSurfaceMVK(VkInstance instance, const VkIOSSurfaceCreateInfoMVK *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface,
VkResult result) {
if (VK_SUCCESS != result) return;
RecordVulkanSurface(pSurface);
}
#endif // VK_USE_PLATFORM_IOS_MVK
#ifdef VK_USE_PLATFORM_MACOS_MVK
void ValidationStateTracker::PostCallRecordCreateMacOSSurfaceMVK(VkInstance instance,
const VkMacOSSurfaceCreateInfoMVK *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface,
VkResult result) {
if (VK_SUCCESS != result) return;
RecordVulkanSurface(pSurface);
}
#endif // VK_USE_PLATFORM_MACOS_MVK
#ifdef VK_USE_PLATFORM_WAYLAND_KHR
void ValidationStateTracker::PostCallRecordCreateWaylandSurfaceKHR(VkInstance instance,
const VkWaylandSurfaceCreateInfoKHR *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface,
VkResult result) {
if (VK_SUCCESS != result) return;
RecordVulkanSurface(pSurface);
}
bool CoreChecks::PreCallValidateGetPhysicalDeviceWaylandPresentationSupportKHR(VkPhysicalDevice physicalDevice,
uint32_t queueFamilyIndex,
struct wl_display *display) {
const auto pd_state = GetPhysicalDeviceState(physicalDevice);
return ValidateQueueFamilyIndex(pd_state, queueFamilyIndex,
"VUID-vkGetPhysicalDeviceWaylandPresentationSupportKHR-queueFamilyIndex-01306",
"vkGetPhysicalDeviceWaylandPresentationSupportKHR", "queueFamilyIndex");
}
#endif // VK_USE_PLATFORM_WAYLAND_KHR
#ifdef VK_USE_PLATFORM_WIN32_KHR
void ValidationStateTracker::PostCallRecordCreateWin32SurfaceKHR(VkInstance instance,
const VkWin32SurfaceCreateInfoKHR *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface,
VkResult result) {
if (VK_SUCCESS != result) return;
RecordVulkanSurface(pSurface);
}
bool CoreChecks::PreCallValidateGetPhysicalDeviceWin32PresentationSupportKHR(VkPhysicalDevice physicalDevice,
uint32_t queueFamilyIndex) {
const auto pd_state = GetPhysicalDeviceState(physicalDevice);
return ValidateQueueFamilyIndex(pd_state, queueFamilyIndex,
"VUID-vkGetPhysicalDeviceWin32PresentationSupportKHR-queueFamilyIndex-01309",
"vkGetPhysicalDeviceWin32PresentationSupportKHR", "queueFamilyIndex");
}
#endif // VK_USE_PLATFORM_WIN32_KHR
#ifdef VK_USE_PLATFORM_XCB_KHR
void ValidationStateTracker::PostCallRecordCreateXcbSurfaceKHR(VkInstance instance, const VkXcbSurfaceCreateInfoKHR *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface,
VkResult result) {
if (VK_SUCCESS != result) return;
RecordVulkanSurface(pSurface);
}
bool CoreChecks::PreCallValidateGetPhysicalDeviceXcbPresentationSupportKHR(VkPhysicalDevice physicalDevice,
uint32_t queueFamilyIndex, xcb_connection_t *connection,
xcb_visualid_t visual_id) {
const auto pd_state = GetPhysicalDeviceState(physicalDevice);
return ValidateQueueFamilyIndex(pd_state, queueFamilyIndex,
"VUID-vkGetPhysicalDeviceXcbPresentationSupportKHR-queueFamilyIndex-01312",
"vkGetPhysicalDeviceXcbPresentationSupportKHR", "queueFamilyIndex");
}
#endif // VK_USE_PLATFORM_XCB_KHR
#ifdef VK_USE_PLATFORM_XLIB_KHR
void ValidationStateTracker::PostCallRecordCreateXlibSurfaceKHR(VkInstance instance, const VkXlibSurfaceCreateInfoKHR *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface,
VkResult result) {
if (VK_SUCCESS != result) return;
RecordVulkanSurface(pSurface);
}
bool CoreChecks::PreCallValidateGetPhysicalDeviceXlibPresentationSupportKHR(VkPhysicalDevice physicalDevice,
uint32_t queueFamilyIndex, Display *dpy,
VisualID visualID) {
const auto pd_state = GetPhysicalDeviceState(physicalDevice);
return ValidateQueueFamilyIndex(pd_state, queueFamilyIndex,
"VUID-vkGetPhysicalDeviceXlibPresentationSupportKHR-queueFamilyIndex-01315",
"vkGetPhysicalDeviceXlibPresentationSupportKHR", "queueFamilyIndex");
}
#endif // VK_USE_PLATFORM_XLIB_KHR
void ValidationStateTracker::PostCallRecordGetPhysicalDeviceSurfaceCapabilitiesKHR(VkPhysicalDevice physicalDevice,
VkSurfaceKHR surface,
VkSurfaceCapabilitiesKHR *pSurfaceCapabilities,
VkResult result) {
if (VK_SUCCESS != result) return;
auto physical_device_state = GetPhysicalDeviceState(physicalDevice);
physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState = QUERY_DETAILS;
physical_device_state->surfaceCapabilities = *pSurfaceCapabilities;
}
void ValidationStateTracker::PostCallRecordGetPhysicalDeviceSurfaceCapabilities2KHR(
VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSurfaceInfo2KHR *pSurfaceInfo,
VkSurfaceCapabilities2KHR *pSurfaceCapabilities, VkResult result) {
if (VK_SUCCESS != result) return;
auto physical_device_state = GetPhysicalDeviceState(physicalDevice);
physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState = QUERY_DETAILS;
physical_device_state->surfaceCapabilities = pSurfaceCapabilities->surfaceCapabilities;
}
void ValidationStateTracker::PostCallRecordGetPhysicalDeviceSurfaceCapabilities2EXT(VkPhysicalDevice physicalDevice,
VkSurfaceKHR surface,
VkSurfaceCapabilities2EXT *pSurfaceCapabilities,
VkResult result) {
if (VK_SUCCESS != result) return;
auto physical_device_state = GetPhysicalDeviceState(physicalDevice);
physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState = QUERY_DETAILS;
physical_device_state->surfaceCapabilities.minImageCount = pSurfaceCapabilities->minImageCount;
physical_device_state->surfaceCapabilities.maxImageCount = pSurfaceCapabilities->maxImageCount;
physical_device_state->surfaceCapabilities.currentExtent = pSurfaceCapabilities->currentExtent;
physical_device_state->surfaceCapabilities.minImageExtent = pSurfaceCapabilities->minImageExtent;
physical_device_state->surfaceCapabilities.maxImageExtent = pSurfaceCapabilities->maxImageExtent;
physical_device_state->surfaceCapabilities.maxImageArrayLayers = pSurfaceCapabilities->maxImageArrayLayers;
physical_device_state->surfaceCapabilities.supportedTransforms = pSurfaceCapabilities->supportedTransforms;
physical_device_state->surfaceCapabilities.currentTransform = pSurfaceCapabilities->currentTransform;
physical_device_state->surfaceCapabilities.supportedCompositeAlpha = pSurfaceCapabilities->supportedCompositeAlpha;
physical_device_state->surfaceCapabilities.supportedUsageFlags = pSurfaceCapabilities->supportedUsageFlags;
}
bool CoreChecks::PreCallValidateGetPhysicalDeviceSurfaceSupportKHR(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex,
VkSurfaceKHR surface, VkBool32 *pSupported) {
const auto physical_device_state = GetPhysicalDeviceState(physicalDevice);
return ValidateQueueFamilyIndex(physical_device_state, queueFamilyIndex,
"VUID-vkGetPhysicalDeviceSurfaceSupportKHR-queueFamilyIndex-01269",
"vkGetPhysicalDeviceSurfaceSupportKHR", "queueFamilyIndex");
}
void ValidationStateTracker::PostCallRecordGetPhysicalDeviceSurfaceSupportKHR(VkPhysicalDevice physicalDevice,
uint32_t queueFamilyIndex, VkSurfaceKHR surface,
VkBool32 *pSupported, VkResult result) {
if (VK_SUCCESS != result) return;
auto surface_state = GetSurfaceState(surface);
surface_state->gpu_queue_support[{physicalDevice, queueFamilyIndex}] = (*pSupported == VK_TRUE);
}
void ValidationStateTracker::PostCallRecordGetPhysicalDeviceSurfacePresentModesKHR(VkPhysicalDevice physicalDevice,
VkSurfaceKHR surface,
uint32_t *pPresentModeCount,
VkPresentModeKHR *pPresentModes,
VkResult result) {
if ((VK_SUCCESS != result) && (VK_INCOMPLETE != result)) return;
// TODO: This isn't quite right -- available modes may differ by surface AND physical device.
auto physical_device_state = GetPhysicalDeviceState(physicalDevice);
auto &call_state = physical_device_state->vkGetPhysicalDeviceSurfacePresentModesKHRState;
if (*pPresentModeCount) {
if (call_state < QUERY_COUNT) call_state = QUERY_COUNT;
if (*pPresentModeCount > physical_device_state->present_modes.size())
physical_device_state->present_modes.resize(*pPresentModeCount);
}
if (pPresentModes) {
if (call_state < QUERY_DETAILS) call_state = QUERY_DETAILS;
for (uint32_t i = 0; i < *pPresentModeCount; i++) {
physical_device_state->present_modes[i] = pPresentModes[i];
}
}
}
bool CoreChecks::PreCallValidateGetPhysicalDeviceSurfaceFormatsKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
uint32_t *pSurfaceFormatCount,
VkSurfaceFormatKHR *pSurfaceFormats) {
if (!pSurfaceFormats) return false;
const auto physical_device_state = GetPhysicalDeviceState(physicalDevice);
const auto &call_state = physical_device_state->vkGetPhysicalDeviceSurfaceFormatsKHRState;
bool skip = false;
switch (call_state) {
case UNCALLED:
// Since we haven't recorded a preliminary value of *pSurfaceFormatCount, that likely means that the application didn't
// previously call this function with a NULL value of pSurfaceFormats:
skip |= log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
HandleToUint64(physicalDevice), kVUID_Core_DevLimit_MustQueryCount,
"vkGetPhysicalDeviceSurfaceFormatsKHR() called with non-NULL pSurfaceFormatCount; but no prior "
"positive value has been seen for pSurfaceFormats.");
break;
default:
auto prev_format_count = (uint32_t)physical_device_state->surface_formats.size();
if (prev_format_count != *pSurfaceFormatCount) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
HandleToUint64(physicalDevice), kVUID_Core_DevLimit_CountMismatch,
"vkGetPhysicalDeviceSurfaceFormatsKHR() called with non-NULL pSurfaceFormatCount, and with "
"pSurfaceFormats set to a value (%u) that is greater than the value (%u) that was returned "
"when pSurfaceFormatCount was NULL.",
*pSurfaceFormatCount, prev_format_count);
}
break;
}
return skip;
}
void ValidationStateTracker::PostCallRecordGetPhysicalDeviceSurfaceFormatsKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
uint32_t *pSurfaceFormatCount,
VkSurfaceFormatKHR *pSurfaceFormats,
VkResult result) {
if ((VK_SUCCESS != result) && (VK_INCOMPLETE != result)) return;
auto physical_device_state = GetPhysicalDeviceState(physicalDevice);
auto &call_state = physical_device_state->vkGetPhysicalDeviceSurfaceFormatsKHRState;
if (*pSurfaceFormatCount) {
if (call_state < QUERY_COUNT) call_state = QUERY_COUNT;
if (*pSurfaceFormatCount > physical_device_state->surface_formats.size())
physical_device_state->surface_formats.resize(*pSurfaceFormatCount);
}
if (pSurfaceFormats) {
if (call_state < QUERY_DETAILS) call_state = QUERY_DETAILS;
for (uint32_t i = 0; i < *pSurfaceFormatCount; i++) {
physical_device_state->surface_formats[i] = pSurfaceFormats[i];
}
}
}
void ValidationStateTracker::PostCallRecordGetPhysicalDeviceSurfaceFormats2KHR(VkPhysicalDevice physicalDevice,
const VkPhysicalDeviceSurfaceInfo2KHR *pSurfaceInfo,
uint32_t *pSurfaceFormatCount,
VkSurfaceFormat2KHR *pSurfaceFormats,
VkResult result) {
if ((VK_SUCCESS != result) && (VK_INCOMPLETE != result)) return;
auto physicalDeviceState = GetPhysicalDeviceState(physicalDevice);
if (*pSurfaceFormatCount) {
if (physicalDeviceState->vkGetPhysicalDeviceSurfaceFormatsKHRState < QUERY_COUNT) {
physicalDeviceState->vkGetPhysicalDeviceSurfaceFormatsKHRState = QUERY_COUNT;
}
if (*pSurfaceFormatCount > physicalDeviceState->surface_formats.size())
physicalDeviceState->surface_formats.resize(*pSurfaceFormatCount);
}
if (pSurfaceFormats) {
if (physicalDeviceState->vkGetPhysicalDeviceSurfaceFormatsKHRState < QUERY_DETAILS) {
physicalDeviceState->vkGetPhysicalDeviceSurfaceFormatsKHRState = QUERY_DETAILS;
}
for (uint32_t i = 0; i < *pSurfaceFormatCount; i++) {
physicalDeviceState->surface_formats[i] = pSurfaceFormats[i].surfaceFormat;
}
}
}
void ValidationStateTracker::PreCallRecordCmdBeginDebugUtilsLabelEXT(VkCommandBuffer commandBuffer,
const VkDebugUtilsLabelEXT *pLabelInfo) {
BeginCmdDebugUtilsLabel(report_data, commandBuffer, pLabelInfo);
}
void ValidationStateTracker::PostCallRecordCmdEndDebugUtilsLabelEXT(VkCommandBuffer commandBuffer) {
EndCmdDebugUtilsLabel(report_data, commandBuffer);
}
void ValidationStateTracker::PreCallRecordCmdInsertDebugUtilsLabelEXT(VkCommandBuffer commandBuffer,
const VkDebugUtilsLabelEXT *pLabelInfo) {
InsertCmdDebugUtilsLabel(report_data, commandBuffer, pLabelInfo);
// Squirrel away an easily accessible copy.
CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
cb_state->debug_label = LoggingLabel(pLabelInfo);
}
void ValidationStateTracker::RecordEnumeratePhysicalDeviceGroupsState(
uint32_t *pPhysicalDeviceGroupCount, VkPhysicalDeviceGroupPropertiesKHR *pPhysicalDeviceGroupProperties) {
if (NULL != pPhysicalDeviceGroupProperties) {
for (uint32_t i = 0; i < *pPhysicalDeviceGroupCount; i++) {
for (uint32_t j = 0; j < pPhysicalDeviceGroupProperties[i].physicalDeviceCount; j++) {
VkPhysicalDevice cur_phys_dev = pPhysicalDeviceGroupProperties[i].physicalDevices[j];
auto &phys_device_state = physical_device_map[cur_phys_dev];
phys_device_state.phys_device = cur_phys_dev;
// Init actual features for each physical device
DispatchGetPhysicalDeviceFeatures(cur_phys_dev, &phys_device_state.features2.features);
}
}
}
}
void ValidationStateTracker::PostCallRecordEnumeratePhysicalDeviceGroups(
VkInstance instance, uint32_t *pPhysicalDeviceGroupCount, VkPhysicalDeviceGroupPropertiesKHR *pPhysicalDeviceGroupProperties,
VkResult result) {
if ((VK_SUCCESS != result) && (VK_INCOMPLETE != result)) return;
RecordEnumeratePhysicalDeviceGroupsState(pPhysicalDeviceGroupCount, pPhysicalDeviceGroupProperties);
}
void ValidationStateTracker::PostCallRecordEnumeratePhysicalDeviceGroupsKHR(
VkInstance instance, uint32_t *pPhysicalDeviceGroupCount, VkPhysicalDeviceGroupPropertiesKHR *pPhysicalDeviceGroupProperties,
VkResult result) {
if ((VK_SUCCESS != result) && (VK_INCOMPLETE != result)) return;
RecordEnumeratePhysicalDeviceGroupsState(pPhysicalDeviceGroupCount, pPhysicalDeviceGroupProperties);
}
bool CoreChecks::ValidateDescriptorUpdateTemplate(const char *func_name,
const VkDescriptorUpdateTemplateCreateInfoKHR *pCreateInfo) {
bool skip = false;
const auto layout = GetDescriptorSetLayout(this, pCreateInfo->descriptorSetLayout);
if (VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET == pCreateInfo->templateType && !layout) {
const VulkanTypedHandle ds_typed(pCreateInfo->descriptorSetLayout, kVulkanObjectTypeDescriptorSetLayout);
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT,
ds_typed.handle, "VUID-VkDescriptorUpdateTemplateCreateInfo-templateType-00350",
"%s: Invalid pCreateInfo->descriptorSetLayout (%s)", func_name, report_data->FormatHandle(ds_typed).c_str());
} else if (VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR == pCreateInfo->templateType) {
auto bind_point = pCreateInfo->pipelineBindPoint;
bool valid_bp = (bind_point == VK_PIPELINE_BIND_POINT_GRAPHICS) || (bind_point == VK_PIPELINE_BIND_POINT_COMPUTE);
if (!valid_bp) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkDescriptorUpdateTemplateCreateInfo-templateType-00351",
"%s: Invalid pCreateInfo->pipelineBindPoint (%" PRIu32 ").", func_name, static_cast<uint32_t>(bind_point));
}
const auto pipeline_layout = GetPipelineLayout(pCreateInfo->pipelineLayout);
if (!pipeline_layout) {
const VulkanTypedHandle pl_typed(pCreateInfo->pipelineLayout, kVulkanObjectTypePipelineLayout);
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT,
pl_typed.handle, "VUID-VkDescriptorUpdateTemplateCreateInfo-templateType-00352",
"%s: Invalid pCreateInfo->pipelineLayout (%s)", func_name, report_data->FormatHandle(pl_typed).c_str());
} else {
const uint32_t pd_set = pCreateInfo->set;
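// pCreateInfo->set must select an in-range set layout that was created as a push-descriptor layout.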
if ((pd_set >= pipeline_layout->set_layouts.size()) || !pipeline_layout->set_layouts[pd_set] ||
!pipeline_layout->set_layouts[pd_set]->IsPushDescriptor()) {
const VulkanTypedHandle pl_typed(pCreateInfo->pipelineLayout, kVulkanObjectTypePipelineLayout);
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT,
pl_typed.handle, "VUID-VkDescriptorUpdateTemplateCreateInfo-templateType-00353",
"%s: pCreateInfo->set (%" PRIu32
") does not refer to the push descriptor set layout for pCreateInfo->pipelineLayout (%s).",
func_name, pd_set, report_data->FormatHandle(pl_typed).c_str());
}
}
}
return skip;
}
bool CoreChecks::PreCallValidateCreateDescriptorUpdateTemplate(VkDevice device,
const VkDescriptorUpdateTemplateCreateInfoKHR *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkDescriptorUpdateTemplateKHR *pDescriptorUpdateTemplate) {
bool skip = ValidateDescriptorUpdateTemplate("vkCreateDescriptorUpdateTemplate()", pCreateInfo);
return skip;
}
bool CoreChecks::PreCallValidateCreateDescriptorUpdateTemplateKHR(VkDevice device,
const VkDescriptorUpdateTemplateCreateInfoKHR *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkDescriptorUpdateTemplateKHR *pDescriptorUpdateTemplate) {
bool skip = ValidateDescriptorUpdateTemplate("vkCreateDescriptorUpdateTemplateKHR()", pCreateInfo);
return skip;
}
void ValidationStateTracker::PreCallRecordDestroyDescriptorUpdateTemplate(VkDevice device,
VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
const VkAllocationCallbacks *pAllocator) {
if (!descriptorUpdateTemplate) return;
desc_template_map.erase(descriptorUpdateTemplate);
}
void ValidationStateTracker::PreCallRecordDestroyDescriptorUpdateTemplateKHR(VkDevice device,
VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
const VkAllocationCallbacks *pAllocator) {
if (!descriptorUpdateTemplate) return;
desc_template_map.erase(descriptorUpdateTemplate);
}
void ValidationStateTracker::RecordCreateDescriptorUpdateTemplateState(const VkDescriptorUpdateTemplateCreateInfoKHR *pCreateInfo,
VkDescriptorUpdateTemplateKHR *pDescriptorUpdateTemplate) {
safe_VkDescriptorUpdateTemplateCreateInfo *local_create_info = new safe_VkDescriptorUpdateTemplateCreateInfo(pCreateInfo);
std::unique_ptr<TEMPLATE_STATE> template_state(new TEMPLATE_STATE(*pDescriptorUpdateTemplate, local_create_info));
desc_template_map[*pDescriptorUpdateTemplate] = std::move(template_state);
}
void ValidationStateTracker::PostCallRecordCreateDescriptorUpdateTemplate(
VkDevice device, const VkDescriptorUpdateTemplateCreateInfoKHR *pCreateInfo, const VkAllocationCallbacks *pAllocator,
VkDescriptorUpdateTemplateKHR *pDescriptorUpdateTemplate, VkResult result) {
if (VK_SUCCESS != result) return;
RecordCreateDescriptorUpdateTemplateState(pCreateInfo, pDescriptorUpdateTemplate);
}
void ValidationStateTracker::PostCallRecordCreateDescriptorUpdateTemplateKHR(
VkDevice device, const VkDescriptorUpdateTemplateCreateInfoKHR *pCreateInfo, const VkAllocationCallbacks *pAllocator,
VkDescriptorUpdateTemplateKHR *pDescriptorUpdateTemplate, VkResult result) {
if (VK_SUCCESS != result) return;
RecordCreateDescriptorUpdateTemplateState(pCreateInfo, pDescriptorUpdateTemplate);
}
bool CoreChecks::ValidateUpdateDescriptorSetWithTemplate(VkDescriptorSet descriptorSet,
VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
const void *pData) {
bool skip = false;
auto const template_map_entry = desc_template_map.find(descriptorUpdateTemplate);
if ((template_map_entry == desc_template_map.end()) || (template_map_entry->second.get() == nullptr)) {
// Object tracker will report errors for invalid descriptorUpdateTemplate values, avoiding a crash in release builds
// but retaining the assert as template support is new enough to want to investigate these in debug builds.
assert(0);
} else {
const TEMPLATE_STATE *template_state = template_map_entry->second.get();
// TODO: Validate template push descriptor updates
if (template_state->create_info.templateType == VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET) {
skip = ValidateUpdateDescriptorSetsWithTemplateKHR(descriptorSet, template_state, pData);
}
}
return skip;
}
bool CoreChecks::PreCallValidateUpdateDescriptorSetWithTemplate(VkDevice device, VkDescriptorSet descriptorSet,
VkDescriptorUpdateTemplate descriptorUpdateTemplate,
const void *pData) {
return ValidateUpdateDescriptorSetWithTemplate(descriptorSet, descriptorUpdateTemplate, pData);
}
bool CoreChecks::PreCallValidateUpdateDescriptorSetWithTemplateKHR(VkDevice device, VkDescriptorSet descriptorSet,
VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
const void *pData) {
return ValidateUpdateDescriptorSetWithTemplate(descriptorSet, descriptorUpdateTemplate, pData);
}
void ValidationStateTracker::RecordUpdateDescriptorSetWithTemplateState(VkDescriptorSet descriptorSet,
VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
const void *pData) {
auto const template_map_entry = desc_template_map.find(descriptorUpdateTemplate);
if ((template_map_entry == desc_template_map.end()) || (template_map_entry->second.get() == nullptr)) {
assert(0);
} else {
const TEMPLATE_STATE *template_state = template_map_entry->second.get();
// TODO: Record template push descriptor updates
if (template_state->create_info.templateType == VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET) {
PerformUpdateDescriptorSetsWithTemplateKHR(descriptorSet, template_state, pData);
}
}
}
void ValidationStateTracker::PreCallRecordUpdateDescriptorSetWithTemplate(VkDevice device, VkDescriptorSet descriptorSet,
VkDescriptorUpdateTemplate descriptorUpdateTemplate,
const void *pData) {
RecordUpdateDescriptorSetWithTemplateState(descriptorSet, descriptorUpdateTemplate, pData);
}
void ValidationStateTracker::PreCallRecordUpdateDescriptorSetWithTemplateKHR(VkDevice device, VkDescriptorSet descriptorSet,
VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
const void *pData) {
RecordUpdateDescriptorSetWithTemplateState(descriptorSet, descriptorUpdateTemplate, pData);
}
static std::shared_ptr<cvdescriptorset::DescriptorSetLayout const> GetDslFromPipelineLayout(
PIPELINE_LAYOUT_STATE const *layout_data, uint32_t set) {
std::shared_ptr<cvdescriptorset::DescriptorSetLayout const> dsl = nullptr;
if (layout_data && (set < layout_data->set_layouts.size())) {
dsl = layout_data->set_layouts[set];
}
return dsl;
}
bool CoreChecks::PreCallValidateCmdPushDescriptorSetWithTemplateKHR(VkCommandBuffer commandBuffer,
VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
VkPipelineLayout layout, uint32_t set, const void *pData) {
CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
const char *const func_name = "vkPushDescriptorSetWithTemplateKHR()";
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_PUSHDESCRIPTORSETWITHTEMPLATEKHR, func_name);
auto layout_data = GetPipelineLayout(layout);
auto dsl = GetDslFromPipelineLayout(layout_data, set);
const VulkanTypedHandle layout_typed(layout, kVulkanObjectTypePipelineLayout);
// Validate the set index points to a push descriptor set and is in range
if (dsl) {
if (!dsl->IsPushDescriptor()) {
skip = log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT,
layout_typed.handle, "VUID-vkCmdPushDescriptorSetKHR-set-00365",
"%s: Set index %" PRIu32 " does not match push descriptor set layout index for %s.", func_name, set,
report_data->FormatHandle(layout_typed).c_str());
}
} else if (layout_data && (set >= layout_data->set_layouts.size())) {
skip = log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT,
layout_typed.handle, "VUID-vkCmdPushDescriptorSetKHR-set-00364",
"%s: Set index %" PRIu32 " is outside of range for %s (set < %" PRIu32 ").", func_name, set,
report_data->FormatHandle(layout_typed).c_str(), static_cast<uint32_t>(layout_data->set_layouts.size()));
}
const auto template_state = GetDescriptorTemplateState(descriptorUpdateTemplate);
if (template_state) {
const auto &template_ci = template_state->create_info;
static const std::map<VkPipelineBindPoint, std::string> bind_errors = {
std::make_pair(VK_PIPELINE_BIND_POINT_GRAPHICS, "VUID-vkCmdPushDescriptorSetWithTemplateKHR-commandBuffer-00366"),
std::make_pair(VK_PIPELINE_BIND_POINT_COMPUTE, "VUID-vkCmdPushDescriptorSetWithTemplateKHR-commandBuffer-00366"),
std::make_pair(VK_PIPELINE_BIND_POINT_RAY_TRACING_NV,
"VUID-vkCmdPushDescriptorSetWithTemplateKHR-commandBuffer-00366")};
skip |= ValidatePipelineBindPoint(cb_state, template_ci.pipelineBindPoint, func_name, bind_errors);
if (template_ci.templateType != VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer), kVUID_Core_PushDescriptorUpdate_TemplateType,
"%s: descriptorUpdateTemplate %s was not created with flag "
"VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR.",
func_name, report_data->FormatHandle(descriptorUpdateTemplate).c_str());
}
if (template_ci.set != set) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer), kVUID_Core_PushDescriptorUpdate_Template_SetMismatched,
"%s: descriptorUpdateTemplate %s created with set %" PRIu32
" does not match command parameter set %" PRIu32 ".",
func_name, report_data->FormatHandle(descriptorUpdateTemplate).c_str(), template_ci.set, set);
}
if (!CompatForSet(set, layout_data, GetPipelineLayout(template_ci.pipelineLayout))) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer), kVUID_Core_PushDescriptorUpdate_Template_LayoutMismatched,
"%s: descriptorUpdateTemplate %s created with %s is incompatible with command parameter "
"%s for set %" PRIu32,
func_name, report_data->FormatHandle(descriptorUpdateTemplate).c_str(),
report_data->FormatHandle(template_ci.pipelineLayout).c_str(),
report_data->FormatHandle(layout).c_str(), set);
}
}
if (dsl && template_state) {
// Create an empty proxy in order to use the existing descriptor set update validation
cvdescriptorset::DescriptorSet proxy_ds(VK_NULL_HANDLE, VK_NULL_HANDLE, dsl, 0, this);
// Decode the template into a set of write updates
cvdescriptorset::DecodedTemplateUpdate decoded_template(this, VK_NULL_HANDLE, template_state, pData,
dsl->GetDescriptorSetLayout());
// Validate the decoded update against the proxy_ds
skip |= ValidatePushDescriptorsUpdate(&proxy_ds, static_cast<uint32_t>(decoded_template.desc_writes.size()),
decoded_template.desc_writes.data(), func_name);
}
return skip;
}
void CoreChecks::PreCallRecordCmdPushDescriptorSetWithTemplateKHR(VkCommandBuffer commandBuffer,
VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
VkPipelineLayout layout, uint32_t set, const void *pData) {
CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
const auto template_state = GetDescriptorTemplateState(descriptorUpdateTemplate);
if (template_state) {
auto layout_data = GetPipelineLayout(layout);
auto dsl = GetDslFromPipelineLayout(layout_data, set);
const auto &template_ci = template_state->create_info;
if (dsl && !dsl->IsDestroyed()) {
// Decode the template into a set of write updates
cvdescriptorset::DecodedTemplateUpdate decoded_template(this, VK_NULL_HANDLE, template_state, pData,
dsl->GetDescriptorSetLayout());
RecordCmdPushDescriptorSetState(cb_state, template_ci.pipelineBindPoint, layout, set,
static_cast<uint32_t>(decoded_template.desc_writes.size()),
decoded_template.desc_writes.data());
}
}
}
void ValidationStateTracker::RecordGetPhysicalDeviceDisplayPlanePropertiesState(VkPhysicalDevice physicalDevice,
uint32_t *pPropertyCount, void *pProperties) {
auto physical_device_state = GetPhysicalDeviceState(physicalDevice);
if (*pPropertyCount) {
if (physical_device_state->vkGetPhysicalDeviceDisplayPlanePropertiesKHRState < QUERY_COUNT) {
physical_device_state->vkGetPhysicalDeviceDisplayPlanePropertiesKHRState = QUERY_COUNT;
}
physical_device_state->display_plane_property_count = *pPropertyCount;
}
if (pProperties) {
if (physical_device_state->vkGetPhysicalDeviceDisplayPlanePropertiesKHRState < QUERY_DETAILS) {
physical_device_state->vkGetPhysicalDeviceDisplayPlanePropertiesKHRState = QUERY_DETAILS;
}
}
}
void ValidationStateTracker::PostCallRecordGetPhysicalDeviceDisplayPlanePropertiesKHR(VkPhysicalDevice physicalDevice,
uint32_t *pPropertyCount,
VkDisplayPlanePropertiesKHR *pProperties,
VkResult result) {
if ((VK_SUCCESS != result) && (VK_INCOMPLETE != result)) return;
RecordGetPhysicalDeviceDisplayPlanePropertiesState(physicalDevice, pPropertyCount, pProperties);
}
void ValidationStateTracker::PostCallRecordGetPhysicalDeviceDisplayPlaneProperties2KHR(VkPhysicalDevice physicalDevice,
uint32_t *pPropertyCount,
VkDisplayPlaneProperties2KHR *pProperties,
VkResult result) {
if ((VK_SUCCESS != result) && (VK_INCOMPLETE != result)) return;
RecordGetPhysicalDeviceDisplayPlanePropertiesState(physicalDevice, pPropertyCount, pProperties);
}
bool CoreChecks::ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(VkPhysicalDevice physicalDevice, uint32_t planeIndex,
const char *api_name) const {
bool skip = false;
const auto physical_device_state = GetPhysicalDeviceState(physicalDevice);
if (physical_device_state->vkGetPhysicalDeviceDisplayPlanePropertiesKHRState == UNCALLED) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
HandleToUint64(physicalDevice), kVUID_Core_Swapchain_GetSupportedDisplaysWithoutQuery,
"Potential problem with calling %s() without first retrieving properties from "
"vkGetPhysicalDeviceDisplayPlanePropertiesKHR or vkGetPhysicalDeviceDisplayPlaneProperties2KHR.",
api_name);
} else {
if (planeIndex >= physical_device_state->display_plane_property_count) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
HandleToUint64(physicalDevice), "VUID-vkGetDisplayPlaneSupportedDisplaysKHR-planeIndex-01249",
"%s(): planeIndex must be in the range [0, %d] that was returned by vkGetPhysicalDeviceDisplayPlanePropertiesKHR "
"or vkGetPhysicalDeviceDisplayPlaneProperties2KHR. Do you have the plane index hardcoded?",
api_name, physical_device_state->display_plane_property_count - 1);
}
}
return skip;
}
bool CoreChecks::PreCallValidateGetDisplayPlaneSupportedDisplaysKHR(VkPhysicalDevice physicalDevice, uint32_t planeIndex,
uint32_t *pDisplayCount, VkDisplayKHR *pDisplays) {
bool skip = false;
skip |= ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(physicalDevice, planeIndex,
"vkGetDisplayPlaneSupportedDisplaysKHR");
return skip;
}
bool CoreChecks::PreCallValidateGetDisplayPlaneCapabilitiesKHR(VkPhysicalDevice physicalDevice, VkDisplayModeKHR mode,
uint32_t planeIndex, VkDisplayPlaneCapabilitiesKHR *pCapabilities) {
bool skip = false;
skip |= ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(physicalDevice, planeIndex, "vkGetDisplayPlaneCapabilitiesKHR");
return skip;
}
bool CoreChecks::PreCallValidateGetDisplayPlaneCapabilities2KHR(VkPhysicalDevice physicalDevice,
const VkDisplayPlaneInfo2KHR *pDisplayPlaneInfo,
VkDisplayPlaneCapabilities2KHR *pCapabilities) {
bool skip = false;
skip |= ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(physicalDevice, pDisplayPlaneInfo->planeIndex,
"vkGetDisplayPlaneCapabilities2KHR");
return skip;
}
bool CoreChecks::PreCallValidateCmdDebugMarkerBeginEXT(VkCommandBuffer commandBuffer,
const VkDebugMarkerMarkerInfoEXT *pMarkerInfo) {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
return ValidateCmd(cb_state, CMD_DEBUGMARKERBEGINEXT, "vkCmdDebugMarkerBeginEXT()");
}
bool CoreChecks::PreCallValidateCmdDebugMarkerEndEXT(VkCommandBuffer commandBuffer) {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
return ValidateCmd(cb_state, CMD_DEBUGMARKERENDEXT, "vkCmdDebugMarkerEndEXT()");
}
bool CoreChecks::PreCallValidateCmdBeginQueryIndexedEXT(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query,
VkQueryControlFlags flags, uint32_t index) {
if (disabled.query_validation) return false;
CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
QueryObject query_obj(queryPool, query, index);
const char *cmd_name = "vkCmdBeginQueryIndexedEXT()";
bool skip = ValidateBeginQuery(
cb_state, query_obj, flags, CMD_BEGINQUERYINDEXEDEXT, cmd_name, "VUID-vkCmdBeginQueryIndexedEXT-commandBuffer-cmdpool",
"VUID-vkCmdBeginQueryIndexedEXT-queryType-02338", "VUID-vkCmdBeginQueryIndexedEXT-queryType-00803",
"VUID-vkCmdBeginQueryIndexedEXT-queryType-00800", "VUID-vkCmdBeginQueryIndexedEXT-query-00802");
// Extension specific VU's
const auto &query_pool_ci = GetQueryPoolState(query_obj.pool)->createInfo;
if (query_pool_ci.queryType == VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT) {
if (device_extensions.vk_ext_transform_feedback &&
(index >= phys_dev_ext_props.transform_feedback_props.maxTransformFeedbackStreams)) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer), "VUID-vkCmdBeginQueryIndexedEXT-queryType-02339",
"%s: index %" PRIu32
" must be less than VkPhysicalDeviceTransformFeedbackPropertiesEXT::maxTransformFeedbackStreams %" PRIu32 ".",
cmd_name, index, phys_dev_ext_props.transform_feedback_props.maxTransformFeedbackStreams);
}
} else if (index != 0) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer), "VUID-vkCmdBeginQueryIndexedEXT-queryType-02340",
"%s: index %" PRIu32
" must be zero if %s was not created with type VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT.",
cmd_name, index, report_data->FormatHandle(queryPool).c_str());
}
return skip;
}
void ValidationStateTracker::PostCallRecordCmdBeginQueryIndexedEXT(VkCommandBuffer commandBuffer, VkQueryPool queryPool,
uint32_t query, VkQueryControlFlags flags, uint32_t index) {
QueryObject query_obj = {queryPool, query, index};
CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
RecordCmdBeginQuery(cb_state, query_obj);
}
void CoreChecks::PreCallRecordCmdBeginQueryIndexedEXT(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query,
VkQueryControlFlags flags, uint32_t index) {
QueryObject query_obj = {queryPool, query, index};
EnqueueVerifyBeginQuery(commandBuffer, query_obj);
}
bool CoreChecks::PreCallValidateCmdEndQueryIndexedEXT(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query,
uint32_t index) {
if (disabled.query_validation) return false;
QueryObject query_obj = {queryPool, query, index};
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
return ValidateCmdEndQuery(cb_state, query_obj, CMD_ENDQUERYINDEXEDEXT, "vkCmdEndQueryIndexedEXT()",
"VUID-vkCmdEndQueryIndexedEXT-commandBuffer-cmdpool", "VUID-vkCmdEndQueryIndexedEXT-None-02342");
}
void ValidationStateTracker::PostCallRecordCmdEndQueryIndexedEXT(VkCommandBuffer commandBuffer, VkQueryPool queryPool,
uint32_t query, uint32_t index) {
QueryObject query_obj = {queryPool, query, index};
CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
RecordCmdEndQuery(cb_state, query_obj);
}
bool CoreChecks::PreCallValidateCmdSetDiscardRectangleEXT(VkCommandBuffer commandBuffer, uint32_t firstDiscardRectangle,
uint32_t discardRectangleCount, const VkRect2D *pDiscardRectangles) {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
// Minimal validation for command buffer state
return ValidateCmd(cb_state, CMD_SETDISCARDRECTANGLEEXT, "vkCmdSetDiscardRectangleEXT()");
}
bool CoreChecks::PreCallValidateCmdSetSampleLocationsEXT(VkCommandBuffer commandBuffer,
const VkSampleLocationsInfoEXT *pSampleLocationsInfo) {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
// Minimal validation for command buffer state
return ValidateCmd(cb_state, CMD_SETSAMPLELOCATIONSEXT, "vkCmdSetSampleLocationsEXT()");
}
bool CoreChecks::ValidateCreateSamplerYcbcrConversion(const char *func_name,
const VkSamplerYcbcrConversionCreateInfo *create_info) const {
bool skip = false;
if (device_extensions.vk_android_external_memory_android_hardware_buffer) {
skip |= ValidateCreateSamplerYcbcrConversionANDROID(create_info);
} else { // Not android hardware buffer
if (VK_FORMAT_UNDEFINED == create_info->format) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_EXT, 0,
"VUID-VkSamplerYcbcrConversionCreateInfo-format-01649",
"%s: CreateInfo format type is VK_FORMAT_UNDEFINED.", func_name);
}
}
return skip;
}
bool CoreChecks::PreCallValidateCreateSamplerYcbcrConversion(VkDevice device, const VkSamplerYcbcrConversionCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkSamplerYcbcrConversion *pYcbcrConversion) {
return ValidateCreateSamplerYcbcrConversion("vkCreateSamplerYcbcrConversion()", pCreateInfo);
}
bool CoreChecks::PreCallValidateCreateSamplerYcbcrConversionKHR(VkDevice device,
const VkSamplerYcbcrConversionCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkSamplerYcbcrConversion *pYcbcrConversion) {
return ValidateCreateSamplerYcbcrConversion("vkCreateSamplerYcbcrConversionKHR()", pCreateInfo);
}
void ValidationStateTracker::RecordCreateSamplerYcbcrConversionState(const VkSamplerYcbcrConversionCreateInfo *create_info,
VkSamplerYcbcrConversion ycbcr_conversion) {
if (device_extensions.vk_android_external_memory_android_hardware_buffer) {
RecordCreateSamplerYcbcrConversionANDROID(create_info, ycbcr_conversion);
}
}
void ValidationStateTracker::PostCallRecordCreateSamplerYcbcrConversion(VkDevice device,
const VkSamplerYcbcrConversionCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkSamplerYcbcrConversion *pYcbcrConversion,
VkResult result) {
if (VK_SUCCESS != result) return;
RecordCreateSamplerYcbcrConversionState(pCreateInfo, *pYcbcrConversion);
}
void ValidationStateTracker::PostCallRecordCreateSamplerYcbcrConversionKHR(VkDevice device,
const VkSamplerYcbcrConversionCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkSamplerYcbcrConversion *pYcbcrConversion,
VkResult result) {
if (VK_SUCCESS != result) return;
RecordCreateSamplerYcbcrConversionState(pCreateInfo, *pYcbcrConversion);
}
void ValidationStateTracker::PostCallRecordDestroySamplerYcbcrConversion(VkDevice device, VkSamplerYcbcrConversion ycbcrConversion,
const VkAllocationCallbacks *pAllocator) {
if (!ycbcrConversion) return;
if (device_extensions.vk_android_external_memory_android_hardware_buffer) {
RecordDestroySamplerYcbcrConversionANDROID(ycbcrConversion);
}
}
void ValidationStateTracker::PostCallRecordDestroySamplerYcbcrConversionKHR(VkDevice device,
VkSamplerYcbcrConversion ycbcrConversion,
const VkAllocationCallbacks *pAllocator) {
if (!ycbcrConversion) return;
if (device_extensions.vk_android_external_memory_android_hardware_buffer) {
RecordDestroySamplerYcbcrConversionANDROID(ycbcrConversion);
}
}
bool CoreChecks::PreCallValidateGetBufferDeviceAddressEXT(VkDevice device, const VkBufferDeviceAddressInfoEXT *pInfo) {
bool skip = false;
if (!enabled_features.buffer_address.bufferDeviceAddress) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
HandleToUint64(pInfo->buffer), "VUID-vkGetBufferDeviceAddressEXT-None-02598",
"The bufferDeviceAddress feature must: be enabled.");
}
if (physical_device_count > 1 && !enabled_features.buffer_address.bufferDeviceAddressMultiDevice) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
HandleToUint64(pInfo->buffer), "VUID-vkGetBufferDeviceAddressEXT-device-02599",
"If device was created with multiple physical devices, then the "
"bufferDeviceAddressMultiDevice feature must: be enabled.");
}
auto buffer_state = GetBufferState(pInfo->buffer);
if (buffer_state) {
if (!(buffer_state->createInfo.flags & VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT_EXT)) {
skip |= ValidateMemoryIsBoundToBuffer(buffer_state, "vkGetBufferDeviceAddressEXT()",
"VUID-VkBufferDeviceAddressInfoEXT-buffer-02600");
}
skip |= ValidateBufferUsageFlags(buffer_state, VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_EXT, true,
"VUID-VkBufferDeviceAddressInfoEXT-buffer-02601", "vkGetBufferDeviceAddressEXT()",
"VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_EXT");
}
return skip;
}
bool CoreChecks::ValidateQueryRange(VkDevice device, VkQueryPool queryPool, uint32_t totalCount, uint32_t firstQuery,
uint32_t queryCount, const char *vuid_badfirst, const char *vuid_badrange) const {
bool skip = false;
if (firstQuery >= totalCount) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
vuid_badfirst, "firstQuery (%" PRIu32 ") greater than or equal to query pool count (%" PRIu32 ") for %s",
firstQuery, totalCount, report_data->FormatHandle(queryPool).c_str());
}
if ((firstQuery + queryCount) > totalCount) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
vuid_badrange, "Query range [%" PRIu32 ", %" PRIu32 ") goes beyond query pool count (%" PRIu32 ") for %s",
firstQuery, firstQuery + queryCount, totalCount, report_data->FormatHandle(queryPool).c_str());
}
return skip;
}
bool CoreChecks::PreCallValidateResetQueryPoolEXT(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery,
uint32_t queryCount) {
if (disabled.query_validation) return false;
bool skip = false;
if (!enabled_features.host_query_reset_features.hostQueryReset) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
"VUID-vkResetQueryPoolEXT-None-02665", "Host query reset not enabled for device");
}
const auto query_pool_state = GetQueryPoolState(queryPool);
if (query_pool_state) {
skip |= ValidateQueryRange(device, queryPool, query_pool_state->createInfo.queryCount, firstQuery, queryCount,
"VUID-vkResetQueryPoolEXT-firstQuery-02666", "VUID-vkResetQueryPoolEXT-firstQuery-02667");
}
return skip;
}
void ValidationStateTracker::PostCallRecordResetQueryPoolEXT(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery,
uint32_t queryCount) {
// Do nothing if the feature is not enabled.
if (!enabled_features.host_query_reset_features.hostQueryReset) return;
// Do nothing if the query pool has been destroyed.
auto query_pool_state = GetQueryPoolState(queryPool);
if (!query_pool_state) return;
// Reset the state of existing entries.
QueryObject query_obj{queryPool, 0};
const uint32_t max_query_count = std::min(queryCount, query_pool_state->createInfo.queryCount - firstQuery);
for (uint32_t i = 0; i < max_query_count; ++i) {
query_obj.query = firstQuery + i;
auto query_it = queryToStateMap.find(query_obj);
if (query_it != queryToStateMap.end()) query_it->second = QUERYSTATE_RESET;
}
}
void CoreChecks::PreCallRecordGetPhysicalDeviceProperties(VkPhysicalDevice physicalDevice,
VkPhysicalDeviceProperties *pPhysicalDeviceProperties) {
// There is an implicit layer that can cause this call to return 0 for maxBoundDescriptorSets - Ignore such calls
if (enabled.gpu_validation && enabled.gpu_validation_reserve_binding_slot &&
pPhysicalDeviceProperties->limits.maxBoundDescriptorSets > 0) {
if (pPhysicalDeviceProperties->limits.maxBoundDescriptorSets > 1) {
pPhysicalDeviceProperties->limits.maxBoundDescriptorSets -= 1;
} else {
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
HandleToUint64(physicalDevice), "UNASSIGNED-GPU-Assisted Validation Setup Error.",
"Unable to reserve descriptor binding slot on a device with only one slot.");
}
}
}
VkResult CoreChecks::CoreLayerCreateValidationCacheEXT(VkDevice device, const VkValidationCacheCreateInfoEXT *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkValidationCacheEXT *pValidationCache) {
*pValidationCache = ValidationCache::Create(pCreateInfo);
return *pValidationCache ? VK_SUCCESS : VK_ERROR_INITIALIZATION_FAILED;
}
void CoreChecks::CoreLayerDestroyValidationCacheEXT(VkDevice device, VkValidationCacheEXT validationCache,
const VkAllocationCallbacks *pAllocator) {
delete CastFromHandle<ValidationCache *>(validationCache);
}
VkResult CoreChecks::CoreLayerGetValidationCacheDataEXT(VkDevice device, VkValidationCacheEXT validationCache, size_t *pDataSize,
void *pData) {
size_t inSize = *pDataSize;
CastFromHandle<ValidationCache *>(validationCache)->Write(pDataSize, pData);
return (pData && *pDataSize != inSize) ? VK_INCOMPLETE : VK_SUCCESS;
}
VkResult CoreChecks::CoreLayerMergeValidationCachesEXT(VkDevice device, VkValidationCacheEXT dstCache, uint32_t srcCacheCount,
const VkValidationCacheEXT *pSrcCaches) {
bool skip = false;
auto dst = CastFromHandle<ValidationCache *>(dstCache);
VkResult result = VK_SUCCESS;
for (uint32_t i = 0; i < srcCacheCount; i++) {
auto src = CastFromHandle<const ValidationCache *>(pSrcCaches[i]);
if (src == dst) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT, 0,
"VUID-vkMergeValidationCachesEXT-dstCache-01536",
"vkMergeValidationCachesEXT: dstCache (0x%" PRIx64 ") must not appear in pSrcCaches array.",
HandleToUint64(dstCache));
result = VK_ERROR_VALIDATION_FAILED_EXT;
}
if (!skip) {
dst->Merge(src);
}
}
return result;
}
bool CoreChecks::PreCallValidateCmdSetDeviceMask(VkCommandBuffer commandBuffer, uint32_t deviceMask) {
bool skip = false;
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
skip |= ValidateDeviceMaskToPhysicalDeviceCount(deviceMask, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdSetDeviceMask-deviceMask-00108");
skip |= ValidateDeviceMaskToZero(deviceMask, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer),
"VUID-vkCmdSetDeviceMask-deviceMask-00109");
skip |= ValidateDeviceMaskToCommandBuffer(cb_state, deviceMask, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdSetDeviceMask-deviceMask-00110");
if (cb_state->activeRenderPass) {
skip |= ValidateDeviceMaskToRenderPass(cb_state, deviceMask, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdSetDeviceMask-deviceMask-00111");
}
return skip;
}
bool CoreChecks::ValidateQueryPoolStride(const std::string &vuid_not_64, const std::string &vuid_64, const VkDeviceSize stride,
const char *parameter_name, const uint64_t parameter_value,
const VkQueryResultFlags flags) const {
bool skip = false;
if (flags & VK_QUERY_RESULT_64_BIT) {
static const int condition_multiples = 0b0111;
if ((stride & condition_multiples) || (parameter_value & condition_multiples)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid_64,
"stride %" PRIx64 " or %s %" PRIx64 " is invalid.", stride, parameter_name, parameter_value);
}
} else {
static const int condition_multiples = 0b0011;
if ((stride & condition_multiples) || (parameter_value & condition_multiples)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid_not_64,
"stride %" PRIx64 " or %s %" PRIx64 " is invalid.", stride, parameter_name, parameter_value);
}
}
return skip;
}
bool CoreChecks::ValidateCmdDrawStrideWithStruct(VkCommandBuffer commandBuffer, const std::string &vuid, const uint32_t stride,
const char *struct_name, const uint32_t struct_size) const {
bool skip = false;
static const int condition_multiples = 0b0011;
if ((stride & condition_multiples) || (stride < struct_size)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), vuid, "stride %d is invalid or less than sizeof(%s) %d.", stride,
struct_name, struct_size);
}
return skip;
}
bool CoreChecks::ValidateCmdDrawStrideWithBuffer(VkCommandBuffer commandBuffer, const std::string &vuid, const uint32_t stride,
const char *struct_name, const uint32_t struct_size, const uint32_t drawCount,
const VkDeviceSize offset, const BUFFER_STATE *buffer_state) const {
bool skip = false;
uint64_t validation_value = stride * (drawCount - 1) + offset + struct_size;
if (validation_value > buffer_state->createInfo.size) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), vuid,
"stride[%d] * (drawCount[%d] - 1) + offset[%" PRIx64 "] + sizeof(%s)[%d] = %" PRIx64
" is greater than the size[%" PRIx64 "] of %s.",
stride, drawCount, offset, struct_name, struct_size, validation_value, buffer_state->createInfo.size,
report_data->FormatHandle(buffer_state->buffer).c_str());
}
return skip;
}
void PIPELINE_STATE::initGraphicsPipeline(ValidationStateTracker *state_data, const VkGraphicsPipelineCreateInfo *pCreateInfo,
std::shared_ptr<RENDER_PASS_STATE> &&rpstate) {
reset();
bool uses_color_attachment = false;
bool uses_depthstencil_attachment = false;
if (pCreateInfo->subpass < rpstate->createInfo.subpassCount) {
const auto &subpass = rpstate->createInfo.pSubpasses[pCreateInfo->subpass];
for (uint32_t i = 0; i < subpass.colorAttachmentCount; ++i) {
if (subpass.pColorAttachments[i].attachment != VK_ATTACHMENT_UNUSED) {
uses_color_attachment = true;
break;
}
}
if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
uses_depthstencil_attachment = true;
}
}
graphicsPipelineCI.initialize(pCreateInfo, uses_color_attachment, uses_depthstencil_attachment);
stage_state.resize(pCreateInfo->stageCount);
for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
const VkPipelineShaderStageCreateInfo *pPSSCI = &pCreateInfo->pStages[i];
this->duplicate_shaders |= this->active_shaders & pPSSCI->stage;
this->active_shaders |= pPSSCI->stage;
state_data->RecordPipelineShaderStage(pPSSCI, this, &stage_state[i]);
}
if (graphicsPipelineCI.pVertexInputState) {
const auto pVICI = graphicsPipelineCI.pVertexInputState;
if (pVICI->vertexBindingDescriptionCount) {
this->vertex_binding_descriptions_ = std::vector<VkVertexInputBindingDescription>(
pVICI->pVertexBindingDescriptions, pVICI->pVertexBindingDescriptions + pVICI->vertexBindingDescriptionCount);
this->vertex_binding_to_index_map_.reserve(pVICI->vertexBindingDescriptionCount);
for (uint32_t i = 0; i < pVICI->vertexBindingDescriptionCount; ++i) {
this->vertex_binding_to_index_map_[pVICI->pVertexBindingDescriptions[i].binding] = i;
}
}
if (pVICI->vertexAttributeDescriptionCount) {
this->vertex_attribute_descriptions_ = std::vector<VkVertexInputAttributeDescription>(
pVICI->pVertexAttributeDescriptions, pVICI->pVertexAttributeDescriptions + pVICI->vertexAttributeDescriptionCount);
}
}
if (graphicsPipelineCI.pColorBlendState) {
const auto pCBCI = graphicsPipelineCI.pColorBlendState;
if (pCBCI->attachmentCount) {
this->attachments =
std::vector<VkPipelineColorBlendAttachmentState>(pCBCI->pAttachments, pCBCI->pAttachments + pCBCI->attachmentCount);
}
}
if (graphicsPipelineCI.pInputAssemblyState) {
topology_at_rasterizer = graphicsPipelineCI.pInputAssemblyState->topology;
}
rp_state = rpstate;
}
void PIPELINE_STATE::initComputePipeline(ValidationStateTracker *state_data, const VkComputePipelineCreateInfo *pCreateInfo) {
reset();
computePipelineCI.initialize(pCreateInfo);
switch (computePipelineCI.stage.stage) {
case VK_SHADER_STAGE_COMPUTE_BIT:
this->active_shaders |= VK_SHADER_STAGE_COMPUTE_BIT;
stage_state.resize(1);
state_data->RecordPipelineShaderStage(&pCreateInfo->stage, this, &stage_state[0]);
break;
default:
// TODO : Flag error
break;
}
}
void PIPELINE_STATE::initRayTracingPipelineNV(ValidationStateTracker *state_data,
const VkRayTracingPipelineCreateInfoNV *pCreateInfo) {
reset();
raytracingPipelineCI.initialize(pCreateInfo);
stage_state.resize(pCreateInfo->stageCount);
for (uint32_t stage_index = 0; stage_index < pCreateInfo->stageCount; stage_index++) {
const auto &shader_stage = pCreateInfo->pStages[stage_index];
switch (shader_stage.stage) {
case VK_SHADER_STAGE_RAYGEN_BIT_NV:
this->active_shaders |= VK_SHADER_STAGE_RAYGEN_BIT_NV;
break;
case VK_SHADER_STAGE_ANY_HIT_BIT_NV:
this->active_shaders |= VK_SHADER_STAGE_ANY_HIT_BIT_NV;
break;
case VK_SHADER_STAGE_CLOSEST_HIT_BIT_NV:
this->active_shaders |= VK_SHADER_STAGE_CLOSEST_HIT_BIT_NV;
break;
case VK_SHADER_STAGE_MISS_BIT_NV:
this->active_shaders |= VK_SHADER_STAGE_MISS_BIT_NV;
break;
case VK_SHADER_STAGE_INTERSECTION_BIT_NV:
this->active_shaders |= VK_SHADER_STAGE_INTERSECTION_BIT_NV;
break;
case VK_SHADER_STAGE_CALLABLE_BIT_NV:
this->active_shaders |= VK_SHADER_STAGE_CALLABLE_BIT_NV;
break;
default:
// TODO : Flag error
break;
}
state_data->RecordPipelineShaderStage(&shader_stage, this, &stage_state[stage_index]);
}
}
 | 1 | 11,619 | `RecordPipelineShaderStage()` might change `topology_at_rasterizer` depending on the shader code, so we should check `pInputAssemblyState` first. | KhronosGroup-Vulkan-ValidationLayers | cpp |
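The review above targets `PIPELINE_STATE::initGraphicsPipeline()` in the file shown. A minimal sketch of the suggested reordering, placed after `graphicsPipelineCI.initialize(...)` and assuming the rest of the function stays unchanged (this illustrates the reviewer's idea, not the actual upstream fix):

```cpp
// Read the fixed-function topology first, so that RecordPipelineShaderStage(),
// which may overwrite topology_at_rasterizer based on the shader code, runs
// afterwards and takes precedence.
if (graphicsPipelineCI.pInputAssemblyState) {
    topology_at_rasterizer = graphicsPipelineCI.pInputAssemblyState->topology;
}
stage_state.resize(pCreateInfo->stageCount);
for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
    const VkPipelineShaderStageCreateInfo *pPSSCI = &pCreateInfo->pStages[i];
    this->duplicate_shaders |= this->active_shaders & pPSSCI->stage;
    this->active_shaders |= pPSSCI->stage;
    state_data->RecordPipelineShaderStage(pPSSCI, this, &stage_state[i]);
}
```

With this ordering, the `if (graphicsPipelineCI.pInputAssemblyState)` block near the end of the original function would be removed rather than duplicated.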
@@ -1,6 +1,10 @@
const path = require("path");
+const { CleanWebpackPlugin} = require("clean-webpack-plugin");
const CopyPlugin = require("copy-webpack-plugin");
+// assets.js
+const Assets = require('./assets');
+
module.exports = {
context: path.resolve(__dirname, "src"),
entry: "./bundle.js", | 1 | const path = require("path");
const CopyPlugin = require("copy-webpack-plugin");
module.exports = {
context: path.resolve(__dirname, "src"),
entry: "./bundle.js",
resolve: {
modules: [
path.resolve(__dirname, "node_modules")
]
},
plugins: [
new CopyPlugin([{
from: "**/*",
to: "."
}])
]
};
| 1 | 12,142 | Should we just inline the assets here? I can't think of an advantage to having them in a separate file. | jellyfin-jellyfin-web | js |
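The question above concerns the `Assets = require('./assets')` line this patch introduces. A sketch of the inlined alternative, assuming `assets.js` simply exports an array of copy patterns; the glob entries below are invented for illustration:

```js
const path = require("path");
const { CleanWebpackPlugin } = require("clean-webpack-plugin");
const CopyPlugin = require("copy-webpack-plugin");

// Inlined in place of require("./assets"); these entries are hypothetical.
const assets = [
    "themes/**/*",
    "img/**/*"
];

module.exports = {
    context: path.resolve(__dirname, "src"),
    entry: "./bundle.js",
    resolve: {
        modules: [
            path.resolve(__dirname, "node_modules")
        ]
    },
    plugins: [
        new CleanWebpackPlugin(),
        // copy-webpack-plugin v5-style pattern array, as in the original config
        new CopyPlugin(assets.map((from) => ({ from: from, to: "." })))
    ]
};
```

Whether this beats a separate `assets.js` mostly comes down to how long the asset list grows.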
@@ -175,13 +175,13 @@ func (t *endpointsChangesTracker) Update(em types.EndpointsMap) map[k8sproxy.Ser
for spn, endpoints := range change.current {
em[spn] = endpoints
}
- detectStaleConnections(change.previous, change.current, staleEndpoints)
+ detectStale(change.previous, change.current, staleEndpoints)
}
return staleEndpoints
}
-// detectStaleConnections updates staleEndpoints with detected stale connections.
-func detectStaleConnections(oldEndpointsMap, newEndpointsMap types.EndpointsMap, staleEndpoints map[k8sproxy.ServicePortName]map[string]k8sproxy.Endpoint) {
+// detectStale updates staleEndpoints with detected stale endpoints.
+func detectStale(oldEndpointsMap, newEndpointsMap types.EndpointsMap, staleEndpoints map[k8sproxy.ServicePortName]map[string]k8sproxy.Endpoint) {
for svcPortName, epList := range oldEndpointsMap {
for _, ep := range epList {
stale := true | 1 | // Copyright 2020 Antrea Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package proxy
import (
"fmt"
"net"
"reflect"
"sync"
corev1 "k8s.io/api/core/v1"
apimachinerytypes "k8s.io/apimachinery/pkg/types"
"k8s.io/klog"
"github.com/vmware-tanzu/antrea/pkg/agent/proxy/types"
k8sproxy "github.com/vmware-tanzu/antrea/third_party/proxy"
)
// endpointsChange describes an Endpoints change: previous is the state before
// the changes were applied, current is the state after applying all of them.
type endpointsChange struct {
previous types.EndpointsMap
current types.EndpointsMap
}
// endpointsChangesTracker tracks Endpoints changes.
type endpointsChangesTracker struct {
// hostname is used to tell whether the Endpoint is located on current Node.
hostname string
sync.RWMutex
// initialized tells whether Endpoints have been synced.
initialized bool
// changes contains endpoints changes since the last checkoutChanges call.
changes map[apimachinerytypes.NamespacedName]*endpointsChange
}
func newEndpointsChangesTracker(hostname string) *endpointsChangesTracker {
return &endpointsChangesTracker{
hostname: hostname,
changes: map[apimachinerytypes.NamespacedName]*endpointsChange{},
}
}
// OnEndpointUpdate updates given Service's Endpoints change map based on the
// <previous, current> Endpoints pair. It returns true if items changed,
// otherwise it returns false.
// Update can be used to add/update/delete items of EndpointsChangeMap.
// For example,
// Add item
// - pass <nil, Endpoints> as the <previous, current> pair.
// Update item
// - pass <oldEndpoints, Endpoints> as the <previous, current> pair.
// Delete item
// - pass <Endpoints, nil> as the <previous, current> pair.
func (t *endpointsChangesTracker) OnEndpointUpdate(previous, current *corev1.Endpoints) bool {
endpoints := current
if endpoints == nil {
endpoints = previous
}
// previous == nil && current == nil is unexpected, we should return false directly.
if endpoints == nil {
return false
}
namespacedName := apimachinerytypes.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name}
t.Lock()
defer t.Unlock()
change, exists := t.changes[namespacedName]
if !exists {
change = &endpointsChange{}
change.previous = t.endpointsToEndpointsMap(previous)
t.changes[namespacedName] = change
}
change.current = t.endpointsToEndpointsMap(current)
// If change.previous equals to change.current, it means no change.
if reflect.DeepEqual(change.previous, change.current) {
delete(t.changes, namespacedName)
}
return len(t.changes) > 0
}
func (t *endpointsChangesTracker) checkoutChanges() []*endpointsChange {
t.Lock()
defer t.Unlock()
var changes []*endpointsChange
for _, change := range t.changes {
changes = append(changes, change)
}
t.changes = make(map[apimachinerytypes.NamespacedName]*endpointsChange)
return changes
}
func (t *endpointsChangesTracker) OnEndpointsSynced() {
t.Lock()
defer t.Unlock()
t.initialized = true
}
func (t *endpointsChangesTracker) Synced() bool {
t.RLock()
defer t.RUnlock()
return t.initialized
}
// endpointsToEndpointsMap translates single Endpoints object to EndpointsMap.
// This function is used for incremental update of EndpointsMap.
func (t *endpointsChangesTracker) endpointsToEndpointsMap(endpoints *corev1.Endpoints) types.EndpointsMap {
if endpoints == nil {
return nil
}
endpointsMap := make(types.EndpointsMap)
// We need to build a map of portname -> all ip:ports for that
// portname. Explode Endpoints.Subsets[*] into this structure.
for i := range endpoints.Subsets {
ss := &endpoints.Subsets[i]
for i := range ss.Ports {
port := &ss.Ports[i]
if port.Port == 0 {
klog.Warningf("Ignoring invalid endpoint port %s", port.Name)
continue
}
svcPortName := k8sproxy.ServicePortName{
NamespacedName: apimachinerytypes.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name},
Protocol: port.Protocol,
Port: port.Name,
}
if _, ok := endpointsMap[svcPortName]; !ok {
endpointsMap[svcPortName] = map[string]k8sproxy.Endpoint{}
}
for i := range ss.Addresses {
addr := &ss.Addresses[i]
if addr.IP == "" {
klog.Warningf("ignoring invalid endpoint port %s with empty host", port.Name)
continue
}
isLocal := addr.NodeName != nil && *addr.NodeName == t.hostname
ei := types.NewEndpointInfo(&k8sproxy.BaseEndpointInfo{
Endpoint: net.JoinHostPort(addr.IP, fmt.Sprint(port.Port)),
IsLocal: isLocal,
})
endpointsMap[svcPortName][ei.String()] = ei
}
}
}
return endpointsMap
}
// Update updates an EndpointsMap based on current changes and returns stale
// Endpoints of each Service.
func (t *endpointsChangesTracker) Update(em types.EndpointsMap) map[k8sproxy.ServicePortName]map[string]k8sproxy.Endpoint {
staleEndpoints := map[k8sproxy.ServicePortName]map[string]k8sproxy.Endpoint{}
for _, change := range t.checkoutChanges() {
for spn := range change.previous {
delete(em, spn)
}
for spn, endpoints := range change.current {
em[spn] = endpoints
}
detectStaleConnections(change.previous, change.current, staleEndpoints)
}
return staleEndpoints
}
// detectStaleConnections updates staleEndpoints with detected stale connections.
func detectStaleConnections(oldEndpointsMap, newEndpointsMap types.EndpointsMap, staleEndpoints map[k8sproxy.ServicePortName]map[string]k8sproxy.Endpoint) {
for svcPortName, epList := range oldEndpointsMap {
for _, ep := range epList {
stale := true
for i := range newEndpointsMap[svcPortName] {
if newEndpointsMap[svcPortName][i].Equal(ep) {
stale = false
break
}
}
if stale {
if _, ok := staleEndpoints[svcPortName]; !ok {
staleEndpoints[svcPortName] = map[string]k8sproxy.Endpoint{}
}
staleEndpoints[svcPortName][ep.String()] = ep
}
}
}
}
 | 1 | 32,761 | Rename it to `detectStaleEndpoints` to be more specific? | antrea-io-antrea | go |
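If the rename suggested above were adopted, only the function name and its doc comment would change; a sketch under that assumption, with the body copied unchanged from `detectStale`:

```go
// detectStaleEndpoints updates staleEndpoints with endpoints that exist in
// oldEndpointsMap but are no longer present in newEndpointsMap.
func detectStaleEndpoints(oldEndpointsMap, newEndpointsMap types.EndpointsMap, staleEndpoints map[k8sproxy.ServicePortName]map[string]k8sproxy.Endpoint) {
	for svcPortName, epList := range oldEndpointsMap {
		for _, ep := range epList {
			stale := true
			for i := range newEndpointsMap[svcPortName] {
				if newEndpointsMap[svcPortName][i].Equal(ep) {
					stale = false
					break
				}
			}
			if stale {
				if _, ok := staleEndpoints[svcPortName]; !ok {
					staleEndpoints[svcPortName] = map[string]k8sproxy.Endpoint{}
				}
				staleEndpoints[svcPortName][ep.String()] = ep
			}
		}
	}
}
```

The call site in `endpointsChangesTracker.Update` would then read `detectStaleEndpoints(change.previous, change.current, staleEndpoints)`.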
@@ -100,6 +100,8 @@ class SpatialPoolerCompatabilityTest(unittest.TestCase):
cppSp.getMinPctOverlapDutyCycles())
self.assertAlmostEqual(pySp.getMinPctActiveDutyCycles(),
cppSp.getMinPctActiveDutyCycles())
+# self.assertEqual(pySp.getRandomSP(), #FIXME enable when done in C++ too
+# cppSP.getRandomSP())
numColumns = pySp.getNumColumns()
numInputs = pySp.getNumInputs() | 1 | #! /usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import cPickle as pickle
import numpy
import unittest2 as unittest
import time
import traceback
from nupic.support.unittesthelpers.algorithm_test_helpers \
import getNumpyRandomGenerator, CreateSP, convertPermanences
from nupic.research.spatial_pooler import SpatialPooler as PySpatialPooler
from nupic.bindings.algorithms import SpatialPooler as CPPSpatialPooler
from nupic.bindings.math import GetNTAReal, Random as NupicRandom
realType = GetNTAReal()
uintType = "uint32"
numRecords = 100
class SpatialPoolerCompatabilityTest(unittest.TestCase):
"""
Tests to ensure that the PY and CPP implementations of the spatial pooler
are functionally identical.
"""
def setUp(self):
# Set to 1 for more verbose debugging output
self.verbosity = 1
def assertListAlmostEqual(self, alist, blist):
self.assertEqual(len(alist), len(blist))
for a, b in zip(alist, blist):
diff = abs(a - b)
self.assertLess(diff, 1e-4)
def compare(self, pySp, cppSp):
self.assertAlmostEqual(pySp.getNumColumns(),
cppSp.getNumColumns())
self.assertAlmostEqual(pySp.getNumInputs(),
cppSp.getNumInputs())
self.assertAlmostEqual(pySp.getPotentialRadius(),
cppSp.getPotentialRadius())
self.assertAlmostEqual(pySp.getPotentialPct(),
cppSp.getPotentialPct())
self.assertAlmostEqual(pySp.getGlobalInhibition(),
cppSp.getGlobalInhibition())
self.assertAlmostEqual(pySp.getNumActiveColumnsPerInhArea(),
cppSp.getNumActiveColumnsPerInhArea())
self.assertAlmostEqual(pySp.getLocalAreaDensity(),
cppSp.getLocalAreaDensity())
self.assertAlmostEqual(pySp.getStimulusThreshold(),
cppSp.getStimulusThreshold())
self.assertAlmostEqual(pySp.getInhibitionRadius(),
cppSp.getInhibitionRadius())
self.assertAlmostEqual(pySp.getDutyCyclePeriod(),
cppSp.getDutyCyclePeriod())
self.assertAlmostEqual(pySp.getMaxBoost(),
cppSp.getMaxBoost())
self.assertAlmostEqual(pySp.getIterationNum(),
cppSp.getIterationNum())
self.assertAlmostEqual(pySp.getIterationLearnNum(),
cppSp.getIterationLearnNum())
self.assertAlmostEqual(pySp.getSpVerbosity(),
cppSp.getSpVerbosity())
self.assertAlmostEqual(pySp.getUpdatePeriod(),
cppSp.getUpdatePeriod())
self.assertAlmostEqual(pySp.getSynPermTrimThreshold(),
cppSp.getSynPermTrimThreshold())
self.assertAlmostEqual(pySp.getSynPermActiveInc(),
cppSp.getSynPermActiveInc())
self.assertAlmostEqual(pySp.getSynPermInactiveDec(),
cppSp.getSynPermInactiveDec())
self.assertAlmostEqual(pySp.getSynPermBelowStimulusInc(),
cppSp.getSynPermBelowStimulusInc())
self.assertAlmostEqual(pySp.getSynPermConnected(),
cppSp.getSynPermConnected())
self.assertAlmostEqual(pySp.getMinPctOverlapDutyCycles(),
cppSp.getMinPctOverlapDutyCycles())
self.assertAlmostEqual(pySp.getMinPctActiveDutyCycles(),
cppSp.getMinPctActiveDutyCycles())
numColumns = pySp.getNumColumns()
numInputs = pySp.getNumInputs()
pyBoost = numpy.zeros(numColumns).astype(realType)
cppBoost = numpy.zeros(numColumns).astype(realType)
pySp.getBoostFactors(pyBoost)
cppSp.getBoostFactors(cppBoost)
self.assertListAlmostEqual(list(pyBoost), list(cppBoost))
pyOverlap = numpy.zeros(numColumns).astype(realType)
cppOverlap = numpy.zeros(numColumns).astype(realType)
pySp.getOverlapDutyCycles(pyOverlap)
cppSp.getOverlapDutyCycles(cppOverlap)
self.assertListAlmostEqual(list(pyOverlap), list(cppOverlap))
pyActive = numpy.zeros(numColumns).astype(realType)
cppActive = numpy.zeros(numColumns).astype(realType)
pySp.getActiveDutyCycles(pyActive)
cppSp.getActiveDutyCycles(cppActive)
self.assertListAlmostEqual(list(pyActive), list(cppActive))
pyMinOverlap = numpy.zeros(numColumns).astype(realType)
cppMinOverlap = numpy.zeros(numColumns).astype(realType)
pySp.getMinOverlapDutyCycles(pyMinOverlap)
cppSp.getMinOverlapDutyCycles(cppMinOverlap)
self.assertListAlmostEqual(list(pyMinOverlap), list(cppMinOverlap))
pyMinActive = numpy.zeros(numColumns).astype(realType)
cppMinActive = numpy.zeros(numColumns).astype(realType)
pySp.getMinActiveDutyCycles(pyMinActive)
cppSp.getMinActiveDutyCycles(cppMinActive)
self.assertListAlmostEqual(list(pyMinActive), list(cppMinActive))
for i in xrange(pySp.getNumColumns()):
if self.verbosity > 2: print "Column:",i
pyPot = numpy.zeros(numInputs).astype(uintType)
cppPot = numpy.zeros(numInputs).astype(uintType)
pySp.getPotential(i, pyPot)
cppSp.getPotential(i, cppPot)
self.assertListEqual(list(pyPot),list(cppPot))
pyPerm = numpy.zeros(numInputs).astype(realType)
cppPerm = numpy.zeros(numInputs).astype(realType)
pySp.getPermanence(i, pyPerm)
cppSp.getPermanence(i, cppPerm)
self.assertListAlmostEqual(list(pyPerm),list(cppPerm))
pyCon = numpy.zeros(numInputs).astype(uintType)
cppCon = numpy.zeros(numInputs).astype(uintType)
pySp.getConnectedSynapses(i, pyCon)
cppSp.getConnectedSynapses(i, cppCon)
self.assertListEqual(list(pyCon), list(cppCon))
pyConCounts = numpy.zeros(numColumns).astype(uintType)
cppConCounts = numpy.zeros(numColumns).astype(uintType)
pySp.getConnectedCounts(pyConCounts)
cppSp.getConnectedCounts(cppConCounts)
self.assertListEqual(list(pyConCounts), list(cppConCounts))
def runSideBySide(self, params, seed = None,
learnMode = None,
convertEveryIteration = False):
"""
Run the PY and CPP implementations side by side on random inputs.
If seed is None a random seed will be chosen based on time, otherwise
the fixed seed will be used.
If learnMode is None learning will be randomly turned on and off.
If it is False or True then set it accordingly.
If convertEveryIteration is True, the CPP will be copied from the PY
instance on every iteration just before each compute.
"""
randomState = getNumpyRandomGenerator(seed)
cppSp = CreateSP("cpp", params)
pySp = CreateSP("py", params)
self.compare(pySp, cppSp)
numColumns = pySp.getNumColumns()
numInputs = pySp.getNumInputs()
threshold = 0.8
inputMatrix = (
randomState.rand(numRecords,numInputs) > threshold).astype(uintType)
# Run side by side for numRecords iterations
for i in xrange(numRecords):
if learnMode is None:
learn = (randomState.rand() > 0.5)
else:
learn = learnMode
if self.verbosity > 1:
print "Iteration:",i,"learn=",learn
PyActiveArray = numpy.zeros(numColumns).astype(uintType)
CppActiveArray = numpy.zeros(numColumns).astype(uintType)
inputVector = inputMatrix[i,:]
pySp.compute(inputVector, learn, PyActiveArray)
cppSp.compute(inputVector, learn, CppActiveArray)
self.assertListEqual(list(PyActiveArray), list(CppActiveArray))
self.compare(pySp,cppSp)
# The permanence values for the two implementations drift ever so slowly
# over time due to numerical precision issues. This occasionally causes
# different permanences to be connected. By transferring the permanence
# values every so often, we can avoid this drift but still check that
# the logic is applied equally for both implementations.
if convertEveryIteration or ((i+1)%10 == 0):
convertPermanences(pySp, cppSp)
def runSerialize(self, imp, params, seed = None):
randomState = getNumpyRandomGenerator(seed)
sp1 = CreateSP(imp, params)
numColumns = sp1.getNumColumns()
numInputs = sp1.getNumInputs()
threshold = 0.8
inputMatrix = (
randomState.rand(numRecords,numInputs) > threshold).astype(uintType)
for i in xrange(numRecords/2):
activeArray = numpy.zeros(numColumns).astype(uintType)
inputVector = inputMatrix[i,:]
learn = (randomState.rand() > 0.5)
sp1.compute(inputVector, learn, activeArray)
sp2 = pickle.loads(pickle.dumps(sp1))
for i in xrange(numRecords/2+1,numRecords):
activeArray1 = numpy.zeros(numColumns).astype(uintType)
activeArray2 = numpy.zeros(numColumns).astype(uintType)
inputVector = inputMatrix[i,:]
learn = (randomState.rand() > 0.5)
sp1.compute(inputVector, learn, activeArray1)
sp2.compute(inputVector, learn, activeArray2)
self.assertListEqual(list(activeArray1), list(activeArray2))
def testCompatability1(self):
params = {
"inputDimensions": [4,4],
"columnDimensions": [5,3],
"potentialRadius": 20,
"potentialPct": 0.5,
"globalInhibition": True,
"localAreaDensity": 0,
"numActiveColumnsPerInhArea": 5,
"stimulusThreshold": 0,
"synPermInactiveDec": 0.01,
"synPermActiveInc": 0.1,
"synPermConnected": 0.10,
"minPctOverlapDutyCycle": 0.001,
"minPctActiveDutyCycle": 0.001,
"dutyCyclePeriod": 30,
"maxBoost": 10.0,
"seed": 4,
"spVerbosity": 0
}
# This seed used to cause problems if learnMode is set to None
self.runSideBySide(params, seed = 63862)
# These seeds used to fail
self.runSideBySide(params, seed = 62605)
self.runSideBySide(params, seed = 30440)
self.runSideBySide(params, seed = 49457)
self.runSideBySide(params)
def testCompatabilityNoLearn(self):
params = {
"inputDimensions": [4,4],
"columnDimensions": [5,3],
"potentialRadius": 20,
"potentialPct": 0.5,
"globalInhibition": True,
"localAreaDensity": 0,
"numActiveColumnsPerInhArea": 5,
"stimulusThreshold": 0,
"synPermInactiveDec": 0.01,
"synPermActiveInc": 0.1,
"synPermConnected": 0.10,
"minPctOverlapDutyCycle": 0.001,
"minPctActiveDutyCycle": 0.001,
"dutyCyclePeriod": 30,
"maxBoost": 10.0,
"seed": 4,
"spVerbosity": 0
}
self.runSideBySide(params, seed = None, learnMode = False)
def testCompatability2(self):
params = {
"inputDimensions": [12,7],
"columnDimensions": [4,15],
"potentialRadius": 22,
"potentialPct": 0.3,
"globalInhibition": False,
"localAreaDensity": 0,
"numActiveColumnsPerInhArea": 5,
"stimulusThreshold": 2,
"synPermInactiveDec": 0.04,
"synPermActiveInc": 0.14,
"synPermConnected": 0.178,
"minPctOverlapDutyCycle": 0.021,
"minPctActiveDutyCycle": 0.0012,
"dutyCyclePeriod": 20,
"maxBoost": 11.0,
"seed": 6,
"spVerbosity": 0
}
self.runSideBySide(params, convertEveryIteration = True)
def testCompatability3(self):
params = {
"inputDimensions": [2,4,5],
"columnDimensions": [4,3,3],
"potentialRadius": 30,
"potentialPct": 0.7,
"globalInhibition": False,
"localAreaDensity": 0.23,
"numActiveColumnsPerInhArea": 0,
"stimulusThreshold": 2,
"synPermInactiveDec": 0.02,
"synPermActiveInc": 0.1,
"synPermConnected": 0.12,
"minPctOverlapDutyCycle": 0.011,
"minPctActiveDutyCycle": 0.052,
"dutyCyclePeriod": 25,
"maxBoost": 11.0,
"seed": 19,
"spVerbosity": 0
}
self.runSideBySide(params, convertEveryIteration = True)
def testSerialization(self):
params = {
'inputDimensions' : [2,4,5],
'columnDimensions' : [4,3,3],
'potentialRadius' : 30,
'potentialPct' : 0.7,
'globalInhibition' : False,
'localAreaDensity' : 0.23,
'numActiveColumnsPerInhArea' : 0,
'stimulusThreshold' : 2,
'synPermInactiveDec' : 0.02,
'synPermActiveInc' : 0.1,
'synPermConnected' : 0.12,
'minPctOverlapDutyCycle' : 0.011,
'minPctActiveDutyCycle' : 0.052,
'dutyCyclePeriod' : 25,
'maxBoost' : 11.0,
'seed' : 19,
'spVerbosity' : 0
}
sp1 = CreateSP("py", params)
sp2 = pickle.loads(pickle.dumps(sp1))
self.compare(sp1, sp2)
sp1 = CreateSP("cpp", params)
sp2 = pickle.loads(pickle.dumps(sp1))
self.compare(sp1, sp2)
def testSerializationRun(self):
params = {
'inputDimensions' : [2,4,5],
'columnDimensions' : [4,3,3],
'potentialRadius' : 30,
'potentialPct' : 0.7,
'globalInhibition' : False,
'localAreaDensity' : 0.23,
'numActiveColumnsPerInhArea' : 0,
'stimulusThreshold' : 2,
'synPermInactiveDec' : 0.02,
'synPermActiveInc' : 0.1,
'synPermConnected' : 0.12,
'minPctOverlapDutyCycle' : 0.011,
'minPctActiveDutyCycle' : 0.052,
'dutyCyclePeriod' : 25,
'maxBoost' : 11.0,
'seed' : 19,
'spVerbosity' : 0
}
self.runSerialize("py", params)
self.runSerialize("cpp", params)
@unittest.skip("Currently fails due to non-fixed randomness in C++ SP.")
def testCompatibilityCppPyDirectCall1D(self):
"""Check SP implementations have same behavior with 1D input."""
pySp = PySpatialPooler(
inputDimensions=[121], columnDimensions=[300])
cppSp = CPPSpatialPooler(
inputDimensions=[121], columnDimensions=[300])
data = numpy.zeros([121], dtype=uintType)
for i in xrange(21):
data[i] = 1
nCols = 300
d1 = numpy.zeros(nCols, dtype=uintType)
d2 = numpy.zeros(nCols, dtype=uintType)
pySp.compute(data, True, d1) # learn
cppSp.compute(data, True, d2)
d1 = d1.nonzero()[0].tolist()
d2 = d2.nonzero()[0].tolist()
self.assertListEqual(
d1, d2, "SP outputs are not equal: \n%s \n%s" % (str(d1), str(d2)))
@unittest.skip("Currently fails due to non-fixed randomness in C++ SP.")
def testCompatibilityCppPyDirectCall2D(self):
"""Check SP implementations have same behavior with 2D input."""
pySp = PySpatialPooler(
inputDimensions=[121, 1], columnDimensions=[30, 30])
cppSp = CPPSpatialPooler(
inputDimensions=[121, 1], columnDimensions=[30, 30])
data = numpy.zeros([121, 1], dtype=uintType)
for i in xrange(21):
data[i][0] = 1
nCols = 900
d1 = numpy.zeros(nCols, dtype=uintType)
d2 = numpy.zeros(nCols, dtype=uintType)
pySp.compute(data, True, d1) # learn
cppSp.compute(data, True, d2)
d1 = d1.nonzero()[0].tolist()
d2 = d2.nonzero()[0].tolist()
self.assertListEqual(
d1, d2, "SP outputs are not equal: \n%s \n%s" % (str(d1), str(d2)))
if __name__ == "__main__":
unittest.main()
 | 1 | 17,394 | Please create a new issue to track this (if there isn't one already), so it doesn't get lost. | numenta-nupic | py |
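One detail worth noting alongside the issue: the commented-out line in this patch writes `cppSP`, while the enclosing `compare(self, pySp, cppSp)` method's parameter is spelled `cppSp`, so the check would raise a NameError if uncommented as-is. The enabled form would presumably look like:

```python
# Inside compare(self, pySp, cppSp), once getRandomSP() also exists on the
# C++ SpatialPooler binding (note the corrected cppSp spelling):
self.assertEqual(pySp.getRandomSP(), cppSp.getRandomSP())
```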
@@ -78,7 +78,7 @@ func NewStorageMiningSubmodule(minerAddr address.Address, ds datastore.Batching,
PoStGenerator: postGen,
minerNode: minerNode,
storageMiner: storageMiner,
- heaviestTipSetCh: c.HeaviestTipSetCh,
+ heaviestTipSetCh: make(chan interface{}),
poster: poster.NewPoster(minerAddr, m.Outbox, s, c.State, stateViewer, mw),
}
| 1 | package submodule
import (
"context"
"github.com/filecoin-project/go-filecoin/internal/pkg/block"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-sectorbuilder"
"github.com/filecoin-project/go-storage-miner"
"github.com/filecoin-project/go-storage-miner/policies/precommit"
"github.com/filecoin-project/go-storage-miner/policies/selfdeal"
"github.com/filecoin-project/specs-actors/actors/abi"
"github.com/ipfs/go-datastore"
storageminerconnector "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/connectors/storage_miner"
"github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/plumbing/msg"
"github.com/filecoin-project/go-filecoin/internal/pkg/piecemanager"
"github.com/filecoin-project/go-filecoin/internal/pkg/poster"
"github.com/filecoin-project/go-filecoin/internal/pkg/postgenerator"
appstate "github.com/filecoin-project/go-filecoin/internal/pkg/state"
)
// StorageMiningSubmodule enhances the `Node` with storage mining capabilities.
type StorageMiningSubmodule struct {
started bool
// StorageMining is used by the miner to fill and seal sectors.
PieceManager piecemanager.PieceManager
// PoStGenerator generates election PoSts
PoStGenerator postgenerator.PoStGenerator
minerNode *storageminerconnector.StorageMinerNodeConnector
storageMiner *storage.Miner
heaviestTipSetCh chan interface{}
poster *poster.Poster
}
// NewStorageMiningSubmodule creates a new storage mining submodule.
func NewStorageMiningSubmodule(minerAddr address.Address, ds datastore.Batching, s sectorbuilder.Interface, c *ChainSubmodule,
m *MessagingSubmodule, mw *msg.Waiter, w *WalletSubmodule, stateViewer *appstate.Viewer, postGen postgenerator.PoStGenerator) (*StorageMiningSubmodule, error) {
minerNode := storageminerconnector.NewStorageMinerNodeConnector(minerAddr, c.ChainReader, c.State, m.Outbox, mw, w.Signer, stateViewer)
	// The number of epochs we expect the storage miner to take to replicate and
// prove a sector. This value should be shared with the storage miner side
// of go-fil-markets. The protocol specifies a maximum sealing duration (1)
// which could be used to improve the accuracy of provingDelay.
//
// 1: https://github.com/filecoin-project/specs-actors/commit/fa20d55a3ff0c0134b130dc27850998ffd432580#diff-5a14038af5531003ed825ab608d0dd51R21
//
// TODO: What is the correct value for proving delay given 32GiB sectors?
provingDelay := abi.ChainEpoch(2 * 60 * 24)
	// The number of epochs during which the self-deal will be valid.
selfDealDuration := abi.ChainEpoch(2 * 60 * 24)
sdp := selfdeal.NewBasicPolicy(minerNode, provingDelay, selfDealDuration)
// If a sector contains no pieces, this policy will set the sector
// pre-commit expiration to the current epoch + the provided value. If the
// sector does contain deals' pieces, the sector pre-commit expiration will
// be set to the farthest-into-the-future deal end-epoch.
pcp := precommit.NewBasicPolicy(minerNode, abi.ChainEpoch(2*60*24))
storageMiner, err := storage.NewMiner(minerNode, ds, s, minerAddr, &sdp, &pcp)
if err != nil {
return nil, err
}
smbe := piecemanager.NewStorageMinerBackEnd(storageMiner, s)
if postGen == nil {
postGen = postgenerator.NewSectorBuilderBackEnd(s)
}
modu := &StorageMiningSubmodule{
PieceManager: smbe,
PoStGenerator: postGen,
minerNode: minerNode,
storageMiner: storageMiner,
heaviestTipSetCh: c.HeaviestTipSetCh,
poster: poster.NewPoster(minerAddr, m.Outbox, s, c.State, stateViewer, mw),
}
return modu, nil
}
// Start starts the StorageMiningSubmodule
func (s *StorageMiningSubmodule) Start(ctx context.Context) error {
if s.started {
return nil
}
s.minerNode.StartHeightListener(ctx, s.heaviestTipSetCh)
err := s.storageMiner.Run(ctx)
if err != nil {
return err
}
s.started = true
return nil
}
// Stop stops the StorageMiningSubmodule
func (s *StorageMiningSubmodule) Stop(ctx context.Context) error {
if !s.started {
return nil
}
s.minerNode.StopHeightListener()
err := s.storageMiner.Stop(ctx)
if err != nil {
return err
}
s.poster.StopPoSting()
s.started = false
return nil
}
// HandleNewHead submits a new chain head for possible fallback PoSt.
func (s *StorageMiningSubmodule) HandleNewHead(ctx context.Context, newHead block.TipSet) error {
if !s.started {
return nil
}
return s.poster.HandleNewHead(ctx, newHead)
}
| 1 | 23,421 | Nothing was ever coming out of this channel because its already being consumed. Create a new channel here and feed it in HandleNewHead method. | filecoin-project-venus | go |
@@ -1,4 +1,4 @@
-//snippet-sourcedescription:[GetPolicy.java demonstrates how to get the details for an AWS Identity and Access Management (IAM) policy.]
+//snippet-sourcedescription:[GetPolicy.java demonstrates how to get the details for an AWS Identity and Access Management (AWS IAM) policy.]
//snippet-keyword:[AWS SDK for Java v2]
//snippet-keyword:[Code Sample]
//snippet-service:[AWS IAM] | 1 | //snippet-sourcedescription:[GetPolicy.java demonstrates how to get the details for an AWS Identity and Access Management (IAM) policy.]
//snippet-keyword:[AWS SDK for Java v2]
//snippet-keyword:[Code Sample]
//snippet-service:[AWS IAM]
//snippet-sourcetype:[full-example]
//snippet-sourcedate:[11/02/2020]
//snippet-sourceauthor:[scmacdon-aws]
/*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
*/
package com.example.iam;
// snippet-start:[iam.java2.get_policy.import]
import software.amazon.awssdk.services.iam.model.GetPolicyRequest;
import software.amazon.awssdk.services.iam.model.GetPolicyResponse;
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.iam.IamClient;
import software.amazon.awssdk.services.iam.model.IamException;
// snippet-end:[iam.java2.get_policy.import]
public class GetPolicy {
public static void main(String[] args) {
final String USAGE = "\n" +
"Usage:\n" +
" GetPolicy <policyArn> \n\n" +
"Where:\n" +
" policyArn - a policy ARN that you can obtain from the AWS Console. \n\n" ;
if (args.length != 1) {
System.out.println(USAGE);
System.exit(1);
}
// Read the command line argument
String policyArn = args[0];
Region region = Region.AWS_GLOBAL;
IamClient iam = IamClient.builder()
.region(region)
.build();
getIAMPolicy(iam, policyArn);
System.out.println("Done");
iam.close();
}
// snippet-start:[iam.java2.get_policy.main]
public static void getIAMPolicy(IamClient iam, String policyArn) {
try {
GetPolicyRequest request = GetPolicyRequest.builder()
.policyArn(policyArn).build();
GetPolicyResponse response = iam.getPolicy(request);
System.out.format("Successfully retrieved policy %s",
response.policy().policyName());
} catch (IamException e) {
System.err.println(e.awsErrorDetails().errorMessage());
System.exit(1);
}
}
// snippet-end:[iam.java2.get_policy.main]
}
| 1 | 18,243 | AWS Identity and Access Management (IAM) | awsdocs-aws-doc-sdk-examples | rb |
@@ -76,7 +76,7 @@ public class TestEdgeDriver extends RemoteWebDriver implements WebStorage, Locat
.findFirst().orElseThrow(WebDriverException::new);
service = (EdgeDriverService) builder.withVerbose(true).withLogFile(logFile.toFile()).build();
- LOG.info("edgedriver will log to " + logFile);
+ LOG.fine("edgedriver will log to " + logFile);
service.start();
Runtime.getRuntime().addShutdownHook(new Thread(() -> service.stop()));
} | 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.openqa.selenium.testing.drivers;
import org.openqa.selenium.Capabilities;
import org.openqa.selenium.OutputType;
import org.openqa.selenium.WebDriverException;
import org.openqa.selenium.edge.EdgeDriverService;
import org.openqa.selenium.edge.EdgeOptions;
import org.openqa.selenium.html5.LocalStorage;
import org.openqa.selenium.html5.Location;
import org.openqa.selenium.html5.LocationContext;
import org.openqa.selenium.html5.SessionStorage;
import org.openqa.selenium.html5.WebStorage;
import org.openqa.selenium.remote.DriverCommand;
import org.openqa.selenium.remote.RemoteWebDriver;
import org.openqa.selenium.remote.html5.RemoteLocationContext;
import org.openqa.selenium.remote.html5.RemoteWebStorage;
import org.openqa.selenium.remote.service.DriverService;
import java.io.File;
import java.io.IOException;
import java.net.URL;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.HashMap;
import java.util.Map;
import java.util.ServiceLoader;
import java.util.logging.Logger;
import java.util.stream.StreamSupport;
/**
* Customized RemoteWebDriver that will communicate with a service that lives and dies with the
* entire test suite. We do not use {@link org.openqa.selenium.edge.EdgeDriver} since that starts and stops the service
* with each instance (and that is too expensive for our purposes).
*/
public class TestEdgeDriver extends RemoteWebDriver implements WebStorage, LocationContext {
private final static Logger LOG = Logger.getLogger(TestEdgeDriver.class.getName());
private static EdgeDriverService service;
private RemoteWebStorage webStorage;
private RemoteLocationContext locationContext;
public TestEdgeDriver(Capabilities capabilities) {
super(getServiceUrl(), edgeWithCustomCapabilities(capabilities));
webStorage = new RemoteWebStorage(getExecuteMethod());
locationContext = new RemoteLocationContext(getExecuteMethod());
}
private static URL getServiceUrl() {
try {
if (service == null) {
Path logFile = Files.createTempFile("edgedriver", ".log");
boolean isLegacy = System.getProperty("webdriver.edge.edgehtml") == null || Boolean.getBoolean("webdriver.edge.edgehtml");
EdgeDriverService.Builder<?, ?> builder =
StreamSupport.stream(ServiceLoader.load(DriverService.Builder.class).spliterator(), false)
.filter(b -> b instanceof EdgeDriverService.Builder)
.map(b -> (EdgeDriverService.Builder<?, ?>) b)
.filter(b -> b.isLegacy() == isLegacy)
.findFirst().orElseThrow(WebDriverException::new);
service = (EdgeDriverService) builder.withVerbose(true).withLogFile(logFile.toFile()).build();
LOG.info("edgedriver will log to " + logFile);
service.start();
Runtime.getRuntime().addShutdownHook(new Thread(() -> service.stop()));
}
return service.getUrl();
} catch (IOException e) {
throw new RuntimeException(e);
}
}
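  // Returns plain EdgeOptions when targeting legacy EdgeHTML; otherwise adds
  // test-friendly arguments and preferences and merges the caller's capabilities.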
private static Capabilities edgeWithCustomCapabilities(Capabilities originalCapabilities) {
EdgeOptions options = new EdgeOptions();
if (System.getProperty("webdriver.edge.edgehtml") == null || Boolean.getBoolean("webdriver.edge.edgehtml"))
return options;
options.addArguments("disable-extensions", "disable-infobars", "disable-breakpad");
Map<String, Object> prefs = new HashMap<>();
prefs.put("exit_type", "None");
prefs.put("exited_cleanly", true);
options.setExperimentalOption("prefs", prefs);
String edgePath = System.getProperty("webdriver.edge.binary");
if (edgePath != null) {
options.setBinary(new File(edgePath));
}
if (originalCapabilities != null) {
options.merge(originalCapabilities);
}
return options;
}
@Override
public <X> X getScreenshotAs(OutputType<X> target) {
// Get the screenshot as base64.
String base64 = (String) execute(DriverCommand.SCREENSHOT).getValue();
// ... and convert it.
return target.convertFromBase64Png(base64);
}
@Override
public LocalStorage getLocalStorage() {
return webStorage.getLocalStorage();
}
@Override
public SessionStorage getSessionStorage() {
return webStorage.getSessionStorage();
}
@Override
public Location location() {
return locationContext.location();
}
@Override
public void setLocation(Location location) {
locationContext.setLocation(location);
}
}
| 1 | 17,118 | This is deliberately at this level. | SeleniumHQ-selenium | js |
@@ -63,8 +63,10 @@ func stake2Transfer(args []string) error {
var payload []byte
if len(args) == 3 {
- payload = make([]byte, 2*len([]byte(args[2])))
- hex.Encode(payload, []byte(args[2]))
+ payload, err = hex.DecodeString(args[2])
+ if err != nil {
+ return output.NewError(output.ConvertError, "failed to decode data", err)
+ }
}
sender, err := signer() | 1 | // Copyright (c) 2020 IoTeX Foundation
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package action
import (
"encoding/hex"
"strconv"
"github.com/spf13/cobra"
"github.com/iotexproject/iotex-core/action"
"github.com/iotexproject/iotex-core/ioctl/config"
"github.com/iotexproject/iotex-core/ioctl/output"
"github.com/iotexproject/iotex-core/ioctl/util"
)
// Multi-language support
var (
stake2TransferCmdUses = map[config.Language]string{
config.English: "transfer (ALIAS|VOTE_ADDRESS) BUCKET_INDEX [DATA]" +
" [-s SIGNER] [-n NONCE] [-l GAS_LIMIT] [-p GAS_PRICE] [-P PASSWORD] [-y]",
config.Chinese: "transfer (别名|投票地址) 票索引 [数据]" +
" [-s 签署人] [-n NONCE] [-l GAS限制] [-p GAS价格] [-P 密码] [-y]",
}
stake2TransferCmdShorts = map[config.Language]string{
config.English: "Transfer bucket ownership on IoTeX blockchain",
config.Chinese: "在IoTeX区块链上转移投票所有权",
}
)
// stake2TransferCmd represents the stake2 transfer command
var stake2TransferCmd = &cobra.Command{
Use: config.TranslateInLang(stake2TransferCmdUses, config.UILanguage),
Short: config.TranslateInLang(stake2TransferCmdShorts, config.UILanguage),
Args: cobra.RangeArgs(2, 3),
RunE: func(cmd *cobra.Command, args []string) error {
cmd.SilenceUsage = true
err := stake2Transfer(args)
return output.PrintError(err)
},
}
func init() {
RegisterWriteCommand(stake2TransferCmd)
}
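// stake2Transfer parses the CLI arguments, builds a TransferStake action and
// sends it wrapped in a signed envelope from the configured signer.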
func stake2Transfer(args []string) error {
voterAddrStr, err := util.Address(args[0])
if err != nil {
return output.NewError(output.AddressError, "failed to get voter address", err)
}
bucketIndex, err := strconv.ParseUint(args[1], 10, 64)
if err != nil {
return output.NewError(output.ConvertError, "failed to convert bucket index", nil)
}
var payload []byte
if len(args) == 3 {
payload = make([]byte, 2*len([]byte(args[2])))
hex.Encode(payload, []byte(args[2]))
}
sender, err := signer()
if err != nil {
return output.NewError(output.AddressError, "failed to get signed address", err)
}
gasLimit := gasLimitFlag.Value().(uint64)
if gasLimit == 0 {
gasLimit = action.MoveStakeBaseIntrinsicGas +
action.MoveStakePayloadGas*uint64(len(payload))
}
gasPriceRau, err := gasPriceInRau()
if err != nil {
return output.NewError(0, "failed to get gas price", err)
}
nonce, err := nonce(sender)
if err != nil {
return output.NewError(0, "failed to get nonce ", err)
}
s2t, err := action.NewTransferStake(nonce, voterAddrStr, bucketIndex, payload, gasLimit, gasPriceRau)
if err != nil {
return output.NewError(output.InstantiationError, "failed to make a transferStake instance", err)
}
return SendAction(
(&action.EnvelopeBuilder{}).
SetNonce(nonce).
SetGasPrice(gasPriceRau).
SetGasLimit(gasLimit).
SetAction(s2t).Build(),
sender)
}
| 1 | 21,868 | payload entered on command line is in hex-encoded format should use same processing as in ioctl/cmd/action/actiontransfer.go | iotexproject-iotex-core | go |
@@ -37,7 +37,14 @@ class PricingGroupDataFixture extends AbstractReferenceFixture
$pricingGroupData->name = 'Obyčejný zákazník';
$domainId = 2;
- $this->createPricingGroup($pricingGroupData, $domainId, self::PRICING_GROUP_ORDINARY_DOMAIN_2);
+
+ $alreadyCreatedDemoPricingGroupsByDomain = $this->pricingGroupFacade->getByDomainId($domainId);
+ if (count($alreadyCreatedDemoPricingGroupsByDomain) > 0) {
+ $pricingGroup = reset($alreadyCreatedDemoPricingGroupsByDomain);
+
+ $this->pricingGroupFacade->edit($pricingGroup->getId(), $pricingGroupData);
+ $this->addReference(self::PRICING_GROUP_ORDINARY_DOMAIN_2, $pricingGroup);
+ }
$pricingGroupData->name = 'VIP zákazník';
$domainId1 = 2; | 1 | <?php
namespace Shopsys\FrameworkBundle\DataFixtures\DemoMultidomain;
use Doctrine\Common\Persistence\ObjectManager;
use Shopsys\FrameworkBundle\Component\DataFixture\AbstractReferenceFixture;
use Shopsys\FrameworkBundle\Model\Pricing\Group\PricingGroupData;
use Shopsys\FrameworkBundle\Model\Pricing\Group\PricingGroupDataFactoryInterface;
use Shopsys\FrameworkBundle\Model\Pricing\Group\PricingGroupFacade;
class PricingGroupDataFixture extends AbstractReferenceFixture
{
const PRICING_GROUP_ORDINARY_DOMAIN_2 = 'pricing_group_ordinary_domain_2';
const PRICING_GROUP_VIP_DOMAIN_2 = 'pricing_group_vip_domain_2';
/**
* @var \Shopsys\FrameworkBundle\Model\Pricing\Group\PricingGroupFacade
*/
private $pricingGroupFacade;
/**
* @var \Shopsys\FrameworkBundle\Model\Pricing\Group\PricingGroupDataFactoryInterface
*/
private $pricingGroupDataFactory;
public function __construct(
PricingGroupFacade $pricingGroupFacade,
PricingGroupDataFactoryInterface $pricingGroupDataFactory
) {
$this->pricingGroupFacade = $pricingGroupFacade;
$this->pricingGroupDataFactory = $pricingGroupDataFactory;
}
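    /**
     * @param \Doctrine\Common\Persistence\ObjectManager $manager
     */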
public function load(ObjectManager $manager)
{
$pricingGroupData = $this->pricingGroupDataFactory->create();
$pricingGroupData->name = 'Obyčejný zákazník';
$domainId = 2;
$this->createPricingGroup($pricingGroupData, $domainId, self::PRICING_GROUP_ORDINARY_DOMAIN_2);
$pricingGroupData->name = 'VIP zákazník';
$domainId1 = 2;
$this->createPricingGroup($pricingGroupData, $domainId1, self::PRICING_GROUP_VIP_DOMAIN_2);
}
/**
* @param \Shopsys\FrameworkBundle\Model\Pricing\Group\PricingGroupData $pricingGroupData
* @param int $domainId
* @param string $referenceName
*/
private function createPricingGroup(
PricingGroupData $pricingGroupData,
$domainId,
$referenceName
) {
$pricingGroup = $this->pricingGroupFacade->create($pricingGroupData, $domainId);
$this->addReference($referenceName, $pricingGroup);
}
}
| 1 | 11,948 | should this be kept in the `else` branch? | shopsys-shopsys | php |
@@ -32,10 +32,11 @@ import (
"strings"
"time"
+ "sync"
+
"cloud.google.com/go/storage"
"github.com/GoogleCloudPlatform/compute-image-tools/daisy/compute"
"google.golang.org/api/option"
- "sync"
)
const defaultTimeout = "10m" | 1 | // Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package workflow describes a daisy workflow.
package workflow
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"os/user"
"path"
"path/filepath"
"reflect"
"strconv"
"strings"
"time"
"cloud.google.com/go/storage"
"github.com/GoogleCloudPlatform/compute-image-tools/daisy/compute"
"google.golang.org/api/option"
"sync"
)
const defaultTimeout = "10m"
type gcsLogger struct {
client *storage.Client
bucket, object string
buf *bytes.Buffer
ctx context.Context
}
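// Write buffers everything written so far and rewrites the whole log object in
// GCS on each call, since GCS objects cannot be appended to in place.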
func (l *gcsLogger) Write(b []byte) (int, error) {
if l.buf == nil {
l.buf = new(bytes.Buffer)
}
l.buf.Write(b)
wc := l.client.Bucket(l.bucket).Object(l.object).NewWriter(l.ctx)
wc.ContentType = "text/plain"
n, err := wc.Write(l.buf.Bytes())
if err != nil {
return 0, err
}
if err := wc.Close(); err != nil {
return 0, err
}
return n, err
}
// Workflow is a single Daisy workflow.
type Workflow struct {
// Populated on New() construction.
Ctx context.Context `json:"-"`
Cancel chan struct{} `json:"-"`
// Workflow template fields.
// Workflow name.
Name string
// Project to run in.
Project string
// Zone to run in.
Zone string
// GCS Path to use for scratch data and write logs/results to.
GCSPath string
// Path to OAuth credentials file.
OAuthPath string `json:",omitempty"`
// Sources used by this workflow, map of destination to source.
Sources map[string]string `json:",omitempty"`
// Vars defines workflow variables, substitution is done at Workflow run time.
Vars map[string]string `json:",omitempty"`
Steps map[string]*Step
// Map of steps to their dependencies.
Dependencies map[string][]string
// Working fields.
workflowDir string
parent *Workflow
bucket string
scratchPath string
sourcesPath string
logsPath string
outsPath string
username string
gcsLogging bool
ComputeClient *compute.Client `json:"-"`
StorageClient *storage.Client `json:"-"`
id string
logger *log.Logger
cleanupHooks []func() error
cleanupHooksMx sync.Mutex
}
func (w *Workflow) addCleanupHook(hook func() error) {
w.cleanupHooksMx.Lock()
w.cleanupHooks = append(w.cleanupHooks, hook)
w.cleanupHooksMx.Unlock()
}
// Validate runs validation on the workflow.
func (w *Workflow) Validate() error {
if err := w.validateRequiredFields(); err != nil {
close(w.Cancel)
return fmt.Errorf("error validating workflow: %v", err)
}
if err := w.populate(); err != nil {
close(w.Cancel)
return fmt.Errorf("error populating workflow: %v", err)
}
w.logger.Print("Validating workflow")
if err := w.validate(); err != nil {
w.logger.Printf("Error validating workflow: %v", err)
close(w.Cancel)
return err
}
w.logger.Print("Validation Complete")
return nil
}
// Run runs a workflow.
func (w *Workflow) Run() error {
w.gcsLogging = true
if err := w.Validate(); err != nil {
return err
}
defer w.cleanup()
w.logger.Print("Uploading sources")
if err := w.uploadSources(); err != nil {
w.logger.Printf("Error uploading sources: %v", err)
close(w.Cancel)
return err
}
w.logger.Print("Running workflow")
if err := w.run(); err != nil {
w.logger.Printf("Error running workflow: %v", err)
select {
case <-w.Cancel:
default:
close(w.Cancel)
}
return err
}
return nil
}
func (w *Workflow) String() string {
f := "{Name:%q Project:%q Zone:%q Bucket:%q OAuthPath:%q Sources:%s Vars:%s Steps:%s Dependencies:%s id:%q}"
return fmt.Sprintf(f, w.Name, w.Project, w.Zone, w.bucket, w.OAuthPath, w.Sources, w.Vars, w.Steps, w.Dependencies, w.id)
}
func (w *Workflow) cleanup() {
w.logger.Printf("Workflow %q cleaning up (this may take up to 2 minutes.", w.Name)
for _, hook := range w.cleanupHooks {
if err := hook(); err != nil {
w.logger.Printf("Error returned from cleanup hook: %s", err)
}
}
}
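// genName generates a unique, lowercased resource name of the form
// "<n>-<workflow name>-<workflow id>", truncated to fit GCE resource
// name length limits.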
func (w *Workflow) genName(n string) string {
prefix := fmt.Sprintf("%s-%s", n, w.Name)
if len(prefix) > 57 {
prefix = prefix[0:56]
}
result := fmt.Sprintf("%s-%s", prefix, w.id)
if len(result) > 64 {
result = result[0:63]
}
return strings.ToLower(result)
}
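// populateStep fills in a step's defaults (timeout, wait intervals) and
// recursively populates any subworkflow with fields inherited from the parent.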
func (w *Workflow) populateStep(step *Step) error {
if step.Timeout == "" {
step.Timeout = defaultTimeout
}
timeout, err := time.ParseDuration(step.Timeout)
if err != nil {
return err
}
step.timeout = timeout
if step.WaitForInstancesSignal != nil {
for i, s := range *step.WaitForInstancesSignal {
if s.Interval == "" {
s.Interval = defaultInterval
}
interval, err := time.ParseDuration(s.Interval)
if err != nil {
return err
}
(*step.WaitForInstancesSignal)[i].interval = interval
}
}
// Recurse on subworkflows.
if step.SubWorkflow == nil {
return nil
}
step.SubWorkflow.workflow.GCSPath = fmt.Sprintf("gs://%s/%s", w.bucket, w.scratchPath)
step.SubWorkflow.workflow.Name = step.name
step.SubWorkflow.workflow.Project = w.Project
step.SubWorkflow.workflow.Zone = w.Zone
step.SubWorkflow.workflow.OAuthPath = w.OAuthPath
step.SubWorkflow.workflow.ComputeClient = w.ComputeClient
step.SubWorkflow.workflow.StorageClient = w.StorageClient
step.SubWorkflow.workflow.Ctx = w.Ctx
step.SubWorkflow.workflow.Cancel = w.Cancel
step.SubWorkflow.workflow.logger = w.logger
for k, v := range step.SubWorkflow.Vars {
step.SubWorkflow.workflow.Vars[k] = v
}
return step.SubWorkflow.workflow.populate()
}
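// populate resolves workflow vars and autovars, sets up the GCS scratch paths,
// and lazily creates the compute/storage clients and the logger.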
func (w *Workflow) populate() error {
w.id = randString(5)
now := time.Now().UTC()
cu, err := user.Current()
if err != nil {
return err
}
w.username = cu.Username
autovars := map[string]string{
"ID": w.id,
"DATE": now.Format("20060102"),
"DATETIME": now.Format("20060102150405"),
"TIMESTAMP": strconv.FormatInt(now.Unix(), 10),
"USERNAME": w.username,
}
vars := map[string]string{}
for k, v := range w.Vars {
vars[k] = v
}
var replacements []string
for k, v := range autovars {
replacements = append(replacements, fmt.Sprintf("${%s}", k), v)
}
for k, v := range vars {
replacements = append(replacements, fmt.Sprintf("${%s}", k), v)
}
substitute(reflect.ValueOf(w).Elem(), strings.NewReplacer(replacements...))
// Set up GCS paths.
bkt, p, err := splitGCSPath(w.GCSPath)
if err != nil {
return err
}
w.bucket = bkt
w.scratchPath = path.Join(p, fmt.Sprintf("daisy-%s-%s-%s", w.Name, now.Format("20060102-15:04:05"), w.id))
w.sourcesPath = path.Join(w.scratchPath, "sources")
w.logsPath = path.Join(w.scratchPath, "logs")
w.outsPath = path.Join(w.scratchPath, "outs")
// Do replacement for autovars. Autovars pull from workflow fields,
// so Vars replacement must run before this to resolve the final
// value for those fields.
autovars = map[string]string{
"NAME": w.Name,
"ZONE": w.Zone,
"PROJECT": w.Project,
"GCSPATH": w.GCSPath,
"SCRATCHPATH": fmt.Sprintf("gs://%s/%s", w.bucket, w.scratchPath),
"SOURCESPATH": fmt.Sprintf("gs://%s/%s", w.bucket, w.sourcesPath),
"LOGSPATH": fmt.Sprintf("gs://%s/%s", w.bucket, w.logsPath),
"OUTSPATH": fmt.Sprintf("gs://%s/%s", w.bucket, w.outsPath),
}
replacements = []string{}
for k, v := range autovars {
replacements = append(replacements, fmt.Sprintf("${%s}", k), v)
}
substitute(reflect.ValueOf(w).Elem(), strings.NewReplacer(replacements...))
if w.ComputeClient == nil {
w.ComputeClient, err = compute.NewClient(w.Ctx, option.WithServiceAccountFile(w.OAuthPath))
if err != nil {
return err
}
}
if w.StorageClient == nil {
w.StorageClient, err = storage.NewClient(w.Ctx, option.WithServiceAccountFile(w.OAuthPath))
if err != nil {
return err
}
}
if w.logger == nil {
name := w.Name
for parent := w.parent; parent != nil; parent = w.parent.parent {
name = parent.Name + "." + name
}
prefix := fmt.Sprintf("[%s]: ", name)
flags := log.Ldate | log.Ltime
gcs := ioutil.Discard
if w.gcsLogging {
gcs = &gcsLogger{client: w.StorageClient, bucket: w.bucket, object: path.Join(w.logsPath, "daisy.log"), ctx: w.Ctx}
log.New(os.Stdout, prefix, flags).Println("Logs will be streamed to", "gs://"+path.Join(w.bucket, w.logsPath, "daisy.log"))
}
w.logger = log.New(io.MultiWriter(os.Stdout, gcs), prefix, flags)
}
for name, s := range w.Steps {
s.name = name
if err := w.populateStep(s); err != nil {
return err
}
}
return nil
}
// Print populates then pretty prints the workflow.
func (w *Workflow) Print() {
if err := w.populate(); err != nil {
fmt.Println("Error running populate:", err)
}
b, err := json.MarshalIndent(w, "", " ")
if err != nil {
fmt.Println("Error marshalling workflow for printing:", err)
}
fmt.Println(string(b))
}
func (w *Workflow) run() error {
return w.traverseDAG(func(s *Step) error {
return w.runStep(s)
})
}
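// runStep runs a single step, failing it if it does not finish within the
// step's configured timeout.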
func (w *Workflow) runStep(s *Step) error {
timeout := make(chan struct{})
go func() {
time.Sleep(s.timeout)
close(timeout)
}()
e := make(chan error)
go func() {
e <- s.run(w)
}()
select {
case err := <-e:
return err
case <-timeout:
return fmt.Errorf("step %q did not stop in specified timeout of %s", s.name, s.timeout)
}
}
// Concurrently traverse the DAG, running func f on each step.
// Return an error if f returns an error on any step.
func (w *Workflow) traverseDAG(f func(*Step) error) error {
// waiting = steps and the dependencies they are waiting for.
// running = the currently running steps.
// start = map of steps' start channels/semaphores.
// done = map of steps' done channels for signaling step completion.
waiting := map[string][]string{}
var running []string
start := map[string]chan error{}
done := map[string]chan error{}
// Setup: channels, copy dependencies.
for name := range w.Steps {
waiting[name] = w.Dependencies[name]
start[name] = make(chan error)
done[name] = make(chan error)
}
// Setup: goroutine for each step. Each waits to be notified to start.
for name, s := range w.Steps {
go func(name string, s *Step) {
// Wait for signal, then run the function. Return any errs.
if err := <-start[name]; err != nil {
done[name] <- err
} else if err := f(s); err != nil {
done[name] <- err
}
close(done[name])
}(name, s)
}
// Main signaling logic.
for len(waiting) != 0 || len(running) != 0 {
// If we got a Cancel signal, kill all waiting steps.
// Let running steps finish.
select {
case <-w.Cancel:
waiting = map[string][]string{}
default:
}
// Kick off all steps that aren't waiting for anything.
for name, deps := range waiting {
if len(deps) == 0 {
delete(waiting, name)
running = append(running, name)
close(start[name])
}
}
// Sanity check. There should be at least one running step,
// but loop back through if there isn't.
if len(running) == 0 {
continue
}
// Get next finished step. Return the step error if it erred.
finished, err := stepsListen(running, done)
if err != nil {
return err
}
// Remove finished step from other steps' waiting lists.
for name, deps := range waiting {
waiting[name] = filter(deps, finished)
}
// Remove finished from currently running list.
running = filter(running, finished)
}
return nil
}
// New instantiates a new workflow.
func New(ctx context.Context) *Workflow {
var w Workflow
w.Ctx = ctx
// We can't use context.WithCancel as we use the context even after cancel for cleanup.
w.Cancel = make(chan struct{})
initWorkflowResources(&w)
return &w
}
// NewFromFile reads and unmarshals a workflow file.
// Recursively reads subworkflow steps as well.
func NewFromFile(ctx context.Context, file string) (*Workflow, error) {
w := New(ctx)
data, err := ioutil.ReadFile(file)
if err != nil {
return nil, err
}
w.workflowDir, err = filepath.Abs(filepath.Dir(file))
if err != nil {
return nil, err
}
if err := json.Unmarshal(data, &w); err != nil {
// If this is a syntax error return a useful error.
sErr, ok := err.(*json.SyntaxError)
if !ok {
return nil, err
}
// Byte number where the error line starts.
start := bytes.LastIndex(data[:sErr.Offset], []byte("\n")) + 1
// Assume end byte of error line is EOF unless this isn't the last line.
end := len(data)
if i := bytes.Index(data[start:], []byte("\n")); i >= 0 {
end = start + i
}
// Line number of error.
line := bytes.Count(data[:start], []byte("\n")) + 1
// Position of error in line (where to place the '^').
pos := int(sErr.Offset) - start
if pos != 0 {
pos = pos - 1
}
return nil, fmt.Errorf("%s: JSON syntax error in line %d: %s \n%s\n%s^", file, line, err, data[start:end], strings.Repeat(" ", pos))
}
if w.OAuthPath != "" && !filepath.IsAbs(w.OAuthPath) {
w.OAuthPath = filepath.Join(w.workflowDir, w.OAuthPath)
}
// We need to unmarshal any SubWorkflows.
for name, s := range w.Steps {
s.name = name
s.w = w
if s.SubWorkflow == nil {
continue
}
swPath := s.SubWorkflow.Path
if !filepath.IsAbs(swPath) {
swPath = filepath.Join(w.workflowDir, swPath)
}
sw, err := NewFromFile(w.Ctx, swPath)
if err != nil {
return nil, err
}
s.SubWorkflow.workflow = sw
sw.parent = w
}
return w, nil
}
// stepsListen returns the first step that finishes/errs.
func stepsListen(names []string, chans map[string]chan error) (string, error) {
cases := make([]reflect.SelectCase, len(names))
for i, name := range names {
cases[i] = reflect.SelectCase{Dir: reflect.SelectRecv, Chan: reflect.ValueOf(chans[name])}
}
caseIndex, value, recvOk := reflect.Select(cases)
name := names[caseIndex]
if recvOk {
// recvOk -> a step failed, return the error.
return name, value.Interface().(error)
}
return name, nil
}
| 1 | 6,499 | Why not put this below "strings"? | GoogleCloudPlatform-compute-image-tools | go |
@@ -33,6 +33,11 @@ namespace Microsoft.AspNetCore.Server.Kestrel.Transport.Libuv.Internal
{
}
+ /// <summary>
+ /// For testing purposes.
+ /// </summary>
+ public int UvPipeCount => _dispatchPipes.Count;
+
private UvPipeHandle ListenPipe { get; set; }
public async Task StartAsync( | 1 | // Copyright (c) .NET Foundation. All rights reserved.
// Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.
using System;
using System.Collections.Generic;
using System.IO;
using System.Runtime.InteropServices;
using System.Threading.Tasks;
using Microsoft.AspNetCore.Server.Kestrel.Transport.Abstractions;
using Microsoft.AspNetCore.Server.Kestrel.Transport.Libuv.Internal.Networking;
using Microsoft.Extensions.Logging;
namespace Microsoft.AspNetCore.Server.Kestrel.Transport.Libuv.Internal
{
/// <summary>
/// A primary listener waits for incoming connections on a specified socket. Incoming
/// connections may be passed to a secondary listener to handle.
/// </summary>
public class ListenerPrimary : Listener
{
private readonly List<UvPipeHandle> _dispatchPipes = new List<UvPipeHandle>();
private int _dispatchIndex;
private string _pipeName;
private byte[] _pipeMessage;
private IntPtr _fileCompletionInfoPtr;
private bool _tryDetachFromIOCP = PlatformApis.IsWindows;
// this message is passed to write2 because it must be non-zero-length,
// but it has no other functional significance
private readonly ArraySegment<ArraySegment<byte>> _dummyMessage = new ArraySegment<ArraySegment<byte>>(new[] { new ArraySegment<byte>(new byte[] { 1, 2, 3, 4 }) });
public ListenerPrimary(LibuvTransportContext transportContext) : base(transportContext)
{
}
private UvPipeHandle ListenPipe { get; set; }
public async Task StartAsync(
string pipeName,
byte[] pipeMessage,
IEndPointInformation endPointInformation,
LibuvThread thread)
{
_pipeName = pipeName;
_pipeMessage = pipeMessage;
if (_fileCompletionInfoPtr == IntPtr.Zero)
{
var fileCompletionInfo = new FILE_COMPLETION_INFORMATION() { Key = IntPtr.Zero, Port = IntPtr.Zero };
_fileCompletionInfoPtr = Marshal.AllocHGlobal(Marshal.SizeOf(fileCompletionInfo));
Marshal.StructureToPtr(fileCompletionInfo, _fileCompletionInfoPtr, false);
}
await StartAsync(endPointInformation, thread).ConfigureAwait(false);
await Thread.PostAsync(listener => listener.PostCallback(), this).ConfigureAwait(false);
}
private void PostCallback()
{
ListenPipe = new UvPipeHandle(Log);
ListenPipe.Init(Thread.Loop, Thread.QueueCloseHandle, false);
ListenPipe.Bind(_pipeName);
ListenPipe.Listen(LibuvConstants.ListenBacklog,
(pipe, status, error, state) => ((ListenerPrimary)state).OnListenPipe(pipe, status, error), this);
}
private void OnListenPipe(UvStreamHandle pipe, int status, Exception error)
{
if (status < 0)
{
return;
}
var dispatchPipe = new UvPipeHandle(Log);
try
{
dispatchPipe.Init(Thread.Loop, Thread.QueueCloseHandle, true);
pipe.Accept(dispatchPipe);
// Ensure client sends "Kestrel" before adding pipe to _dispatchPipes.
var readContext = new PipeReadContext(this);
dispatchPipe.ReadStart(
(handle, status2, state) => ((PipeReadContext)state).AllocCallback(handle, status2),
(handle, status2, state) => ((PipeReadContext)state).ReadCallback(handle, status2),
readContext);
}
catch (UvException ex)
{
dispatchPipe.Dispose();
Log.LogError(0, ex, "ListenerPrimary.OnListenPipe");
}
}
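        // Round-robins incoming connections between this process and the
        // secondary listeners' dispatch pipes.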
protected override void DispatchConnection(UvStreamHandle socket)
{
var index = _dispatchIndex++ % (_dispatchPipes.Count + 1);
if (index == _dispatchPipes.Count)
{
base.DispatchConnection(socket);
}
else
{
DetachFromIOCP(socket);
var dispatchPipe = _dispatchPipes[index];
var write = new UvWriteReq(Log);
try
{
write.Init(Thread);
write.Write2(
dispatchPipe,
_dummyMessage,
socket,
(write2, status, error, state) =>
{
write2.Dispose();
((UvStreamHandle)state).Dispose();
},
socket);
}
catch (UvException)
{
write.Dispose();
throw;
}
}
}
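        // Detaches the socket from its IO completion port so its ownership can
        // be transferred to another process (supported on Windows 8.1+ only).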
private void DetachFromIOCP(UvHandle handle)
{
if (!_tryDetachFromIOCP)
{
return;
}
// https://msdn.microsoft.com/en-us/library/windows/hardware/ff728840(v=vs.85).aspx
const int FileReplaceCompletionInformation = 61;
// https://msdn.microsoft.com/en-us/library/cc704588.aspx
const uint STATUS_INVALID_INFO_CLASS = 0xC0000003;
var statusBlock = new IO_STATUS_BLOCK();
var socket = IntPtr.Zero;
Thread.Loop.Libuv.uv_fileno(handle, ref socket);
if (NtSetInformationFile(socket, out statusBlock, _fileCompletionInfoPtr,
(uint)Marshal.SizeOf<FILE_COMPLETION_INFORMATION>(), FileReplaceCompletionInformation) == STATUS_INVALID_INFO_CLASS)
{
// Replacing IOCP information is only supported on Windows 8.1 or newer
_tryDetachFromIOCP = false;
}
}
private struct IO_STATUS_BLOCK
{
uint status;
ulong information;
}
private struct FILE_COMPLETION_INFORMATION
{
public IntPtr Port;
public IntPtr Key;
}
[DllImport("NtDll.dll")]
private static extern uint NtSetInformationFile(IntPtr FileHandle,
out IO_STATUS_BLOCK IoStatusBlock, IntPtr FileInformation, uint Length,
int FileInformationClass);
public override async Task DisposeAsync()
{
// Call base first so the ListenSocket gets closed and doesn't
// try to dispatch connections to closed pipes.
await base.DisposeAsync().ConfigureAwait(false);
if (_fileCompletionInfoPtr != IntPtr.Zero)
{
Marshal.FreeHGlobal(_fileCompletionInfoPtr);
_fileCompletionInfoPtr = IntPtr.Zero;
}
if (Thread.FatalError == null && ListenPipe != null)
{
await Thread.PostAsync(listener =>
{
listener.ListenPipe.Dispose();
foreach (var dispatchPipe in listener._dispatchPipes)
{
dispatchPipe.Dispose();
}
}, this).ConfigureAwait(false);
}
}
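        // Reads the pipe message sent by a secondary listener and only adds the
        // dispatch pipe to _dispatchPipes once the expected message is verified.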
private class PipeReadContext
{
private const int _bufferLength = 16;
private readonly ListenerPrimary _listener;
private readonly byte[] _buf = new byte[_bufferLength];
private readonly IntPtr _bufPtr;
private GCHandle _bufHandle;
private int _bytesRead;
public PipeReadContext(ListenerPrimary listener)
{
_listener = listener;
_bufHandle = GCHandle.Alloc(_buf, GCHandleType.Pinned);
_bufPtr = _bufHandle.AddrOfPinnedObject();
}
public LibuvFunctions.uv_buf_t AllocCallback(UvStreamHandle dispatchPipe, int suggestedSize)
{
return dispatchPipe.Libuv.buf_init(_bufPtr + _bytesRead, _bufferLength - _bytesRead);
}
public void ReadCallback(UvStreamHandle dispatchPipe, int status)
{
try
{
dispatchPipe.Libuv.ThrowIfErrored(status);
_bytesRead += status;
if (_bytesRead == _bufferLength)
{
var correctMessage = true;
for (var i = 0; i < _bufferLength; i++)
{
if (_buf[i] != _listener._pipeMessage[i])
{
correctMessage = false;
}
}
if (correctMessage)
{
_listener._dispatchPipes.Add((UvPipeHandle)dispatchPipe);
dispatchPipe.ReadStop();
_bufHandle.Free();
}
else
{
throw new IOException("Bad data sent over Kestrel pipe.");
}
}
}
catch (Exception ex)
{
dispatchPipe.Dispose();
_bufHandle.Free();
_listener.Log.LogError(0, ex, "ListenerPrimary.ReadCallback");
}
}
}
}
}
| 1 | 13,016 | Make it `internal` if it's just for testing. | aspnet-KestrelHttpServer | .cs |
@@ -1,4 +1,4 @@
-# pylint: disable=missing-docstring,pointless-statement,expression-not-assigned,too-few-public-methods,import-error,no-init,wrong-import-position,no-else-return
+# pylint: disable=missing-docstring,pointless-statement,expression-not-assigned,too-few-public-methods,import-error,no-init,wrong-import-position,no-else-return, logical-tautology
# standard types
1 in [1, 2, 3] | 1 | # pylint: disable=missing-docstring,pointless-statement,expression-not-assigned,too-few-public-methods,import-error,no-init,wrong-import-position,no-else-return
# standard types
1 in [1, 2, 3]
1 in {'a': 1, 'b': 2}
1 in {1, 2, 3}
1 in (1, 2, 3)
'1' in "123"
'1' in u"123"
'1' in bytearray(b"123")
1 in frozenset([1, 2, 3])
# comprehensions
1 in [x ** 2 % 10 for x in range(10)]
1 in {x ** 2 % 10 for x in range(10)}
1 in {x: x ** 2 % 10 for x in range(10)}
# iterators
1 in iter([1, 2, 3])
# generator
def count(upto=float("inf")):
i = 0
while True:
if i > upto:
break
yield i
i += 1
10 in count(upto=10)
# custom instance
class UniversalContainer(object):
def __contains__(self, key):
return True
42 in UniversalContainer()
# custom iterable
class CustomIterable(object):
def __iter__(self):
return iter((1, 2, 3))
3 in CustomIterable()
# old-style iterable
class OldStyleIterable(object):
def __getitem__(self, key):
if key < 10:
return 2 ** key
else:
raise IndexError("bad index")
64 in OldStyleIterable()
# do not emit warning if class has unknown bases
from some_missing_module import ImportedClass
class MaybeIterable(ImportedClass):
pass
10 in MaybeIterable()
# do not emit warning inside mixins/abstract/base classes
class UsefulMixin(object):
stuff = None
def get_stuff(self):
return self.stuff
def act(self, thing):
stuff = self.get_stuff()
if thing in stuff:
pass
class BaseThing(object):
valid_values = None
def validate(self, value):
if self.valid_values is None:
return True
else:
# error should not be emitted here
return value in self.valid_values
class AbstractThing(object):
valid_values = None
def validate(self, value):
if self.valid_values is None:
return True
else:
# error should not be emitted here
return value in self.valid_values
# class is not named as abstract
# but still is deducibly abstract
class Thing(object):
valid_values = None
def __init__(self):
self._init_values()
def validate(self, value):
if self.valid_values is None:
return True
else:
# error should not be emitted here
return value in self.valid_values
def _init_values(self):
raise NotImplementedError
# error cases
42 in 42 # [unsupported-membership-test]
42 not in None # [unsupported-membership-test]
42 in 8.5 # [unsupported-membership-test]
class EmptyClass(object):
pass
42 not in EmptyClass() # [unsupported-membership-test]
42 in EmptyClass # [unsupported-membership-test]
42 not in count # [unsupported-membership-test]
42 in range # [unsupported-membership-test]
| 1 | 10,151 | What is triggering this message in this file? | PyCQA-pylint | py |
@@ -212,7 +212,7 @@ def start_workers(args, actions, context, analyzer_config_map, skp_handler,
pool.map_async(check,
analyzed_actions,
1,
- callback=worker_result_handler).get(float('inf'))
+ callback=worker_result_handler).get()
pool.close()
except Exception: | 1 | # -------------------------------------------------------------------------
# The CodeChecker Infrastructure
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
# -------------------------------------------------------------------------
"""
"""
import multiprocessing
import ntpath
import os
import re
import shutil
import signal
import sys
import traceback
from collections import defaultdict
from codechecker_lib import analyzer_env
from codechecker_lib.logger import LoggerFactory
from codechecker_lib.analyzers import analyzer_types
LOG = LoggerFactory.get_new_logger('ANALYSIS MANAGER')
def worker_result_handler(results):
"""
Print the analysis summary.
"""
successful_analysis = defaultdict(int)
failed_analysis = defaultdict(int)
skipped_num = 0
for res, skipped, analyzer_type in results:
if skipped:
skipped_num += 1
else:
if res == 0:
successful_analysis[analyzer_type] += 1
else:
failed_analysis[analyzer_type] += 1
LOG.info("----==== Summary ====----")
LOG.info('Total compilation commands: ' + str(len(results)))
if successful_analysis:
LOG.info('Successfully analyzed')
for analyzer_type, res in successful_analysis.items():
LOG.info(' ' + analyzer_type + ': ' + str(res))
if failed_analysis:
LOG.info("Failed to analyze")
for analyzer_type, res in failed_analysis.items():
LOG.info(' ' + analyzer_type + ': ' + str(res))
if skipped_num:
LOG.info('Skipped compilation commands: ' + str(skipped_num))
LOG.info("----=================----")
# Progress reporting.
progress_checked_num = None
progress_actions = None
progress_lock = None
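# Store the shared progress counters and lock as globals in each worker process.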
def init_worker(checked_num, action_num, lock):
global progress_checked_num, progress_actions, progress_lock
progress_checked_num = checked_num
progress_actions = action_num
progress_lock = lock
def check(check_data):
"""
    Invoke the analyzer for a build action; this is called by worker processes.
    A separate analyzer object is constructed for each build action.
    The skiplist handler is None if no skip list file was configured.
"""
args, action, context, analyzer_config_map, skp_handler, \
report_output_dir, use_db = check_data
skipped = False
try:
# If one analysis fails the check fails.
return_codes = 0
skipped = False
for source in action.sources:
# If there is no skiplist handler there was no skip list file
# in the command line.
# C++ file skipping is handled here.
_, source_file_name = ntpath.split(source)
if skp_handler and skp_handler.should_skip(source):
LOG.debug_analyzer(source_file_name + ' is skipped')
skipped = True
continue
# Construct analyzer env.
analyzer_environment = analyzer_env.get_check_env(
context.path_env_extra,
context.ld_lib_path_extra)
run_id = context.run_id
rh = analyzer_types.construct_result_handler(args,
action,
run_id,
report_output_dir,
context.severity_map,
skp_handler,
progress_lock,
use_db)
# Create a source analyzer.
source_analyzer = \
analyzer_types.construct_analyzer(action,
analyzer_config_map)
            # Source is the currently analyzed source file;
            # there can be more than one in a build action.
source_analyzer.source_file = source
# Fills up the result handler with the analyzer information.
source_analyzer.analyze(rh, analyzer_environment)
if rh.analyzer_returncode == 0:
# Analysis was successful processing results.
if rh.analyzer_stdout != '':
LOG.debug_analyzer('\n' + rh.analyzer_stdout)
if rh.analyzer_stderr != '':
LOG.debug_analyzer('\n' + rh.analyzer_stderr)
rh.postprocess_result()
rh.handle_results()
LOG.info("[%d/%d] %s analyzed %s successfully." %
(progress_checked_num.value, progress_actions.value,
action.analyzer_type, source_file_name))
else:
# Analysis failed.
LOG.error('Analyzing ' + source_file_name + ' with ' +
action.analyzer_type + ' failed.')
if rh.analyzer_stdout != '':
LOG.error(rh.analyzer_stdout)
if rh.analyzer_stderr != '':
LOG.error(rh.analyzer_stderr)
return_codes = rh.analyzer_returncode
if not args.keep_tmp:
rh.clean_results()
progress_checked_num.value += 1
return return_codes, skipped, action.analyzer_type
except Exception as e:
LOG.debug_analyzer(str(e))
traceback.print_exc(file=sys.stdout)
return 1, skipped, action.analyzer_type
def start_workers(args, actions, context, analyzer_config_map, skp_handler,
use_db=True):
"""
    Start the workers in the process pool;
    for every build action there is a worker which performs the analysis.
"""
# Handle SIGINT to stop this script running.
def signal_handler(*arg, **kwarg):
try:
pool.terminate()
finally:
sys.exit(1)
signal.signal(signal.SIGINT, signal_handler)
# Remove characters which could cause directory creation problems.
no_spec_char_name = re.sub(r'[^\w\-_. ]', '_', args.name)
report_output = os.path.join(context.codechecker_workspace,
no_spec_char_name + '_reports')
# Create report output dir which will be used by the result handlers for
# each analyzer to store analyzer results or temporary files.
# Each analyzer instance does its own cleanup.
if not os.path.exists(report_output):
os.mkdir(report_output)
# Start checking parallel.
checked_var = multiprocessing.Value('i', 1)
actions_num = multiprocessing.Value('i', len(actions))
lock = multiprocessing.Lock()
pool = multiprocessing.Pool(args.jobs, initializer=init_worker,
initargs=(checked_var, actions_num, lock))
try:
# Workaround, equivalent of map.
        # The main script does not receive the signal
        # while the map or map_async function is running.
        # It is a Python bug; it does not happen if a timeout is specified,
        # in which case the interrupt is received immediately.
analyzed_actions = [(args,
build_action,
context,
analyzer_config_map,
skp_handler,
report_output,
use_db) for build_action in actions]
pool.map_async(check,
analyzed_actions,
1,
callback=worker_result_handler).get(float('inf'))
pool.close()
except Exception:
pool.terminate()
raise
finally:
pool.join()
if not args.keep_tmp:
LOG.debug('Removing temporary directory: ' + report_output)
shutil.rmtree(report_output)
| 1 | 6,274 | Are you sure this should be removed? | Ericsson-codechecker | c |
@@ -121,8 +121,7 @@ public class DefaultTrustedSocketFactory implements TrustedSocketFactory {
private Context context;
- private ProxySettings proxySettings;
-
+ private ProxySettings proxySettings = new ProxySettings(false, "", 0);
public DefaultTrustedSocketFactory(Context context, ProxySettings proxySettings) {
this.context = context; | 1 | package com.fsck.k9.mail.ssl;
import java.io.IOException;
import java.net.Proxy.Type;
import java.net.Socket;
import java.security.KeyManagementException;
import java.security.NoSuchAlgorithmException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import android.content.Context;
import android.net.SSLCertificateSocketFactory;
import android.text.TextUtils;
import android.util.Log;
import com.fsck.k9.mail.MessagingException;
import javax.net.ssl.KeyManager;
import javax.net.ssl.SSLContext;
import javax.net.ssl.SSLSocket;
import javax.net.ssl.SSLSocketFactory;
import javax.net.ssl.TrustManager;
import java.net.InetSocketAddress;
import java.net.Proxy;
import com.fsck.k9.mail.ProxySettings;
import static com.fsck.k9.mail.K9MailLib.LOG_TAG;
import static com.fsck.k9.mail.store.RemoteStore.SOCKET_CONNECT_TIMEOUT;
/**
 * Filter and reorder the list of cipher suites and TLS versions.
*/
public class DefaultTrustedSocketFactory implements TrustedSocketFactory {
protected static final String[] ENABLED_CIPHERS;
protected static final String[] ENABLED_PROTOCOLS;
protected static final String[] ORDERED_KNOWN_CIPHERS = {
"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384",
"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
"TLS_DHE_RSA_WITH_AES_256_GCM_SHA384",
"TLS_DHE_RSA_WITH_AES_128_GCM_SHA256",
"TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA",
"TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA",
"TLS_DHE_RSA_WITH_AES_256_CBC_SHA",
"TLS_DHE_DSS_WITH_AES_256_CBC_SHA",
"TLS_ECDH_RSA_WITH_AES_256_CBC_SHA",
"TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA",
"TLS_RSA_WITH_AES_256_CBC_SHA",
"TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA",
"TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA",
"TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA",
"TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA",
"TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA",
"TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA",
"TLS_DHE_RSA_WITH_AES_128_CBC_SHA",
"TLS_DHE_DSS_WITH_AES_128_CBC_SHA",
"TLS_ECDH_RSA_WITH_AES_128_CBC_SHA",
"TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA",
"TLS_RSA_WITH_AES_128_CBC_SHA",
};
protected static final String[] BLACKLISTED_CIPHERS = {
"SSL_RSA_WITH_DES_CBC_SHA",
"SSL_DHE_RSA_WITH_DES_CBC_SHA",
"SSL_DHE_DSS_WITH_DES_CBC_SHA",
"SSL_RSA_EXPORT_WITH_RC4_40_MD5",
"SSL_RSA_EXPORT_WITH_DES40_CBC_SHA",
"SSL_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA",
"SSL_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA",
"SSL_RSA_WITH_3DES_EDE_CBC_SHA",
"SSL_DHE_RSA_WITH_3DES_EDE_CBC_SHA",
"SSL_DHE_DSS_WITH_3DES_EDE_CBC_SHA",
"TLS_ECDHE_RSA_WITH_RC4_128_SHA",
"TLS_ECDHE_ECDSA_WITH_RC4_128_SHA",
"TLS_ECDH_RSA_WITH_RC4_128_SHA",
"TLS_ECDH_ECDSA_WITH_RC4_128_SHA",
"SSL_RSA_WITH_RC4_128_SHA",
"SSL_RSA_WITH_RC4_128_MD5",
};
protected static final String[] ORDERED_KNOWN_PROTOCOLS = {
"TLSv1.2", "TLSv1.1", "TLSv1"
};
protected static final String[] BLACKLISTED_PROTOCOLS = {
"SSLv3"
};
static {
String[] enabledCiphers = null;
String[] supportedProtocols = null;
try {
SSLContext sslContext = SSLContext.getInstance("TLS");
sslContext.init(null, null, null);
SSLSocketFactory sf = sslContext.getSocketFactory();
SSLSocket sock = (SSLSocket) sf.createSocket();
enabledCiphers = sock.getEnabledCipherSuites();
/*
* Retrieve all supported protocols, not just the (default) enabled
* ones. TLSv1.1 & TLSv1.2 are supported on API levels 16+, but are
* only enabled by default on API levels 20+.
*/
supportedProtocols = sock.getSupportedProtocols();
} catch (Exception e) {
Log.e(LOG_TAG, "Error getting information about available SSL/TLS ciphers and " +
"protocols", e);
}
ENABLED_CIPHERS = (enabledCiphers == null) ? null :
reorder(enabledCiphers, ORDERED_KNOWN_CIPHERS, BLACKLISTED_CIPHERS);
ENABLED_PROTOCOLS = (supportedProtocols == null) ? null :
reorder(supportedProtocols, ORDERED_KNOWN_PROTOCOLS, BLACKLISTED_PROTOCOLS);
}
private Context context;
private ProxySettings proxySettings;
public DefaultTrustedSocketFactory(Context context, ProxySettings proxySettings) {
this.context = context;
this.proxySettings = proxySettings;
}
protected static String[] reorder(String[] enabled, String[] known, String[] blacklisted) {
List<String> unknown = new ArrayList<String>();
Collections.addAll(unknown, enabled);
// Remove blacklisted items
if (blacklisted != null) {
for (String item : blacklisted) {
unknown.remove(item);
}
}
// Order known items
List<String> result = new ArrayList<String>();
for (String item : known) {
if (unknown.remove(item)) {
result.add(item);
}
}
// Add unknown items at the end. This way security won't get worse when unknown ciphers
// start showing up in the future.
result.addAll(unknown);
return result.toArray(new String[result.size()]);
}
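    // Creates a hardened TLS socket for the given host, tunnelling through a
    // SOCKS proxy when proxy settings are enabled; connects the socket itself
    // unless an already-connected socket was supplied.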
public Socket createSocket(Socket socket, String host, int port, String clientCertificateAlias)
throws NoSuchAlgorithmException, KeyManagementException, MessagingException, IOException {
TrustManager[] trustManagers = new TrustManager[] { TrustManagerFactory.get(host, port) };
KeyManager[] keyManagers = null;
if (!TextUtils.isEmpty(clientCertificateAlias)) {
keyManagers = new KeyManager[] { new KeyChainKeyManager(context, clientCertificateAlias) };
}
SSLContext sslContext = SSLContext.getInstance("TLS");
sslContext.init(keyManagers, trustManagers, null);
SSLSocketFactory socketFactory = sslContext.getSocketFactory();
Socket trustedSocket;
if (socket == null) {
if (proxySettings.enabled) {
InetSocketAddress proxyAddress = new InetSocketAddress(proxySettings.host, proxySettings.port);
Proxy proxy = new Proxy(Type.SOCKS, proxyAddress);
Socket underlying = new Socket(proxy);
InetSocketAddress serverAddress = new InetSocketAddress(host, port);
underlying.connect(serverAddress, SOCKET_CONNECT_TIMEOUT);
trustedSocket = socketFactory.createSocket(underlying, proxySettings.host, proxySettings.port, true);
} else {
trustedSocket = socketFactory.createSocket();
}
} else {
trustedSocket = socketFactory.createSocket(socket, host, port, true);
}
SSLSocket sslSocket = (SSLSocket) trustedSocket;
hardenSocket(sslSocket);
setSniHost(socketFactory, sslSocket, host);
if (!proxySettings.enabled && socket == null) {
InetSocketAddress serverAddress = new InetSocketAddress(host, port);
trustedSocket.connect(serverAddress, SOCKET_CONNECT_TIMEOUT);
}
return trustedSocket;
}
private static void hardenSocket(SSLSocket sock) {
if (ENABLED_CIPHERS != null) {
sock.setEnabledCipherSuites(ENABLED_CIPHERS);
}
if (ENABLED_PROTOCOLS != null) {
sock.setEnabledProtocols(ENABLED_PROTOCOLS);
}
}
public static void setSniHost(SSLSocketFactory factory, SSLSocket socket, String hostname) {
if (android.os.Build.VERSION.SDK_INT >= android.os.Build.VERSION_CODES.JELLY_BEAN_MR1 &&
factory instanceof android.net.SSLCertificateSocketFactory) {
SSLCertificateSocketFactory sslCertificateSocketFactory = (SSLCertificateSocketFactory) factory;
sslCertificateSocketFactory.setHostname(socket, hostname);
} else {
setHostnameViaReflection(socket, hostname);
}
}
private static void setHostnameViaReflection(SSLSocket socket, String hostname) {
try {
socket.getClass().getMethod("setHostname", String.class).invoke(socket, hostname);
} catch (Throwable e) {
Log.e(LOG_TAG, "Could not call SSLSocket#setHostname(String) method ", e);
}
}
}
| 1 | 13,552 | What's that good for? The field is initialized in the constructor. | k9mail-k-9 | java |
@@ -484,12 +484,14 @@ class _Connection(object):
if not isinstance(self.connection, SSL.Connection):
if not getattr(self.wfile, "closed", False):
try:
- self.wfile.flush()
- self.wfile.close()
+ if self.wfile:
+ self.wfile.flush()
+ self.wfile.close()
except exceptions.TcpDisconnect:
pass
- self.rfile.close()
+ if self.rfile:
+ self.rfile.close()
else:
try:
self.connection.shutdown() | 1 | from __future__ import (absolute_import, print_function, division)
import os
import select
import socket
import sys
import threading
import time
import traceback
import binascii
from typing import Optional # noqa
from netlib import strutils
from six.moves import range
import certifi
from backports import ssl_match_hostname
import six
import OpenSSL
from OpenSSL import SSL
from netlib import certutils
from netlib import version_check
from netlib import basetypes
from netlib import exceptions
from netlib import basethread
# This is a rather hackish way to make sure that
# the latest version of pyOpenSSL is actually installed.
version_check.check_pyopenssl_version()
if six.PY2:
socket_fileobject = socket._fileobject
else:
socket_fileobject = socket.SocketIO
EINTR = 4
if os.environ.get("NO_ALPN"):
HAS_ALPN = False
else:
HAS_ALPN = SSL._lib.Cryptography_HAS_ALPN
# To enable all SSL methods use: SSLv23
# then add options to disable certain methods
# https://bugs.launchpad.net/pyopenssl/+bug/1020632/comments/3
SSL_BASIC_OPTIONS = (
SSL.OP_CIPHER_SERVER_PREFERENCE
)
if hasattr(SSL, "OP_NO_COMPRESSION"):
SSL_BASIC_OPTIONS |= SSL.OP_NO_COMPRESSION
SSL_DEFAULT_METHOD = SSL.SSLv23_METHOD
SSL_DEFAULT_OPTIONS = (
SSL.OP_NO_SSLv2 |
SSL.OP_NO_SSLv3 |
SSL_BASIC_OPTIONS
)
if hasattr(SSL, "OP_NO_COMPRESSION"):
SSL_DEFAULT_OPTIONS |= SSL.OP_NO_COMPRESSION
"""
Map a reasonable SSL version specification into the format OpenSSL expects.
Don't ask...
https://bugs.launchpad.net/pyopenssl/+bug/1020632/comments/3
"""
sslversion_choices = {
"all": (SSL.SSLv23_METHOD, SSL_BASIC_OPTIONS),
# SSLv23_METHOD + NO_SSLv2 + NO_SSLv3 == TLS 1.0+
# TLSv1_METHOD would be TLS 1.0 only
"secure": (SSL.SSLv23_METHOD, (SSL.OP_NO_SSLv2 | SSL.OP_NO_SSLv3 | SSL_BASIC_OPTIONS)),
"SSLv2": (SSL.SSLv2_METHOD, SSL_BASIC_OPTIONS),
"SSLv3": (SSL.SSLv3_METHOD, SSL_BASIC_OPTIONS),
"TLSv1": (SSL.TLSv1_METHOD, SSL_BASIC_OPTIONS),
"TLSv1_1": (SSL.TLSv1_1_METHOD, SSL_BASIC_OPTIONS),
"TLSv1_2": (SSL.TLSv1_2_METHOD, SSL_BASIC_OPTIONS),
}
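# Editor's sketch (hypothetical helper, not part of this module) showing how
# the mapping above is consumed: each tuple supplies both the OpenSSL method
# and the option bits for an SSL.Context.
def _example_context_for(version="secure"):
    method, options = sslversion_choices[version]
    context = SSL.Context(method)
    context.set_options(options)
    return context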
class SSLKeyLogger(object):
def __init__(self, filename):
self.filename = filename
self.f = None
self.lock = threading.Lock()
# required for functools.wraps, which pyOpenSSL uses.
__name__ = "SSLKeyLogger"
def __call__(self, connection, where, ret):
if where == SSL.SSL_CB_HANDSHAKE_DONE and ret == 1:
with self.lock:
if not self.f:
d = os.path.dirname(self.filename)
if not os.path.isdir(d):
os.makedirs(d)
self.f = open(self.filename, "ab")
self.f.write(b"\r\n")
client_random = binascii.hexlify(connection.client_random())
masterkey = binascii.hexlify(connection.master_key())
self.f.write(b"CLIENT_RANDOM %s %s\r\n" % (client_random, masterkey))
self.f.flush()
def close(self):
with self.lock:
if self.f:
self.f.close()
@staticmethod
def create_logfun(filename):
if filename:
return SSLKeyLogger(filename)
return False
log_ssl_key = SSLKeyLogger.create_logfun(
os.getenv("MITMPROXY_SSLKEYLOGFILE") or os.getenv("SSLKEYLOGFILE"))
class _FileLike(object):
BLOCKSIZE = 1024 * 32
def __init__(self, o):
self.o = o
self._log = None
self.first_byte_timestamp = None
def set_descriptor(self, o):
self.o = o
def __getattr__(self, attr):
return getattr(self.o, attr)
def start_log(self):
"""
Starts or resets the log.
This will store all bytes read or written.
"""
self._log = []
def stop_log(self):
"""
Stops the log.
"""
self._log = None
def is_logging(self):
return self._log is not None
def get_log(self):
"""
Returns the log as a string.
"""
if not self.is_logging():
raise ValueError("Not logging!")
return b"".join(self._log)
def add_log(self, v):
if self.is_logging():
self._log.append(v)
def reset_timestamps(self):
self.first_byte_timestamp = None
class Writer(_FileLike):
def flush(self):
"""
May raise exceptions.TcpDisconnect
"""
if hasattr(self.o, "flush"):
try:
self.o.flush()
except (socket.error, IOError) as v:
raise exceptions.TcpDisconnect(str(v))
def write(self, v):
"""
May raise exceptions.TcpDisconnect
"""
if v:
self.first_byte_timestamp = self.first_byte_timestamp or time.time()
try:
if hasattr(self.o, "sendall"):
self.add_log(v)
return self.o.sendall(v)
else:
r = self.o.write(v)
self.add_log(v[:r])
return r
except (SSL.Error, socket.error) as e:
raise exceptions.TcpDisconnect(str(e))
class Reader(_FileLike):
def read(self, length):
"""
If length is -1, we read until connection closes.
"""
result = b''
start = time.time()
while length == -1 or length > 0:
if length == -1 or length > self.BLOCKSIZE:
rlen = self.BLOCKSIZE
else:
rlen = length
try:
data = self.o.read(rlen)
except SSL.ZeroReturnError:
# TLS connection was shut down cleanly
break
except (SSL.WantWriteError, SSL.WantReadError):
# From the OpenSSL docs:
# If the underlying BIO is non-blocking, SSL_read() will also return when the
# underlying BIO could not satisfy the needs of SSL_read() to continue the
# operation. In this case a call to SSL_get_error with the return value of
# SSL_read() will yield SSL_ERROR_WANT_READ or SSL_ERROR_WANT_WRITE.
if (time.time() - start) < self.o.gettimeout():
time.sleep(0.1)
continue
else:
raise exceptions.TcpTimeout()
except socket.timeout:
raise exceptions.TcpTimeout()
except socket.error as e:
raise exceptions.TcpDisconnect(str(e))
except SSL.SysCallError as e:
if e.args == (-1, 'Unexpected EOF'):
break
raise exceptions.TlsException(str(e))
except SSL.Error as e:
raise exceptions.TlsException(str(e))
self.first_byte_timestamp = self.first_byte_timestamp or time.time()
if not data:
break
result += data
if length != -1:
length -= len(data)
self.add_log(result)
return result
def readline(self, size=None):
result = b''
bytes_read = 0
while True:
if size is not None and bytes_read >= size:
break
ch = self.read(1)
bytes_read += 1
if not ch:
break
else:
result += ch
if ch == b'\n':
break
return result
def safe_read(self, length):
"""
Like .read, but is guaranteed to either return length bytes, or
raise an exception.
"""
result = self.read(length)
if length != -1 and len(result) != length:
if not result:
raise exceptions.TcpDisconnect()
else:
raise exceptions.TcpReadIncomplete(
"Expected %s bytes, got %s" % (length, len(result))
)
return result
def peek(self, length):
"""
Tries to peek into the underlying file object.
Returns:
Up to the next N bytes if peeking is successful.
Raises:
exceptions.TcpException if there was an error with the socket
TlsException if there was an error with pyOpenSSL.
NotImplementedError if the underlying file object is not a [pyOpenSSL] socket
"""
if isinstance(self.o, socket_fileobject):
try:
return self.o._sock.recv(length, socket.MSG_PEEK)
except socket.error as e:
raise exceptions.TcpException(repr(e))
elif isinstance(self.o, SSL.Connection):
try:
return self.o.recv(length, socket.MSG_PEEK)
except SSL.Error as e:
six.reraise(exceptions.TlsException, exceptions.TlsException(str(e)), sys.exc_info()[2])
else:
raise NotImplementedError("Can only peek into (pyOpenSSL) sockets")
class Address(basetypes.Serializable):
"""
This class wraps an IPv4/IPv6 tuple to provide named attributes and
ipv6 information.
"""
def __init__(self, address, use_ipv6=False):
self.address = tuple(address)
self.use_ipv6 = use_ipv6
def get_state(self):
return {
"address": self.address,
"use_ipv6": self.use_ipv6
}
def set_state(self, state):
self.address = state["address"]
self.use_ipv6 = state["use_ipv6"]
@classmethod
def from_state(cls, state):
return Address(**state)
@classmethod
def wrap(cls, t):
if isinstance(t, cls):
return t
else:
return cls(t)
def __call__(self):
return self.address
@property
def host(self):
return self.address[0]
@property
def port(self):
return self.address[1]
@property
def use_ipv6(self):
return self.family == socket.AF_INET6
@use_ipv6.setter
def use_ipv6(self, b):
self.family = socket.AF_INET6 if b else socket.AF_INET
def __repr__(self):
return "{}:{}".format(self.host, self.port)
def __eq__(self, other):
if not other:
return False
other = Address.wrap(other)
return (self.address, self.family) == (other.address, other.family)
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(self.address) ^ 42 # different hash than the tuple alone.
def ssl_read_select(rlist, timeout):
"""
This is a wrapper around select.select() which also works for SSL.Connections
by taking ssl_connection.pending() into account.
Caveats:
If .pending() > 0 for any of the connections in rlist, we avoid the select syscall
and **will not include any other connections which may or may not be ready**.
Args:
rlist: wait until ready for reading
Returns:
subset of rlist which is ready for reading.
"""
return [
conn for conn in rlist
if isinstance(conn, SSL.Connection) and conn.pending() > 0
] or select.select(rlist, (), (), timeout)[0]
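# Editor's illustrative sketch (hypothetical helper, not part of this module's
# API): a simple byte relay built on ssl_read_select. Unlike a bare
# select.select(), this also wakes up for TLS records that OpenSSL has already
# buffered inside an SSL.Connection.
def _example_relay(conn_a, conn_b, timeout=10):
    peer = {conn_a: conn_b, conn_b: conn_a}
    while True:
        for conn in ssl_read_select([conn_a, conn_b], timeout):
            data = conn.recv(4096)
            if not data:
                return
            peer[conn].sendall(data)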
def close_socket(sock):
"""
Does a hard close of a socket, without emitting a RST.
"""
try:
# We already indicate that we close our end.
# may raise "Transport endpoint is not connected" on Linux
sock.shutdown(socket.SHUT_WR)
# Section 4.2.2.13 of RFC 1122 tells us that a close() with any pending
# readable data could lead to an immediate RST being sent (which is the
# case on Windows).
# http://ia600609.us.archive.org/22/items/TheUltimateSo_lingerPageOrWhyIsMyTcpNotReliable/the-ultimate-so_linger-page-or-why-is-my-tcp-not-reliable.html
#
# This in turn results in the following issue: If we send an error page
# to the client and then close the socket, the RST may be received by
# the client before the error page and the users sees a connection
# error rather than the error page. Thus, we try to empty the read
# buffer on Windows first. (see
# https://github.com/mitmproxy/mitmproxy/issues/527#issuecomment-93782988)
#
if os.name == "nt": # pragma: no cover
# We cannot rely on the shutdown()-followed-by-read()-eof technique
# proposed by the page above: Some remote machines just don't send
# a TCP FIN, which would leave us in the unfortunate situation that
# recv() would block infinitely. As a workaround, we set a timeout
# here even if we are in blocking mode.
sock.settimeout(sock.gettimeout() or 20)
            # limit at a gigabyte (1024 ** 3 bytes) so that we don't read infinitely
for _ in range(1024 ** 3 // 4096):
# may raise a timeout/disconnect exception.
if not sock.recv(4096):
break
# Now we can close the other half as well.
sock.shutdown(socket.SHUT_RD)
except socket.error:
pass
sock.close()
class _Connection(object):
rbufsize = -1
wbufsize = -1
def _makefile(self):
"""
Set up .rfile and .wfile attributes from .connection
"""
# Ideally, we would use the Buffered IO in Python 3 by default.
# Unfortunately, the implementation of .peek() is broken for n>1 bytes,
# as it may just return what's left in the buffer and not all the bytes we want.
# As a workaround, we just use unbuffered sockets directly.
# https://mail.python.org/pipermail/python-dev/2009-June/089986.html
if six.PY2:
self.rfile = Reader(self.connection.makefile('rb', self.rbufsize))
self.wfile = Writer(self.connection.makefile('wb', self.wbufsize))
else:
self.rfile = Reader(socket.SocketIO(self.connection, "rb"))
self.wfile = Writer(socket.SocketIO(self.connection, "wb"))
def __init__(self, connection):
if connection:
self.connection = connection
self.ip_address = Address(connection.getpeername())
self._makefile()
else:
self.connection = None
self.ip_address = None
self.rfile = None
self.wfile = None
self.ssl_established = False
self.finished = False
def get_current_cipher(self):
if not self.ssl_established:
return None
name = self.connection.get_cipher_name()
bits = self.connection.get_cipher_bits()
version = self.connection.get_cipher_version()
return name, bits, version
def finish(self):
self.finished = True
# If we have an SSL connection, wfile.close == connection.close
# (We call _FileLike.set_descriptor(conn))
        # Closing the socket is not our task, therefore we don't call
        # close in that case.
if not isinstance(self.connection, SSL.Connection):
if not getattr(self.wfile, "closed", False):
try:
self.wfile.flush()
self.wfile.close()
except exceptions.TcpDisconnect:
pass
self.rfile.close()
else:
try:
self.connection.shutdown()
except SSL.Error:
pass
def _create_ssl_context(self,
method=SSL_DEFAULT_METHOD,
options=SSL_DEFAULT_OPTIONS,
verify_options=SSL.VERIFY_NONE,
ca_path=None,
ca_pemfile=None,
cipher_list=None,
alpn_protos=None,
alpn_select=None,
alpn_select_callback=None,
sni=None,
):
"""
Creates an SSL Context.
:param method: One of SSLv2_METHOD, SSLv3_METHOD, SSLv23_METHOD, TLSv1_METHOD, TLSv1_1_METHOD, or TLSv1_2_METHOD
:param options: A bit field consisting of OpenSSL.SSL.OP_* values
:param verify_options: A bit field consisting of OpenSSL.SSL.VERIFY_* values
:param ca_path: Path to a directory of trusted CA certificates prepared using the c_rehash tool
:param ca_pemfile: Path to a PEM formatted trusted CA certificate
:param cipher_list: A textual OpenSSL cipher list, see https://www.openssl.org/docs/apps/ciphers.html
:rtype : SSL.Context
"""
context = SSL.Context(method)
# Options (NO_SSLv2/3)
if options is not None:
context.set_options(options)
# Verify Options (NONE/PEER and trusted CAs)
if verify_options is not None:
def verify_cert(conn, x509, errno, err_depth, is_cert_verified):
if not is_cert_verified:
self.ssl_verification_error = exceptions.InvalidCertificateException(
"Certificate Verification Error for {}: {} (errno: {}, depth: {})".format(
sni,
strutils.native(SSL._ffi.string(SSL._lib.X509_verify_cert_error_string(errno)), "utf8"),
errno,
err_depth
)
)
return is_cert_verified
context.set_verify(verify_options, verify_cert)
if ca_path is None and ca_pemfile is None:
ca_pemfile = certifi.where()
context.load_verify_locations(ca_pemfile, ca_path)
# Workaround for
# https://github.com/pyca/pyopenssl/issues/190
# https://github.com/mitmproxy/mitmproxy/issues/472
# Options already set before are not cleared.
context.set_mode(SSL._lib.SSL_MODE_AUTO_RETRY)
# Cipher List
if cipher_list:
try:
context.set_cipher_list(cipher_list)
                # TODO: maybe change this with newer pyOpenSSL APIs
context.set_tmp_ecdh(OpenSSL.crypto.get_elliptic_curve('prime256v1'))
except SSL.Error as v:
raise exceptions.TlsException("SSL cipher specification error: %s" % str(v))
# SSLKEYLOGFILE
if log_ssl_key:
context.set_info_callback(log_ssl_key)
if HAS_ALPN:
if alpn_protos is not None:
# advertise application layer protocols
context.set_alpn_protos(alpn_protos)
elif alpn_select is not None and alpn_select_callback is None:
# select application layer protocol
def alpn_select_callback(conn_, options):
if alpn_select in options:
return bytes(alpn_select)
                else: # pragma: no cover
return options[0]
context.set_alpn_select_callback(alpn_select_callback)
elif alpn_select_callback is not None and alpn_select is None:
context.set_alpn_select_callback(alpn_select_callback)
elif alpn_select_callback is not None and alpn_select is not None:
raise exceptions.TlsException("ALPN error: only define alpn_select (string) OR alpn_select_callback (method).")
return context
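# Editor's sketch of a custom ALPN selection callback (hypothetical policy,
# not part of this module): such a function could be passed to
# _create_ssl_context via the alpn_select_callback parameter above.
def _example_alpn_select_callback(conn_, options):
    # Prefer HTTP/2 when the peer offers it, otherwise take its first choice.
    return b"h2" if b"h2" in options else options[0]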
class ConnectionCloser(object):
def __init__(self, conn):
self.conn = conn
self._canceled = False
def pop(self):
"""
Cancel the current closer, and return a fresh one.
"""
self._canceled = True
return ConnectionCloser(self.conn)
def __enter__(self):
return self
def __exit__(self, *args):
if not self._canceled:
self.conn.close()
class TCPClient(_Connection):
def __init__(self, address, source_address=None):
super(TCPClient, self).__init__(None)
self.address = address
self.source_address = source_address
self.cert = None
self.server_certs = []
self.ssl_verification_error = None # type: Optional[exceptions.InvalidCertificateException]
self.sni = None
@property
def address(self):
return self.__address
@address.setter
def address(self, address):
if address:
self.__address = Address.wrap(address)
else:
self.__address = None
@property
def source_address(self):
return self.__source_address
@source_address.setter
def source_address(self, source_address):
if source_address:
self.__source_address = Address.wrap(source_address)
else:
self.__source_address = None
def close(self):
# Make sure to close the real socket, not the SSL proxy.
        # OpenSSL is really good at screwing up, e.g. when trying to recv from a failed connection,
# it tries to renegotiate...
if isinstance(self.connection, SSL.Connection):
close_socket(self.connection._socket)
else:
close_socket(self.connection)
def create_ssl_context(self, cert=None, alpn_protos=None, **sslctx_kwargs):
context = self._create_ssl_context(
alpn_protos=alpn_protos,
**sslctx_kwargs)
# Client Certs
if cert:
try:
context.use_privatekey_file(cert)
context.use_certificate_file(cert)
except SSL.Error as v:
raise exceptions.TlsException("SSL client certificate error: %s" % str(v))
return context
def convert_to_ssl(self, sni=None, alpn_protos=None, **sslctx_kwargs):
"""
cert: Path to a file containing both client cert and private key.
options: A bit field consisting of OpenSSL.SSL.OP_* values
verify_options: A bit field consisting of OpenSSL.SSL.VERIFY_* values
ca_path: Path to a directory of trusted CA certificates prepared using the c_rehash tool
ca_pemfile: Path to a PEM formatted trusted CA certificate
"""
verification_mode = sslctx_kwargs.get('verify_options', None)
if verification_mode == SSL.VERIFY_PEER and not sni:
raise exceptions.TlsException("Cannot validate certificate hostname without SNI")
context = self.create_ssl_context(
alpn_protos=alpn_protos,
sni=sni,
**sslctx_kwargs
)
self.connection = SSL.Connection(context, self.connection)
if sni:
self.sni = sni
self.connection.set_tlsext_host_name(sni.encode("idna"))
self.connection.set_connect_state()
try:
self.connection.do_handshake()
except SSL.Error as v:
if self.ssl_verification_error:
raise self.ssl_verification_error
else:
raise exceptions.TlsException("SSL handshake error: %s" % repr(v))
else:
# Fix for pre v1.0 OpenSSL, which doesn't throw an exception on
# certificate validation failure
if verification_mode == SSL.VERIFY_PEER and self.ssl_verification_error:
raise self.ssl_verification_error
self.cert = certutils.SSLCert(self.connection.get_peer_certificate())
# Keep all server certificates in a list
for i in self.connection.get_peer_cert_chain():
self.server_certs.append(certutils.SSLCert(i))
# Validate TLS Hostname
try:
crt = dict(
subjectAltName=[("DNS", x.decode("ascii", "strict")) for x in self.cert.altnames]
)
if self.cert.cn:
crt["subject"] = [[["commonName", self.cert.cn.decode("ascii", "strict")]]]
if sni:
hostname = sni
else:
hostname = "no-hostname"
ssl_match_hostname.match_hostname(crt, hostname)
except (ValueError, ssl_match_hostname.CertificateError) as e:
self.ssl_verification_error = exceptions.InvalidCertificateException(
"Certificate Verification Error for {}: {}".format(
sni or repr(self.address),
str(e)
)
)
if verification_mode == SSL.VERIFY_PEER:
raise self.ssl_verification_error
self.ssl_established = True
self.rfile.set_descriptor(self.connection)
self.wfile.set_descriptor(self.connection)
def connect(self):
try:
connection = socket.socket(self.address.family, socket.SOCK_STREAM)
if self.source_address:
connection.bind(self.source_address())
connection.connect(self.address())
self.source_address = Address(connection.getsockname())
except (socket.error, IOError) as err:
raise exceptions.TcpException(
'Error connecting to "%s": %s' %
(self.address.host, err)
)
self.connection = connection
self.ip_address = Address(connection.getpeername())
self._makefile()
return ConnectionCloser(self)
def settimeout(self, n):
self.connection.settimeout(n)
def gettimeout(self):
return self.connection.gettimeout()
def get_alpn_proto_negotiated(self):
if HAS_ALPN and self.ssl_established:
return self.connection.get_alpn_proto_negotiated()
else:
return b""
class BaseHandler(_Connection):
"""
The instantiator is expected to call the handle() and finish() methods.
"""
def __init__(self, connection, address, server):
super(BaseHandler, self).__init__(connection)
self.address = Address.wrap(address)
self.server = server
self.clientcert = None
def create_ssl_context(self,
cert, key,
handle_sni=None,
request_client_cert=None,
chain_file=None,
dhparams=None,
extra_chain_certs=None,
**sslctx_kwargs):
"""
cert: A certutils.SSLCert object or the path to a certificate
chain file.
handle_sni: SNI handler, should take a connection object. Server
name can be retrieved like this:
connection.get_servername()
And you can specify the connection keys as follows:
new_context = Context(TLSv1_METHOD)
new_context.use_privatekey(key)
new_context.use_certificate(cert)
connection.set_context(new_context)
The request_client_cert argument requires some explanation. We're
supposed to be able to do this with no negative effects - if the
client has no cert to present, we're notified and proceed as usual.
Unfortunately, Android seems to have a bug (tested on 4.2.2) - when
an Android client is asked to present a certificate it does not
have, it hangs up, which is frankly bogus. Some time down the track
we may be able to make the proper behaviour the default again, but
until then we're conservative.
"""
context = self._create_ssl_context(**sslctx_kwargs)
context.use_privatekey(key)
if isinstance(cert, certutils.SSLCert):
context.use_certificate(cert.x509)
else:
context.use_certificate_chain_file(cert)
if extra_chain_certs:
for i in extra_chain_certs:
context.add_extra_chain_cert(i.x509)
if handle_sni:
# SNI callback happens during do_handshake()
context.set_tlsext_servername_callback(handle_sni)
if request_client_cert:
def save_cert(conn_, cert, errno_, depth_, preverify_ok_):
self.clientcert = certutils.SSLCert(cert)
# Return true to prevent cert verification error
return True
context.set_verify(SSL.VERIFY_PEER, save_cert)
# Cert Verify
if chain_file:
context.load_verify_locations(chain_file)
if dhparams:
SSL._lib.SSL_CTX_set_tmp_dh(context._context, dhparams)
return context
def convert_to_ssl(self, cert, key, **sslctx_kwargs):
"""
Convert connection to SSL.
For a list of parameters, see BaseHandler._create_ssl_context(...)
"""
context = self.create_ssl_context(
cert,
key,
**sslctx_kwargs)
self.connection = SSL.Connection(context, self.connection)
self.connection.set_accept_state()
try:
self.connection.do_handshake()
except SSL.Error as v:
raise exceptions.TlsException("SSL handshake error: %s" % repr(v))
self.ssl_established = True
self.rfile.set_descriptor(self.connection)
self.wfile.set_descriptor(self.connection)
def handle(self): # pragma: no cover
raise NotImplementedError
def settimeout(self, n):
self.connection.settimeout(n)
def get_alpn_proto_negotiated(self):
if HAS_ALPN and self.ssl_established:
return self.connection.get_alpn_proto_negotiated()
else:
return b""
class Counter:
def __init__(self):
self._count = 0
self._lock = threading.Lock()
@property
def count(self):
with self._lock:
return self._count
def __enter__(self):
with self._lock:
self._count += 1
def __exit__(self, *args):
with self._lock:
self._count -= 1
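# Editor's sketch (hypothetical handler, not part of this module): Counter is a
# context manager, so a connection handler can track its own liveness like
# this; TCPServer.wait_for_silence below blocks until .count drops to zero.
def _example_tracked_handler(counter, connection):
    with counter:  # increments counter.count for the duration of the handler
        connection.close()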
class TCPServer(object):
request_queue_size = 20
def __init__(self, address):
self.address = Address.wrap(address)
self.__is_shut_down = threading.Event()
self.__shutdown_request = False
self.socket = socket.socket(self.address.family, socket.SOCK_STREAM)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.socket.bind(self.address())
self.address = Address.wrap(self.socket.getsockname())
self.socket.listen(self.request_queue_size)
self.handler_counter = Counter()
def connection_thread(self, connection, client_address):
with self.handler_counter:
client_address = Address(client_address)
try:
self.handle_client_connection(connection, client_address)
except:
self.handle_error(connection, client_address)
finally:
close_socket(connection)
def serve_forever(self, poll_interval=0.1):
self.__is_shut_down.clear()
try:
while not self.__shutdown_request:
try:
r, w_, e_ = select.select(
[self.socket], [], [], poll_interval)
except select.error as ex: # pragma: no cover
if ex[0] == EINTR:
continue
else:
raise
if self.socket in r:
connection, client_address = self.socket.accept()
t = basethread.BaseThread(
"TCPConnectionHandler (%s: %s:%s -> %s:%s)" % (
self.__class__.__name__,
client_address[0],
client_address[1],
self.address.host,
self.address.port
),
target=self.connection_thread,
args=(connection, client_address),
)
t.setDaemon(1)
try:
t.start()
except threading.ThreadError:
self.handle_error(connection, Address(client_address))
connection.close()
finally:
self.__shutdown_request = False
self.__is_shut_down.set()
def shutdown(self):
self.__shutdown_request = True
self.__is_shut_down.wait()
self.socket.close()
self.handle_shutdown()
def handle_error(self, connection_, client_address, fp=sys.stderr):
"""
Called when handle_client_connection raises an exception.
"""
# If a thread has persisted after interpreter exit, the module might be
# none.
if traceback and six:
exc = six.text_type(traceback.format_exc())
print(u'-' * 40, file=fp)
print(
u"Error in processing of request from %s" % repr(client_address), file=fp)
print(exc, file=fp)
print(u'-' * 40, file=fp)
def handle_client_connection(self, conn, client_address): # pragma: no cover
"""
Called after client connection.
"""
raise NotImplementedError
def handle_shutdown(self):
"""
Called after server shutdown.
"""
def wait_for_silence(self, timeout=5):
start = time.time()
while 1:
if time.time() - start >= timeout:
raise exceptions.Timeout(
"%s service threads still alive" %
self.handler_counter.count
)
if self.handler_counter.count == 0:
return
| 1 | 12,168 | This shouldn't be necessary (same below). Do you have a traceback for me? | mitmproxy-mitmproxy | py |
@@ -13,6 +13,8 @@ import (
"strconv"
"syscall" // only for Signal
+ "github.com/opencontainers/runc/libcontainer/logs"
+
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/runc/libcontainer/configs"
"github.com/opencontainers/runc/libcontainer/intelrdt" | 1 | // +build linux
package libcontainer
import (
"encoding/json"
"errors"
"fmt"
"io"
"os"
"os/exec"
"path/filepath"
"strconv"
"syscall" // only for Signal
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/runc/libcontainer/configs"
"github.com/opencontainers/runc/libcontainer/intelrdt"
"github.com/opencontainers/runc/libcontainer/system"
"github.com/opencontainers/runc/libcontainer/utils"
"golang.org/x/sys/unix"
)
// Synchronisation value for cgroup namespace setup.
// The same constant is defined in nsexec.c as "CREATECGROUPNS".
const createCgroupns = 0x80
type parentProcess interface {
// pid returns the pid for the running process.
pid() int
// start starts the process execution.
start() error
// send a SIGKILL to the process and wait for the exit.
terminate() error
// wait waits on the process returning the process state.
wait() (*os.ProcessState, error)
// startTime returns the process start time.
startTime() (uint64, error)
signal(os.Signal) error
externalDescriptors() []string
setExternalDescriptors(fds []string)
}
type setnsProcess struct {
cmd *exec.Cmd
parentPipe *os.File
childPipe *os.File
cgroupPaths map[string]string
rootlessCgroups bool
intelRdtPath string
config *initConfig
fds []string
process *Process
bootstrapData io.Reader
}
func (p *setnsProcess) startTime() (uint64, error) {
stat, err := system.Stat(p.pid())
return stat.StartTime, err
}
func (p *setnsProcess) signal(sig os.Signal) error {
s, ok := sig.(syscall.Signal)
if !ok {
return errors.New("os: unsupported signal type")
}
return unix.Kill(p.pid(), s)
}
func (p *setnsProcess) start() (err error) {
defer p.parentPipe.Close()
err = p.cmd.Start()
p.childPipe.Close()
if err != nil {
return newSystemErrorWithCause(err, "starting setns process")
}
if p.bootstrapData != nil {
if _, err := io.Copy(p.parentPipe, p.bootstrapData); err != nil {
return newSystemErrorWithCause(err, "copying bootstrap data to pipe")
}
}
if err = p.execSetns(); err != nil {
return newSystemErrorWithCause(err, "executing setns process")
}
if len(p.cgroupPaths) > 0 {
if err := cgroups.EnterPid(p.cgroupPaths, p.pid()); err != nil && !p.rootlessCgroups {
return newSystemErrorWithCausef(err, "adding pid %d to cgroups", p.pid())
}
}
if p.intelRdtPath != "" {
// if Intel RDT "resource control" filesystem path exists
_, err := os.Stat(p.intelRdtPath)
if err == nil {
if err := intelrdt.WriteIntelRdtTasks(p.intelRdtPath, p.pid()); err != nil {
return newSystemErrorWithCausef(err, "adding pid %d to Intel RDT resource control filesystem", p.pid())
}
}
}
// set rlimits, this has to be done here because we lose permissions
// to raise the limits once we enter a user-namespace
if err := setupRlimits(p.config.Rlimits, p.pid()); err != nil {
return newSystemErrorWithCause(err, "setting rlimits for process")
}
if err := utils.WriteJSON(p.parentPipe, p.config); err != nil {
return newSystemErrorWithCause(err, "writing config to pipe")
}
ierr := parseSync(p.parentPipe, func(sync *syncT) error {
switch sync.Type {
case procReady:
// This shouldn't happen.
panic("unexpected procReady in setns")
case procHooks:
// This shouldn't happen.
panic("unexpected procHooks in setns")
default:
return newSystemError(fmt.Errorf("invalid JSON payload from child"))
}
})
if err := unix.Shutdown(int(p.parentPipe.Fd()), unix.SHUT_WR); err != nil {
return newSystemErrorWithCause(err, "calling shutdown on init pipe")
}
// Must be done after Shutdown so the child will exit and we can wait for it.
if ierr != nil {
p.wait()
return ierr
}
return nil
}
// execSetns runs the process that executes C code to perform the setns calls.
// Because setns support requires the C process to fork off a child and perform
// the setns before the go runtime boots, we wait on the process to die and
// receive the child's pid over the provided pipe.
func (p *setnsProcess) execSetns() error {
status, err := p.cmd.Process.Wait()
if err != nil {
p.cmd.Wait()
return newSystemErrorWithCause(err, "waiting on setns process to finish")
}
if !status.Success() {
p.cmd.Wait()
return newSystemError(&exec.ExitError{ProcessState: status})
}
var pid *pid
if err := json.NewDecoder(p.parentPipe).Decode(&pid); err != nil {
p.cmd.Wait()
return newSystemErrorWithCause(err, "reading pid from init pipe")
}
// Clean up the zombie parent process
// On Unix systems FindProcess always succeeds.
firstChildProcess, _ := os.FindProcess(pid.PidFirstChild)
// Ignore the error in case the child has already been reaped for any reason
_, _ = firstChildProcess.Wait()
process, err := os.FindProcess(pid.Pid)
if err != nil {
return err
}
p.cmd.Process = process
p.process.ops = p
return nil
}
// terminate sends a SIGKILL to the forked process for the setns routine then waits to
// avoid the process becoming a zombie.
func (p *setnsProcess) terminate() error {
if p.cmd.Process == nil {
return nil
}
err := p.cmd.Process.Kill()
if _, werr := p.wait(); err == nil {
err = werr
}
return err
}
func (p *setnsProcess) wait() (*os.ProcessState, error) {
err := p.cmd.Wait()
// Return actual ProcessState even on Wait error
return p.cmd.ProcessState, err
}
func (p *setnsProcess) pid() int {
return p.cmd.Process.Pid
}
func (p *setnsProcess) externalDescriptors() []string {
return p.fds
}
func (p *setnsProcess) setExternalDescriptors(newFds []string) {
p.fds = newFds
}
type initProcess struct {
cmd *exec.Cmd
parentPipe *os.File
childPipe *os.File
config *initConfig
manager cgroups.Manager
intelRdtManager intelrdt.Manager
container *linuxContainer
fds []string
process *Process
bootstrapData io.Reader
sharePidns bool
}
func (p *initProcess) pid() int {
return p.cmd.Process.Pid
}
func (p *initProcess) externalDescriptors() []string {
return p.fds
}
// getChildPid receives the final child's pid over the provided pipe.
func (p *initProcess) getChildPid() (int, error) {
var pid pid
if err := json.NewDecoder(p.parentPipe).Decode(&pid); err != nil {
p.cmd.Wait()
return -1, err
}
// Clean up the zombie parent process
// On Unix systems FindProcess always succeeds.
firstChildProcess, _ := os.FindProcess(pid.PidFirstChild)
// Ignore the error in case the child has already been reaped for any reason
_, _ = firstChildProcess.Wait()
return pid.Pid, nil
}
func (p *initProcess) waitForChildExit(childPid int) error {
status, err := p.cmd.Process.Wait()
if err != nil {
p.cmd.Wait()
return err
}
if !status.Success() {
p.cmd.Wait()
return &exec.ExitError{ProcessState: status}
}
process, err := os.FindProcess(childPid)
if err != nil {
return err
}
p.cmd.Process = process
p.process.ops = p
return nil
}
func (p *initProcess) start() error {
defer p.parentPipe.Close()
err := p.cmd.Start()
p.process.ops = p
p.childPipe.Close()
if err != nil {
p.process.ops = nil
return newSystemErrorWithCause(err, "starting init process command")
}
// Do this before syncing with child so that no children can escape the
// cgroup. We don't need to worry about not doing this and not being root
// because we'd be using the rootless cgroup manager in that case.
if err := p.manager.Apply(p.pid()); err != nil {
return newSystemErrorWithCause(err, "applying cgroup configuration for process")
}
if p.intelRdtManager != nil {
if err := p.intelRdtManager.Apply(p.pid()); err != nil {
return newSystemErrorWithCause(err, "applying Intel RDT configuration for process")
}
}
defer func() {
if err != nil {
			// TODO: it should not be this function's responsibility to call Destroy here
p.manager.Destroy()
if p.intelRdtManager != nil {
p.intelRdtManager.Destroy()
}
}
}()
if _, err := io.Copy(p.parentPipe, p.bootstrapData); err != nil {
return newSystemErrorWithCause(err, "copying bootstrap data to pipe")
}
childPid, err := p.getChildPid()
if err != nil {
return newSystemErrorWithCause(err, "getting the final child's pid from pipe")
}
// Save the standard descriptor names before the container process
// can potentially move them (e.g., via dup2()). If we don't do this now,
// we won't know at checkpoint time which file descriptor to look up.
fds, err := getPipeFds(childPid)
if err != nil {
return newSystemErrorWithCausef(err, "getting pipe fds for pid %d", childPid)
}
p.setExternalDescriptors(fds)
// Do this before syncing with child so that no children
// can escape the cgroup
if err := p.manager.Apply(childPid); err != nil {
return newSystemErrorWithCause(err, "applying cgroup configuration for process")
}
if p.intelRdtManager != nil {
if err := p.intelRdtManager.Apply(childPid); err != nil {
return newSystemErrorWithCause(err, "applying Intel RDT configuration for process")
}
}
	// Now it's time to set up the cgroup namespace
if p.config.Config.Namespaces.Contains(configs.NEWCGROUP) && p.config.Config.Namespaces.PathOf(configs.NEWCGROUP) == "" {
if _, err := p.parentPipe.Write([]byte{createCgroupns}); err != nil {
return newSystemErrorWithCause(err, "sending synchronization value to init process")
}
}
// Wait for our first child to exit
if err := p.waitForChildExit(childPid); err != nil {
return newSystemErrorWithCause(err, "waiting for our first child to exit")
}
defer func() {
if err != nil {
			// TODO: it should not be this function's responsibility to call Destroy here
p.manager.Destroy()
}
}()
if err := p.createNetworkInterfaces(); err != nil {
return newSystemErrorWithCause(err, "creating network interfaces")
}
if err := p.sendConfig(); err != nil {
return newSystemErrorWithCause(err, "sending config to init process")
}
var (
sentRun bool
sentResume bool
)
ierr := parseSync(p.parentPipe, func(sync *syncT) error {
switch sync.Type {
case procReady:
// set rlimits, this has to be done here because we lose permissions
// to raise the limits once we enter a user-namespace
if err := setupRlimits(p.config.Rlimits, p.pid()); err != nil {
return newSystemErrorWithCause(err, "setting rlimits for ready process")
}
// call prestart hooks
if !p.config.Config.Namespaces.Contains(configs.NEWNS) {
// Setup cgroup before prestart hook, so that the prestart hook could apply cgroup permissions.
if err := p.manager.Set(p.config.Config); err != nil {
return newSystemErrorWithCause(err, "setting cgroup config for ready process")
}
if p.intelRdtManager != nil {
if err := p.intelRdtManager.Set(p.config.Config); err != nil {
return newSystemErrorWithCause(err, "setting Intel RDT config for ready process")
}
}
if p.config.Config.Hooks != nil {
s, err := p.container.currentOCIState()
if err != nil {
return err
}
// initProcessStartTime hasn't been set yet.
s.Pid = p.cmd.Process.Pid
s.Status = "creating"
for i, hook := range p.config.Config.Hooks.Prestart {
if err := hook.Run(s); err != nil {
return newSystemErrorWithCausef(err, "running prestart hook %d", i)
}
}
}
}
// Sync with child.
if err := writeSync(p.parentPipe, procRun); err != nil {
return newSystemErrorWithCause(err, "writing syncT 'run'")
}
sentRun = true
case procHooks:
// Setup cgroup before prestart hook, so that the prestart hook could apply cgroup permissions.
if err := p.manager.Set(p.config.Config); err != nil {
return newSystemErrorWithCause(err, "setting cgroup config for procHooks process")
}
if p.intelRdtManager != nil {
if err := p.intelRdtManager.Set(p.config.Config); err != nil {
return newSystemErrorWithCause(err, "setting Intel RDT config for procHooks process")
}
}
if p.config.Config.Hooks != nil {
s, err := p.container.currentOCIState()
if err != nil {
return err
}
// initProcessStartTime hasn't been set yet.
s.Pid = p.cmd.Process.Pid
s.Status = "creating"
for i, hook := range p.config.Config.Hooks.Prestart {
if err := hook.Run(s); err != nil {
return newSystemErrorWithCausef(err, "running prestart hook %d", i)
}
}
}
// Sync with child.
if err := writeSync(p.parentPipe, procResume); err != nil {
return newSystemErrorWithCause(err, "writing syncT 'resume'")
}
sentResume = true
default:
return newSystemError(fmt.Errorf("invalid JSON payload from child"))
}
return nil
})
if !sentRun {
return newSystemErrorWithCause(ierr, "container init")
}
if p.config.Config.Namespaces.Contains(configs.NEWNS) && !sentResume {
return newSystemError(fmt.Errorf("could not synchronise after executing prestart hooks with container process"))
}
if err := unix.Shutdown(int(p.parentPipe.Fd()), unix.SHUT_WR); err != nil {
return newSystemErrorWithCause(err, "shutting down init pipe")
}
// Must be done after Shutdown so the child will exit and we can wait for it.
if ierr != nil {
p.wait()
return ierr
}
return nil
}
func (p *initProcess) wait() (*os.ProcessState, error) {
err := p.cmd.Wait()
if err != nil {
return p.cmd.ProcessState, err
}
	// we should kill all processes in the cgroup when init has died if we use the host PID namespace
if p.sharePidns {
signalAllProcesses(p.manager, unix.SIGKILL)
}
return p.cmd.ProcessState, nil
}
func (p *initProcess) terminate() error {
if p.cmd.Process == nil {
return nil
}
err := p.cmd.Process.Kill()
if _, werr := p.wait(); err == nil {
err = werr
}
return err
}
func (p *initProcess) startTime() (uint64, error) {
stat, err := system.Stat(p.pid())
return stat.StartTime, err
}
func (p *initProcess) sendConfig() error {
	// Send the config to the container's init process. We don't use a JSON
	// encoder here because the JSON decoder misbehaves in some cases, see:
// https://github.com/docker/docker/issues/14203#issuecomment-174177790
return utils.WriteJSON(p.parentPipe, p.config)
}
func (p *initProcess) createNetworkInterfaces() error {
for _, config := range p.config.Config.Networks {
strategy, err := getStrategy(config.Type)
if err != nil {
return err
}
n := &network{
Network: *config,
}
if err := strategy.create(n, p.pid()); err != nil {
return err
}
p.config.Networks = append(p.config.Networks, n)
}
return nil
}
func (p *initProcess) signal(sig os.Signal) error {
s, ok := sig.(syscall.Signal)
if !ok {
return errors.New("os: unsupported signal type")
}
return unix.Kill(p.pid(), s)
}
func (p *initProcess) setExternalDescriptors(newFds []string) {
p.fds = newFds
}
func getPipeFds(pid int) ([]string, error) {
fds := make([]string, 3)
dirPath := filepath.Join("/proc", strconv.Itoa(pid), "/fd")
for i := 0; i < 3; i++ {
// XXX: This breaks if the path is not a valid symlink (which can
// happen in certain particularly unlucky mount namespace setups).
f := filepath.Join(dirPath, strconv.Itoa(i))
target, err := os.Readlink(f)
if err != nil {
// Ignore permission errors, for rootless containers and other
// non-dumpable processes. if we can't get the fd for a particular
// file, there's not much we can do.
if os.IsPermission(err) {
continue
}
return fds, err
}
fds[i] = target
}
return fds, nil
}
// InitializeIO creates pipes for use with the process's stdio and returns the
// opposite side for each. Do not use this if you want to have a pseudoterminal
// set up for you by libcontainer (TODO: fix that too).
// TODO: This is mostly unnecessary, and should be handled by clients.
func (p *Process) InitializeIO(rootuid, rootgid int) (i *IO, err error) {
var fds []uintptr
i = &IO{}
// cleanup in case of an error
defer func() {
if err != nil {
for _, fd := range fds {
unix.Close(int(fd))
}
}
}()
// STDIN
r, w, err := os.Pipe()
if err != nil {
return nil, err
}
fds = append(fds, r.Fd(), w.Fd())
p.Stdin, i.Stdin = r, w
// STDOUT
if r, w, err = os.Pipe(); err != nil {
return nil, err
}
fds = append(fds, r.Fd(), w.Fd())
p.Stdout, i.Stdout = w, r
// STDERR
if r, w, err = os.Pipe(); err != nil {
return nil, err
}
fds = append(fds, r.Fd(), w.Fd())
p.Stderr, i.Stderr = w, r
// change ownership of the pipes in case we are in a user namespace
for _, fd := range fds {
if err := unix.Fchown(int(fd), rootuid, rootgid); err != nil {
return nil, err
}
}
return i, nil
}
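// Editor's sketch (hypothetical caller, not part of libcontainer): wiring up
// the parent's ends of the stdio pipes returned by InitializeIO above.
//
//	io, err := process.InitializeIO(rootuid, rootgid)
//	if err != nil {
//		return err
//	}
//	go io.Stdin.Write([]byte("input\n")) // parent writes the child's stdin
//	buf := make([]byte, 4096)
//	n, _ := io.Stdout.Read(buf) // parent reads the child's stdout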
| 1 | 17,568 | No newline needed here. | opencontainers-runc | go |
@@ -7,11 +7,12 @@ package libkbfs
import (
"errors"
"fmt"
- "io/ioutil"
"os"
"path/filepath"
"sync"
+ "github.com/keybase/kbfs/ioutil"
+
"github.com/keybase/client/go/logger"
"github.com/keybase/kbfs/kbfscodec"
"github.com/keybase/kbfs/kbfscrypto" | 1 | // Copyright 2016 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.
package libkbfs
import (
"errors"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"sync"
"github.com/keybase/client/go/logger"
"github.com/keybase/kbfs/kbfscodec"
"github.com/keybase/kbfs/kbfscrypto"
"github.com/keybase/kbfs/tlf"
"golang.org/x/net/context"
)
type blockServerDiskTlfStorage struct {
lock sync.RWMutex
// store is nil after it is shut down in Shutdown().
store *blockDiskStore
}
// BlockServerDisk implements the BlockServer interface by just
// storing blocks in a local disk store.
type BlockServerDisk struct {
codec kbfscodec.Codec
crypto cryptoPure
log logger.Logger
dirPath string
shutdownFunc func(logger.Logger)
tlfStorageLock sync.RWMutex
// tlfStorage is nil after Shutdown() is called.
tlfStorage map[tlf.ID]*blockServerDiskTlfStorage
}
var _ blockServerLocal = (*BlockServerDisk)(nil)
// newBlockServerDisk constructs a new BlockServerDisk that stores
// its data in the given directory.
func newBlockServerDisk(
codec kbfscodec.Codec, crypto cryptoPure, log logger.Logger,
dirPath string, shutdownFunc func(logger.Logger)) *BlockServerDisk {
bserv := &BlockServerDisk{
codec, crypto, log, dirPath, shutdownFunc, sync.RWMutex{},
make(map[tlf.ID]*blockServerDiskTlfStorage),
}
return bserv
}
// NewBlockServerDir constructs a new BlockServerDisk that stores
// its data in the given directory.
func NewBlockServerDir(codec kbfscodec.Codec, crypto cryptoPure,
log logger.Logger, dirPath string) *BlockServerDisk {
return newBlockServerDisk(codec, crypto, log, dirPath, nil)
}
// NewBlockServerTempDir constructs a new BlockServerDisk that stores its
// data in a temp directory which is cleaned up on shutdown.
func NewBlockServerTempDir(codec kbfscodec.Codec, crypto cryptoPure,
log logger.Logger) (*BlockServerDisk, error) {
tempdir, err := ioutil.TempDir(os.TempDir(), "kbfs_bserver_tmp")
if err != nil {
return nil, err
}
return newBlockServerDisk(codec, crypto, log, tempdir, func(log logger.Logger) {
err := os.RemoveAll(tempdir)
if err != nil {
log.Warning("error removing %s: %s", tempdir, err)
}
}), nil
}
var errBlockServerDiskShutdown = errors.New("BlockServerDisk is shutdown")
func (b *BlockServerDisk) getStorage(tlfID tlf.ID) (
*blockServerDiskTlfStorage, error) {
storage, err := func() (*blockServerDiskTlfStorage, error) {
b.tlfStorageLock.RLock()
defer b.tlfStorageLock.RUnlock()
if b.tlfStorage == nil {
return nil, errBlockServerDiskShutdown
}
return b.tlfStorage[tlfID], nil
}()
if err != nil {
return nil, err
}
if storage != nil {
return storage, nil
}
b.tlfStorageLock.Lock()
defer b.tlfStorageLock.Unlock()
if b.tlfStorage == nil {
return nil, errBlockServerDiskShutdown
}
storage, ok := b.tlfStorage[tlfID]
if ok {
return storage, nil
}
path := filepath.Join(b.dirPath, tlfID.String())
store := makeBlockDiskStore(b.codec, b.crypto, path)
storage = &blockServerDiskTlfStorage{
store: store,
}
b.tlfStorage[tlfID] = storage
return storage, nil
}
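// Editor's note: getStorage above follows the classic check/lock/re-check
// pattern. A minimal generic sketch of the same idea (hypothetical names, not
// part of the kbfs API): readers take the cheap RLock fast path, only a miss
// pays for the exclusive lock, and the map is re-checked because another
// goroutine may have created the entry between the two lock acquisitions.
func exampleGetOrCreate(lock *sync.RWMutex, m map[string]int, key string) int {
	lock.RLock()
	v, ok := m[key]
	lock.RUnlock()
	if ok {
		return v
	}
	lock.Lock()
	defer lock.Unlock()
	if v, ok := m[key]; ok { // re-check under the write lock
		return v
	}
	m[key] = len(m)
	return m[key]
}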
// Get implements the BlockServer interface for BlockServerDisk.
func (b *BlockServerDisk) Get(ctx context.Context, tlfID tlf.ID, id BlockID,
context BlockContext) (
data []byte, serverHalf kbfscrypto.BlockCryptKeyServerHalf, err error) {
defer func() {
err = translateToBlockServerError(err)
}()
b.log.CDebugf(ctx, "BlockServerDisk.Get id=%s tlfID=%s context=%s",
id, tlfID, context)
tlfStorage, err := b.getStorage(tlfID)
if err != nil {
return nil, kbfscrypto.BlockCryptKeyServerHalf{}, err
}
tlfStorage.lock.RLock()
defer tlfStorage.lock.RUnlock()
if tlfStorage.store == nil {
return nil, kbfscrypto.BlockCryptKeyServerHalf{},
errBlockServerDiskShutdown
}
data, keyServerHalf, err := tlfStorage.store.getDataWithContext(
id, context)
if err != nil {
return nil, kbfscrypto.BlockCryptKeyServerHalf{}, err
}
return data, keyServerHalf, nil
}
// Put implements the BlockServer interface for BlockServerDisk.
func (b *BlockServerDisk) Put(ctx context.Context, tlfID tlf.ID, id BlockID,
context BlockContext, buf []byte,
serverHalf kbfscrypto.BlockCryptKeyServerHalf) (err error) {
defer func() {
err = translateToBlockServerError(err)
}()
b.log.CDebugf(ctx, "BlockServerDisk.Put id=%s tlfID=%s context=%s size=%d",
id, tlfID, context, len(buf))
if context.GetRefNonce() != ZeroBlockRefNonce {
return errors.New("can't Put() a block with a non-zero refnonce")
}
tlfStorage, err := b.getStorage(tlfID)
if err != nil {
return err
}
tlfStorage.lock.Lock()
defer tlfStorage.lock.Unlock()
if tlfStorage.store == nil {
return errBlockServerDiskShutdown
}
err = tlfStorage.store.put(id, context, buf, serverHalf, "")
if err != nil {
return err
}
return nil
}
// AddBlockReference implements the BlockServer interface for BlockServerDisk.
func (b *BlockServerDisk) AddBlockReference(ctx context.Context, tlfID tlf.ID,
id BlockID, context BlockContext) error {
b.log.CDebugf(ctx, "BlockServerDisk.AddBlockReference id=%s "+
"tlfID=%s context=%s", id, tlfID, context)
tlfStorage, err := b.getStorage(tlfID)
if err != nil {
return err
}
tlfStorage.lock.Lock()
defer tlfStorage.lock.Unlock()
if tlfStorage.store == nil {
return errBlockServerDiskShutdown
}
hasRef, err := tlfStorage.store.hasAnyRef(id)
if err != nil {
return err
}
if !hasRef {
return BServerErrorBlockNonExistent{fmt.Sprintf("Block ID %s "+
"doesn't exist and cannot be referenced.", id)}
}
hasNonArchivedRef, err := tlfStorage.store.hasNonArchivedRef(id)
if err != nil {
return err
}
if !hasNonArchivedRef {
return BServerErrorBlockArchived{fmt.Sprintf("Block ID %s has "+
"been archived and cannot be referenced.", id)}
}
return tlfStorage.store.addReference(id, context, "")
}
// RemoveBlockReferences implements the BlockServer interface for
// BlockServerDisk.
func (b *BlockServerDisk) RemoveBlockReferences(ctx context.Context,
tlfID tlf.ID, contexts map[BlockID][]BlockContext) (
liveCounts map[BlockID]int, err error) {
defer func() {
err = translateToBlockServerError(err)
}()
b.log.CDebugf(ctx, "BlockServerDisk.RemoveBlockReference "+
"tlfID=%s contexts=%v", tlfID, contexts)
tlfStorage, err := b.getStorage(tlfID)
if err != nil {
return nil, err
}
tlfStorage.lock.Lock()
defer tlfStorage.lock.Unlock()
if tlfStorage.store == nil {
return nil, errBlockServerDiskShutdown
}
liveCounts = make(map[BlockID]int)
for id, idContexts := range contexts {
liveCount, err := tlfStorage.store.removeReferences(
id, idContexts, "")
if err != nil {
return nil, err
}
liveCounts[id] = liveCount
if liveCount == 0 {
err := tlfStorage.store.remove(id)
if err != nil {
return nil, err
}
}
}
return liveCounts, nil
}
// ArchiveBlockReferences implements the BlockServer interface for
// BlockServerDisk.
func (b *BlockServerDisk) ArchiveBlockReferences(ctx context.Context,
tlfID tlf.ID, contexts map[BlockID][]BlockContext) (err error) {
defer func() {
err = translateToBlockServerError(err)
}()
b.log.CDebugf(ctx, "BlockServerDisk.ArchiveBlockReferences "+
"tlfID=%s contexts=%v", tlfID, contexts)
tlfStorage, err := b.getStorage(tlfID)
if err != nil {
return err
}
tlfStorage.lock.Lock()
defer tlfStorage.lock.Unlock()
if tlfStorage.store == nil {
return errBlockServerDiskShutdown
}
for id, idContexts := range contexts {
for _, context := range idContexts {
hasContext, err := tlfStorage.store.hasContext(id, context)
if err != nil {
return err
}
if !hasContext {
return BServerErrorBlockNonExistent{
fmt.Sprintf(
"Block ID %s (context %s) doesn't "+
"exist and cannot be archived.",
id, context),
}
}
}
}
return tlfStorage.store.archiveReferences(contexts, "")
}
// getAllRefsForTest implements the blockServerLocal interface for
// BlockServerDisk.
func (b *BlockServerDisk) getAllRefsForTest(ctx context.Context, tlfID tlf.ID) (
map[BlockID]blockRefMap, error) {
tlfStorage, err := b.getStorage(tlfID)
if err != nil {
return nil, err
}
tlfStorage.lock.RLock()
defer tlfStorage.lock.RUnlock()
if tlfStorage.store == nil {
return nil, errBlockServerDiskShutdown
}
return tlfStorage.store.getAllRefsForTest()
}
// IsUnflushed implements the BlockServer interface for BlockServerDisk.
func (b *BlockServerDisk) IsUnflushed(ctx context.Context, tlfID tlf.ID,
_ BlockID) (bool, error) {
tlfStorage, err := b.getStorage(tlfID)
if err != nil {
return false, err
}
tlfStorage.lock.RLock()
defer tlfStorage.lock.RUnlock()
if tlfStorage.store == nil {
return false, errBlockServerDiskShutdown
}
return false, nil
}
// Shutdown implements the BlockServer interface for BlockServerDisk.
func (b *BlockServerDisk) Shutdown() {
tlfStorage := func() map[tlf.ID]*blockServerDiskTlfStorage {
b.tlfStorageLock.Lock()
defer b.tlfStorageLock.Unlock()
// Make further accesses error out.
tlfStorage := b.tlfStorage
b.tlfStorage = nil
return tlfStorage
}()
for _, s := range tlfStorage {
func() {
s.lock.Lock()
defer s.lock.Unlock()
if s.store == nil {
// Already shutdown.
return
}
// Make further accesses error out.
s.store = nil
}()
}
if b.shutdownFunc != nil {
b.shutdownFunc(b.log)
}
}
// RefreshAuthToken implements the BlockServer interface for BlockServerDisk.
func (b *BlockServerDisk) RefreshAuthToken(_ context.Context) {}
// GetUserQuotaInfo implements the BlockServer interface for BlockServerDisk.
func (b *BlockServerDisk) GetUserQuotaInfo(ctx context.Context) (info *UserQuotaInfo, err error) {
// Return a dummy value here.
return &UserQuotaInfo{Limit: 0x7FFFFFFFFFFFFFFF}, nil
}
| 1 | 14,802 | Why a separate block? | keybase-kbfs | go |
@@ -2531,10 +2531,16 @@ reg_get_size(reg_id_t reg)
return OPSZ_SCALABLE;
if (reg >= DR_REG_P0 && reg <= DR_REG_P15)
return OPSZ_SCALABLE_PRED;
+ if (reg == DR_REG_CNTVCT_EL0)
+ return OPSZ_8;
+ if (reg >= DR_REG_NZCV && reg <= DR_REG_FPSR)
+ return OPSZ_8;
# endif
if (reg == DR_REG_TPIDRURW || reg == DR_REG_TPIDRURO)
return OPSZ_PTR;
#endif
+ LOG(GLOBAL, LOG_ANNOTATIONS, 2, "reg=%d, %s, last reg=%d\n", reg,
+ get_register_name(reg), DR_REG_LAST_ENUM);
CLIENT_ASSERT(false, "reg_get_size: invalid register");
return OPSZ_NA;
} | 1 | /* **********************************************************
* Copyright (c) 2011-2020 Google, Inc. All rights reserved.
* Copyright (c) 2000-2010 VMware, Inc. All rights reserved.
* **********************************************************/
/*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* * Neither the name of VMware, Inc. nor the names of its contributors may be
* used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL VMWARE, INC. OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*/
/* Copyright (c) 2003-2007 Determina Corp. */
/* Copyright (c) 2001-2003 Massachusetts Institute of Technology */
/* Copyright (c) 2000-2001 Hewlett-Packard Company */
/* file "opnd_shared.c" -- IR opnd utilities */
#include "../globals.h"
#include "opnd.h"
#include "arch.h"
/* FIXME i#1551: refactor this file and avoid this x86-specific include in base arch/ */
#ifndef AARCH64
# include "x86/decode_private.h"
#endif
#if defined(DEBUG) && !defined(STANDALONE_DECODER)
/* case 10450: give messages to clients */
/* we can't undef ASSERT b/c of DYNAMO_OPTION */
# undef ASSERT_TRUNCATE
# undef ASSERT_BITFIELD_TRUNCATE
# undef ASSERT_NOT_REACHED
# define ASSERT_TRUNCATE DO_NOT_USE_ASSERT_USE_CLIENT_ASSERT_INSTEAD
# define ASSERT_BITFIELD_TRUNCATE DO_NOT_USE_ASSERT_USE_CLIENT_ASSERT_INSTEAD
# define ASSERT_NOT_REACHED DO_NOT_USE_ASSERT_USE_CLIENT_ASSERT_INSTEAD
#endif
#undef opnd_is_null
#undef opnd_is_immed_int
#undef opnd_is_immed_float
#undef opnd_is_immed_double
#undef opnd_is_near_pc
#undef opnd_is_near_instr
#undef opnd_is_reg
#undef opnd_is_base_disp
#undef opnd_is_far_pc
#undef opnd_is_far_instr
#undef opnd_is_mem_instr
#undef opnd_is_valid
bool
opnd_is_null(opnd_t op)
{
return OPND_IS_NULL(op);
}
bool
opnd_is_immed_int(opnd_t op)
{
return OPND_IS_IMMED_INT(op);
}
bool
opnd_is_immed_float(opnd_t op)
{
return OPND_IS_IMMED_FLOAT(op);
}
bool
opnd_is_immed_double(opnd_t op)
{
return OPND_IS_IMMED_DOUBLE(op);
}
bool
opnd_is_near_pc(opnd_t op)
{
return OPND_IS_NEAR_PC(op);
}
bool
opnd_is_near_instr(opnd_t op)
{
return OPND_IS_NEAR_INSTR(op);
}
bool
opnd_is_reg(opnd_t op)
{
return OPND_IS_REG(op);
}
bool
opnd_is_base_disp(opnd_t op)
{
return OPND_IS_BASE_DISP(op);
}
bool
opnd_is_far_pc(opnd_t op)
{
return OPND_IS_FAR_PC(op);
}
bool
opnd_is_far_instr(opnd_t op)
{
return OPND_IS_FAR_INSTR(op);
}
bool
opnd_is_mem_instr(opnd_t op)
{
return OPND_IS_MEM_INSTR(op);
}
bool
opnd_is_valid(opnd_t op)
{
return OPND_IS_VALID(op);
}
#define opnd_is_null OPND_IS_NULL
#define opnd_is_immed_int OPND_IS_IMMED_INT
#define opnd_is_immed_float OPND_IS_IMMED_FLOAT
#define opnd_is_immed_double OPND_IS_IMMED_DOUBLE
#define opnd_is_near_pc OPND_IS_NEAR_PC
#define opnd_is_near_instr OPND_IS_NEAR_INSTR
#define opnd_is_reg OPND_IS_REG
#define opnd_is_base_disp OPND_IS_BASE_DISP
#define opnd_is_far_pc OPND_IS_FAR_PC
#define opnd_is_far_instr OPND_IS_FAR_INSTR
#define opnd_is_mem_instr OPND_IS_MEM_INSTR
#define opnd_is_valid OPND_IS_VALID
#if defined(X64) || defined(ARM)
# undef opnd_is_rel_addr
bool
opnd_is_rel_addr(opnd_t op)
{
return OPND_IS_REL_ADDR(op);
}
# define opnd_is_rel_addr OPND_IS_REL_ADDR
#endif
/* We allow overlap between ABS_ADDR_kind and BASE_DISP_kind w/ no base or index */
bool
opnd_is_abs_base_disp(opnd_t opnd)
{
return (opnd_is_base_disp(opnd) && opnd_get_base(opnd) == REG_NULL &&
opnd_get_index(opnd) == REG_NULL);
}
bool
opnd_is_abs_addr(opnd_t opnd)
{
return IF_X64(opnd.kind == ABS_ADDR_kind ||) opnd_is_abs_base_disp(opnd);
}
bool
opnd_is_near_abs_addr(opnd_t opnd)
{
return opnd_is_abs_addr(opnd) IF_X86(&&opnd.aux.segment == REG_NULL);
}
bool
opnd_is_far_abs_addr(opnd_t opnd)
{
return IF_X86_ELSE(opnd_is_abs_addr(opnd) && opnd.aux.segment != REG_NULL, false);
}
bool
opnd_is_vsib(opnd_t op)
{
return (opnd_is_base_disp(op) &&
(reg_is_strictly_xmm(opnd_get_index(op)) ||
reg_is_strictly_ymm(opnd_get_index(op)) ||
reg_is_strictly_zmm(opnd_get_index(op))));
}
bool
opnd_is_reg_32bit(opnd_t opnd)
{
if (opnd_is_reg(opnd))
return reg_is_32bit(opnd_get_reg(opnd));
return false;
}
bool
reg_is_32bit(reg_id_t reg)
{
return (reg >= REG_START_32 && reg <= REG_STOP_32);
}
#if defined(X86) || defined(AARCH64)
bool
opnd_is_reg_64bit(opnd_t opnd)
{
if (opnd_is_reg(opnd))
return reg_is_64bit(opnd_get_reg(opnd));
return false;
}
bool
reg_is_64bit(reg_id_t reg)
{
return (reg >= REG_START_64 && reg <= REG_STOP_64);
}
#endif /* X86 || AARCH64 */
bool
opnd_is_reg_pointer_sized(opnd_t opnd)
{
if (opnd_is_reg(opnd))
return reg_is_pointer_sized(opnd_get_reg(opnd));
return false;
}
bool
opnd_is_reg_partial(opnd_t opnd)
{
return (opnd_is_reg(opnd) && opnd.size != 0 &&
opnd_get_size(opnd) != reg_get_size(opnd_get_reg(opnd)));
}
bool
reg_is_pointer_sized(reg_id_t reg)
{
#ifdef X64
return (reg >= REG_START_64 && reg <= REG_STOP_64);
#else
return (reg >= REG_START_32 && reg <= REG_STOP_32);
#endif
}
#undef opnd_get_reg
reg_id_t
opnd_get_reg(opnd_t opnd)
{
return OPND_GET_REG(opnd);
}
#define opnd_get_reg OPND_GET_REG
#undef opnd_get_flags
dr_opnd_flags_t
opnd_get_flags(opnd_t opnd)
{
return OPND_GET_FLAGS(opnd);
}
#define opnd_get_flags OPND_GET_FLAGS
void
opnd_set_flags(opnd_t *opnd, dr_opnd_flags_t flags)
{
CLIENT_ASSERT(opnd_is_reg(*opnd) || opnd_is_base_disp(*opnd) ||
opnd_is_immed_int(*opnd),
"opnd_set_flags called on non-reg non-base-disp non-immed-int opnd");
opnd->aux.flags = flags;
}
opnd_t
opnd_add_flags(opnd_t opnd, dr_opnd_flags_t flags)
{
opnd_set_flags(&opnd, flags | opnd.aux.flags);
return opnd;
}
opnd_size_t
opnd_get_size(opnd_t opnd)
{
switch (opnd.kind) {
case REG_kind: return (opnd.size == 0 ? reg_get_size(opnd_get_reg(opnd)) : opnd.size);
case IMMED_INTEGER_kind:
case IMMED_FLOAT_kind:
case IMMED_DOUBLE_kind:
case BASE_DISP_kind:
#if defined(X64) || defined(ARM)
case REL_ADDR_kind:
#endif
#ifdef X64
case ABS_ADDR_kind:
#endif
case MEM_INSTR_kind:
case INSTR_kind: return opnd.size;
case PC_kind: return OPSZ_PTR;
case FAR_PC_kind:
case FAR_INSTR_kind: return OPSZ_6_irex10_short4;
case NULL_kind: return OPSZ_NA;
default: CLIENT_ASSERT(false, "opnd_get_size: unknown opnd type"); return OPSZ_NA;
}
}
void
opnd_set_size(opnd_t *opnd, opnd_size_t newsize)
{
switch (opnd->kind) {
case IMMED_INTEGER_kind:
case BASE_DISP_kind:
#if defined(X64) || defined(ARM)
case REL_ADDR_kind:
#endif
#ifdef X64
case ABS_ADDR_kind:
#endif
case REG_kind:
case MEM_INSTR_kind:
case INSTR_kind: opnd->size = newsize; return;
default: CLIENT_ASSERT(false, "opnd_set_size: unknown opnd type");
}
}
/* immediate operands */
#if defined(DEBUG) && !defined(STANDALONE_DECODER)
static void
opnd_check_immed_size(int64 i, opnd_size_t size)
{
uint sz = opnd_size_in_bytes(size);
if (sz == 1) {
CLIENT_ASSERT(CHECK_TRUNCATE_TYPE_sbyte(i) || CHECK_TRUNCATE_TYPE_byte(i),
"opnd_create_immed_int: value too large for 8-bit size");
} else if (sz == 2) {
CLIENT_ASSERT(CHECK_TRUNCATE_TYPE_short(i) || CHECK_TRUNCATE_TYPE_ushort(i),
"opnd_create_immed_int: value too large for 16-bit size");
} else if (sz == 4) {
CLIENT_ASSERT(CHECK_TRUNCATE_TYPE_int(i) || CHECK_TRUNCATE_TYPE_uint(i),
"opnd_create_immed_int: value too large for 32-bit size");
}
}
#endif
opnd_t
opnd_create_immed_int(ptr_int_t i, opnd_size_t size)
{
opnd_t opnd;
opnd.kind = IMMED_INTEGER_kind;
CLIENT_ASSERT(size < OPSZ_LAST_ENUM, "opnd_create_immed_int: invalid size");
opnd.size = size;
opnd.value.immed_int = i;
opnd.aux.flags = 0;
DOCHECK(1, { opnd_check_immed_size(i, size); });
return opnd;
}
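/* Illustrative sketch, not part of the build: creating integer immediates.
* The size parameter documents the operand width and, in DEBUG builds, drives
* the truncation check in opnd_check_immed_size() above.
*
*   opnd_t imm8 = opnd_create_immed_int(-1, OPSZ_1);      // fits as sbyte
*   opnd_t imm32 = opnd_create_immed_int(0x1234, OPSZ_4); // fits as int
*   ptr_int_t val = opnd_get_immed_int(imm32);            // 0x1234
*/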
opnd_t
opnd_create_immed_uint(ptr_uint_t i, opnd_size_t size)
{
opnd_t opnd;
opnd.kind = IMMED_INTEGER_kind;
CLIENT_ASSERT(size < OPSZ_LAST_ENUM, "opnd_create_immed_uint: invalid size");
opnd.size = size;
opnd.value.immed_int = (ptr_int_t)i;
opnd.aux.flags = 0;
DOCHECK(1, { opnd_check_immed_size(i, size); });
return opnd;
}
opnd_t
opnd_create_immed_int64(int64 i, opnd_size_t size)
{
opnd_t opnd;
opnd.kind = IMMED_INTEGER_kind;
IF_X64(CLIENT_ASSERT(false, "32-bit only"));
CLIENT_ASSERT(size < OPSZ_LAST_ENUM, "opnd_create_immed_int64: invalid size");
opnd.size = size;
opnd.value.immed_int_multi_part.low = (uint)i;
opnd.value.immed_int_multi_part.high = (uint)((uint64)i >> 32);
opnd.aux.flags = DR_OPND_MULTI_PART;
DOCHECK(1, { opnd_check_immed_size(i, size); });
return opnd;
}
bool
opnd_is_immed_int64(opnd_t opnd)
{
return (opnd_is_immed_int(opnd) && TEST(DR_OPND_MULTI_PART, opnd_get_flags(opnd)));
}
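/* Illustrative sketch, not part of the build: on a 32-bit build a 64-bit
* immediate is stored as a multi-part operand and reassembled on read.
*
*   opnd_t imm = opnd_create_immed_int64(0x123456789abcdef0LL, OPSZ_8);
*   CLIENT_ASSERT(opnd_is_immed_int64(imm), "expect multi-part immed");
*   int64 val = opnd_get_immed_int64(imm); // 0x123456789abcdef0
*/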
/* NOTE: requires caller to be under PRESERVE_FLOATING_POINT_STATE */
opnd_t
opnd_create_immed_float(float i)
{
opnd_t opnd;
opnd.kind = IMMED_FLOAT_kind;
/* Note that manipulating floats and doubles by copying in this way can
* result in using FP load/store instructions which can trigger any pending
* FP exception (i#386).
*/
opnd.value.immed_float = i;
/* currently only used for implicit constants that have no size */
opnd.size = OPSZ_0;
return opnd;
}
#ifndef WINDOWS
/* XXX i#4488: x87 floating point immediates should be double precision.
* Type double currently not included for Windows because sizeof(opnd_t) does
* not equal EXPECTED_SIZEOF_OPND, triggering the ASSERT in d_r_arch_init().
*/
/* NOTE: requires caller to be under PRESERVE_FLOATING_POINT_STATE */
opnd_t
opnd_create_immed_double(double i)
{
opnd_t opnd;
opnd.kind = IMMED_DOUBLE_kind;
/* Note that manipulating floats and doubles by copying in this way can
* result in using FP load/store instructions which can trigger any pending
* FP exception (i#386).
*/
opnd.value.immed_double = i;
/* currently only used for implicit constants that have no size */
opnd.size = OPSZ_0;
return opnd;
}
#endif
opnd_t
opnd_create_immed_float_for_opcode(uint opcode)
{
opnd_t opnd;
uint float_value;
opnd.kind = IMMED_FLOAT_kind;
/* avoid any fp instrs (xref i#386) */
float_value = opnd_immed_float_arch(opcode);
*(uint *)(&opnd.value.immed_float) = float_value;
/* currently only used for implicit constants that have no size */
opnd.size = OPSZ_0;
return opnd;
}
ptr_int_t
opnd_get_immed_int(opnd_t opnd)
{
CLIENT_ASSERT(opnd_is_immed_int(opnd), "opnd_get_immed_int called on non-immed-int");
return opnd.value.immed_int;
}
int64
opnd_get_immed_int64(opnd_t opnd)
{
IF_X64(CLIENT_ASSERT(false, "32-bit only"));
CLIENT_ASSERT(opnd_is_immed_int64(opnd),
"opnd_get_immed_int64 called on non-multi-part-immed-int");
return (((uint64)(uint)opnd.value.immed_int_multi_part.high) << 32) |
(uint64)(uint)opnd.value.immed_int_multi_part.low;
}
/* NOTE: requires caller to be under PRESERVE_FLOATING_POINT_STATE */
float
opnd_get_immed_float(opnd_t opnd)
{
CLIENT_ASSERT(opnd_is_immed_float(opnd),
"opnd_get_immed_float called on non-immed-float");
/* note that manipulating floats is dangerous - see case 4360
* this return shouldn't require any fp state, though
*/
return opnd.value.immed_float;
}
#ifndef WINDOWS
/* XXX i#4488: x87 floating point immediates should be double precision.
* Type double currently not included for Windows because sizeof(opnd_t) does
* not equal EXPECTED_SIZEOF_OPND, triggering the ASSERT in d_r_arch_init().
*/
double
opnd_get_immed_double(opnd_t opnd)
{
CLIENT_ASSERT(opnd_is_immed_double(opnd),
"opnd_get_immed_double called on non-immed-float");
return opnd.value.immed_double;
}
#endif
/* address operands */
/* N.B.: seg_selector is a segment selector, not a SEG_ constant */
opnd_t
opnd_create_far_pc(ushort seg_selector, app_pc pc)
{
opnd_t opnd;
opnd.kind = FAR_PC_kind;
opnd.aux.far_pc_seg_selector = seg_selector;
opnd.value.pc = pc;
return opnd;
}
opnd_t
opnd_create_instr_ex(instr_t *instr, opnd_size_t size, ushort shift)
{
opnd_t opnd;
opnd.kind = INSTR_kind;
opnd.value.instr = instr;
opnd.aux.shift = shift;
opnd.size = size;
return opnd;
}
opnd_t
opnd_create_instr(instr_t *instr)
{
return opnd_create_instr_ex(instr, OPSZ_PTR, 0);
}
opnd_t
opnd_create_far_instr(ushort seg_selector, instr_t *instr)
{
opnd_t opnd;
opnd.kind = FAR_INSTR_kind;
opnd.aux.far_pc_seg_selector = seg_selector;
opnd.value.instr = instr;
return opnd;
}
DR_API
opnd_t
opnd_create_mem_instr(instr_t *instr, short disp, opnd_size_t data_size)
{
opnd_t opnd;
opnd.kind = MEM_INSTR_kind;
opnd.size = data_size;
opnd.aux.disp = disp;
opnd.value.instr = instr;
return opnd;
}
app_pc
opnd_get_pc(opnd_t opnd)
{
if (opnd_is_pc(opnd))
return opnd.value.pc;
else {
SYSLOG_INTERNAL_ERROR("opnd type is %d", opnd.kind);
CLIENT_ASSERT(false, "opnd_get_pc called on non-pc");
return NULL;
}
}
ushort
opnd_get_segment_selector(opnd_t opnd)
{
if (opnd_is_far_pc(opnd) || opnd_is_far_instr(opnd)) {
return opnd.aux.far_pc_seg_selector;
}
CLIENT_ASSERT(false, "opnd_get_segment_selector called on invalid opnd type");
return REG_INVALID;
}
instr_t *
opnd_get_instr(opnd_t opnd)
{
CLIENT_ASSERT(opnd_is_instr(opnd) || opnd_is_mem_instr(opnd),
"opnd_get_instr called on non-instr");
return opnd.value.instr;
}
DR_API
ushort
opnd_get_shift(opnd_t opnd)
{
CLIENT_ASSERT(opnd_is_near_instr(opnd), "opnd_get_shift called on non-near-instr");
return opnd.aux.shift;
}
short
opnd_get_mem_instr_disp(opnd_t opnd)
{
CLIENT_ASSERT(opnd_is_mem_instr(opnd),
"opnd_get_mem_instr_disp called on non-mem-instr");
return opnd.aux.disp;
}
/* Base+displacement+scaled index operands */
opnd_t
opnd_create_base_disp_ex(reg_id_t base_reg, reg_id_t index_reg, int scale, int disp,
opnd_size_t size, bool encode_zero_disp, bool force_full_disp,
bool disp_short_addr)
{
return opnd_create_far_base_disp_ex(REG_NULL, base_reg, index_reg, scale, disp, size,
encode_zero_disp, force_full_disp,
disp_short_addr);
}
opnd_t
opnd_create_base_disp(reg_id_t base_reg, reg_id_t index_reg, int scale, int disp,
opnd_size_t size)
{
return opnd_create_far_base_disp_ex(REG_NULL, base_reg, index_reg, scale, disp, size,
false, false, false);
}
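/* Illustrative sketch, not part of the build: the x86 memory operand
* [xax + xbx*4 + 8], reading 4 bytes, as a base + scaled index + disp.
*
*   opnd_t mem = opnd_create_base_disp(DR_REG_XAX, DR_REG_XBX, 4, 8, OPSZ_4);
*   reg_id_t base = opnd_get_base(mem);   // DR_REG_XAX
*   reg_id_t index = opnd_get_index(mem); // DR_REG_XBX
*   int disp = opnd_get_disp(mem);        // 8
*/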
static inline void
opnd_set_disp_helper(opnd_t *opnd, int disp)
{
IF_ARM_ELSE(
{
if (disp < 0) {
opnd->aux.flags |= DR_OPND_NEGATED;
opnd->value.base_disp.disp = -disp;
} else
opnd->value.base_disp.disp = disp;
},
{ opnd->value.base_disp.disp = disp; });
}
opnd_t
opnd_create_far_base_disp_ex(reg_id_t seg, reg_id_t base_reg, reg_id_t index_reg,
int scale, int disp, opnd_size_t size, bool encode_zero_disp,
bool force_full_disp, bool disp_short_addr)
{
opnd_t opnd;
opnd.kind = BASE_DISP_kind;
CLIENT_ASSERT(size < OPSZ_LAST_ENUM, "opnd_create_*base_disp*: invalid size");
opnd.size = size;
CLIENT_ASSERT(scale == 0 || scale == 1 || scale == 2 || scale == 4 || scale == 8,
"opnd_create_*base_disp*: invalid scale");
IF_X86(CLIENT_ASSERT(index_reg == REG_NULL || scale > 0,
"opnd_create_*base_disp*: index requires scale"));
CLIENT_ASSERT(
seg == REG_NULL IF_X86(|| (seg >= REG_START_SEGMENT && seg <= REG_STOP_SEGMENT)),
"opnd_create_*base_disp*: invalid segment");
CLIENT_ASSERT(base_reg <= REG_LAST_ENUM, "opnd_create_*base_disp*: invalid base");
CLIENT_ASSERT(index_reg <= REG_LAST_ENUM, "opnd_create_*base_disp*: invalid index");
CLIENT_ASSERT_BITFIELD_TRUNCATE(SCALE_SPECIFIER_BITS, scale,
"opnd_create_*base_disp*: invalid scale");
/* reg_id_t is now a ushort, but we can only accept low values */
CLIENT_ASSERT_BITFIELD_TRUNCATE(REG_SPECIFIER_BITS, base_reg,
"opnd_create_*base_disp*: invalid base");
CLIENT_ASSERT_BITFIELD_TRUNCATE(REG_SPECIFIER_BITS, index_reg,
"opnd_create_*base_disp*: invalid index");
IF_X86_ELSE({ opnd.aux.segment = seg; },
{
opnd.aux.flags = 0;
CLIENT_ASSERT(
disp == 0 || index_reg == REG_NULL,
"opnd_create_*base_disp*: cannot have both disp and index");
});
opnd_set_disp_helper(&opnd, disp);
opnd.value.base_disp.base_reg = base_reg;
#ifdef X86
if (reg_is_strictly_zmm(index_reg)) {
opnd.value.base_disp.index_reg = index_reg - DR_REG_START_ZMM;
opnd.value.base_disp.index_reg_is_zmm = 1;
} else {
opnd.value.base_disp.index_reg = index_reg;
opnd.value.base_disp.index_reg_is_zmm = 0;
}
#else
opnd.value.base_disp.index_reg = index_reg;
#endif
#if defined(ARM)
if (scale > 1) {
opnd.value.base_disp.shift_type = DR_SHIFT_LSL;
opnd.value.base_disp.shift_amount_minus_1 =
/* we store the amount minus one */
(scale == 2 ? 0 : (scale == 4 ? 1 : 2));
} else {
opnd.value.base_disp.shift_type = DR_SHIFT_NONE;
opnd.value.base_disp.shift_amount_minus_1 = 0;
}
#elif defined(AARCH64)
opnd.value.base_disp.pre_index = true;
opnd.value.base_disp.extend_type = DR_EXTEND_UXTX;
opnd.value.base_disp.scaled = false;
#elif defined(X86)
opnd.value.base_disp.scale = (byte)scale;
opnd.value.base_disp.encode_zero_disp = (byte)encode_zero_disp;
opnd.value.base_disp.force_full_disp = (byte)force_full_disp;
opnd.value.base_disp.disp_short_addr = (byte)disp_short_addr;
#endif
return opnd;
}
opnd_t
opnd_create_far_base_disp(reg_id_t seg, reg_id_t base_reg, reg_id_t index_reg, int scale,
int disp, opnd_size_t size)
{
return opnd_create_far_base_disp_ex(seg, base_reg, index_reg, scale, disp, size,
false, false, false);
}
#ifdef ARM
opnd_t
opnd_create_base_disp_arm(reg_id_t base_reg, reg_id_t index_reg,
dr_shift_type_t shift_type, uint shift_amount, int disp,
dr_opnd_flags_t flags, opnd_size_t size)
{
opnd_t opnd;
opnd.kind = BASE_DISP_kind;
CLIENT_ASSERT(size < OPSZ_LAST_ENUM, "opnd_create_*base_disp*: invalid size");
opnd.size = size;
CLIENT_ASSERT(disp == 0 || index_reg == REG_NULL,
"opnd_create_base_disp_arm: cannot have both disp and index");
CLIENT_ASSERT(base_reg <= REG_LAST_ENUM, "opnd_create_base_disp_arm: invalid base");
CLIENT_ASSERT(index_reg <= REG_LAST_ENUM, "opnd_create_base_disp_arm: invalid index");
/* reg_id_t is now a ushort, but we can only accept low values */
CLIENT_ASSERT_BITFIELD_TRUNCATE(REG_SPECIFIER_BITS, base_reg,
"opnd_create_base_disp_arm: invalid base");
CLIENT_ASSERT_BITFIELD_TRUNCATE(REG_SPECIFIER_BITS, index_reg,
"opnd_create_base_disp_arm: invalid index");
opnd.value.base_disp.base_reg = base_reg;
opnd.value.base_disp.index_reg = index_reg;
opnd_set_disp_helper(&opnd, disp);
/* Set the flags before the shift as the shift will change the flags */
opnd.aux.flags = flags;
if (!opnd_set_index_shift(&opnd, shift_type, shift_amount))
CLIENT_ASSERT(false, "opnd_create_base_disp_arm: invalid shift type/amount");
return opnd;
}
#endif
#ifdef AARCH64
opnd_t
opnd_create_base_disp_aarch64(reg_id_t base_reg, reg_id_t index_reg,
dr_extend_type_t extend_type, bool scaled, int disp,
dr_opnd_flags_t flags, opnd_size_t size)
{
opnd_t opnd;
opnd.kind = BASE_DISP_kind;
CLIENT_ASSERT(size < OPSZ_LAST_ENUM, "opnd_create_*base_disp*: invalid size");
opnd.size = size;
CLIENT_ASSERT(disp == 0 || index_reg == REG_NULL,
"opnd_create_base_disp_aarch64: cannot have both disp and index");
CLIENT_ASSERT(base_reg <= REG_LAST_ENUM,
"opnd_create_base_disp_aarch64: invalid base");
CLIENT_ASSERT(index_reg <= REG_LAST_ENUM,
"opnd_create_base_disp_aarch64: invalid index");
/* reg_id_t is now a ushort, but we can only accept low values */
CLIENT_ASSERT_BITFIELD_TRUNCATE(REG_SPECIFIER_BITS, base_reg,
"opnd_create_base_disp_aarch64: invalid base");
CLIENT_ASSERT_BITFIELD_TRUNCATE(REG_SPECIFIER_BITS, index_reg,
"opnd_create_base_disp_aarch64: invalid index");
opnd.value.base_disp.base_reg = base_reg;
opnd.value.base_disp.index_reg = index_reg;
opnd.value.base_disp.pre_index = false;
opnd_set_disp_helper(&opnd, disp);
opnd.aux.flags = flags;
if (!opnd_set_index_extend(&opnd, extend_type, scaled))
CLIENT_ASSERT(false, "opnd_create_base_disp_aarch64: invalid extend type");
return opnd;
}
#endif
#undef opnd_get_base
#undef opnd_get_disp
#undef opnd_get_index
#undef opnd_get_scale
#undef opnd_get_segment
reg_id_t
opnd_get_base(opnd_t opnd)
{
return OPND_GET_BASE(opnd);
}
int
opnd_get_disp(opnd_t opnd)
{
return OPND_GET_DISP(opnd);
}
reg_id_t
opnd_get_index(opnd_t opnd)
{
return OPND_GET_INDEX(opnd);
}
int
opnd_get_scale(opnd_t opnd)
{
return OPND_GET_SCALE(opnd);
}
reg_id_t
opnd_get_segment(opnd_t opnd)
{
return OPND_GET_SEGMENT(opnd);
}
#define opnd_get_base OPND_GET_BASE
#define opnd_get_disp OPND_GET_DISP
#define opnd_get_index OPND_GET_INDEX
#define opnd_get_scale OPND_GET_SCALE
#define opnd_get_segment OPND_GET_SEGMENT
#ifdef ARM
dr_shift_type_t
opnd_get_index_shift(opnd_t opnd, uint *amount OUT)
{
if (amount != NULL)
*amount = 0;
if (!opnd_is_base_disp(opnd)) {
CLIENT_ASSERT(false, "opnd_get_index_shift called on invalid opnd type");
return DR_SHIFT_NONE;
}
if (amount != NULL && opnd.value.base_disp.shift_type != DR_SHIFT_NONE)
*amount = opnd.value.base_disp.shift_amount_minus_1 + 1;
return opnd.value.base_disp.shift_type;
}
bool
opnd_set_index_shift(opnd_t *opnd, dr_shift_type_t shift, uint amount)
{
if (!opnd_is_base_disp(*opnd)) {
CLIENT_ASSERT(false, "opnd_set_index_shift called on invalid opnd type");
return false;
}
switch (shift) {
case DR_SHIFT_NONE:
if (amount != 0) {
/* Called from opnd_create_base_disp_arm() so we have a generic msg */
CLIENT_ASSERT(false, "opnd index shift: invalid shift amount");
return false;
}
opnd->value.base_disp.shift_amount_minus_1 = 0; /* so opnd_same matches */
break;
case DR_SHIFT_LSL:
case DR_SHIFT_ROR:
/* XXX: T32 only allows shift value [1, 3] */
if (amount < 1 || amount > 31) {
CLIENT_ASSERT(false, "opnd index shift: invalid shift amount");
return false;
}
opnd->value.base_disp.shift_amount_minus_1 = (byte)amount - 1;
break;
case DR_SHIFT_LSR:
case DR_SHIFT_ASR:
if (amount < 1 || amount > 32) {
CLIENT_ASSERT(false, "opnd index shift: invalid shift amount");
return false;
}
opnd->value.base_disp.shift_amount_minus_1 = (byte)amount - 1;
break;
case DR_SHIFT_RRX:
if (amount != 1) {
CLIENT_ASSERT(false, "opnd index shift: invalid shift amount");
return false;
}
opnd->value.base_disp.shift_amount_minus_1 = (byte)amount - 1;
break;
default: CLIENT_ASSERT(false, "opnd index shift: invalid shift type"); return false;
}
if (shift == DR_SHIFT_NONE)
opnd->aux.flags &= ~DR_OPND_SHIFTED;
else
opnd->aux.flags |= DR_OPND_SHIFTED;
opnd->value.base_disp.shift_type = shift;
return true;
}
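/* Illustrative sketch, not part of the build: the ARM operand
* [r0, r1, LSL #2], i.e. base r0 plus index r1 shifted left by 2.
*
*   opnd_t mem = opnd_create_base_disp_arm(DR_REG_R0, DR_REG_R1, DR_SHIFT_LSL,
*                                          2, 0, 0, OPSZ_4);
*   uint amount;
*   dr_shift_type_t type = opnd_get_index_shift(mem, &amount);
*   // type == DR_SHIFT_LSL, amount == 2, and DR_OPND_SHIFTED is set in flags
*/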
#endif /* ARM */
#ifdef AARCH64
static uint
opnd_size_to_extend_amount(opnd_size_t size)
{
switch (size) {
default:
ASSERT(false);
/* fall-through */
case OPSZ_1: return 0;
case OPSZ_2: return 1;
case OPSZ_4: return 2;
case OPSZ_0: /* fall-through */
case OPSZ_8: return 3;
case OPSZ_16: return 4;
}
}
dr_extend_type_t
opnd_get_index_extend(opnd_t opnd, OUT bool *scaled, OUT uint *amount)
{
dr_extend_type_t extend = DR_EXTEND_UXTX;
bool scaled_out = false;
uint amount_out = 0;
if (!opnd_is_base_disp(opnd))
CLIENT_ASSERT(false, "opnd_get_index_shift called on invalid opnd type");
else {
extend = opnd.value.base_disp.extend_type;
scaled_out = opnd.value.base_disp.scaled;
if (scaled_out)
amount_out = opnd_size_to_extend_amount(opnd_get_size(opnd));
}
if (scaled != NULL)
*scaled = scaled_out;
if (amount != NULL)
*amount = amount_out;
return extend;
}
bool
opnd_set_index_extend(opnd_t *opnd, dr_extend_type_t extend, bool scaled)
{
if (!opnd_is_base_disp(*opnd)) {
CLIENT_ASSERT(false, "opnd_set_index_shift called on invalid opnd type");
return false;
}
if (extend > 7) {
CLIENT_ASSERT(false, "opnd index extend: invalid extend type");
return false;
}
opnd->value.base_disp.extend_type = extend;
opnd->value.base_disp.scaled = scaled;
return true;
}
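/* Illustrative sketch, not part of the build: the AArch64 operand
* [x0, w1, SXTW #2], i.e. base x0 plus a sign-extended, scaled 32-bit index.
*
*   opnd_t mem = opnd_create_base_disp_aarch64(DR_REG_X0, DR_REG_W1,
*                                              DR_EXTEND_SXTW, true, 0, 0,
*                                              OPSZ_4);
*   bool scaled;
*   uint amount;
*   dr_extend_type_t ext = opnd_get_index_extend(mem, &scaled, &amount);
*   // ext == DR_EXTEND_SXTW, scaled == true, amount == 2 (from OPSZ_4)
*/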
#endif /* AARCH64 */
bool
opnd_is_disp_encode_zero(opnd_t opnd)
{
if (opnd_is_base_disp(opnd))
return IF_X86_ELSE(opnd.value.base_disp.encode_zero_disp, false);
CLIENT_ASSERT(false, "opnd_is_disp_encode_zero called on invalid opnd type");
return false;
}
bool
opnd_is_disp_force_full(opnd_t opnd)
{
if (opnd_is_base_disp(opnd))
return IF_X86_ELSE(opnd.value.base_disp.force_full_disp, false);
CLIENT_ASSERT(false, "opnd_is_disp_force_full called on invalid opnd type");
return false;
}
bool
opnd_is_disp_short_addr(opnd_t opnd)
{
if (opnd_is_base_disp(opnd))
return IF_X86_ELSE(opnd.value.base_disp.disp_short_addr, false);
CLIENT_ASSERT(false, "opnd_is_disp_short_addr called on invalid opnd type");
return false;
}
void
opnd_set_disp(opnd_t *opnd, int disp)
{
if (opnd_is_base_disp(*opnd))
opnd_set_disp_helper(opnd, disp);
else
CLIENT_ASSERT(false, "opnd_set_disp called on invalid opnd type");
}
#ifdef X86
void
opnd_set_disp_ex(opnd_t *opnd, int disp, bool encode_zero_disp, bool force_full_disp,
bool disp_short_addr)
{
if (opnd_is_base_disp(*opnd)) {
opnd->value.base_disp.encode_zero_disp = (byte)encode_zero_disp;
opnd->value.base_disp.force_full_disp = (byte)force_full_disp;
opnd->value.base_disp.disp_short_addr = (byte)disp_short_addr;
opnd_set_disp_helper(opnd, disp);
} else
CLIENT_ASSERT(false, "opnd_set_disp_ex called on invalid opnd type");
}
#endif
opnd_t
opnd_create_abs_addr(void *addr, opnd_size_t data_size)
{
return opnd_create_far_abs_addr(REG_NULL, addr, data_size);
}
opnd_t
opnd_create_far_abs_addr(reg_id_t seg, void *addr, opnd_size_t data_size)
{
/* PR 253327: For x64, there's no way to create 0xa0-0xa3 w/ addr
* prefix since we'll make a base-disp instead: but our IR is
* supposed to be at a higher abstraction level anyway, though w/
* the sib byte the base-disp ends up being one byte longer.
*/
if (IF_X64_ELSE((ptr_uint_t)addr <= UINT_MAX, true)) {
bool need_addr32 = false;
CLIENT_ASSERT(CHECK_TRUNCATE_TYPE_uint((ptr_uint_t)addr),
"internal error: abs addr too large");
#ifdef X64
/* To reach the high 2GB of the lower 4GB we need the addr32 prefix */
if ((ptr_uint_t)addr > INT_MAX)
need_addr32 = X64_MODE_DC(get_thread_private_dcontext());
#endif
return opnd_create_far_base_disp_ex(seg, REG_NULL, REG_NULL, 0,
(int)(ptr_int_t)addr, data_size, false, false,
need_addr32);
}
#ifdef X64
else {
opnd_t opnd;
opnd.kind = ABS_ADDR_kind;
CLIENT_ASSERT(data_size < OPSZ_LAST_ENUM, "opnd_create_base_disp: invalid size");
opnd.size = data_size;
CLIENT_ASSERT(
seg ==
REG_NULL IF_X86(|| (seg >= REG_START_SEGMENT && seg <= REG_STOP_SEGMENT)),
"opnd_create_far_abs_addr: invalid segment");
IF_X86(opnd.aux.segment = seg);
opnd.value.addr = addr;
return opnd;
}
#endif
}
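/* Illustrative sketch, not part of the build: absolute addresses that fit in
* 32 bits fold into a base+disp with no base or index, so both predicates hold.
*
*   opnd_t mem = opnd_create_abs_addr((void *)0x1000, OPSZ_4);
*   // opnd_is_abs_base_disp(mem) and opnd_is_abs_addr(mem) are both true;
*   // opnd_get_addr(mem) returns (void *)0x1000.
*/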
#if defined(X64) || defined(ARM)
opnd_t
opnd_create_rel_addr(void *addr, opnd_size_t data_size)
{
return opnd_create_far_rel_addr(REG_NULL, addr, data_size);
}
/* PR 253327: We represent rip-relative w/ an address-size prefix
* (i.e., 32 bits instead of 64) as simply having the top 32 bits of
* "addr" zeroed out. This means that we never encode an address
* prefix, and if one already exists in the raw bits we have to go
* looking for it at encode time.
*/
opnd_t
opnd_create_far_rel_addr(reg_id_t seg, void *addr, opnd_size_t data_size)
{
opnd_t opnd;
opnd.kind = REL_ADDR_kind;
CLIENT_ASSERT(data_size < OPSZ_LAST_ENUM,
"opnd_create_far_rel_addr: invalid size");
opnd.size = data_size;
CLIENT_ASSERT(
seg == REG_NULL IF_X86(|| (seg >= REG_START_SEGMENT && seg <= REG_STOP_SEGMENT)),
"opnd_create_far_rel_addr: invalid segment");
IF_X86(opnd.aux.segment = seg);
opnd.value.addr = addr;
return opnd;
}
#endif /* X64 || ARM */
void *
opnd_get_addr(opnd_t opnd)
{
/* check base-disp first since opnd_is_abs_addr() says yes for it */
if (opnd_is_abs_base_disp(opnd))
return (void *)(ptr_int_t)opnd_get_disp(opnd);
#if defined(X64) || defined(ARM)
if (IF_X64(opnd_is_abs_addr(opnd) ||) opnd_is_rel_addr(opnd))
return opnd.value.addr;
#endif
CLIENT_ASSERT(false, "opnd_get_addr called on invalid opnd type");
return NULL;
}
bool
opnd_is_memory_reference(opnd_t opnd)
{
return (opnd_is_base_disp(opnd) IF_X86_64(|| opnd_is_abs_addr(opnd)) ||
#if defined(X64) || defined(ARM)
opnd_is_rel_addr(opnd) ||
#endif
opnd_is_mem_instr(opnd));
}
bool
opnd_is_far_memory_reference(opnd_t opnd)
{
return (opnd_is_far_base_disp(opnd)
IF_X64(|| opnd_is_far_abs_addr(opnd) || opnd_is_far_rel_addr(opnd)));
}
bool
opnd_is_near_memory_reference(opnd_t opnd)
{
return (opnd_is_near_base_disp(opnd)
IF_X64(|| opnd_is_near_abs_addr(opnd) || opnd_is_near_rel_addr(opnd)) ||
IF_ARM(opnd_is_near_rel_addr(opnd) ||) opnd_is_mem_instr(opnd));
}
int
opnd_num_regs_used(opnd_t opnd)
{
switch (opnd.kind) {
case NULL_kind:
case IMMED_INTEGER_kind:
case IMMED_FLOAT_kind:
case IMMED_DOUBLE_kind:
case PC_kind:
case FAR_PC_kind:
case INSTR_kind:
case FAR_INSTR_kind:
case MEM_INSTR_kind: return 0;
case REG_kind: return 1;
case BASE_DISP_kind:
return (((opnd_get_base(opnd) == REG_NULL) ? 0 : 1) +
((opnd_get_index(opnd) == REG_NULL) ? 0 : 1) +
((opnd_get_segment(opnd) == REG_NULL) ? 0 : 1));
#if defined(X64) || defined(ARM)
case REL_ADDR_kind:
#endif
#ifdef X64
case ABS_ADDR_kind: return ((opnd_get_segment(opnd) == REG_NULL) ? 0 : 1);
#endif
default:
CLIENT_ASSERT(false, "opnd_num_regs_used called on invalid opnd type");
return 0;
}
}
reg_id_t
opnd_get_reg_used(opnd_t opnd, int index)
{
switch (opnd.kind) {
case NULL_kind:
case IMMED_INTEGER_kind:
case IMMED_FLOAT_kind:
case IMMED_DOUBLE_kind:
case PC_kind:
case FAR_PC_kind:
case MEM_INSTR_kind:
CLIENT_ASSERT(false, "opnd_get_reg_used called on invalid opnd type");
return REG_NULL;
case REG_kind:
if (index == 0)
return opnd_get_reg(opnd);
else {
CLIENT_ASSERT(false, "opnd_get_reg_used called on invalid opnd type");
return REG_NULL;
}
case BASE_DISP_kind:
if (index == 0) {
if (opnd_get_base(opnd) != REG_NULL)
return opnd_get_base(opnd);
else if (opnd_get_index(opnd) != REG_NULL)
return opnd_get_index(opnd);
else
return opnd_get_segment(opnd);
} else if (index == 1) {
if (opnd_get_index(opnd) != REG_NULL)
return opnd_get_index(opnd);
else
return opnd_get_segment(opnd);
} else if (index == 2)
return opnd_get_segment(opnd);
else {
CLIENT_ASSERT(false, "opnd_get_reg_used called on invalid opnd type");
return REG_NULL;
}
#if defined(X64) || defined(ARM)
case REL_ADDR_kind:
#endif
#ifdef X64
case ABS_ADDR_kind:
if (index == 0)
return opnd_get_segment(opnd);
else {
/* We only assert if beyond the number possible: not if beyond the
* number present. Should we assert on the latter?
*/
CLIENT_ASSERT(false, "opnd_get_reg_used called on invalid opnd type");
return REG_NULL;
}
#endif
default:
CLIENT_ASSERT(false, "opnd_get_reg_used called on invalid opnd type");
return REG_NULL;
}
}
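/* Illustrative sketch, not part of the build: the two routines above pair up
* to enumerate every register an operand references, e.g. for liveness
* analysis in a client.
*
*   int i;
*   for (i = 0; i < opnd_num_regs_used(opnd); i++) {
*       reg_id_t reg = opnd_get_reg_used(opnd, i);
*       // record reg as used
*   }
*/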
/***************************************************************************/
/* utility routines */
const reg_id_t d_r_regparms[] = {
#ifdef X86
# ifdef X64
REGPARM_0, REGPARM_1, REGPARM_2, REGPARM_3,
# ifdef UNIX
REGPARM_4, REGPARM_5,
# endif
# endif
#elif defined(AARCHXX)
REGPARM_0, REGPARM_1, REGPARM_2, REGPARM_3,
# ifdef X64
REGPARM_4, REGPARM_5, REGPARM_6, REGPARM_7,
# endif
#endif
REG_INVALID
};
/* opnd_uses_reg now considers 8/16-bit register overlaps. Historically only
* internal optimization passes and the (now unused) register-stealing code
* relied on this routine, but it is exported via the client interface (CI)
* API, so the overlap semantics are part of the public contract.
*/
bool
opnd_uses_reg(opnd_t opnd, reg_id_t reg)
{
if (reg == REG_NULL)
return false;
switch (opnd.kind) {
case NULL_kind:
case IMMED_INTEGER_kind:
case IMMED_FLOAT_kind:
case IMMED_DOUBLE_kind:
case PC_kind:
case FAR_PC_kind:
case INSTR_kind:
case FAR_INSTR_kind:
case MEM_INSTR_kind: return false;
case REG_kind: return (dr_reg_fixer[reg] == dr_reg_fixer[opnd_get_reg(opnd)]);
case BASE_DISP_kind:
return (dr_reg_fixer[reg] == dr_reg_fixer[opnd_get_base(opnd)] ||
dr_reg_fixer[reg] == dr_reg_fixer[opnd_get_index(opnd)] ||
dr_reg_fixer[reg] == dr_reg_fixer[opnd_get_segment(opnd)]);
#if defined(X64) || defined(ARM)
case REL_ADDR_kind:
#endif
#ifdef X64
case ABS_ADDR_kind:
return (dr_reg_fixer[reg] == dr_reg_fixer[opnd_get_segment(opnd)]);
#endif
default: CLIENT_ASSERT(false, "opnd_uses_reg: unknown opnd type"); return false;
}
}
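/* Illustrative sketch, not part of the build: because of the dr_reg_fixer
* canonicalization above, sub-register overlap counts as a use.
*
*   opnd_t r = opnd_create_reg(DR_REG_EAX);
*   // opnd_uses_reg(r, DR_REG_AX)  -> true: AX overlaps EAX
*   // opnd_uses_reg(r, DR_REG_XBX) -> false
*/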
bool
opnd_replace_reg(opnd_t *opnd, reg_id_t old_reg, reg_id_t new_reg)
{
switch (opnd->kind) {
case NULL_kind:
case IMMED_INTEGER_kind:
case IMMED_FLOAT_kind:
case IMMED_DOUBLE_kind:
case PC_kind:
case FAR_PC_kind:
case INSTR_kind:
case FAR_INSTR_kind:
case MEM_INSTR_kind: return false;
case REG_kind:
if (old_reg == opnd_get_reg(*opnd)) {
*opnd = opnd_create_reg_ex(
new_reg, opnd_is_reg_partial(*opnd) ? opnd_get_size(*opnd) : 0,
opnd_get_flags(*opnd));
return true;
}
return false;
case BASE_DISP_kind: {
reg_id_t ob = opnd_get_base(*opnd);
reg_id_t oi = opnd_get_index(*opnd);
reg_id_t os = opnd_get_segment(*opnd);
opnd_size_t size = opnd_get_size(*opnd);
if (old_reg == ob || old_reg == oi || old_reg == os) {
reg_id_t b = (old_reg == ob) ? new_reg : ob;
reg_id_t i = (old_reg == oi) ? new_reg : oi;
int d = opnd_get_disp(*opnd);
#if defined(AARCH64)
bool scaled = false;
dr_extend_type_t extend = opnd_get_index_extend(*opnd, &scaled, NULL);
dr_opnd_flags_t flags = opnd_get_flags(*opnd);
*opnd = opnd_create_base_disp_aarch64(b, i, extend, scaled, d, flags, size);
#elif defined(ARM)
uint amount;
dr_shift_type_t shift = opnd_get_index_shift(*opnd, &amount);
dr_opnd_flags_t flags = opnd_get_flags(*opnd);
*opnd = opnd_create_base_disp_arm(b, i, shift, amount, d, flags, size);
#elif defined(X86)
int sc = opnd_get_scale(*opnd);
reg_id_t s = (old_reg == os) ? new_reg : os;
*opnd = opnd_create_far_base_disp_ex(
s, b, i, sc, d, size, opnd_is_disp_encode_zero(*opnd),
opnd_is_disp_force_full(*opnd), opnd_is_disp_short_addr(*opnd));
#endif
return true;
}
}
return false;
#if defined(X64) || defined(ARM)
case REL_ADDR_kind:
if (old_reg == opnd_get_segment(*opnd)) {
*opnd = opnd_create_far_rel_addr(new_reg, opnd_get_addr(*opnd),
opnd_get_size(*opnd));
return true;
}
return false;
#endif
#ifdef X64
case ABS_ADDR_kind:
if (old_reg == opnd_get_segment(*opnd)) {
*opnd = opnd_create_far_abs_addr(new_reg, opnd_get_addr(*opnd),
opnd_get_size(*opnd));
return true;
}
return false;
#endif
default: CLIENT_ASSERT(false, "opnd_replace_reg: invalid opnd type"); return false;
}
}
static reg_id_t
reg_match_size_and_type(reg_id_t new_reg, opnd_size_t size, reg_id_t old_reg)
{
reg_id_t sized_reg = reg_resize_to_opsz(new_reg, size);
#ifdef X86
/* Convert from L to H version of 8-bit regs. */
if (old_reg >= DR_REG_START_x86_8 && old_reg <= DR_REG_STOP_x86_8) {
sized_reg = (sized_reg - DR_REG_START_8HL) + DR_REG_START_x86_8;
ASSERT(sized_reg <= DR_REG_STOP_x86_8);
}
#endif
return sized_reg;
}
bool
opnd_replace_reg_resize(opnd_t *opnd, reg_id_t old_reg, reg_id_t new_reg)
{
switch (opnd->kind) {
case NULL_kind:
case IMMED_INTEGER_kind:
case IMMED_FLOAT_kind:
case IMMED_DOUBLE_kind:
case PC_kind:
case FAR_PC_kind:
case INSTR_kind:
case FAR_INSTR_kind:
case MEM_INSTR_kind: return false;
case REG_kind:
if (reg_overlap(old_reg, opnd_get_reg(*opnd))) {
reg_id_t sized_reg = reg_match_size_and_type(new_reg, opnd_get_size(*opnd),
opnd_get_reg(*opnd));
*opnd = opnd_create_reg_ex(
sized_reg, opnd_is_reg_partial(*opnd) ? opnd_get_size(*opnd) : 0,
opnd_get_flags(*opnd));
return true;
}
return false;
case BASE_DISP_kind: {
reg_id_t ob = opnd_get_base(*opnd);
reg_id_t oi = opnd_get_index(*opnd);
reg_id_t os = opnd_get_segment(*opnd);
opnd_size_t size = opnd_get_size(*opnd);
bool found = false;
reg_id_t new_b = ob;
reg_id_t new_i = oi;
reg_id_t new_s = os;
if (reg_overlap(old_reg, ob)) {
found = true;
new_b = reg_match_size_and_type(new_reg, reg_get_size(ob), ob);
}
if (reg_overlap(old_reg, oi)) {
found = true;
new_i = reg_match_size_and_type(new_reg, reg_get_size(oi), oi);
}
if (reg_overlap(old_reg, os)) {
found = true;
new_s = reg_match_size_and_type(new_reg, reg_get_size(os), os);
}
if (found) {
int disp = opnd_get_disp(*opnd);
#if defined(AARCH64)
bool scaled = false;
dr_extend_type_t extend = opnd_get_index_extend(*opnd, &scaled, NULL);
dr_opnd_flags_t flags = opnd_get_flags(*opnd);
*opnd = opnd_create_base_disp_aarch64(new_b, new_i, extend, scaled, disp,
flags, size);
#elif defined(ARM)
uint amount;
dr_shift_type_t shift = opnd_get_index_shift(*opnd, &amount);
dr_opnd_flags_t flags = opnd_get_flags(*opnd);
*opnd =
opnd_create_base_disp_arm(new_b, new_i, shift, amount, disp, flags, size);
#elif defined(X86)
int sc = opnd_get_scale(*opnd);
*opnd = opnd_create_far_base_disp_ex(
new_s, new_b, new_i, sc, disp, size, opnd_is_disp_encode_zero(*opnd),
opnd_is_disp_force_full(*opnd), opnd_is_disp_short_addr(*opnd));
#endif
return true;
}
}
return false;
#if defined(X64) || defined(ARM)
case REL_ADDR_kind:
if (reg_overlap(old_reg, opnd_get_segment(*opnd))) {
reg_id_t new_s = reg_match_size_and_type(
new_reg, reg_get_size(opnd_get_segment(*opnd)), opnd_get_segment(*opnd));
*opnd = opnd_create_far_rel_addr(new_s, opnd_get_addr(*opnd),
opnd_get_size(*opnd));
return true;
}
return false;
#endif
#ifdef X64
case ABS_ADDR_kind:
if (reg_overlap(old_reg, opnd_get_segment(*opnd))) {
reg_id_t new_s = reg_match_size_and_type(
new_reg, reg_get_size(opnd_get_segment(*opnd)), opnd_get_segment(*opnd));
*opnd = opnd_create_far_abs_addr(new_s, opnd_get_addr(*opnd),
opnd_get_size(*opnd));
return true;
}
return false;
#endif
default: CLIENT_ASSERT(false, "opnd_replace_reg: invalid opnd type"); return false;
}
}
/* this is not conservative -- only considers two memory references to
* be the same if their constituent components (registers, displacement)
* are the same.
* different from opnd_same b/c this routine ignores data size!
*/
bool
opnd_same_address(opnd_t op1, opnd_t op2)
{
if (op1.kind != op2.kind)
return false;
if (!opnd_is_memory_reference(op1) || !opnd_is_memory_reference(op2))
return false;
if (opnd_get_segment(op1) != opnd_get_segment(op2))
return false;
if (opnd_is_base_disp(op1)) {
#ifdef ARM
uint amount1, amount2;
#endif
if (!opnd_is_base_disp(op2))
return false;
if (opnd_get_base(op1) != opnd_get_base(op2))
return false;
if (opnd_get_index(op1) != opnd_get_index(op2))
return false;
if (opnd_get_scale(op1) != opnd_get_scale(op2))
return false;
if (opnd_get_disp(op1) != opnd_get_disp(op2))
return false;
#ifdef ARM
if (opnd_get_index_shift(op1, &amount1) != opnd_get_index_shift(op2, &amount2) ||
amount1 != amount2)
return false;
if (opnd_get_flags(op1) != opnd_get_flags(op2))
return false;
#endif
} else {
#if defined(X64) || defined(ARM)
CLIENT_ASSERT(IF_X64(opnd_is_abs_addr(op1) ||) opnd_is_rel_addr(op1),
"internal type error in opnd_same_address");
if (opnd_get_addr(op1) != opnd_get_addr(op2))
return false;
#else
CLIENT_ASSERT(false, "internal type error in opnd_same_address");
#endif
}
/* we ignore size */
return true;
}
bool
opnd_same(opnd_t op1, opnd_t op2)
{
if (op1.kind != op2.kind)
return false;
else if (!opnd_same_sizes_ok(opnd_get_size(op1), opnd_get_size(op2),
opnd_is_reg(op1)) &&
(IF_X86(opnd_is_immed_int(op1) ||) /* on ARM we ignore immed sizes */
opnd_is_reg(op1) ||
opnd_is_memory_reference(op1)))
return false;
/* If we could rely on unused bits being 0 we could avoid dispatching on type.
* Presumably not on the critical path, though, so we do not bother trying to
* assert that those bits are 0.
*/
switch (op1.kind) {
case NULL_kind: return true;
case IMMED_INTEGER_kind: return op1.value.immed_int == op2.value.immed_int;
case IMMED_FLOAT_kind:
/* avoid any fp instrs (xref i#386) */
return *(int *)(&op1.value.immed_float) == *(int *)(&op2.value.immed_float);
#ifndef WINDOWS
/* XXX i#4488: x87 floating point immediates should be double precision.
* Type double currently not included for Windows because sizeof(opnd_t) does
* not equal EXPECTED_SIZEOF_OPND, triggering the ASSERT in d_r_arch_init().
*/
case IMMED_DOUBLE_kind:
return *(int64 *)(&op1.value.immed_double) == *(int64 *)(&op2.value.immed_double);
#endif
case PC_kind: return op1.value.pc == op2.value.pc;
case FAR_PC_kind:
return (op1.aux.far_pc_seg_selector == op2.aux.far_pc_seg_selector &&
op1.value.pc == op2.value.pc);
case INSTR_kind:
return (op1.value.instr == op2.value.instr && op1.aux.shift == op2.aux.shift &&
op1.size == op2.size);
case FAR_INSTR_kind: return op1.value.instr == op2.value.instr;
case REG_kind: return op1.value.reg == op2.value.reg;
case BASE_DISP_kind:
return (IF_X86(op1.aux.segment == op2.aux.segment &&)
op1.value.base_disp.base_reg == op2.value.base_disp.base_reg &&
op1.value.base_disp.index_reg == op2.value.base_disp.index_reg &&
#ifdef X86
op1.value.base_disp.index_reg_is_zmm ==
op2.value.base_disp.index_reg_is_zmm &&
#endif
IF_X86(op1.value.base_disp.scale == op2.value.base_disp.scale &&) IF_ARM(
op1.value.base_disp.shift_type == op2.value.base_disp.shift_type &&
op1.value.base_disp.shift_amount_minus_1 ==
op2.value.base_disp.shift_amount_minus_1 &&)
op1.value.base_disp.disp == op2.value.base_disp.disp &&
IF_X86(op1.value.base_disp.encode_zero_disp ==
op2.value.base_disp.encode_zero_disp &&
op1.value.base_disp.force_full_disp ==
op2.value.base_disp.force_full_disp &&
/* disp_short_addr only matters if no registers are set */
(((op1.value.base_disp.base_reg != REG_NULL ||
op1.value.base_disp.index_reg != REG_NULL) &&
(op2.value.base_disp.base_reg != REG_NULL ||
op2.value.base_disp.index_reg != REG_NULL)) ||
op1.value.base_disp.disp_short_addr ==
op2.value.base_disp.disp_short_addr) &&) true);
#if defined(X64) || defined(ARM)
case REL_ADDR_kind:
#endif
#ifdef X64
case ABS_ADDR_kind:
return (IF_X86(op1.aux.segment == op2.aux.segment &&)
op1.value.addr == op2.value.addr);
#endif
case MEM_INSTR_kind:
return (op1.value.instr == op2.value.instr && op1.aux.disp == op2.aux.disp);
default: CLIENT_ASSERT(false, "opnd_same: invalid opnd type"); return false;
}
}
bool
opnd_share_reg(opnd_t op1, opnd_t op2)
{
switch (op1.kind) {
case NULL_kind:
case IMMED_INTEGER_kind:
case IMMED_FLOAT_kind:
case IMMED_DOUBLE_kind:
case PC_kind:
case FAR_PC_kind:
case INSTR_kind:
case FAR_INSTR_kind:
case MEM_INSTR_kind: return false;
case REG_kind: return opnd_uses_reg(op2, opnd_get_reg(op1));
case BASE_DISP_kind:
return (opnd_uses_reg(op2, opnd_get_base(op1)) ||
opnd_uses_reg(op2, opnd_get_index(op1)) ||
opnd_uses_reg(op2, opnd_get_segment(op1)));
#if defined(X64) || defined(ARM)
case REL_ADDR_kind:
#endif
#ifdef X64
case ABS_ADDR_kind: return (opnd_uses_reg(op2, opnd_get_segment(op1)));
#endif
default: CLIENT_ASSERT(false, "opnd_share_reg: invalid opnd type"); return false;
}
}
static bool
range_overlap(ptr_uint_t a1, ptr_uint_t a2, size_t s1, size_t s2)
{
ptr_uint_t min, max;
size_t min_plus;
if (a1 < a2) {
min = a1;
min_plus = s1;
max = a2;
} else {
min = a2;
min_plus = s2;
max = a1;
}
return (min + min_plus > max); /* open-ended */
}
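/* Worked example: the range check is half-open, so adjacent 4-byte accesses
* at 0x1000 and 0x1004 do not overlap (0x1000 + 4 > 0x1004 is false), while
* 4-byte accesses at 0x1000 and 0x1002 do (0x1000 + 4 > 0x1002).
*/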
/* Returns true if def, considered as a write, affects use.
* Is conservative, so if both def and use are memory references,
* will return true unless it can disambiguate them.
*/
bool
opnd_defines_use(opnd_t def, opnd_t use)
{
switch (def.kind) {
case NULL_kind:
case IMMED_INTEGER_kind:
case IMMED_FLOAT_kind:
case IMMED_DOUBLE_kind:
case PC_kind:
case FAR_PC_kind:
case INSTR_kind:
case FAR_INSTR_kind: return false;
case REG_kind: return opnd_uses_reg(use, opnd_get_reg(def));
case BASE_DISP_kind: {
#ifdef ARM
uint amount1, amount2;
#endif
if (!opnd_is_memory_reference(use))
return false;
#ifdef X64
if (!opnd_is_base_disp(use))
return true;
#endif
/* try to disambiguate the two memory references
* for now, only consider identical regs and different disp
*/
if (opnd_get_base(def) != opnd_get_base(use))
return true;
if (opnd_get_index(def) != opnd_get_index(use))
return true;
if (opnd_get_scale(def) != opnd_get_scale(use))
return true;
if (opnd_get_segment(def) != opnd_get_segment(use))
return true;
#ifdef ARM
if (opnd_get_index_shift(def, &amount1) != opnd_get_index_shift(use, &amount2) ||
amount1 != amount2)
return true;
if (opnd_get_flags(def) != opnd_get_flags(use))
return true;
#endif
/* everything is identical, now make sure disps don't overlap */
return range_overlap(opnd_get_disp(def), opnd_get_disp(use),
opnd_size_in_bytes(opnd_get_size(def)),
opnd_size_in_bytes(opnd_get_size(use)));
}
#if defined(X64) || defined(ARM)
case REL_ADDR_kind:
#endif
#ifdef X64
case ABS_ADDR_kind:
if (!opnd_is_memory_reference(use))
return false;
if (opnd_is_base_disp(use))
return true;
if (opnd_get_segment(def) != opnd_get_segment(use))
return true;
return range_overlap((ptr_uint_t)opnd_get_addr(def),
(ptr_uint_t)opnd_get_addr(use),
opnd_size_in_bytes(opnd_get_size(def)),
opnd_size_in_bytes(opnd_get_size(use)));
#endif
case MEM_INSTR_kind:
if (!opnd_is_memory_reference(use))
return false;
/* we don't know our address so we have to assume true */
return true;
default: CLIENT_ASSERT(false, "opnd_defines_use: invalid opnd type"); return false;
}
}
uint
opnd_size_in_bytes(opnd_size_t size)
{
CLIENT_ASSERT(size >= OPSZ_FIRST, "opnd_size_in_bytes: invalid size");
switch (size) {
case OPSZ_0: return 0;
case OPSZ_1:
case OPSZ_1_reg4: /* mem size */
case OPSZ_1_of_4:
case OPSZ_1_of_8:
case OPSZ_1_of_16:
case OPSZ_1b: /* round up */
case OPSZ_2b:
case OPSZ_3b:
case OPSZ_4b:
case OPSZ_5b:
case OPSZ_6b:
case OPSZ_7b: return 1;
case OPSZ_2_of_4:
case OPSZ_2_of_8:
case OPSZ_2_of_16:
case OPSZ_2_short1: /* default size */
case OPSZ_2:
case OPSZ_2_reg4: /* mem size */
case OPSZ_9b: /* round up */
case OPSZ_10b:
case OPSZ_11b:
case OPSZ_12b:
case OPSZ_eighth_16_vex32:
case OPSZ_eighth_16_vex32_evex64: return 2;
case OPSZ_20b: /* round up */
case OPSZ_3: return 3;
case OPSZ_4_of_8:
case OPSZ_4_of_16:
case OPSZ_4_rex8_of_16:
case OPSZ_4_short2: /* default size */
#ifndef X64
case OPSZ_4x8: /* default size */
case OPSZ_4x8_short2: /* default size */
case OPSZ_4x8_short2xi8: /* default size */
#endif
case OPSZ_4_short2xi4: /* default size */
case OPSZ_4_rex8_short2: /* default size */
case OPSZ_4_rex8:
case OPSZ_4:
case OPSZ_4_reg16: /* mem size */
case OPSZ_25b: /* round up */
case OPSZ_quarter_16_vex32:
case OPSZ_quarter_16_vex32_evex64: return 4;
case OPSZ_6_irex10_short4: /* default size */
case OPSZ_6: return 6;
case OPSZ_8_of_16:
case OPSZ_half_16_vex32:
case OPSZ_8_short2:
case OPSZ_8_short4:
case OPSZ_8:
#ifdef X64
case OPSZ_4x8: /* default size */
case OPSZ_4x8_short2: /* default size */
case OPSZ_4x8_short2xi8: /* default size */
#endif
case OPSZ_8_rex16: /* default size */
case OPSZ_8_rex16_short4: /* default size */
#ifndef X64
case OPSZ_8x16: /* default size */
#endif
return 8;
case OPSZ_16:
case OPSZ_16_vex32:
case OPSZ_16_of_32:
case OPSZ_16_vex32_evex64:
#ifdef X64
case OPSZ_8x16: /* default size */
#endif
return 16;
case OPSZ_vex32_evex64: return 32;
case OPSZ_6x10:
/* table base + limit; w/ addr16, different format, but same total footprint */
return IF_X64_ELSE(10, 6);
case OPSZ_10: return 10;
case OPSZ_12:
case OPSZ_12_of_16:
case OPSZ_12_rex8_of_16:
case OPSZ_12_rex40_short6: /* default size */ return 12;
case OPSZ_14_of_16:
case OPSZ_14: return 14;
case OPSZ_15_of_16:
case OPSZ_15: return 15;
case OPSZ_20: return 20;
case OPSZ_24: return 24;
case OPSZ_28_short14: /* default size */
case OPSZ_28: return 28;
case OPSZ_32:
case OPSZ_32_short16: /* default size */ return 32;
case OPSZ_36: return 36;
case OPSZ_40: return 40;
case OPSZ_44: return 44;
case OPSZ_48: return 48;
case OPSZ_52: return 52;
case OPSZ_56: return 56;
case OPSZ_60: return 60;
case OPSZ_64: return 64;
case OPSZ_68: return 68;
case OPSZ_72: return 72;
case OPSZ_76: return 76;
case OPSZ_80: return 80;
case OPSZ_84: return 84;
case OPSZ_88: return 88;
case OPSZ_92: return 92;
case OPSZ_94: return 94;
case OPSZ_96: return 96;
case OPSZ_100: return 100;
case OPSZ_104: return 104;
case OPSZ_108_short94: /* default size */
case OPSZ_108: return 108;
case OPSZ_112: return 112;
case OPSZ_116: return 116;
case OPSZ_120: return 120;
case OPSZ_124: return 124;
case OPSZ_128: return 128;
case OPSZ_512: return 512;
case OPSZ_VAR_REGLIST: return 0; /* varies to match reglist operand */
case OPSZ_xsave:
return 0; /* > 512 bytes: client to use drutil_opnd_mem_size_in_bytes */
default: CLIENT_ASSERT(false, "opnd_size_in_bytes: invalid opnd type"); return 0;
}
}
DR_API
uint
opnd_size_in_bits(opnd_size_t size)
{
switch (size) {
case OPSZ_1b: return 1;
case OPSZ_2b: return 2;
case OPSZ_3b: return 3;
case OPSZ_4b: return 4;
case OPSZ_5b: return 5;
case OPSZ_6b: return 6;
case OPSZ_7b: return 7;
case OPSZ_9b: return 9;
case OPSZ_10b: return 10;
case OPSZ_11b: return 11;
case OPSZ_12b: return 12;
case OPSZ_20b: return 20;
case OPSZ_25b: return 25;
default: return opnd_size_in_bytes(size) * 8;
}
}
DR_API
opnd_size_t
opnd_size_from_bytes(uint bytes)
{
switch (bytes) {
case 0: return OPSZ_0;
case 1: return OPSZ_1;
case 2: return OPSZ_2;
case 3: return OPSZ_3;
case 4: return OPSZ_4;
case 6: return OPSZ_6;
case 8: return OPSZ_8;
case 10: return OPSZ_10;
case 12: return OPSZ_12;
case 14: return OPSZ_14;
case 15: return OPSZ_15;
case 16: return OPSZ_16;
case 20: return OPSZ_20;
case 24: return OPSZ_24;
case 28: return OPSZ_28;
case 32: return OPSZ_32;
case 36: return OPSZ_36;
case 40: return OPSZ_40;
case 44: return OPSZ_44;
case 48: return OPSZ_48;
case 52: return OPSZ_52;
case 56: return OPSZ_56;
case 60: return OPSZ_60;
case 64: return OPSZ_64;
case 68: return OPSZ_68;
case 72: return OPSZ_72;
case 76: return OPSZ_76;
case 80: return OPSZ_80;
case 84: return OPSZ_84;
case 88: return OPSZ_88;
case 92: return OPSZ_92;
case 94: return OPSZ_94;
case 96: return OPSZ_96;
case 100: return OPSZ_100;
case 104: return OPSZ_104;
case 108: return OPSZ_108;
case 112: return OPSZ_112;
case 116: return OPSZ_116;
case 120: return OPSZ_120;
case 124: return OPSZ_124;
case 128: return OPSZ_128;
case 512: return OPSZ_512;
default: return OPSZ_NA;
}
}
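/* Illustrative sketch, not part of the build: opnd_size_from_bytes inverts
* opnd_size_in_bytes for the fixed sizes; byte counts with no exact OPSZ_
* match yield OPSZ_NA.
*
*   opnd_size_from_bytes(opnd_size_in_bytes(OPSZ_16)) == OPSZ_16
*   opnd_size_from_bytes(5) == OPSZ_NA
*/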
/* shrinks all 32-bit registers in opnd to 16 bits. also shrinks the size of
* immed ints and mem refs from OPSZ_4 to OPSZ_2.
*/
opnd_t
opnd_shrink_to_16_bits(opnd_t opnd)
{
int i;
for (i = 0; i < opnd_num_regs_used(opnd); i++) {
reg_id_t reg = opnd_get_reg_used(opnd, i);
if (reg >= REG_START_32 && reg <= REG_STOP_32) {
opnd_replace_reg(&opnd, reg, reg_32_to_16(reg));
}
}
if ((opnd_is_immed_int(opnd) || opnd_is_memory_reference(opnd)) &&
opnd_get_size(opnd) == OPSZ_4) /* OPSZ_*_short2 will shrink at encode time */
opnd_set_size(&opnd, OPSZ_2);
return opnd;
}
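/* Illustrative sketch, not part of the build: shrinking a 4-byte reference
* through [eax + ebx*2 + 8] yields a 2-byte reference through [ax + bx*2 + 8].
*
*   opnd_t mem = opnd_create_base_disp(DR_REG_EAX, DR_REG_EBX, 2, 8, OPSZ_4);
*   mem = opnd_shrink_to_16_bits(mem);
*   // opnd_get_base(mem) == DR_REG_AX, opnd_get_size(mem) == OPSZ_2
*/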
#ifdef X64
/* shrinks all 64-bit registers in opnd to 32 bits. also shrinks the size of
* immed ints and mem refs from OPSZ_8 to OPSZ_4.
*/
opnd_t
opnd_shrink_to_32_bits(opnd_t opnd)
{
int i;
for (i = 0; i < opnd_num_regs_used(opnd); i++) {
reg_id_t reg = opnd_get_reg_used(opnd, i);
if (reg >= REG_START_64 && reg <= REG_STOP_64) {
opnd_replace_reg(&opnd, reg, reg_64_to_32(reg));
}
}
if ((opnd_is_immed_int(opnd) || opnd_is_memory_reference(opnd)) &&
opnd_get_size(opnd) == OPSZ_8)
opnd_set_size(&opnd, OPSZ_4);
return opnd;
}
#endif
static reg_t
reg_get_value_helper(reg_id_t reg, priv_mcontext_t *mc)
{
CLIENT_ASSERT(reg_is_pointer_sized(reg),
"reg_get_value_helper(): internal error non-ptr sized reg");
if (reg == REG_NULL)
return 0;
return *(reg_t *)((byte *)mc + opnd_get_reg_mcontext_offs(reg));
}
/* Returns the value of the register reg, selected from the passed-in
* register values.
*/
reg_t
reg_get_value_priv(reg_id_t reg, priv_mcontext_t *mc)
{
if (reg == REG_NULL)
return 0;
#ifdef X64
if (reg >= REG_START_64 && reg <= REG_STOP_64)
return reg_get_value_helper(reg, mc);
if (reg >= REG_START_32 && reg <= REG_STOP_32) {
reg_t val = reg_get_value_helper(dr_reg_fixer[reg], mc);
return (val & 0x00000000ffffffff);
}
#else
if (reg >= REG_START_32 && reg <= REG_STOP_32) {
return reg_get_value_helper(reg, mc);
}
#endif
#ifdef X86
if (reg >= REG_START_8 && reg <= REG_STOP_8) {
reg_t val = reg_get_value_helper(dr_reg_fixer[reg], mc);
if (reg >= REG_AH && reg <= REG_BH)
return ((val & 0x0000ff00) >> 8);
else /* all others are the lower 8 bits */
return (val & 0x000000ff);
}
if (reg >= REG_START_16 && reg <= REG_STOP_16) {
reg_t val = reg_get_value_helper(dr_reg_fixer[reg], mc);
return (val & 0x0000ffff);
}
#endif
/* mmx and segment cannot be part of address.
* xmm/ymm/zmm can with VSIB, but we'd have to either return a larger type,
* or take in an offset within the xmm/ymm/zmm register -- so we leave this
* routine supporting only GPR and have a separate routine for VSIB
* (opnd_compute_VSIB_index()).
* If we want to use this routine for more than just effective address
* calculations, we need to pass in mmx/xmm state, or grab it here; we
* would then need to check dr_mcontext_t.size.
*/
CLIENT_ASSERT(false, "reg_get_value: unsupported register");
return 0;
}
DR_API
reg_t
reg_get_value(reg_id_t reg, dr_mcontext_t *mc)
{
/* only supports GPRs so we ignore mc.size */
return reg_get_value_priv(reg, dr_mcontext_as_priv_mcontext(mc));
}
DR_API
/* Supports all but floating-point */
bool
reg_get_value_ex(reg_id_t reg, dr_mcontext_t *mc, OUT byte *val)
{
#ifdef X86
if (reg >= DR_REG_START_MMX && reg <= DR_REG_STOP_MMX) {
get_mmx_val((uint64 *)val, reg - DR_REG_START_MMX);
} else if (reg >= DR_REG_START_XMM && reg <= DR_REG_STOP_XMM) {
if (!TEST(DR_MC_MULTIMEDIA, mc->flags) || mc->size != sizeof(dr_mcontext_t))
return false;
memcpy(val, &mc->simd[reg - DR_REG_START_XMM], XMM_REG_SIZE);
} else if (reg >= DR_REG_START_YMM && reg <= DR_REG_STOP_YMM) {
if (!TEST(DR_MC_MULTIMEDIA, mc->flags) || mc->size != sizeof(dr_mcontext_t))
return false;
memcpy(val, &mc->simd[reg - DR_REG_START_YMM], YMM_REG_SIZE);
} else if (reg >= DR_REG_START_ZMM && reg <= DR_REG_STOP_ZMM) {
if (!TEST(DR_MC_MULTIMEDIA, mc->flags) || mc->size != sizeof(dr_mcontext_t))
return false;
memcpy(val, &mc->simd[reg - DR_REG_START_ZMM], ZMM_REG_SIZE);
} else if (reg >= DR_REG_START_OPMASK && reg <= DR_REG_STOP_OPMASK) {
if (!TEST(DR_MC_MULTIMEDIA, mc->flags) || mc->size != sizeof(dr_mcontext_t))
return false;
memcpy(val, &mc->opmask[reg - DR_REG_START_OPMASK], OPMASK_AVX512BW_REG_SIZE);
} else {
reg_t regval = reg_get_value(reg, mc);
*(reg_t *)val = regval;
}
#else
CLIENT_ASSERT(false, "NYI i#1551");
#endif
return true;
}
/* Sets the register reg in the passed in mcontext to value. Currently only works
* with ptr sized registers. See reg_set_value_ex to handle other sized registers. */
void
reg_set_value_priv(reg_id_t reg, priv_mcontext_t *mc, reg_t value)
{
CLIENT_ASSERT(reg_is_pointer_sized(reg),
"reg_get_value_helper(): internal error non-ptr sized reg");
if (reg == REG_NULL)
return;
*(reg_t *)((byte *)mc + opnd_get_reg_mcontext_offs(reg)) = value;
}
bool
reg_set_value_ex_priv(reg_id_t reg, priv_mcontext_t *mc, byte *val_buf)
{
#ifdef X86
CLIENT_ASSERT(reg != REG_NULL, "REG_NULL was passed.");
dr_zmm_t *simd = (dr_zmm_t *)((byte *)mc + SIMD_OFFSET);
if (reg_is_gpr(reg)) {
reg_t *value = (reg_t *)val_buf;
reg_set_value_priv(reg, mc, *value);
} else if (reg >= DR_REG_START_XMM && reg <= DR_REG_STOP_XMM) {
memcpy(&(simd[reg - DR_REG_START_XMM]), val_buf, XMM_REG_SIZE);
} else if (reg >= DR_REG_START_YMM && reg <= DR_REG_STOP_YMM) {
memcpy(&(simd[reg - DR_REG_START_YMM]), val_buf, YMM_REG_SIZE);
} else if (reg >= DR_REG_START_ZMM && reg <= DR_REG_STOP_ZMM) {
memcpy(&(simd[reg - DR_REG_START_ZMM]), val_buf, ZMM_REG_SIZE);
} else {
/* Note that we can reach here for MMX registers. */
CLIENT_ASSERT(false, "NYI i#3504");
return false;
}
return true;
#else
CLIENT_ASSERT(false, "NYI i#1551, i#3504");
return false;
#endif
}
DR_API
void
reg_set_value(reg_id_t reg, dr_mcontext_t *mc, reg_t value)
{
/* only supports GPRs so we ignore mc.size */
reg_set_value_priv(reg, dr_mcontext_as_priv_mcontext(mc), value);
}
DR_API
bool
reg_set_value_ex(reg_id_t reg, dr_mcontext_t *mc, IN byte *val_buf)
{
return reg_set_value_ex_priv(reg, dr_mcontext_as_priv_mcontext(mc), val_buf);
}
/* helper for sharing w/ VSIB computations */
app_pc
opnd_compute_address_helper(opnd_t opnd, priv_mcontext_t *mc, ptr_int_t scaled_index)
{
reg_id_t base;
int disp;
app_pc seg_base = NULL;
app_pc addr = NULL;
CLIENT_ASSERT(opnd_is_memory_reference(opnd),
"opnd_compute_address: must pass memory reference");
if (opnd_is_far_base_disp(opnd)) {
#ifdef X86
# ifdef STANDALONE_DECODER
seg_base = NULL; /* not supported */
# else
seg_base = get_app_segment_base(opnd_get_segment(opnd));
if (seg_base == (app_pc)POINTER_MAX) /* failure */
seg_base = NULL;
# endif
#endif
}
#if defined(X64) || defined(ARM)
if (IF_X64(opnd_is_abs_addr(opnd) ||) opnd_is_rel_addr(opnd)) {
return (app_pc)opnd_get_addr(opnd) + (ptr_uint_t)seg_base;
}
#endif
addr = seg_base;
base = opnd_get_base(opnd);
disp = opnd_get_disp(opnd);
d_r_logopnd(get_thread_private_dcontext(), 4, opnd, "opnd_compute_address for");
addr += reg_get_value_priv(base, mc);
LOG(THREAD_GET, LOG_ALL, 4, "\tbase => " PFX "\n", addr);
addr += scaled_index;
LOG(THREAD_GET, LOG_ALL, 4, "\tindex,scale => " PFX "\n", addr);
addr += disp;
LOG(THREAD_GET, LOG_ALL, 4, "\tdisp => " PFX "\n", addr);
return addr;
}
/* Returns the effective address of opnd, computed using the passed-in
* register values. If opnd is a far address, ignores that aspect
* except for TLS references on Windows (fs: for 32-bit, gs: for 64-bit)
* or typical fs: or gs: references on Linux. For far addresses the
* calling thread's segment selector is used.
*
* XXX: this does not support VSIB. All callers should really be switched to
* use instr_compute_address_ex_priv().
*/
app_pc
opnd_compute_address_priv(opnd_t opnd, priv_mcontext_t *mc)
{
ptr_int_t scaled_index = 0;
if (opnd_is_base_disp(opnd)) {
reg_id_t index = opnd_get_index(opnd);
#if defined(X86)
ptr_int_t scale = opnd_get_scale(opnd);
scaled_index = scale * reg_get_value_priv(index, mc);
#elif defined(AARCH64)
bool scaled = false;
uint amount = 0;
dr_extend_type_t type = opnd_get_index_extend(opnd, &scaled, &amount);
reg_t index_val = reg_get_value_priv(index, mc);
reg_t extended = 0;
uint msb = 0;
switch (type) {
default: CLIENT_ASSERT(false, "Unsupported extend type"); return NULL;
case DR_EXTEND_UXTW: extended = (index_val << (63u - 31u)) >> (63u - 31u); break;
case DR_EXTEND_SXTW:
extended = (index_val << (63u - 31u)) >> (63u - 31u);
msb = extended >> 31u;
if (msb == 1) {
extended = ((~0ull) << 32u) | extended;
}
break;
case DR_EXTEND_UXTX:
case DR_EXTEND_SXTX: extended = index_val; break;
}
if (scaled) {
scaled_index = extended << amount;
} else {
scaled_index = extended;
}
#elif defined(ARM)
uint amount;
dr_shift_type_t type = opnd_get_index_shift(opnd, &amount);
reg_t index_val = reg_get_value_priv(index, mc);
switch (type) {
case DR_SHIFT_LSL: scaled_index = index_val << amount; break;
case DR_SHIFT_LSR: scaled_index = index_val >> amount; break;
case DR_SHIFT_ASR: scaled_index = (ptr_int_t)index_val >> amount; break;
case DR_SHIFT_ROR:
scaled_index =
(index_val >> amount) | (index_val << (sizeof(reg_t) * 8 - amount));
break;
case DR_SHIFT_RRX:
scaled_index = (index_val >> 1) |
(TEST(EFLAGS_C, mc->cpsr) ? (1 << (sizeof(reg_t) * 8 - 1)) : 0);
break;
default: scaled_index = index_val;
}
#endif
}
return opnd_compute_address_helper(opnd, mc, scaled_index);
}
DR_API
app_pc
opnd_compute_address(opnd_t opnd, dr_mcontext_t *mc)
{
/* only uses GPRs so we ignore mc.size */
return opnd_compute_address_priv(opnd, dr_mcontext_as_priv_mcontext(mc));
}
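/* Illustrative sketch, not part of the build, assuming a client context where
* the GPR values are valid (mem_opnd is a hypothetical memory operand):
*
*   dr_mcontext_t mc = { sizeof(mc), DR_MC_CONTROL | DR_MC_INTEGER };
*   dr_get_mcontext(dr_get_current_drcontext(), &mc);
*   app_pc ea = opnd_compute_address(mem_opnd, &mc);
*/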
/***************************************************************************
*** Register utility functions
***************************************************************************/
const char *
get_register_name(reg_id_t reg)
{
return reg_names[reg];
}
reg_id_t
reg_to_pointer_sized(reg_id_t reg)
{
return dr_reg_fixer[reg];
}
reg_id_t
reg_32_to_16(reg_id_t reg)
{
#ifdef X86
CLIENT_ASSERT(reg >= REG_START_32 && reg <= REG_STOP_32,
"reg_32_to_16: passed non-32-bit reg");
return (reg - REG_START_32) + REG_START_16;
#elif defined(AARCHXX)
CLIENT_ASSERT(false, "reg_32_to_16 not supported on ARM");
return REG_NULL;
#endif
}
reg_id_t
reg_32_to_8(reg_id_t reg)
{
#ifdef X86
reg_id_t r8;
CLIENT_ASSERT(reg >= REG_START_32 && reg <= REG_STOP_32,
"reg_32_to_16: passed non-32-bit reg");
r8 = (reg - REG_START_32) + REG_START_8;
if (r8 >= REG_START_x86_8 && r8 <= REG_STOP_x86_8) {
# ifdef X64
r8 += (REG_START_x64_8 - REG_START_x86_8);
# else
r8 = REG_NULL;
# endif
}
return r8;
#elif defined(AARCHXX)
CLIENT_ASSERT(false, "reg_32_to_8 not supported on ARM");
return REG_NULL;
#endif
}
#ifdef X64
reg_id_t
reg_32_to_64(reg_id_t reg)
{
# ifdef AARCH64
if (reg == DR_REG_WZR)
return DR_REG_XZR;
# endif
CLIENT_ASSERT(reg >= REG_START_32 && reg <= REG_STOP_32,
"reg_32_to_64: passed non-32-bit reg");
return (reg - REG_START_32) + REG_START_64;
}
reg_id_t
reg_64_to_32(reg_id_t reg)
{
# ifdef AARCH64
if (reg == DR_REG_XZR)
return DR_REG_WZR;
# endif
CLIENT_ASSERT(reg >= REG_START_64 && reg <= REG_STOP_64,
"reg_64_to_32: passed non-64-bit reg");
return (reg - REG_START_64) + REG_START_32;
}
# ifdef X86
bool
reg_is_extended(reg_id_t reg)
{
/* Note that we do consider spl, bpl, sil, and dil to be "extended" */
return ((reg >= REG_START_64 + 8 && reg <= REG_STOP_64) ||
(reg >= REG_START_32 + 8 && reg <= REG_STOP_32) ||
(reg >= REG_START_16 + 8 && reg <= REG_STOP_16) ||
(reg >= REG_START_8 + 8 && reg <= REG_STOP_8) ||
(reg >= REG_START_x64_8 && reg <= REG_STOP_x64_8) ||
((reg >= DR_REG_START_XMM + 8 && reg <= DR_REG_START_XMM + 15) ||
(reg >= DR_REG_START_XMM + 24 && reg <= DR_REG_STOP_XMM)) ||
((reg >= DR_REG_START_YMM + 8 && reg <= DR_REG_START_YMM + 15) ||
(reg >= DR_REG_START_YMM + 24 && reg <= DR_REG_STOP_YMM)) ||
((reg >= DR_REG_START_ZMM + 8 && reg <= DR_REG_START_ZMM + 15) ||
(reg >= DR_REG_START_ZMM + 24 && reg <= DR_REG_STOP_ZMM)) ||
(reg >= REG_START_DR + 8 && reg <= REG_STOP_DR) ||
(reg >= REG_START_CR + 8 && reg <= REG_STOP_CR));
}
bool
reg_is_avx512_extended(reg_id_t reg)
{
/* AVX-512 extends the SIMD register file to 32 registers; 16-31 need EVEX. */
return ((reg >= DR_REG_START_XMM + 16 && reg <= DR_REG_STOP_XMM) ||
(reg >= DR_REG_START_YMM + 16 && reg <= DR_REG_STOP_YMM) ||
(reg >= DR_REG_START_ZMM + 16 && reg <= DR_REG_STOP_ZMM));
}
# endif
#endif
reg_id_t
reg_32_to_opsz(reg_id_t reg, opnd_size_t sz)
{
CLIENT_ASSERT((reg >= REG_START_32 && reg <= REG_STOP_32)
IF_AARCH64(|| reg == DR_REG_XZR || reg == DR_REG_WZR),
"reg_32_to_opsz: passed non-32-bit reg");
/* On ARM, we use the same reg for the size of 8, 16, and 32 bit */
if (sz == OPSZ_4)
return reg;
else if (sz == OPSZ_2)
return IF_AARCHXX_ELSE(reg, reg_32_to_16(reg));
else if (sz == OPSZ_1)
return IF_AARCHXX_ELSE(reg, reg_32_to_8(reg));
#ifdef X64
else if (sz == OPSZ_8)
return reg_32_to_64(reg);
#endif
else
CLIENT_ASSERT(false, "reg_32_to_opsz: invalid size parameter");
return reg;
}
static reg_id_t
reg_resize_to_zmm(reg_id_t simd_reg)
{
#ifdef X86
if (reg_is_strictly_xmm(simd_reg)) {
return simd_reg - DR_REG_START_XMM + DR_REG_START_ZMM;
} else if (reg_is_strictly_ymm(simd_reg)) {
return simd_reg - DR_REG_START_YMM + DR_REG_START_ZMM;
} else if (reg_is_strictly_zmm(simd_reg)) {
return simd_reg;
}
CLIENT_ASSERT(false, "Not a simd register.");
#endif
return DR_REG_INVALID;
}
static reg_id_t
reg_resize_to_ymm(reg_id_t simd_reg)
{
#ifdef X86
if (reg_is_strictly_xmm(simd_reg)) {
return simd_reg - DR_REG_START_XMM + DR_REG_START_YMM;
} else if (reg_is_strictly_ymm(simd_reg)) {
return simd_reg;
} else if (reg_is_strictly_zmm(simd_reg)) {
return simd_reg - DR_REG_START_ZMM + DR_REG_START_YMM;
}
CLIENT_ASSERT(false, "not a simd register.");
#endif
return DR_REG_INVALID;
}
static reg_id_t
reg_resize_to_xmm(reg_id_t simd_reg)
{
#ifdef X86
if (reg_is_strictly_xmm(simd_reg)) {
return simd_reg;
} else if (reg_is_strictly_ymm(simd_reg)) {
return simd_reg - DR_REG_START_YMM + DR_REG_START_XMM;
} else if (reg_is_strictly_zmm(simd_reg)) {
return simd_reg - DR_REG_START_ZMM + DR_REG_START_XMM;
}
CLIENT_ASSERT(false, "not a simd register");
#endif
return DR_REG_INVALID;
}
reg_id_t
reg_resize_to_opsz(reg_id_t reg, opnd_size_t sz)
{
if (reg_is_gpr(reg) IF_AARCH64(|| reg == DR_REG_XZR || reg == DR_REG_WZR)) {
reg = reg_to_pointer_sized(reg);
return reg_32_to_opsz(IF_X64_ELSE(reg_64_to_32(reg), reg), sz);
} else if (reg_is_strictly_xmm(reg) || reg_is_strictly_ymm(reg) ||
reg_is_strictly_zmm(reg)) {
if (sz == OPSZ_16) {
return reg_resize_to_xmm(reg);
} else if (sz == OPSZ_32) {
return reg_resize_to_ymm(reg);
} else if (sz == OPSZ_64) {
return reg_resize_to_zmm(reg);
} else {
CLIENT_ASSERT(false, "invalid size for simd register");
}
} else if (reg_is_simd(reg)) {
if (reg_get_size(reg) == sz)
return reg;
/* XXX i#1569: Add aarchxx SIMD conversions here. */
CLIENT_ASSERT(false, "reg_resize_to_opsz: unsupported reg");
} else {
CLIENT_ASSERT(false, "reg_resize_to_opsz: unsupported reg");
}
return DR_REG_INVALID;
}
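/* Illustrative examples of the conversions above (values follow from the
 * enum arithmetic; shown only for clarity):
 *   reg_resize_to_opsz(DR_REG_EAX, OPSZ_8)   => DR_REG_RAX (x64)
 *   reg_resize_to_opsz(DR_REG_XMM3, OPSZ_64) => DR_REG_ZMM3
 *   reg_resize_to_opsz(DR_REG_ZMM5, OPSZ_16) => DR_REG_XMM5
 */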
int
reg_parameter_num(reg_id_t reg)
{
int r;
for (r = 0; r < NUM_REGPARM; r++) {
if (reg == d_r_regparms[r])
return r;
}
return -1;
}
int
opnd_get_reg_mcontext_offs(reg_id_t reg)
{
return opnd_get_reg_dcontext_offs(reg) - MC_OFFS;
}
bool
reg_overlap(reg_id_t r1, reg_id_t r2)
{
if (r1 == REG_NULL || r2 == REG_NULL)
return false;
#ifdef X86
/* The XH registers do NOT overlap with the XL registers; else, the
* dr_reg_fixer is the answer.
*/
if ((r1 >= REG_START_8HL && r1 <= REG_STOP_8HL) &&
(r2 >= REG_START_8HL && r2 <= REG_STOP_8HL) && r1 != r2)
return false;
#endif
return (dr_reg_fixer[r1] == dr_reg_fixer[r2]);
}
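/* Examples (x86, for illustration): reg_overlap(DR_REG_AX, DR_REG_EAX) is
 * true since both fix to the same canonical register, while
 * reg_overlap(DR_REG_AH, DR_REG_AL) is false per the 8HL special case above.
 */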
/* returns the register's representation as 3 bits in a modrm byte;
 * callers do not expect it to fail
*/
enum { REG_INVALID_BITS = 0x0 }; /* returns a valid register nevertheless */
byte
reg_get_bits(reg_id_t reg)
{
#ifdef X86
# ifdef X64
if (reg >= REG_START_64 && reg <= REG_STOP_64)
return (byte)((reg - REG_START_64) % 8);
# endif
if (reg >= REG_START_32 && reg <= REG_STOP_32)
return (byte)((reg - REG_START_32) % 8);
if (reg >= REG_START_8 && reg <= REG_R15L)
return (byte)((reg - REG_START_8) % 8);
# ifdef X64
if (reg >= REG_START_x64_8 && reg <= REG_STOP_x64_8) /* alternates to AH-BH */
return (byte)((reg - REG_START_x64_8 + 4) % 8);
# endif
if (reg >= REG_START_16 && reg <= REG_STOP_16)
return (byte)((reg - REG_START_16) % 8);
if (reg >= REG_START_MMX && reg <= REG_STOP_MMX)
return (byte)((reg - REG_START_MMX) % 8);
if (reg >= DR_REG_START_XMM && reg <= DR_REG_STOP_XMM)
return (byte)((reg - DR_REG_START_XMM) % 8);
if (reg >= DR_REG_START_YMM && reg <= DR_REG_STOP_YMM)
return (byte)((reg - DR_REG_START_YMM) % 8);
if (reg >= DR_REG_START_ZMM && reg <= DR_REG_STOP_ZMM)
return (byte)((reg - DR_REG_START_ZMM) % 8);
if (reg >= DR_REG_START_BND && reg <= DR_REG_STOP_BND)
return (byte)((reg - DR_REG_START_BND) % 4);
if (reg >= DR_REG_START_OPMASK && reg <= DR_REG_STOP_OPMASK)
return (byte)((reg - DR_REG_START_OPMASK) % 8);
if (reg >= REG_START_SEGMENT && reg <= REG_STOP_SEGMENT)
return (byte)((reg - REG_START_SEGMENT) % 8);
if (reg >= REG_START_DR && reg <= REG_STOP_DR)
return (byte)((reg - REG_START_DR) % 8);
if (reg >= REG_START_CR && reg <= REG_STOP_CR)
return (byte)((reg - REG_START_CR) % 8);
#else
CLIENT_ASSERT(false, "i#1551: NYI");
#endif
CLIENT_ASSERT(false, "reg_get_bits: invalid register");
return REG_INVALID_BITS; /* callers don't expect a failure - return some value */
}
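/* For illustration: on x64, reg_get_bits(DR_REG_R10) returns 2, the low
 * three encoding bits of r10 (0b010); the extension bit selecting r8-r15
 * over rax-rdi travels separately in the REX (or VEX/EVEX) prefix.
 */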
/* returns the OPSZ_ field appropriate for the register */
opnd_size_t
reg_get_size(reg_id_t reg)
{
#ifdef X64
if (reg >= REG_START_64 && reg <= REG_STOP_64)
return OPSZ_8;
#endif
if (reg >= REG_START_32 && reg <= REG_STOP_32)
return OPSZ_4;
#ifdef X86
if (reg >= REG_START_8 && reg <= REG_STOP_8)
return OPSZ_1;
#endif
#if defined(X86) && defined(X64)
if (reg >= REG_START_x64_8 && reg <= REG_STOP_x64_8) /* alternates to AH-BH */
return OPSZ_1;
#endif
#ifdef X86
if (reg >= REG_START_16 && reg <= REG_STOP_16)
return OPSZ_2;
if (reg >= REG_START_MMX && reg <= REG_STOP_MMX)
return OPSZ_8;
if (reg >= DR_REG_START_XMM && reg <= DR_REG_STOP_XMM)
return OPSZ_16;
if (reg >= DR_REG_START_YMM && reg <= DR_REG_STOP_YMM)
return OPSZ_32;
if (reg >= DR_REG_START_ZMM && reg <= DR_REG_STOP_ZMM)
return OPSZ_64;
if (reg >= DR_REG_START_OPMASK && reg <= DR_REG_STOP_OPMASK) {
/* The default is 16 bits wide. The register may be up to 64 bits wide with
* the AVX-512BW extension, which depends on the processor. The number of
* bits actually used depends on the vector type of the instruction.
*/
return OPSZ_8;
}
if (reg >= DR_REG_START_BND && reg <= DR_REG_STOP_BND)
return IF_X64_ELSE(OPSZ_16, OPSZ_8);
if (reg >= REG_START_SEGMENT && reg <= REG_STOP_SEGMENT)
return OPSZ_2;
if (reg >= REG_START_DR && reg <= REG_STOP_DR)
return IF_X64_ELSE(OPSZ_8, OPSZ_4);
if (reg >= REG_START_CR && reg <= REG_STOP_CR)
return IF_X64_ELSE(OPSZ_8, OPSZ_4);
/* i#176 add reg size handling for floating point registers */
if (reg >= REG_START_FLOAT && reg <= REG_STOP_FLOAT)
return OPSZ_10;
#elif defined(AARCHXX)
if (reg >= DR_REG_Q0 && reg <= DR_REG_Q31)
return OPSZ_16;
if (reg >= DR_REG_D0 && reg <= DR_REG_D31)
return OPSZ_8;
if (reg >= DR_REG_S0 && reg <= DR_REG_S31)
return OPSZ_4;
if (reg >= DR_REG_H0 && reg <= DR_REG_H31)
return OPSZ_2;
if (reg >= DR_REG_B0 && reg <= DR_REG_B31)
return OPSZ_1;
# ifdef ARM
if (reg >= DR_REG_CR0 && reg <= DR_REG_CR15)
return OPSZ_PTR;
if (reg >= DR_REG_CPSR && reg <= DR_REG_FPSCR)
return OPSZ_4;
# elif defined(AARCH64)
if (reg == DR_REG_XZR)
return OPSZ_8;
if (reg == DR_REG_WZR)
return OPSZ_4;
if (reg >= DR_REG_Z0 && reg <= DR_REG_Z31)
return OPSZ_SCALABLE;
if (reg >= DR_REG_P0 && reg <= DR_REG_P15)
return OPSZ_SCALABLE_PRED;
# endif
if (reg == DR_REG_TPIDRURW || reg == DR_REG_TPIDRURO)
return OPSZ_PTR;
#endif
CLIENT_ASSERT(false, "reg_get_size: invalid register");
return OPSZ_NA;
}
#ifndef STANDALONE_DECODER
/****************************************************************************/
/* dcontext convenience routines */
static opnd_t
dcontext_opnd_common(dcontext_t *dcontext, bool absolute, reg_id_t basereg, int offs,
opnd_size_t size)
{
IF_X64(ASSERT_NOT_IMPLEMENTED(!absolute));
/* offs is not raw offset, but includes upcontext size, so we
* can tell unprotected from normal
*/
if (TEST(SELFPROT_DCONTEXT, dynamo_options.protect_mask) &&
offs < sizeof(unprotected_context_t)) {
return opnd_create_base_disp(
absolute ? REG_NULL : (basereg == REG_NULL ? REG_DCXT_PROT : basereg),
REG_NULL, 0,
((int)(ptr_int_t)(absolute ? dcontext->upcontext.separate_upcontext : 0)) +
offs,
size);
} else {
if (offs >= sizeof(unprotected_context_t))
offs -= sizeof(unprotected_context_t);
return opnd_create_base_disp(
absolute ? REG_NULL : (basereg == REG_NULL ? REG_DCXT : basereg), REG_NULL, 0,
((int)(ptr_int_t)(absolute ? dcontext : 0)) + offs, size);
}
}
opnd_t
opnd_create_dcontext_field_sz(dcontext_t *dcontext, int offs, opnd_size_t sz)
{
return dcontext_opnd_common(dcontext, true, REG_NULL, offs, sz);
}
opnd_t
opnd_create_dcontext_field(dcontext_t *dcontext, int offs)
{
return dcontext_opnd_common(dcontext, true, REG_NULL, offs, OPSZ_PTR);
}
/* use basereg==REG_NULL to get default (xdi, or xsi for upcontext) */
opnd_t
opnd_create_dcontext_field_via_reg_sz(dcontext_t *dcontext, reg_id_t basereg, int offs,
opnd_size_t sz)
{
return dcontext_opnd_common(dcontext, false, basereg, offs, sz);
}
/* use basereg==REG_NULL to get default (xdi, or xsi for upcontext) */
opnd_t
opnd_create_dcontext_field_via_reg(dcontext_t *dcontext, reg_id_t basereg, int offs)
{
return dcontext_opnd_common(dcontext, false, basereg, offs, OPSZ_PTR);
}
opnd_t
opnd_create_dcontext_field_byte(dcontext_t *dcontext, int offs)
{
return dcontext_opnd_common(dcontext, true, REG_NULL, offs, OPSZ_1);
}
opnd_t
update_dcontext_address(opnd_t op, dcontext_t *old_dcontext, dcontext_t *new_dcontext)
{
int offs;
CLIENT_ASSERT(opnd_is_near_base_disp(op) && opnd_get_base(op) == REG_NULL &&
opnd_get_index(op) == REG_NULL,
"update_dcontext_address: invalid opnd");
IF_X64(ASSERT_NOT_IMPLEMENTED(false));
offs = opnd_get_disp(op) - (uint)(ptr_uint_t)old_dcontext;
if (offs >= 0 && offs < sizeof(dcontext_t)) {
/* don't pass raw offset, add in upcontext size */
offs += sizeof(unprotected_context_t);
return opnd_create_dcontext_field(new_dcontext, offs);
}
/* some fields are in a separate memory region! */
else {
CLIENT_ASSERT(TEST(SELFPROT_DCONTEXT, dynamo_options.protect_mask),
"update_dcontext_address: inconsistent layout");
IF_X64(ASSERT_NOT_IMPLEMENTED(false));
offs = opnd_get_disp(op) -
(uint)(ptr_uint_t)(old_dcontext->upcontext.separate_upcontext);
if (offs >= 0 && offs < sizeof(unprotected_context_t)) {
/* raw offs is what we want for upcontext */
return opnd_create_dcontext_field(new_dcontext, offs);
}
}
/* not a dcontext offset: just return original value */
return op;
}
opnd_t
opnd_create_tls_slot(int offs)
{
return opnd_create_sized_tls_slot(offs, OPSZ_PTR);
}
#endif /* !STANDALONE_DECODER */
/****************************************************************************/
| 1 | 25,482 | Probably better to ask @AssadHashmi or another AArch64 expert for a review rather than me -- @AssadHashmi if you could confirm that these status registers are 64-bit despite having only a few fields? | DynamoRIO-dynamorio | c |
@@ -684,11 +684,10 @@ is_private_key = lambda x: is_xprv(x) or is_private_key_list(x)
is_bip32_key = lambda x: is_xprv(x) or is_xpub(x)
-def bip44_derivation(account_id):
- if bitcoin.TESTNET:
- return "m/44'/1'/%d'"% int(account_id)
- else:
- return "m/44'/0'/%d'"% int(account_id)
+def bip44_derivation(account_id, segwit=False):
+ bip = 49 if segwit else 44
+ coin = 1 if bitcoin.TESTNET else 0
+ return "m/%d'/%d'/%d'" % (bip, coin, int(account_id))
def from_seed(seed, passphrase):
t = seed_type(seed) | 1 | #!/usr/bin/env python2
# -*- mode: python -*-
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2016 The Electrum developers
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import struct
from unicodedata import normalize
from .version import *
from . import bitcoin
from .bitcoin import pw_encode, pw_decode, bip32_root, bip32_private_derivation, bip32_public_derivation, bip32_private_key, deserialize_xprv, deserialize_xpub
from .bitcoin import public_key_from_private_key, public_key_to_p2pkh
from .bitcoin import *
from .bitcoin import is_old_seed, is_new_seed, is_seed
from .util import PrintError, InvalidPassword, hfu
from .mnemonic import Mnemonic, load_wordlist
class KeyStore(PrintError):
def has_seed(self):
return False
def is_watching_only(self):
return False
def can_import(self):
return False
def get_tx_derivations(self, tx):
keypairs = {}
for txin in tx.inputs():
num_sig = txin.get('num_sig')
if num_sig is None:
continue
x_signatures = txin['signatures']
signatures = [sig for sig in x_signatures if sig]
if len(signatures) == num_sig:
# input is complete
continue
for k, x_pubkey in enumerate(txin['x_pubkeys']):
if x_signatures[k] is not None:
# this pubkey already signed
continue
derivation = self.get_pubkey_derivation(x_pubkey)
if not derivation:
continue
keypairs[x_pubkey] = derivation
return keypairs
def can_sign(self, tx):
if self.is_watching_only():
return False
return bool(self.get_tx_derivations(tx))
def is_segwit(self):
return False
class Software_KeyStore(KeyStore):
def __init__(self):
KeyStore.__init__(self)
def may_have_password(self):
return not self.is_watching_only()
def sign_message(self, sequence, message, password):
sec = self.get_private_key(sequence, password)
key = regenerate_key(sec)
compressed = is_compressed(sec)
return key.sign_message(message, compressed)
def decrypt_message(self, sequence, message, password):
sec = self.get_private_key(sequence, password)
ec = regenerate_key(sec)
decrypted = ec.decrypt_message(message)
return decrypted
def sign_transaction(self, tx, password):
if self.is_watching_only():
return
# Raise if password is not correct.
self.check_password(password)
# Add private keys
keypairs = self.get_tx_derivations(tx)
for k, v in keypairs.items():
keypairs[k] = self.get_private_key(v, password)
# Sign
if keypairs:
tx.sign(keypairs)
class Imported_KeyStore(Software_KeyStore):
# keystore for imported private keys
def __init__(self, d):
Software_KeyStore.__init__(self)
self.keypairs = d.get('keypairs', {})
def is_deterministic(self):
return False
def can_change_password(self):
return True
def get_master_public_key(self):
return None
def dump(self):
return {
'type': 'imported',
'keypairs': self.keypairs,
}
def can_import(self):
return True
def check_password(self, password):
pubkey = list(self.keypairs.keys())[0]
self.get_private_key(pubkey, password)
def import_key(self, sec, password):
try:
pubkey = public_key_from_private_key(sec)
except Exception:
raise BaseException('Invalid private key')
# allow overwrite
self.keypairs[pubkey] = pw_encode(sec, password)
return pubkey
def delete_imported_key(self, key):
self.keypairs.pop(key)
def get_private_key(self, pubkey, password):
pk = pw_decode(self.keypairs[pubkey], password)
# this checks the password
if pubkey != public_key_from_private_key(pk):
raise InvalidPassword()
return pk
def get_pubkey_derivation(self, x_pubkey):
if x_pubkey[0:2] in ['02', '03', '04']:
if x_pubkey in self.keypairs.keys():
return x_pubkey
elif x_pubkey[0:2] == 'fd':
# fixme: this assumes p2pkh
_, addr = xpubkey_to_address(x_pubkey)
for pubkey in self.keypairs.keys():
if public_key_to_p2pkh(bfh(pubkey)) == addr:
return pubkey
def update_password(self, old_password, new_password):
self.check_password(old_password)
if new_password == '':
new_password = None
for k, v in self.keypairs.items():
b = pw_decode(v, old_password)
c = pw_encode(b, new_password)
self.keypairs[k] = c
class Deterministic_KeyStore(Software_KeyStore):
def __init__(self, d):
Software_KeyStore.__init__(self)
self.seed = d.get('seed', '')
self.passphrase = d.get('passphrase', '')
def is_deterministic(self):
return True
def dump(self):
d = {}
if self.seed:
d['seed'] = self.seed
if self.passphrase:
d['passphrase'] = self.passphrase
return d
def has_seed(self):
return bool(self.seed)
def is_watching_only(self):
return not self.has_seed()
def can_change_password(self):
return not self.is_watching_only()
def add_seed(self, seed):
if self.seed:
raise Exception("a seed exists")
self.seed = self.format_seed(seed)
def get_seed(self, password):
return pw_decode(self.seed, password)
def get_passphrase(self, password):
return pw_decode(self.passphrase, password) if self.passphrase else ''
class Xpub:
def __init__(self):
self.xpub = None
self.xpub_receive = None
self.xpub_change = None
def get_master_public_key(self):
return self.xpub
def derive_pubkey(self, for_change, n):
xpub = self.xpub_change if for_change else self.xpub_receive
if xpub is None:
xpub = bip32_public_derivation(self.xpub, "", "/%d"%for_change)
if for_change:
self.xpub_change = xpub
else:
self.xpub_receive = xpub
return self.get_pubkey_from_xpub(xpub, (n,))
@classmethod
def get_pubkey_from_xpub(self, xpub, sequence):
_, _, _, _, c, cK = deserialize_xpub(xpub)
for i in sequence:
cK, c = CKD_pub(cK, c, i)
return bh2u(cK)
def get_xpubkey(self, c, i):
s = ''.join(map(lambda x: bitcoin.int_to_hex(x,2), (c, i)))
return 'ff' + bh2u(bitcoin.DecodeBase58Check(self.xpub)) + s
@classmethod
def parse_xpubkey(self, pubkey):
assert pubkey[0:2] == 'ff'
pk = bfh(pubkey)
pk = pk[1:]
xkey = bitcoin.EncodeBase58Check(pk[0:78])
dd = pk[78:]
s = []
while dd:
n = int(bitcoin.rev_hex(bh2u(dd[0:2])), 16)
dd = dd[2:]
s.append(n)
assert len(s) == 2
return xkey, s
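    # Layout of the 'ff' extended pubkey handled above (for illustration):
    #   'ff' || Base58Check-decoded xpub (78 bytes) || change (2 bytes, LE)
    #        || index (2 bytes, LE)
    # so parse_xpubkey is the exact inverse of get_xpubkey.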
def get_pubkey_derivation(self, x_pubkey):
if x_pubkey[0:2] != 'ff':
return
xpub, derivation = self.parse_xpubkey(x_pubkey)
if self.xpub != xpub:
return
return derivation
class BIP32_KeyStore(Deterministic_KeyStore, Xpub):
def __init__(self, d):
Xpub.__init__(self)
Deterministic_KeyStore.__init__(self, d)
self.xpub = d.get('xpub')
self.xprv = d.get('xprv')
def format_seed(self, seed):
return ' '.join(seed.split())
def dump(self):
d = Deterministic_KeyStore.dump(self)
d['type'] = 'bip32'
d['xpub'] = self.xpub
d['xprv'] = self.xprv
return d
def get_master_private_key(self, password):
return pw_decode(self.xprv, password)
def check_password(self, password):
xprv = pw_decode(self.xprv, password)
if deserialize_xprv(xprv)[4] != deserialize_xpub(self.xpub)[4]:
raise InvalidPassword()
def update_password(self, old_password, new_password):
self.check_password(old_password)
if new_password == '':
new_password = None
if self.has_seed():
decoded = self.get_seed(old_password)
self.seed = pw_encode(decoded, new_password)
if self.passphrase:
decoded = self.get_passphrase(old_password)
self.passphrase = pw_encode(decoded, new_password)
if self.xprv is not None:
b = pw_decode(self.xprv, old_password)
self.xprv = pw_encode(b, new_password)
def is_watching_only(self):
return self.xprv is None
def add_xprv(self, xprv):
self.xprv = xprv
self.xpub = bitcoin.xpub_from_xprv(xprv)
def add_xprv_from_seed(self, bip32_seed, xtype, derivation):
xprv, xpub = bip32_root(bip32_seed, xtype)
xprv, xpub = bip32_private_derivation(xprv, "m/", derivation)
self.add_xprv(xprv)
def get_private_key(self, sequence, password):
xprv = self.get_master_private_key(password)
_, _, _, _, c, k = deserialize_xprv(xprv)
pk = bip32_private_key(sequence, k, c)
return pk
def is_segwit(self):
return bool(deserialize_xpub(self.xpub)[0])
class Old_KeyStore(Deterministic_KeyStore):
def __init__(self, d):
Deterministic_KeyStore.__init__(self, d)
self.mpk = d.get('mpk')
def get_hex_seed(self, password):
return pw_decode(self.seed, password).encode('utf8')
def dump(self):
d = Deterministic_KeyStore.dump(self)
d['mpk'] = self.mpk
d['type'] = 'old'
return d
def add_seed(self, seedphrase):
Deterministic_KeyStore.add_seed(self, seedphrase)
s = self.get_hex_seed(None)
self.mpk = self.mpk_from_seed(s)
def add_master_public_key(self, mpk):
self.mpk = mpk
def format_seed(self, seed):
from . import old_mnemonic
# see if seed was entered as hex
seed = seed.strip()
if seed:
try:
bfh(seed)
return str(seed)
except Exception:
pass
words = seed.split()
seed = old_mnemonic.mn_decode(words)
if not seed:
raise Exception("Invalid seed")
return seed
def get_seed(self, password):
from . import old_mnemonic
s = self.get_hex_seed(password)
return ' '.join(old_mnemonic.mn_encode(s))
@classmethod
def mpk_from_seed(klass, seed):
secexp = klass.stretch_key(seed)
master_private_key = ecdsa.SigningKey.from_secret_exponent(secexp, curve = SECP256k1)
master_public_key = master_private_key.get_verifying_key().to_string()
return bh2u(master_public_key)
@classmethod
def stretch_key(self, seed):
x = seed
for i in range(100000):
x = hashlib.sha256(x + seed).digest()
return string_to_number(x)
@classmethod
def get_sequence(self, mpk, for_change, n):
return string_to_number(Hash(("%d:%d:"%(n, for_change)).encode('ascii') + bfh(mpk)))
def get_address(self, for_change, n):
pubkey = self.get_pubkey(for_change, n)
address = public_key_to_p2pkh(bfh(pubkey))
return address
@classmethod
def get_pubkey_from_mpk(self, mpk, for_change, n):
z = self.get_sequence(mpk, for_change, n)
master_public_key = ecdsa.VerifyingKey.from_string(bfh(mpk), curve = SECP256k1)
pubkey_point = master_public_key.pubkey.point + z*SECP256k1.generator
public_key2 = ecdsa.VerifyingKey.from_public_point(pubkey_point, curve = SECP256k1)
return '04' + bh2u(public_key2.to_string())
def derive_pubkey(self, for_change, n):
return self.get_pubkey_from_mpk(self.mpk, for_change, n)
def get_private_key_from_stretched_exponent(self, for_change, n, secexp):
order = generator_secp256k1.order()
secexp = (secexp + self.get_sequence(self.mpk, for_change, n)) % order
pk = number_to_string(secexp, generator_secp256k1.order())
compressed = False
return SecretToASecret(pk, compressed)
def get_private_key(self, sequence, password):
seed = self.get_hex_seed(password)
self.check_seed(seed)
for_change, n = sequence
secexp = self.stretch_key(seed)
pk = self.get_private_key_from_stretched_exponent(for_change, n, secexp)
return pk
def check_seed(self, seed):
secexp = self.stretch_key(seed)
master_private_key = ecdsa.SigningKey.from_secret_exponent( secexp, curve = SECP256k1 )
master_public_key = master_private_key.get_verifying_key().to_string()
if master_public_key != bfh(self.mpk):
print_error('invalid password (mpk)', self.mpk, bh2u(master_public_key))
raise InvalidPassword()
def check_password(self, password):
seed = self.get_hex_seed(password)
self.check_seed(seed)
def get_master_public_key(self):
return self.mpk
def get_xpubkey(self, for_change, n):
s = ''.join(map(lambda x: bitcoin.int_to_hex(x,2), (for_change, n)))
return 'fe' + self.mpk + s
@classmethod
def parse_xpubkey(self, x_pubkey):
assert x_pubkey[0:2] == 'fe'
pk = x_pubkey[2:]
mpk = pk[0:128]
dd = pk[128:]
s = []
while dd:
n = int(bitcoin.rev_hex(dd[0:4]), 16)
dd = dd[4:]
s.append(n)
assert len(s) == 2
return mpk, s
def get_pubkey_derivation(self, x_pubkey):
if x_pubkey[0:2] != 'fe':
return
mpk, derivation = self.parse_xpubkey(x_pubkey)
if self.mpk != mpk:
return
return derivation
def update_password(self, old_password, new_password):
self.check_password(old_password)
if new_password == '':
new_password = None
if self.has_seed():
decoded = pw_decode(self.seed, old_password)
self.seed = pw_encode(decoded, new_password)
class Hardware_KeyStore(KeyStore, Xpub):
# Derived classes must set:
# - device
# - DEVICE_IDS
# - wallet_type
#restore_wallet_class = BIP32_RD_Wallet
max_change_outputs = 1
def __init__(self, d):
Xpub.__init__(self)
KeyStore.__init__(self)
# Errors and other user interaction is done through the wallet's
# handler. The handler is per-window and preserved across
# device reconnects
self.xpub = d.get('xpub')
self.label = d.get('label')
self.derivation = d.get('derivation')
self.handler = None
def set_label(self, label):
self.label = label
def may_have_password(self):
return False
def is_deterministic(self):
return True
def dump(self):
return {
'type': 'hardware',
'hw_type': self.hw_type,
'xpub': self.xpub,
'derivation':self.derivation,
'label':self.label,
}
def unpaired(self):
        '''A device paired with the wallet was disconnected. This can be
called in any thread context.'''
self.print_error("unpaired")
def paired(self):
'''A device paired with the wallet was (re-)connected. This can be
called in any thread context.'''
self.print_error("paired")
def can_export(self):
return False
def is_watching_only(self):
'''The wallet is not watching-only; the user will be prompted for
pin and passphrase as appropriate when needed.'''
assert not self.has_seed()
return False
def can_change_password(self):
return False
def bip39_normalize_passphrase(passphrase):
return normalize('NFKD', passphrase or '')
def bip39_to_seed(mnemonic, passphrase):
import pbkdf2, hashlib, hmac
PBKDF2_ROUNDS = 2048
mnemonic = normalize('NFKD', ' '.join(mnemonic.split()))
passphrase = bip39_normalize_passphrase(passphrase)
return pbkdf2.PBKDF2(mnemonic, 'mnemonic' + passphrase,
iterations = PBKDF2_ROUNDS, macmodule = hmac,
digestmodule = hashlib.sha512).read(64)
# returns tuple (is_checksum_valid, is_wordlist_valid)
def bip39_is_checksum_valid(mnemonic):
words = [ normalize('NFKD', word) for word in mnemonic.split() ]
words_len = len(words)
wordlist = load_wordlist("english.txt")
n = len(wordlist)
checksum_length = 11*words_len//33
entropy_length = 32*checksum_length
i = 0
words.reverse()
while words:
w = words.pop()
try:
k = wordlist.index(w)
except ValueError:
return False, False
i = i*n + k
if words_len not in [12, 15, 18, 21, 24]:
return False, True
entropy = i >> checksum_length
checksum = i % 2**checksum_length
h = '{:x}'.format(entropy)
while len(h) < entropy_length/4:
h = '0'+h
b = bytearray.fromhex(h)
hashed = int(hfu(hashlib.sha256(b).digest()), 16)
calculated_checksum = hashed >> (256 - checksum_length)
return checksum == calculated_checksum, True
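# Worked example of the checksum arithmetic above (illustration only): for a
# 12-word mnemonic, checksum_length = 11*12//33 = 4 bits and entropy_length =
# 32*4 = 128 bits, so the low 4 bits of the 132-bit word-index integer must
# equal the top 4 bits of sha256(entropy).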
# extended pubkeys
def is_xpubkey(x_pubkey):
return x_pubkey[0:2] == 'ff'
def parse_xpubkey(x_pubkey):
assert x_pubkey[0:2] == 'ff'
return BIP32_KeyStore.parse_xpubkey(x_pubkey)
def xpubkey_to_address(x_pubkey):
if x_pubkey[0:2] == 'fd':
# TODO: check that ord() is OK here
addrtype = ord(bfh(x_pubkey[2:4]))
hash160 = bfh(x_pubkey[4:])
address = bitcoin.hash_160_to_bc_address(hash160, addrtype)
return x_pubkey, address
if x_pubkey[0:2] in ['02', '03', '04']:
pubkey = x_pubkey
elif x_pubkey[0:2] == 'ff':
xpub, s = BIP32_KeyStore.parse_xpubkey(x_pubkey)
pubkey = BIP32_KeyStore.get_pubkey_from_xpub(xpub, s)
elif x_pubkey[0:2] == 'fe':
mpk, s = Old_KeyStore.parse_xpubkey(x_pubkey)
pubkey = Old_KeyStore.get_pubkey_from_mpk(mpk, s[0], s[1])
else:
raise BaseException("Cannot parse pubkey")
if pubkey:
address = public_key_to_p2pkh(bfh(pubkey))
return pubkey, address
def xpubkey_to_pubkey(x_pubkey):
pubkey, address = xpubkey_to_address(x_pubkey)
return pubkey
hw_keystores = {}
def register_keystore(hw_type, constructor):
hw_keystores[hw_type] = constructor
def hardware_keystore(d):
hw_type = d['hw_type']
if hw_type in hw_keystores:
constructor = hw_keystores[hw_type]
return constructor(d)
raise BaseException('unknown hardware type', hw_type)
def load_keystore(storage, name):
w = storage.get('wallet_type', 'standard')
d = storage.get(name, {})
t = d.get('type')
if not t:
raise BaseException('wallet format requires update')
if t == 'old':
k = Old_KeyStore(d)
elif t == 'imported':
k = Imported_KeyStore(d)
elif t == 'bip32':
k = BIP32_KeyStore(d)
elif t == 'hardware':
k = hardware_keystore(d)
else:
raise BaseException('unknown wallet type', t)
return k
def is_old_mpk(mpk):
try:
int(mpk, 16)
except:
return False
return len(mpk) == 128
def is_address_list(text):
parts = text.split()
return bool(parts) and all(bitcoin.is_address(x) for x in parts)
def get_private_keys(text):
parts = text.split('\n')
parts = map(lambda x: ''.join(x.split()), parts)
parts = list(filter(bool, parts))
if bool(parts) and all(bitcoin.is_private_key(x) for x in parts):
return parts
def is_private_key_list(text):
return bool(get_private_keys(text))
is_mpk = lambda x: is_old_mpk(x) or is_xpub(x)
is_private = lambda x: is_seed(x) or is_xprv(x) or is_private_key_list(x)
is_any_key = lambda x: is_old_mpk(x) or is_xprv(x) or is_xpub(x) or is_private_key_list(x)
is_private_key = lambda x: is_xprv(x) or is_private_key_list(x)
is_bip32_key = lambda x: is_xprv(x) or is_xpub(x)
def bip44_derivation(account_id):
if bitcoin.TESTNET:
return "m/44'/1'/%d'"% int(account_id)
else:
return "m/44'/0'/%d'"% int(account_id)
def from_seed(seed, passphrase):
t = seed_type(seed)
if t == 'old':
keystore = Old_KeyStore({})
keystore.add_seed(seed)
elif t in ['standard', 'segwit']:
keystore = BIP32_KeyStore({})
keystore.add_seed(seed)
keystore.passphrase = passphrase
bip32_seed = Mnemonic.mnemonic_to_seed(seed, passphrase)
xtype = 0 if t == 'standard' else 1
keystore.add_xprv_from_seed(bip32_seed, xtype, "m/")
return keystore
def from_private_key_list(text):
keystore = Imported_KeyStore({})
for x in text.split():
keystore.import_key(x, None)
return keystore
def from_old_mpk(mpk):
keystore = Old_KeyStore({})
keystore.add_master_public_key(mpk)
return keystore
def from_xpub(xpub):
k = BIP32_KeyStore({})
k.xpub = xpub
return k
def from_xprv(xprv):
xpub = bitcoin.xpub_from_xprv(xprv)
k = BIP32_KeyStore({})
k.xprv = xprv
k.xpub = xpub
return k
def from_keys(text):
if is_xprv(text):
k = from_xprv(text)
elif is_old_mpk(text):
k = from_old_mpk(text)
elif is_xpub(text):
k = from_xpub(text)
elif is_private_key_list(text):
k = from_private_key_list(text)
else:
raise BaseException('Invalid key')
return k
| 1 | 11,776 | `purpose` and `coin_type` and `account` would mimic the BIP-0044 wording, but it's up to you. | spesmilo-electrum | py |
@@ -129,10 +129,10 @@ namespace pwiz.Skyline.Controls.Graphs
public void SetQValueTo(float qValue)
{
if (qValue == .01f)
- rbQValue01.Select();
+ rbQValue01.Checked = true;
else
{
- rbQValueCustom.Select();
+ rbQValueCustom.Checked = true;
txtQValueCustom.Text = qValue.ToString(CultureInfo.CurrentCulture);
}
} | 1 | /*
* Original author: Rita Chupalov <ritach .at. uw.edu>,
* MacCoss Lab, Department of Genome Sciences, UW
*
* Copyright 2020 University of Washington - Seattle, WA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
using System;
using System.Globalization;
using System.Windows.Forms;
using pwiz.Common.SystemUtil;
using pwiz.Skyline.Util;
using pwiz.Skyline.Properties;
using Settings = pwiz.Skyline.Controls.Graphs.DetectionsGraphController.Settings;
using IntLabeledValue = pwiz.Skyline.Controls.Graphs.DetectionsGraphController.IntLabeledValue;
namespace pwiz.Skyline.Controls.Graphs
{
public partial class DetectionToolbarProperties : FormEx
{
private readonly GraphSummary _graphSummary;
public DetectionToolbarProperties(GraphSummary graphSummary)
{
InitializeComponent();
_graphSummary = graphSummary;
}
private void DetectionToolbarProperties_Load(object sender, EventArgs e)
{
IntLabeledValue.PopulateCombo(cmbTargetType, Settings.TargetType);
IntLabeledValue.PopulateCombo(cmbCountMultiple, Settings.YScaleFactor);
txtQValueCustom.Text = Settings.QValueCutoff.ToString(LocalizationHelper.CurrentCulture);
switch (Settings.QValueCutoff)
{
case 0.01f:
rbQValue01.Select();
break;
default:
rbQValueCustom.Select();
break;
}
cbShowAtLeastN.Checked = Settings.ShowAtLeastN;
cbShowSelection.Checked = Settings.ShowSelection;
cbShowMeanStd.Checked = Settings.ShowMean;
cbShowLegend.Checked = Settings.ShowLegend;
GraphFontSize.PopulateCombo(cmbFontSize, Settings.FontSize);
if (_graphSummary.DocumentUIContainer.DocumentUI.IsLoaded &&
_graphSummary.DocumentUIContainer.DocumentUI.MeasuredResults.Chromatograms.Count > 0)
{
tbAtLeastN.Maximum = _graphSummary.DocumentUIContainer.DocumentUI.MeasuredResults.Chromatograms.Count;
if(Settings.RepCount < tbAtLeastN.Maximum && Settings.RepCount > tbAtLeastN.Minimum)
tbAtLeastN.Value = Settings.RepCount;
else
tbAtLeastN.Value = tbAtLeastN.Maximum / 2;
}
cmbTargetType.Focus();
}
private void btnOk_Click(object sender, EventArgs e)
{
OkDialog();
}
public void OkDialog()
{
var helper = new MessageBoxHelper(this);
if (rbQValue01.Checked)
Settings.QValueCutoff = 0.01f;
if (rbQValueCustom.Checked)
{
var qValueCutoff = double.NaN;
if (!string.IsNullOrEmpty(txtQValueCustom.Text)
&& !helper.ValidateDecimalTextBox(txtQValueCustom, 0, 1, out qValueCutoff))
return;
else
Settings.QValueCutoff = (float)qValueCutoff;
}
Settings.YScaleFactor = IntLabeledValue.GetValue(cmbCountMultiple, Settings.YScaleFactor);
Settings.TargetType = IntLabeledValue.GetValue(cmbTargetType, Settings.TargetType);
Settings.ShowAtLeastN = cbShowAtLeastN.Checked;
Settings.ShowSelection = cbShowSelection.Checked;
Settings.ShowMean = cbShowMeanStd.Checked;
Settings.ShowLegend = cbShowLegend.Checked;
Settings.RepCount = tbAtLeastN.Value;
Settings.FontSize = GraphFontSize.GetFontSize(cmbFontSize).PointSize;
DialogResult = DialogResult.OK;
}
private void txtQValueCustom_Enter(object sender, EventArgs e)
{
rbQValueCustom.Checked = true;
}
private void tbAtLeastN_ValueChanged(object sender, EventArgs e)
{
gbAtLeastN.Text = String.Format(CultureInfo.CurrentCulture,
Resources.DetectionToolbarProperties_AtLeastNReplicates, tbAtLeastN.Value);
}
private void cmbTargetType_SelectedIndexChanged(object sender, EventArgs e)
{
Settings.TargetType = IntLabeledValue.GetValue(cmbTargetType, Settings.TargetType);
IntLabeledValue.PopulateCombo(cmbCountMultiple, Settings.YScaleFactor);
}
#region Functional test support
public void SetQValueTo(float qValue)
{
if (qValue == .01f)
rbQValue01.Select();
else
{
rbQValueCustom.Select();
txtQValueCustom.Text = qValue.ToString(CultureInfo.CurrentCulture);
}
}
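        // Note (re: the patch above): Control.Select() only requests focus;
        // a RadioButton is checked as a side effect of gaining focus, which
        // may not happen while the form is not visible, so assigning
        // Checked = true directly is the deterministic choice for tests.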
#endregion
}
}
| 1 | 13,751 | Is this the critical change? It does seem wrong to use Select() instead of Checked = true. Not sure why that would pass sometimes and not others, though. | ProteoWizard-pwiz | .cs |
@@ -111,7 +111,14 @@ func (p *VSphereCloudBuilder) addClusterDeploymentPlatform(o *Builder, cd *hivev
}
func (p *VSphereCloudBuilder) addMachinePoolPlatform(o *Builder, mp *hivev1.MachinePool) {
- mp.Spec.Platform.VSphere = &hivev1vsphere.MachinePool{}
+ mp.Spec.Platform.VSphere = &hivev1vsphere.MachinePool{
+ NumCPUs: 2,
+ NumCoresPerSocket: 1,
+ MemoryMiB: 8192,
+ OSDisk: hivev1vsphere.OSDisk{
+ DiskSizeGB: 120,
+ },
+ }
}
func (p *VSphereCloudBuilder) addInstallConfigPlatform(o *Builder, ic *installertypes.InstallConfig) { | 1 | package clusterresource
import (
"fmt"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
installertypes "github.com/openshift/installer/pkg/types"
installervsphere "github.com/openshift/installer/pkg/types/vsphere"
hivev1 "github.com/openshift/hive/pkg/apis/hive/v1"
hivev1vsphere "github.com/openshift/hive/pkg/apis/hive/v1/vsphere"
"github.com/openshift/hive/pkg/constants"
)
var _ CloudBuilder = (*VSphereCloudBuilder)(nil)
// VSphereCloudBuilder encapsulates cluster artifact generation logic specific to vSphere.
type VSphereCloudBuilder struct {
// VCenter is the domain name or IP address of the vCenter.
VCenter string
// Username is the name of the user to use to connect to the vCenter.
Username string
// Password is the password for the user to use to connect to the vCenter.
Password string
// Datacenter is the name of the datacenter to use in the vCenter.
Datacenter string
// DefaultDatastore is the default datastore to use for provisioning volumes.
DefaultDatastore string
// Folder is the name of the folder that will be used and/or created for
// virtual machines.
Folder string
// Cluster is the name of the cluster virtual machines will be cloned into.
Cluster string
// APIVIP is the virtual IP address for the api endpoint
APIVIP string
// IngressVIP is the virtual IP address for ingress
IngressVIP string
// DNSVIP is the virtual IP address for DNS
DNSVIP string
// Network specifies the name of the network to be used by the cluster.
Network string
// CACert is the CA certificate(s) used to communicate with the vCenter.
CACert []byte
}
func (p *VSphereCloudBuilder) generateCredentialsSecret(o *Builder) *corev1.Secret {
return &corev1.Secret{
TypeMeta: metav1.TypeMeta{
Kind: "Secret",
APIVersion: corev1.SchemeGroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: p.credsSecretName(o),
Namespace: o.Namespace,
},
Type: corev1.SecretTypeOpaque,
StringData: map[string]string{
constants.UsernameSecretKey: p.Username,
constants.PasswordSecretKey: p.Password,
},
}
}
func (p *VSphereCloudBuilder) generateCloudCertificatesSecret(o *Builder) *corev1.Secret {
return &corev1.Secret{
TypeMeta: metav1.TypeMeta{
Kind: "Secret",
APIVersion: corev1.SchemeGroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: p.certificatesSecretName(o),
Namespace: o.Namespace,
},
Type: corev1.SecretTypeOpaque,
Data: map[string][]byte{
".cacert": p.CACert,
},
}
}
func (p *VSphereCloudBuilder) addClusterDeploymentPlatform(o *Builder, cd *hivev1.ClusterDeployment) {
cd.Spec.Platform = hivev1.Platform{
VSphere: &hivev1vsphere.Platform{
CredentialsSecretRef: corev1.LocalObjectReference{
Name: p.credsSecretName(o),
},
CertificatesSecretRef: corev1.LocalObjectReference{
Name: p.certificatesSecretName(o),
},
VCenter: p.VCenter,
Datacenter: p.Datacenter,
DefaultDatastore: p.DefaultDatastore,
Folder: p.Folder,
Cluster: p.Cluster,
Network: p.Network,
},
}
}
func (p *VSphereCloudBuilder) addMachinePoolPlatform(o *Builder, mp *hivev1.MachinePool) {
mp.Spec.Platform.VSphere = &hivev1vsphere.MachinePool{}
}
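// Note on the patch above: it replaces the empty MachinePool with explicit
// defaults (2 vCPUs, 1 core per socket, 8192 MiB memory, 120 GB disk); per
// the review, these should be confirmed against the OpenShift installer's
// own vSphere defaults rather than treated as authoritative here.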
func (p *VSphereCloudBuilder) addInstallConfigPlatform(o *Builder, ic *installertypes.InstallConfig) {
ic.Platform = installertypes.Platform{
VSphere: &installervsphere.Platform{
VCenter: p.VCenter,
Username: p.Username,
Password: p.Password,
Datacenter: p.Datacenter,
DefaultDatastore: p.DefaultDatastore,
Folder: p.Folder,
Cluster: p.Cluster,
APIVIP: p.APIVIP,
IngressVIP: p.IngressVIP,
DNSVIP: p.DNSVIP,
Network: p.Network,
},
}
}
func (p *VSphereCloudBuilder) credsSecretName(o *Builder) string {
return fmt.Sprintf("%s-vsphere-creds", o.Name)
}
func (p *VSphereCloudBuilder) certificatesSecretName(o *Builder) string {
return fmt.Sprintf("%s-vsphere-certs", o.Name)
}
| 1 | 12,323 | Seems a little low, but is this an installer default? | openshift-hive | go |
@@ -10,6 +10,19 @@ from kinto.core import utils
logger = structlog.get_logger()
+def reset_logger():
+ """Hack to work around https://github.com/hynek/structlog/issues/71.
+
+ This clears a magic field in the logger so that it will regenerate
+ the right kind of logger on its next use.
+
+ Do this when you call structlog.configure(). Otherwise, some parts
+ of the logger will be outdated and you may get strange behavior.
+
+ """
+ logger._logger = None
+
+
def decode_value(value):
try:
return six.text_type(value) | 1 | import os
import colorama
import six
import structlog
from kinto.core import utils
logger = structlog.get_logger()
def decode_value(value):
try:
return six.text_type(value)
except UnicodeDecodeError: # pragma: no cover
return six.binary_type(value).decode('utf-8')
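# Sketch of the reviewer's suggestion (hypothetical, not part of kinto): wrap
# structlog.configure() so the module-level lazy logger is reset in one place
# instead of every caller remembering the reset_logger() hack.
def configure_logging(*args, **kwargs):
    structlog.configure(*args, **kwargs)
    # Clear the cached bound logger so the lazy proxy rebuilds it with the
    # new configuration (workaround for hynek/structlog#71).
    logger._logger = None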
class ClassicLogRenderer(object):
"""Classic log output for structlog.
::
"GET /v1/articles?_sort=title" 200 (3 ms) request.summary uid=234;
"""
def __init__(self, settings):
pass
def __call__(self, logger, name, event_dict):
RESET_ALL = colorama.Style.RESET_ALL
BRIGHT = colorama.Style.BRIGHT
CYAN = colorama.Fore.CYAN
MAGENTA = colorama.Fore.MAGENTA
YELLOW = colorama.Fore.YELLOW
if 'path' in event_dict:
pattern = (BRIGHT +
u'"{method: <5} {path}{querystring}"' +
RESET_ALL +
YELLOW + u' {code} ({t} ms)' +
RESET_ALL +
u' {event} {context}')
else:
pattern = u'{event} {context}'
output = {}
for field in ['method', 'path', 'code', 't', 'event']:
output[field] = decode_value(event_dict.pop(field, '?'))
querystring = event_dict.pop('querystring', {})
params = [decode_value('%s=%s' % qs) for qs in querystring.items()]
output['querystring'] = '?%s' % '&'.join(params) if params else ''
output['context'] = " ".join(
CYAN + key + RESET_ALL +
"=" +
MAGENTA + decode_value(event_dict[key]) +
RESET_ALL
for key in sorted(event_dict.keys())
)
log_msg = pattern.format(**output)
return log_msg
class MozillaHekaRenderer(object):
"""Build structured log entries as expected by Mozilla Services standard:
* https://mana.mozilla.org/wiki/display/CLOUDSERVICES/Logging+Standard
"""
ENV_VERSION = '2.0'
def __init__(self, settings):
super(MozillaHekaRenderer, self).__init__()
self.appname = settings['project_name']
self.hostname = utils.read_env('HOSTNAME', os.uname()[1])
self.pid = os.getpid()
def __call__(self, logger, name, event_dict):
SYSLOG_LEVELS = {
'critical': 0,
'fatal': 0,
'exception': 2,
'error': 2,
'warning': 4,
'info': 6,
'debug': 7,
}
severity = SYSLOG_LEVELS[name]
MSEC_TO_NANOSEC = 1000000
timestamp = utils.msec_time() * MSEC_TO_NANOSEC
event = event_dict.pop('event', '')
defaults = {
'Timestamp': timestamp,
'Logger': self.appname,
'Type': event,
'Hostname': self.hostname,
'Severity': severity,
'Pid': self.pid,
'EnvVersion': self.ENV_VERSION,
'Fields': {}
}
for f, v in defaults.items():
event_dict.setdefault(f, v)
fields = [k for k in event_dict.keys() if k not in defaults]
for f in fields:
value = event_dict.pop(f)
# Heka relies on Protobuf, which doesn't support recursive objects.
if isinstance(value, dict):
value = utils.json.dumps(value)
elif isinstance(value, (list, tuple)):
if not all([isinstance(i, six.string_types) for i in value]):
value = utils.json.dumps(value)
event_dict['Fields'][f] = value
return utils.json.dumps(event_dict)
| 1 | 9,220 | Maybe we could provide our own `configure` function that includes `logger._logger = None` because I don't see a use case in kinto for using `reset_logger` besides configuring the logger. | Kinto-kinto | py |
@@ -31,11 +31,8 @@ type Block struct {
// holders for an epoch.
Parents SortedCidSet `json:"parents"`
- // ParentWeightNum is the numerator of the aggregate chain weight of the parent set.
- ParentWeightNum Uint64 `json:"parentWeightNumerator"`
-
- // ParentWeightDenom is the denominator of the aggregate chain weight of the parent set
- ParentWeightDenom Uint64 `json:"parentWeightDenominator"`
+ // ParentWeight is the aggregate chain weight of the parent set.
+ ParentWeight Uint64 `json:"parentWeight"`
// Height is the chain height of this block.
Height Uint64 `json:"height"` | 1 | package types
import (
"bytes"
"encoding/json"
"fmt"
"sort"
"gx/ipfs/QmR8BauakNcBa3RbE4nbQu76PDiJgoQgz8AJdhJuiU4TAw/go-cid"
cbor "gx/ipfs/QmRoARq3nkUb13HSKZGepCZSWe5GrVPwx7xURJGZ7KWv9V/go-ipld-cbor"
node "gx/ipfs/QmcKKBwfz6FyQdHR2jsXrrF6XeSBXYL86anmWNewpFpoF5/go-ipld-format"
"github.com/filecoin-project/go-filecoin/address"
"github.com/filecoin-project/go-filecoin/proofs"
)
func init() {
cbor.RegisterCborType(Block{})
}
// Block is a block in the blockchain.
type Block struct {
// Miner is the address of the miner actor that mined this block.
Miner address.Address `json:"miner"`
// Ticket is the winning ticket that was submitted with this block.
Ticket Signature `json:"ticket"`
// Parents is the set of parents this block was based on. Typically one,
// but can be several in the case where there were multiple winning ticket-
// holders for an epoch.
Parents SortedCidSet `json:"parents"`
// ParentWeightNum is the numerator of the aggregate chain weight of the parent set.
ParentWeightNum Uint64 `json:"parentWeightNumerator"`
// ParentWeightDenom is the denominator of the aggregate chain weight of the parent set
ParentWeightDenom Uint64 `json:"parentWeightDenominator"`
// Height is the chain height of this block.
Height Uint64 `json:"height"`
// Nonce is a temporary field used to differentiate blocks for testing
Nonce Uint64 `json:"nonce"`
// Messages is the set of messages included in this block
// TODO: should be a merkletree-ish thing
Messages []*SignedMessage `json:"messages"`
// StateRoot is a cid pointer to the state tree after application of the
// transactions state transitions.
StateRoot cid.Cid `json:"stateRoot,omitempty" refmt:",omitempty"`
// MessageReceipts is a set of receipts matching to the sending of the `Messages`.
MessageReceipts []*MessageReceipt `json:"messageReceipts"`
// Proof is a proof of spacetime generated using the hash of the previous ticket as
// a challenge
Proof proofs.PoStProof `json:"proof"`
}
// Cid returns the content id of this block.
func (b *Block) Cid() cid.Cid {
// TODO: Cache ToNode() and/or ToNode().Cid(). We should be able to do this efficiently using
// DeepEquals(), or perhaps our own Equals() interface.
return b.ToNode().Cid()
}
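// Note (re: the patch above): collapsing ParentWeightNum/ParentWeightDenom
// into a single ParentWeight field changes the block's CBOR encoding, and
// since Cid() hashes that encoding, every block CID changes with it -- the
// same breakage the review recalls from when Proof was added to Block.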
// IsParentOf returns true if the receiver is a parent of the argument.
func (b Block) IsParentOf(c Block) bool {
return c.Parents.Has(b.Cid())
}
// ToNode converts the Block to an IPLD node.
func (b *Block) ToNode() node.Node {
// Use 32 byte / 256 bit digest. TODO pull this out into a constant?
obj, err := cbor.WrapObject(b, DefaultHashFunction, -1)
if err != nil {
panic(err)
}
return obj
}
func (b *Block) String() string {
errStr := "(error encoding Block)"
cid := b.Cid()
js, err := json.MarshalIndent(b, "", " ")
if err != nil {
return errStr
}
return fmt.Sprintf("Block cid=[%v]: %s", cid, string(js))
}
// DecodeBlock decodes raw cbor bytes into a Block.
func DecodeBlock(b []byte) (*Block, error) {
var out Block
if err := cbor.DecodeInto(b, &out); err != nil {
return nil, err
}
return &out, nil
}
// Score returns the score of this block. Naively this will just return the
// height. But in the future this will return a more sophisticated metric to be
// used in the fork choice rule.
// Choosing height as the score gives us the same consensus rules as bitcoin.
func (b *Block) Score() uint64 {
return uint64(b.Height)
}
// Equals returns true if the Block is equal to other.
func (b *Block) Equals(other *Block) bool {
return b.Cid().Equals(other.Cid())
}
// SortBlocks sorts a slice of blocks in the canonical order (by min tickets)
func SortBlocks(blks []*Block) {
sort.Slice(blks, func(i, j int) bool {
return bytes.Compare(blks[i].Ticket, blks[j].Ticket) == -1
})
}
| 1 | 15,666 | This will cause the same breakage that was caused when we added Proof to Block. Be sure to let people (infra?) know ahead of time | filecoin-project-venus | go |
@@ -464,11 +464,9 @@ func TestNodeCacheGCReal(t *testing.T) {
childNode1 = nil
runtime.GC()
- _ = <-finalizerChan
+ <-finalizerChan
- if len(ncs.nodes) != 2 {
- t.Errorf("Expected %d nodes, got %d", 2, len(ncs.nodes))
- }
+ require.Len(t, ncs.nodes, 1)
// Make sure childNode2 isn't GCed until after this point.
func(interface{}) {}(childNode2) | 1 | // Copyright 2016 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.
package libkbfs
import (
"runtime"
"testing"
"github.com/keybase/kbfs/kbfsblock"
"github.com/keybase/kbfs/tlf"
"github.com/stretchr/testify/require"
)
func setupNodeCache(t *testing.T, id tlf.ID, branch BranchName, flat bool) (
ncs *nodeCacheStandard, parentNode Node, childNode1 Node, childNode2 Node,
childPath1 []pathNode, childPath2 []pathNode) {
ncs = newNodeCacheStandard(FolderBranch{id, branch})
parentPtr := BlockPointer{ID: kbfsblock.FakeID(0)}
parentName := "parent"
var err error
parentNode, err = ncs.GetOrCreate(parentPtr, parentName, nil)
if err != nil {
t.Errorf("Couldn't create top-level parent node: %v", err)
}
if parentNode.GetBasename() != parentName {
t.Errorf("Expected basename %s, got %s", parentName, parentNode.GetBasename())
}
// now create a child node for that parent
childPtr1 := BlockPointer{ID: kbfsblock.FakeID(1)}
childName1 := "child1"
childNode1, err = ncs.GetOrCreate(childPtr1, childName1, parentNode)
if err != nil {
t.Errorf("Couldn't create child node: %v", err)
}
if childNode1.GetBasename() != childName1 {
t.Errorf("Expected basename %s, got %s", childName1, childNode1.GetBasename())
}
parent2 := childNode1
if flat {
parent2 = parentNode
}
childPtr2 := BlockPointer{ID: kbfsblock.FakeID(2)}
childName2 := "child2"
childNode2, err = ncs.GetOrCreate(childPtr2, childName2, parent2)
if err != nil {
t.Errorf("Couldn't create second child node: %v", err)
}
if childNode2.GetBasename() != childName2 {
t.Errorf("Expected basename %s, got %s", childName2, childNode2.GetBasename())
}
childPath1 = []pathNode{
{
BlockPointer: parentPtr,
Name: parentName,
},
{
BlockPointer: childPtr1,
Name: childName1,
},
}
if flat {
childPath2 = []pathNode{
{
BlockPointer: parentPtr,
Name: parentName,
},
{
BlockPointer: childPtr2,
Name: childName2,
},
}
} else {
childPath2 = []pathNode{
{
BlockPointer: parentPtr,
Name: parentName,
},
{
BlockPointer: childPtr1,
Name: childName1,
},
{
BlockPointer: childPtr2,
Name: childName2,
},
}
}
return
}
// Simulate a GC cycle where all the nodes in liveList still have
// references.
//
// (Doing real GC cycles and running finalizers, etc. is brittle.)
func simulateGC(ncs *nodeCacheStandard, liveList []Node) {
hasWork := true
for hasWork {
hasWork = false
liveSet := make(map[*nodeCore]bool)
// Everything in liveList is live.
for _, n := range liveList {
liveSet[n.(*nodeStandard).core] = true
}
// Everything referenced as a parent is live.
for _, e := range ncs.nodes {
if e.core.parent != nil {
p := e.core.parent.Unwrap().(*nodeStandard)
liveSet[p.core] = true
}
}
// Forget everything not live.
for _, e := range ncs.nodes {
if _, ok := liveSet[e.core]; !ok {
ncs.forget(e.core)
hasWork = true
}
}
}
}
// Tests for simple GetOrCreate successes (with and without a parent)
func TestNodeCacheGetOrCreateSuccess(t *testing.T) {
ncs, parentNode, childNode1A, _, path1, path2 :=
setupNodeCache(t, tlf.FakeID(0, tlf.Private), MasterBranch, true)
parentPtr := path1[0].BlockPointer
childPtr1 := path1[1].BlockPointer
childPtr2 := path2[1].BlockPointer
// make sure we get the same node back for the second call
childNode1B, err := ncs.GetOrCreate(childPtr1, childNode1A.GetBasename(), parentNode)
if err != nil {
t.Errorf("Couldn't create child node: %v", err)
}
if childNode1A.(*nodeStandard).core != childNode1B.(*nodeStandard).core {
t.Error("Two creates for the same child!")
}
// now make sure the refCounts are right.
if ncs.nodes[parentPtr.Ref()].refCount != 1 {
t.Errorf("Parent has wrong refcount: %d", ncs.nodes[parentPtr.Ref()].refCount)
}
if ncs.nodes[childPtr1.Ref()].refCount != 2 {
t.Errorf("Child1 has wrong refcount: %d", ncs.nodes[childPtr1.Ref()].refCount)
}
if ncs.nodes[childPtr2.Ref()].refCount != 1 {
t.Errorf("Child1 has wrong refcount: %d", ncs.nodes[childPtr2.Ref()].refCount)
}
}
// Tests that a child can't be created with an unknown parent.
func TestNodeCacheGetOrCreateNoParent(t *testing.T) {
ncs := newNodeCacheStandard(FolderBranch{tlf.FakeID(0, tlf.Private), ""})
parentPtr := BlockPointer{ID: kbfsblock.FakeID(0)}
parentNode, err := ncs.GetOrCreate(parentPtr, "parent", nil)
if err != nil {
t.Errorf("Couldn't create top-level parent node: %v", err)
}
simulateGC(ncs, []Node{})
// now try to create a child node for that parent
childPtr1 := BlockPointer{ID: kbfsblock.FakeID(1)}
_, err = ncs.GetOrCreate(childPtr1, "child", parentNode)
expectedErr := ParentNodeNotFoundError{parentPtr.Ref()}
if err != expectedErr {
t.Errorf("Got unexpected error when creating w/o parent: %v", err)
}
}
// Tests that UpdatePointer works
func TestNodeCacheUpdatePointer(t *testing.T) {
ncs := newNodeCacheStandard(FolderBranch{tlf.FakeID(0, tlf.Private), ""})
parentPtr := BlockPointer{ID: kbfsblock.FakeID(0)}
parentNode, err := ncs.GetOrCreate(parentPtr, "parent", nil)
if err != nil {
t.Errorf("Couldn't create top-level parent node: %v", err)
}
newParentPtr := BlockPointer{ID: kbfsblock.FakeID(1)}
ncs.UpdatePointer(parentPtr.Ref(), newParentPtr)
if parentNode.(*nodeStandard).core.pathNode.BlockPointer != newParentPtr {
t.Errorf("UpdatePointer didn't work.")
}
}
// Tests that Move works as expected
func TestNodeCacheMoveSuccess(t *testing.T) {
ncs, parentNode, childNode1, childNode2, path1, path2 :=
setupNodeCache(t, tlf.FakeID(0, tlf.Private), MasterBranch, true)
parentPtr := path1[0].BlockPointer
childPtr1 := path1[1].BlockPointer
childPtr2 := path2[1].BlockPointer
// now move child2 under child1
undoMove, err := ncs.Move(childPtr2.Ref(), childNode1, "child3")
if err != nil {
t.Errorf("Couldn't update parent: %v", err)
}
if childNode2.GetBasename() != "child3" {
t.Errorf("Child2 has the wrong name after move: %s",
childNode2.GetBasename())
}
if childNode2.(*nodeStandard).core.parent != childNode1 {
t.Errorf("UpdateParent didn't work")
}
// now make sure all nodes have 1 reference.
if ncs.nodes[parentPtr.Ref()].refCount != 1 {
t.Errorf("Parent has wrong refcount: %d", ncs.nodes[parentPtr.Ref()].refCount)
}
if ncs.nodes[childPtr1.Ref()].refCount != 1 {
t.Errorf("Child1 has wrong refcount: %d", ncs.nodes[childPtr1.Ref()].refCount)
}
if ncs.nodes[childPtr2.Ref()].refCount != 1 {
t.Errorf("Child1 has wrong refcount: %d", ncs.nodes[childPtr2.Ref()].refCount)
}
undoMove()
if childNode2.GetBasename() != "child2" {
t.Errorf("Child2 has the wrong name after move: %s",
childNode2.GetBasename())
}
if childNode2.(*nodeStandard).core.parent != parentNode {
t.Errorf("UpdateParent didn't work")
}
}
// Tests that a child can't be updated with an unknown parent
func TestNodeCacheMoveNoParent(t *testing.T) {
ncs, _, childNode1, childNode2, path1, path2 :=
setupNodeCache(t, tlf.FakeID(0, tlf.Private), MasterBranch, true)
childPtr1 := path1[1].BlockPointer
childPtr2 := path2[1].BlockPointer
// get rid of child1
simulateGC(ncs, []Node{childNode2})
// now move child2 under child1
_, err := ncs.Move(childPtr2.Ref(), childNode1, "child3")
expectedErr := ParentNodeNotFoundError{childPtr1.Ref()}
if err != expectedErr {
t.Errorf("Got unexpected error when updating parent: %v", err)
}
}
func checkNodeCachePath(t *testing.T, id tlf.ID, branch BranchName,
path path, expectedPath []pathNode) {
if len(path.path) != len(expectedPath) {
t.Errorf("Bad path length: %v vs %v", len(path.path), len(expectedPath))
}
for i, n := range expectedPath {
if path.path[i] != n {
t.Errorf("Bad node on path, index %d: %v vs %v", i, path.path[i], n)
}
}
if path.Tlf != id {
t.Errorf("Wrong top dir: %v vs %v", path.Tlf, id)
}
if path.Branch != BranchName(branch) {
t.Errorf("Wrong branch: %s vs %s", path.Branch, branch)
}
}
// Tests that a child can be unlinked completely from the parent, and
// still have a path, but not a basename.
func TestNodeCacheUnlink(t *testing.T) {
id := tlf.FakeID(42, tlf.Private)
branch := BranchName("testBranch")
ncs, _, _, childNode2, _, path2 :=
setupNodeCache(t, id, branch, false)
childPtr2 := path2[2].BlockPointer
// unlink child2
undoFn := ncs.Unlink(
childPtr2.Ref(), ncs.PathFromNode(childNode2), DirEntry{})
if undoFn == nil {
t.Fatalf("Couldn't unlink")
}
path := ncs.PathFromNode(childNode2)
checkNodeCachePath(t, id, branch, path, path2)
if childNode2.GetBasename() != "" {
t.Errorf("Expected empty basename, got %s", childNode2.GetBasename())
}
// Undo
undoFn()
if childNode2.GetBasename() != path2[2].Name {
t.Errorf("Expected basename %s, got %s",
path2[2].Name, childNode2.GetBasename())
}
}
// Tests that a child's ancestor can be unlinked completely from its
// parent, and the child still has a path and a basename.
func TestNodeCacheUnlinkParent(t *testing.T) {
id := tlf.FakeID(42, tlf.Private)
branch := BranchName("testBranch")
ncs, _, childNode1, childNode2, _, path2 :=
setupNodeCache(t, id, branch, false)
childPtr1 := path2[1].BlockPointer
// unlink node 2's parent
undoFn := ncs.Unlink(
childPtr1.Ref(), ncs.PathFromNode(childNode1), DirEntry{})
if undoFn == nil {
t.Fatalf("Couldn't unlink")
}
path := ncs.PathFromNode(childNode2)
checkNodeCachePath(t, id, branch, path, path2)
if childNode2.GetBasename() != "child2" {
t.Errorf("Expected basename child2, got %s", childNode2.GetBasename())
}
}
// Tests that a child can be unlinked completely from the parent, and
// then re-added with a new pointer and still work, but with a new
// node core.
func TestNodeCacheUnlinkThenRelink(t *testing.T) {
id := tlf.FakeID(42, tlf.Private)
branch := BranchName("testBranch")
ncs, _, childNode1, childNode2, _, path2 :=
setupNodeCache(t, id, branch, false)
childPtr2 := path2[2].BlockPointer
// unlink child2
undoFn := ncs.Unlink(
childPtr2.Ref(), ncs.PathFromNode(childNode2), DirEntry{})
if undoFn == nil {
t.Fatalf("Couldn't unlink")
}
newChildName := "newChildName"
newChildPtr2 := BlockPointer{ID: kbfsblock.FakeID(22)}
ncs.UpdatePointer(childPtr2.Ref(), newChildPtr2) // NO-OP
childNode2B, err := ncs.GetOrCreate(newChildPtr2, newChildName, childNode1)
if err != nil {
t.Fatalf("Couldn't relink node: %v", err)
}
if childNode2.GetID() == childNode2B.GetID() {
t.Errorf("Relink left the node the same")
}
// Old unlinked node didn't get updated
path := ncs.PathFromNode(childNode2)
checkNodeCachePath(t, id, branch, path, path2)
// New node
path = ncs.PathFromNode(childNode2B)
path2[2].BlockPointer = newChildPtr2
path2[2].Name = newChildName
checkNodeCachePath(t, id, branch, path, path2)
if g, e := childNode2.GetBasename(), ""; g != e {
t.Errorf("Expected basename %s, got %s", e, g)
}
if g, e := childNode2B.GetBasename(), newChildName; g != e {
t.Errorf("Expected basename %s, got %s", e, g)
}
}
// Tests that PathFromNode works correctly
func TestNodeCachePathFromNode(t *testing.T) {
id := tlf.FakeID(42, tlf.Private)
branch := BranchName("testBranch")
ncs, _, _, childNode2, _, path2 :=
setupNodeCache(t, id, branch, false)
path := ncs.PathFromNode(childNode2)
checkNodeCachePath(t, id, branch, path, path2)
}
// Make sure that (simulated) GC works as expected.
func TestNodeCacheGCBasic(t *testing.T) {
ncs, parentNode, _, childNode2, _, _ :=
setupNodeCache(t, tlf.FakeID(0, tlf.Private), MasterBranch, true)
if len(ncs.nodes) != 3 {
t.Errorf("Expected %d nodes, got %d", 3, len(ncs.nodes))
}
simulateGC(ncs, []Node{parentNode, childNode2})
if len(ncs.nodes) != 2 {
t.Errorf("Expected %d nodes, got %d", 2, len(ncs.nodes))
}
simulateGC(ncs, []Node{parentNode})
if len(ncs.nodes) != 1 {
t.Errorf("Expected %d nodes, got %d", 1, len(ncs.nodes))
}
simulateGC(ncs, []Node{})
if len(ncs.nodes) != 0 {
t.Errorf("Expected %d nodes, got %d", 0, len(ncs.nodes))
}
}
// Make sure that GC works as expected when a child node holds the
// last reference to a parent.
func TestNodeCacheGCParent(t *testing.T) {
ncs, _, _, childNode2, _, _ :=
setupNodeCache(t, tlf.FakeID(0, tlf.Private), MasterBranch, true)
if len(ncs.nodes) != 3 {
t.Errorf("Expected %d nodes, got %d", 3, len(ncs.nodes))
}
simulateGC(ncs, []Node{childNode2})
if len(ncs.nodes) != 2 {
t.Errorf("Expected %d nodes, got %d", 2, len(ncs.nodes))
}
simulateGC(ncs, []Node{})
if len(ncs.nodes) != 0 {
t.Errorf("Expected %d nodes, got %d", 0, len(ncs.nodes))
}
}
var finalizerChan = make(chan struct{})
// Like nodeStandardFinalizer(), but sends on finalizerChan
// afterwards.
func testNodeStandardFinalizer(n *nodeStandard) {
nodeStandardFinalizer(n)
finalizerChan <- struct{}{}
}
// Make sure that making a node unreachable runs the finalizer on GC.
func TestNodeCacheGCReal(t *testing.T) {
ncs, _, childNode1, childNode2, _, _ :=
setupNodeCache(t, tlf.FakeID(0, tlf.Private), MasterBranch, true)
if len(ncs.nodes) != 3 {
t.Errorf("Expected %d nodes, got %d", 3, len(ncs.nodes))
}
runtime.SetFinalizer(childNode1, nil)
runtime.SetFinalizer(childNode1, testNodeStandardFinalizer)
childNode1 = nil
runtime.GC()
	<-finalizerChan
if len(ncs.nodes) != 2 {
t.Errorf("Expected %d nodes, got %d", 2, len(ncs.nodes))
}
// Make sure childNode2 isn't GCed until after this point.
func(interface{}) {}(childNode2)
}
type wrappedTestNode struct {
Node
wrapChildCalled bool
}
func (wtn *wrappedTestNode) WrapChild(child Node) Node {
child = wtn.Node.WrapChild(child)
wtn.wrapChildCalled = true
return child
}
func TestNodeCacheWrapChild(t *testing.T) {
ncs := newNodeCacheStandard(
FolderBranch{tlf.FakeID(0, tlf.Private), MasterBranch})
var wtn1, wtn2 *wrappedTestNode
rw1 := func(root Node) Node {
wtn1 = &wrappedTestNode{root, false}
return wtn1
}
rw2 := func(root Node) Node {
wtn2 = &wrappedTestNode{root, false}
return wtn2
}
ncs.AddRootWrapper(rw1)
ncs.AddRootWrapper(rw2)
rootPtr := BlockPointer{ID: kbfsblock.FakeID(0)}
rootName := "root"
rootNode, err := ncs.GetOrCreate(rootPtr, rootName, nil)
require.NoError(t, err)
childPtr := BlockPointer{ID: kbfsblock.FakeID(1)}
childName := "child1"
_, err = ncs.GetOrCreate(childPtr, childName, rootNode)
require.NoError(t, err)
require.True(t, wtn1.wrapChildCalled)
require.True(t, wtn2.wrapChildCalled)
}
| 1 | 19,784 | Fixed the test; @strib, want to validate that this is okay? Seems to be consistent with the new GC behavior. | keybase-kbfs | go |
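The test in this record blocks on `finalizerChan` so its assertion only runs after the collector has actually finalized `childNode1`. A minimal, self-contained sketch of that channel-synchronized finalizer pattern (all names here are illustrative, not from kbfs):

package main

import (
	"fmt"
	"runtime"
)

type node struct{ id int }

func main() {
	done := make(chan struct{})
	n := &node{id: 1}
	// The finalizer runs on the runtime's finalizer goroutine once n is
	// unreachable and a collection has noticed it; the channel send lets
	// the caller block until that has actually happened.
	runtime.SetFinalizer(n, func(freed *node) {
		fmt.Println("finalized node", freed.id)
		done <- struct{}{}
	})
	n = nil      // drop the last reference
	runtime.GC() // request a collection
	<-done       // wait for the finalizer to fire
}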
@@ -45,7 +45,7 @@ func TestValidJsonAccount(t *testing.T) {
"domain": {
"fulldomain": "fooldom",
"password": "secret",
- "subdomain": "subdoom",
+ "subdomain": "subdom",
"username": "usernom"
}
}`) | 1 | /*
Copyright 2020 The cert-manager Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package acmedns
import (
"os"
"testing"
"github.com/jetstack/cert-manager/pkg/issuer/acme/dns/util"
"github.com/stretchr/testify/assert"
)
var (
acmednsLiveTest bool
acmednsHost string
acmednsAccountJson []byte
acmednsDomain string
)
func init() {
acmednsHost = os.Getenv("ACME_DNS_HOST")
acmednsAccountJson = []byte(os.Getenv("ACME_DNS_ACCOUNTS_JSON"))
acmednsDomain = os.Getenv("ACME_DNS_DOMAIN")
if len(acmednsHost) > 0 && len(acmednsAccountJson) > 0 {
acmednsLiveTest = true
}
}
func TestValidJsonAccount(t *testing.T) {
accountJson := []byte(`{
"domain": {
"fulldomain": "fooldom",
"password": "secret",
"subdomain": "subdoom",
"username": "usernom"
}
}`)
provider, err := NewDNSProviderHostBytes("http://localhost/", accountJson, util.RecursiveNameservers)
assert.NoError(t, err, "Expected no error constructing DNSProvider")
assert.Equal(t, provider.accounts["domain"].FullDomain, "fooldom")
}
func TestNoValidJsonAccount(t *testing.T) {
accountJson := []byte(`{"duck": "quack"}`)
_, err := NewDNSProviderHostBytes("http://localhost/", accountJson, util.RecursiveNameservers)
assert.Error(t, err, "Expected error constructing DNSProvider from invalid accountJson")
}
func TestNoValidJson(t *testing.T) {
accountJson := []byte("b00m")
_, err := NewDNSProviderHostBytes("http://localhost/", accountJson, util.RecursiveNameservers)
assert.Error(t, err, "Expected error constructing DNSProvider from invalid JSON")
}
func TestLiveAcmeDnsPresent(t *testing.T) {
if !acmednsLiveTest {
t.Skip("skipping live test")
}
provider, err := NewDNSProviderHostBytes(acmednsHost, acmednsAccountJson, util.RecursiveNameservers)
assert.NoError(t, err)
// ACME-DNS requires 43 character keys or it throws a bad TXT error
err = provider.Present(acmednsDomain, "", "LG3tptA6W7T1vw4ujbmDxH2lLu6r8TUIqLZD3pzPmgE")
assert.NoError(t, err)
}
| 1 | 26,166 | Afaict, nothing cares | jetstack-cert-manager | go |
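The quip above is the point of the record: nothing asserts on the fixture's `subdomain` value, so the spelling fix is purely cosmetic. If the value were meant to be load-bearing, a test along these lines would pin it (a sketch only; the `SubDomain` field name is an assumption about the account struct, which the record never shows being read):

func TestValidJsonAccountSubdomain(t *testing.T) {
	accountJson := []byte(`{
		"domain": {
			"fulldomain": "fooldom",
			"password": "secret",
			"subdomain": "subdom",
			"username": "usernom"
		}
	}`)
	provider, err := NewDNSProviderHostBytes(
		"http://localhost/", accountJson, util.RecursiveNameservers)
	assert.NoError(t, err, "Expected no error constructing DNSProvider")
	// SubDomain is a hypothetical field name; only FullDomain is asserted
	// anywhere in this record.
	assert.Equal(t, "subdom", provider.accounts["domain"].SubDomain)
}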
@@ -507,7 +507,9 @@ func decryptMDPrivateData(ctx context.Context, codec kbfscodec.Codec,
}
}
- // Re-embed the block changes if it's needed.
+ // Re-embed the block changes if it's needed. TODO: we don't need
+ // to do this in minimal mode, since there's no node cache
+ // (KBFS-2026).
err := reembedBlockChanges(
ctx, codec, bcache, bops, rmdWithKeys.TlfID(),
&pmd, rmdWithKeys, log) | 1 | // Copyright 2016 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.
package libkbfs
import (
"fmt"
"github.com/keybase/client/go/libkb"
"github.com/keybase/client/go/logger"
"github.com/keybase/client/go/protocol/keybase1"
"github.com/keybase/kbfs/kbfscodec"
"github.com/keybase/kbfs/kbfscrypto"
"github.com/keybase/kbfs/tlf"
"github.com/pkg/errors"
"golang.org/x/net/context"
)
type mdRange struct {
start MetadataRevision
end MetadataRevision
}
func makeRekeyReadErrorHelper(
err error, kmd KeyMetadata, resolvedHandle *TlfHandle,
uid keybase1.UID, username libkb.NormalizedUsername) error {
if resolvedHandle.IsPublic() {
panic("makeRekeyReadError called on public folder")
}
// If the user is not a legitimate reader of the folder, this is a
// normal read access error.
if !resolvedHandle.IsReader(uid) {
return NewReadAccessError(resolvedHandle, username, resolvedHandle.GetCanonicalPath())
}
// Otherwise, this folder needs to be rekeyed for this device.
tlfName := resolvedHandle.GetCanonicalName()
hasKeys, hasKeyErr := kmd.HasKeyForUser(uid)
if (hasKeyErr == nil) && hasKeys {
return NeedSelfRekeyError{tlfName, err}
}
return NeedOtherRekeyError{tlfName, err}
}
func makeRekeyReadError(
ctx context.Context, err error, kbpki KBPKI, kmd KeyMetadata,
uid keybase1.UID, username libkb.NormalizedUsername) error {
h := kmd.GetTlfHandle()
resolvedHandle, resolveErr := h.ResolveAgain(ctx, kbpki)
if resolveErr != nil {
// Ignore error and pretend h is already fully
// resolved.
resolvedHandle = h
}
return makeRekeyReadErrorHelper(err, kmd, resolvedHandle, uid, username)
}
// Helper which returns nil if the md block is uninitialized or readable by
// the current user. Otherwise an appropriate read access error is returned.
func isReadableOrError(
ctx context.Context, kbpki KBPKI, md ReadOnlyRootMetadata) error {
if !md.IsInitialized() || md.IsReadable() {
return nil
}
// this should only be the case if we're a new device not yet
// added to the set of reader/writer keys.
session, err := kbpki.GetCurrentSession(ctx)
if err != nil {
return err
}
err = errors.Errorf("%s is not readable by %s (uid:%s)", md.TlfID(),
session.Name, session.UID)
return makeRekeyReadError(ctx, err, kbpki, md,
session.UID, session.Name)
}
func getMDRange(ctx context.Context, config Config, id tlf.ID, bid BranchID,
start MetadataRevision, end MetadataRevision, mStatus MergeStatus) (
rmds []ImmutableRootMetadata, err error) {
// The range is invalid. Don't treat as an error though; it just
// indicates that we don't yet know about any revisions.
if start < MetadataRevisionInitial || end < MetadataRevisionInitial {
return nil, nil
}
mdcache := config.MDCache()
var toDownload []mdRange
// Fetch one at a time, and figure out what ranges to fetch as you
// go.
minSlot := int(end-start) + 1
maxSlot := -1
for i := start; i <= end; i++ {
irmd, err := mdcache.Get(id, i, bid)
if err != nil {
if len(toDownload) == 0 ||
toDownload[len(toDownload)-1].end != i-1 {
toDownload = append(toDownload, mdRange{i, i})
}
toDownload[len(toDownload)-1].end = i
irmd = ImmutableRootMetadata{}
} else {
slot := len(rmds)
if slot < minSlot {
minSlot = slot
}
if slot > maxSlot {
maxSlot = slot
}
}
rmds = append(rmds, irmd)
}
// Try to fetch the rest from the server. TODO: parallelize me.
for _, r := range toDownload {
var fetchedRmds []ImmutableRootMetadata
switch mStatus {
case Merged:
fetchedRmds, err = config.MDOps().GetRange(
ctx, id, r.start, r.end)
case Unmerged:
fetchedRmds, err = config.MDOps().GetUnmergedRange(
ctx, id, bid, r.start, r.end)
default:
panic(fmt.Sprintf("Unknown merged type: %s", mStatus))
}
if err != nil {
return nil, err
}
for _, rmd := range fetchedRmds {
slot := int(rmd.Revision() - start)
if slot < minSlot {
minSlot = slot
}
if slot > maxSlot {
maxSlot = slot
}
rmds[slot] = rmd
if err := mdcache.Put(rmd); err != nil {
config.MakeLogger("").CDebugf(ctx, "Error putting md "+
"%d into the cache: %v", rmd.Revision(), err)
}
}
}
if minSlot > maxSlot {
return nil, nil
}
rmds = rmds[minSlot : maxSlot+1]
// check to make sure there are no holes
for i, rmd := range rmds {
if rmd == (ImmutableRootMetadata{}) {
return nil, fmt.Errorf("No %s MD found for revision %d",
mStatus, int(start)+minSlot+i)
}
}
return rmds, nil
}
// getSingleMD returns an MD that is required to exist.
func getSingleMD(ctx context.Context, config Config, id tlf.ID, bid BranchID,
rev MetadataRevision, mStatus MergeStatus) (
ImmutableRootMetadata, error) {
rmds, err := getMDRange(ctx, config, id, bid, rev, rev, mStatus)
if err != nil {
return ImmutableRootMetadata{}, err
}
if len(rmds) != 1 {
return ImmutableRootMetadata{},
fmt.Errorf("Single expected revision %d not found", rev)
}
return rmds[0], nil
}
// getMergedMDUpdates returns a slice of all the merged MDs for a TLF,
// starting from the given startRev. The returned MDs are the same
// instances that are stored in the MD cache, so they should be
// modified with care.
//
// TODO: Accept a parameter to express that we want copies of the MDs
// instead of the cached versions.
func getMergedMDUpdates(ctx context.Context, config Config, id tlf.ID,
startRev MetadataRevision) (mergedRmds []ImmutableRootMetadata, err error) {
// We don't yet know about any revisions yet, so there's no range
// to get.
if startRev < MetadataRevisionInitial {
return nil, nil
}
start := startRev
for {
end := start + maxMDsAtATime - 1 // range is inclusive
rmds, err := getMDRange(ctx, config, id, NullBranchID, start, end,
Merged)
if err != nil {
return nil, err
}
mergedRmds = append(mergedRmds, rmds...)
// TODO: limit the number of MDs we're allowed to hold in
// memory at any one time?
if len(rmds) < maxMDsAtATime {
break
}
start = end + 1
}
var uid keybase1.UID
// Check the readability of each MD. Because rekeys can append a
// MD revision with the new key, older revisions might not be
// readable until the newer revision, containing the key for this
// device, is processed.
for i, rmd := range mergedRmds {
if err := isReadableOrError(ctx, config.KBPKI(), rmd.ReadOnly()); err != nil {
// The right secret key for the given rmd's
// key generation may only be present in the
// most recent rmd.
latestRmd := mergedRmds[len(mergedRmds)-1]
if uid == keybase1.UID("") {
session, err := config.KBPKI().GetCurrentSession(ctx)
if err != nil {
return nil, err
}
uid = session.UID
}
pmd, err := decryptMDPrivateData(
ctx, config.Codec(), config.Crypto(),
config.BlockCache(), config.BlockOps(),
config.KeyManager(), uid,
rmd.GetSerializedPrivateMetadata(),
rmd, latestRmd, config.MakeLogger(""))
if err != nil {
return nil, err
}
rmdCopy, err := rmd.deepCopy(config.Codec())
if err != nil {
return nil, err
}
rmdCopy.data = pmd
// Overwrite the cached copy with the new copy
irmdCopy := MakeImmutableRootMetadata(rmdCopy,
rmd.LastModifyingWriterVerifyingKey(), rmd.MdID(),
rmd.LocalTimestamp())
if err := config.MDCache().Put(irmdCopy); err != nil {
return nil, err
}
mergedRmds[i] = irmdCopy
}
}
return mergedRmds, nil
}
// getUnmergedMDUpdates returns a slice of the unmerged MDs for a TLF
// and unmerged branch, between the merge point for that branch and
// startRev (inclusive). The returned MDs are the same instances that
// are stored in the MD cache, so they should be modified with care.
// If bid is NullBranchID, it returns an empty MD list.
//
// TODO: Accept a parameter to express that we want copies of the MDs
// instead of the cached versions.
func getUnmergedMDUpdates(ctx context.Context, config Config, id tlf.ID,
bid BranchID, startRev MetadataRevision) (
currHead MetadataRevision, unmergedRmds []ImmutableRootMetadata,
err error) {
if bid == NullBranchID {
// We're not really unmerged, so there's nothing to do.
// TODO: require the caller to avoid making this call if the
// bid isn't set (and change the mdserver behavior in that
// case as well).
return startRev, nil, nil
}
// We don't yet know about any revisions yet, so there's no range
// to get.
if startRev < MetadataRevisionInitial {
return MetadataRevisionUninitialized, nil, nil
}
// walk backwards until we find one that is merged
currHead = startRev
for {
// first look up all unmerged MD revisions older than my current head
startRev := currHead - maxMDsAtATime + 1 // (MetadataRevision is signed)
if startRev < MetadataRevisionInitial {
startRev = MetadataRevisionInitial
}
rmds, err := getMDRange(ctx, config, id, bid, startRev, currHead,
Unmerged)
if err != nil {
return MetadataRevisionUninitialized, nil, err
}
numNew := len(rmds)
// prepend to keep the ordering correct
unmergedRmds = append(rmds, unmergedRmds...)
// on the next iteration, start apply the previous root
if numNew > 0 {
currHead = rmds[0].Revision() - 1
}
if currHead < MetadataRevisionInitial {
return MetadataRevisionUninitialized, nil,
errors.New("ran out of MD updates to unstage")
}
// TODO: limit the number of MDs we're allowed to hold in
// memory at any one time?
if numNew < maxMDsAtATime {
break
}
}
return currHead, unmergedRmds, nil
}
// encryptMDPrivateData encrypts the private data of the given
// RootMetadata and makes other modifications to prepare it for
// signing (see signMD below). After this function is called, the
// MetadataID of the RootMetadata's BareRootMetadata can be computed.
func encryptMDPrivateData(
ctx context.Context, codec kbfscodec.Codec, crypto cryptoPure,
signer kbfscrypto.Signer, ekg encryptionKeyGetter, me keybase1.UID,
rmd *RootMetadata) error {
err := rmd.data.checkValid()
if err != nil {
return err
}
brmd := rmd.bareMd
privateData := rmd.data
if brmd.TlfID().IsPublic() || !brmd.IsWriterMetadataCopiedSet() {
// Record the last writer to modify this writer metadata
brmd.SetLastModifyingWriter(me)
if brmd.TlfID().IsPublic() {
// Encode the private metadata
encodedPrivateMetadata, err := codec.Encode(privateData)
if err != nil {
return err
}
brmd.SetSerializedPrivateMetadata(encodedPrivateMetadata)
} else if !brmd.IsWriterMetadataCopiedSet() {
// Encrypt and encode the private metadata
k, err := ekg.GetTLFCryptKeyForEncryption(ctx, rmd)
if err != nil {
return err
}
encryptedPrivateMetadata, err := crypto.EncryptPrivateMetadata(privateData, k)
if err != nil {
return err
}
encodedEncryptedPrivateMetadata, err := codec.Encode(encryptedPrivateMetadata)
if err != nil {
return err
}
brmd.SetSerializedPrivateMetadata(encodedEncryptedPrivateMetadata)
}
// Sign the writer metadata internally. This has to be
// done here, instead of in signMD, since the
// MetadataID may depend on it.
err := brmd.SignWriterMetadataInternally(ctx, codec, signer)
if err != nil {
return err
}
}
// Record the last user to modify this metadata
brmd.SetLastModifyingUser(me)
return nil
}
func getFileBlockForMD(ctx context.Context, bcache BlockCache, bops BlockOps,
ptr BlockPointer, tlfID tlf.ID, rmdWithKeys KeyMetadata) (
*FileBlock, error) {
// We don't have a convenient way to fetch the block from here via
// folderBlockOps, so just go directly via the
// BlockCache/BlockOps. No locking around the blocks is needed
// since these change blocks are read-only.
block, err := bcache.Get(ptr)
if err != nil {
block = NewFileBlock()
if err := bops.Get(ctx, rmdWithKeys, ptr, block, TransientEntry); err != nil {
return nil, err
}
}
fblock, ok := block.(*FileBlock)
if !ok {
return nil, NotFileBlockError{ptr, MasterBranch, path{}}
}
return fblock, nil
}
func reembedBlockChanges(ctx context.Context, codec kbfscodec.Codec,
bcache BlockCache, bops BlockOps, tlfID tlf.ID, pmd *PrivateMetadata,
rmdWithKeys KeyMetadata, log logger.Logger) error {
info := pmd.Changes.Info
if info.BlockPointer == zeroPtr {
return nil
}
// Treat the unembedded block change like a file so we can reuse
// the file reading code.
file := path{FolderBranch{tlfID, MasterBranch},
[]pathNode{{
info.BlockPointer, fmt.Sprintf("<MD with block change pointer %s>",
info.BlockPointer)}}}
getter := func(ctx context.Context, kmd KeyMetadata, ptr BlockPointer,
p path, rtype blockReqType) (*FileBlock, bool, error) {
block, err := getFileBlockForMD(ctx, bcache, bops, ptr, tlfID, kmd)
if err != nil {
return nil, false, err
}
return block, false, nil
}
cacher := func(ptr BlockPointer, block Block) error {
return nil
}
// Reading doesn't use crypto or the block splitter, so for now
// just pass in nil. Also, reading doesn't depend on the UID, so
// it's ok to be empty.
var uid keybase1.UID
fd := newFileData(file, uid, nil, nil, rmdWithKeys, getter, cacher, log)
buf, err := fd.getBytes(ctx, 0, -1)
if err != nil {
return err
}
err = codec.Decode(buf, &pmd.Changes)
if err != nil {
return err
}
// The changes block pointers are implicit ref blocks.
pmd.Changes.Ops[0].AddRefBlock(info.BlockPointer)
iptrs, err := fd.getIndirectFileBlockInfos(ctx)
if err != nil {
return err
}
for _, iptr := range iptrs {
pmd.Changes.Ops[0].AddRefBlock(iptr.BlockPointer)
}
pmd.cachedChanges.Info = info
return nil
}
// decryptMDPrivateData does not use uid if the handle is a public one.
func decryptMDPrivateData(ctx context.Context, codec kbfscodec.Codec,
crypto Crypto, bcache BlockCache, bops BlockOps,
keyGetter mdDecryptionKeyGetter, uid keybase1.UID,
serializedPrivateMetadata []byte,
rmdToDecrypt, rmdWithKeys KeyMetadata, log logger.Logger) (
PrivateMetadata, error) {
handle := rmdToDecrypt.GetTlfHandle()
var pmd PrivateMetadata
if handle.IsPublic() {
if err := codec.Decode(serializedPrivateMetadata,
&pmd); err != nil {
return PrivateMetadata{}, err
}
} else {
// decrypt the root data for non-public directories
var encryptedPrivateMetadata EncryptedPrivateMetadata
if err := codec.Decode(serializedPrivateMetadata,
&encryptedPrivateMetadata); err != nil {
return PrivateMetadata{}, err
}
k, err := keyGetter.GetTLFCryptKeyForMDDecryption(ctx,
rmdToDecrypt, rmdWithKeys)
if err != nil {
isReader := handle.IsReader(uid)
_, isSelfRekeyError := err.(NeedSelfRekeyError)
_, isOtherRekeyError := err.(NeedOtherRekeyError)
if isReader && (isOtherRekeyError || isSelfRekeyError) {
// Rekey errors are expected if this client is a
// valid folder participant but doesn't have the
// shared crypt key.
} else {
return PrivateMetadata{}, err
}
} else {
pmd, err = crypto.DecryptPrivateMetadata(
encryptedPrivateMetadata, k)
if err != nil {
return PrivateMetadata{}, err
}
}
}
// Re-embed the block changes if it's needed.
err := reembedBlockChanges(
ctx, codec, bcache, bops, rmdWithKeys.TlfID(),
&pmd, rmdWithKeys, log)
if err != nil {
return PrivateMetadata{}, err
}
return pmd, nil
}
| 1 | 16,227 | I must be missing something, but why does not having a node cache imply not needing to re-embed the block changes, in particular? Isn't it just the fact that we don't do any writes? | keybase-kbfs | go |
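The TODO in the patch implies a guard roughly like the sketch below; the `mode`/`InitMinimal` names are assumptions, and the record does not show how a mode flag would reach this function. The reviewer is probing exactly this rationale: is it the missing node cache, or the absence of local writes, that makes the re-embed unnecessary?

	// Hypothetical guard for KBFS-2026: skip re-embedding in minimal mode.
	if mode == InitMinimal {
		// With no node cache (and no local writes), the re-embedded
		// block-change data would never be consumed.
		return pmd, nil
	}
	err := reembedBlockChanges(
		ctx, codec, bcache, bops, rmdWithKeys.TlfID(),
		&pmd, rmdWithKeys, log)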
@@ -5,7 +5,7 @@ using System.Linq;
namespace Datadog.Trace.Headers
{
- internal class NameValueHeadersCollection : IHeadersCollection
+ internal readonly struct NameValueHeadersCollection : IHeadersCollection
{
private readonly NameValueCollection _headers;
| 1 | using System;
using System.Collections.Generic;
using System.Collections.Specialized;
using System.Linq;
namespace Datadog.Trace.Headers
{
internal class NameValueHeadersCollection : IHeadersCollection
{
private readonly NameValueCollection _headers;
public NameValueHeadersCollection(NameValueCollection headers)
{
_headers = headers ?? throw new ArgumentNullException(nameof(headers));
}
public IEnumerable<string> GetValues(string name)
{
return _headers.GetValues(name) ?? Enumerable.Empty<string>();
}
public void Set(string name, string value)
{
_headers.Set(name, value);
}
public void Add(string name, string value)
{
_headers.Add(name, value);
}
public void Remove(string name)
{
_headers.Remove(name);
}
}
}
| 1 | 19,699 | Are these changes from `class` to `struct` breaking if called from an older version of `Datadog.Trace.ClrProfiler.Managed`? | DataDog-dd-trace-dotnet | .cs |
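The concern is well-founded in general: changing a type from `class` to `struct` is a binary-breaking change, because the same call sites compile to different IL (a reference copy versus value-type initialization and boxing), so an assembly built against the class version typically fails at type-load/JIT time when run against the struct version. A self-contained illustration (`HeadersV1`/`HeadersV2` are stand-ins, not Datadog types):

using System;

interface IHeaders { void Set(string name, string value); }

// Old shape: a reference type.
sealed class HeadersV1 : IHeaders
{
    public void Set(string name, string value) => Console.WriteLine($"{name}={value}");
}

// New shape: a value type. Same C# source at the call site, different IL.
readonly struct HeadersV2 : IHeaders
{
    public void Set(string name, string value) => Console.WriteLine($"{name}={value}");
}

static class Demo
{
    static void Main()
    {
        IHeaders a = new HeadersV1(); // plain reference assignment
        IHeaders b = new HeadersV2(); // a boxing conversion is emitted here
        a.Set("x", "1");
        b.Set("x", "2");
    }
}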
@@ -19,9 +19,12 @@ package org.hyperledger.besu.ethereum.api.query;
import org.hyperledger.besu.ethereum.core.Address;
import org.hyperledger.besu.ethereum.core.Log;
import org.hyperledger.besu.ethereum.core.LogTopic;
+import org.hyperledger.besu.ethereum.core.LogsBloomFilter;
import java.util.Arrays;
import java.util.List;
+import java.util.Objects;
+import java.util.stream.Collectors;
import com.google.common.collect.Lists;
| 1 | /*
*
* Copyright ConsenSys AG.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*
*/
package org.hyperledger.besu.ethereum.api.query;
import org.hyperledger.besu.ethereum.core.Address;
import org.hyperledger.besu.ethereum.core.Log;
import org.hyperledger.besu.ethereum.core.LogTopic;
import java.util.Arrays;
import java.util.List;
import com.google.common.collect.Lists;
public class LogsQuery {
private final List<Address> queryAddresses;
private final List<List<LogTopic>> queryTopics;
private LogsQuery(final List<Address> addresses, final List<List<LogTopic>> topics) {
this.queryAddresses = addresses;
this.queryTopics = topics;
}
public boolean matches(final Log log) {
return matchesAddresses(log.getLogger()) && matchesTopics(log.getTopics());
}
private boolean matchesAddresses(final Address address) {
return queryAddresses.isEmpty() || queryAddresses.contains(address);
}
private boolean matchesTopics(final List<LogTopic> topics) {
if (queryTopics.isEmpty()) {
return true;
}
if (topics.size() < queryTopics.size()) {
return false;
}
for (int i = 0; i < queryTopics.size(); ++i) {
if (!matchesTopic(topics.get(i), queryTopics.get(i))) {
return false;
}
}
return true;
}
private boolean matchesTopic(final LogTopic topic, final List<LogTopic> matchCriteria) {
return matchCriteria.contains(null) || matchCriteria.contains(topic);
}
public static class Builder {
private final List<Address> queryAddresses = Lists.newArrayList();
private final List<List<LogTopic>> queryTopics = Lists.newArrayList();
public Builder address(final Address address) {
if (address != null) {
queryAddresses.add(address);
}
return this;
}
public Builder addresses(final Address... addresses) {
if (addresses != null && addresses.length > 0) {
queryAddresses.addAll(Arrays.asList(addresses));
}
return this;
}
public Builder addresses(final List<Address> addresses) {
if (addresses != null && !addresses.isEmpty()) {
queryAddresses.addAll(addresses);
}
return this;
}
public Builder topics(final List<List<LogTopic>> topics) {
if (topics != null && !topics.isEmpty()) {
queryTopics.addAll(topics);
}
return this;
}
public Builder topics(final TopicsParameter topicsParameter) {
if (topicsParameter != null) {
topics(topicsParameter.getTopics());
}
return this;
}
public LogsQuery build() {
return new LogsQuery(queryAddresses, queryTopics);
}
}
}
| 1 | 20,104 | These are just aliases now | hyperledger-besu | java |
@@ -1,4 +1,4 @@
-<% content_for :page_title, @video.title %>
+<% content_for :page_title, @video.title.html_safe %>
<% content_for :landing_page_back_link do %>
<%= link_to '← All Videos'.html_safe, '/the-weekly-iteration' %> | 1 | <% content_for :page_title, @video.title %>
<% content_for :landing_page_back_link do %>
<%= link_to '← All Videos'.html_safe, '/the-weekly-iteration' %>
<% end %>
<div class="text-box-wrapper">
<div class="text-box">
<%= render @video.preview, title: @video.title %>
<section class='video-notes'>
<h3>Notes</h3>
<%= raw(@video.notes_html) %>
</section>
</div>
<%= render "comments", video: @video %>
</div>
<aside>
<%= render "products/license", offering: @offering %>
<%= render 'products/terms', offering: @offering %>
</aside>
| 1 | 12,771 | Does this mean we can remove `raw` from `_head_contents` partial? | thoughtbot-upcase | rb |
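For context on the question: Rails implements `raw` as, in effect, a nil-tolerant wrapper around `html_safe`, so once a string is marked safe at its source, a downstream `raw` call is redundant. A sketch of that equivalence (this is the gist of the helper, not the project's code):

# Roughly what ActionView's helper does:
def raw(stringish)
  stringish.to_s.html_safe
end

# Hence, if @video.notes_html already returns an html_safe string,
# these render identically:
#   <%= raw(@video.notes_html) %>
#   <%= @video.notes_html %>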
@@ -113,7 +113,9 @@ class CapacitorSplashScreen {
`;
this.mainWindowRef.on('closed', () => {
- this.splashWindow.close();
+ if (this.splashWindow && !this.splashWindow.isDestroyed) {
+ this.splashWindow.close();
+ }
});
this.splashWindow.loadURL(`data:text/html;charset=UTF-8,${splashHtml}`, {baseURLForDataURL: `file://${rootPath}/splash_assets/`}); | 1 | const fs = require('fs');
const path = require('path');
const { app, ipcMain, BrowserWindow } = require('electron');
function getURLFileContents(path) {
console.trace();
return new Promise((resolve, reject) => {
fs.readFile(path, (err, data) => {
if(err)
reject(err);
resolve(data.toString());
});
});
}
const injectCapacitor = async function(url) {
console.warn('\nWARNING: injectCapacitor method is deprecated and will be removed in next major release. Check release notes for migration instructions\n')
try {
let urlFileContents = await getURLFileContents(url.substr(url.indexOf('://') + 3));
let pathing = path.join(url.substr(url.indexOf('://') + 3), '../../node_modules/@capacitor/electron/dist/electron-bridge.js');
urlFileContents = urlFileContents.replace('<body>', `<body><script>window.require('${pathing.replace(/\\/g,'\\\\')}')</script>`);
return 'data:text/html;charset=UTF-8,' + urlFileContents;
} catch(e) {
console.error(e);
return url;
}
};
class CapacitorSplashScreen {
/**
* @param {BrowserWindow} mainWindow
* @param {Object} splashOptions Options for customizing the splash screen.
* @param {string} splashOptions.imageFileName Name of file placed in splash_assets folder
* @param {number} splashOptions.windowWidth Width of the splash screen
* @param {number} splashOptions.windowHeight Height of the splash screen
* @param {string} splashOptions.textColor Loading text color
* @param {string} splashOptions.loadingText Loading text
* @param {number} splashOptions.textPercentageFromTop Relative distance of the loading text from top of the window
* @param {boolean} splashOptions.transparentWindow If the window should of transparent
* @param {boolean} splashOptions.autoHideLaunchSplash If auto hide the splash screen
* @param {string} splashOptions.customHtml Custom html string, if used all most of customization options will be ignored
*/
constructor(mainWindow, splashOptions) {
this.mainWindowRef = null;
this.splashWindow = null;
if(!splashOptions) {
splashOptions = {};
}
this.splashOptions = {
imageFileName: splashOptions.imageFileName || 'splash.png',
windowWidth: splashOptions.windowWidth || 400,
windowHeight: splashOptions.windowHeight || 400,
textColor: splashOptions.textColor || '#43A8FF',
loadingText: splashOptions.loadingText || 'Loading...',
textPercentageFromTop: splashOptions.textPercentageFromTop || 75,
transparentWindow: splashOptions.transparentWindow || false,
autoHideLaunchSplash: splashOptions.autoHideLaunchSplash || true,
customHtml: splashOptions.customHtml || false
};
this.mainWindowRef = mainWindow;
try {
let capConfigJson = JSON.parse(fs.readFileSync(`./capacitor.config.json`, 'utf-8'));
this.splashOptions = Object.assign(
this.splashOptions,
capConfigJson.plugins.SplashScreen
);
} catch (e) {
console.error(e.message);
}
ipcMain.on('showCapacitorSplashScreen', (event, options) => {
this.show();
if(options) {
if(options.autoHide) {
let showTime = options.showDuration || 3000;
setTimeout(() => {
this.hide();
}, showTime);
}
}
});
ipcMain.on('hideCapacitorSplashScreen', (event, options) => {
this.hide();
});
}
init(inject = true) {
let rootPath = app.getAppPath();
this.splashWindow = new BrowserWindow({
width: this.splashOptions.windowWidth,
height: this.splashOptions.windowHeight,
frame: false,
show: false,
transparent: this.splashOptions.transparentWindow,
});
let splashHtml = this.splashOptions.customHtml || `
<html style="width: 100%; height: 100%; margin: 0; overflow: hidden;">
<body style="background-image: url('./${this.splashOptions.imageFileName}'); background-position: center center; background-repeat: no-repeat; width: 100%; height: 100%; margin: 0; overflow: hidden;">
<div style="font-family: "Helvetica Neue", Helvetica, Arial, sans-serif; color: ${this.splashOptions.textColor}; position: absolute; top: ${this.splashOptions.textPercentageFromTop}%; text-align: center; font-size: 10vw; width: 100vw;>
${this.splashOptions.loadingText}
</div>
</body>
</html>
`;
this.mainWindowRef.on('closed', () => {
this.splashWindow.close();
});
this.splashWindow.loadURL(`data:text/html;charset=UTF-8,${splashHtml}`, {baseURLForDataURL: `file://${rootPath}/splash_assets/`});
this.splashWindow.webContents.on('dom-ready', async () => {
this.splashWindow.show();
setTimeout(async () => {
if (inject) {
this.mainWindowRef.loadURL(await injectCapacitor(`file://${rootPath}/app/index.html`), {baseURLForDataURL: `file://${rootPath}/app/`});
} else {
this.mainWindowRef.loadURL(`file://${rootPath}/app/index.html`);
}
}, 4500);
});
if(this.splashOptions.autoHideLaunchSplash) {
this.mainWindowRef.webContents.on('dom-ready', () => {
this.mainWindowRef.show();
this.splashWindow.hide();
});
}
}
show() {
this.splashWindow.show();
this.mainWindowRef.hide();
}
hide() {
this.mainWindowRef.show();
this.splashWindow.hide();
}
}
module.exports = {
injectCapacitor,
CapacitorSplashScreen
};
| 1 | 8,188 | `isDestroyed` is a function, not a property. This condition will always return false. | ionic-team-capacitor | js |
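In other words, `!this.splashWindow.isDestroyed` negates a function reference (always truthy), so the guard added by the patch never closes the window. The corrected check the comment implies, with the method actually invoked:

this.mainWindowRef.on('closed', () => {
  // isDestroyed() is a method on Electron's BrowserWindow; note the parentheses.
  if (this.splashWindow && !this.splashWindow.isDestroyed()) {
    this.splashWindow.close();
  }
});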
@@ -30,6 +30,8 @@ const (
Version_1_28 DockerVersion = "1.28"
Version_1_29 DockerVersion = "1.29"
Version_1_30 DockerVersion = "1.30"
+ Version_1_31 DockerVersion = "1.31"
+ Version_1_32 DockerVersion = "1.32"
)
// getKnownAPIVersions returns all of the API versions that we know about. | 1 | // Copyright 2014-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
package dockerclient
type DockerVersion string
const (
Version_1_17 DockerVersion = "1.17"
Version_1_18 DockerVersion = "1.18"
Version_1_19 DockerVersion = "1.19"
Version_1_20 DockerVersion = "1.20"
Version_1_21 DockerVersion = "1.21"
Version_1_22 DockerVersion = "1.22"
Version_1_23 DockerVersion = "1.23"
Version_1_24 DockerVersion = "1.24"
Version_1_25 DockerVersion = "1.25"
Version_1_26 DockerVersion = "1.26"
Version_1_27 DockerVersion = "1.27"
Version_1_28 DockerVersion = "1.28"
Version_1_29 DockerVersion = "1.29"
Version_1_30 DockerVersion = "1.30"
)
// getKnownAPIVersions returns all of the API versions that we know about.
// It doesn't care if the version is supported by Docker or ECS agent
func GetKnownAPIVersions() []DockerVersion {
return []DockerVersion{
Version_1_17,
Version_1_18,
Version_1_19,
Version_1_20,
Version_1_21,
Version_1_22,
Version_1_23,
Version_1_24,
Version_1_25,
Version_1_26,
Version_1_27,
Version_1_28,
Version_1_29,
Version_1_30,
}
}
| 1 | 21,165 | Why are we adding these versions? | aws-amazon-ecs-agent | go |
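For the new constants to be usable, the enumeration below them presumably gains the matching entries as well; a sketch of that corresponding change (an assumption -- the hunk shown in this record adds only the constants):

func GetKnownAPIVersions() []DockerVersion {
	return []DockerVersion{
		// ... Version_1_17 through Version_1_29 as before ...
		Version_1_30,
		Version_1_31,
		Version_1_32,
	}
}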
@@ -271,6 +271,8 @@ def process_one_user(user):
spotify.update_latest_listened_at(user.user_id, latest_listened_at)
spotify.update_last_updated(user.user_id)
+ current_app.logger.info('imported %d listens for %s' % (len(listens), str(user)))
+
def process_all_spotify_users():
""" Get a batch of users to be processed and import their Spotify plays. | 1 | #!/usr/bin/python3
import time
import listenbrainz.webserver
import json
from listenbrainz.utils import safely_import_config
safely_import_config()
from dateutil import parser
from flask import current_app, render_template
from listenbrainz.domain import spotify
from listenbrainz.webserver.views.api_tools import insert_payload, validate_listen, LISTEN_TYPE_IMPORT, LISTEN_TYPE_PLAYING_NOW
from listenbrainz.db import user as db_user
from listenbrainz.db.exceptions import DatabaseException
from spotipy import SpotifyException
from werkzeug.exceptions import BadRequest, InternalServerError, ServiceUnavailable
from brainzutils.mail import send_mail
from brainzutils import musicbrainz_db
from brainzutils.musicbrainz_db import editor as mb_editor
def notify_error(musicbrainz_row_id, error):
""" Notifies specified user via email about error during Spotify import.
Args:
musicbrainz_row_id (int): the MusicBrainz row ID of the user
error (str): a description of the error encountered.
"""
user_email = mb_editor.get_editor_by_id(musicbrainz_row_id)['email']
spotify_url = current_app.config['SERVER_ROOT_URL'] + '/profile/connect-spotify'
text = render_template('emails/spotify_import_error.txt', error=error, link=spotify_url)
send_mail(
subject='ListenBrainz Spotify Importer Error',
text=text,
recipients=[user_email],
from_name='ListenBrainz',
from_addr='noreply@'+current_app.config['MAIL_FROM_DOMAIN'],
)
def _convert_spotify_play_to_listen(play, listen_type):
""" Converts data retrieved from the Spotify API into a listen.
Args:
play (dict): a dict that represents a listen retrieved from Spotify
, this should be an "item" from the spotify response.
listen_type: the type of the listen (import or playing_now)
Returns:
listen (dict): dict that can be submitted to ListenBrainz
"""
if listen_type == LISTEN_TYPE_PLAYING_NOW:
track = play
listen = {}
else:
track = play['track']
# Spotify provides microseconds, but we only give seconds to listenbrainz
listen = {
'listened_at': int(parser.parse(play['played_at']).timestamp()),
}
album = track['album']
artists = track['artists']
album_artists = album['artists']
artist_name = ', '.join([a['name'] for a in artists])
album_artist_name = ', '.join([a['name'] for a in album_artists])
additional = {
'tracknumber': track['track_number'],
'spotify_artist_ids': [a['external_urls']['spotify'] for a in artists],
'artist_names': [a['name'] for a in artists],
'listening_from': 'spotify',
'discnumber': track['disc_number'],
'duration_ms': track['duration_ms'],
'spotify_album_id': track['album']['external_urls']['spotify'],
# Named 'release_*' because 'release_name' is an official name in the docs
'release_artist_name': album_artist_name,
'release_artist_names': [a['name'] for a in album_artists],
# Named 'album_*' because Spotify calls it album and this is spotify-specific
'spotify_album_artist_ids': [a['external_urls']['spotify'] for a in album_artists],
}
isrc = track.get('external_ids', {}).get('isrc')
spotify_url = track.get('external_urls', {}).get('spotify')
if isrc:
additional['isrc'] = isrc
if spotify_url:
additional['spotify_id'] = spotify_url
listen['track_metadata'] = {
'artist_name': artist_name,
'track_name': track['name'],
'release_name': album['name'],
'additional_info': additional,
}
return listen
def make_api_request(user, endpoint, **kwargs):
""" Make an request to the Spotify API for particular user at specified endpoint with args.
Args:
user (spotify.Spotify): the user whose plays are to be imported.
endpoint (str): the Spotify API endpoint to which the request is to be made
Returns:
the response from the spotify API
Raises:
spotify.SpotifyAPIError: if we encounter errors from the Spotify API.
spotify.SpotifyListenBrainzError: if we encounter a rate limit, even after retrying.
"""
retries = 10
delay = 1
tried_to_refresh_token = False
while retries > 0:
try:
recently_played = user.get_spotipy_client()._get(endpoint, **kwargs)
break
except SpotifyException as e:
retries -= 1
if e.http_status == 429:
# Rate Limit Problems -- the client handles these, but it can still give up
# after a certain number of retries, so we look at the header and try the
# request again, if the error is raised
time_to_sleep = e.headers.get('Retry-After', delay)
                current_app.logger.warning('Encountered a rate limit, sleeping %d seconds and trying again...', time_to_sleep)
time.sleep(time_to_sleep)
delay += 1
if retries == 0:
raise spotify.SpotifyListenBrainzError('Encountered a rate limit.')
elif e.http_status in (400, 403):
current_app.logger.critical('Error from the Spotify API for user %s: %s', str(user), str(e), exc_info=True)
raise spotify.SpotifyAPIError('Error from the Spotify API while getting listens: %s', str(e))
elif e.http_status >= 500 and e.http_status < 600:
# these errors are not our fault, most probably. so just log them and retry.
current_app.logger.error('Error while trying to get listens for user %s: %s', str(user), str(e), exc_info=True)
if retries == 0:
raise spotify.SpotifyAPIError('Error from the spotify API while getting listens: %s', str(e))
elif e.http_status == 401:
# if we get 401 Unauthorized from Spotify, that means our token might have expired.
# In that case, try to refresh the token, if there is an error even while refreshing
# give up and report to the user.
# We only try to refresh the token once, if we still get 401 after that, we give up.
if not tried_to_refresh_token:
try:
user = spotify.refresh_user_token(user)
except SpotifyException as err:
raise spotify.SpotifyAPIError('Could not authenticate with Spotify, please unlink and link your account again.')
tried_to_refresh_token = True
else:
raise spotify.SpotifyAPIError('Could not authenticate with Spotify, please unlink and link your account again.')
elif e.http_status == 404:
current_app.logger.error("404 while trying to get listens for user %s", str(user), exc_info=True)
if retries == 0:
raise spotify.SpotifyListenBrainzError("404 while trying to get listens for user %s" % str(user))
except Exception as e:
retries -= 1
current_app.logger.error('Unexpected error while getting listens: %s', str(e), exc_info=True)
if retries == 0:
raise spotify.SpotifyListenBrainzError('Unexpected error while getting listens: %s' % str(e))
return recently_played
def get_user_recently_played(user):
""" Get tracks from the current user’s recently played tracks.
"""
return make_api_request(user, 'me/player/recently-played', limit=50)
def get_user_currently_playing(user):
""" Get the user's currently playing track.
"""
return make_api_request(user, 'me/player/currently-playing')
def submit_listens_to_listenbrainz(listenbrainz_user, listens, listen_type=LISTEN_TYPE_IMPORT):
""" Submit a batch of listens to ListenBrainz
Args:
listenbrainz_user (dict): the user whose listens are to be submitted
listens (list): a list of listens to be submitted
listen_type: the type of listen (single, import, playing_now)
"""
username = listenbrainz_user['musicbrainz_id']
retries = 10
while retries >= 0:
try:
current_app.logger.debug('Submitting %d listens for user %s', len(listens), username)
insert_payload(listens, listenbrainz_user, listen_type=listen_type)
current_app.logger.debug('Submitted!')
break
except (InternalServerError, ServiceUnavailable) as e:
retries -= 1
current_app.logger.error('ISE while trying to import listens for %s: %s', username, str(e))
if retries == 0:
raise spotify.SpotifyListenBrainzError('ISE while trying to import listens: %s', str(e))
def parse_and_validate_spotify_plays(plays, listen_type):
""" Converts and validates the listens received from the Spotify API.
Args:
plays: a list of items received from Spotify
listen_type: the type of the plays (import or playing now)
Returns:
a list of valid listens to submit to ListenBrainz
"""
listens = []
for play in plays:
listen = _convert_spotify_play_to_listen(play, listen_type=listen_type)
try:
validate_listen(listen, listen_type)
listens.append(listen)
except BadRequest as e:
current_app.logger.error(str(e))
raise
return listens
def process_one_user(user):
""" Get recently played songs for this user and submit them to ListenBrainz.
Args:
user (spotify.Spotify): the user whose plays are to be imported.
Raises:
spotify.SpotifyAPIError: if we encounter errors from the Spotify API.
spotify.SpotifyListenBrainzError: if we encounter a rate limit, even after retrying.
or if we get errors while submitting the data to ListenBrainz
"""
if user.token_expired:
try:
user = spotify.refresh_user_token(user)
except spotify.SpotifyAPIError:
current_app.logger.error('Could not refresh user token from spotify', exc_info=True)
raise
listenbrainz_user = db_user.get(user.user_id)
currently_playing = get_user_currently_playing(user)
if currently_playing is not None and 'item' in currently_playing:
current_app.logger.debug('Received a currently playing track for %s', str(user))
listens = parse_and_validate_spotify_plays([currently_playing['item']], LISTEN_TYPE_PLAYING_NOW)
submit_listens_to_listenbrainz(listenbrainz_user, listens, listen_type=LISTEN_TYPE_PLAYING_NOW)
recently_played = get_user_recently_played(user)
if recently_played is not None and 'items' in recently_played:
listens = parse_and_validate_spotify_plays(recently_played['items'], LISTEN_TYPE_IMPORT)
current_app.logger.debug('Received %d tracks for %s', len(listens), str(user))
# if we don't have any new listens, return
if len(listens) == 0:
return
latest_listened_at = max(listen['listened_at'] for listen in listens)
submit_listens_to_listenbrainz(listenbrainz_user, listens, listen_type=LISTEN_TYPE_IMPORT)
# we've succeeded so update the last_updated field for this user
spotify.update_latest_listened_at(user.user_id, latest_listened_at)
spotify.update_last_updated(user.user_id)
def process_all_spotify_users():
""" Get a batch of users to be processed and import their Spotify plays.
Returns:
(success, failure) where
success: the number of users whose plays were successfully imported.
failure: the number of users for whom we faced errors while importing.
"""
try:
users = spotify.get_active_users_to_process()
except DatabaseException as e:
current_app.logger.error('Cannot get list of users due to error %s', str(e), exc_info=True)
return 0, 0
if not users:
return 0, 0
current_app.logger.info('Process %d users...' % len(users))
success = 0
failure = 0
for u in users:
t = time.time()
current_app.logger.info('Importing spotify listens for user %s', str(u))
try:
process_one_user(u)
success += 1
except spotify.SpotifyAPIError as e:
# if it is an error from the Spotify API, show the error message to the user
spotify.update_last_updated(
user_id=u.user_id,
success=False,
error_message=str(e),
)
if not current_app.config['TESTING']:
notify_error(u.musicbrainz_row_id, str(e))
failure += 1
except spotify.SpotifyListenBrainzError as e:
current_app.logger.critical('spotify_reader could not import listens: %s', str(e), exc_info=True)
failure += 1
except Exception as e:
current_app.logger.critical('spotify_reader could not import listens: %s', str(e), exc_info=True)
failure += 1
current_app.logger.info('Took a total of %.2f seconds to process user %s', time.time() - t, str(u))
current_app.logger.info('Processed %d users successfully!', success)
current_app.logger.info('Encountered errors while processing %d users.', failure)
return success, failure
def main():
app = listenbrainz.webserver.create_app()
with app.app_context():
current_app.logger.info('Spotify Reader started...')
while True:
t = time.time()
success, failure = process_all_spotify_users()
if success + failure > 0:
current_app.logger.info('All %d users in batch have been processed.', success + failure)
current_app.logger.info('Total time taken: %.2f s, average time per user: %.2f s.', time.time() - t, (time.time() - t) / (success + failure))
time.sleep(10)
if __name__ == '__main__':
main()
| 1 | 15,491 | fyi, you can do this by doing `.info("string %s %s", formatparam, formatparam2)` instead of doing a string format with `"str" % (params)`. The idea is that it'll only do the string interpolation if logging is enabled for this level, which theoretically is an optimisation, but in this case probably isn't important. | metabrainz-listenbrainz-server | py |
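A runnable contrast of the two forms from the comment; with lazy formatting, the logger skips interpolation entirely when the level is disabled:

import logging

logger = logging.getLogger(__name__)
logger.setLevel(logging.WARNING)  # INFO is disabled below this level

n_listens, user = 42, "some-user"

# Eager: the '%' interpolation runs before .info() is even called.
logger.info('imported %d listens for %s' % (n_listens, user))

# Lazy: args are passed through, and interpolation only happens if a
# handler will actually emit the record.
logger.info('imported %d listens for %s', n_listens, user)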
@@ -0,0 +1,17 @@
+<%= render "offerings/meta", offering: @offering %>
+
+<p>
+  We're adding you to the GitHub repository, and we'll redirect you as soon as it's
+  ready. You may receive an email from GitHub asking you to confirm your
+  membership, so make sure to take a quick look in your inbox.
+</p>
+
+<% content_for :javascript do %>
+ <script type="text/javascript">
+ (function () {
+ setTimeout(function reload() {
+ window.location.reload();
+ }, 5000);
+ })();
+ </script>
+<% end -%> | 1 | 1 | 11,750 | Would this work, as a one liner? `setTimeout(window.location.reload, 5000);` | thoughtbot-upcase | rb |
|
@@ -33,16 +33,16 @@ import (
"sync"
"time"
+ "crypto/tls"
"github.com/aws/amazon-ecs-agent/agent/config"
+ "github.com/aws/amazon-ecs-agent/agent/utils"
+ "github.com/aws/amazon-ecs-agent/agent/utils/cipher"
"github.com/aws/amazon-ecs-agent/agent/wsclient/wsconn"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/private/protocol/json/jsonutil"
"github.com/cihub/seelog"
"github.com/gorilla/websocket"
"github.com/pkg/errors"
- "github.com/aws/amazon-ecs-agent/agent/utils/cipher"
- "crypto/tls"
- "github.com/aws/amazon-ecs-agent/agent/utils"
)
const ( | 1 | // Copyright 2014-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
// Package wsclient wraps the generated aws-sdk-go client to provide marshalling
// and unmarshalling of data over a websocket connection in the format expected
// by backend. It allows for bidirectional communication and acts as both a
// client-and-server in terms of requests, but only as a client in terms of
// connecting.
package wsclient
import (
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"net/url"
"os"
"reflect"
"strings"
"sync"
"time"
"github.com/aws/amazon-ecs-agent/agent/config"
"github.com/aws/amazon-ecs-agent/agent/wsclient/wsconn"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/private/protocol/json/jsonutil"
"github.com/cihub/seelog"
"github.com/gorilla/websocket"
"github.com/pkg/errors"
"github.com/aws/amazon-ecs-agent/agent/utils/cipher"
"crypto/tls"
"github.com/aws/amazon-ecs-agent/agent/utils"
)
const (
// ServiceName defines the service name for the agent. This is used to sign messages
// that are sent to the backend.
ServiceName = "ecs"
// wsConnectTimeout specifies the default connection timeout to the backend.
wsConnectTimeout = 30 * time.Second
// wsHandshakeTimeout specifies the default handshake timeout for the websocket client
wsHandshakeTimeout = wsConnectTimeout
// readBufSize is the size of the read buffer for the ws connection.
readBufSize = 4096
// writeBufSize is the size of the write buffer for the ws connection.
writeBufSize = 32768
// Default NO_PROXY env var IP addresses
defaultNoProxyIP = "169.254.169.254,169.254.170.2"
errClosed = "use of closed network connection"
)
// ReceivedMessage is the intermediate message used to unmarshal a
// message from backend
type ReceivedMessage struct {
Type string `json:"type"`
Message json.RawMessage `json:"message"`
}
// RequestMessage is the intermediate message marshalled to send to backend.
type RequestMessage struct {
Type string `json:"type"`
Message json.RawMessage `json:"message"`
}
// RequestHandler would be func(*ecsacs.T for T in ecsacs.*) to be more proper, but it needs
// to be interface{} to properly capture that
type RequestHandler interface{}
// ClientServer is a combined client and server for the backend websocket connection
type ClientServer interface {
AddRequestHandler(RequestHandler)
// SetAnyRequestHandler takes a function with the signature 'func(i
// interface{})' and calls it with every message the server passes down.
// Only a single 'AnyRequestHandler' will be active at a given time for a
// ClientServer
SetAnyRequestHandler(RequestHandler)
MakeRequest(input interface{}) error
WriteMessage(input []byte) error
Connect() error
IsConnected() bool
SetConnection(conn wsconn.WebsocketConn)
Disconnect(...interface{}) error
Serve() error
SetReadDeadline(t time.Time) error
io.Closer
}
// ClientServerImpl wraps commonly used methods defined in ClientServer interface.
type ClientServerImpl struct {
// AgentConfig is the user-specified runtime configuration
AgentConfig *config.Config
// conn holds the underlying low-level websocket connection
conn wsconn.WebsocketConn
// CredentialProvider is used to retrieve AWS credentials
CredentialProvider *credentials.Credentials
// RequestHandlers is a map from message types to handler functions of the
// form:
// "FooMessage": func(message *ecsacs.FooMessage)
RequestHandlers map[string]RequestHandler
// AnyRequestHandler is a request handler that, if set, is called on every
// message with said message. It will be called before a RequestHandler is
// called. It must take a single interface{} argument.
AnyRequestHandler RequestHandler
// MakeRequestHook is an optional callback that, if set, is called on every
// generated request with the raw request body.
MakeRequestHook MakeRequestHookFunc
// URL is the full url to the backend, including path, querystring, and so on.
URL string
// RWTimeout is the duration used for setting read and write deadlines
// for the websocket connection
RWTimeout time.Duration
// writeLock needed to ensure that only one routine is writing to the socket
writeLock sync.RWMutex
ClientServer
ServiceError
TypeDecoder
}
// MakeRequestHookFunc is a function that is invoked on every generated request
// with the raw request body. MakeRequestHookFunc must return either the body
// to send or an error.
type MakeRequestHookFunc func([]byte) ([]byte, error)
// Connect opens a connection to the backend and upgrades it to a websocket. Calls to
// 'MakeRequest' can be made after calling this, but responses will not be
// receivable until 'Serve' is also called.
func (cs *ClientServerImpl) Connect() error {
seelog.Infof("Establishing a Websocket connection to %s", cs.URL)
parsedURL, err := url.Parse(cs.URL)
if err != nil {
return err
}
wsScheme, err := websocketScheme(parsedURL.Scheme)
if err != nil {
return err
}
parsedURL.Scheme = wsScheme
// NewRequest never returns an error if the url parses and we just verified
// it did above
request, _ := http.NewRequest("GET", parsedURL.String(), nil)
// Sign the request; we'll send its headers via the websocket client which includes the signature
err = utils.SignHTTPRequest(request, cs.AgentConfig.AWSRegion, ServiceName, cs.CredentialProvider, nil)
if err != nil {
return err
}
timeoutDialer := &net.Dialer{Timeout: wsConnectTimeout}
tlsConfig := &tls.Config{ServerName: parsedURL.Host, InsecureSkipVerify: cs.AgentConfig.AcceptInsecureCert}
cipher.WithSupportedCipherSuites(tlsConfig)
// Ensure that NO_PROXY gets set
noProxy := os.Getenv("NO_PROXY")
if noProxy == "" {
dockerHost, err := url.Parse(cs.AgentConfig.DockerEndpoint)
if err == nil {
dockerHost.Scheme = ""
os.Setenv("NO_PROXY", fmt.Sprintf("%s,%s", defaultNoProxyIP, dockerHost.String()))
seelog.Info("NO_PROXY set:", os.Getenv("NO_PROXY"))
} else {
seelog.Errorf("NO_PROXY unable to be set: the configured Docker endpoint is invalid.")
}
}
dialer := websocket.Dialer{
ReadBufferSize: readBufSize,
WriteBufferSize: writeBufSize,
TLSClientConfig: tlsConfig,
Proxy: http.ProxyFromEnvironment,
NetDial: timeoutDialer.Dial,
HandshakeTimeout: wsHandshakeTimeout,
}
websocketConn, httpResponse, err := dialer.Dial(parsedURL.String(), request.Header)
if httpResponse != nil {
defer httpResponse.Body.Close()
}
if err != nil {
var resp []byte
if httpResponse != nil {
var readErr error
resp, readErr = ioutil.ReadAll(httpResponse.Body)
if readErr != nil {
return fmt.Errorf("Unable to read websocket connection: " + readErr.Error() + ", " + err.Error())
}
// If there's a response, we can try to unmarshal it into one of the
// modeled error types
possibleError, _, decodeErr := DecodeData(resp, cs.TypeDecoder)
if decodeErr == nil {
return cs.NewError(possibleError)
}
}
seelog.Warnf("Error creating a websocket client: %v", err)
return errors.Wrapf(err, "websocket client: unable to dial %s response: %s",
parsedURL.Host, string(resp))
}
cs.writeLock.Lock()
defer cs.writeLock.Unlock()
cs.conn = websocketConn
seelog.Debugf("Established a Websocket connection to %s", cs.URL)
return nil
}
// IsReady gives a boolean response that informs the caller if the websocket
// connection is fully established.
func (cs *ClientServerImpl) IsReady() bool {
cs.writeLock.RLock()
defer cs.writeLock.RUnlock()
return cs.conn != nil
}
// SetConnection passes a websocket connection object into the client. This is used only in
// testing and should be avoided in non-test code.
func (cs *ClientServerImpl) SetConnection(conn wsconn.WebsocketConn) {
cs.conn = conn
}
// SetReadDeadline sets the read deadline for the websocket connection
// A read timeout results in an io error if there are any outstanding reads
// that exceed the deadline
func (cs *ClientServerImpl) SetReadDeadline(t time.Time) error {
err := cs.conn.SetReadDeadline(t)
if err == nil {
return nil
}
seelog.Warnf("Unable to set read deadline for websocket connection: %v for %s", err, cs.URL)
// If we get connection closed error from SetReadDeadline, break out of the for loop and
// return an error
if opErr, ok := err.(*net.OpError); ok && strings.Contains(opErr.Err.Error(), errClosed) {
seelog.Errorf("Stopping redundant reads on closed network connection: %s", cs.URL)
return opErr
}
	// An unhandled error has occurred while trying to extend the read deadline.
	// Try closing the connection asynchronously: we don't want to block on stale
	// connections taking too long to close. The flip side is that we might start
	// accumulating stale connections, but that still seems preferable to waiting
	// forever for the connection to close.
cs.forceCloseConnection()
return err
}
func (cs *ClientServerImpl) forceCloseConnection() {
closeChan := make(chan error)
go func() {
closeChan <- cs.Close()
}()
ctx, cancel := context.WithTimeout(context.TODO(), wsConnectTimeout)
defer cancel()
select {
case closeErr := <-closeChan:
if closeErr != nil {
seelog.Warnf("Unable to close websocket connection: %v for %s",
closeErr, cs.URL)
}
case <-ctx.Done():
if ctx.Err() != nil {
seelog.Warnf("Context canceled waiting for termination of websocket connection: %v for %s",
ctx.Err(), cs.URL)
}
}
}
// Disconnect disconnects the connection
func (cs *ClientServerImpl) Disconnect(...interface{}) error {
cs.writeLock.Lock()
defer cs.writeLock.Unlock()
if cs.conn == nil {
return fmt.Errorf("websocker client: no connection to close")
}
	// Close() in turn results in an internal flushFrame() call in gorilla,
// as the close frame needs to be sent to the server. Set the deadline
// for that as well.
if err := cs.conn.SetWriteDeadline(time.Now().Add(cs.RWTimeout)); err != nil {
seelog.Warnf("Unable to set write deadline for websocket connection: %v for %s", err, cs.URL)
}
return cs.conn.Close()
}
// AddRequestHandler adds a request handler to this client.
// A request handler *must* be a function taking a single argument, and that
// argument *must* be a pointer to a recognized 'ecsacs' struct.
// E.g. if you desired to handle messages from acs of type 'FooMessage', you
// would pass the following handler in:
// func(message *ecsacs.FooMessage)
// This function will panic if the passed in function does not have one pointer
// argument or the argument is not a recognized type.
// Additionally, the request handler will block processing of further messages
// on this connection so it's important that it return quickly.
func (cs *ClientServerImpl) AddRequestHandler(f RequestHandler) {
firstArg := reflect.TypeOf(f).In(0)
firstArgTypeStr := firstArg.Elem().Name()
recognizedTypes := cs.GetRecognizedTypes()
_, ok := recognizedTypes[firstArgTypeStr]
if !ok {
panic("AddRequestHandler called with invalid function; argument type not recognized: " + firstArgTypeStr)
}
cs.RequestHandlers[firstArgTypeStr] = f
}
// SetAnyRequestHandler passes a RequestHandler object into the client.
func (cs *ClientServerImpl) SetAnyRequestHandler(f RequestHandler) {
cs.AnyRequestHandler = f
}
// MakeRequest makes a request using the given input. Note, the input *MUST* be
// a pointer to a valid backend type that this client recognises
func (cs *ClientServerImpl) MakeRequest(input interface{}) error {
send, err := cs.CreateRequestMessage(input)
if err != nil {
return err
}
if cs.MakeRequestHook != nil {
send, err = cs.MakeRequestHook(send)
if err != nil {
return err
}
}
// Over the wire we send something like
// {"type":"AckRequest","message":{"messageId":"xyz"}}
return cs.WriteMessage(send)
}
// WriteMessage wraps the low level websocket write method with a lock
func (cs *ClientServerImpl) WriteMessage(send []byte) error {
cs.writeLock.Lock()
defer cs.writeLock.Unlock()
// This is just future proofing. Ignore the error as the gorilla websocket
// library returns 'nil' anyway for SetWriteDeadline
// https://github.com/gorilla/websocket/blob/4201258b820c74ac8e6922fc9e6b52f71fe46f8d/conn.go#L761
if err := cs.conn.SetWriteDeadline(time.Now().Add(cs.RWTimeout)); err != nil {
seelog.Warnf("Unable to set write deadline for websocket connection: %v for %s", err, cs.URL)
}
return cs.conn.WriteMessage(websocket.TextMessage, send)
}
// ConsumeMessages reads messages from the websocket connection and handles read
// messages from an active connection.
func (cs *ClientServerImpl) ConsumeMessages() error {
for {
if err := cs.SetReadDeadline(time.Now().Add(cs.RWTimeout)); err != nil {
return err
}
messageType, message, err := cs.conn.ReadMessage()
switch {
case err == nil:
if messageType != websocket.TextMessage {
				// Possibly not fatal; we'll try to process it anyway
seelog.Errorf("Unexpected messageType: %v", messageType)
}
cs.handleMessage(message)
case permissibleCloseCode(err):
seelog.Debugf("Connection closed for a valid reason: %s", err)
return io.EOF
default:
// Unexpected error occurred
seelog.Errorf("Error getting message from ws backend: error: [%v], messageType: [%v] ",
err, messageType)
return err
}
}
}
// CreateRequestMessage creates the request json message using the given input.
// Note, the input *MUST* be a pointer to a valid backend type that this
// client recognises.
func (cs *ClientServerImpl) CreateRequestMessage(input interface{}) ([]byte, error) {
msg := &RequestMessage{}
recognizedTypes := cs.GetRecognizedTypes()
for typeStr, typeVal := range recognizedTypes {
if reflect.TypeOf(input) == reflect.PtrTo(typeVal) {
msg.Type = typeStr
break
}
}
if msg.Type == "" {
return nil, &UnrecognizedWSRequestType{reflect.TypeOf(input).String()}
}
messageData, err := jsonutil.BuildJSON(input)
if err != nil {
return nil, &NotMarshallableWSRequest{msg.Type, err}
}
msg.Message = json.RawMessage(messageData)
send, err := json.Marshal(msg)
if err != nil {
return nil, &NotMarshallableWSRequest{msg.Type, err}
}
return send, nil
}
// handleMessage dispatches a message to the correct 'requestHandler' for its
// type. If no request handler is found, the message is discarded.
func (cs *ClientServerImpl) handleMessage(data []byte) {
typedMessage, typeStr, err := DecodeData(data, cs.TypeDecoder)
if err != nil {
seelog.Warnf("Unable to handle message from backend: %v", err)
return
}
seelog.Debugf("Received message of type: %s", typeStr)
if cs.AnyRequestHandler != nil {
reflect.ValueOf(cs.AnyRequestHandler).Call([]reflect.Value{reflect.ValueOf(typedMessage)})
}
if handler, ok := cs.RequestHandlers[typeStr]; ok {
reflect.ValueOf(handler).Call([]reflect.Value{reflect.ValueOf(typedMessage)})
} else {
seelog.Infof("No handler for message type: %s", typeStr)
}
}
func websocketScheme(httpScheme string) (string, error) {
// gorilla/websocket expects the websocket scheme (ws[s]://)
var wsScheme string
switch httpScheme {
case "http":
wsScheme = "ws"
case "https":
wsScheme = "wss"
default:
return "", fmt.Errorf("wsclient: unknown scheme %s", httpScheme)
}
return wsScheme, nil
}
// See https://github.com/gorilla/websocket/blob/87f6f6a22ebfbc3f89b9ccdc7fddd1b914c095f9/conn.go#L650
func permissibleCloseCode(err error) bool {
return websocket.IsCloseError(err,
websocket.CloseNormalClosure, // websocket error code 1000
websocket.CloseAbnormalClosure, // websocket error code 1006
websocket.CloseGoingAway, // websocket error code 1001
websocket.CloseInternalServerErr) // websocket error code 1011
}
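// Minimal sketch (illustrative only, not part of the original file): how
// permissibleCloseCode classifies close errors. websocket.CloseError and
// websocket.IsCloseError are real gorilla/websocket APIs; the error values
// below are fabricated for demonstration.
//
//	normal := &websocket.CloseError{Code: websocket.CloseNormalClosure, Text: "bye"}
//	policy := &websocket.CloseError{Code: websocket.ClosePolicyViolation, Text: "denied"}
//	permissibleCloseCode(normal) // true:  ConsumeMessages returns io.EOF
//	permissibleCloseCode(policy) // false: ConsumeMessages returns the error itself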
| 1 | 21,402 | Can you move `crypto/tls` into the block at the top with all the other stdlib imports? | aws-amazon-ecs-agent | go |
@@ -403,7 +403,7 @@ for steps in config['predictionSteps']:
params={'errorMetric': 'aae', 'window': 1000, 'steps': steps}))
control['metrics'].append(
MetricSpec(field=config['predictedField'], metric='multiStep',
- inferenceElement='multiStepBestPredictions',
+ inferenceElement='anomalyScore',
params={'errorMetric': 'altMAPE', 'window': 1000, 'steps': steps}))
control['metrics'].append(
MetricSpec(field=config['predictedField'], metric='trivial', | 1 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by the OPF Experiment Generator to generate the actual
description.py file by replacing $XXXXXXXX tokens with desired values.
This description.py file was generated by:
'~/nupic/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/ExpGenerator.py'
"""
from nupic.frameworks.opf.expdescriptionapi import ExperimentDescriptionAPI
from nupic.frameworks.opf.expdescriptionhelpers import (
updateConfigFromSubConfig,
applyValueGettersToContainer,
DeferredDictLookup)
from nupic.frameworks.opf.clamodelcallbacks import *
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.opfutils import (InferenceType,
InferenceElement)
from nupic.support import aggregationDivide
from nupic.frameworks.opf.opftaskdriver import (
IterationPhaseSpecLearnOnly,
IterationPhaseSpecInferOnly,
IterationPhaseSpecLearnAndInfer)
# Model Configuration Dictionary:
#
# Define the model parameters and adjust for any modifications if imported
# from a sub-experiment.
#
# These fields might be modified by a sub-experiment; this dict is passed
# between the sub-experiment and base experiment
#
#
# NOTE: Use of DEFERRED VALUE-GETTERs: dictionary fields and list elements
# within the config dictionary may be assigned futures derived from the
# ValueGetterBase class, such as DeferredDictLookup.
# This facility is particularly handy for enabling substitution of values in
# the config dictionary from other values in the config dictionary, which is
# needed by permutation.py-based experiments. These values will be resolved
# during the call to applyValueGettersToContainer(),
# which we call after the base experiment's config dictionary is updated from
# the sub-experiment. See ValueGetterBase and
# DeferredDictLookup for more details about value-getters.
#
# For each custom encoder parameter to be exposed to the sub-experiment/
# permutation overrides, define a variable in this section, using key names
# beginning with a single underscore character to avoid collisions with
# pre-defined keys (e.g., _dsEncoderFieldName2_N).
#
# Example:
# config = dict(
# _dsEncoderFieldName2_N = 70,
# _dsEncoderFieldName2_W = 5,
# dsEncoderSchema = [
# base=dict(
# fieldname='Name2', type='ScalarEncoder',
# name='Name2', minval=0, maxval=270, clipInput=True,
# n=DeferredDictLookup('_dsEncoderFieldName2_N'),
# w=DeferredDictLookup('_dsEncoderFieldName2_W')),
# ],
# )
# updateConfigFromSubConfig(config)
# applyValueGettersToContainer(config)
config = {
# Type of model that the rest of these parameters apply to.
'model': "CLA",
# Version that specifies the format of the config.
'version': 1,
# Intermediate variables used to compute fields in modelParams and also
# referenced from the control section.
'aggregationInfo': { 'days': 0,
'fields': [('consumption', 'sum')],
'hours': 1,
'microseconds': 0,
'milliseconds': 0,
'minutes': 0,
'months': 0,
'seconds': 0,
'weeks': 0,
'years': 0},
'predictAheadTime': None,
# Model parameter dictionary.
'modelParams': {
# The type of inference that this model will perform
'inferenceType': 'TemporalAnomaly',
'sensorParams': {
# Sensor diagnostic output verbosity control;
# if > 0: sensor region will print out on screen what it's sensing
# at each step 0: silent; >=1: some info; >=2: more info;
# >=3: even more info (see compute() in py/regions/RecordSensor.py)
'verbosity' : 0,
# Example:
# dsEncoderSchema = [
# DeferredDictLookup('__field_name_encoder'),
# ],
#
# (value generated from DS_ENCODER_SCHEMA)
'encoders': { 'consumption': { 'clipInput': True,
'fieldname': u'consumption',
'n': 100,
'name': u'consumption',
'type': 'AdaptiveScalarEncoder',
'w': 21},
'timestamp_dayOfWeek': { 'dayOfWeek': (21, 1),
'fieldname': u'timestamp',
'name': u'timestamp_dayOfWeek',
'type': 'DateEncoder'},
'timestamp_timeOfDay': { 'fieldname': u'timestamp',
'name': u'timestamp_timeOfDay',
'timeOfDay': (21, 1),
'type': 'DateEncoder'},
'timestamp_weekend': { 'fieldname': u'timestamp',
'name': u'timestamp_weekend',
'type': 'DateEncoder',
'weekend': 21}},
# A dictionary specifying the period for automatically-generated
# resets from a RecordSensor;
#
# None = disable automatically-generated resets (also disabled if
# all of the specified values evaluate to 0).
        #      Valid keys are any combination of the following:
# days, hours, minutes, seconds, milliseconds, microseconds, weeks
#
# Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
#
# (value generated from SENSOR_AUTO_RESET)
'sensorAutoReset' : None,
},
'spEnable': True,
'spParams': {
# SP diagnostic output verbosity control;
# 0: silent; >=1: some info; >=2: more info;
'spVerbosity' : 0,
'globalInhibition': 1,
# Number of cell columns in the cortical region (same number for
# SP and TP)
# (see also tpNCellsPerCol)
'columnCount': 2048,
'inputWidth': 0,
# SP inhibition control (absolute value);
# Maximum number of active columns in the SP region's output (when
# there are more, the weaker ones are suppressed)
'numActivePerInhArea': 40,
'seed': 1956,
# coincInputPoolPct
        #      What percent of the column's receptive field is available
        #      for potential synapses. At initialization time, we will
        #      choose coincInputPoolPct * (2*coincInputRadius+1)^2 input bits
        #      from the receptive field as potential synapse candidates.
'coincInputPoolPct': 0.5,
# The default connected threshold. Any synapse whose
# permanence value is above the connected threshold is
# a "connected synapse", meaning it can contribute to the
# cell's firing. Typical value is 0.10. Cells whose activity
# level before inhibition falls below minDutyCycleBeforeInh
# will have their own internal synPermConnectedCell
# threshold set below this default value.
# (This concept applies to both SP and TP and so 'cells'
# is correct here as opposed to 'columns')
'synPermConnected': 0.1,
'synPermActiveInc': 0.1,
'synPermInactiveDec': 0.01,
'randomSP': 1
},
# Controls whether TP is enabled or disabled;
# TP is necessary for making temporal predictions, such as predicting
# the next inputs. Without TP, the model is only capable of
# reconstructing missing sensor inputs (via SP).
'tpEnable' : True,
'tpParams': {
# TP diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
# (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py)
'verbosity': 0,
# Number of cell columns in the cortical region (same number for
# SP and TP)
# (see also tpNCellsPerCol)
'columnCount': 2048,
# The number of cells (i.e., states), allocated per column.
'cellsPerColumn': 32,
'inputWidth': 2048,
'seed': 1960,
# Temporal Pooler implementation selector (see _getTPClass in
# CLARegion.py).
'temporalImp': 'cpp',
# New Synapse formation count
# NOTE: If None, use spNumActivePerInhArea
#
# TODO: need better explanation
'newSynapseCount': 20,
# Maximum number of synapses per segment
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TP
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSynapsesPerSegment': 32,
# Maximum number of segments per cell
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TP
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSegmentsPerCell': 128,
# Initial Permanence
# TODO: need better explanation
'initialPerm': 0.21,
# Permanence Increment
'permanenceInc': 0.1,
# Permanence Decrement
# If set to None, will automatically default to tpPermanenceInc
# value.
'permanenceDec' : 0.1,
'globalDecay': 0.0,
'maxAge': 0,
# Minimum number of active synapses for a segment to be considered
# during search for the best-matching segments.
# None=use default
# Replaces: tpMinThreshold
'minThreshold': 12,
# Segment activation threshold.
# A segment is active if it has >= tpSegmentActivationThreshold
# connected synapses that are active due to infActiveState
# None=use default
# Replaces: tpActivationThreshold
'activationThreshold': 12,
'outputType': 'normal',
# "Pay Attention Mode" length. This tells the TP how many new
# elements to append to the end of a learned sequence at a time.
# Smaller values are better for datasets with short sequences,
# higher values are better for datasets with long sequences.
'pamLength': 1,
},
'clParams': {
# Classifier implementation selection.
'implementation': 'cpp',
'regionName' : 'CLAClassifierRegion',
# Classifier diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
'clVerbosity' : 0,
# This controls how fast the classifier learns/forgets. Higher values
# make it adapt faster and forget older patterns faster.
'alpha': 0.0001,
# This is set after the call to updateConfigFromSubConfig and is
# computed from the aggregationInfo and predictAheadTime.
'steps': '1,5',
},
'anomalyParams': {
'mode': 'likelihood', # pure(=default) / weighted / likelihood
'slidingWindowSize': 5, # >=0 / None
},
'trainSPNetOnlyIfRequested': False,
},
'predictionSteps': [1, 5],
'predictedField': 'consumption',
'numRecords': 4000,
}
# end of config dictionary
# Adjust base config dictionary for any modifications if imported from a
# sub-experiment
updateConfigFromSubConfig(config)
# Compute predictionSteps based on the predictAheadTime and the aggregation
# period, which may be permuted over.
if config['predictAheadTime'] is not None:
predictionSteps = int(round(aggregationDivide(
config['predictAheadTime'], config['aggregationInfo'])))
assert (predictionSteps >= 1)
config['modelParams']['clParams']['steps'] = str(predictionSteps)
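# Worked example (illustrative; the numbers are assumed, not taken from this
# experiment): with the 1-hour aggregation period configured above, a
# predictAheadTime of 5 hours divides to 5, so the classifier would be
# configured with steps='5'. Roughly:
#
#   predictionSteps = int(round(aggregationDivide(
#       dict(hours=5), config['aggregationInfo'])))   # -> 5
#
# (This assumes aggregationDivide accepts a sparse aggregation dict; the real
# call above passes the full config['predictAheadTime'] value.)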
# Adjust config by applying ValueGetterBase-derived
# futures. NOTE: this MUST be called after updateConfigFromSubConfig() in order
# to support value-getter-based substitutions from the sub-experiment (if any)
applyValueGettersToContainer(config)
control = {
# The environment that the current model is being run in
"environment": 'nupic',
# Input stream specification per py/nupic/cluster/database/StreamDef.json.
#
'dataset' : {
u'info': u'test_hotgym',
u'streams': [ { u'columns': [u'*'],
u'info': u'hotGym.csv',
u'last_record': config['numRecords'],
u'source': u'file://extra/hotgym/hotgym.csv'}],
'aggregation': config['aggregationInfo'],
u'version': 1},
# Iteration count: maximum number of iterations. Each iteration corresponds
# to one record from the (possibly aggregated) dataset. The task is
# terminated when either number of iterations reaches iterationCount or
# all records in the (possibly aggregated) database have been processed,
# whichever occurs first.
#
# iterationCount of -1 = iterate over the entire dataset
'iterationCount' : -1,
# A dictionary containing all the supplementary parameters for inference
"inferenceArgs":{'predictedField': config['predictedField'],
'predictionSteps': config['predictionSteps']},
# Metrics: A list of MetricSpecs that instantiate the metrics that are
# computed for this experiment
'metrics':[],
# Logged Metrics: A sequence of regular expressions that specify which of
# the metrics from the Inference Specifications section MUST be logged for
  # every prediction. The regexes correspond to the automatically generated
# metric labels. This is similar to the way the optimization metric is
# specified in permutations.py.
'loggedMetrics': ['.*aae.*'],
}
# Add multi-step prediction metrics
for steps in config['predictionSteps']:
control['metrics'].append(
MetricSpec(field=config['predictedField'], metric='multiStep',
inferenceElement='multiStepBestPredictions',
params={'errorMetric': 'aae', 'window': 1000, 'steps': steps}))
control['metrics'].append(
MetricSpec(field=config['predictedField'], metric='trivial',
inferenceElement='prediction',
params={'errorMetric': 'aae', 'window': 1000, 'steps': steps}))
control['metrics'].append(
MetricSpec(field=config['predictedField'], metric='multiStep',
inferenceElement='multiStepBestPredictions',
params={'errorMetric': 'altMAPE', 'window': 1000, 'steps': steps}))
control['metrics'].append(
MetricSpec(field=config['predictedField'], metric='trivial',
inferenceElement='prediction',
params={'errorMetric': 'altMAPE', 'window': 1000, 'steps': steps}))
################################################################################
################################################################################
descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
control=control)
| 1 | 17,362 | What is the motivation for this change? | numenta-nupic | py |
@@ -74,9 +74,15 @@ type Topic interface {
// Subscription receives published messages.
type Subscription interface {
// ReceiveBatch should return a batch of messages that have queued up
- // for the subscription on the server. If no messages are available
- // yet, it must block until there is at least one, or the context is
- // done.
+ // for the subscription on the server.
+ //
+ // If there is a transient failure, this method should not retry but
+ // should return a nil slice and an error. The concrete API will take
+ // care of retry logic.
+ //
+ // If the service returns no messages for some other reason, this
+ // method should return the empty slice of messages and not attempt to
+ // retry.
//
// ReceiveBatch is only called sequentially for individual
// Subscriptions. | 1 | // Copyright 2018 The Go Cloud Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package driver defines a set of interfaces that the pubsub package uses to
// interact with the underlying pubsub services.
package driver
import "context"
// AckID is the identifier of a message for purposes of acknowledgement.
type AckID interface{}
// Message is data to be published (sent) to a topic and later received from
// subscriptions on that topic.
type Message struct {
// Body contains the content of the message.
Body []byte
// Metadata has key/value pairs describing the message.
Metadata map[string]string
// AckID should be set to something identifying the message on the
// server. It may be passed to Subscription.SendAcks() to acknowledge
// the message. This field should only be set by methods implementing
// Subscription.ReceiveBatch.
AckID AckID
}
// Topic publishes messages.
type Topic interface {
// SendBatch publishes all the messages in ms. This method should
// return only after all the messages are sent, an error occurs, or the
// context is cancelled.
//
// Only the Body and (optionally) Metadata fields of the Messages in ms
// should be set by the caller of SendBatch.
//
// Only one RPC should be made to send the messages, and the returned
// error should be based on the result of that RPC. Implementations
// that send only one message at a time should return a non-nil error
// if len(ms) != 1. Such implementations should set the batch size
// to 1 in the call to pubsub.NewTopic from the OpenTopic func for
// their package.
//
// The slice ms should not be retained past the end of the call to
// SendBatch.
//
// SendBatch is only called sequentially for individual Topics.
SendBatch(ctx context.Context, ms []*Message) error
// Close should disconnect the Topic.
//
// If Close is called after a call to SendBatch begins but before it
// ends, then the call to Close should wait for the SendBatch call to
// end, and then Close should finish.
//
// If Close is called and SendBatch is called before Close finishes,
// then the call to Close should proceed and the call to SendBatch
// should fail immediately after Close returns.
Close() error
}
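// Minimal sketch (illustrative only, not part of this package): an in-memory
// Topic satisfying the SendBatch contract with a batch size of 1. Batching
// is handled by the concrete pubsub package, not the driver.
//
//	type memTopic struct{ msgs []*Message }
//
//	func (t *memTopic) SendBatch(ctx context.Context, ms []*Message) error {
//		if len(ms) != 1 { // this driver advertises a batch size of 1
//			return errors.New("memTopic: batch size must be 1")
//		}
//		t.msgs = append(t.msgs, ms[0])
//		return nil
//	}
//
//	func (t *memTopic) Close() error { return nil }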
// Subscription receives published messages.
type Subscription interface {
// ReceiveBatch should return a batch of messages that have queued up
// for the subscription on the server. If no messages are available
// yet, it must block until there is at least one, or the context is
// done.
//
// ReceiveBatch is only called sequentially for individual
// Subscriptions.
ReceiveBatch(ctx context.Context) ([]*Message, error)
// SendAcks should acknowledge the messages with the given ackIDs on
// the server so that they will not be received again for this
// subscription if the server gets the acks before their deadlines.
// This method should return only after all the ackIDs are sent, an
// error occurs, or the context is cancelled.
//
// Only one RPC should be made to send the messages, and the returned
// error should be based on the result of that RPC. Implementations
// that send only one ack at a time should return a non-nil error if
// len(ackIDs) != 1. Such implementations should set AckBatchCountThreshold to
// 1 in the call to pubsub.NewSubscription in the OpenSubscription
// func for their package.
//
// SendAcks is only called sequentially for individual Subscriptions.
SendAcks(ctx context.Context, ackIDs []AckID) error
// Close should disconnect the Subscription.
//
// If Close is called after a call to ReceiveBatch/SendAcks begins but
// before it ends, then the call to Close should wait for the other
// call to end, and then Close should finish.
//
// If Close is called and ReceiveBatch/SendAcks is called before Close
// finishes, then the call to Close should proceed and the other call
// should fail immediately after Close returns.
Close() error
}
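// Minimal sketch (illustrative only): the kind of retry loop the concrete
// pubsub package is expected to run around ReceiveBatch, per the contract
// above. isTransient is a placeholder predicate, not a real API.
//
//	for {
//		msgs, err := sub.ReceiveBatch(ctx)
//		if err != nil {
//			if isTransient(err) {
//				continue // the concrete API, not the driver, retries
//			}
//			return nil, err
//		}
//		if len(msgs) > 0 {
//			return msgs, nil
//		}
//		// Empty batch with nil error: poll again rather than treating it
//		// as a failure.
//	}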
| 1 | 12,113 | OK, so the concrete implementation will loop? But then it's important that this method waits for a while before it returns no messages. To be specific, a GCP implementation that set `ReturnImmediately` to true or false would both satisfy this requirement, but the first would cause busy-waiting. | google-go-cloud | go |
@@ -45,7 +45,10 @@ Status MatchValidator::validateImpl() {
auto matchClauseCtx = getContext<MatchClauseContext>();
matchClauseCtx->aliasesUsed = aliasesUsed;
- NG_RETURN_IF_ERROR(validatePath(matchClause->path(), *matchClauseCtx));
+ if (matchClause->path()->pathSize() > 1) {
+ return Status::SemanticError("Multi paths not supported.");
+ }
+ NG_RETURN_IF_ERROR(validatePath(matchClause->path()->path(0) /* TODO */, *matchClauseCtx));
if (matchClause->where() != nullptr) {
auto whereClauseCtx = getContext<WhereClauseContext>();
whereClauseCtx->aliasesUsed = &matchClauseCtx->aliasesGenerated; | 1 | /* Copyright (c) 2020 vesoft inc. All rights reserved.
*
* This source code is licensed under Apache 2.0 License.
*/
#include "graph/validator/MatchValidator.h"
#include "graph/planner/match/MatchSolver.h"
#include "graph/util/ExpressionUtils.h"
#include "graph/visitor/RewriteVisitor.h"
namespace nebula {
namespace graph {
MatchValidator::MatchValidator(Sentence *sentence, QueryContext *context)
: Validator(sentence, context) {
matchCtx_ = getContext<MatchAstContext>();
}
AstContext *MatchValidator::getAstContext() { return matchCtx_.get(); }
Status MatchValidator::validateImpl() {
auto *sentence = static_cast<MatchSentence *>(sentence_);
auto &clauses = sentence->clauses();
std::unordered_map<std::string, AliasType> *aliasesUsed = nullptr;
YieldColumns *prevYieldColumns = nullptr;
auto retClauseCtx = getContext<ReturnClauseContext>();
auto retYieldCtx = getContext<YieldClauseContext>();
retClauseCtx->yield = std::move(retYieldCtx);
for (size_t i = 0; i < clauses.size(); ++i) {
auto kind = clauses[i]->kind();
if (i > 0 && kind == ReadingClause::Kind::kMatch) {
return Status::SemanticError(
"Match clause is not supported to be followed by other cypher "
"clauses");
}
switch (kind) {
case ReadingClause::Kind::kMatch: {
auto *matchClause = static_cast<MatchClause *>(clauses[i].get());
if (matchClause->isOptional()) {
return Status::SemanticError("OPTIONAL MATCH not supported");
}
auto matchClauseCtx = getContext<MatchClauseContext>();
matchClauseCtx->aliasesUsed = aliasesUsed;
NG_RETURN_IF_ERROR(validatePath(matchClause->path(), *matchClauseCtx));
if (matchClause->where() != nullptr) {
auto whereClauseCtx = getContext<WhereClauseContext>();
whereClauseCtx->aliasesUsed = &matchClauseCtx->aliasesGenerated;
NG_RETURN_IF_ERROR(validateFilter(matchClause->where()->filter(), *whereClauseCtx));
matchClauseCtx->where = std::move(whereClauseCtx);
}
if (aliasesUsed) {
NG_RETURN_IF_ERROR(combineAliases(matchClauseCtx->aliasesGenerated, *aliasesUsed));
}
aliasesUsed = &matchClauseCtx->aliasesGenerated;
matchCtx_->clauses.emplace_back(std::move(matchClauseCtx));
break;
}
case ReadingClause::Kind::kUnwind: {
auto *unwindClause = static_cast<UnwindClause *>(clauses[i].get());
auto unwindClauseCtx = getContext<UnwindClauseContext>();
unwindClauseCtx->aliasesUsed = aliasesUsed;
NG_RETURN_IF_ERROR(validateUnwind(unwindClause, *unwindClauseCtx));
aliasesUsed = unwindClauseCtx->aliasesUsed;
matchCtx_->clauses.emplace_back(std::move(unwindClauseCtx));
// TODO: delete prevYieldColumns
UNUSED(prevYieldColumns);
break;
}
case ReadingClause::Kind::kWith: {
auto *withClause = static_cast<WithClause *>(clauses[i].get());
auto withClauseCtx = getContext<WithClauseContext>();
auto withYieldCtx = getContext<YieldClauseContext>();
withClauseCtx->yield = std::move(withYieldCtx);
withClauseCtx->yield->aliasesUsed = aliasesUsed;
NG_RETURN_IF_ERROR(
validateWith(withClause,
matchCtx_->clauses.empty() ? nullptr : matchCtx_->clauses.back().get(),
*withClauseCtx));
if (withClause->where() != nullptr) {
auto whereClauseCtx = getContext<WhereClauseContext>();
whereClauseCtx->aliasesUsed = &withClauseCtx->aliasesGenerated;
NG_RETURN_IF_ERROR(validateFilter(withClause->where()->filter(), *whereClauseCtx));
withClauseCtx->where = std::move(whereClauseCtx);
}
aliasesUsed = &withClauseCtx->aliasesGenerated;
prevYieldColumns = const_cast<YieldColumns *>(withClauseCtx->yield->yieldColumns);
matchCtx_->clauses.emplace_back(std::move(withClauseCtx));
break;
}
}
}
retClauseCtx->yield->aliasesUsed = aliasesUsed;
NG_RETURN_IF_ERROR(
validateReturn(sentence->ret(), matchCtx_->clauses.back().get(), *retClauseCtx));
NG_RETURN_IF_ERROR(buildOutputs(retClauseCtx->yield->yieldColumns));
matchCtx_->clauses.emplace_back(std::move(retClauseCtx));
return Status::OK();
}
Status MatchValidator::validatePath(const MatchPath *path,
MatchClauseContext &matchClauseCtx) const {
NG_RETURN_IF_ERROR(
buildNodeInfo(path, matchClauseCtx.nodeInfos, matchClauseCtx.aliasesGenerated));
NG_RETURN_IF_ERROR(
buildEdgeInfo(path, matchClauseCtx.edgeInfos, matchClauseCtx.aliasesGenerated));
NG_RETURN_IF_ERROR(buildPathExpr(path, matchClauseCtx));
return Status::OK();
}
Status MatchValidator::buildPathExpr(const MatchPath *path,
MatchClauseContext &matchClauseCtx) const {
auto *pathAlias = path->alias();
if (pathAlias == nullptr) {
return Status::OK();
}
if (!matchClauseCtx.aliasesGenerated.emplace(*pathAlias, AliasType::kPath).second) {
return Status::SemanticError("`%s': Redefined alias", pathAlias->c_str());
}
auto &nodeInfos = matchClauseCtx.nodeInfos;
auto &edgeInfos = matchClauseCtx.edgeInfos;
auto *pool = qctx_->objPool();
auto pathBuild = PathBuildExpression::make(pool);
for (size_t i = 0; i < edgeInfos.size(); ++i) {
pathBuild->add(VariablePropertyExpression::make(pool, "", nodeInfos[i].alias));
pathBuild->add(VariablePropertyExpression::make(pool, "", edgeInfos[i].alias));
}
pathBuild->add(VariablePropertyExpression::make(pool, "", nodeInfos.back().alias));
matchClauseCtx.pathBuild = std::move(pathBuild);
return Status::OK();
}
Status MatchValidator::buildNodeInfo(const MatchPath *path,
std::vector<NodeInfo> &nodeInfos,
std::unordered_map<std::string, AliasType> &aliases) const {
auto *sm = qctx_->schemaMng();
auto steps = path->steps();
auto *pool = qctx_->objPool();
nodeInfos.resize(steps + 1);
for (auto i = 0u; i <= steps; i++) {
auto *node = path->node(i);
auto alias = node->alias();
auto *props = node->props();
auto anonymous = false;
if (node->labels() != nullptr) {
auto &labels = node->labels()->labels();
for (const auto &label : labels) {
if (label != nullptr) {
auto tid = sm->toTagID(space_.id, *label->label());
if (!tid.ok()) {
return Status::SemanticError("`%s': Unknown tag", label->label()->c_str());
}
nodeInfos[i].tids.emplace_back(tid.value());
nodeInfos[i].labels.emplace_back(*label->label());
nodeInfos[i].labelProps.emplace_back(label->props());
}
}
}
if (alias.empty()) {
anonymous = true;
alias = vctx_->anonVarGen()->getVar();
}
if (!aliases.emplace(alias, AliasType::kNode).second) {
return Status::SemanticError("`%s': Redefined alias", alias.c_str());
}
Expression *filter = nullptr;
if (props != nullptr) {
auto result = makeSubFilter(alias, props);
NG_RETURN_IF_ERROR(result);
filter = result.value();
} else if (node->labels() != nullptr && !node->labels()->labels().empty()) {
const auto &labels = node->labels()->labels();
for (const auto &label : labels) {
auto result = makeSubFilter(alias, label->props(), *label->label());
NG_RETURN_IF_ERROR(result);
filter = andConnect(pool, filter, result.value());
}
}
nodeInfos[i].anonymous = anonymous;
nodeInfos[i].alias = alias;
nodeInfos[i].props = props;
nodeInfos[i].filter = filter;
}
return Status::OK();
}
Status MatchValidator::buildEdgeInfo(const MatchPath *path,
std::vector<EdgeInfo> &edgeInfos,
std::unordered_map<std::string, AliasType> &aliases) const {
auto *sm = qctx_->schemaMng();
auto steps = path->steps();
edgeInfos.resize(steps);
for (auto i = 0u; i < steps; i++) {
auto *edge = path->edge(i);
auto &types = edge->types();
auto alias = edge->alias();
auto *props = edge->props();
auto direction = edge->direction();
auto anonymous = false;
if (!types.empty()) {
for (auto &type : types) {
auto etype = sm->toEdgeType(space_.id, *type);
if (!etype.ok()) {
return Status::SemanticError("`%s': Unknown edge type", type->c_str());
}
edgeInfos[i].edgeTypes.emplace_back(etype.value());
edgeInfos[i].types.emplace_back(*type);
}
} else {
const auto allEdgesResult = matchCtx_->qctx->schemaMng()->getAllVerEdgeSchema(space_.id);
NG_RETURN_IF_ERROR(allEdgesResult);
const auto allEdges = std::move(allEdgesResult).value();
for (const auto &edgeSchema : allEdges) {
edgeInfos[i].edgeTypes.emplace_back(edgeSchema.first);
auto typeName = matchCtx_->qctx->schemaMng()->toEdgeName(space_.id, edgeSchema.first);
NG_RETURN_IF_ERROR(typeName);
edgeInfos[i].types.emplace_back(typeName.value());
}
}
auto *stepRange = edge->range();
if (stepRange != nullptr) {
NG_RETURN_IF_ERROR(validateStepRange(stepRange));
edgeInfos[i].range = stepRange;
}
if (alias.empty()) {
anonymous = true;
alias = vctx_->anonVarGen()->getVar();
}
if (!aliases.emplace(alias, AliasType::kEdge).second) {
return Status::SemanticError("`%s': Redefined alias", alias.c_str());
}
Expression *filter = nullptr;
if (props != nullptr) {
auto result = makeSubFilter(alias, props);
NG_RETURN_IF_ERROR(result);
filter = result.value();
}
edgeInfos[i].anonymous = anonymous;
edgeInfos[i].direction = direction;
edgeInfos[i].alias = alias;
edgeInfos[i].props = props;
edgeInfos[i].filter = filter;
}
return Status::OK();
}
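// Illustrative example (not executed): for a pattern like
//   -[e:like*1..3 {likeness: 90}]->
// buildEdgeInfo() records the edge type `like`, the step range [1, 3], the
// alias `e`, and a sub-filter `e.likeness == 90`. An edge pattern with no
// type expands to every edge type known in the current space.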
Status MatchValidator::validateFilter(const Expression *filter,
WhereClauseContext &whereClauseCtx) const {
auto transformRes = ExpressionUtils::filterTransform(filter);
NG_RETURN_IF_ERROR(transformRes);
whereClauseCtx.filter = transformRes.value();
auto typeStatus = deduceExprType(whereClauseCtx.filter);
NG_RETURN_IF_ERROR(typeStatus);
auto type = typeStatus.value();
if (type != Value::Type::BOOL && type != Value::Type::NULLVALUE &&
type != Value::Type::__EMPTY__) {
std::stringstream ss;
ss << "`" << filter->toString() << "', expected Boolean, "
<< "but was `" << type << "'";
return Status::SemanticError(ss.str());
}
NG_RETURN_IF_ERROR(validateAliases({whereClauseCtx.filter}, whereClauseCtx.aliasesUsed));
return Status::OK();
}
Status MatchValidator::includeExisting(const CypherClauseContextBase *cypherClauseCtx,
YieldColumns *columns) const {
if (cypherClauseCtx == nullptr) {
return Status::OK();
}
auto kind = cypherClauseCtx->kind;
if (kind != CypherClauseKind::kMatch && kind != CypherClauseKind::kUnwind &&
kind != CypherClauseKind::kWith) {
return Status::SemanticError("Must be a MATCH/UNWIND/WITH");
}
auto *pool = qctx_->objPool();
auto makeColumn = [&pool](const std::string &name) {
auto *expr = LabelExpression::make(pool, name);
return new YieldColumn(expr, name);
};
if (kind == CypherClauseKind::kMatch) {
auto matchClauseCtx = static_cast<const MatchClauseContext *>(cypherClauseCtx);
auto steps = matchClauseCtx->edgeInfos.size();
if (!matchClauseCtx->nodeInfos[0].anonymous) {
columns->addColumn(makeColumn(matchClauseCtx->nodeInfos[0].alias));
}
for (auto i = 0u; i < steps; i++) {
if (!matchClauseCtx->edgeInfos[i].anonymous) {
columns->addColumn(makeColumn(matchClauseCtx->edgeInfos[i].alias));
}
if (!matchClauseCtx->nodeInfos[i + 1].anonymous) {
columns->addColumn(makeColumn(matchClauseCtx->nodeInfos[i + 1].alias));
}
}
for (auto &aliasPair : matchClauseCtx->aliasesGenerated) {
if (aliasPair.second == AliasType::kPath) {
columns->addColumn(makeColumn(aliasPair.first));
}
}
} else if (kind == CypherClauseKind::kUnwind) {
auto unwindClauseCtx = static_cast<const UnwindClauseContext *>(cypherClauseCtx);
columns->addColumn(makeColumn(unwindClauseCtx->alias));
} else {
// kWith
auto withClauseCtx = static_cast<const WithClauseContext *>(cypherClauseCtx);
for (auto &aliasPair : withClauseCtx->aliasesGenerated) {
columns->addColumn(makeColumn(aliasPair.first));
}
}
return Status::OK();
}
Status MatchValidator::validateReturn(MatchReturn *ret,
const CypherClauseContextBase *cypherClauseCtx,
ReturnClauseContext &retClauseCtx) const {
YieldColumns *columns = saveObject(new YieldColumns());
if (ret->returnItems()->includeExisting()) {
auto status = includeExisting(cypherClauseCtx, columns);
if (!status.ok()) {
return status;
}
if (columns->empty() && !ret->returnItems()->columns()) {
return Status::SemanticError("RETURN * is not allowed when there are no variables in scope");
}
}
if (ret->returnItems()->columns()) {
for (auto *column : ret->returnItems()->columns()->columns()) {
if (ExpressionUtils::hasAny(column->expr(),
{Expression::Kind::kVertex, Expression::Kind::kEdge})) {
return Status::SemanticError(
"keywords: vertex and edge are not supported in return clause `%s'",
column->toString().c_str());
}
columns->addColumn(column->clone().release());
}
}
DCHECK(!columns->empty());
retClauseCtx.yield->yieldColumns = columns;
// Check all referencing expressions are valid
std::vector<const Expression *> exprs;
exprs.reserve(retClauseCtx.yield->yieldColumns->size());
for (auto *col : retClauseCtx.yield->yieldColumns->columns()) {
if (!retClauseCtx.yield->hasAgg_ &&
ExpressionUtils::hasAny(col->expr(), {Expression::Kind::kAggregate})) {
retClauseCtx.yield->hasAgg_ = true;
}
exprs.push_back(col->expr());
}
NG_RETURN_IF_ERROR(validateAliases(exprs, retClauseCtx.yield->aliasesUsed));
NG_RETURN_IF_ERROR(validateYield(*retClauseCtx.yield));
retClauseCtx.yield->distinct = ret->isDistinct();
auto paginationCtx = getContext<PaginationContext>();
NG_RETURN_IF_ERROR(validatePagination(ret->skip(), ret->limit(), *paginationCtx));
retClauseCtx.pagination = std::move(paginationCtx);
if (ret->orderFactors() != nullptr) {
auto orderByCtx = getContext<OrderByClauseContext>();
NG_RETURN_IF_ERROR(
validateOrderBy(ret->orderFactors(), retClauseCtx.yield->yieldColumns, *orderByCtx));
retClauseCtx.order = std::move(orderByCtx);
}
return Status::OK();
}
Status MatchValidator::validateAliases(
const std::vector<const Expression *> &exprs,
const std::unordered_map<std::string, AliasType> *aliasesUsed) const {
static const std::unordered_set<Expression::Kind> kinds = {Expression::Kind::kLabel,
Expression::Kind::kLabelAttribute,
// primitive props
Expression::Kind::kEdgeSrc,
Expression::Kind::kEdgeDst,
Expression::Kind::kEdgeRank,
Expression::Kind::kEdgeType};
for (auto *expr : exprs) {
auto refExprs = ExpressionUtils::collectAll(expr, kinds);
if (refExprs.empty()) {
continue;
}
for (auto *refExpr : refExprs) {
NG_RETURN_IF_ERROR(checkAlias(refExpr, aliasesUsed));
}
}
return Status::OK();
}
Status MatchValidator::validateStepRange(const MatchStepRange *range) const {
auto min = range->min();
auto max = range->max();
if (min > max) {
return Status::SemanticError(
"Max hop must be greater equal than min hop: %ld vs. %ld", max, min);
}
if (max == std::numeric_limits<int64_t>::max()) {
return Status::SemanticError("Cannot set maximum hop for variable length relationships");
}
if (min < 0) {
return Status::SemanticError(
"Cannot set negative steps minumum hop for variable length "
"relationships");
}
return Status::OK();
}
Status MatchValidator::validateWith(const WithClause *with,
const CypherClauseContextBase *cypherClauseCtx,
WithClauseContext &withClauseCtx) const {
YieldColumns *columns = saveObject(new YieldColumns());
if (with->returnItems()->includeExisting()) {
auto status = includeExisting(cypherClauseCtx, columns);
if (!status.ok()) {
return status;
}
}
if (with->returnItems()->columns()) {
for (auto *column : with->returnItems()->columns()->columns()) {
columns->addColumn(column->clone().release());
}
}
withClauseCtx.yield->yieldColumns = columns;
// Check all referencing expressions are valid
std::vector<const Expression *> exprs;
exprs.reserve(withClauseCtx.yield->yieldColumns->size());
for (auto *col : withClauseCtx.yield->yieldColumns->columns()) {
auto labelExprs = ExpressionUtils::collectAll(col->expr(), {Expression::Kind::kLabel});
for (auto *labelExpr : labelExprs) {
DCHECK_EQ(labelExpr->kind(), Expression::Kind::kLabel);
auto label = static_cast<const LabelExpression *>(labelExpr)->name();
if (!withClauseCtx.yield->aliasesUsed || !withClauseCtx.yield->aliasesUsed->count(label)) {
return Status::SemanticError("Variable `%s` not defined", label.c_str());
}
}
if (col->alias().empty()) {
if (col->expr()->kind() == Expression::Kind::kLabel) {
col->setAlias(col->toString());
} else {
return Status::SemanticError("Expression in WITH must be aliased (use AS)");
}
}
if (!withClauseCtx.aliasesGenerated.emplace(col->alias(), AliasType::kDefault).second) {
return Status::SemanticError("`%s': Redefined alias", col->alias().c_str());
}
if (!withClauseCtx.yield->hasAgg_ &&
ExpressionUtils::hasAny(col->expr(), {Expression::Kind::kAggregate})) {
withClauseCtx.yield->hasAgg_ = true;
}
exprs.push_back(col->expr());
}
NG_RETURN_IF_ERROR(validateAliases(exprs, withClauseCtx.yield->aliasesUsed));
NG_RETURN_IF_ERROR(validateYield(*withClauseCtx.yield));
withClauseCtx.yield->distinct = with->isDistinct();
auto paginationCtx = getContext<PaginationContext>();
NG_RETURN_IF_ERROR(validatePagination(with->skip(), with->limit(), *paginationCtx));
withClauseCtx.pagination = std::move(paginationCtx);
if (with->orderFactors() != nullptr) {
auto orderByCtx = getContext<OrderByClauseContext>();
NG_RETURN_IF_ERROR(
validateOrderBy(with->orderFactors(), withClauseCtx.yield->yieldColumns, *orderByCtx));
withClauseCtx.order = std::move(orderByCtx);
}
return Status::OK();
}
Status MatchValidator::validateUnwind(const UnwindClause *unwindClause,
UnwindClauseContext &unwindCtx) const {
if (unwindClause->alias().empty()) {
return Status::SemanticError("Expression in UNWIND must be aliased (use AS)");
}
unwindCtx.alias = unwindClause->alias();
unwindCtx.unwindExpr = unwindClause->expr()->clone();
auto labelExprs = ExpressionUtils::collectAll(unwindCtx.unwindExpr, {Expression::Kind::kLabel});
for (auto *labelExpr : labelExprs) {
DCHECK_EQ(labelExpr->kind(), Expression::Kind::kLabel);
auto label = static_cast<const LabelExpression *>(labelExpr)->name();
if (!unwindCtx.aliasesUsed || !unwindCtx.aliasesUsed->count(label)) {
return Status::SemanticError("Variable `%s` not defined", label.c_str());
}
}
unwindCtx.aliasesGenerated.emplace(unwindCtx.alias, AliasType::kDefault);
if (!unwindCtx.aliasesUsed) {
unwindCtx.aliasesUsed = &unwindCtx.aliasesGenerated;
} else if (!unwindCtx.aliasesUsed->emplace(unwindCtx.alias, AliasType::kDefault).second) {
return Status::SemanticError("Variable `%s` already declared", unwindCtx.alias.c_str());
}
return Status::OK();
}
StatusOr<Expression *> MatchValidator::makeSubFilter(const std::string &alias,
const MapExpression *map,
const std::string &label) const {
auto *pool = qctx_->objPool();
// Node has tag without property
if (!label.empty() && map == nullptr) {
auto *left = ConstantExpression::make(pool, label);
auto *args = ArgumentList::make(pool);
args->addArgument(LabelExpression::make(pool, alias));
auto *right = FunctionCallExpression::make(pool, "tags", args);
Expression *root = RelationalExpression::makeIn(pool, left, right);
return root;
}
DCHECK(map != nullptr);
auto &items = map->items();
DCHECK(!items.empty());
// TODO(dutor) Check if evaluable and evaluate
if (items[0].second->kind() != Expression::Kind::kConstant) {
return Status::SemanticError("Props must be constant: `%s'",
items[0].second->toString().c_str());
}
Expression *root = RelationalExpression::makeEQ(
pool,
LabelAttributeExpression::make(
pool, LabelExpression::make(pool, alias), ConstantExpression::make(pool, items[0].first)),
items[0].second->clone());
for (auto i = 1u; i < items.size(); i++) {
if (items[i].second->kind() != Expression::Kind::kConstant) {
return Status::SemanticError("Props must be constant: `%s'",
items[i].second->toString().c_str());
}
auto *left = root;
auto *right = RelationalExpression::makeEQ(
pool,
LabelAttributeExpression::make(pool,
LabelExpression::make(pool, alias),
ConstantExpression::make(pool, items[i].first)),
items[i].second->clone());
root = LogicalExpression::makeAnd(pool, left, right);
}
return root;
}
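// Illustrative example (not executed): for a node pattern like
//   (v:person {name: "Tom", age: 3})
// makeSubFilter("v", props) produces the filter
//   (v.name == "Tom") AND (v.age == 3)
// while a labeled node with no property map, e.g. (v:person), produces
//   "person" IN tags(v)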
/*static*/ Expression *MatchValidator::andConnect(ObjectPool *pool,
Expression *left,
Expression *right) {
if (left == nullptr) {
return right;
}
if (right == nullptr) {
return left;
}
return LogicalExpression::makeAnd(pool, left, right);
}
Status MatchValidator::combineAliases(
std::unordered_map<std::string, AliasType> &curAliases,
const std::unordered_map<std::string, AliasType> &lastAliases) const {
for (auto &aliasPair : lastAliases) {
if (!curAliases.emplace(aliasPair).second) {
return Status::SemanticError("`%s': Redefined alias", aliasPair.first.c_str());
}
}
return Status::OK();
}
Status MatchValidator::combineYieldColumns(YieldColumns *yieldColumns,
YieldColumns *prevYieldColumns) const {
auto *pool = qctx_->objPool();
const auto &prevColumns = prevYieldColumns->columns();
for (auto &column : prevColumns) {
DCHECK(!column->alias().empty());
auto *newColumn = new YieldColumn(VariablePropertyExpression::make(pool, "", column->alias()),
column->alias());
yieldColumns->addColumn(newColumn);
}
return Status::OK();
}
Status MatchValidator::validatePagination(const Expression *skipExpr,
const Expression *limitExpr,
PaginationContext &paginationCtx) const {
int64_t skip = 0;
int64_t limit = std::numeric_limits<int64_t>::max();
if (skipExpr != nullptr) {
if (!ExpressionUtils::isEvaluableExpr(skipExpr)) {
return Status::SemanticError("SKIP should be instantly evaluable");
}
QueryExpressionContext ctx;
auto value = const_cast<Expression *>(skipExpr)->eval(ctx);
if (!value.isInt()) {
return Status::SemanticError("SKIP should be of type integer");
}
if (value.getInt() < 0) {
return Status::SemanticError("SKIP should not be negative");
}
skip = value.getInt();
}
if (limitExpr != nullptr) {
if (!ExpressionUtils::isEvaluableExpr(limitExpr)) {
return Status::SemanticError("SKIP should be instantly evaluable");
}
QueryExpressionContext ctx;
auto value = const_cast<Expression *>(limitExpr)->eval(ctx);
if (!value.isInt()) {
return Status::SemanticError("LIMIT should be of type integer");
}
if (value.getInt() < 0) {
return Status::SemanticError("LIMIT should not be negative");
}
limit = value.getInt();
}
paginationCtx.skip = skip;
paginationCtx.limit = limit;
return Status::OK();
}
Status MatchValidator::validateOrderBy(const OrderFactors *factors,
const YieldColumns *yieldColumns,
OrderByClauseContext &orderByCtx) const {
if (factors != nullptr) {
std::vector<std::string> inputColList;
inputColList.reserve(yieldColumns->columns().size());
for (auto *col : yieldColumns->columns()) {
inputColList.emplace_back(col->name());
}
std::unordered_map<std::string, size_t> inputColIndices;
for (auto i = 0u; i < inputColList.size(); i++) {
if (!inputColIndices.emplace(inputColList[i], i).second) {
return Status::SemanticError("Duplicated columns not allowed: %s", inputColList[i].c_str());
}
}
for (auto &factor : factors->factors()) {
if (factor->expr()->kind() != Expression::Kind::kLabel) {
return Status::SemanticError("Only column name can be used as sort item");
}
auto &name = static_cast<const LabelExpression *>(factor->expr())->name();
auto iter = inputColIndices.find(name);
if (iter == inputColIndices.end()) {
return Status::SemanticError("Column `%s' not found", name.c_str());
}
orderByCtx.indexedOrderFactors.emplace_back(iter->second, factor->orderType());
}
}
return Status::OK();
}
Status MatchValidator::validateGroup(YieldClauseContext &yieldCtx) const {
auto cols = yieldCtx.yieldColumns->columns();
auto *pool = qctx_->objPool();
DCHECK(!cols.empty());
for (auto *col : cols) {
auto *colExpr = col->expr();
auto colOldName = col->name();
if (colExpr->kind() != Expression::Kind::kAggregate) {
auto collectAggCol = colExpr->clone();
auto aggs = ExpressionUtils::collectAll(collectAggCol, {Expression::Kind::kAggregate});
for (auto *agg : aggs) {
DCHECK_EQ(agg->kind(), Expression::Kind::kAggregate);
if (!ExpressionUtils::checkAggExpr(static_cast<const AggregateExpression *>(agg)).ok()) {
return Status::SemanticError("Aggregate function nesting is not allowed: `%s'",
colExpr->toString().c_str());
}
yieldCtx.groupItems_.emplace_back(agg->clone());
yieldCtx.needGenProject_ = true;
yieldCtx.aggOutputColumnNames_.emplace_back(agg->toString());
}
if (!aggs.empty()) {
        auto *rewrittenExpr = ExpressionUtils::rewriteAgg2VarProp(colExpr);
        yieldCtx.projCols_->addColumn(new YieldColumn(rewrittenExpr, colOldName));
yieldCtx.projOutputColumnNames_.emplace_back(colOldName);
continue;
}
}
if (colExpr->kind() == Expression::Kind::kAggregate) {
auto *aggExpr = static_cast<AggregateExpression *>(colExpr);
NG_RETURN_IF_ERROR(ExpressionUtils::checkAggExpr(aggExpr));
} else if (!ExpressionUtils::isEvaluableExpr(colExpr)) {
yieldCtx.groupKeys_.emplace_back(colExpr);
}
yieldCtx.groupItems_.emplace_back(colExpr);
yieldCtx.projCols_->addColumn(
new YieldColumn(VariablePropertyExpression::make(pool, "", colOldName), colOldName));
yieldCtx.projOutputColumnNames_.emplace_back(colOldName);
yieldCtx.aggOutputColumnNames_.emplace_back(colOldName);
}
return Status::OK();
}
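// Illustrative example (not executed): for a return column such as
//   RETURN count(n) + 1 AS cnt
// validateGroup() emits the aggregate `count(n)` as a group item and rewrites
// the yield column so the `count(n)` sub-expression becomes a variable
// property referencing the aggregation output; a later Project step then
// adds the constant 1 on top of the aggregated value.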
Status MatchValidator::validateYield(YieldClauseContext &yieldCtx) const {
auto cols = yieldCtx.yieldColumns->columns();
if (cols.empty()) {
return Status::OK();
}
yieldCtx.projCols_ = yieldCtx.qctx->objPool()->add(new YieldColumns());
if (!yieldCtx.hasAgg_) {
for (auto &col : yieldCtx.yieldColumns->columns()) {
yieldCtx.projCols_->addColumn(col->clone().release());
yieldCtx.projOutputColumnNames_.emplace_back(col->name());
}
return Status::OK();
} else {
return validateGroup(yieldCtx);
}
}
StatusOr<AliasType> MatchValidator::getAliasType(
const std::unordered_map<std::string, AliasType> *aliasesUsed, const std::string &name) const {
auto iter = aliasesUsed->find(name);
if (iter == aliasesUsed->end()) {
return Status::SemanticError("Alias used but not defined: `%s'", name.c_str());
}
return iter->second;
}
Status MatchValidator::checkAlias(
const Expression *refExpr,
const std::unordered_map<std::string, AliasType> *aliasesUsed) const {
auto kind = refExpr->kind();
AliasType aliasType = AliasType::kDefault;
switch (kind) {
case Expression::Kind::kLabel: {
auto name = static_cast<const LabelExpression *>(refExpr)->name();
auto res = getAliasType(aliasesUsed, name);
if (!res.ok()) {
return res.status();
}
return Status::OK();
}
case Expression::Kind::kLabelAttribute: {
auto name = static_cast<const LabelAttributeExpression *>(refExpr)->left()->name();
auto res = getAliasType(aliasesUsed, name);
if (!res.ok()) {
return res.status();
}
return Status::OK();
}
case Expression::Kind::kEdgeSrc: {
auto name = static_cast<const EdgeSrcIdExpression *>(refExpr)->sym();
auto res = getAliasType(aliasesUsed, name);
if (!res.ok()) {
return res.status();
}
aliasType = res.value();
switch (aliasType) {
case AliasType::kNode:
return Status::SemanticError("Vertex `%s' does not have the src attribute", name.c_str());
case AliasType::kEdge:
return Status::SemanticError("To get the src vid of the edge, use src(%s)", name.c_str());
case AliasType::kPath:
return Status::SemanticError("To get the start node of the path, use startNode(%s)",
name.c_str());
default:
return Status::SemanticError("Alias `%s' does not have the edge property src",
name.c_str());
}
}
case Expression::Kind::kEdgeDst: {
auto name = static_cast<const EdgeDstIdExpression *>(refExpr)->sym();
auto res = getAliasType(aliasesUsed, name);
if (!res.ok()) {
return res.status();
}
aliasType = res.value();
switch (aliasType) {
case AliasType::kNode:
return Status::SemanticError("Vertex `%s' does not have the dst attribute", name.c_str());
case AliasType::kEdge:
return Status::SemanticError("To get the dst vid of the edge, use dst(%s)", name.c_str());
case AliasType::kPath:
return Status::SemanticError("To get the end node of the path, use endNode(%s)",
name.c_str());
default:
return Status::SemanticError("Alias `%s' does not have the edge property dst",
name.c_str());
}
}
case Expression::Kind::kEdgeRank: {
auto name = static_cast<const EdgeRankExpression *>(refExpr)->sym();
auto res = getAliasType(aliasesUsed, name);
if (!res.ok()) {
return res.status();
}
aliasType = res.value();
switch (aliasType) {
case AliasType::kNode:
return Status::SemanticError("Vertex `%s' does not have the ranking attribute",
name.c_str());
case AliasType::kEdge:
return Status::SemanticError("To get the ranking of the edge, use rank(%s)",
name.c_str());
case AliasType::kPath:
return Status::SemanticError("Path `%s' does not have the ranking attribute",
name.c_str());
default:
return Status::SemanticError("Alias `%s' does not have the edge property ranking",
name.c_str());
}
}
case Expression::Kind::kEdgeType: {
auto name = static_cast<const EdgeTypeExpression *>(refExpr)->sym();
auto res = getAliasType(aliasesUsed, name);
if (!res.ok()) {
return res.status();
}
aliasType = res.value();
switch (aliasType) {
case AliasType::kNode:
return Status::SemanticError("Vertex `%s' does not have the type attribute",
name.c_str());
case AliasType::kEdge:
return Status::SemanticError("To get the type of the edge, use type(%s)", name.c_str());
case AliasType::kPath:
return Status::SemanticError("Path `%s' does not have the type attribute", name.c_str());
default:
return Status::SemanticError("Alias `%s' does not have the edge property ranking",
name.c_str());
}
}
    default:  // refExpr must satisfy one of the cases above and should never hit this branch
break;
}
return Status::SemanticError("Invalid expression `%s' does not contain alias",
refExpr->toString().c_str());
}
Status MatchValidator::buildOutputs(const YieldColumns *yields) {
for (auto *col : yields->columns()) {
auto colName = col->name();
auto typeStatus = deduceExprType(col->expr());
NG_RETURN_IF_ERROR(typeStatus);
auto type = typeStatus.value();
outputs_.emplace_back(colName, type);
}
return Status::OK();
}
} // namespace graph
} // namespace nebula
| 1 | 32,088 | The data structure of `MatchClauseContext` may need to be redesigned. | vesoft-inc-nebula | cpp |
@@ -236,8 +236,13 @@ func testSendReceive(t *testing.T, newHarness HarnessMaker) {
}
defer cleanup()
- want := publishN(ctx, t, top, 3)
- got := receiveN(ctx, t, sub, len(want))
+ wantChan := make(chan []*pubsub.Message)
+ gotChan := make(chan []*pubsub.Message)
+ n := 3
+ go func() { gotChan <- receiveN(ctx, t, sub, n) }()
+ go func() { wantChan <- publishN(ctx, t, top, n) }()
+ want := <-wantChan
+ got := <-gotChan
// Check that the received messages match the sent ones.
if diff := diffMessageSets(got, want); diff != "" { | 1 | // Copyright 2018 The Go Cloud Development Kit Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package drivertest provides a conformance test for implementations of
// driver.
package drivertest // import "gocloud.dev/pubsub/drivertest"
import (
"bytes"
"context"
"errors"
"sort"
"strconv"
"testing"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"gocloud.dev/internal/escape"
"gocloud.dev/internal/retry"
"gocloud.dev/pubsub"
"gocloud.dev/pubsub/driver"
"golang.org/x/sync/errgroup"
)
// Harness describes the functionality test harnesses must provide to run
// conformance tests.
type Harness interface {
// CreateTopic creates a new topic in the provider and returns a driver.Topic for testing.
// The topic may have to be removed manually if the test is abruptly terminated or the network connection fails.
CreateTopic(ctx context.Context, testName string) (dt driver.Topic, cleanup func(), err error)
// MakeNonexistentTopic makes a driver.Topic referencing a topic that
// does not exist.
MakeNonexistentTopic(ctx context.Context) (driver.Topic, error)
// CreateSubscription creates a new subscription in the provider, subscribed to the given topic, and returns
// a driver.Subscription for testing.
// The subscription may have to be cleaned up manually if the test is abruptly terminated or the network connection
// fails.
CreateSubscription(ctx context.Context, t driver.Topic, testName string) (ds driver.Subscription, cleanup func(), err error)
// MakeNonexistentSubscription makes a driver.Subscription referencing a
// subscription that does not exist.
MakeNonexistentSubscription(ctx context.Context) (driver.Subscription, error)
// Close closes resources used by the harness, but does not call Close
// on the Topics and Subscriptions generated by the Harness.
Close()
}
// HarnessMaker describes functions that construct a harness for running tests.
// It is called exactly once per test; Harness.Close() will be called when the test is complete.
type HarnessMaker func(ctx context.Context, t *testing.T) (Harness, error)
// AsTest represents a test of As functionality.
// The conformance test:
// 1. Calls TopicCheck.
// 2. Calls SubscriptionCheck.
// 3. Calls TopicErrorCheck.
// 4. Calls SubscriptionErrorCheck.
// 5. Calls MessageCheck.
type AsTest interface {
// Name should return a descriptive name for the test.
Name() string
// TopicCheck will be called to allow verification of Topic.As.
TopicCheck(t *pubsub.Topic) error
// SubscriptionCheck will be called to allow verification of Subscription.As.
SubscriptionCheck(s *pubsub.Subscription) error
// TopicErrorCheck will be called to allow verification of Topic.ErrorAs.
// The error will be the one returned from SendBatch when called with
// a non-existent topic.
TopicErrorCheck(t *pubsub.Topic, err error) error
// SubscriptionErrorCheck will be called to allow verification of
// Subscription.ErrorAs.
// The error will be the one returned from ReceiveBatch when called with
// a non-existent subscription.
SubscriptionErrorCheck(s *pubsub.Subscription, err error) error
// MessageCheck will be called to allow verification of Message.As.
MessageCheck(m *pubsub.Message) error
}
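// verifyAsFailsOnNil is an AsTest verifying that the As functions return
// false, and the ErrorAs functions panic, when passed a nil target.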
type verifyAsFailsOnNil struct{}
func (verifyAsFailsOnNil) Name() string {
return "verify As returns false when passed nil"
}
func (verifyAsFailsOnNil) TopicCheck(t *pubsub.Topic) error {
if t.As(nil) {
return errors.New("want Topic.As to return false when passed nil")
}
return nil
}
func (verifyAsFailsOnNil) SubscriptionCheck(s *pubsub.Subscription) error {
if s.As(nil) {
return errors.New("want Subscription.As to return false when passed nil")
}
return nil
}
func (verifyAsFailsOnNil) TopicErrorCheck(t *pubsub.Topic, err error) (ret error) {
defer func() {
if recover() == nil {
ret = errors.New("want Topic.ErrorAs to panic when passed nil")
}
}()
t.ErrorAs(err, nil)
return nil
}
func (verifyAsFailsOnNil) SubscriptionErrorCheck(s *pubsub.Subscription, err error) (ret error) {
defer func() {
if recover() == nil {
ret = errors.New("want Subscription.ErrorAs to panic when passed nil")
}
}()
s.ErrorAs(err, nil)
return nil
}
func (verifyAsFailsOnNil) MessageCheck(m *pubsub.Message) error {
if m.As(nil) {
return errors.New("want Message.As to return false when passed nil")
}
return nil
}
// RunConformanceTests runs conformance tests for provider implementations of pubsub.
func RunConformanceTests(t *testing.T, newHarness HarnessMaker, asTests []AsTest) {
tests := map[string]func(t *testing.T, newHarness HarnessMaker){
"TestSendReceive": testSendReceive,
"TestSendReceiveTwo": testSendReceiveTwo,
"TestErrorOnSendToClosedTopic": testErrorOnSendToClosedTopic,
"TestErrorOnReceiveFromClosedSubscription": testErrorOnReceiveFromClosedSubscription,
"TestCancelSendReceive": testCancelSendReceive,
"TestNonExistentTopicSucceedsOnOpenButFailsOnSend": testNonExistentTopicSucceedsOnOpenButFailsOnSend,
"TestNonExistentSubscriptionSucceedsOnOpenButFailsOnSend": testNonExistentSubscriptionSucceedsOnOpenButFailsOnSend,
"TestMetadata": testMetadata,
"TestNonUTF8MessageBody": testNonUTF8MessageBody,
}
for name, test := range tests {
t.Run(name, func(t *testing.T) { test(t, newHarness) })
}
asTests = append(asTests, verifyAsFailsOnNil{})
t.Run("TestAs", func(t *testing.T) {
for _, st := range asTests {
if st.Name() == "" {
t.Fatalf("AsTest.Name is required")
}
t.Run(st.Name(), func(t *testing.T) { testAs(t, newHarness, st) })
}
})
}
// RunBenchmarks runs benchmarks for provider implementations of pubsub.
func RunBenchmarks(b *testing.B, topic *pubsub.Topic, sub *pubsub.Subscription) {
b.Run("BenchmarkReceive", func(b *testing.B) {
benchmark(b, topic, sub, false)
})
b.Run("BenchmarkSend", func(b *testing.B) {
benchmark(b, topic, sub, true)
})
}
func testNonExistentTopicSucceedsOnOpenButFailsOnSend(t *testing.T, newHarness HarnessMaker) {
// Set up.
ctx := context.Background()
h, err := newHarness(ctx, t)
if err != nil {
t.Fatal(err)
}
defer h.Close()
dt, err := h.MakeNonexistentTopic(ctx)
if err != nil {
// Failure shouldn't happen for non-existent topics until messages are sent
// to them.
t.Fatalf("creating a local topic that doesn't exist on the server: %v", err)
}
top := pubsub.NewTopic(dt)
defer top.Shutdown(ctx)
m := &pubsub.Message{}
err = top.Send(ctx, m)
if err == nil {
t.Errorf("got no error for send to non-existent topic")
}
}
func testNonExistentSubscriptionSucceedsOnOpenButFailsOnReceive(t *testing.T, newHarness HarnessMaker) {
// Set up.
ctx := context.Background()
h, err := newHarness(ctx, t)
if err != nil {
t.Fatal(err)
}
defer h.Close()
ds, err := h.MakeNonexistentSubscription(ctx)
if err != nil {
t.Fatalf("failed to make non-existent subscription: %v", err)
}
sub := pubsub.NewSubscription(ds, nil)
defer sub.Shutdown(ctx)
_, err = sub.Receive(ctx)
if err == nil {
t.Errorf("got no error for send to non-existent topic")
}
}
func testSendReceive(t *testing.T, newHarness HarnessMaker) {
// Set up.
ctx := context.Background()
h, err := newHarness(ctx, t)
if err != nil {
t.Fatal(err)
}
defer h.Close()
top, sub, cleanup, err := makePair(ctx, h, t.Name())
if err != nil {
t.Fatal(err)
}
defer cleanup()
want := publishN(ctx, t, top, 3)
got := receiveN(ctx, t, sub, len(want))
// Check that the received messages match the sent ones.
if diff := diffMessageSets(got, want); diff != "" {
t.Error(diff)
}
}
// Receive from two subscriptions to the same topic.
// Verify both get all the messages.
func testSendReceiveTwo(t *testing.T, newHarness HarnessMaker) {
// Set up.
ctx := context.Background()
h, err := newHarness(ctx, t)
if err != nil {
t.Fatal(err)
}
defer h.Close()
dt, cleanup, err := h.CreateTopic(ctx, t.Name())
if err != nil {
t.Fatal(err)
}
defer cleanup()
top := pubsub.NewTopic(dt)
defer top.Shutdown(ctx)
var ss []*pubsub.Subscription
for i := 0; i < 2; i++ {
ds, cleanup, err := h.CreateSubscription(ctx, dt, t.Name())
if err != nil {
t.Fatal(err)
}
defer cleanup()
s := pubsub.NewSubscription(ds, nil)
defer s.Shutdown(ctx)
ss = append(ss, s)
}
want := publishN(ctx, t, top, 3)
for i, s := range ss {
got := receiveN(ctx, t, s, len(want))
if diff := diffMessageSets(got, want); diff != "" {
t.Errorf("sub #%d: %s", i, diff)
}
}
}
// Publish n different messages to the topic. Return the messages.
func publishN(ctx context.Context, t *testing.T, top *pubsub.Topic, n int) []*pubsub.Message {
var ms []*pubsub.Message
for i := 0; i < n; i++ {
m := &pubsub.Message{
Body: []byte(strconv.Itoa(i)),
Metadata: map[string]string{"a": strconv.Itoa(i)},
}
if err := top.Send(ctx, m); err != nil {
t.Fatal(err)
}
ms = append(ms, m)
}
return ms
}
// Receive and ack n messages from sub.
func receiveN(ctx context.Context, t *testing.T, sub *pubsub.Subscription, n int) []*pubsub.Message {
var ms []*pubsub.Message
for i := 0; i < n; i++ {
m, err := sub.Receive(ctx)
if err != nil {
t.Fatal(err)
}
ms = append(ms, m)
m.Ack()
}
return ms
}
// Find the differences between two sets of messages.
func diffMessageSets(got, want []*pubsub.Message) string {
less := func(x, y *pubsub.Message) bool { return bytes.Compare(x.Body, y.Body) < 0 }
return cmp.Diff(got, want, cmpopts.SortSlices(less), cmpopts.IgnoreUnexported(pubsub.Message{}))
}
func testErrorOnSendToClosedTopic(t *testing.T, newHarness HarnessMaker) {
// Set up.
ctx := context.Background()
h, err := newHarness(ctx, t)
if err != nil {
t.Fatal(err)
}
defer h.Close()
top, _, cleanup, err := makePair(ctx, h, t.Name())
if err != nil {
t.Fatal(err)
}
defer cleanup()
top.Shutdown(ctx)
// Check that sending to the closed topic fails.
m := &pubsub.Message{}
if err := top.Send(ctx, m); err == nil {
t.Error("top.Send returned nil, want error")
}
}
func testErrorOnReceiveFromClosedSubscription(t *testing.T, newHarness HarnessMaker) {
ctx := context.Background()
h, err := newHarness(ctx, t)
if err != nil {
t.Fatal(err)
}
defer h.Close()
_, sub, cleanup, err := makePair(ctx, h, t.Name())
if err != nil {
t.Fatal(err)
}
defer cleanup()
sub.Shutdown(ctx)
if _, err = sub.Receive(ctx); err == nil {
t.Error("sub.Receive returned nil, want error")
}
}
func testCancelSendReceive(t *testing.T, newHarness HarnessMaker) {
ctx, cancel := context.WithCancel(context.Background())
h, err := newHarness(ctx, t)
if err != nil {
t.Fatal(err)
}
defer h.Close()
top, sub, cleanup, err := makePair(ctx, h, t.Name())
if err != nil {
t.Fatal(err)
}
defer cleanup()
cancel()
m := &pubsub.Message{}
if err := top.Send(ctx, m); !isCanceled(err) {
t.Errorf("top.Send returned %v (%T), want context.Canceled", err, err)
}
if _, err := sub.Receive(ctx); !isCanceled(err) {
t.Errorf("sub.Receive returned %v (%T), want context.Canceled", err, err)
}
}
func testMetadata(t *testing.T, newHarness HarnessMaker) {
// Set up.
ctx := context.Background()
h, err := newHarness(ctx, t)
if err != nil {
t.Fatal(err)
}
defer h.Close()
weirdMetadata := map[string]string{}
for _, k := range escape.WeirdStrings {
weirdMetadata[k] = k
}
top, sub, cleanup, err := makePair(ctx, h, t.Name())
if err != nil {
t.Fatal(err)
}
defer cleanup()
m := &pubsub.Message{
Body: []byte("hello world"),
Metadata: weirdMetadata,
}
if err := top.Send(ctx, m); err != nil {
t.Fatal(err)
}
m, err = sub.Receive(ctx)
if err != nil {
t.Fatal(err)
}
m.Ack()
if diff := cmp.Diff(m.Metadata, weirdMetadata); diff != "" {
t.Fatalf("got\n%v\nwant\n%v\ndiff\n%s", m.Metadata, weirdMetadata, diff)
}
// Verify that non-UTF8 strings in metadata key or value fail.
m = &pubsub.Message{
Body: []byte("hello world"),
Metadata: map[string]string{escape.NonUTF8String: "bar"},
}
if err := top.Send(ctx, m); err == nil {
t.Error("got nil error, expected error for using non-UTF8 string as metadata key")
}
m.Metadata = map[string]string{"foo": escape.NonUTF8String}
if err := top.Send(ctx, m); err == nil {
t.Error("got nil error, expected error for using non-UTF8 string as metadata value")
}
}
func testNonUTF8MessageBody(t *testing.T, newHarness HarnessMaker) {
// Set up.
ctx := context.Background()
h, err := newHarness(ctx, t)
if err != nil {
t.Fatal(err)
}
defer h.Close()
top, sub, cleanup, err := makePair(ctx, h, t.Name())
if err != nil {
t.Fatal(err)
}
defer cleanup()
// Sort the WeirdStrings map for record/replay consistency.
var weirdStrings [][]string // [0] = key, [1] = value
for k, v := range escape.WeirdStrings {
weirdStrings = append(weirdStrings, []string{k, v})
}
sort.Slice(weirdStrings, func(i, j int) bool { return weirdStrings[i][0] < weirdStrings[j][0] })
// Construct a message body with the weird strings and some non-UTF-8 bytes.
var body []byte
for _, v := range weirdStrings {
body = append(body, []byte(v[1])...)
}
body = append(body, []byte(escape.NonUTF8String)...)
m := &pubsub.Message{Body: body}
if err := top.Send(ctx, m); err != nil {
t.Fatal(err)
}
m, err = sub.Receive(ctx)
if err != nil {
t.Fatal(err)
}
m.Ack()
if diff := cmp.Diff(m.Body, body); diff != "" {
t.Fatalf("got\n%v\nwant\n%v\ndiff\n%s", m.Body, body, diff)
}
}
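// isCanceled reports whether err is context.Canceled, either directly or
// wrapped in a *retry.ContextError.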
func isCanceled(err error) bool {
if err == context.Canceled {
return true
}
if cerr, ok := err.(*retry.ContextError); ok {
return cerr.CtxErr == context.Canceled
}
return false
}
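// makePair creates a topic and a subscription to it for the given test,
// returning both along with a single cleanup function for all resources.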
func makePair(ctx context.Context, h Harness, testName string) (*pubsub.Topic, *pubsub.Subscription, func(), error) {
dt, topicCleanup, err := h.CreateTopic(ctx, testName)
if err != nil {
return nil, nil, nil, err
}
ds, subCleanup, err := h.CreateSubscription(ctx, dt, testName)
if err != nil {
return nil, nil, nil, err
}
t := pubsub.NewTopic(dt)
s := pubsub.NewSubscription(ds, nil)
cleanup := func() {
topicCleanup()
subCleanup()
t.Shutdown(ctx)
s.Shutdown(ctx)
}
return t, s, cleanup, nil
}
// testAs tests the various As functions, using AsTest.
func testAs(t *testing.T, newHarness HarnessMaker, st AsTest) {
ctx := context.Background()
h, err := newHarness(ctx, t)
if err != nil {
t.Fatal(err)
}
defer h.Close()
top, sub, cleanup, err := makePair(ctx, h, t.Name())
if err != nil {
t.Fatal(err)
}
defer cleanup()
if err := st.TopicCheck(top); err != nil {
t.Error(err)
}
if err := st.SubscriptionCheck(sub); err != nil {
t.Error(err)
}
dt, err := h.MakeNonexistentTopic(ctx)
if err != nil {
t.Fatal(err)
}
if err := top.Send(ctx, &pubsub.Message{Body: []byte("x")}); err != nil {
t.Fatal(err)
}
m, err := sub.Receive(ctx)
if err != nil {
t.Fatal(err)
}
if err := st.MessageCheck(m); err != nil {
t.Error(err)
}
top = pubsub.NewTopic(dt)
defer top.Shutdown(ctx)
topicErr := top.Send(ctx, &pubsub.Message{})
if topicErr == nil {
t.Error("got nil expected error sending to nonexistent topic")
} else if err := st.TopicErrorCheck(top, topicErr); err != nil {
t.Error(err)
}
ds, err := h.MakeNonexistentSubscription(ctx)
if err != nil {
t.Fatal(err)
}
sub = pubsub.NewSubscription(ds, nil)
defer sub.Shutdown(ctx)
_, subErr := sub.Receive(ctx)
if subErr == nil {
t.Error("got nil expected error sending to nonexistent subscription")
} else if err := st.SubscriptionErrorCheck(sub, subErr); err != nil {
t.Error(err)
}
}
// Publishes a large number of messages to topic concurrently, and then times
// how long it takes to send (if timeSend is true) or receive (if timeSend
// is false) them all.
func benchmark(b *testing.B, topic *pubsub.Topic, sub *pubsub.Subscription, timeSend bool) {
attrs := map[string]string{"label": "value"}
body := []byte("hello, world")
const (
nMessages = 1000
concurrencySend = 10
concurrencyReceive = 10
)
if nMessages%concurrencySend != 0 || nMessages%concurrencyReceive != 0 {
b.Fatal("nMessages must be divisible by # of sending/receiving goroutines")
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
if !timeSend {
b.StopTimer()
}
if err := publishNConcurrently(topic, nMessages, concurrencySend, attrs, body); err != nil {
b.Fatalf("publishing: %v", err)
}
b.Logf("published %d messages", nMessages)
if timeSend {
b.StopTimer()
} else {
b.StartTimer()
}
if err := receiveNConcurrently(sub, nMessages, concurrencyReceive); err != nil {
b.Fatalf("receiving: %v", err)
}
b.SetBytes(nMessages * 1e6)
b.Log("MB/s is actually number of messages received per second")
if timeSend {
b.StartTimer()
}
}
}
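// publishNConcurrently sends nMessages copies of a message with the given
// attributes and body to topic, using nGoroutines senders.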
func publishNConcurrently(topic *pubsub.Topic, nMessages, nGoroutines int, attrs map[string]string, body []byte) error {
return runConcurrently(nMessages, nGoroutines, func(ctx context.Context) error {
return topic.Send(ctx, &pubsub.Message{Metadata: attrs, Body: body})
})
}
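// receiveNConcurrently receives and acks nMessages from sub, using
// nGoroutines receivers.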
func receiveNConcurrently(sub *pubsub.Subscription, nMessages, nGoroutines int) error {
return runConcurrently(nMessages, nGoroutines, func(ctx context.Context) error {
m, err := sub.Receive(ctx)
if err != nil {
return err
}
m.Ack()
return nil
})
}
// Call function f n times concurrently, using g goroutines. g must divide n.
// Wait until all calls complete. If any fail, cancel the remaining ones.
func runConcurrently(n, g int, f func(context.Context) error) error {
gr, ctx := errgroup.WithContext(context.Background())
ng := n / g
for i := 0; i < g; i++ {
gr.Go(func() error {
for j := 0; j < ng; j++ {
if err := f(ctx); err != nil {
return err
}
}
return nil
})
}
return gr.Wait()
}
| 1 | 14,826 | You say on the issue that something has to be receiving while the messages are being published. But here it's possible that the publish goroutine starts, and maybe even completes, before Receive is ever called. | google-go-cloud | go |
@@ -50,7 +50,7 @@ class ProposalsController < ApplicationController
end
def approve
- approval = proposal.existing_approval_for(current_user)
+ approval = proposal.existing_step_for(current_user)
approval.update_attributes!(completer: current_user)
approval.approve!
flash[:success] = "You have approved #{proposal.public_id}." | 1 | class ProposalsController < ApplicationController
include TokenAuth
skip_before_action :authenticate_user!, only: [:approve]
skip_before_action :check_disabled_client, only: [:approve]
# TODO use Policy for all actions
before_action ->{authorize proposal}, only: [:show, :cancel, :cancel_form, :history]
before_action :needs_token_on_get, only: :approve
before_action :validate_access, only: :approve
helper_method :display_status
add_template_helper ProposalsHelper
rescue_from Pundit::NotAuthorizedError, with: :auth_errors
def show
@proposal = proposal.decorate
end
def index
@CLOSED_PROPOSAL_LIMIT = 10
@pending_data = listing.pending
@pending_review_data = listing.pending_review
@approved_data = listing.approved.alter_query{ |rel| rel.limit(@CLOSED_PROPOSAL_LIMIT) }
@cancelled_data = listing.cancelled
end
def archive
@proposals_data = listing.closed
end
def cancel_form
@proposal = proposal.decorate
end
def cancel
if params[:reason_input].present?
comments = "Request cancelled with comments: " + params[:reason_input]
proposal.cancel!
proposal.comments.create!(comment_text: comments, user: current_user)
flash[:success] = "Your request has been cancelled"
redirect_to proposal_path(proposal)
Dispatcher.new.deliver_cancellation_emails(proposal, params[:reason_input])
else
redirect_to(
cancel_form_proposal_path(params[:id]),
alert: "A reason for cancellation is required. Please indicate why this request needs to be cancelled."
)
end
end
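# Marks the current user's step complete and approves it. This action is
# reachable without login via a tokenized link (see TokenAuth above).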
def approve
approval = proposal.existing_approval_for(current_user)
approval.update_attributes!(completer: current_user)
approval.approve!
flash[:success] = "You have approved #{proposal.public_id}."
redirect_to proposal
end
def query
query_listing = listing
@proposals_data = query_listing.query
@text = params[:text]
@start_date = query_listing.start_date
@end_date = query_listing.end_date
end
def history
@container = Query::Proposal::Versions.new(proposal).container
@container.set_state_from_params(params)
end
protected
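# Memoized lookup of the proposal addressed by params[:id].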
def proposal
@cached_proposal ||= Proposal.find(params[:id])
end
def auth_errors(exception)
if ['cancel','cancel_form'].include?(params[:action])
redirect_to proposal_path, alert: exception.message
else
super
end
end
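# Builds the proposal listing query for the current user from request params.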
def listing
Query::Proposal::Listing.new(current_user, params)
end
end
| 1 | 16,210 | how do we know that the step in question is an approval type step? | 18F-C2 | rb |