Dataset columns, with the per-column statistics reported by the dataset viewer:

- patch: string, length 17 to 31.2k characters (the unified diff under review)
- y: int64, constant 1 across the rows shown
- oldf: string, length 0 to 2.21M characters (file contents before the patch)
- idx: int64, constant 1 across the rows shown
- id: int64, values 4.29k to 68.4k
- msg: string, length 8 to 843 characters (the reviewer's comment)
- proj: categorical string, 212 distinct values
- lang: categorical string, 9 distinct values

Sample records follow, one labeled field per line.
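A row of this dump can be modeled as a plain record type. Below is a minimal Go sketch inferred from the column statistics above; the struct and field names are illustrative, not part of the dataset's own tooling:

```go
package main

import "fmt"

// Row models one record of this dump. Field meanings are inferred from the
// column statistics: patch is a unified diff, oldf the pre-change file
// contents, msg the reviewer's comment, y a binary label.
type Row struct {
	Patch string // unified diff under review
	Y     int64  // label (1 in every record shown here)
	Oldf  string // full file contents before the patch
	Idx   int64  // per-example index
	ID    int64  // numeric example id
	Msg   string // human review comment on the patch
	Proj  string // source project, e.g. "antrea-io-antrea"
	Lang  string // language tag, e.g. "go", "py"
}

func main() {
	r := Row{Y: 1, Msg: "Is it called somewhere?", Proj: "antrea-io-antrea", Lang: "go"}
	fmt.Printf("%s/%s label=%d msg=%q\n", r.Proj, r.Lang, r.Y, r.Msg)
}
```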
patch:
```diff
@@ -36,3 +36,17 @@ TWO_ENABLED = {'scanners': [
     {'name': 'cloudsql_acl', 'enabled': False},
     {'name': 'iam_policy', 'enabled': True}
 ]}
+
+NONEXISTENT_ENABLED = {'scanners': [
+    {'name': 'bigquery', 'enabled': False},
+    {'name': 'bucket_acl', 'enabled': True},
+    {'name': 'cloudsql_acl', 'enabled': False},
+    {'name': 'non_exist_scanner', 'enabled': True}
+]}
+
+ALL_EXIST = {'scanners': [
+    {'name': 'bigquery', 'enabled': True},
+    {'name': 'bucket_acl', 'enabled': True},
+    {'name': 'cloudsql_acl', 'enabled': True},
+    {'name': 'iam_policy', 'enabled': True}
+]}
```
y: 1
oldf:
```python
# Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Fake runnable scanners."""

ALL_ENABLED = {'scanners': [
    {'name': 'bigquery', 'enabled': True},
    {'name': 'bucket_acl', 'enabled': True},
    {'name': 'cloudsql_acl', 'enabled': True},
    {'name': 'iam_policy', 'enabled': True}
]}

ALL_DISABLED = {'scanners': []}

ONE_ENABLED = {'scanners': [
    {'name': 'bigquery', 'enabled': False},
    {'name': 'bucket_acl', 'enabled': False},
    {'name': 'cloudsql_acl', 'enabled': False},
    {'name': 'iam_policy', 'enabled': True}
]}

TWO_ENABLED = {'scanners': [
    {'name': 'bigquery', 'enabled': False},
    {'name': 'bucket_acl', 'enabled': True},
    {'name': 'cloudsql_acl', 'enabled': False},
    {'name': 'iam_policy', 'enabled': True}
]}
```
idx: 1
id: 32440
msg: "This is not being used anymore, so it can be removed."
proj: forseti-security-forseti-security
lang: py
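The review comment above flags the newly added fixtures as dead code. For orientation, a scanner runner typically consumes such a config by selecting only the entries whose flag is set. The following is a hedged Go sketch of that general pattern, not Forseti's actual implementation (the type and function names are invented for illustration):

```go
package main

import "fmt"

// Scanner mirrors one entry of the fixtures above: a name plus an enabled flag.
type Scanner struct {
	Name    string
	Enabled bool
}

// enabledNames returns the names of the scanners whose flag is set,
// which is the subset a scanner runner would actually execute.
func enabledNames(scanners []Scanner) []string {
	var names []string
	for _, s := range scanners {
		if s.Enabled {
			names = append(names, s.Name)
		}
	}
	return names
}

func main() {
	// Same shape as the TWO_ENABLED fixture in the record above.
	twoEnabled := []Scanner{
		{"bigquery", false},
		{"bucket_acl", true},
		{"cloudsql_acl", false},
		{"iam_policy", true},
	}
	fmt.Println(enabledNames(twoEnabled)) // [bucket_acl iam_policy]
}
```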
patch:
```diff
@@ -127,6 +127,17 @@ type PodIPs struct {
     ipStrings []string
 }
 
+func (p PodIPs) String() string {
+    res := ""
+    if p.ipv4 != nil {
+        res += fmt.Sprintf("IPv4: %s, ", p.ipv4.String())
+    }
+    if p.ipv6 != nil {
+        res += fmt.Sprintf("IPv6: %s, ", p.ipv6.String())
+    }
+    return fmt.Sprintf("%sIP strings: %s", res, strings.Join(p.ipStrings, ", "))
+}
+
 func (p *PodIPs) hasSameIP(p1 *PodIPs) bool {
     if len(p.ipStrings) == 0 && len(p1.ipStrings) == 0 {
         return true
```
y: 1
oldf:
```go
// Copyright 2019 Antrea Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package e2e

import (
    "bytes"
    "context"
    "encoding/json"
    "fmt"
    "math/rand"
    "net"
    "os"
    "path/filepath"
    "regexp"
    "strconv"
    "strings"
    "time"

    corev1 "k8s.io/api/core/v1"
    networkingv1 "k8s.io/api/networking/v1"
    "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/apimachinery/pkg/util/intstr"
    "k8s.io/apimachinery/pkg/util/sets"
    "k8s.io/apimachinery/pkg/util/wait"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/kubernetes/scheme"
    restclient "k8s.io/client-go/rest"
    "k8s.io/client-go/tools/clientcmd"
    "k8s.io/client-go/tools/remotecommand"
    aggregatorclientset "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset"

    "github.com/vmware-tanzu/antrea/pkg/agent/config"
    crdclientset "github.com/vmware-tanzu/antrea/pkg/client/clientset/versioned"
    secv1alpha1 "github.com/vmware-tanzu/antrea/pkg/client/clientset/versioned/typed/security/v1alpha1"
    "github.com/vmware-tanzu/antrea/test/e2e/providers"
)

const (
    defaultTimeout = 90 * time.Second

    // antreaNamespace is the K8s Namespace in which all Antrea resources are running.
    antreaNamespace      string = "kube-system"
    antreaConfigVolume   string = "antrea-config"
    antreaDaemonSet      string = "antrea-agent"
    antreaDeployment     string = "antrea-controller"
    antreaDefaultGW      string = "antrea-gw0"
    testNamespace        string = "antrea-test"
    busyboxContainerName string = "busybox"
    ovsContainerName     string = "antrea-ovs"
    agentContainerName   string = "antrea-agent"
    antreaYML            string = "antrea.yml"
    antreaIPSecYML       string = "antrea-ipsec.yml"
    antreaCovYML         string = "antrea-coverage.yml"
    antreaIPSecCovYML    string = "antrea-ipsec-coverage.yml"
    defaultBridgeName    string = "br-int"
    monitoringNamespace  string = "monitoring"

    antreaControllerCovBinary string = "antrea-controller-coverage"
    antreaAgentCovBinary      string = "antrea-agent-coverage"
    antreaControllerCovFile   string = "antrea-controller.cov.out"
    antreaAgentCovFile        string = "antrea-agent.cov.out"

    nameSuffixLength int = 8
)

type ClusterNode struct {
    idx  int // 0 for master Node
    name string
}

type ClusterInfo struct {
    numWorkerNodes   int
    numNodes         int
    podV4NetworkCIDR string
    podV6NetworkCIDR string
    svcV4NetworkCIDR string
    svcV6NetworkCIDR string
    masterNodeName   string
    nodes            map[int]ClusterNode
}

var clusterInfo ClusterInfo

type TestOptions struct {
    providerName        string
    providerConfigPath  string
    logsExportDir       string
    logsExportOnSuccess bool
    withBench           bool
    enableCoverage      bool
    coverageDir         string
}

var testOptions TestOptions

var provider providers.ProviderInterface

// TestData stores the state required for each test case.
type TestData struct {
    kubeConfig         *restclient.Config
    clientset          kubernetes.Interface
    aggregatorClient   aggregatorclientset.Interface
    securityClient     secv1alpha1.SecurityV1alpha1Interface
    crdClient          crdclientset.Interface
    logsDirForTestCase string
}

var testData *TestData

type PodIPs struct {
    ipv4      *net.IP
    ipv6      *net.IP
    ipStrings []string
}

func (p *PodIPs) hasSameIP(p1 *PodIPs) bool {
    if len(p.ipStrings) == 0 && len(p1.ipStrings) == 0 {
        return true
    }
    if p.ipv4 != nil && p1.ipv4 != nil && p.ipv4.Equal(*(p1.ipv4)) {
        return true
    }
    if p.ipv6 != nil && p1.ipv6 != nil && p.ipv6.Equal(*(p1.ipv6)) {
        return true
    }
    return false
}

// workerNodeName returns an empty string if there is no worker Node with the provided idx
// (including if idx is 0, which is reserved for the master Node)
func workerNodeName(idx int) string {
    if idx == 0 { // master Node
        return ""
    }
    if node, ok := clusterInfo.nodes[idx]; !ok {
        return ""
    } else {
        return node.name
    }
}

func masterNodeName() string {
    return clusterInfo.masterNodeName
}

// nodeName returns an empty string if there is no Node with the provided idx. If idx is 0, the name
// of the master Node will be returned.
func nodeName(idx int) string {
    if node, ok := clusterInfo.nodes[idx]; !ok {
        return ""
    } else {
        return node.name
    }
}

func initProvider() error {
    providerFactory := map[string]func(string) (providers.ProviderInterface, error){
        "vagrant": providers.NewVagrantProvider,
        "kind":    providers.NewKindProvider,
        "remote":  providers.NewRemoteProvider,
    }
    if fn, ok := providerFactory[testOptions.providerName]; ok {
        if newProvider, err := fn(testOptions.providerConfigPath); err != nil {
            return err
        } else {
            provider = newProvider
        }
    } else {
        return fmt.Errorf("unknown provider '%s'", testOptions.providerName)
    }
    return nil
}

// RunCommandOnNode is a convenience wrapper around the Provider interface RunCommandOnNode method.
func RunCommandOnNode(nodeName string, cmd string) (code int, stdout string, stderr string, err error) {
    return provider.RunCommandOnNode(nodeName, cmd)
}

func collectClusterInfo() error {
    // retrieve Node information
    nodes, err := testData.clientset.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
    if err != nil {
        return fmt.Errorf("error when listing cluster Nodes: %v", err)
    }
    workerIdx := 1
    clusterInfo.nodes = make(map[int]ClusterNode)
    for _, node := range nodes.Items {
        isMaster := func() bool {
            _, ok := node.Labels["node-role.kubernetes.io/master"]
            return ok
        }()

        var nodeIdx int
        // If multiple master Nodes (HA), we will select the last one in the list
        if isMaster {
            nodeIdx = 0
            clusterInfo.masterNodeName = node.Name
        } else {
            nodeIdx = workerIdx
            workerIdx++
        }

        clusterInfo.nodes[nodeIdx] = ClusterNode{
            idx:  nodeIdx,
            name: node.Name,
        }
    }
    if clusterInfo.masterNodeName == "" {
        return fmt.Errorf("error when listing cluster Nodes: master Node not found")
    }
    clusterInfo.numNodes = workerIdx
    clusterInfo.numWorkerNodes = clusterInfo.numNodes - 1

    retrieveCIDRs := func(cmd string, reg string) ([]string, error) {
        res := make([]string, 2)
        rc, stdout, _, err := RunCommandOnNode(clusterInfo.masterNodeName, cmd)
        if err != nil || rc != 0 {
            return res, fmt.Errorf("error when running the following command `%s` on master Node: %v, %s", cmd, err, stdout)
        }
        re := regexp.MustCompile(reg)
        if matches := re.FindStringSubmatch(stdout); len(matches) == 0 {
            return res, fmt.Errorf("cannot retrieve CIDR, unexpected kubectl output: %s", stdout)
        } else {
            cidrs := strings.Split(matches[1], ",")
            if len(cidrs) == 1 {
                _, cidr, err := net.ParseCIDR(cidrs[0])
                if err != nil {
                    return res, fmt.Errorf("CIDR cannot be parsed: %s", cidrs[0])
                }
                if cidr.IP.To4() != nil {
                    res[0] = cidrs[0]
                } else {
                    res[1] = cidrs[0]
                }
            } else if len(cidrs) == 2 {
                _, cidr, err := net.ParseCIDR(cidrs[0])
                if err != nil {
                    return res, fmt.Errorf("CIDR cannot be parsed: %s", cidrs[0])
                }
                if cidr.IP.To4() != nil {
                    res[0] = cidrs[0]
                    res[1] = cidrs[1]
                } else {
                    res[0] = cidrs[1]
                    res[1] = cidrs[0]
                }
            } else {
                return res, fmt.Errorf("unexpected cluster CIDR: %s", matches[1])
            }
        }
        return res, nil
    }

    // retrieve cluster CIDRs
    podCIDRs, err := retrieveCIDRs("kubectl cluster-info dump | grep cluster-cidr", `cluster-cidr=([^"]+)`)
    if err != nil {
        return err
    }
    clusterInfo.podV4NetworkCIDR = podCIDRs[0]
    clusterInfo.podV6NetworkCIDR = podCIDRs[1]

    // retrieve service CIDRs
    svcCIDRs, err := retrieveCIDRs("kubectl cluster-info dump | grep service-cluster-ip-range", `service-cluster-ip-range=([^"]+)`)
    if err != nil {
        return err
    }
    clusterInfo.svcV4NetworkCIDR = svcCIDRs[0]
    clusterInfo.svcV6NetworkCIDR = svcCIDRs[1]

    return nil
}

// createNamespace creates the provided namespace.
func (data *TestData) createNamespace(namespace string) error {
    ns := corev1.Namespace{
        ObjectMeta: metav1.ObjectMeta{
            Name: namespace,
        },
    }
    if ns, err := data.clientset.CoreV1().Namespaces().Create(context.TODO(), &ns, metav1.CreateOptions{}); err != nil {
        // Ignore error if the namespace already exists
        if !errors.IsAlreadyExists(err) {
            return fmt.Errorf("error when creating '%s' Namespace: %v", namespace, err)
        }
        // When namespace already exists, check phase
        if ns.Status.Phase == corev1.NamespaceTerminating {
            return fmt.Errorf("error when creating '%s' Namespace: namespace exists but is in 'Terminating' phase", namespace)
        }
    }
    return nil
}

// createTestNamespace creates the namespace used for tests.
func (data *TestData) createTestNamespace() error {
    return data.createNamespace(testNamespace)
}

// deleteNamespace deletes the provided namespace and waits for deletion to actually complete.
func (data *TestData) deleteNamespace(namespace string, timeout time.Duration) error {
    var gracePeriodSeconds int64 = 0
    var propagationPolicy = metav1.DeletePropagationForeground
    deleteOptions := metav1.DeleteOptions{
        GracePeriodSeconds: &gracePeriodSeconds,
        PropagationPolicy:  &propagationPolicy,
    }
    if err := data.clientset.CoreV1().Namespaces().Delete(context.TODO(), namespace, deleteOptions); err != nil {
        if errors.IsNotFound(err) {
            // namespace does not exist, we return right away
            return nil
        }
        return fmt.Errorf("error when deleting '%s' Namespace: %v", namespace, err)
    }
    err := wait.Poll(1*time.Second, timeout, func() (bool, error) {
        if ns, err := data.clientset.CoreV1().Namespaces().Get(context.TODO(), namespace, metav1.GetOptions{}); err != nil {
            if errors.IsNotFound(err) {
                // Success
                return true, nil
            }
            return false, fmt.Errorf("error when getting Namespace '%s' after delete: %v", namespace, err)
        } else if ns.Status.Phase != corev1.NamespaceTerminating {
            return false, fmt.Errorf("deleted Namespace '%s' should be in 'Terminating' phase", namespace)
        }
        // Keep trying
        return false, nil
    })
    return err
}

// deleteTestNamespace deletes test namespace and waits for deletion to actually complete.
func (data *TestData) deleteTestNamespace(timeout time.Duration) error {
    return data.deleteNamespace(testNamespace, timeout)
}

// deployAntreaCommon deploys Antrea using kubectl on the master node.
func (data *TestData) deployAntreaCommon(yamlFile string, extraOptions string) error {
    // TODO: use the K8s apiserver when server side apply is available?
    // See https://kubernetes.io/docs/reference/using-api/api-concepts/#server-side-apply
    rc, _, _, err := provider.RunCommandOnNode(masterNodeName(), fmt.Sprintf("kubectl apply %s -f %s", extraOptions, yamlFile))
    if err != nil || rc != 0 {
        return fmt.Errorf("error when deploying Antrea; is %s available on the master Node?", yamlFile)
    }
    rc, _, _, err = provider.RunCommandOnNode(masterNodeName(), fmt.Sprintf("kubectl -n %s rollout status deploy/%s --timeout=%v", antreaNamespace, antreaDeployment, defaultTimeout))
    if err != nil || rc != 0 {
        return fmt.Errorf("error when waiting for antrea-controller rollout to complete")
    }
    rc, _, _, err = provider.RunCommandOnNode(masterNodeName(), fmt.Sprintf("kubectl -n %s rollout status ds/%s --timeout=%v", antreaNamespace, antreaDaemonSet, defaultTimeout))
    if err != nil || rc != 0 {
        return fmt.Errorf("error when waiting for antrea-agent rollout to complete")
    }
    return nil
}

// deployAntrea deploys Antrea with the standard manifest.
func (data *TestData) deployAntrea() error {
    if testOptions.enableCoverage {
        return data.deployAntreaCommon(antreaCovYML, "")
    }
    return data.deployAntreaCommon(antreaYML, "")
}

// deployAntreaIPSec deploys Antrea with IPSec tunnel enabled.
func (data *TestData) deployAntreaIPSec() error {
    if testOptions.enableCoverage {
        return data.deployAntreaCommon(antreaIPSecCovYML, "")
    }
    return data.deployAntreaCommon(antreaIPSecYML, "")
}

// deployAntreaFlowExporter deploys Antrea with flow exporter config params enabled.
func (data *TestData) deployAntreaFlowExporter(ipfixCollector string) error {
    // Enable flow exporter feature and add related config params to antrea agent configmap.
    return data.mutateAntreaConfigMap(func(data map[string]string) {
        antreaAgentConf, _ := data["antrea-agent.conf"]
        antreaAgentConf = strings.Replace(antreaAgentConf, "# FlowExporter: false", " FlowExporter: true", 1)
        antreaAgentConf = strings.Replace(antreaAgentConf, "#flowCollectorAddr: \"\"", fmt.Sprintf("flowCollectorAddr: \"%s\"", ipfixCollector), 1)
        antreaAgentConf = strings.Replace(antreaAgentConf, "#flowPollInterval: \"5s\"", "flowPollInterval: \"1s\"", 1)
        antreaAgentConf = strings.Replace(antreaAgentConf, "#flowExportFrequency: 12", "flowExportFrequency: 5", 1)
        data["antrea-agent.conf"] = antreaAgentConf
    }, false, true)
}

// getAgentContainersRestartCount reads the restart count for every container across all Antrea
// Agent Pods and returns the sum of all the read values.
func (data *TestData) getAgentContainersRestartCount() (int, error) {
    listOptions := metav1.ListOptions{
        LabelSelector: "app=antrea,component=antrea-agent",
    }
    pods, err := data.clientset.CoreV1().Pods(antreaNamespace).List(context.TODO(), listOptions)
    if err != nil {
        return 0, fmt.Errorf("failed to list antrea-agent Pods: %v", err)
    }
    containerRestarts := 0
    for _, pod := range pods.Items {
        for _, containerStatus := range pod.Status.ContainerStatuses {
            containerRestarts += int(containerStatus.RestartCount)
        }
    }
    return containerRestarts, nil
}

// waitForAntreaDaemonSetPods waits for the K8s apiserver to report that all the Antrea Pods are
// available, i.e. all the Nodes have one or more of the Antrea daemon Pod running and available.
func (data *TestData) waitForAntreaDaemonSetPods(timeout time.Duration) error {
    err := wait.Poll(1*time.Second, timeout, func() (bool, error) {
        daemonSet, err := data.clientset.AppsV1().DaemonSets(antreaNamespace).Get(context.TODO(), antreaDaemonSet, metav1.GetOptions{})
        if err != nil {
            return false, fmt.Errorf("error when getting Antrea daemonset: %v", err)
        }
        // Make sure that all Daemon Pods are available.
        // We use clusterInfo.numNodes instead of DesiredNumberScheduled because
        // DesiredNumberScheduled may not be updated right away. If it is still set to 0 the
        // first time we get the DaemonSet's Status, we would return immediately instead of
        // waiting.
        desiredNumber := int32(clusterInfo.numNodes)
        if daemonSet.Status.NumberAvailable != desiredNumber ||
            daemonSet.Status.UpdatedNumberScheduled != desiredNumber {
            return false, nil
        }
        // Make sure that all antrea-agent Pods are not terminating. This is required because NumberAvailable of
        // DaemonSet counts Pods even if they are terminating. Deleting antrea-agent Pods directly does not cause the
        // number to decrease if the process doesn't quit immediately, e.g. when the signal is caught by bincover
        // program and triggers coverage calculation.
        pods, err := data.clientset.CoreV1().Pods(antreaNamespace).List(context.TODO(), metav1.ListOptions{
            LabelSelector: "app=antrea,component=antrea-agent",
        })
        if err != nil {
            return false, fmt.Errorf("failed to list antrea-agent Pods: %v", err)
        }
        if len(pods.Items) != clusterInfo.numNodes {
            return false, nil
        }
        for _, pod := range pods.Items {
            if pod.DeletionTimestamp != nil {
                return false, nil
            }
        }
        return true, nil
    })
    if err == wait.ErrWaitTimeout {
        return fmt.Errorf("antrea-agent DaemonSet not ready within %v", defaultTimeout)
    } else if err != nil {
        return err
    }
    return nil
}

// waitForCoreDNSPods waits for the K8s apiserver to report that all the CoreDNS Pods are available.
func (data *TestData) waitForCoreDNSPods(timeout time.Duration) error {
    err := wait.PollImmediate(1*time.Second, timeout, func() (bool, error) {
        deployment, err := data.clientset.AppsV1().Deployments("kube-system").Get(context.TODO(), "coredns", metav1.GetOptions{})
        if err != nil {
            return false, fmt.Errorf("error when retrieving CoreDNS deployment: %v", err)
        }
        if deployment.Status.UnavailableReplicas == 0 {
            return true, nil
        }
        // Keep trying
        return false, nil
    })
    if err == wait.ErrWaitTimeout {
        return fmt.Errorf("some CoreDNS replicas are still unavailable after %v", defaultTimeout)
    } else if err != nil {
        return err
    }
    return nil
}

// restartCoreDNSPods deletes all the CoreDNS Pods to force them to be re-scheduled. It then waits
// for all the Pods to become available, by calling waitForCoreDNSPods.
func (data *TestData) restartCoreDNSPods(timeout time.Duration) error {
    var gracePeriodSeconds int64 = 1
    deleteOptions := metav1.DeleteOptions{
        GracePeriodSeconds: &gracePeriodSeconds,
    }
    listOptions := metav1.ListOptions{
        LabelSelector: "k8s-app=kube-dns",
    }
    if err := data.clientset.CoreV1().Pods(antreaNamespace).DeleteCollection(context.TODO(), deleteOptions, listOptions); err != nil {
        return fmt.Errorf("error when deleting all CoreDNS Pods: %v", err)
    }
    return data.waitForCoreDNSPods(timeout)
}

// checkCoreDNSPods checks that all the Pods for the CoreDNS deployment are ready. If not, it
// deletes all the Pods to force them to restart and waits up to timeout for the Pods to become
// ready.
func (data *TestData) checkCoreDNSPods(timeout time.Duration) error {
    if deployment, err := data.clientset.AppsV1().Deployments(antreaNamespace).Get(context.TODO(), "coredns", metav1.GetOptions{}); err != nil {
        return fmt.Errorf("error when retrieving CoreDNS deployment: %v", err)
    } else if deployment.Status.UnavailableReplicas == 0 {
        // deployment ready, nothing to do
        return nil
    }
    return data.restartCoreDNSPods(timeout)
}

// createClient initializes the K8s clientset in the TestData structure.
func (data *TestData) createClient() error {
    kubeconfigPath, err := provider.GetKubeconfigPath()
    if err != nil {
        return fmt.Errorf("error when getting Kubeconfig path: %v", err)
    }
    loadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
    loadingRules.ExplicitPath = kubeconfigPath
    configOverrides := &clientcmd.ConfigOverrides{}

    kubeConfig, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, configOverrides).ClientConfig()
    if err != nil {
        return fmt.Errorf("error when building kube config: %v", err)
    }
    clientset, err := kubernetes.NewForConfig(kubeConfig)
    if err != nil {
        return fmt.Errorf("error when creating kubernetes client: %v", err)
    }
    aggregatorClient, err := aggregatorclientset.NewForConfig(kubeConfig)
    if err != nil {
        return fmt.Errorf("error when creating kubernetes aggregatorClient: %v", err)
    }
    securityClient, err := secv1alpha1.NewForConfig(kubeConfig)
    if err != nil {
        return fmt.Errorf("error when creating Antrea securityClient: %v", err)
    }
    crdClient, err := crdclientset.NewForConfig(kubeConfig)
    if err != nil {
        return fmt.Errorf("error when creating CRD client: %v", err)
    }
    data.kubeConfig = kubeConfig
    data.clientset = clientset
    data.aggregatorClient = aggregatorClient
    data.securityClient = securityClient
    data.crdClient = crdClient
    return nil
}

// deleteAntrea deletes the Antrea DaemonSet; we use cascading deletion, which means all the Pods created
// by Antrea will be deleted.
// After issuing the deletion request, we poll the K8s apiserver to ensure
// that the DaemonSet does not exist any more. This function is a no-op if the Antrea DaemonSet does
// not exist at the time the function is called.
func (data *TestData) deleteAntrea(timeout time.Duration) error {
    if testOptions.enableCoverage {
        data.gracefulExitAntreaAgent(testOptions.coverageDir, "all")
    }
    var gracePeriodSeconds int64 = 5
    // Foreground deletion policy ensures that by the time the DaemonSet is deleted, there are
    // no Antrea Pods left.
    var propagationPolicy = metav1.DeletePropagationForeground
    deleteOptions := metav1.DeleteOptions{
        GracePeriodSeconds: &gracePeriodSeconds,
        PropagationPolicy:  &propagationPolicy,
    }
    if err := data.clientset.AppsV1().DaemonSets(antreaNamespace).Delete(context.TODO(), antreaDaemonSet, deleteOptions); err != nil {
        if errors.IsNotFound(err) {
            // no Antrea DaemonSet running, we return right away
            return nil
        }
        return fmt.Errorf("error when trying to delete Antrea DaemonSet: %v", err)
    }
    err := wait.Poll(1*time.Second, timeout, func() (bool, error) {
        if _, err := data.clientset.AppsV1().DaemonSets(antreaNamespace).Get(context.TODO(), antreaDaemonSet, metav1.GetOptions{}); err != nil {
            if errors.IsNotFound(err) {
                // Antrea DaemonSet does not exist any more, success
                return true, nil
            }
            return false, fmt.Errorf("error when trying to get Antrea DaemonSet after deletion: %v", err)
        }
        // Keep trying
        return false, nil
    })
    return err
}

// getImageName gets the image name from the fully qualified URI.
// For example: "gcr.io/kubernetes-e2e-test-images/agnhost:2.8" gets "agnhost".
func getImageName(uri string) string {
    registryAndImage := strings.Split(uri, ":")[0]
    paths := strings.Split(registryAndImage, "/")
    return paths[len(paths)-1]
}

// createPodOnNode creates a pod in the test namespace with a container whose type is decided by imageName.
// Pod will be scheduled on the specified Node (if nodeName is not empty).
func (data *TestData) createPodOnNode(name string, nodeName string, image string, command []string, args []string, env []corev1.EnvVar, ports []corev1.ContainerPort, hostNetwork bool) error {
    // image could be a fully qualified URI which can't be used as container name and label value,
    // extract the image name from it.
    imageName := getImageName(image)
    podSpec := corev1.PodSpec{
        Containers: []corev1.Container{
            {
                Name:            imageName,
                Image:           image,
                ImagePullPolicy: corev1.PullIfNotPresent,
                Command:         command,
                Args:            args,
                Env:             env,
                Ports:           ports,
            },
        },
        RestartPolicy: corev1.RestartPolicyNever,
        HostNetwork:   hostNetwork,
    }
    if nodeName != "" {
        podSpec.NodeSelector = map[string]string{
            "kubernetes.io/hostname": nodeName,
        }
    }
    if nodeName == masterNodeName() {
        // tolerate NoSchedule taint if we want Pod to run on master node
        noScheduleToleration := corev1.Toleration{
            Key:      "node-role.kubernetes.io/master",
            Operator: corev1.TolerationOpExists,
            Effect:   corev1.TaintEffectNoSchedule,
        }
        podSpec.Tolerations = []corev1.Toleration{noScheduleToleration}
    }
    pod := &corev1.Pod{
        ObjectMeta: metav1.ObjectMeta{
            Name: name,
            Labels: map[string]string{
                "antrea-e2e": name,
                "app":        imageName,
            },
        },
        Spec: podSpec,
    }
    if _, err := data.clientset.CoreV1().Pods(testNamespace).Create(context.TODO(), pod, metav1.CreateOptions{}); err != nil {
        return err
    }
    return nil
}

// createBusyboxPodOnNode creates a Pod in the test namespace with a single busybox container. The
// Pod will be scheduled on the specified Node (if nodeName is not empty).
func (data *TestData) createBusyboxPodOnNode(name string, nodeName string) error {
    sleepDuration := 3600 // seconds
    return data.createPodOnNode(name, nodeName, "busybox", []string{"sleep", strconv.Itoa(sleepDuration)}, nil, nil, nil, false)
}

// createBusyboxPod creates a Pod in the test namespace with a single busybox container.
func (data *TestData) createBusyboxPod(name string) error {
    return data.createBusyboxPodOnNode(name, "")
}

// createNginxPodOnNode creates a Pod in the test namespace with a single nginx container. The
// Pod will be scheduled on the specified Node (if nodeName is not empty).
func (data *TestData) createNginxPodOnNode(name string, nodeName string) error {
    return data.createPodOnNode(name, nodeName, "nginx", []string{}, nil, nil, []corev1.ContainerPort{
        {
            Name:          "http",
            ContainerPort: 80,
            Protocol:      corev1.ProtocolTCP,
        },
    }, false)
}

// createNginxPod creates a Pod in the test namespace with a single nginx container.
func (data *TestData) createNginxPod(name, nodeName string) error {
    return data.createNginxPodOnNode(name, nodeName)
}

// createServerPod creates a Pod that can listen to specified port and have named port set.
func (data *TestData) createServerPod(name string, portName string, portNum int, setHostPort bool) error {
    // See https://github.com/kubernetes/kubernetes/blob/master/test/images/agnhost/porter/porter.go#L17 for the image's detail.
    image := "gcr.io/kubernetes-e2e-test-images/agnhost:2.8"
    cmd := "porter"
    env := corev1.EnvVar{Name: fmt.Sprintf("SERVE_PORT_%d", portNum), Value: "foo"}
    port := corev1.ContainerPort{Name: portName, ContainerPort: int32(portNum)}
    if setHostPort {
        // If hostPort is to be set, it must match the container port number.
        port.HostPort = int32(portNum)
    }
    return data.createPodOnNode(name, "", image, nil, []string{cmd}, []corev1.EnvVar{env}, []corev1.ContainerPort{port}, false)
}

// deletePod deletes a Pod in the test namespace.
func (data *TestData) deletePod(name string) error {
    var gracePeriodSeconds int64 = 5
    deleteOptions := metav1.DeleteOptions{
        GracePeriodSeconds: &gracePeriodSeconds,
    }
    if err := data.clientset.CoreV1().Pods(testNamespace).Delete(context.TODO(), name, deleteOptions); err != nil {
        if !errors.IsNotFound(err) {
            return err
        }
    }
    return nil
}

// Deletes a Pod in the test namespace then waits us to timeout for the Pod not to be visible to the
// client any more.
func (data *TestData) deletePodAndWait(timeout time.Duration, name string) error {
    if err := data.deletePod(name); err != nil {
        return err
    }
    if err := wait.Poll(1*time.Second, timeout, func() (bool, error) {
        if _, err := data.clientset.CoreV1().Pods(testNamespace).Get(context.TODO(), name, metav1.GetOptions{}); err != nil {
            if errors.IsNotFound(err) {
                return true, nil
            }
            return false, fmt.Errorf("error when getting Pod: %v", err)
        }
        // Keep trying
        return false, nil
    }); err == wait.ErrWaitTimeout {
        return fmt.Errorf("Pod '%s' still visible to client after %v", name, timeout)
    } else {
        return err
    }
}

type PodCondition func(*corev1.Pod) (bool, error)

// podWaitFor polls the K8s apiserver until the specified Pod is found (in the test Namespace) and
// the condition predicate is met (or until the provided timeout expires).
func (data *TestData) podWaitFor(timeout time.Duration, name, namespace string, condition PodCondition) (*corev1.Pod, error) {
    err := wait.Poll(1*time.Second, timeout, func() (bool, error) {
        if pod, err := data.clientset.CoreV1().Pods(namespace).Get(context.TODO(), name, metav1.GetOptions{}); err != nil {
            if errors.IsNotFound(err) {
                return false, nil
            }
            return false, fmt.Errorf("error when getting Pod '%s': %v", name, err)
        } else {
            return condition(pod)
        }
    })
    if err != nil {
        return nil, err
    }
    return data.clientset.CoreV1().Pods(namespace).Get(context.TODO(), name, metav1.GetOptions{})
}

// podWaitForRunning polls the k8s apiserver until the specified Pod is in the "running" state (or
// until the provided timeout expires).
func (data *TestData) podWaitForRunning(timeout time.Duration, name, namespace string) error {
    _, err := data.podWaitFor(timeout, name, namespace, func(pod *corev1.Pod) (bool, error) {
        return pod.Status.Phase == corev1.PodRunning, nil
    })
    return err
}

// podWaitForIPs polls the K8s apiserver until the specified Pod is in the "running" state (or until
// the provided timeout expires). The function then returns the IP addresses assigned to the Pod. If the
// Pod is not using "hostNetwork", the function also checks that an IP address exists in each required
// Address Family in the cluster.
func (data *TestData) podWaitForIPs(timeout time.Duration, name, namespace string) (*PodIPs, error) {
    pod, err := data.podWaitFor(timeout, name, namespace, func(pod *corev1.Pod) (bool, error) {
        return pod.Status.Phase == corev1.PodRunning, nil
    })
    if err != nil {
        return nil, err
    }
    // According to the K8s API documentation (https://godoc.org/k8s.io/api/core/v1#PodStatus),
    // the PodIP field should only be empty if the Pod has not yet been scheduled, and "running"
    // implies scheduled.
if pod.Status.PodIP == "" { return nil, fmt.Errorf("Pod is running but has no assigned IP, which should never happen") } podIPStrings := sets.NewString(pod.Status.PodIP) for _, podIP := range pod.Status.PodIPs { ipStr := strings.TrimSpace(podIP.IP) if ipStr != "" { podIPStrings.Insert(ipStr) } } ips, err := parsePodIPs(podIPStrings) if err != nil { return nil, err } if !pod.Spec.HostNetwork { if clusterInfo.podV4NetworkCIDR != "" && ips.ipv4 == nil { return nil, fmt.Errorf("no IPv4 address is assigned while cluster was configured with IPv4 Pod CIDR %s", clusterInfo.podV4NetworkCIDR) } if clusterInfo.podV6NetworkCIDR != "" && ips.ipv6 == nil { return nil, fmt.Errorf("no IPv6 address is assigned while cluster was configured with IPv6 Pod CIDR %s", clusterInfo.podV6NetworkCIDR) } } return ips, nil } func parsePodIPs(podIPStrings sets.String) (*PodIPs, error) { ips := new(PodIPs) for idx := range podIPStrings.List() { ipStr := podIPStrings.List()[idx] ip := net.ParseIP(ipStr) if ip.To4() != nil { if ips.ipv4 != nil && ipStr != ips.ipv4.String() { return nil, fmt.Errorf("Pod is assigned multiple IPv4 addresses: %s and %s", ips.ipv4.String(), ipStr) } if ips.ipv4 == nil { ips.ipv4 = &ip ips.ipStrings = append(ips.ipStrings, ipStr) } } else { if ips.ipv6 != nil && ipStr != ips.ipv6.String() { return nil, fmt.Errorf("Pod is assigned multiple IPv6 addresses: %s and %s", ips.ipv6.String(), ipStr) } if ips.ipv6 == nil { ips.ipv6 = &ip ips.ipStrings = append(ips.ipStrings, ipStr) } } } if len(ips.ipStrings) == 0 { return nil, fmt.Errorf("pod is running but has no assigned IP, which should never happen") } return ips, nil } // deleteAntreaAgentOnNode deletes the antrea-agent Pod on a specific Node and measure how long it // takes for the Pod not to be visible to the client any more. It also waits for a new antrea-agent // Pod to be running on the Node. 
func (data *TestData) deleteAntreaAgentOnNode(nodeName string, gracePeriodSeconds int64, timeout time.Duration) (time.Duration, error) {
    if testOptions.enableCoverage {
        data.gracefulExitAntreaAgent(testOptions.coverageDir, nodeName)
    }
    listOptions := metav1.ListOptions{
        LabelSelector: "app=antrea,component=antrea-agent",
        FieldSelector: fmt.Sprintf("spec.nodeName=%s", nodeName),
    }
    // we do not use DeleteCollection directly because we want to ensure the resources no longer
    // exist by the time we return
    pods, err := data.clientset.CoreV1().Pods(antreaNamespace).List(context.TODO(), listOptions)
    if err != nil {
        return 0, fmt.Errorf("failed to list antrea-agent Pods on Node '%s': %v", nodeName, err)
    }
    // in the normal case, there should be a single Pod in the list
    if len(pods.Items) == 0 {
        return 0, fmt.Errorf("no available antrea-agent Pods on Node '%s'", nodeName)
    }
    deleteOptions := metav1.DeleteOptions{
        GracePeriodSeconds: &gracePeriodSeconds,
    }
    start := time.Now()
    if err := data.clientset.CoreV1().Pods(antreaNamespace).DeleteCollection(context.TODO(), deleteOptions, listOptions); err != nil {
        return 0, fmt.Errorf("error when deleting antrea-agent Pods on Node '%s': %v", nodeName, err)
    }
    if err := wait.Poll(1*time.Second, timeout, func() (bool, error) {
        for _, pod := range pods.Items {
            if _, err := data.clientset.CoreV1().Pods(antreaNamespace).Get(context.TODO(), pod.Name, metav1.GetOptions{}); err != nil {
                if errors.IsNotFound(err) {
                    continue
                }
                return false, fmt.Errorf("error when getting Pod: %v", err)
            }
            // Keep trying, at least one Pod left
            return false, nil
        }
        return true, nil
    }); err != nil {
        return 0, err
    }
    delay := time.Since(start)
    // wait for new antrea-agent Pod
    if err := wait.Poll(1*time.Second, timeout, func() (bool, error) {
        pods, err := data.clientset.CoreV1().Pods(antreaNamespace).List(context.TODO(), listOptions)
        if err != nil {
            return false, fmt.Errorf("failed to list antrea-agent Pods on Node '%s': %v", nodeName, err)
        }
        if len(pods.Items) == 0 {
            // keep trying
            return false, nil
        }
        for _, pod := range pods.Items {
            if pod.Status.Phase != corev1.PodRunning {
                return false, nil
            }
        }
        return true, nil
    }); err != nil {
        return 0, err
    }
    return delay, nil
}

// getAntreaPodOnNode retrieves the name of the Antrea Pod (antrea-agent-*) running on a specific Node.
func (data *TestData) getAntreaPodOnNode(nodeName string) (podName string, err error) {
    listOptions := metav1.ListOptions{
        LabelSelector: "app=antrea,component=antrea-agent",
        FieldSelector: fmt.Sprintf("spec.nodeName=%s", nodeName),
    }
    pods, err := data.clientset.CoreV1().Pods(antreaNamespace).List(context.TODO(), listOptions)
    if err != nil {
        return "", fmt.Errorf("failed to list Antrea Pods: %v", err)
    }
    if len(pods.Items) != 1 {
        return "", fmt.Errorf("expected *exactly* one Pod")
    }
    return pods.Items[0].Name, nil
}

// getAntreaController retrieves the name of the Antrea Controller (antrea-controller-*) running in the k8s cluster.
func (data *TestData) getAntreaController() (*corev1.Pod, error) {
    listOptions := metav1.ListOptions{
        LabelSelector: "app=antrea,component=antrea-controller",
    }
    pods, err := data.clientset.CoreV1().Pods(antreaNamespace).List(context.TODO(), listOptions)
    if err != nil {
        return nil, fmt.Errorf("failed to list Antrea Controller: %v", err)
    }
    if len(pods.Items) != 1 {
        return nil, fmt.Errorf("expected *exactly* one Pod")
    }
    return &pods.Items[0], nil
}

// restartAntreaControllerPod deletes the antrea-controller Pod to force it to be re-scheduled. It then waits
// for the new Pod to become available, and returns it.
func (data *TestData) restartAntreaControllerPod(timeout time.Duration) (*corev1.Pod, error) {
    if testOptions.enableCoverage {
        data.gracefulExitAntreaController(testOptions.coverageDir)
    }
    var gracePeriodSeconds int64 = 1
    deleteOptions := metav1.DeleteOptions{
        GracePeriodSeconds: &gracePeriodSeconds,
    }
    listOptions := metav1.ListOptions{
        LabelSelector: "app=antrea,component=antrea-controller",
    }
    if err := data.clientset.CoreV1().Pods(antreaNamespace).DeleteCollection(context.TODO(), deleteOptions, listOptions); err != nil {
        return nil, fmt.Errorf("error when deleting antrea-controller Pod: %v", err)
    }
    var newPod *corev1.Pod
    // wait for new antrea-controller Pod
    if err := wait.Poll(1*time.Second, timeout, func() (bool, error) {
        pods, err := data.clientset.CoreV1().Pods(antreaNamespace).List(context.TODO(), listOptions)
        if err != nil {
            return false, fmt.Errorf("failed to list antrea-controller Pods: %v", err)
        }
        // Even though the strategy is "Recreate", the old Pod might still be in terminating state when the new Pod is
        // running as this is deleting a Pod manually, not upgrade.
        // See https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#recreate-deployment.
        // So we should ensure there's only 1 Pod and it's running.
        if len(pods.Items) != 1 || pods.Items[0].DeletionTimestamp != nil {
            return false, nil
        }
        pod := pods.Items[0]
        ready := false
        for _, condition := range pod.Status.Conditions {
            if condition.Type == corev1.PodReady {
                ready = condition.Status == corev1.ConditionTrue
                break
            }
        }
        if !ready {
            return false, nil
        }
        newPod = &pod
        return true, nil
    }); err != nil {
        return nil, err
    }
    return newPod, nil
}

// restartAntreaAgentPods deletes all the antrea-agent Pods to force them to be re-scheduled. It
// then waits for the new Pods to become available.
func (data *TestData) restartAntreaAgentPods(timeout time.Duration) error {
    if testOptions.enableCoverage {
        data.gracefulExitAntreaAgent(testOptions.coverageDir, "all")
    }
    var gracePeriodSeconds int64 = 1
    deleteOptions := metav1.DeleteOptions{
        GracePeriodSeconds: &gracePeriodSeconds,
    }
    listOptions := metav1.ListOptions{
        LabelSelector: "app=antrea,component=antrea-agent",
    }
    if err := data.clientset.CoreV1().Pods(antreaNamespace).DeleteCollection(context.TODO(), deleteOptions, listOptions); err != nil {
        return fmt.Errorf("error when deleting antrea-agent Pods: %v", err)
    }
    return data.waitForAntreaDaemonSetPods(timeout)
}

// validatePodIP checks that the provided IP address is in the Pod Network CIDR for the cluster.
func validatePodIP(podNetworkCIDR string, ip net.IP) (bool, error) {
    _, cidr, err := net.ParseCIDR(podNetworkCIDR)
    if err != nil {
        return false, fmt.Errorf("podNetworkCIDR '%s' is not a valid CIDR", podNetworkCIDR)
    }
    return cidr.Contains(ip), nil
}

// createService creates a service with port and targetPort.
func (data *TestData) createService(serviceName string, port, targetPort int, selector map[string]string, affinity bool, serviceType corev1.ServiceType) (*corev1.Service, error) {
    affinityType := corev1.ServiceAffinityNone
    if affinity {
        affinityType = corev1.ServiceAffinityClientIP
    }
    service := corev1.Service{
        ObjectMeta: metav1.ObjectMeta{
            Name:      serviceName,
            Namespace: testNamespace,
            Labels: map[string]string{
                "antrea-e2e": serviceName,
                "app":        serviceName,
            },
        },
        Spec: corev1.ServiceSpec{
            SessionAffinity: affinityType,
            Ports: []corev1.ServicePort{{
                Port:       int32(port),
                TargetPort: intstr.FromInt(targetPort),
            }},
            Type:     serviceType,
            Selector: selector,
        },
    }
    return data.clientset.CoreV1().Services(testNamespace).Create(context.TODO(), &service, metav1.CreateOptions{})
}

// createNginxClusterIPService create a nginx service with the given name.
func (data *TestData) createNginxClusterIPService(affinity bool) (*corev1.Service, error) {
    return data.createService("nginx", 80, 80, map[string]string{"app": "nginx"}, affinity, corev1.ServiceTypeClusterIP)
}

func (data *TestData) createNginxLoadBalancerService(affinity bool, ingressIPs []string) (*corev1.Service, error) {
    svc, err := data.createService("nginx-loadbalancer", 80, 80, map[string]string{"app": "nginx"}, affinity, corev1.ServiceTypeLoadBalancer)
    if err != nil {
        return svc, err
    }
    ingress := make([]corev1.LoadBalancerIngress, len(ingressIPs))
    for idx, ingressIP := range ingressIPs {
        ingress[idx].IP = ingressIP
    }
    updatedSvc := svc.DeepCopy()
    updatedSvc.Status.LoadBalancer.Ingress = ingress
    patchData, err := json.Marshal(updatedSvc)
    if err != nil {
        return svc, err
    }
    return data.clientset.CoreV1().Services(svc.Namespace).Patch(context.TODO(), svc.Name, types.MergePatchType, patchData, metav1.PatchOptions{}, "status")
}

// deleteService deletes the service.
func (data *TestData) deleteService(name string) error {
    if err := data.clientset.CoreV1().Services(testNamespace).Delete(context.TODO(), name, metav1.DeleteOptions{}); err != nil {
        return fmt.Errorf("unable to cleanup service %v: %v", name, err)
    }
    return nil
}

// createNetworkPolicy creates a network policy with spec.
func (data *TestData) createNetworkPolicy(name string, spec *networkingv1.NetworkPolicySpec) (*networkingv1.NetworkPolicy, error) {
    policy := &networkingv1.NetworkPolicy{
        ObjectMeta: metav1.ObjectMeta{
            Name: name,
            Labels: map[string]string{
                "antrea-e2e": name,
            },
        },
        Spec: *spec,
    }
    return data.clientset.NetworkingV1().NetworkPolicies(testNamespace).Create(context.TODO(), policy, metav1.CreateOptions{})
}

// deleteNetworkpolicy deletes the network policy.
func (data *TestData) deleteNetworkpolicy(policy *networkingv1.NetworkPolicy) error {
    if err := data.clientset.NetworkingV1().NetworkPolicies(policy.Namespace).Delete(context.TODO(), policy.Name, metav1.DeleteOptions{}); err != nil {
        return fmt.Errorf("unable to cleanup policy %v: %v", policy.Name, err)
    }
    return nil
}

// A DNS-1123 subdomain must consist of lower case alphanumeric characters
var lettersAndDigits = []rune("abcdefghijklmnopqrstuvwxyz0123456789")

func randSeq(n int) string {
    b := make([]rune, n)
    for i := range b {
        randIdx := rand.Intn(len(lettersAndDigits))
        b[i] = lettersAndDigits[randIdx]
    }
    return string(b)
}

func randName(prefix string) string {
    return prefix + randSeq(nameSuffixLength)
}

// Run the provided command in the specified Container for the give Pod and returns the contents of
// stdout and stderr as strings.
// An error either indicates that the command couldn't be run or that
// the command returned a non-zero error code.
func (data *TestData) runCommandFromPod(podNamespace string, podName string, containerName string, cmd []string) (stdout string, stderr string, err error) {
    request := data.clientset.CoreV1().RESTClient().Post().
        Namespace(podNamespace).
        Resource("pods").
        Name(podName).
        SubResource("exec").
        Param("container", containerName).
        VersionedParams(&corev1.PodExecOptions{
            Command: cmd,
            Stdin:   false,
            Stdout:  true,
            Stderr:  true,
            TTY:     false,
        }, scheme.ParameterCodec)
    exec, err := remotecommand.NewSPDYExecutor(data.kubeConfig, "POST", request.URL())
    if err != nil {
        return "", "", err
    }
    var stdoutB, stderrB bytes.Buffer
    if err := exec.Stream(remotecommand.StreamOptions{
        Stdout: &stdoutB,
        Stderr: &stderrB,
    }); err != nil {
        return stdoutB.String(), stderrB.String(), err
    }
    return stdoutB.String(), stderrB.String(), nil
}

func forAllNodes(fn func(nodeName string) error) error {
    for idx := 0; idx < clusterInfo.numNodes; idx++ {
        name := nodeName(idx)
        if name == "" {
            return fmt.Errorf("unexpected empty name for Node %d", idx)
        }
        if err := fn(name); err != nil {
            return err
        }
    }
    return nil
}

// forAllMatchingPodsInNamespace invokes the provided function for every Pod currently running on every Node in a given
// namespace and which matches labelSelector criteria.
func (data *TestData) forAllMatchingPodsInNamespace(
    labelSelector, nsName string, fn func(nodeName string, podName string, nsName string) error) error {
    for _, node := range clusterInfo.nodes {
        listOptions := metav1.ListOptions{
            LabelSelector: labelSelector,
            FieldSelector: fmt.Sprintf("spec.nodeName=%s", node.name),
        }
        pods, err := data.clientset.CoreV1().Pods(nsName).List(context.TODO(), listOptions)
        if err != nil {
            return fmt.Errorf("failed to list Antrea Pods on Node '%s': %v", node.name, err)
        }
        for _, pod := range pods.Items {
            if err := fn(node.name, pod.Name, nsName); err != nil {
                return err
            }
        }
    }
    return nil
}

func parseArpingStdout(out string) (sent uint32, received uint32, loss float32, err error) {
    re := regexp.MustCompile(`Sent\s+(\d+)\s+probe.*\nReceived\s+(\d+)\s+response`)
    matches := re.FindStringSubmatch(out)
    if len(matches) == 0 {
        return 0, 0, 0.0, fmt.Errorf("Unexpected arping output")
    }
    if v, err := strconv.ParseUint(matches[1], 10, 32); err != nil {
        return 0, 0, 0.0, fmt.Errorf("Error when retrieving 'sent probes' from arpping output: %v", err)
    } else {
        sent = uint32(v)
    }
    if v, err := strconv.ParseUint(matches[2], 10, 32); err != nil {
        return 0, 0, 0.0, fmt.Errorf("Error when retrieving 'received responses' from arpping output: %v", err)
    } else {
        received = uint32(v)
    }
    loss = 100. * float32(sent-received) / float32(sent)
    return sent, received, loss, nil
}

func (data *TestData) runPingCommandFromTestPod(podName string, targetPodIPs *PodIPs, count int) error {
    var cmd []string
    if targetPodIPs.ipv4 != nil {
        cmd = []string{"ping", "-c", strconv.Itoa(count), targetPodIPs.ipv4.String()}
        if _, _, err := data.runCommandFromPod(testNamespace, podName, busyboxContainerName, cmd); err != nil {
            return err
        }
    }
    if targetPodIPs.ipv6 != nil {
        cmd = []string{"ping", "-6", "-c", strconv.Itoa(count), targetPodIPs.ipv6.String()}
        if _, _, err := data.runCommandFromPod(testNamespace, podName, busyboxContainerName, cmd); err != nil {
            return err
        }
    }
    return nil
}

func (data *TestData) runNetcatCommandFromTestPod(podName string, server string, port int) error {
    // Retrying several times to avoid flakes as the test may involve DNS (coredns) and Service/Endpoints (kube-proxy).
    cmd := []string{
        "/bin/sh",
        "-c",
        fmt.Sprintf("for i in $(seq 1 5); do nc -vz -w 4 %s %d && exit 0 || sleep 1; done; exit 1", server, port),
    }
    stdout, stderr, err := data.runCommandFromPod(testNamespace, podName, busyboxContainerName, cmd)
    if err == nil {
        return nil
    }
    return fmt.Errorf("nc stdout: <%v>, stderr: <%v>, err: <%v>", stdout, stderr, err)
}

func (data *TestData) doesOVSPortExist(antreaPodName string, portName string) (bool, error) {
    cmd := []string{"ovs-vsctl", "port-to-br", portName}
    _, stderr, err := data.runCommandFromPod(antreaNamespace, antreaPodName, ovsContainerName, cmd)
    if err == nil {
        return true, nil
    } else if strings.Contains(stderr, "no port named") {
        return false, nil
    }
    return false, fmt.Errorf("error when running ovs-vsctl command on Pod '%s': %v", antreaPodName, err)
}

func (data *TestData) GetEncapMode() (config.TrafficEncapModeType, error) {
    mapList, err := data.clientset.CoreV1().ConfigMaps(antreaNamespace).List(context.TODO(), metav1.ListOptions{})
    if err != nil {
        return config.TrafficEncapModeInvalid, err
    }
    for _, m := range mapList.Items {
        if strings.HasPrefix(m.Name, "antrea-config") {
            configMap, err := data.clientset.CoreV1().ConfigMaps(antreaNamespace).Get(context.TODO(), m.Name, metav1.GetOptions{})
            if err != nil {
                return config.TrafficEncapModeInvalid, err
            }
            for _, antreaConfig := range configMap.Data {
                for _, mode := range config.GetTrafficEncapModes() {
                    searchStr := fmt.Sprintf("trafficEncapMode: %s", mode)
                    if strings.Index(strings.ToLower(antreaConfig), strings.ToLower(searchStr)) != -1 {
                        return mode, nil
                    }
                }
            }
            return config.TrafficEncapModeEncap, nil
        }
    }
    return config.TrafficEncapModeInvalid, fmt.Errorf("antrea-conf config map is not found")
}

func (data *TestData) GetAntreaConfigMap(antreaNamespace string) (*corev1.ConfigMap, error) {
    deployment, err := data.clientset.AppsV1().Deployments(antreaNamespace).Get(context.TODO(), antreaDeployment, metav1.GetOptions{})
    if err != nil {
        return nil, fmt.Errorf("failed to retrieve Antrea Controller deployment: %v", err)
    }
    var configMapName string
    for _, volume := range deployment.Spec.Template.Spec.Volumes {
        if volume.ConfigMap != nil && volume.Name == antreaConfigVolume {
            configMapName = volume.ConfigMap.Name
            break
        }
    }
    if len(configMapName) == 0 {
        return nil, fmt.Errorf("failed to locate %s ConfigMap volume", antreaConfigVolume)
    }
    configMap, err := data.clientset.CoreV1().ConfigMaps(antreaNamespace).Get(context.TODO(), configMapName, metav1.GetOptions{})
    if err != nil {
        return nil, fmt.Errorf("failed to get ConfigMap %s: %v", configMapName, err)
    }
    return configMap, nil
}

func (data *TestData) GetGatewayInterfaceName(antreaNamespace string) (string, error) {
    configMap, err := data.GetAntreaConfigMap(antreaNamespace)
    if err != nil {
        return "", err
    }
    agentConfData := configMap.Data["antrea-agent.conf"]
    for _, line := range strings.Split(agentConfData, "\n") {
        if strings.HasPrefix(line, "hostGateway") {
            return strings.Fields(line)[1], nil
        }
    }
    return antreaDefaultGW, nil
}

func (data *TestData) mutateAntreaConfigMap(mutatingFunc func(data map[string]string), restartController, restartAgent bool) error {
    configMap, err := data.GetAntreaConfigMap(antreaNamespace)
    if err != nil {
        return err
    }
    mutatingFunc(configMap.Data)
    if _, err := data.clientset.CoreV1().ConfigMaps(antreaNamespace).Update(context.TODO(), configMap, metav1.UpdateOptions{}); err != nil {
        return fmt.Errorf("failed to update ConfigMap %s: %v", configMap.Name, err)
    }
    if restartAgent {
        err = data.restartAntreaAgentPods(defaultTimeout)
        if err != nil {
            return fmt.Errorf("error when restarting antrea-agent Pod: %v", err)
        }
    }
    // controller should be restarted after agents in case of dataplane disruption caused by agent restart on Kind cluster.
    if restartController {
        _, err = data.restartAntreaControllerPod(defaultTimeout)
        if err != nil {
            return fmt.Errorf("error when restarting antrea-controller Pod: %v", err)
        }
    }
    return nil
}

// gracefulExitAntreaController copies the Antrea controller binary coverage data file out before terminating the Pod
func (data *TestData) gracefulExitAntreaController(covDir string) error {
    antreaController, err := data.getAntreaController()
    if err != nil {
        return fmt.Errorf("error when getting antrea-controller Pod: %v", err)
    }
    podName := antreaController.Name

    cmds := []string{"pgrep", "-f", antreaControllerCovBinary, "-P", "1"}
    stdout, stderr, err := data.runCommandFromPod(antreaNamespace, podName, "antrea-controller", cmds)
    if err != nil {
        return fmt.Errorf("error when getting pid of '%s': <%v>, err: <%v>", antreaControllerCovBinary, stderr, err)
    }
    cmds = []string{"kill", "-SIGINT", strings.TrimSpace(stdout)}
    _, stderr, err = data.runCommandFromPod(antreaNamespace, podName, "antrea-controller", cmds)
    if err != nil {
        return fmt.Errorf("error when sending SIGINT signal to '%s': <%v>, err: <%v>", antreaControllerCovBinary, stderr, err)
    }
    err = data.copyPodFiles(podName, "antrea-controller", antreaNamespace, antreaControllerCovFile, covDir)
    if err != nil {
        return fmt.Errorf("error when graceful exit Antrea controller: copy pod files out, error:%v", err)
    }
    return nil
}

// gracefulExitAntreaAgent copies the Antrea agent binary coverage data file out before terminating the Pod
func (data *TestData) gracefulExitAntreaAgent(covDir string, nodeName string) error {
    listOptions := metav1.ListOptions{
        LabelSelector: "app=antrea,component=antrea-agent",
    }
    if nodeName != "all" {
        listOptions.FieldSelector = fmt.Sprintf("spec.nodeName=%s", nodeName)
    }

    pods, err := data.clientset.CoreV1().Pods(antreaNamespace).List(context.TODO(), listOptions)
    if err != nil {
        return fmt.Errorf("failed to list antrea-agent pods: %v", err)
    }
    for _, pod := range pods.Items {
        podName := pod.Name
        cmds := []string{"pgrep", "-f", antreaAgentCovBinary, "-P", "1"}
        stdout, stderr, err := data.runCommandFromPod(antreaNamespace, podName, "antrea-agent", cmds)
        if err != nil {
            return fmt.Errorf("error when getting pid of '%s': <%v>, err: <%v>", antreaAgentCovBinary, stderr, err)
        }
        cmds = []string{"kill", "-SIGINT", strings.TrimSpace(stdout)}
        _, stderr, err = data.runCommandFromPod(antreaNamespace, podName, "antrea-agent", cmds)
        if err != nil {
            return fmt.Errorf("error when sending SIGINT signal to '%s': <%v>, err: <%v>", antreaAgentCovBinary, stderr, err)
        }
        err = data.copyPodFiles(podName, "antrea-agent", antreaNamespace, antreaAgentCovFile, covDir)
        if err != nil {
            return fmt.Errorf("error when graceful exit Antrea agent: copy pod files out, error:%v", err)
        }
    }
    return nil
}

// gracefulExitAntreaAgent copies the Antrea agent binary coverage data file out before terminating the Pod
func (data *TestData) copyPodFiles(podName string, containerName string, nsName string, fileName string, covDir string) error {
    fmt.Printf("Copying file %s from pod %s podName to '%s'", fileName, podName, covDir)
    // getPodWriter creates the file with name podName-suffix. It returns nil if the
    // file cannot be created. File must be closed by the caller.
    getPodWriter := func(podName, suffix string) *os.File {
        covFile := filepath.Join(covDir, fmt.Sprintf("%s-%s", podName, suffix))
        f, err := os.Create(covFile)
        if err != nil {
            _ = fmt.Errorf("error when creating coverage file '%s': '%v'", covFile, err)
            return nil
        }
        return f
    }

    // runKubectl runs the provided kubectl command on the master Node and returns the
    // output. It returns an empty string in case of error.
    runKubectl := func(cmd string) string {
        rc, stdout, _, err := RunCommandOnNode(masterNodeName(), cmd)
        if err != nil || rc != 0 {
            _ = fmt.Errorf("error when running this kubectl command on master Node: %s", cmd)
            return ""
        }
        return stdout
    }

    // dump the file from Antrea Pods to disk.
    // a filepath-friendly timestamp format.
    const timeFormat = "Jan02-15-04-05"
    timeStamp := time.Now().Format(timeFormat)
    w := getPodWriter(podName, timeStamp)
    if w == nil {
        return nil
    }
    defer w.Close()
    cmd := fmt.Sprintf("kubectl exec -i %s -c %s -n %s -- cat %s", podName, containerName, nsName, fileName)
    stdout := runKubectl(cmd)
    if stdout == "" {
        return nil
    }
    w.WriteString(stdout)
    return nil
}
```
idx: 1
id: 26252
msg: "Is it called somewhere?"
proj: antrea-io-antrea
lang: go
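The reviewer asks whether the new String() method is called anywhere. In Go, a method with the signature String() string satisfies fmt.Stringer, and fmt's %v and %s verbs invoke it implicitly, so such a method can be exercised without any explicit call site. A minimal, self-contained illustration (the type below is hypothetical, not Antrea's PodIPs):

```go
package main

import "fmt"

// ips mimics the shape of the patched type and implements fmt.Stringer.
type ips struct {
	v4 string
}

// String satisfies fmt.Stringer; fmt's printing verbs invoke it implicitly.
func (i ips) String() string {
	return "IPv4: " + i.v4
}

func main() {
	p := ips{v4: "10.0.0.1"}
	// No explicit p.String() call, yet the method runs via fmt.
	fmt.Printf("%v\n", p) // prints: IPv4: 10.0.0.1
}
```

This is why grep-for-callers is often inconclusive for String() methods: logging and error formatting may reach them through the interface rather than by name.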
patch:
```diff
@@ -20,10 +20,11 @@ import (
     "bytes"
     "encoding/json"
     "fmt"
-    . "github.com/openebs/maya/pkg/msg/v1alpha1"
+    "reflect"
+
+    _ "github.com/openebs/maya/pkg/msg/v1alpha1"
     ft "k8s.io/client-go/third_party/forked/golang/template"
     jp "k8s.io/client-go/util/jsonpath"
-    "reflect"
 )
 
 type selection struct {
```
y: 1
oldf:
```go
/*
Copyright 2018 The OpenEBS Authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1alpha1

import (
    "bytes"
    "encoding/json"
    "fmt"

    . "github.com/openebs/maya/pkg/msg/v1alpha1"
    ft "k8s.io/client-go/third_party/forked/golang/template"
    jp "k8s.io/client-go/util/jsonpath"
    "reflect"
)

type selection struct {
    Name   string   `json:"name"`   // name of selection
    Path   string   `json:"path"`   // selection path used to build jsonpath query
    Values []string `json:"values"` // resulting values due to selection path
    *Msgs
}

func Selection(name, path string) *selection {
    return &selection{
        Name: name,
        Path: path,
        Msgs: &Msgs{},
    }
}

func (s *selection) SetValues(rvals [][]reflect.Value) {
    if len(rvals) == 0 {
        s.AddWarn(fmt.Sprintf("no value(s) found for %s %s", s.Name, s.Path))
        s.Values = append(s.Values, "<no value>")
        return
    }
    for _, rvs := range rvals {
        for _, rv := range rvs {
            pv, ok := ft.PrintableValue(rv)
            if !ok {
                s.AddWarn(fmt.Sprintf("can not print type %s: failed to query %s %s", rv.Type(), s.Name, s.Path))
                pv = "<not printable>"
            }
            var buffer bytes.Buffer
            fmt.Fprint(&buffer, pv)
            s.Values = append(s.Values, buffer.String())
        }
    }
    return
}

type SelectionList []*selection

func (l SelectionList) String() string {
    return YamlString("selectionlist", l)
}

func (l SelectionList) ValuesByName(name string) (vals []string) {
    for _, s := range l {
        if s.Name == name {
            return s.Values
        }
    }
    return
}

func (l SelectionList) ValueByName(name string) (value string) {
    vals := l.ValuesByName(name)
    if len(vals) == 0 {
        return
    }
    return vals[0]
}

func (l SelectionList) ValuesByPath(path string) (vals []string) {
    for _, s := range l {
        if s.Path == path {
            return s.Values
        }
    }
    return
}

func (l SelectionList) ValueByPath(path string) (value string) {
    vals := l.ValuesByPath(path)
    if len(vals) == 0 {
        return
    }
    return vals[0]
}

type jsonpath struct {
    name    string        // name given to the json querying
    target  interface{}   // target to be queried against
    jpath   *jp.JSONPath  // instance that understands querying json doc
    selects SelectionList // selective queries to be done against json doc
    *Msgs
}

func JSONPath(name string) (j *jsonpath) {
    return &jsonpath{
        name:  name,
        jpath: jp.New(name).AllowMissingKeys(true),
        Msgs:  &Msgs{},
    }
}

func (j *jsonpath) WithTarget(target interface{}) (u *jsonpath) {
    j.target = target
    return j
}

func (j *jsonpath) WithTargetAsRaw(target []byte) (u *jsonpath) {
    var t interface{}
    err := json.Unmarshal(target, &t)
    if err != nil {
        j.AddError(fmt.Errorf("failed to build target for jsonpath %s: error - %s", j.name, err.Error()))
        return j
    }
    j.target = t
    return j
}

func (j *jsonpath) Values(path string) (vals [][]reflect.Value, err error) {
    err = j.jpath.Parse(path)
    if err != nil {
        return
    }
    return j.jpath.FindResults(j.target)
}

func (j *jsonpath) Query(selects SelectionList) (l SelectionList) {
    for _, s := range selects {
        vals, err := j.Values(s.Path)
        if err != nil {
            j.AddError(fmt.Errorf("failed to query %s %s: error - %s", s.Name, s.Path, err.Error()))
        }
        s.SetValues(vals)
        l = append(l, s)
        j.Msgs.Merge(s.Msgs)
    }
    return
}
```
1
9,882
Please remove this extra blank line.
openebs-maya
go
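To make the blank-line nit concrete, this is one reading of the import block after the review is applied (a sketch only; whether stdlib and third-party imports should instead keep a single separating blank line is this project's goimports convention to decide):

import (
	"bytes"
	"encoding/json"
	"fmt"
	"reflect"
	_ "github.com/openebs/maya/pkg/msg/v1alpha1"
	ft "k8s.io/client-go/third_party/forked/golang/template"
	jp "k8s.io/client-go/util/jsonpath"
)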
@@ -176,7 +176,7 @@ public class CheckUnusedDependenciesTask extends DefaultTask { return ignore.get().contains(BaselineExactDependencies.asString(artifact)); } - @Input + @InputFiles public final Provider<List<Configuration>> getDependenciesConfigurations() { return dependenciesConfigurations; }
1
/* * (c) Copyright 2019 Palantir Technologies Inc. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.palantir.baseline.tasks; import com.google.common.base.Preconditions; import com.google.common.collect.Sets; import com.google.common.collect.Streams; import com.palantir.baseline.plugins.BaselineExactDependencies; import java.nio.file.Path; import java.util.Collections; import java.util.Comparator; import java.util.List; import java.util.Objects; import java.util.Optional; import java.util.Set; import java.util.stream.Collectors; import org.gradle.api.DefaultTask; import org.gradle.api.GradleException; import org.gradle.api.artifacts.Configuration; import org.gradle.api.artifacts.ResolvedArtifact; import org.gradle.api.artifacts.ResolvedDependency; import org.gradle.api.file.FileCollection; import org.gradle.api.provider.ListProperty; import org.gradle.api.provider.Property; import org.gradle.api.provider.Provider; import org.gradle.api.provider.SetProperty; import org.gradle.api.tasks.Input; import org.gradle.api.tasks.InputFiles; import org.gradle.api.tasks.TaskAction; public class CheckUnusedDependenciesTask extends DefaultTask { private final ListProperty<Configuration> dependenciesConfigurations; private final ListProperty<Configuration> sourceOnlyConfigurations; private final Property<FileCollection> sourceClasses; private final SetProperty<String> ignore; public CheckUnusedDependenciesTask() { setGroup("Verification"); setDescription("Ensures no extraneous dependencies are declared"); dependenciesConfigurations = getProject().getObjects().listProperty(Configuration.class); dependenciesConfigurations.set(Collections.emptyList()); sourceOnlyConfigurations = getProject().getObjects().listProperty(Configuration.class); sourceOnlyConfigurations.set(Collections.emptyList()); sourceClasses = getProject().getObjects().property(FileCollection.class); ignore = getProject().getObjects().setProperty(String.class); ignore.set(Collections.emptySet()); } @TaskAction public final void checkUnusedDependencies() { Set<ResolvedDependency> declaredDependencies = dependenciesConfigurations.get().stream() .map(Configuration::getResolvedConfiguration) .flatMap(resolved -> resolved.getFirstLevelModuleDependencies().stream()) .collect(Collectors.toSet()); BaselineExactDependencies.INDEXES.populateIndexes(declaredDependencies); Set<ResolvedArtifact> declaredArtifacts = declaredDependencies.stream() .flatMap(dependency -> dependency.getModuleArtifacts().stream()) .filter(dependency -> BaselineExactDependencies.VALID_ARTIFACT_EXTENSIONS.contains(dependency.getExtension())) .collect(Collectors.toSet()); excludeSourceOnlyDependencies(); Set<ResolvedArtifact> necessaryArtifacts = Streams.stream( sourceClasses.get().iterator()) .flatMap(BaselineExactDependencies::referencedClasses) .map(BaselineExactDependencies.INDEXES::classToDependency) .filter(Optional::isPresent) .map(Optional::get) .collect(Collectors.toSet()); Set<ResolvedArtifact> possiblyUnused = Sets.difference(declaredArtifacts, 
necessaryArtifacts); getLogger() .debug( "Possibly unused dependencies: {}", possiblyUnused.stream() .map(BaselineExactDependencies::asString) .sorted() .collect(Collectors.toList())); List<ResolvedArtifact> declaredButUnused = possiblyUnused.stream() .filter(artifact -> !shouldIgnore(artifact)) .sorted(Comparator.comparing(BaselineExactDependencies::asString)) .collect(Collectors.toList()); if (!declaredButUnused.isEmpty()) { // TODO(dfox): don't print warnings for jars that define service loaded classes (e.g. meta-inf) StringBuilder builder = new StringBuilder(); builder.append(String.format( "Found %s dependencies unused during compilation, please delete them from '%s' or choose one of " + "the suggested fixes:\n", declaredButUnused.size(), buildFile())); for (ResolvedArtifact resolvedArtifact : declaredButUnused) { builder.append('\t') .append(BaselineExactDependencies.asDependencyStringWithName(resolvedArtifact)) .append('\n'); // Suggest fixes by looking at all transitive classes, filtering the ones we have declarations on, // and mapping the remaining ones back to the jars they came from. ResolvedDependency dependency = BaselineExactDependencies.INDEXES.artifactsFromDependency(resolvedArtifact); Set<ResolvedArtifact> didYouMean = dependency.getAllModuleArtifacts().stream() .filter(artifact -> BaselineExactDependencies.VALID_ARTIFACT_EXTENSIONS.contains(artifact.getExtension())) .flatMap(BaselineExactDependencies.INDEXES::classesFromArtifact) .filter(referencedClasses()::contains) .map(BaselineExactDependencies.INDEXES::classToDependency) .filter(Optional::isPresent) .map(Optional::get) .filter(artifact -> !declaredArtifacts.contains(artifact)) .collect(Collectors.toSet()); if (!didYouMean.isEmpty()) { builder.append("\t\tDid you mean:\n"); didYouMean.stream() .map(BaselineExactDependencies::asDependencyStringWithoutName) .sorted() .forEach(dependencyString -> builder.append("\t\t\t") .append(dependencyString) .append("\n")); } } throw new GradleException(builder.toString()); } } /** * Excludes any source only dependencies configured by the user, as they would be incorrectly flagged as unused by * this task due to BaselineExactDependencies use of * {@link org.apache.maven.shared.dependency.analyzer.asm.ASMDependencyAnalyzer} which only looks at the * dependencies of the generated byte-code, not the union of compile + runtime dependencies. */ private void excludeSourceOnlyDependencies() { sourceOnlyConfigurations .get() .forEach(config -> config.getResolvedConfiguration().getFirstLevelModuleDependencies().stream() .flatMap(dependency -> dependency.getModuleArtifacts().stream()) .forEach(artifact -> ignoreDependency(config, artifact))); } private void ignoreDependency(Configuration config, ResolvedArtifact artifact) { String dependencyId = BaselineExactDependencies.asString(artifact); getLogger().info("Ignoring {} dependency: {}", config.getName(), dependencyId); ignore.add(dependencyId); } /** All classes which are mentioned in this project's source code. 
*/ private Set<String> referencedClasses() { return Streams.stream(sourceClasses.get().iterator()) .flatMap(BaselineExactDependencies::referencedClasses) .collect(Collectors.toSet()); } private Path buildFile() { return getProject() .getRootDir() .toPath() .relativize(getProject().getBuildFile().toPath()); } private boolean shouldIgnore(ResolvedArtifact artifact) { return ignore.get().contains(BaselineExactDependencies.asString(artifact)); } @Input public final Provider<List<Configuration>> getDependenciesConfigurations() { return dependenciesConfigurations; } public final void dependenciesConfiguration(Configuration dependenciesConfiguration) { this.dependenciesConfigurations.add(Objects.requireNonNull(dependenciesConfiguration)); } @Input public final Provider<List<Configuration>> getSourceOnlyConfigurations() { return sourceOnlyConfigurations; } /** * Don't use this unless this configuration is resolvable. * * @deprecated This task only looks at <em>directly declared</em> compile dependencies that also appear in the * runtime classpath, so there's no need to exclude anything like {@code compileOnly} anymore. */ @Deprecated public final void sourceOnlyConfiguration(Configuration configuration) { Preconditions.checkNotNull(configuration, "This method requires a non-null configuration"); Preconditions.checkArgument( configuration.isCanBeResolved(), "May only add sourceOnlyConfiguration if it is resolvable: %s", configuration); this.sourceOnlyConfigurations.add(Objects.requireNonNull(configuration)); } @InputFiles public final Provider<FileCollection> getSourceClasses() { return sourceClasses; } public final void setSourceClasses(FileCollection newClasses) { this.sourceClasses.set(getProject().files(newClasses)); } public final void ignore(Provider<Set<String>> value) { ignore.addAll(value); } public final void ignore(String group, String name) { ignore.add(BaselineExactDependencies.ignoreCoordinate(group, name)); } @Input public final Provider<Set<String>> getIgnored() { return ignore; } }
1
8,351
This is really the key: if you don't set this, Gradle doesn't wire up the tasks that produce the various files inside these configurations as inputs... I think we were just getting lucky before.
palantir-gradle-baseline
java
@@ -14,3 +14,13 @@ func TestNewSnapshot(t *testing.T) { _, err := restic.NewSnapshot(paths, nil, "foo", time.Now()) rtest.OK(t, err) } + +func TestTagList(t *testing.T) { + paths := []string{"/home/foobar"} + tags := []string{""} + + sn, _ := restic.NewSnapshot(paths, nil, "foo", time.Now()) + + r := sn.HasTags(tags) + rtest.Assert(t, r, "Failed to match untagged snapshot") +}
1
package restic_test import ( "testing" "time" "github.com/restic/restic/internal/restic" rtest "github.com/restic/restic/internal/test" ) func TestNewSnapshot(t *testing.T) { paths := []string{"/home/foobar"} _, err := restic.NewSnapshot(paths, nil, "foo", time.Now()) rtest.OK(t, err) }
1
15,225
Sorry if I'm missing something here, but shouldn't this message be about failing to match an *empty* selector, or perhaps even an empty *tag* rather than a selector? The message seems disconnected from what we're actually testing.
restic-restic
go
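Following the review comment, a minimal sketch of how the test's assertion could name what is actually exercised; the rtest.Assert(condition, message) form comes from the patch itself, while the test name and the error-checked NewSnapshot call are editorial choices, not part of the diff:

func TestTagListEmptySelector(t *testing.T) {
	paths := []string{"/home/foobar"}
	tags := []string{""} // a selector containing a single empty tag

	sn, err := restic.NewSnapshot(paths, nil, "foo", time.Now())
	rtest.OK(t, err)

	// An empty tag in the selector is expected to match a snapshot
	// that carries no tags at all.
	rtest.Assert(t, sn.HasTags(tags), "empty tag selector failed to match untagged snapshot")
}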
@@ -1,5 +1,8 @@ // Copyright (c) Microsoft. All rights reserved. +using System.IO; +using Microsoft.TestPlatform.VsTestConsole.TranslationLayer.Payloads; + namespace Microsoft.TestPlatform.VsTestConsole.TranslationLayer.UnitTests { using System;
1
// Copyright (c) Microsoft. All rights reserved. namespace Microsoft.TestPlatform.VsTestConsole.TranslationLayer.UnitTests { using System; using System.Collections.Generic; using System.Linq; using System.Linq.Expressions; using System.Threading; using System.Threading.Tasks; using Microsoft.TestPlatform.VsTestConsole.TranslationLayer.Interfaces; using Microsoft.VisualStudio.TestPlatform.CommunicationUtilities; using Microsoft.VisualStudio.TestPlatform.CommunicationUtilities.Interfaces; using Microsoft.VisualStudio.TestPlatform.CommunicationUtilities.ObjectModel; using Microsoft.VisualStudio.TestPlatform.CommunicationUtilities.Serialization; using Microsoft.VisualStudio.TestPlatform.ObjectModel; using Microsoft.VisualStudio.TestPlatform.ObjectModel.Client; using Microsoft.VisualStudio.TestPlatform.ObjectModel.Logging; using Microsoft.VisualStudio.TestTools.UnitTesting; using Moq; using Newtonsoft.Json.Linq; using Newtonsoft.Json; using VisualStudio.TestPlatform.ObjectModel.Client.Interfaces; [TestClass] public class VsTestConsoleRequestSenderTests { private ITranslationLayerRequestSender requestSender; private Mock<ICommunicationManager> mockCommunicationManager; private int WaitTimeout = 2000; [TestInitialize] public void TestInit() { this.mockCommunicationManager = new Mock<ICommunicationManager>(); this.requestSender = new VsTestConsoleRequestSender(mockCommunicationManager.Object, JsonDataSerializer.Instance); } #region Communication Tests [TestMethod] public void InitializeCommunicationShouldSucceed() { this.InitializeCommunication(); this.mockCommunicationManager.Verify(cm => cm.HostServer(), Times.Once); this.mockCommunicationManager.Verify(cm => cm.AcceptClientAsync(), Times.Once); this.mockCommunicationManager.Verify(cm => cm.WaitForClientConnection(Timeout.Infinite), Times.Once); this.mockCommunicationManager.Verify(cm => cm.ReceiveMessage(), Times.Exactly(2)); this.mockCommunicationManager.Verify(cm => cm.SendMessage(MessageType.VersionCheck), Times.Once); } [TestMethod] public void InitializeCommunicationShouldReturnInvalidPortNumberIfHostServerFails() { this.mockCommunicationManager.Setup(cm => cm.HostServer()).Throws(new Exception("Fail")); var portOutput = this.requestSender.InitializeCommunication(); Assert.IsTrue(portOutput < 0, "Negative port number must be returned if Hosting Server fails."); var connectionSuccess = this.requestSender.WaitForRequestHandlerConnection(WaitTimeout); Assert.IsFalse(connectionSuccess, "Connection must fail as server fai;ed to host."); this.mockCommunicationManager.Verify(cm => cm.HostServer(), Times.Once); this.mockCommunicationManager.Verify(cm => cm.AcceptClientAsync(), Times.Never); this.mockCommunicationManager.Verify(cm => cm.WaitForClientConnection(Timeout.Infinite), Times.Never); this.mockCommunicationManager.Verify(cm => cm.ReceiveMessage(), Times.Never); } [TestMethod] public void InitializeCommunicationShouldFailConnectionIfMessageReceiveFailed() { var dummyPortInput = 123; this.mockCommunicationManager.Setup(cm => cm.HostServer()).Returns(dummyPortInput); this.mockCommunicationManager.Setup(cm => cm.AcceptClientAsync()).Callback(() => { }); this.mockCommunicationManager.Setup(cm => cm.WaitForClientConnection(Timeout.Infinite)) .Callback((int timeout) => Task.Delay(200).Wait()); this.mockCommunicationManager.Setup(cm => cm.ReceiveMessage()).Throws(new Exception("Fail")); var portOutput = this.requestSender.InitializeCommunication(); // Hosting server didn't server, so port number should still be valid Assert.AreEqual(dummyPortInput, 
portOutput, "Port number must return without changes."); // Connection must not succeed as handshake failed var connectionSuccess = this.requestSender.WaitForRequestHandlerConnection(WaitTimeout); Assert.IsFalse(connectionSuccess, "Connection must fail if handshake failed."); this.mockCommunicationManager.Verify(cm => cm.HostServer(), Times.Once); this.mockCommunicationManager.Verify(cm => cm.AcceptClientAsync(), Times.Once); this.mockCommunicationManager.Verify(cm => cm.WaitForClientConnection(Timeout.Infinite), Times.Once); } [TestMethod] public void InitializeCommunicationShouldFailConnectionIfSessionConnectedDidNotComeFirst() { var dummyPortInput = 123; this.mockCommunicationManager.Setup(cm => cm.HostServer()).Returns(dummyPortInput); this.mockCommunicationManager.Setup(cm => cm.AcceptClientAsync()).Callback(() => { }); this.mockCommunicationManager.Setup(cm => cm.WaitForClientConnection(Timeout.Infinite)) .Callback((int timeout) => Task.Delay(200).Wait()); var discoveryMessage = new Message() { MessageType = MessageType.StartDiscovery }; this.mockCommunicationManager.Setup(cm => cm.ReceiveMessage()).Returns(discoveryMessage); var portOutput = this.requestSender.InitializeCommunication(); Assert.AreEqual(dummyPortInput, portOutput, "Port number must return without changes."); var connectionSuccess = this.requestSender.WaitForRequestHandlerConnection(WaitTimeout); Assert.IsFalse(connectionSuccess, "Connection must fail if version check failed."); this.mockCommunicationManager.Verify(cm => cm.HostServer(), Times.Once); this.mockCommunicationManager.Verify(cm => cm.AcceptClientAsync(), Times.Once); this.mockCommunicationManager.Verify(cm => cm.WaitForClientConnection(Timeout.Infinite), Times.Once); this.mockCommunicationManager.Verify(cm => cm.ReceiveMessage(), Times.Once); this.mockCommunicationManager.Verify(cm => cm.SendMessage(It.IsAny<string>()), Times.Never); } [TestMethod] public void InitializeCommunicationShouldFailConnectionIfSendMessageFailed() { var dummyPortInput = 123; this.mockCommunicationManager.Setup(cm => cm.HostServer()).Returns(dummyPortInput); this.mockCommunicationManager.Setup(cm => cm.AcceptClientAsync()).Callback(() => { }); this.mockCommunicationManager.Setup(cm => cm.WaitForClientConnection(Timeout.Infinite)) .Callback((int timeout) => Task.Delay(200).Wait()); var sessionConnected = new Message() { MessageType = MessageType.SessionConnected }; this.mockCommunicationManager.Setup(cm => cm.ReceiveMessage()).Returns(sessionConnected); this.mockCommunicationManager.Setup(cm => cm.SendMessage(MessageType.VersionCheck)).Throws(new Exception("Fail")); var portOutput = this.requestSender.InitializeCommunication(); Assert.AreEqual(dummyPortInput, portOutput, "Port number must return without changes."); var connectionSuccess = this.requestSender.WaitForRequestHandlerConnection(WaitTimeout); Assert.IsFalse(connectionSuccess, "Connection must fail if version check failed."); this.mockCommunicationManager.Verify(cm => cm.HostServer(), Times.Once); this.mockCommunicationManager.Verify(cm => cm.AcceptClientAsync(), Times.Once); this.mockCommunicationManager.Verify(cm => cm.WaitForClientConnection(Timeout.Infinite), Times.Once); this.mockCommunicationManager.Verify(cm => cm.ReceiveMessage(), Times.Once); this.mockCommunicationManager.Verify(cm => cm.SendMessage(It.IsAny<string>()), Times.Once); } [TestMethod] public void InitializeCommunicationShouldFailConnectionIfVersionIsWrong() { var dummyPortInput = 123; this.mockCommunicationManager.Setup(cm => 
cm.HostServer()).Returns(dummyPortInput); this.mockCommunicationManager.Setup(cm => cm.AcceptClientAsync()).Callback(() => { }); this.mockCommunicationManager.Setup(cm => cm.WaitForClientConnection(Timeout.Infinite)) .Callback((int timeout) => Task.Delay(200).Wait()); var sessionConnected = new Message() { MessageType = MessageType.SessionConnected }; // Give wrong version var versionCheck = new Message() { MessageType = MessageType.VersionCheck, Payload = JToken.FromObject("2") }; Action changedMessage = () => { this.mockCommunicationManager.Setup(cm => cm.ReceiveMessage()).Returns(versionCheck); }; this.mockCommunicationManager.Setup(cm => cm.ReceiveMessage()).Returns(sessionConnected); this.mockCommunicationManager.Setup(cm => cm.SendMessage(MessageType.VersionCheck)).Callback(changedMessage); var portOutput = this.requestSender.InitializeCommunication(); Assert.AreEqual(dummyPortInput, portOutput, "Port number must return without changes."); var connectionSuccess = this.requestSender.WaitForRequestHandlerConnection(WaitTimeout); Assert.IsFalse(connectionSuccess, "Connection must fail if version check failed."); this.mockCommunicationManager.Verify(cm => cm.HostServer(), Times.Once); this.mockCommunicationManager.Verify(cm => cm.AcceptClientAsync(), Times.Once); this.mockCommunicationManager.Verify(cm => cm.WaitForClientConnection(Timeout.Infinite), Times.Once); this.mockCommunicationManager.Verify(cm => cm.ReceiveMessage(), Times.Exactly(2)); this.mockCommunicationManager.Verify(cm => cm.SendMessage(MessageType.VersionCheck), Times.Once); } #endregion #region Discovery Tests [TestMethod] public void DiscoverTestsShouldCompleteWithZeroTests() { this.InitializeCommunication(); var mockHandler = new Mock<ITestDiscoveryEventsHandler>(); var payload = new DiscoveryCompletePayload() { TotalTests = 0, LastDiscoveredTests = null, IsAborted = false }; var discoveryComplete = new Message() { MessageType = MessageType.DiscoveryComplete, Payload = JToken.FromObject(payload) }; this.mockCommunicationManager.Setup(cm => cm.ReceiveMessage()).Returns(discoveryComplete); this.requestSender.DiscoverTests(new List<string>() { "1.dll" }, null, mockHandler.Object); mockHandler.Verify(mh => mh.HandleDiscoveryComplete(0, null, false), Times.Once, "Discovery Complete must be called"); mockHandler.Verify(mh => mh.HandleDiscoveredTests(It.IsAny<IEnumerable<TestCase>>()), Times.Never, "DiscoveredTests must not be called"); mockHandler.Verify(mh => mh.HandleLogMessage(It.IsAny<TestMessageLevel>(), It.IsAny<string>()), Times.Never, "TestMessage event must not be called"); } [TestMethod] public void DiscoverTestsShouldCompleteWithSingleTest() { this.InitializeCommunication(); var mockHandler = new Mock<ITestDiscoveryEventsHandler>(); var testCase = new TestCase("hello", new Uri("world://how"), "1.dll"); var testCaseList = new List<TestCase>() { testCase }; var testsFound = new Message() { MessageType = MessageType.TestCasesFound, Payload = JToken.FromObject(testCaseList) }; var payload = new DiscoveryCompletePayload() { TotalTests = 1, LastDiscoveredTests = null, IsAborted = false }; var discoveryComplete = new Message() { MessageType = MessageType.DiscoveryComplete, Payload = JToken.FromObject(payload) }; this.mockCommunicationManager.Setup(cm => cm.ReceiveMessage()).Returns(testsFound); mockHandler.Setup(mh => mh.HandleDiscoveredTests(It.IsAny<IEnumerable<TestCase>>())).Callback( () => this.mockCommunicationManager.Setup(cm => cm.ReceiveMessage()).Returns(discoveryComplete)); this.requestSender.DiscoverTests(new 
List<string>() { "1.dll" }, null, mockHandler.Object); mockHandler.Verify(mh => mh.HandleDiscoveryComplete(1, null, false), Times.Once, "Discovery Complete must be called"); mockHandler.Verify(mh => mh.HandleDiscoveredTests(It.IsAny<IEnumerable<TestCase>>()), Times.Once, "DiscoveredTests must be called"); mockHandler.Verify(mh => mh.HandleLogMessage(It.IsAny<TestMessageLevel>(), It.IsAny<string>()), Times.Never, "TestMessage event must not be called"); } [TestMethod] public void DiscoverTestsShouldReportBackTestsWithTraitsInTestsFoundMessage() { this.InitializeCommunication(); var mockHandler = new Mock<ITestDiscoveryEventsHandler>(); var testCase = new TestCase("hello", new Uri("world://how"), "1.dll"); testCase.Traits.Add(new Trait("a", "b")); List<TestCase> receivedTestCases = null; var testCaseList = new List<TestCase>() { testCase }; var testsFound = CreateMessage(MessageType.TestCasesFound, testCaseList); var payload = new DiscoveryCompletePayload() { TotalTests = 1, LastDiscoveredTests = null, IsAborted = false }; var discoveryComplete = CreateMessage(MessageType.DiscoveryComplete, payload); this.mockCommunicationManager.Setup(cm => cm.ReceiveMessage()).Returns(testsFound); mockHandler.Setup(mh => mh.HandleDiscoveredTests(It.IsAny<IEnumerable<TestCase>>())) .Callback( (IEnumerable<TestCase> tests) => { receivedTestCases = tests?.ToList(); this.mockCommunicationManager.Setup(cm => cm.ReceiveMessage()).Returns(discoveryComplete); }); this.requestSender.DiscoverTests(new List<string>() { "1.dll" }, null, mockHandler.Object); Assert.IsNotNull(receivedTestCases); Assert.AreEqual(1, receivedTestCases.Count); // Verify that the traits are passed through properly. var traits = receivedTestCases.ToArray()[0].Traits; Assert.IsNotNull(traits); Assert.AreEqual(traits.ToArray()[0].Name, "a"); Assert.AreEqual(traits.ToArray()[0].Value, "b"); } [TestMethod] public void DiscoverTestsShouldReportBackTestsWithTraitsInDiscoveryCompleteMessage() { this.InitializeCommunication(); var mockHandler = new Mock<ITestDiscoveryEventsHandler>(); var testCase = new TestCase("hello", new Uri("world://how"), "1.dll"); testCase.Traits.Add(new Trait("a", "b")); List<TestCase> receivedTestCases = null; var testCaseList = new List<TestCase>() { testCase }; var payload = new DiscoveryCompletePayload() { TotalTests = 1, LastDiscoveredTests = testCaseList, IsAborted = false }; var discoveryComplete = CreateMessage(MessageType.DiscoveryComplete, payload); this.mockCommunicationManager.Setup(cm => cm.ReceiveMessage()).Returns(discoveryComplete); mockHandler.Setup(mh => mh.HandleDiscoveryComplete(It.IsAny<long>(), It.IsAny<IEnumerable<TestCase>>(), It.IsAny<bool>())) .Callback( (long totalTests, IEnumerable<TestCase> tests, bool isAborted) => { receivedTestCases = tests?.ToList(); }); this.requestSender.DiscoverTests(new List<string>() { "1.dll" }, null, mockHandler.Object); Assert.IsNotNull(receivedTestCases); Assert.AreEqual(1, receivedTestCases.Count); // Verify that the traits are passed through properly. 
var traits = receivedTestCases.ToArray()[0].Traits; Assert.IsNotNull(traits); Assert.AreEqual(traits.ToArray()[0].Name, "a"); Assert.AreEqual(traits.ToArray()[0].Value, "b"); } [TestMethod] public void DiscoverTestsShouldCompleteWithTestMessage() { this.InitializeCommunication(); var mockHandler = new Mock<ITestDiscoveryEventsHandler>(); var testCase = new TestCase("hello", new Uri("world://how"), "1.dll"); var testCaseList = new List<TestCase>() { testCase }; var testsFound = CreateMessage(MessageType.TestCasesFound, testCaseList); var payload = new DiscoveryCompletePayload() { TotalTests = 1, LastDiscoveredTests = null, IsAborted = false }; var discoveryComplete = CreateMessage(MessageType.DiscoveryComplete, payload); var mpayload = new TestMessagePayload() { MessageLevel = TestMessageLevel.Informational, Message = "Hello" }; var message = CreateMessage(MessageType.TestMessage, mpayload); this.mockCommunicationManager.Setup(cm => cm.ReceiveMessage()).Returns(testsFound); mockHandler.Setup(mh => mh.HandleDiscoveredTests(It.IsAny<IEnumerable<TestCase>>())).Callback( () => this.mockCommunicationManager.Setup(cm => cm.ReceiveMessage()).Returns(message)); mockHandler.Setup(mh => mh.HandleLogMessage(It.IsAny<TestMessageLevel>(), It.IsAny<string>())).Callback( () => this.mockCommunicationManager.Setup(cm => cm.ReceiveMessage()).Returns(discoveryComplete)); this.requestSender.DiscoverTests(new List<string>() { "1.dll" }, null, mockHandler.Object); mockHandler.Verify(mh => mh.HandleDiscoveryComplete(1, null, false), Times.Once, "Discovery Complete must be called"); mockHandler.Verify(mh => mh.HandleDiscoveredTests(It.IsAny<IEnumerable<TestCase>>()), Times.Once, "DiscoveredTests must be called"); mockHandler.Verify(mh => mh.HandleLogMessage(TestMessageLevel.Informational, "Hello"), Times.Once, "TestMessage event must be called"); } #endregion #region RunTests [TestMethod] public void StartTestRunShouldCompleteWithZeroTests() { this.InitializeCommunication(); var mockHandler = new Mock<ITestRunEventsHandler>(); var dummyCompleteArgs = new TestRunCompleteEventArgs(null, false, false, null, null, TimeSpan.FromMilliseconds(1)); var dummyLastRunArgs = new TestRunChangedEventArgs(null, null, null); var payload = new TestRunCompletePayload() { ExecutorUris = null, LastRunTests = dummyLastRunArgs, RunAttachments = null, TestRunCompleteArgs = dummyCompleteArgs }; var runComplete = CreateMessage(MessageType.ExecutionComplete, payload); this.mockCommunicationManager.Setup(cm => cm.ReceiveMessage()).Returns(runComplete); this.requestSender.StartTestRun(new List<string>() { "1.dll" }, null, mockHandler.Object); mockHandler.Verify(mh => mh.HandleTestRunComplete(It.IsAny<TestRunCompleteEventArgs>(), It.IsAny<TestRunChangedEventArgs>(), null, null), Times.Once, "Run Complete must be called"); mockHandler.Verify(mh => mh.HandleTestRunStatsChange(It.IsAny<TestRunChangedEventArgs>()), Times.Never, "RunChangedArgs must not be called"); mockHandler.Verify(mh => mh.HandleLogMessage(It.IsAny<TestMessageLevel>(), It.IsAny<string>()), Times.Never, "TestMessage event must not be called"); } [TestMethod] public void StartTestRunShouldCompleteWithSingleTestAndMessage() { this.InitializeCommunication(); var mockHandler = new Mock<ITestRunEventsHandler>(); var testCase = new TestCase("hello", new Uri("world://how"), "1.dll"); var testResult = new VisualStudio.TestPlatform.ObjectModel.TestResult(testCase); testResult.Outcome = TestOutcome.Passed; var dummyCompleteArgs = new TestRunCompleteEventArgs(null, false, false, null, 
null, TimeSpan.FromMilliseconds(1)); var dummyLastRunArgs = new TestRunChangedEventArgs(null, null, null); var testsChangedArgs = new TestRunChangedEventArgs(null, new List<VisualStudio.TestPlatform.ObjectModel.TestResult>() { testResult }, null); var testsPayload = CreateMessage(MessageType.TestRunStatsChange, testsChangedArgs); var payload = new TestRunCompletePayload() { ExecutorUris = null, LastRunTests = dummyLastRunArgs, RunAttachments = null, TestRunCompleteArgs = dummyCompleteArgs }; var runComplete = CreateMessage(MessageType.ExecutionComplete, payload); var mpayload = new TestMessagePayload() { MessageLevel = TestMessageLevel.Informational, Message = "Hello" }; var message = CreateMessage(MessageType.TestMessage, mpayload); this.mockCommunicationManager.Setup(cm => cm.ReceiveMessage()).Returns(testsPayload); mockHandler.Setup(mh => mh.HandleTestRunStatsChange(It.IsAny<TestRunChangedEventArgs>())).Callback<TestRunChangedEventArgs>( (testRunChangedArgs) => { Assert.IsTrue(testRunChangedArgs.NewTestResults != null && testsChangedArgs.NewTestResults.Count() > 0, "TestResults must be passed properly"); this.mockCommunicationManager.Setup(cm => cm.ReceiveMessage()).Returns(message); }); mockHandler.Setup(mh => mh.HandleLogMessage(It.IsAny<TestMessageLevel>(), It.IsAny<string>())).Callback( () => { this.mockCommunicationManager.Setup(cm => cm.ReceiveMessage()).Returns(runComplete); }); this.requestSender.StartTestRun(new List<string>() { "1.dll" }, null, mockHandler.Object); mockHandler.Verify(mh => mh.HandleTestRunComplete(It.IsAny<TestRunCompleteEventArgs>(), It.IsAny<TestRunChangedEventArgs>(), null, null), Times.Once, "Run Complete must be called"); mockHandler.Verify(mh => mh.HandleTestRunStatsChange(It.IsAny<TestRunChangedEventArgs>()), Times.Once, "RunChangedArgs must be called"); mockHandler.Verify(mh => mh.HandleLogMessage(It.IsAny<TestMessageLevel>(), It.IsAny<string>()), Times.Once, "TestMessage event must be called"); } [TestMethod] public void StartTestRunWithCustomHostShouldComplete() { this.InitializeCommunication(); var mockHandler = new Mock<ITestRunEventsHandler>(); var testCase = new TestCase("hello", new Uri("world://how"), "1.dll"); var testResult = new VisualStudio.TestPlatform.ObjectModel.TestResult(testCase); testResult.Outcome = TestOutcome.Passed; var dummyCompleteArgs = new TestRunCompleteEventArgs(null, false, false, null, null, TimeSpan.FromMilliseconds(1)); var dummyLastRunArgs = new TestRunChangedEventArgs(null, null, null); var testsChangedArgs = new TestRunChangedEventArgs(null, new List<VisualStudio.TestPlatform.ObjectModel.TestResult>() { testResult }, null); var testsPayload = CreateMessage(MessageType.TestRunStatsChange, testsChangedArgs); var payload = new TestRunCompletePayload() { ExecutorUris = null, LastRunTests = dummyLastRunArgs, RunAttachments = null, TestRunCompleteArgs = dummyCompleteArgs }; var runComplete = CreateMessage(MessageType.ExecutionComplete, payload); var mpayload = new TestMessagePayload() { MessageLevel = TestMessageLevel.Informational, Message = "Hello" }; var message = CreateMessage(MessageType.TestMessage, mpayload); var runprocessInfoPayload = new Message() { MessageType = MessageType.CustomTestHostLaunch, Payload = JToken.FromObject(new TestProcessStartInfo()) }; mockHandler.Setup(mh => mh.HandleTestRunStatsChange(It.IsAny<TestRunChangedEventArgs>())).Callback<TestRunChangedEventArgs>( (testRunChangedArgs) => { Assert.IsTrue(testRunChangedArgs.NewTestResults != null && testsChangedArgs.NewTestResults.Count() > 0, 
"TestResults must be passed properly"); this.mockCommunicationManager.Setup(cm => cm.ReceiveMessage()).Returns(message); }); mockHandler.Setup(mh => mh.HandleLogMessage(It.IsAny<TestMessageLevel>(), It.IsAny<string>())).Callback( () => { this.mockCommunicationManager.Setup(cm => cm.ReceiveMessage()).Returns(runComplete); }); var mockLauncher = new Mock<ITestHostLauncher>(); mockLauncher.Setup(ml => ml.LaunchTestHost(It.IsAny<TestProcessStartInfo>())).Callback (() => this.mockCommunicationManager.Setup(cm => cm.ReceiveMessage()).Returns(testsPayload)); this.mockCommunicationManager.Setup(cm => cm.ReceiveMessage()).Returns(runprocessInfoPayload); this.requestSender.StartTestRunWithCustomHost(new List<string>() { "1.dll" }, null, mockHandler.Object, mockLauncher.Object); mockHandler.Verify(mh => mh.HandleTestRunComplete(It.IsAny<TestRunCompleteEventArgs>(), It.IsAny<TestRunChangedEventArgs>(), null, null), Times.Once, "Run Complete must be called"); mockHandler.Verify(mh => mh.HandleTestRunStatsChange(It.IsAny<TestRunChangedEventArgs>()), Times.Once, "RunChangedArgs must be called"); mockHandler.Verify(mh => mh.HandleLogMessage(It.IsAny<TestMessageLevel>(), It.IsAny<string>()), Times.Once, "TestMessage event must be called"); mockLauncher.Verify(ml => ml.LaunchTestHost(It.IsAny<TestProcessStartInfo>()), Times.Once, "Custom TestHostLauncher must be called"); } [TestMethod] public void StartTestRunWithSelectedTestsShouldCompleteWithZeroTests() { this.InitializeCommunication(); var mockHandler = new Mock<ITestRunEventsHandler>(); var dummyCompleteArgs = new TestRunCompleteEventArgs(null, false, false, null, null, TimeSpan.FromMilliseconds(1)); var dummyLastRunArgs = new TestRunChangedEventArgs(null, null, null); var payload = new TestRunCompletePayload() { ExecutorUris = null, LastRunTests = dummyLastRunArgs, RunAttachments = null, TestRunCompleteArgs = dummyCompleteArgs }; var runComplete = CreateMessage(MessageType.ExecutionComplete, payload); this.mockCommunicationManager.Setup(cm => cm.ReceiveMessage()).Returns(runComplete); this.requestSender.StartTestRun(new List<TestCase>(), null, mockHandler.Object); mockHandler.Verify(mh => mh.HandleTestRunComplete(It.IsAny<TestRunCompleteEventArgs>(), It.IsAny<TestRunChangedEventArgs>(), null, null), Times.Once, "Run Complete must be called"); mockHandler.Verify(mh => mh.HandleTestRunStatsChange(It.IsAny<TestRunChangedEventArgs>()), Times.Never, "RunChangedArgs must not be called"); mockHandler.Verify(mh => mh.HandleLogMessage(It.IsAny<TestMessageLevel>(), It.IsAny<string>()), Times.Never, "TestMessage event must not be called"); } [TestMethod] public void StartTestRunWithSelectedTestsShouldCompleteWithSingleTestAndMessage() { this.InitializeCommunication(); var mockHandler = new Mock<ITestRunEventsHandler>(); var testCase = new TestCase("hello", new Uri("world://how"), "1.dll"); var testResult = new VisualStudio.TestPlatform.ObjectModel.TestResult(testCase); testResult.Outcome = TestOutcome.Passed; var testCaseList = new List<TestCase>() { testCase }; var dummyCompleteArgs = new TestRunCompleteEventArgs(null, false, false, null, null, TimeSpan.FromMilliseconds(1)); var dummyLastRunArgs = new TestRunChangedEventArgs(null, null, null); var testsChangedArgs = new TestRunChangedEventArgs(null, new List<VisualStudio.TestPlatform.ObjectModel.TestResult>() { testResult }, null); var testsPayload = CreateMessage(MessageType.TestRunStatsChange, testsChangedArgs); var payload = new TestRunCompletePayload() { ExecutorUris = null, LastRunTests = dummyLastRunArgs, 
RunAttachments = null, TestRunCompleteArgs = dummyCompleteArgs }; var runComplete = CreateMessage(MessageType.ExecutionComplete, payload); var mpayload = new TestMessagePayload() { MessageLevel = TestMessageLevel.Informational, Message = "Hello" }; var message = CreateMessage(MessageType.TestMessage, mpayload); this.mockCommunicationManager.Setup(cm => cm.ReceiveMessage()).Returns(testsPayload); mockHandler.Setup(mh => mh.HandleTestRunStatsChange(It.IsAny<TestRunChangedEventArgs>())).Callback<TestRunChangedEventArgs>( (testRunChangedArgs) => { Assert.IsTrue(testRunChangedArgs.NewTestResults != null && testsChangedArgs.NewTestResults.Count() > 0, "TestResults must be passed properly"); this.mockCommunicationManager.Setup(cm => cm.ReceiveMessage()).Returns(message); }); mockHandler.Setup(mh => mh.HandleLogMessage(It.IsAny<TestMessageLevel>(), It.IsAny<string>())).Callback( () => { this.mockCommunicationManager.Setup(cm => cm.ReceiveMessage()).Returns(runComplete); }); this.requestSender.StartTestRun(testCaseList, null, mockHandler.Object); mockHandler.Verify(mh => mh.HandleTestRunComplete(It.IsAny<TestRunCompleteEventArgs>(), It.IsAny<TestRunChangedEventArgs>(), null, null), Times.Once, "Run Complete must be called"); mockHandler.Verify(mh => mh.HandleTestRunStatsChange(It.IsAny<TestRunChangedEventArgs>()), Times.Once, "RunChangedArgs must be called"); mockHandler.Verify(mh => mh.HandleLogMessage(It.IsAny<TestMessageLevel>(), It.IsAny<string>()), Times.Once, "TestMessage event must be called"); } [TestMethod] public void StartTestRunWithSelectedTestsHavingTraitsShouldReturnTestRunCompleteWithTraitsIntact() { this.InitializeCommunication(); var mockHandler = new Mock<ITestRunEventsHandler>(); var testCase = new TestCase("hello", new Uri("world://how"), "1.dll"); testCase.Traits.Add(new Trait("a", "b")); var testResult = new VisualStudio.TestPlatform.ObjectModel.TestResult(testCase); testResult.Outcome = TestOutcome.Passed; var testCaseList = new List<TestCase>() { testCase }; TestRunChangedEventArgs receivedChangeEventArgs = null; var dummyCompleteArgs = new TestRunCompleteEventArgs(null, false, false, null, null, TimeSpan.FromMilliseconds(1)); var dummyLastRunArgs = new TestRunChangedEventArgs(null, new List<VisualStudio.TestPlatform.ObjectModel.TestResult> { testResult }, null); var payload = new TestRunCompletePayload() { ExecutorUris = null, LastRunTests = dummyLastRunArgs, RunAttachments = null, TestRunCompleteArgs = dummyCompleteArgs }; var runComplete = CreateMessage(MessageType.ExecutionComplete, payload); this.mockCommunicationManager.Setup(cm => cm.ReceiveMessage()).Returns(runComplete); mockHandler.Setup(mh => mh.HandleTestRunComplete( It.IsAny<TestRunCompleteEventArgs>(), It.IsAny<TestRunChangedEventArgs>(), It.IsAny<ICollection<AttachmentSet>>(), It.IsAny<ICollection<string>>())) .Callback( (TestRunCompleteEventArgs complete, TestRunChangedEventArgs stats, ICollection<AttachmentSet> attachments, ICollection<string> executorUris) => { receivedChangeEventArgs = stats; }); this.requestSender.StartTestRun(testCaseList, null, mockHandler.Object); Assert.IsNotNull(receivedChangeEventArgs); Assert.IsTrue(receivedChangeEventArgs.NewTestResults.Count() > 0); // Verify that the traits are passed through properly. 
var traits = receivedChangeEventArgs.NewTestResults.ToArray()[0].TestCase.Traits; Assert.IsNotNull(traits); Assert.AreEqual(traits.ToArray()[0].Name, "a"); Assert.AreEqual(traits.ToArray()[0].Value, "b"); } [TestMethod] public void StartTestRunWithSelectedTestsHavingTraitsShouldReturnTestRunStatsWithTraitsIntact() { this.InitializeCommunication(); var mockHandler = new Mock<ITestRunEventsHandler>(); var testCase = new TestCase("hello", new Uri("world://how"), "1.dll"); testCase.Traits.Add(new Trait("a", "b")); var testResult = new VisualStudio.TestPlatform.ObjectModel.TestResult(testCase); testResult.Outcome = TestOutcome.Passed; var testCaseList = new List<TestCase>() { testCase }; TestRunChangedEventArgs receivedChangeEventArgs = null; var dummyCompleteArgs = new TestRunCompleteEventArgs(null, false, false, null, null, TimeSpan.FromMilliseconds(1)); var dummyLastRunArgs = new TestRunChangedEventArgs(null, null, null); var testsChangedArgs = new TestRunChangedEventArgs( null, new List<VisualStudio.TestPlatform.ObjectModel.TestResult>() { testResult }, null); var testsRunStatsPayload = CreateMessage(MessageType.TestRunStatsChange, testsChangedArgs); var testRunCompletepayload = new TestRunCompletePayload() { ExecutorUris = null, LastRunTests = dummyLastRunArgs, RunAttachments = null, TestRunCompleteArgs = dummyCompleteArgs }; var runComplete = CreateMessage(MessageType.ExecutionComplete, testRunCompletepayload); this.mockCommunicationManager.Setup(cm => cm.ReceiveMessage()).Returns(testsRunStatsPayload); mockHandler.Setup(mh => mh.HandleTestRunStatsChange( It.IsAny<TestRunChangedEventArgs>())) .Callback( (TestRunChangedEventArgs stats) => { receivedChangeEventArgs = stats; this.mockCommunicationManager.Setup(cm => cm.ReceiveMessage()).Returns(runComplete); }); this.requestSender.StartTestRun(testCaseList, null, mockHandler.Object); Assert.IsNotNull(receivedChangeEventArgs); Assert.IsTrue(receivedChangeEventArgs.NewTestResults.Any()); // Verify that the traits are passed through properly. 
var traits = receivedChangeEventArgs.NewTestResults.ToArray()[0].TestCase.Traits; Assert.IsNotNull(traits); Assert.AreEqual(traits.ToArray()[0].Name, "a"); Assert.AreEqual(traits.ToArray()[0].Value, "b"); } [TestMethod] public void StartTestRunWithSelectedTestsAndCustomHostShouldComplete() { this.InitializeCommunication(); var mockHandler = new Mock<ITestRunEventsHandler>(); var testCase = new TestCase("hello", new Uri("world://how"), "1.dll"); var testResult = new VisualStudio.TestPlatform.ObjectModel.TestResult(testCase); testResult.Outcome = TestOutcome.Passed; var testCaseList = new List<TestCase>() { testCase }; var dummyCompleteArgs = new TestRunCompleteEventArgs(null, false, false, null, null, TimeSpan.FromMilliseconds(1)); var dummyLastRunArgs = new TestRunChangedEventArgs(null, null, null); var testsChangedArgs = new TestRunChangedEventArgs(null, new List<VisualStudio.TestPlatform.ObjectModel.TestResult>() { testResult }, null); var testsPayload = CreateMessage(MessageType.TestRunStatsChange, testsChangedArgs); var payload = new TestRunCompletePayload() { ExecutorUris = null, LastRunTests = dummyLastRunArgs, RunAttachments = null, TestRunCompleteArgs = dummyCompleteArgs }; var runComplete = CreateMessage(MessageType.ExecutionComplete, payload); var mpayload = new TestMessagePayload() { MessageLevel = TestMessageLevel.Informational, Message = "Hello" }; var message = CreateMessage(MessageType.TestMessage, mpayload); var runprocessInfoPayload = CreateMessage(MessageType.CustomTestHostLaunch, new TestProcessStartInfo()); mockHandler.Setup(mh => mh.HandleTestRunStatsChange(It.IsAny<TestRunChangedEventArgs>())).Callback<TestRunChangedEventArgs>( (testRunChangedArgs) => { Assert.IsTrue(testRunChangedArgs.NewTestResults != null && testsChangedArgs.NewTestResults.Count() > 0, "TestResults must be passed properly"); this.mockCommunicationManager.Setup(cm => cm.ReceiveMessage()).Returns(message); }); mockHandler.Setup(mh => mh.HandleLogMessage(It.IsAny<TestMessageLevel>(), It.IsAny<string>())).Callback( () => { this.mockCommunicationManager.Setup(cm => cm.ReceiveMessage()).Returns(runComplete); }); var mockLauncher = new Mock<ITestHostLauncher>(); mockLauncher.Setup(ml => ml.LaunchTestHost(It.IsAny<TestProcessStartInfo>())).Callback (() => this.mockCommunicationManager.Setup(cm => cm.ReceiveMessage()).Returns(testsPayload)); this.mockCommunicationManager.Setup(cm => cm.ReceiveMessage()).Returns(runprocessInfoPayload); this.requestSender.StartTestRunWithCustomHost(testCaseList, null, mockHandler.Object, mockLauncher.Object); mockHandler.Verify(mh => mh.HandleTestRunComplete(It.IsAny<TestRunCompleteEventArgs>(), It.IsAny<TestRunChangedEventArgs>(), null, null), Times.Once, "Run Complete must be called"); mockHandler.Verify(mh => mh.HandleTestRunStatsChange(It.IsAny<TestRunChangedEventArgs>()), Times.Once, "RunChangedArgs must be called"); mockHandler.Verify(mh => mh.HandleLogMessage(It.IsAny<TestMessageLevel>(), It.IsAny<string>()), Times.Once, "TestMessage event must be called"); mockLauncher.Verify(ml => ml.LaunchTestHost(It.IsAny<TestProcessStartInfo>()), Times.Once, "Custom TestHostLauncher must be called"); } [TestMethod] public void StartTestRunWithCustomHostInParallelShouldCallCustomHostMultipleTimes() { var mockLauncher = new Mock<ITestHostLauncher>(); var mockHandler = new Mock<ITestRunEventsHandler>(); IEnumerable<string> sources = new List<string> { "1.dll" }; var p1 = new TestProcessStartInfo() { FileName = "X" }; var p2 = new TestProcessStartInfo() { FileName = "Y" }; var 
message1 = CreateMessage(MessageType.CustomTestHostLaunch, p1); var message2 = CreateMessage(MessageType.CustomTestHostLaunch, p2); var dummyCompleteArgs = new TestRunCompleteEventArgs(null, false, false, null, null, TimeSpan.FromMilliseconds(1)); var completepayload = new TestRunCompletePayload() { ExecutorUris = null, LastRunTests = null, RunAttachments = null, TestRunCompleteArgs = dummyCompleteArgs }; var runComplete = CreateMessage(MessageType.ExecutionComplete, completepayload); this.mockCommunicationManager.Setup(cm => cm.ReceiveMessage()).Returns(message1); mockLauncher.Setup(ml => ml.LaunchTestHost(It.IsAny<TestProcessStartInfo>())) .Callback<TestProcessStartInfo>((startInfo) => { if(startInfo.FileName.Equals(p1.FileName)) { this.mockCommunicationManager.Setup(cm => cm.ReceiveMessage()).Returns(message2); } else if (startInfo.FileName.Equals(p2.FileName)) { this.mockCommunicationManager.Setup(cm => cm.ReceiveMessage()).Returns(runComplete); } }); this.requestSender.StartTestRunWithCustomHost(sources, null, mockHandler.Object, mockLauncher.Object); mockLauncher.Verify(ml => ml.LaunchTestHost(It.IsAny<TestProcessStartInfo>()), Times.Exactly(2)); } #endregion #region private methods private static Message CreateMessage<T>(string messageType, T payload) { var message = new Message() { MessageType = messageType, Payload = JToken.FromObject( payload, JsonSerializer.Create( new JsonSerializerSettings { ContractResolver = new TestPlatformContractResolver(), TypeNameHandling = TypeNameHandling.None })) }; return message; } private void InitializeCommunication() { var dummyPortInput = 123; this.mockCommunicationManager.Setup(cm => cm.HostServer()).Returns(dummyPortInput); this.mockCommunicationManager.Setup(cm => cm.AcceptClientAsync()).Callback(() => { }); this.mockCommunicationManager.Setup(cm => cm.WaitForClientConnection(Timeout.Infinite)) .Callback((int timeout) => Task.Delay(200).Wait()); var sessionConnected = new Message() { MessageType = MessageType.SessionConnected }; var versionCheck = new Message() { MessageType = MessageType.VersionCheck, Payload = JToken.FromObject("1") }; Action changedMessage = () => { this.mockCommunicationManager.Setup(cm => cm.ReceiveMessage()).Returns(versionCheck); }; this.mockCommunicationManager.Setup(cm => cm.ReceiveMessage()).Returns(sessionConnected); this.mockCommunicationManager.Setup(cm => cm.SendMessage(MessageType.VersionCheck)).Callback(changedMessage); var portOutput = this.requestSender.InitializeCommunication(); Assert.AreEqual(dummyPortInput, portOutput, "Port number must return without changes."); var connectionSuccess = this.requestSender.WaitForRequestHandlerConnection(WaitTimeout); Assert.IsTrue(connectionSuccess, "Connection must succeed."); } #endregion } }
1
11,178
nit: please move these inside the namespace.
microsoft-vstest
.cs
@@ -139,6 +139,13 @@ var (
 		Value: metadata.Testnet3Definition.Testnet3HermesURL,
 		Hidden: true,
 	}
+	// FlagPaymentsDuringSessionDebug sets if we're in debug mode for the payments done in a VPN session.
+	FlagPaymentsDuringSessionDebug = cli.BoolFlag{
+		Name: "payments.during-session-debug",
+		Usage: "Set debug mode for payments made during a session, it will bypass any price validation and allow absurd prices during sessions",
+		Value: false,
+		Hidden: true,
+	}
 )
 
 // RegisterFlagsPayments function register payments flags to flag list.
1
/* * Copyright (C) 2019 The "MysteriumNetwork/node" Authors. * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ package config import ( "time" "github.com/urfave/cli/v2" "github.com/mysteriumnetwork/node/metadata" ) var ( // FlagPaymentsMaxHermesFee represents the max hermes fee. FlagPaymentsMaxHermesFee = cli.IntFlag{ Name: "payments.hermes.max.fee", Value: 3000, Usage: "The max fee that we'll accept from an hermes. In percentiles. 3000 means 30%", } // FlagPaymentsBCTimeout represents the BC call timeout. FlagPaymentsBCTimeout = cli.DurationFlag{ Name: "payments.bc.timeout", Value: time.Second * 30, Usage: "The duration we'll wait before timing out BC calls.", } // FlagPaymentsHermesPromiseSettleThreshold represents the percentage of balance left when we go for promise settling. FlagPaymentsHermesPromiseSettleThreshold = cli.Float64Flag{ Name: "payments.hermes.promise.threshold", Value: 0.1, Usage: "The percentage of balance before we settle promises", } // FlagPaymentsHermesPromiseSettleTimeout represents the time we wait for confirmation of the promise settlement. FlagPaymentsHermesPromiseSettleTimeout = cli.DurationFlag{ Name: "payments.hermes.settle.timeout", Value: time.Minute * 3, Usage: "The duration we'll wait before timing out our wait for promise settle.", Hidden: true, } // FlagPaymentsHermesPromiseSettleCheckInterval represents the time for polling for confirmation of the promise settlement. FlagPaymentsHermesPromiseSettleCheckInterval = cli.DurationFlag{ Name: "payments.hermes.settle.check-interval", Value: time.Second * 30, Usage: "The duration we'll wait before trying to fetch new events.", Hidden: true, } // FlagPaymentsLongBalancePollInterval determines how often we resync balance on chain. FlagPaymentsLongBalancePollInterval = cli.DurationFlag{ Name: "payments.balance-long-poll.interval", Value: time.Hour * 1, Usage: "The duration we'll wait before trying to fetch new balance.", Hidden: true, } // FlagPaymentsFastBalancePollInterval determines how often we resync balance on chain after on chain events. FlagPaymentsFastBalancePollInterval = cli.DurationFlag{ Name: "payments.balance-short-poll.interval", Value: time.Minute, Usage: "The duration we'll wait before trying to fetch new balance.", Hidden: true, } // FlagPaymentsFastBalancePollTimeout determines how long we try to resync balance on chain after on chain events. FlagPaymentsFastBalancePollTimeout = cli.DurationFlag{ Name: "payments.balance-short-poll.timeout", Value: time.Minute * 10, Usage: "The duration we'll wait before giving up trying to fetch new balance.", Hidden: true, } // FlagPaymentsZeroStakeUnsettledAmount determines the minimum amount of myst required before auto settling is triggered if zero stake is used. 
FlagPaymentsZeroStakeUnsettledAmount = cli.Float64Flag{ Name: "payments.zero-stake-unsettled-amount", Value: 0.25, Usage: "The settling threshold if provider uses a zero stake", } // FlagPaymentsRegistryTransactorPollInterval The duration we'll wait before calling transactor to check for new status updates. FlagPaymentsRegistryTransactorPollInterval = cli.DurationFlag{ Name: "payments.registry-transactor-poll.interval", Value: time.Second * 20, Usage: "The duration we'll wait before calling transactor to check for new status updates", Hidden: true, } // FlagPaymentsRegistryTransactorPollTimeout The duration we'll wait before polling up the transactors registration status again. FlagPaymentsRegistryTransactorPollTimeout = cli.DurationFlag{ Name: "payments.registry-transactor-poll.timeout", Value: time.Minute * 20, Usage: "The duration we'll wait before giving up on transactors registration status", Hidden: true, } // FlagPaymentsProviderInvoiceFrequency determines how often the provider sends invoices. FlagPaymentsProviderInvoiceFrequency = cli.DurationFlag{ Name: "payments.provider.invoice-frequency", Value: time.Minute, Usage: "Determines how often the provider sends invoices.", } // FlagPaymentsConsumerDataLeewayMegabytes sets the data amount the consumer agrees to pay before establishing a session FlagPaymentsConsumerDataLeewayMegabytes = cli.Uint64Flag{ Name: "payments.consumer.data-leeway-megabytes", Usage: "sets the data amount the consumer agrees to pay before establishing a session", Value: metadata.MainnetDefinition.Payments.Consumer.DataLeewayMegabytes, } // FlagPaymentsMaxUnpaidInvoiceValue sets the upper limit of session payment value before forcing an invoice FlagPaymentsMaxUnpaidInvoiceValue = cli.StringFlag{ Name: "payments.provider.max-unpaid-invoice-value", Usage: "sets the upper limit of session payment value before forcing an invoice. If this value is exceeded before a payment interval is reached, an invoice is sent.", Value: "3000000000000000", } // FlagPaymentsHermesStatusRecheckInterval sets how often we re-check the hermes status on bc. Higher values allow for less bc lookups but increase the risk for provider. FlagPaymentsHermesStatusRecheckInterval = cli.DurationFlag{ Hidden: true, Name: "payments.provider.hermes-status-recheck-interval", Usage: "sets the hermes status recheck interval. Setting this to a lower value will decrease potential loss in case of Hermes getting locked.", Value: time.Hour * 2, } // FlagOffchainBalanceExpiration sets how often we re-check offchain balance on hermes when balance is depleting FlagOffchainBalanceExpiration = cli.DurationFlag{ Hidden: true, Name: "payments.consumer.offchain-expiration", Usage: "after syncing offchain balance, how long should node wait for next check to occur", Value: time.Minute * 30, } // FlagTestnet3HermesURL sets the default value for legacy (testnet3) hermes URL. // TODO: Remove after migrations are considered done. FlagTestnet3HermesURL = cli.StringFlag{ Name: "payments.testnet3-hermes-url", Usage: "sets the URL for legacy testnet3 hermes", Value: metadata.Testnet3Definition.Testnet3HermesURL, Hidden: true, } ) // RegisterFlagsPayments function register payments flags to flag list. 
func RegisterFlagsPayments(flags *[]cli.Flag) { *flags = append( *flags, &FlagPaymentsMaxHermesFee, &FlagPaymentsBCTimeout, &FlagPaymentsHermesPromiseSettleThreshold, &FlagPaymentsHermesPromiseSettleTimeout, &FlagPaymentsHermesPromiseSettleCheckInterval, &FlagPaymentsLongBalancePollInterval, &FlagPaymentsFastBalancePollInterval, &FlagPaymentsFastBalancePollTimeout, &FlagPaymentsRegistryTransactorPollTimeout, &FlagPaymentsRegistryTransactorPollInterval, &FlagPaymentsProviderInvoiceFrequency, &FlagPaymentsConsumerDataLeewayMegabytes, &FlagPaymentsMaxUnpaidInvoiceValue, &FlagPaymentsHermesStatusRecheckInterval, &FlagOffchainBalanceExpiration, &FlagTestnet3HermesURL, &FlagPaymentsZeroStakeUnsettledAmount, ) } // ParseFlagsPayments function fills in payments options from CLI context. func ParseFlagsPayments(ctx *cli.Context) { Current.ParseIntFlag(ctx, FlagPaymentsMaxHermesFee) Current.ParseDurationFlag(ctx, FlagPaymentsBCTimeout) Current.ParseFloat64Flag(ctx, FlagPaymentsHermesPromiseSettleThreshold) Current.ParseDurationFlag(ctx, FlagPaymentsHermesPromiseSettleTimeout) Current.ParseDurationFlag(ctx, FlagPaymentsHermesPromiseSettleCheckInterval) Current.ParseDurationFlag(ctx, FlagPaymentsProviderInvoiceFrequency) Current.ParseDurationFlag(ctx, FlagPaymentsFastBalancePollInterval) Current.ParseDurationFlag(ctx, FlagPaymentsFastBalancePollTimeout) Current.ParseDurationFlag(ctx, FlagPaymentsLongBalancePollInterval) Current.ParseDurationFlag(ctx, FlagPaymentsLongBalancePollInterval) Current.ParseDurationFlag(ctx, FlagPaymentsRegistryTransactorPollInterval) Current.ParseDurationFlag(ctx, FlagPaymentsRegistryTransactorPollTimeout) Current.ParseUInt64Flag(ctx, FlagPaymentsConsumerDataLeewayMegabytes) Current.ParseStringFlag(ctx, FlagPaymentsMaxUnpaidInvoiceValue) Current.ParseDurationFlag(ctx, FlagPaymentsHermesStatusRecheckInterval) Current.ParseDurationFlag(ctx, FlagOffchainBalanceExpiration) Current.ParseStringFlag(ctx, FlagTestnet3HermesURL) Current.ParseFloat64Flag(ctx, FlagPaymentsZeroStakeUnsettledAmount) }
1
17,383
Might users try to abuse it?
mysteriumnetwork-node
go
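The abuse question is fair: a hidden CLI flag is still settable by any user, so a validation bypass like this is typically fenced with an audit trail at minimum. The sketch below uses invented names (validatePrice, ceiling) and assumes nothing about the node's actual payment code:

import (
	"fmt"
	"log"
	"math/big"
)

// validatePrice sketches a guarded debug bypass; names are illustrative only.
func validatePrice(price, ceiling *big.Int, debugBypass bool) error {
	if debugBypass {
		// Hidden or not, the flag is user-settable, so at minimum
		// leave a loud audit trail whenever validation is skipped.
		log.Println("WARNING: payments.during-session-debug set, skipping price validation")
		return nil
	}
	if price.Cmp(ceiling) > 0 {
		return fmt.Errorf("price %s exceeds allowed ceiling %s", price, ceiling)
	}
	return nil
}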
@@ -242,8 +242,9 @@ public class FlatBufferBuilder { int new_buf_size = old_buf_size == 0 ? 1 : old_buf_size << 1; bb.position(0); ByteBuffer nbb = bb_factory.newByteBuffer(new_buf_size); + new_buf_size = nbb.capacity(); // TODO - Maybe we get the buffer's `limit()` nbb.position(new_buf_size - old_buf_size); - nbb.put(bb); + nbb.put(bb); return nbb; }
1
/* * Copyright 2014 Google Inc. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.google.flatbuffers; import static com.google.flatbuffers.Constants.*; import java.io.IOException; import java.io.InputStream; import java.nio.*; import java.util.Arrays; /// @file /// @addtogroup flatbuffers_java_api /// @{ /** * Class that helps you build a FlatBuffer. See the section * "Use in Java/C#" in the main FlatBuffers documentation. */ public class FlatBufferBuilder { /// @cond FLATBUFFERS_INTERNAL ByteBuffer bb; // Where we construct the FlatBuffer. int space; // Remaining space in the ByteBuffer. int minalign = 1; // Minimum alignment encountered so far. int[] vtable = null; // The vtable for the current table. int vtable_in_use = 0; // The amount of fields we're actually using. boolean nested = false; // Whether we are currently serializing a table. boolean finished = false; // Whether the buffer is finished. int object_start; // Starting offset of the current struct/table. int[] vtables = new int[16]; // List of offsets of all vtables. int num_vtables = 0; // Number of entries in `vtables` in use. int vector_num_elems = 0; // For the current vector being built. boolean force_defaults = false; // False omits default values from the serialized data. ByteBufferFactory bb_factory; // Factory for allocating the internal buffer final Utf8 utf8; // UTF-8 encoder to use /// @endcond /** * Start with a buffer of size `initial_size`, then grow as required. * * @param initial_size The initial size of the internal buffer to use. * @param bb_factory The factory to be used for allocating the internal buffer */ public FlatBufferBuilder(int initial_size, ByteBufferFactory bb_factory) { this(initial_size, bb_factory, null, Utf8.getDefault()); } /** * Start with a buffer of size `initial_size`, then grow as required. * * @param initial_size The initial size of the internal buffer to use. * @param bb_factory The factory to be used for allocating the internal buffer * @param existing_bb The byte buffer to reuse. * @param utf8 The Utf8 codec */ public FlatBufferBuilder(int initial_size, ByteBufferFactory bb_factory, ByteBuffer existing_bb, Utf8 utf8) { if (initial_size <= 0) { initial_size = 1; } this.bb_factory = bb_factory; if (existing_bb != null) { bb = existing_bb; bb.clear(); bb.order(ByteOrder.LITTLE_ENDIAN); } else { bb = bb_factory.newByteBuffer(initial_size); } this.utf8 = utf8; space = bb.capacity(); } /** * Start with a buffer of size `initial_size`, then grow as required. * * @param initial_size The initial size of the internal buffer to use. */ public FlatBufferBuilder(int initial_size) { this(initial_size, HeapByteBufferFactory.INSTANCE, null, Utf8.getDefault()); } /** * Start with a buffer of 1KiB, then grow as required. */ public FlatBufferBuilder() { this(1024); } /** * Alternative constructor allowing reuse of {@link ByteBuffer}s. The builder * can still grow the buffer as necessary. 
User classes should make sure * to call {@link #dataBuffer()} to obtain the resulting encoded message. * * @param existing_bb The byte buffer to reuse. * @param bb_factory The factory to be used for allocating a new internal buffer if * the existing buffer needs to grow */ public FlatBufferBuilder(ByteBuffer existing_bb, ByteBufferFactory bb_factory) { this(existing_bb.capacity(), bb_factory, existing_bb, Utf8.getDefault()); } /** * Alternative constructor allowing reuse of {@link ByteBuffer}s. The builder * can still grow the buffer as necessary. User classes should make sure * to call {@link #dataBuffer()} to obtain the resulting encoded message. * * @param existing_bb The byte buffer to reuse. */ public FlatBufferBuilder(ByteBuffer existing_bb) { this(existing_bb, new HeapByteBufferFactory()); } /** * Alternative initializer that allows reusing this object on an existing * `ByteBuffer`. This method resets the builder's internal state, but keeps * objects that have been allocated for temporary storage. * * @param existing_bb The byte buffer to reuse. * @param bb_factory The factory to be used for allocating a new internal buffer if * the existing buffer needs to grow * @return Returns `this`. */ public FlatBufferBuilder init(ByteBuffer existing_bb, ByteBufferFactory bb_factory){ this.bb_factory = bb_factory; bb = existing_bb; bb.clear(); bb.order(ByteOrder.LITTLE_ENDIAN); minalign = 1; space = bb.capacity(); vtable_in_use = 0; nested = false; finished = false; object_start = 0; num_vtables = 0; vector_num_elems = 0; return this; } /** * An interface that provides a user of the FlatBufferBuilder class the ability to specify * the method in which the internal buffer gets allocated. This allows for alternatives * to the default behavior, which is to allocate memory for a new byte-array * backed `ByteBuffer` array inside the JVM. * * The FlatBufferBuilder class contains the HeapByteBufferFactory class to * preserve the default behavior in the event that the user does not provide * their own implementation of this interface. */ public static abstract class ByteBufferFactory { /** * Create a `ByteBuffer` with a given capacity. * The returned ByteBuf must have a ByteOrder.LITTLE_ENDIAN ByteOrder. * * @param capacity The size of the `ByteBuffer` to allocate. * @return Returns the new `ByteBuffer` that was allocated. */ public abstract ByteBuffer newByteBuffer(int capacity); /** * Release a ByteBuffer. Current {@link FlatBufferBuilder} * released any reference to it, so it is safe to dispose the buffer * or return it to a pool. * It is not guaranteed that the buffer has been created * with {@link #newByteBuffer(int) }. * * @param bb the buffer to release */ public void releaseByteBuffer(ByteBuffer bb) { } } /** * An implementation of the ByteBufferFactory interface that is used when * one is not provided by the user. * * Allocate memory for a new byte-array backed `ByteBuffer` array inside the JVM. 
*/ public static final class HeapByteBufferFactory extends ByteBufferFactory { public static final HeapByteBufferFactory INSTANCE = new HeapByteBufferFactory(); @Override public ByteBuffer newByteBuffer(int capacity) { return ByteBuffer.allocate(capacity).order(ByteOrder.LITTLE_ENDIAN); } } /** * Helper function to test if a field is present in the table * * @param table Flatbuffer table * @param offset virtual table offset * @return true if the filed is present */ public static boolean isFieldPresent(Table table, int offset) { return table.__offset(offset) != 0; } /** * Reset the FlatBufferBuilder by purging all data that it holds. */ public void clear(){ space = bb.capacity(); bb.clear(); minalign = 1; while(vtable_in_use > 0) vtable[--vtable_in_use] = 0; vtable_in_use = 0; nested = false; finished = false; object_start = 0; num_vtables = 0; vector_num_elems = 0; } /** * Doubles the size of the backing {@link ByteBuffer} and copies the old data towards the * end of the new buffer (since we build the buffer backwards). * * @param bb The current buffer with the existing data. * @param bb_factory The factory to be used for allocating the new internal buffer * @return A new byte buffer with the old data copied copied to it. The data is * located at the end of the buffer. */ static ByteBuffer growByteBuffer(ByteBuffer bb, ByteBufferFactory bb_factory) { int old_buf_size = bb.capacity(); if ((old_buf_size & 0xC0000000) != 0) // Ensure we don't grow beyond what fits in an int. throw new AssertionError("FlatBuffers: cannot grow buffer beyond 2 gigabytes."); int new_buf_size = old_buf_size == 0 ? 1 : old_buf_size << 1; bb.position(0); ByteBuffer nbb = bb_factory.newByteBuffer(new_buf_size); nbb.position(new_buf_size - old_buf_size); nbb.put(bb); return nbb; } /** * Offset relative to the end of the buffer. * * @return Offset relative to the end of the buffer. */ public int offset() { return bb.capacity() - space; } /** * Add zero valued bytes to prepare a new entry to be added. * * @param byte_size Number of bytes to add. */ public void pad(int byte_size) { for (int i = 0; i < byte_size; i++) bb.put(--space, (byte)0); } /** * Prepare to write an element of `size` after `additional_bytes` * have been written, e.g. if you write a string, you need to align such * the int length field is aligned to {@link com.google.flatbuffers.Constants#SIZEOF_INT}, and * the string data follows it directly. If all you need to do is alignment, `additional_bytes` * will be 0. * * @param size This is the of the new element to write. * @param additional_bytes The padding size. */ public void prep(int size, int additional_bytes) { // Track the biggest thing we've ever aligned to. if (size > minalign) minalign = size; // Find the amount of alignment needed such that `size` is properly // aligned after `additional_bytes` int align_size = ((~(bb.capacity() - space + additional_bytes)) + 1) & (size - 1); // Reallocate the buffer if needed. while (space < align_size + size + additional_bytes) { int old_buf_size = bb.capacity(); ByteBuffer old = bb; bb = growByteBuffer(old, bb_factory); if (old != bb) { bb_factory.releaseByteBuffer(old); } space += bb.capacity() - old_buf_size; } pad(align_size); } /** * Add a `boolean` to the buffer, backwards from the current location. Doesn't align nor * check for space. * * @param x A `boolean` to put into the buffer. */ public void putBoolean(boolean x) { bb.put (space -= Constants.SIZEOF_BYTE, (byte)(x ? 1 : 0)); } /** * Add a `byte` to the buffer, backwards from the current location. 
Doesn't align nor * check for space. * * @param x A `byte` to put into the buffer. */ public void putByte (byte x) { bb.put (space -= Constants.SIZEOF_BYTE, x); } /** * Add a `short` to the buffer, backwards from the current location. Doesn't align nor * check for space. * * @param x A `short` to put into the buffer. */ public void putShort (short x) { bb.putShort (space -= Constants.SIZEOF_SHORT, x); } /** * Add an `int` to the buffer, backwards from the current location. Doesn't align nor * check for space. * * @param x An `int` to put into the buffer. */ public void putInt (int x) { bb.putInt (space -= Constants.SIZEOF_INT, x); } /** * Add a `long` to the buffer, backwards from the current location. Doesn't align nor * check for space. * * @param x A `long` to put into the buffer. */ public void putLong (long x) { bb.putLong (space -= Constants.SIZEOF_LONG, x); } /** * Add a `float` to the buffer, backwards from the current location. Doesn't align nor * check for space. * * @param x A `float` to put into the buffer. */ public void putFloat (float x) { bb.putFloat (space -= Constants.SIZEOF_FLOAT, x); } /** * Add a `double` to the buffer, backwards from the current location. Doesn't align nor * check for space. * * @param x A `double` to put into the buffer. */ public void putDouble (double x) { bb.putDouble(space -= Constants.SIZEOF_DOUBLE, x); } /// @endcond /** * Add a `boolean` to the buffer, properly aligned, and grows the buffer (if necessary). * * @param x A `boolean` to put into the buffer. */ public void addBoolean(boolean x) { prep(Constants.SIZEOF_BYTE, 0); putBoolean(x); } /** * Add a `byte` to the buffer, properly aligned, and grows the buffer (if necessary). * * @param x A `byte` to put into the buffer. */ public void addByte (byte x) { prep(Constants.SIZEOF_BYTE, 0); putByte (x); } /** * Add a `short` to the buffer, properly aligned, and grows the buffer (if necessary). * * @param x A `short` to put into the buffer. */ public void addShort (short x) { prep(Constants.SIZEOF_SHORT, 0); putShort (x); } /** * Add an `int` to the buffer, properly aligned, and grows the buffer (if necessary). * * @param x An `int` to put into the buffer. */ public void addInt (int x) { prep(Constants.SIZEOF_INT, 0); putInt (x); } /** * Add a `long` to the buffer, properly aligned, and grows the buffer (if necessary). * * @param x A `long` to put into the buffer. */ public void addLong (long x) { prep(Constants.SIZEOF_LONG, 0); putLong (x); } /** * Add a `float` to the buffer, properly aligned, and grows the buffer (if necessary). * * @param x A `float` to put into the buffer. */ public void addFloat (float x) { prep(Constants.SIZEOF_FLOAT, 0); putFloat (x); } /** * Add a `double` to the buffer, properly aligned, and grows the buffer (if necessary). * * @param x A `double` to put into the buffer. */ public void addDouble (double x) { prep(Constants.SIZEOF_DOUBLE, 0); putDouble (x); } /** * Adds on offset, relative to where it will be written. * * @param off The offset to add. */ public void addOffset(int off) { prep(SIZEOF_INT, 0); // Ensure alignment is already done. assert off <= offset(); off = offset() - off + SIZEOF_INT; putInt(off); } /// @cond FLATBUFFERS_INTERNAL /** * Start a new array/vector of objects. Users usually will not call * this directly. The `FlatBuffers` compiler will create a start/end * method for vector types in generated code. 
* <p> * The expected sequence of calls is: * <ol> * <li>Start the array using this method.</li> * <li>Call {@link #addOffset(int)} `num_elems` number of times to set * the offset of each element in the array.</li> * <li>Call {@link #endVector()} to retrieve the offset of the array.</li> * </ol> * <p> * For example, to create an array of strings, do: * <pre>{@code * // Need 10 strings * FlatBufferBuilder builder = new FlatBufferBuilder(existingBuffer); * int[] offsets = new int[10]; * * for (int i = 0; i < 10; i++) { * offsets[i] = fbb.createString(" " + i); * } * * // Have the strings in the buffer, but don't have a vector. * // Add a vector that references the newly created strings: * builder.startVector(4, offsets.length, 4); * * // Add each string to the newly created vector * // The strings are added in reverse order since the buffer * // is filled in back to front * for (int i = offsets.length - 1; i >= 0; i--) { * builder.addOffset(offsets[i]); * } * * // Finish off the vector * int offsetOfTheVector = fbb.endVector(); * }</pre> * * @param elem_size The size of each element in the array. * @param num_elems The number of elements in the array. * @param alignment The alignment of the array. */ public void startVector(int elem_size, int num_elems, int alignment) { notNested(); vector_num_elems = num_elems; prep(SIZEOF_INT, elem_size * num_elems); prep(alignment, elem_size * num_elems); // Just in case alignment > int. nested = true; } /** * Finish off the creation of an array and all its elements. The array * must be created with {@link #startVector(int, int, int)}. * * @return The offset at which the newly created array starts. * @see #startVector(int, int, int) */ public int endVector() { if (!nested) throw new AssertionError("FlatBuffers: endVector called without startVector"); nested = false; putInt(vector_num_elems); return offset(); } /// @endcond /** * Create a new array/vector and return a ByteBuffer to be filled later. * Call {@link #endVector} after this method to get an offset to the beginning * of vector. * * @param elem_size the size of each element in bytes. * @param num_elems number of elements in the vector. * @param alignment byte alignment. * @return ByteBuffer with position and limit set to the space allocated for the array. */ public ByteBuffer createUnintializedVector(int elem_size, int num_elems, int alignment) { int length = elem_size * num_elems; startVector(elem_size, num_elems, alignment); bb.position(space -= length); // Slice and limit the copy vector to point to the 'array' ByteBuffer copy = bb.slice().order(ByteOrder.LITTLE_ENDIAN); copy.limit(length); return copy; } /** * Create a vector of tables. * * @param offsets Offsets of the tables. * @return Returns offset of the vector. */ public int createVectorOfTables(int[] offsets) { notNested(); startVector(Constants.SIZEOF_INT, offsets.length, Constants.SIZEOF_INT); for(int i = offsets.length - 1; i >= 0; i--) addOffset(offsets[i]); return endVector(); } /** * Create a vector of sorted by the key tables. * * @param obj Instance of the table subclass. * @param offsets Offsets of the tables. * @return Returns offset of the sorted vector. */ public <T extends Table> int createSortedVectorOfTables(T obj, int[] offsets) { obj.sortTables(offsets, bb); return createVectorOfTables(offsets); } /** * Encode the string `s` in the buffer using UTF-8. If {@code s} is * already a {@link CharBuffer}, this method is allocation free. * * @param s The string to encode. 
* @return The offset in the buffer where the encoded string starts. */ public int createString(CharSequence s) { int length = utf8.encodedLength(s); addByte((byte)0); startVector(1, length, 1); bb.position(space -= length); utf8.encodeUtf8(s, bb); return endVector(); } /** * Create a string in the buffer from an already encoded UTF-8 string in a ByteBuffer. * * @param s An already encoded UTF-8 string as a `ByteBuffer`. * @return The offset in the buffer where the encoded string starts. */ public int createString(ByteBuffer s) { int length = s.remaining(); addByte((byte)0); startVector(1, length, 1); bb.position(space -= length); bb.put(s); return endVector(); } /** * Create a byte array in the buffer. * * @param arr A source array with data * @return The offset in the buffer where the encoded array starts. */ public int createByteVector(byte[] arr) { int length = arr.length; startVector(1, length, 1); bb.position(space -= length); bb.put(arr); return endVector(); } /// @cond FLATBUFFERS_INTERNAL /** * Should not be accessing the final buffer before it is finished. */ public void finished() { if (!finished) throw new AssertionError( "FlatBuffers: you can only access the serialized buffer after it has been" + " finished by FlatBufferBuilder.finish()."); } /** * Should not be creating any other object, string or vector * while an object is being constructed. */ public void notNested() { if (nested) throw new AssertionError("FlatBuffers: object serialization must not be nested."); } /** * Structures are always stored inline, they need to be created right * where they're used. You'll get this assertion failure if you * created it elsewhere. * * @param obj The offset of the created object. */ public void Nested(int obj) { if (obj != offset()) throw new AssertionError("FlatBuffers: struct must be serialized inline."); } /** * Start encoding a new object in the buffer. Users will not usually need to * call this directly. The `FlatBuffers` compiler will generate helper methods * that call this method internally. * <p> * For example, using the "Monster" code found on the "landing page". An * object of type `Monster` can be created using the following code: * * <pre>{@code * int testArrayOfString = Monster.createTestarrayofstringVector(fbb, new int[] { * fbb.createString("test1"), * fbb.createString("test2") * }); * * Monster.startMonster(fbb); * Monster.addPos(fbb, Vec3.createVec3(fbb, 1.0f, 2.0f, 3.0f, 3.0, * Color.Green, (short)5, (byte)6)); * Monster.addHp(fbb, (short)80); * Monster.addName(fbb, str); * Monster.addInventory(fbb, inv); * Monster.addTestType(fbb, (byte)Any.Monster); * Monster.addTest(fbb, mon2); * Monster.addTest4(fbb, test4); * Monster.addTestarrayofstring(fbb, testArrayOfString); * int mon = Monster.endMonster(fbb); * }</pre> * <p> * Here: * <ul> * <li>The call to `Monster#startMonster(FlatBufferBuilder)` will call this * method with the right number of fields set.</li> * <li>`Monster#endMonster(FlatBufferBuilder)` will ensure {@link #endObject()} is called.</li> * </ul> * <p> * It's not recommended to call this method directly. If it's called manually, you must ensure * to audit all calls to it whenever fields are added or removed from your schema. This is * automatically done by the code generated by the `FlatBuffers` compiler. * * @param numfields The number of fields found in this object. 
*/ public void startTable(int numfields) { notNested(); if (vtable == null || vtable.length < numfields) vtable = new int[numfields]; vtable_in_use = numfields; Arrays.fill(vtable, 0, vtable_in_use, 0); nested = true; object_start = offset(); } /** * Add a `boolean` to a table at `o` into its vtable, with value `x` and default `d`. * * @param o The index into the vtable. * @param x A `boolean` to put into the buffer, depending on how defaults are handled. If * `force_defaults` is `false`, compare `x` against the default value `d`. If `x` contains the * default value, it can be skipped. * @param d A `boolean` default value to compare against when `force_defaults` is `false`. */ public void addBoolean(int o, boolean x, boolean d) { if(force_defaults || x != d) { addBoolean(x); slot(o); } } /** * Add a `byte` to a table at `o` into its vtable, with value `x` and default `d`. * * @param o The index into the vtable. * @param x A `byte` to put into the buffer, depending on how defaults are handled. If * `force_defaults` is `false`, compare `x` against the default value `d`. If `x` contains the * default value, it can be skipped. * @param d A `byte` default value to compare against when `force_defaults` is `false`. */ public void addByte (int o, byte x, int d) { if(force_defaults || x != d) { addByte (x); slot(o); } } /** * Add a `short` to a table at `o` into its vtable, with value `x` and default `d`. * * @param o The index into the vtable. * @param x A `short` to put into the buffer, depending on how defaults are handled. If * `force_defaults` is `false`, compare `x` against the default value `d`. If `x` contains the * default value, it can be skipped. * @param d A `short` default value to compare against when `force_defaults` is `false`. */ public void addShort (int o, short x, int d) { if(force_defaults || x != d) { addShort (x); slot(o); } } /** * Add an `int` to a table at `o` into its vtable, with value `x` and default `d`. * * @param o The index into the vtable. * @param x An `int` to put into the buffer, depending on how defaults are handled. If * `force_defaults` is `false`, compare `x` against the default value `d`. If `x` contains the * default value, it can be skipped. * @param d An `int` default value to compare against when `force_defaults` is `false`. */ public void addInt (int o, int x, int d) { if(force_defaults || x != d) { addInt (x); slot(o); } } /** * Add a `long` to a table at `o` into its vtable, with value `x` and default `d`. * * @param o The index into the vtable. * @param x A `long` to put into the buffer, depending on how defaults are handled. If * `force_defaults` is `false`, compare `x` against the default value `d`. If `x` contains the * default value, it can be skipped. * @param d A `long` default value to compare against when `force_defaults` is `false`. */ public void addLong (int o, long x, long d) { if(force_defaults || x != d) { addLong (x); slot(o); } } /** * Add a `float` to a table at `o` into its vtable, with value `x` and default `d`. * * @param o The index into the vtable. * @param x A `float` to put into the buffer, depending on how defaults are handled. If * `force_defaults` is `false`, compare `x` against the default value `d`. If `x` contains the * default value, it can be skipped. * @param d A `float` default value to compare against when `force_defaults` is `false`. 
*/ public void addFloat (int o, float x, double d) { if(force_defaults || x != d) { addFloat (x); slot(o); } } /** * Add a `double` to a table at `o` into its vtable, with value `x` and default `d`. * * @param o The index into the vtable. * @param x A `double` to put into the buffer, depending on how defaults are handled. If * `force_defaults` is `false`, compare `x` against the default value `d`. If `x` contains the * default value, it can be skipped. * @param d A `double` default value to compare against when `force_defaults` is `false`. */ public void addDouble (int o, double x, double d) { if(force_defaults || x != d) { addDouble (x); slot(o); } } /** * Add an `offset` to a table at `o` into its vtable, with value `x` and default `d`. * * @param o The index into the vtable. * @param x An `offset` to put into the buffer, depending on how defaults are handled. If * `force_defaults` is `false`, compare `x` against the default value `d`. If `x` contains the * default value, it can be skipped. * @param d An `offset` default value to compare against when `force_defaults` is `false`. */ public void addOffset (int o, int x, int d) { if(force_defaults || x != d) { addOffset (x); slot(o); } } /** * Add a struct to the table. Structs are stored inline, so nothing additional is being added. * * @param voffset The index into the vtable. * @param x The offset of the created struct. * @param d The default value is always `0`. */ public void addStruct(int voffset, int x, int d) { if(x != d) { Nested(x); slot(voffset); } } /** * Set the current vtable at `voffset` to the current location in the buffer. * * @param voffset The index into the vtable to store the offset relative to the end of the * buffer. */ public void slot(int voffset) { vtable[voffset] = offset(); } /** * Finish off writing the object that is under construction. * * @return The offset to the object inside {@link #dataBuffer()}. * @see #startTable(int) */ public int endTable() { if (vtable == null || !nested) throw new AssertionError("FlatBuffers: endTable called without startTable"); addInt(0); int vtableloc = offset(); // Write out the current vtable. int i = vtable_in_use - 1; // Trim trailing zeroes. for (; i >= 0 && vtable[i] == 0; i--) {} int trimmed_size = i + 1; for (; i >= 0 ; i--) { // Offset relative to the start of the table. short off = (short)(vtable[i] != 0 ? vtableloc - vtable[i] : 0); addShort(off); } final int standard_fields = 2; // The fields below: addShort((short)(vtableloc - object_start)); addShort((short)((trimmed_size + standard_fields) * SIZEOF_SHORT)); // Search for an existing vtable that matches the current one. int existing_vtable = 0; outer_loop: for (i = 0; i < num_vtables; i++) { int vt1 = bb.capacity() - vtables[i]; int vt2 = space; short len = bb.getShort(vt1); if (len == bb.getShort(vt2)) { for (int j = SIZEOF_SHORT; j < len; j += SIZEOF_SHORT) { if (bb.getShort(vt1 + j) != bb.getShort(vt2 + j)) { continue outer_loop; } } existing_vtable = vtables[i]; break outer_loop; } } if (existing_vtable != 0) { // Found a match: // Remove the current vtable. space = bb.capacity() - vtableloc; // Point table to existing vtable. bb.putInt(space, existing_vtable - vtableloc); } else { // No match: // Add the location of the current vtable to the list of vtables. if (num_vtables == vtables.length) vtables = Arrays.copyOf(vtables, num_vtables * 2); vtables[num_vtables++] = offset(); // Point table to current vtable. 
bb.putInt(bb.capacity() - vtableloc, offset() - vtableloc); } nested = false; return vtableloc; } /** * Checks that a required field has been set in a given table that has * just been constructed. * * @param table The offset to the start of the table from the `ByteBuffer` capacity. * @param field The offset to the field in the vtable. */ public void required(int table, int field) { int table_start = bb.capacity() - table; int vtable_start = table_start - bb.getInt(table_start); boolean ok = bb.getShort(vtable_start + field) != 0; // If this fails, the caller will show what field needs to be set. if (!ok) throw new AssertionError("FlatBuffers: field " + field + " must be set"); } /// @endcond /** * Finalize a buffer, pointing to the given `root_table`. * * @param root_table An offset to be added to the buffer. * @param size_prefix Whether to prefix the size to the buffer. */ protected void finish(int root_table, boolean size_prefix) { prep(minalign, SIZEOF_INT + (size_prefix ? SIZEOF_INT : 0)); addOffset(root_table); if (size_prefix) { addInt(bb.capacity() - space); } bb.position(space); finished = true; } /** * Finalize a buffer, pointing to the given `root_table`. * * @param root_table An offset to be added to the buffer. */ public void finish(int root_table) { finish(root_table, false); } /** * Finalize a buffer, pointing to the given `root_table`, with the size prefixed. * * @param root_table An offset to be added to the buffer. */ public void finishSizePrefixed(int root_table) { finish(root_table, true); } /** * Finalize a buffer, pointing to the given `root_table`. * * @param root_table An offset to be added to the buffer. * @param file_identifier A FlatBuffer file identifier to be added to the buffer before * `root_table`. * @param size_prefix Whether to prefix the size to the buffer. */ protected void finish(int root_table, String file_identifier, boolean size_prefix) { prep(minalign, SIZEOF_INT + FILE_IDENTIFIER_LENGTH + (size_prefix ? SIZEOF_INT : 0)); if (file_identifier.length() != FILE_IDENTIFIER_LENGTH) throw new AssertionError("FlatBuffers: file identifier must be length " + FILE_IDENTIFIER_LENGTH); for (int i = FILE_IDENTIFIER_LENGTH - 1; i >= 0; i--) { addByte((byte)file_identifier.charAt(i)); } finish(root_table, size_prefix); } /** * Finalize a buffer, pointing to the given `root_table`. * * @param root_table An offset to be added to the buffer. * @param file_identifier A FlatBuffer file identifier to be added to the buffer before * `root_table`. */ public void finish(int root_table, String file_identifier) { finish(root_table, file_identifier, false); } /** * Finalize a buffer, pointing to the given `root_table`, with the size prefixed. * * @param root_table An offset to be added to the buffer. * @param file_identifier A FlatBuffer file identifier to be added to the buffer before * `root_table`. */ public void finishSizePrefixed(int root_table, String file_identifier) { finish(root_table, file_identifier, true); } /** * In order to save space, fields that are set to their default value * don't get serialized into the buffer. Forcing defaults provides a * way to manually disable this optimization. * * @param forceDefaults When set to `true`, always serializes default values. * @return Returns `this`. */ public FlatBufferBuilder forceDefaults(boolean forceDefaults){ this.force_defaults = forceDefaults; return this; } /** * Get the ByteBuffer representing the FlatBuffer. Only call this after you've * called `finish()`. 
The actual data starts at the ByteBuffer's current position, * not necessarily at `0`. * * @return The {@link ByteBuffer} representing the FlatBuffer */ public ByteBuffer dataBuffer() { finished(); return bb; } /** * The FlatBuffer data doesn't start at offset 0 in the {@link ByteBuffer}, but * now the {@code ByteBuffer}'s position is set to that location upon {@link #finish(int)}. * * @return The {@link ByteBuffer#position() position} the data starts in {@link #dataBuffer()} * @deprecated This method should not be needed anymore, but is left * here for the moment to document this API change. It will be removed in the future. */ @Deprecated private int dataStart() { finished(); return space; } /** * A utility function to copy and return the ByteBuffer data from `start` to * `start` + `length` as a `byte[]`. * * @param start Start copying at this offset. * @param length How many bytes to copy. * @return A range copy of the {@link #dataBuffer() data buffer}. * @throws IndexOutOfBoundsException If the range of bytes is ouf of bound. */ public byte[] sizedByteArray(int start, int length){ finished(); byte[] array = new byte[length]; bb.position(start); bb.get(array); return array; } /** * A utility function to copy and return the ByteBuffer data as a `byte[]`. * * @return A full copy of the {@link #dataBuffer() data buffer}. */ public byte[] sizedByteArray() { return sizedByteArray(space, bb.capacity() - space); } /** * A utility function to return an InputStream to the ByteBuffer data * * @return An InputStream that starts at the beginning of the ByteBuffer data * and can read to the end of it. */ public InputStream sizedInputStream() { finished(); ByteBuffer duplicate = bb.duplicate(); duplicate.position(space); duplicate.limit(bb.capacity()); return new ByteBufferBackedInputStream(duplicate); } /** * A class that allows a user to create an InputStream from a ByteBuffer. */ static class ByteBufferBackedInputStream extends InputStream { ByteBuffer buf; public ByteBufferBackedInputStream(ByteBuffer buf) { this.buf = buf; } public int read() throws IOException { try { return buf.get() & 0xFF; } catch(BufferUnderflowException e) { return -1; } } } } /// @}
1
16,603
Why is this space removed?
google-flatbuffers
java
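The `new_buf_size = nbb.capacity()` line added in the patch matters whenever a custom `ByteBufferFactory` over-allocates: the builder fills the buffer back to front and addresses everything relative to `capacity()`, so the copied bytes must land flush against the end of the buffer that actually came back, not the end of the size that was requested. A minimal, self-contained sketch in Java; the pooled allocator here is hypothetical and not part of FlatBuffers:

import java.nio.ByteBuffer;
import java.nio.ByteOrder;

public class GrowDemo {
    // Hypothetical pooled allocator: rounds the requested capacity up to a
    // bucket size, so the returned buffer can be larger than asked for.
    static ByteBuffer newPooledBuffer(int capacity) {
        int rounded = Math.max(64, Integer.highestOneBit(capacity - 1) << 1);
        return ByteBuffer.allocate(rounded).order(ByteOrder.LITTLE_ENDIAN);
    }

    public static void main(String[] args) {
        ByteBuffer old = ByteBuffer.allocate(48).order(ByteOrder.LITTLE_ENDIAN);
        ByteBuffer nbb = newPooledBuffer(old.capacity() << 1); // ask for 96

        // Old bytes must end up at the very end of the buffer that was
        // *actually* returned, hence re-reading nbb.capacity() instead of
        // trusting the requested size.
        old.position(0);
        nbb.position(nbb.capacity() - old.capacity());
        nbb.put(old);
        System.out.println("requested 96, got " + nbb.capacity()); // 128 here
    }
}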
@@ -1,3 +1,4 @@ +//go:build !windows // +build !windows // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
1
// +build !windows

// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package main

const (
	shortDescription = "👩‍✈️ Launch and manage containerized applications on AWS."
)
1
19,874
Can we remove this other line now then?
aws-copilot-cli
go
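The legacy `// +build !windows` line being asked about still has a job: `//go:build` only exists since Go 1.17, and older toolchains read the `// +build` form, so the pair has to stay until pre-1.17 compilers no longer need to build the file (gofmt on 1.17+ keeps the two lines in sync automatically). A minimal sketch of the paired form:

//go:build !windows
// +build !windows

package main

import "fmt"

// The blank line between the constraints and the package clause is required,
// and go vet's buildtag check flags the two lines if they ever disagree.
func main() {
	fmt.Println("compiled on non-Windows targets only")
}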
@@ -52,6 +52,7 @@ PERFECT_FILES = [ 'qutebrowser/misc/checkpyver.py', 'qutebrowser/misc/guiprocess.py', 'qutebrowser/misc/editor.py', + 'qutebrowser/misc/cmdhistory.py' 'qutebrowser/mainwindow/statusbar/keystring.py', 'qutebrowser/mainwindow/statusbar/percentage.py',
1
#!/usr/bin/env python3
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:

# Copyright 2015 Florian Bruhin (The Compiler) <[email protected]>

# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.

"""Enforce perfect coverage on some files."""

import os
import sys
import os.path
from xml.etree import ElementTree

sys.path.insert(0, os.path.join(os.path.dirname(__file__), os.pardir,
                                os.pardir))

from scripts import utils

PERFECT_FILES = [
    'qutebrowser/commands/cmdexc.py',
    'qutebrowser/commands/cmdutils.py',
    'qutebrowser/commands/argparser.py',
    'qutebrowser/browser/tabhistory.py',
    'qutebrowser/browser/http.py',
    'qutebrowser/browser/rfc6266.py',
    'qutebrowser/browser/webelem.py',
    'qutebrowser/browser/network/schemehandler.py',
    'qutebrowser/browser/network/filescheme.py',
    'qutebrowser/browser/network/networkreply.py',
    'qutebrowser/browser/signalfilter.py',
    'qutebrowser/misc/readline.py',
    'qutebrowser/misc/split.py',
    'qutebrowser/misc/msgbox.py',
    'qutebrowser/misc/checkpyver.py',
    'qutebrowser/misc/guiprocess.py',
    'qutebrowser/misc/editor.py',
    'qutebrowser/mainwindow/statusbar/keystring.py',
    'qutebrowser/mainwindow/statusbar/percentage.py',
    'qutebrowser/mainwindow/statusbar/progress.py',
    'qutebrowser/mainwindow/statusbar/tabindex.py',
    'qutebrowser/mainwindow/statusbar/textbase.py',
    'qutebrowser/config/configtypes.py',
    'qutebrowser/config/configdata.py',
    'qutebrowser/config/configexc.py',
    'qutebrowser/config/textwrapper.py',
    'qutebrowser/config/style.py',
    'qutebrowser/utils/qtutils.py',
    'qutebrowser/utils/standarddir.py',
    'qutebrowser/utils/urlutils.py',
    'qutebrowser/utils/usertypes.py',
    'qutebrowser/utils/utils.py',
    'qutebrowser/utils/version.py',
    'qutebrowser/utils/debug.py',
    'qutebrowser/utils/jinja.py',
    'qutebrowser/utils/error.py',
]


def main():
    """Main entry point.

    Return:
        The return code to return.
    """
    utils.change_cwd()

    if sys.platform != 'linux':
        print("Skipping coverage checks on non-Linux system.")
        sys.exit()
    elif '-k' in sys.argv[1:]:
        print("Skipping coverage checks because -k is given.")
        sys.exit()
    elif '-m' in sys.argv[1:]:
        print("Skipping coverage checks because -m is given.")
        sys.exit()
    elif any(arg.startswith('tests' + os.sep) for arg in sys.argv[1:]):
        print("Skipping coverage checks because a filename is given.")
        sys.exit()

    for path in PERFECT_FILES:
        assert os.path.exists(os.path.join(*path.split('/'))), path

    with open('.coverage.xml', encoding='utf-8') as f:
        tree = ElementTree.parse(f)
    classes = tree.getroot().findall('./packages/package/classes/class')

    status = 0

    for klass in classes:
        filename = klass.attrib['filename']
        line_cov = float(klass.attrib['line-rate']) * 100
        branch_cov = float(klass.attrib['branch-rate']) * 100

        assert 0 <= line_cov <= 100, line_cov
        assert 0 <= branch_cov <= 100, branch_cov
        assert '\\' not in filename, filename
        assert '/' in filename, filename

        # Files without any branches have 0% coverage
        if branch_cov < 100 and klass.find('./lines/line[@branch="true"]'):
            is_bad = True
        else:
            is_bad = line_cov < 100

        if filename in PERFECT_FILES and is_bad:
            status = 1
            print("{} has {}% line and {}% branch coverage!".format(
                filename, line_cov, branch_cov))
        elif filename not in PERFECT_FILES and not is_bad:
            status = 1
            print("{} has 100% coverage but is not in PERFECT_FILES!".format(
                filename))

    os.remove('.coverage.xml')
    return status


if __name__ == '__main__':
    sys.exit(main())
1
13,411
There's a comma missing here at the end :wink:
qutebrowser-qutebrowser
py
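The missing comma the reviewer winks at is a real bug, not a style nit: Python concatenates adjacent string literals at parse time, so the new entry silently fuses with the one after it and neither file is actually checked. A quick demonstration:

# Adjacent string literals merge at parse time, so the missing comma
# yields one bogus path instead of two list entries.
broken = [
    'qutebrowser/misc/cmdhistory.py'          # <- no comma here
    'qutebrowser/mainwindow/statusbar/keystring.py',
]
fixed = [
    'qutebrowser/misc/cmdhistory.py',
    'qutebrowser/mainwindow/statusbar/keystring.py',
]
assert len(broken) == 1 and len(fixed) == 2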
@@ -19,13 +19,8 @@ namespace Microsoft.DotNet.Build.Tasks.Feed [Required] public string AccountKey { get; set; } - [Required] public ITaskItem[] ItemsToPush { get; set; } - public string IndexDirectory { get; set; } - - public bool PublishFlatContainer { get; set; } - public bool Overwrite { get; set; } public override bool Execute()
1
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.

using System;
using System.Threading.Tasks;
using MSBuild = Microsoft.Build.Utilities;
using Microsoft.Build.Framework;
using System.Collections.Generic;

namespace Microsoft.DotNet.Build.Tasks.Feed
{
    public class PushToBlobFeed : MSBuild.Task
    {
        [Required]
        public string ExpectedFeedUrl { get; set; }

        [Required]
        public string AccountKey { get; set; }

        [Required]
        public ITaskItem[] ItemsToPush { get; set; }

        public string IndexDirectory { get; set; }

        public bool PublishFlatContainer { get; set; }

        public bool Overwrite { get; set; }

        public override bool Execute()
        {
            return ExecuteAsync().GetAwaiter().GetResult();
        }

        public async Task<bool> ExecuteAsync()
        {
            try
            {
                Log.LogMessage(MessageImportance.High, "Performing feed push...");

                if (ItemsToPush == null)
                {
                    Log.LogError($"No items to push. Please check ItemGroup ItemsToPush.");
                }

                BlobFeedAction blobFeedAction = new BlobFeedAction(ExpectedFeedUrl, AccountKey, IndexDirectory, Log);
                bool containerExists = await blobFeedAction.feed.CheckIfFeedExists();

                if (!containerExists)
                {
                    await blobFeedAction.feed.CreateFeedContainer();
                }

                if (!PublishFlatContainer)
                {
                    if (!containerExists)
                    {
                        await blobFeedAction.PushToFeed(ConvertToStringLists(ItemsToPush));
                    }
                    else
                    {
                        await blobFeedAction.PushToFeed(ConvertToStringLists(ItemsToPush), Overwrite);
                    }
                }
                else
                {
                    await blobFeedAction.PushToFeedFlat(ConvertToStringLists(ItemsToPush), Overwrite);
                }
            }
            catch (Exception e)
            {
                Log.LogErrorFromException(e, true);
            }

            return !Log.HasLoggedErrors;
        }

        private List<string> ConvertToStringLists(ITaskItem[] taskItems)
        {
            List<string> stringList = new List<string>();
            foreach (var item in taskItems)
            {
                stringList.Add(item.ItemSpec);
            }
            return stringList;
        }
    }
}
1
13,761
Why is this not required any longer?
dotnet-buildtools
.cs
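Context for the reviewer's question: `[Required]` is enforced by MSBuild itself, which fails with error MSB4044 before `Execute()` ever runs; once the attribute is removed, the only guard left is the runtime `ItemsToPush == null` check inside `ExecuteAsync`. A sketch of the two styles, assuming the usual MSBuild task assemblies are referenced:

using Microsoft.Build.Framework;
using MSBuild = Microsoft.Build.Utilities;

public class PushExample : MSBuild.Task
{
    // Declarative: MSBuild raises MSB4044 before Execute() runs if unset.
    [Required]
    public string ExpectedFeedUrl { get; set; }

    // Optional: the task decides at runtime whether a missing value is fatal.
    public ITaskItem[] ItemsToPush { get; set; }

    public override bool Execute()
    {
        if (ItemsToPush == null || ItemsToPush.Length == 0)
        {
            Log.LogError("No items to push. Please check ItemGroup ItemsToPush.");
        }
        // ... push logic would go here ...
        return !Log.HasLoggedErrors;
    }
}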
@@ -6,11 +6,13 @@ package admin import ( "github.com/Unknwon/com" + "github.com/Unknwon/paginater" "github.com/gogits/gogs/models" "github.com/gogits/gogs/modules/base" "github.com/gogits/gogs/modules/log" "github.com/gogits/gogs/modules/middleware" + "github.com/gogits/gogs/modules/setting" ) const (
1
// Copyright 2014 The Gogs Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package admin

import (
	"github.com/Unknwon/com"

	"github.com/gogits/gogs/models"
	"github.com/gogits/gogs/modules/base"
	"github.com/gogits/gogs/modules/log"
	"github.com/gogits/gogs/modules/middleware"
)

const (
	NOTICES base.TplName = "admin/notice"
)

func Notices(ctx *middleware.Context) {
	ctx.Data["Title"] = ctx.Tr("admin.notices")
	ctx.Data["PageIsAdmin"] = true
	ctx.Data["PageIsAdminNotices"] = true

	pageNum := 50
	p := pagination(ctx, models.CountNotices(), pageNum)

	notices, err := models.GetNotices(pageNum, (p-1)*pageNum)
	if err != nil {
		ctx.Handle(500, "GetNotices", err)
		return
	}
	ctx.Data["Notices"] = notices
	ctx.HTML(200, NOTICES)
}

func DeleteNotice(ctx *middleware.Context) {
	id := com.StrTo(ctx.Params(":id")).MustInt64()
	if err := models.DeleteNotice(id); err != nil {
		ctx.Handle(500, "DeleteNotice", err)
		return
	}
	log.Trace("System notice deleted by admin(%s): %d", ctx.User.Name, id)
	ctx.Flash.Success(ctx.Tr("admin.notices.delete_success"))
	ctx.Redirect("/admin/notices")
}
1
9,379
Indentation seems a bit off here. Run `go fmt`.
gogs-gogs
go
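On the `go fmt` suggestion: gofmt normalizes indentation to tabs and sorts the imports inside each blank-line-separated group, but it never moves an import between groups, so the split the patch introduces (external packages first, then `gogs` packages) is a convention maintained by hand or by goimports. A sketch of the expected layout, with blank identifiers only so the fragment compiles on its own:

package admin

import (
	// external dependencies; gofmt keeps each group sorted
	_ "github.com/Unknwon/com"
	_ "github.com/Unknwon/paginater"

	// project packages in their own group, separated by a blank line
	_ "github.com/gogits/gogs/modules/setting"
)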
@@ -650,6 +650,9 @@ func (api *Server) GetLogs( ctx context.Context, in *iotexapi.GetLogsRequest, ) (*iotexapi.GetLogsResponse, error) { + if in.GetFilter() == nil { + return nil, status.Error(codes.InvalidArgument, "empty filter") + } switch { case in.GetByBlock() != nil: req := in.GetByBlock()
1
// Copyright (c) 2019 IoTeX Foundation // This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no // warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent // permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache // License 2.0 that can be found in the LICENSE file. package api import ( "bytes" "context" "encoding/hex" "math" "math/big" "net" "strconv" "time" "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes" grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" "github.com/pkg/errors" "go.uber.org/zap" "google.golang.org/genproto/googleapis/rpc/errdetails" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/reflection" "google.golang.org/grpc/status" "github.com/iotexproject/go-pkgs/hash" "github.com/iotexproject/iotex-address/address" "github.com/iotexproject/iotex-election/committee" "github.com/iotexproject/iotex-proto/golang/iotexapi" "github.com/iotexproject/iotex-proto/golang/iotextypes" "github.com/iotexproject/iotex-core/action" "github.com/iotexproject/iotex-core/action/protocol" accountutil "github.com/iotexproject/iotex-core/action/protocol/account/util" "github.com/iotexproject/iotex-core/action/protocol/poll" "github.com/iotexproject/iotex-core/action/protocol/rolldpos" "github.com/iotexproject/iotex-core/actpool" "github.com/iotexproject/iotex-core/blockchain" "github.com/iotexproject/iotex-core/blockchain/block" "github.com/iotexproject/iotex-core/blockchain/blockdao" "github.com/iotexproject/iotex-core/blockindex" "github.com/iotexproject/iotex-core/config" "github.com/iotexproject/iotex-core/db" "github.com/iotexproject/iotex-core/gasstation" "github.com/iotexproject/iotex-core/pkg/log" "github.com/iotexproject/iotex-core/pkg/version" "github.com/iotexproject/iotex-core/state" "github.com/iotexproject/iotex-core/state/factory" ) var ( // ErrInternalServer indicates the internal server error ErrInternalServer = errors.New("internal server error") // ErrReceipt indicates the error of receipt ErrReceipt = errors.New("invalid receipt") // ErrAction indicates the error of action ErrAction = errors.New("invalid action") ) // BroadcastOutbound sends a broadcast message to the whole network type BroadcastOutbound func(ctx context.Context, chainID uint32, msg proto.Message) error // Config represents the config to setup api type Config struct { broadcastHandler BroadcastOutbound electionCommittee committee.Committee } // Option is the option to override the api config type Option func(cfg *Config) error // WithBroadcastOutbound is the option to broadcast msg outbound func WithBroadcastOutbound(broadcastHandler BroadcastOutbound) Option { return func(cfg *Config) error { cfg.broadcastHandler = broadcastHandler return nil } } // WithNativeElection is the option to return native election data through API. 
func WithNativeElection(committee committee.Committee) Option { return func(cfg *Config) error { cfg.electionCommittee = committee return nil } } // Server provides api for user to query blockchain data type Server struct { bc blockchain.Blockchain sf factory.Factory dao blockdao.BlockDAO indexer blockindex.Indexer ap actpool.ActPool gs *gasstation.GasStation broadcastHandler BroadcastOutbound cfg config.Config registry *protocol.Registry chainListener Listener grpcServer *grpc.Server hasActionIndex bool electionCommittee committee.Committee } // NewServer creates a new server func NewServer( cfg config.Config, chain blockchain.Blockchain, sf factory.Factory, dao blockdao.BlockDAO, indexer blockindex.Indexer, actPool actpool.ActPool, registry *protocol.Registry, opts ...Option, ) (*Server, error) { apiCfg := Config{} for _, opt := range opts { if err := opt(&apiCfg); err != nil { return nil, err } } if cfg.API == (config.API{}) { log.L().Warn("API server is not configured.") cfg.API = config.Default.API } if cfg.API.RangeQueryLimit < uint64(cfg.API.TpsWindow) { return nil, errors.New("range query upper limit cannot be less than tps window") } svr := &Server{ bc: chain, sf: sf, dao: dao, indexer: indexer, ap: actPool, broadcastHandler: apiCfg.broadcastHandler, cfg: cfg, registry: registry, chainListener: NewChainListener(), gs: gasstation.NewGasStation(chain, sf.SimulateExecution, dao, cfg.API), electionCommittee: apiCfg.electionCommittee, } if _, ok := cfg.Plugins[config.GatewayPlugin]; ok { svr.hasActionIndex = true } svr.grpcServer = grpc.NewServer( grpc.StreamInterceptor(grpc_prometheus.StreamServerInterceptor), grpc.UnaryInterceptor(grpc_prometheus.UnaryServerInterceptor), ) iotexapi.RegisterAPIServiceServer(svr.grpcServer, svr) grpc_prometheus.Register(svr.grpcServer) reflection.Register(svr.grpcServer) return svr, nil } // GetAccount returns the metadata of an account func (api *Server) GetAccount(ctx context.Context, in *iotexapi.GetAccountRequest) (*iotexapi.GetAccountResponse, error) { if in.Address == address.RewardingPoolAddr || in.Address == address.StakingBucketPoolAddr { return api.getProtocolAccount(ctx, in.Address) } state, tipHeight, err := accountutil.AccountStateWithHeight(api.sf, in.Address) if err != nil { return nil, status.Error(codes.NotFound, err.Error()) } pendingNonce, err := api.ap.GetPendingNonce(in.Address) if err != nil { return nil, status.Error(codes.Internal, err.Error()) } if api.indexer == nil { return nil, status.Error(codes.NotFound, blockindex.ErrActionIndexNA.Error()) } addr, err := address.FromString(in.Address) if err != nil { return nil, err } numActions, err := api.indexer.GetActionCountByAddress(hash.BytesToHash160(addr.Bytes())) if err != nil { return nil, status.Error(codes.NotFound, err.Error()) } accountMeta := &iotextypes.AccountMeta{ Address: in.Address, Balance: state.Balance.String(), Nonce: state.Nonce, PendingNonce: pendingNonce, NumActions: numActions, } header, err := api.bc.BlockHeaderByHeight(tipHeight) if err != nil { return nil, status.Error(codes.NotFound, err.Error()) } hash := header.HashBlock() return &iotexapi.GetAccountResponse{AccountMeta: accountMeta, BlockIdentifier: &iotextypes.BlockIdentifier{ Hash: hex.EncodeToString(hash[:]), Height: tipHeight, }}, nil } // GetActions returns actions func (api *Server) GetActions(ctx context.Context, in *iotexapi.GetActionsRequest) (*iotexapi.GetActionsResponse, error) { if (!api.hasActionIndex || api.indexer == nil) && (in.GetByHash() != nil || in.GetByAddr() != nil) { return nil, 
status.Error(codes.NotFound, blockindex.ErrActionIndexNA.Error()) } switch { case in.GetByIndex() != nil: request := in.GetByIndex() return api.getActions(request.Start, request.Count) case in.GetByHash() != nil: request := in.GetByHash() return api.getSingleAction(request.ActionHash, request.CheckPending) case in.GetByAddr() != nil: request := in.GetByAddr() return api.getActionsByAddress(request.Address, request.Start, request.Count) case in.GetUnconfirmedByAddr() != nil: request := in.GetUnconfirmedByAddr() return api.getUnconfirmedActionsByAddress(request.Address, request.Start, request.Count) case in.GetByBlk() != nil: request := in.GetByBlk() return api.getActionsByBlock(request.BlkHash, request.Start, request.Count) default: return nil, status.Error(codes.NotFound, "invalid GetActionsRequest type") } } // GetBlockMetas returns block metadata func (api *Server) GetBlockMetas(ctx context.Context, in *iotexapi.GetBlockMetasRequest) (*iotexapi.GetBlockMetasResponse, error) { switch { case in.GetByIndex() != nil: request := in.GetByIndex() return api.getBlockMetas(request.Start, request.Count) case in.GetByHash() != nil: request := in.GetByHash() return api.getBlockMeta(request.BlkHash) default: return nil, status.Error(codes.NotFound, "invalid GetBlockMetasRequest type") } } // GetChainMeta returns blockchain metadata func (api *Server) GetChainMeta(ctx context.Context, in *iotexapi.GetChainMetaRequest) (*iotexapi.GetChainMetaResponse, error) { tipHeight := api.bc.TipHeight() if tipHeight == 0 { return &iotexapi.GetChainMetaResponse{ ChainMeta: &iotextypes.ChainMeta{ Epoch: &iotextypes.EpochData{}, }, }, nil } if api.indexer == nil { // TODO: in case indexer does not exist, may consider return a value like 0 instead of exit return nil, status.Error(codes.NotFound, blockindex.ErrActionIndexNA.Error()) } totalActions, err := api.indexer.GetTotalActions() if err != nil { return nil, status.Error(codes.Internal, err.Error()) } blockLimit := int64(api.cfg.API.TpsWindow) if blockLimit <= 0 { return nil, status.Errorf(codes.Internal, "block limit is %d", blockLimit) } // avoid genesis block if int64(tipHeight) < blockLimit { blockLimit = int64(tipHeight) } r, err := api.getBlockMetas(tipHeight-uint64(blockLimit)+1, uint64(blockLimit)) if err != nil { return nil, status.Error(codes.NotFound, err.Error()) } blks := r.BlkMetas if len(blks) == 0 { return nil, status.Error(codes.NotFound, "get 0 blocks! 
not able to calculate aps") } var numActions int64 for _, blk := range blks { numActions += blk.NumActions } t1 := time.Unix(blks[0].Timestamp.GetSeconds(), int64(blks[0].Timestamp.GetNanos())) t2 := time.Unix(blks[len(blks)-1].Timestamp.GetSeconds(), int64(blks[len(blks)-1].Timestamp.GetNanos())) // duration of time difference in milli-seconds // TODO: use config.Genesis.BlockInterval after PR1289 merges timeDiff := (t2.Sub(t1) + 10*time.Second) / time.Millisecond tps := float32(numActions*1000) / float32(timeDiff) chainMeta := &iotextypes.ChainMeta{ Height: tipHeight, NumActions: int64(totalActions), Tps: int64(math.Ceil(float64(tps))), TpsFloat: tps, } rp := rolldpos.FindProtocol(api.registry) if rp != nil { epochNum := rp.GetEpochNum(tipHeight) epochHeight := rp.GetEpochHeight(epochNum) gravityChainStartHeight, err := api.getGravityChainStartHeight(epochHeight) if err != nil { return nil, status.Error(codes.NotFound, err.Error()) } chainMeta.Epoch = &iotextypes.EpochData{ Num: epochNum, Height: epochHeight, GravityChainStartHeight: gravityChainStartHeight, } } return &iotexapi.GetChainMetaResponse{ChainMeta: chainMeta}, nil } // GetServerMeta gets the server metadata func (api *Server) GetServerMeta(ctx context.Context, in *iotexapi.GetServerMetaRequest) (*iotexapi.GetServerMetaResponse, error) { return &iotexapi.GetServerMetaResponse{ServerMeta: &iotextypes.ServerMeta{ PackageVersion: version.PackageVersion, PackageCommitID: version.PackageCommitID, GitStatus: version.GitStatus, GoVersion: version.GoVersion, BuildTime: version.BuildTime, }}, nil } // SendAction is the API to send an action to blockchain. func (api *Server) SendAction(ctx context.Context, in *iotexapi.SendActionRequest) (*iotexapi.SendActionResponse, error) { log.L().Debug("receive send action request") var selp action.SealedEnvelope var err error if err = selp.LoadProto(in.Action); err != nil { return nil, status.Error(codes.InvalidArgument, err.Error()) } // Add to local actpool ctx = protocol.WithRegistry(ctx, api.registry) if err = api.ap.Add(ctx, selp); err != nil { log.L().Debug(err.Error()) var desc string switch errors.Cause(err) { case action.ErrBalance: desc = "Invalid balance" case action.ErrInsufficientBalanceForGas: desc = "Insufficient balance for gas" case action.ErrNonce: desc = "Invalid nonce" case action.ErrAddress: desc = "Blacklisted address" case action.ErrActPool: desc = "Invalid actpool" case action.ErrGasPrice: desc = "Invalid gas price" default: desc = "Unknown" } st := status.New(codes.Internal, err.Error()) v := &errdetails.BadRequest_FieldViolation{ Field: "Action rejected", Description: desc, } br := &errdetails.BadRequest{} br.FieldViolations = append(br.FieldViolations, v) st, err := st.WithDetails(br) if err != nil { log.S().Panicf("Unexpected error attaching metadata: %v", err) } return nil, st.Err() } // If there is no error putting into local actpool, // Broadcast it to the network if err = api.broadcastHandler(context.Background(), api.bc.ChainID(), in.Action); err != nil { log.L().Warn("Failed to broadcast SendAction request.", zap.Error(err)) } hash := selp.Hash() return &iotexapi.SendActionResponse{ActionHash: hex.EncodeToString(hash[:])}, nil } // GetReceiptByAction gets receipt with corresponding action hash func (api *Server) GetReceiptByAction(ctx context.Context, in *iotexapi.GetReceiptByActionRequest) (*iotexapi.GetReceiptByActionResponse, error) { if !api.hasActionIndex || api.indexer == nil { return nil, status.Error(codes.NotFound, blockindex.ErrActionIndexNA.Error()) } 
actHash, err := hash.HexStringToHash256(in.ActionHash) if err != nil { return nil, status.Error(codes.InvalidArgument, err.Error()) } receipt, err := api.GetReceiptByActionHash(actHash) if err != nil { return nil, status.Error(codes.NotFound, err.Error()) } blkHash, err := api.getBlockHashByActionHash(actHash) if err != nil { return nil, status.Error(codes.NotFound, err.Error()) } return &iotexapi.GetReceiptByActionResponse{ ReceiptInfo: &iotexapi.ReceiptInfo{ Receipt: receipt.ConvertToReceiptPb(), BlkHash: hex.EncodeToString(blkHash[:]), }, }, nil } // ReadContract reads the state in a contract address specified by the slot func (api *Server) ReadContract(ctx context.Context, in *iotexapi.ReadContractRequest) (*iotexapi.ReadContractResponse, error) { log.L().Debug("receive read smart contract request") sc := &action.Execution{} if err := sc.LoadProto(in.Execution); err != nil { return nil, status.Error(codes.InvalidArgument, err.Error()) } state, err := accountutil.AccountState(api.sf, in.CallerAddress) if err != nil { return nil, status.Error(codes.InvalidArgument, err.Error()) } sc, _ = action.NewExecution( sc.Contract(), state.Nonce+1, sc.Amount(), api.cfg.Genesis.BlockGasLimit, big.NewInt(0), sc.Data(), ) callerAddr, err := address.FromString(in.CallerAddress) if err != nil { return nil, status.Error(codes.InvalidArgument, err.Error()) } ctx, err = api.bc.Context() if err != nil { return nil, err } retval, receipt, err := api.sf.SimulateExecution(ctx, callerAddr, sc, api.dao.GetBlockHash) if err != nil { return nil, status.Error(codes.Internal, err.Error()) } return &iotexapi.ReadContractResponse{ Data: hex.EncodeToString(retval), Receipt: receipt.ConvertToReceiptPb(), }, nil } // ReadState reads state on blockchain func (api *Server) ReadState(ctx context.Context, in *iotexapi.ReadStateRequest) (*iotexapi.ReadStateResponse, error) { p, ok := api.registry.Find(string(in.ProtocolID)) if !ok { return nil, status.Errorf(codes.Internal, "protocol %s isn't registered", string(in.ProtocolID)) } data, readStateHeight, err := api.readState(ctx, p, in.GetHeight(), in.MethodName, in.Arguments...) 
if err != nil { return nil, status.Error(codes.NotFound, err.Error()) } blkHash, err := api.dao.GetBlockHash(readStateHeight) if err != nil { if errors.Cause(err) == db.ErrNotExist { return nil, status.Error(codes.NotFound, err.Error()) } return nil, status.Error(codes.Internal, err.Error()) } out := iotexapi.ReadStateResponse{ Data: data, BlockIdentifier: &iotextypes.BlockIdentifier{ Height: readStateHeight, Hash: hex.EncodeToString(blkHash[:]), }, } return &out, nil } // SuggestGasPrice suggests gas price func (api *Server) SuggestGasPrice(ctx context.Context, in *iotexapi.SuggestGasPriceRequest) (*iotexapi.SuggestGasPriceResponse, error) { suggestPrice, err := api.gs.SuggestGasPrice() if err != nil { return nil, status.Error(codes.Internal, err.Error()) } return &iotexapi.SuggestGasPriceResponse{GasPrice: suggestPrice}, nil } // EstimateGasForAction estimates gas for action func (api *Server) EstimateGasForAction(ctx context.Context, in *iotexapi.EstimateGasForActionRequest) (*iotexapi.EstimateGasForActionResponse, error) { estimateGas, err := api.gs.EstimateGasForAction(in.Action) if err != nil { return nil, status.Error(codes.Internal, err.Error()) } return &iotexapi.EstimateGasForActionResponse{Gas: estimateGas}, nil } // EstimateActionGasConsumption estimate gas consume for action without signature func (api *Server) EstimateActionGasConsumption(ctx context.Context, in *iotexapi.EstimateActionGasConsumptionRequest) (respone *iotexapi.EstimateActionGasConsumptionResponse, err error) { respone = &iotexapi.EstimateActionGasConsumptionResponse{} switch { case in.GetExecution() != nil: request := in.GetExecution() return api.estimateActionGasConsumptionForExecution(request, in.GetCallerAddress()) case in.GetTransfer() != nil: respone.Gas = uint64(len(in.GetTransfer().Payload))*action.TransferPayloadGas + action.TransferBaseIntrinsicGas case in.GetStakeCreate() != nil: respone.Gas = uint64(len(in.GetStakeCreate().Payload))*action.CreateStakePayloadGas + action.CreateStakeBaseIntrinsicGas case in.GetStakeUnstake() != nil: respone.Gas = uint64(len(in.GetStakeUnstake().Payload))*action.ReclaimStakePayloadGas + action.ReclaimStakeBaseIntrinsicGas case in.GetStakeWithdraw() != nil: respone.Gas = uint64(len(in.GetStakeWithdraw().Payload))*action.ReclaimStakePayloadGas + action.ReclaimStakeBaseIntrinsicGas case in.GetStakeAddDeposit() != nil: respone.Gas = uint64(len(in.GetStakeAddDeposit().Payload))*action.DepositToStakePayloadGas + action.DepositToStakeBaseIntrinsicGas case in.GetStakeRestake() != nil: respone.Gas = uint64(len(in.GetStakeRestake().Payload))*action.RestakePayloadGas + action.RestakeBaseIntrinsicGas case in.GetStakeChangeCandidate() != nil: respone.Gas = uint64(len(in.GetStakeChangeCandidate().Payload))*action.MoveStakePayloadGas + action.MoveStakeBaseIntrinsicGas case in.GetStakeTransferOwnership() != nil: respone.Gas = uint64(len(in.GetStakeTransferOwnership().Payload))*action.MoveStakePayloadGas + action.MoveStakeBaseIntrinsicGas case in.GetCandidateRegister() != nil: respone.Gas = uint64(len(in.GetCandidateRegister().Payload))*action.CandidateRegisterPayloadGas + action.CandidateRegisterBaseIntrinsicGas case in.GetCandidateUpdate() != nil: respone.Gas = action.CandidateUpdateBaseIntrinsicGas default: return nil, status.Error(codes.InvalidArgument, "invalid argument") } return } // GetEpochMeta gets epoch metadata func (api *Server) GetEpochMeta( ctx context.Context, in *iotexapi.GetEpochMetaRequest, ) (*iotexapi.GetEpochMetaResponse, error) { rp := 
rolldpos.FindProtocol(api.registry) if rp == nil { return &iotexapi.GetEpochMetaResponse{}, nil } if in.EpochNumber < 1 { return nil, status.Error(codes.InvalidArgument, "epoch number cannot be less than one") } epochHeight := rp.GetEpochHeight(in.EpochNumber) gravityChainStartHeight, err := api.getGravityChainStartHeight(epochHeight) if err != nil { return nil, status.Error(codes.NotFound, err.Error()) } epochData := &iotextypes.EpochData{ Num: in.EpochNumber, Height: epochHeight, GravityChainStartHeight: gravityChainStartHeight, } pp := poll.FindProtocol(api.registry) if pp == nil { return nil, status.Error(codes.Internal, "poll protocol is not registered") } methodName := []byte("ActiveBlockProducersByEpoch") arguments := [][]byte{[]byte(strconv.FormatUint(in.EpochNumber, 10))} height := strconv.FormatUint(epochHeight, 10) data, _, err := api.readState(context.Background(), pp, height, methodName, arguments...) if err != nil { return nil, status.Error(codes.NotFound, err.Error()) } var activeConsensusBlockProducers state.CandidateList if err := activeConsensusBlockProducers.Deserialize(data); err != nil { return nil, status.Error(codes.Internal, err.Error()) } numBlks, produce, err := api.getProductivityByEpoch(rp, in.EpochNumber, api.bc.TipHeight(), activeConsensusBlockProducers) if err != nil { return nil, status.Error(codes.NotFound, err.Error()) } methodName = []byte("BlockProducersByEpoch") data, _, err = api.readState(context.Background(), pp, height, methodName, arguments...) if err != nil { return nil, status.Error(codes.NotFound, err.Error()) } var BlockProducers state.CandidateList if err := BlockProducers.Deserialize(data); err != nil { return nil, status.Error(codes.Internal, err.Error()) } var blockProducersInfo []*iotexapi.BlockProducerInfo for _, bp := range BlockProducers { var active bool var blockProduction uint64 if production, ok := produce[bp.Address]; ok { active = true blockProduction = production } blockProducersInfo = append(blockProducersInfo, &iotexapi.BlockProducerInfo{ Address: bp.Address, Votes: bp.Votes.String(), Active: active, Production: blockProduction, }) } return &iotexapi.GetEpochMetaResponse{ EpochData: epochData, TotalBlocks: numBlks, BlockProducersInfo: blockProducersInfo, }, nil } // GetRawBlocks gets raw block data func (api *Server) GetRawBlocks( ctx context.Context, in *iotexapi.GetRawBlocksRequest, ) (*iotexapi.GetRawBlocksResponse, error) { if in.Count == 0 || in.Count > api.cfg.API.RangeQueryLimit { return nil, status.Error(codes.InvalidArgument, "range exceeds the limit") } tipHeight := api.bc.TipHeight() if in.StartHeight > tipHeight { return nil, status.Error(codes.InvalidArgument, "start height should not exceed tip height") } var res []*iotexapi.BlockInfo for height := int(in.StartHeight); height <= int(tipHeight); height++ { if uint64(len(res)) >= in.Count { break } blk, err := api.dao.GetBlockByHeight(uint64(height)) if err != nil { return nil, status.Error(codes.NotFound, err.Error()) } var receiptsPb []*iotextypes.Receipt if in.WithReceipts { receipts, err := api.dao.GetReceipts(uint64(height)) if err != nil { return nil, status.Error(codes.NotFound, err.Error()) } for _, receipt := range receipts { receiptsPb = append(receiptsPb, receipt.ConvertToReceiptPb()) } } res = append(res, &iotexapi.BlockInfo{ Block: blk.ConvertToBlockPb(), Receipts: receiptsPb, }) } return &iotexapi.GetRawBlocksResponse{Blocks: res}, nil } // GetLogs get logs filtered by contract address and topics func (api *Server) GetLogs( ctx context.Context, in 
*iotexapi.GetLogsRequest, ) (*iotexapi.GetLogsResponse, error) { switch { case in.GetByBlock() != nil: req := in.GetByBlock() h, err := api.dao.GetBlockHeight(hash.BytesToHash256(req.BlockHash)) if err != nil { return nil, status.Error(codes.InvalidArgument, "invalid block hash") } filter, ok := NewLogFilter(in.Filter, nil, nil).(*LogFilter) if !ok { return nil, status.Error(codes.Internal, "cannot convert to *LogFilter") } logs, err := api.getLogsInBlock(filter, h, 1) return &iotexapi.GetLogsResponse{Logs: logs}, err case in.GetByRange() != nil: req := in.GetByRange() if req.FromBlock > api.bc.TipHeight() { return nil, status.Error(codes.InvalidArgument, "start block > tip height") } filter, ok := NewLogFilter(in.Filter, nil, nil).(*LogFilter) if !ok { return nil, status.Error(codes.Internal, "cannot convert to *LogFilter") } logs, err := api.getLogsInBlock(filter, req.FromBlock, req.Count) return &iotexapi.GetLogsResponse{Logs: logs}, err default: return nil, status.Error(codes.InvalidArgument, "invalid GetLogsRequest type") } } // StreamBlocks streams blocks func (api *Server) StreamBlocks(in *iotexapi.StreamBlocksRequest, stream iotexapi.APIService_StreamBlocksServer) error { errChan := make(chan error) if err := api.chainListener.AddResponder(NewBlockListener(stream, errChan)); err != nil { return status.Error(codes.Internal, err.Error()) } for { select { case err := <-errChan: if err != nil { err = status.Error(codes.Aborted, err.Error()) } return err } } } // StreamLogs streams logs that match the filter condition func (api *Server) StreamLogs(in *iotexapi.StreamLogsRequest, stream iotexapi.APIService_StreamLogsServer) error { errChan := make(chan error) // register the log filter so it will match logs in new blocks if err := api.chainListener.AddResponder(NewLogFilter(in.Filter, stream, errChan)); err != nil { return status.Error(codes.Internal, err.Error()) } for { select { case err := <-errChan: if err != nil { err = status.Error(codes.Aborted, err.Error()) } return err } } } // GetElectionBuckets returns the native election buckets. 
func (api *Server) GetElectionBuckets( ctx context.Context, in *iotexapi.GetElectionBucketsRequest, ) (*iotexapi.GetElectionBucketsResponse, error) { if api.electionCommittee == nil { return nil, status.Error(codes.Unavailable, "Native election no supported") } buckets, err := api.electionCommittee.NativeBucketsByEpoch(in.GetEpochNum()) if err != nil { return nil, status.Error(codes.Internal, err.Error()) } re := make([]*iotextypes.ElectionBucket, len(buckets)) for i, b := range buckets { startTime, err := ptypes.TimestampProto(b.StartTime()) if err != nil { return nil, status.Error(codes.Internal, err.Error()) } re[i] = &iotextypes.ElectionBucket{ Voter: b.Voter(), Candidate: b.Candidate(), Amount: b.Amount().Bytes(), StartTime: startTime, Duration: ptypes.DurationProto(b.Duration()), Decay: b.Decay(), } } return &iotexapi.GetElectionBucketsResponse{Buckets: re}, nil } // GetReceiptByActionHash returns receipt by action hash func (api *Server) GetReceiptByActionHash(h hash.Hash256) (*action.Receipt, error) { if !api.hasActionIndex || api.indexer == nil { return nil, status.Error(codes.NotFound, blockindex.ErrActionIndexNA.Error()) } actIndex, err := api.indexer.GetActionIndex(h[:]) if err != nil { return nil, err } return api.dao.GetReceiptByActionHash(h, actIndex.BlockHeight()) } // GetActionByActionHash returns action by action hash func (api *Server) GetActionByActionHash(h hash.Hash256) (action.SealedEnvelope, error) { if !api.hasActionIndex || api.indexer == nil { return action.SealedEnvelope{}, status.Error(codes.NotFound, blockindex.ErrActionIndexNA.Error()) } selp, _, _, err := api.getActionByActionHash(h) return selp, err } // GetEvmTransfersByActionHash returns evm transfers by action hash func (api *Server) GetEvmTransfersByActionHash(ctx context.Context, in *iotexapi.GetEvmTransfersByActionHashRequest) (*iotexapi.GetEvmTransfersByActionHashResponse, error) { return nil, status.Error(codes.Unimplemented, "evm transfer index is deprecated, call GetSystemLogByActionHash instead") } // GetEvmTransfersByBlockHeight returns evm transfers by block height func (api *Server) GetEvmTransfersByBlockHeight(ctx context.Context, in *iotexapi.GetEvmTransfersByBlockHeightRequest) (*iotexapi.GetEvmTransfersByBlockHeightResponse, error) { return nil, status.Error(codes.Unimplemented, "evm transfer index is deprecated, call GetSystemLogByBlockHeight instead") } // GetImplicitTransferLogByActionHash returns implict transfer log by action hash func (api *Server) GetImplicitTransferLogByActionHash( ctx context.Context, in *iotexapi.GetImplicitTransferLogByActionHashRequest) (*iotexapi.GetImplicitTransferLogByActionHashResponse, error) { if !api.hasActionIndex || api.indexer == nil { return nil, status.Error(codes.Unimplemented, blockindex.ErrActionIndexNA.Error()) } if !api.dao.ContainsImplicitTransferLog() { return nil, status.Error(codes.Unimplemented, blockdao.ErrNotSupported.Error()) } h, err := hex.DecodeString(in.ActionHash) if err != nil { return nil, status.Error(codes.InvalidArgument, err.Error()) } actIndex, err := api.indexer.GetActionIndex(h) if err != nil { if errors.Cause(err) == db.ErrNotExist { return nil, status.Error(codes.NotFound, err.Error()) } return nil, status.Error(codes.Internal, err.Error()) } sysLog, err := api.dao.GetImplicitTransferLog(actIndex.BlockHeight()) if err != nil { if errors.Cause(err) == db.ErrNotExist { return nil, status.Error(codes.NotFound, err.Error()) } return nil, status.Error(codes.Internal, err.Error()) } for _, log := range 
sysLog.ImplicitTransferLog { if bytes.Compare(h, log.ActionHash) == 0 { return &iotexapi.GetImplicitTransferLogByActionHashResponse{ ImplicitTransferLog: log, }, nil } } return nil, status.Errorf(codes.NotFound, "implicit transfer log not found for action %s", in.ActionHash) } // GetImplicitTransferLogByBlockHeight returns implict transfer log by block height func (api *Server) GetImplicitTransferLogByBlockHeight( ctx context.Context, in *iotexapi.GetImplicitTransferLogByBlockHeightRequest) (*iotexapi.GetImplicitTransferLogByBlockHeightResponse, error) { if !api.dao.ContainsImplicitTransferLog() { return nil, status.Error(codes.Unimplemented, blockdao.ErrNotSupported.Error()) } tip, err := api.dao.Height() if err != nil { return nil, status.Error(codes.Internal, err.Error()) } if in.BlockHeight < 1 || in.BlockHeight > tip { return nil, status.Errorf(codes.InvalidArgument, "invalid block height = %d", in.BlockHeight) } h, err := api.dao.GetBlockHash(in.BlockHeight) if err != nil { if errors.Cause(err) == db.ErrNotExist { return nil, status.Error(codes.NotFound, err.Error()) } return nil, status.Error(codes.Internal, err.Error()) } sysLog, err := api.dao.GetImplicitTransferLog(in.BlockHeight) if err != nil { if errors.Cause(err) == db.ErrNotExist { return nil, status.Error(codes.NotFound, err.Error()) } return nil, status.Error(codes.Internal, err.Error()) } return &iotexapi.GetImplicitTransferLogByBlockHeightResponse{ BlockImplicitTransferLog: sysLog, BlockIdentifier: &iotextypes.BlockIdentifier{ Hash: hex.EncodeToString(h[:]), Height: in.BlockHeight, }, }, nil } // Start starts the API server func (api *Server) Start() error { portStr := ":" + strconv.Itoa(api.cfg.API.Port) lis, err := net.Listen("tcp", portStr) if err != nil { log.L().Error("API server failed to listen.", zap.Error(err)) return errors.Wrap(err, "API server failed to listen") } log.L().Info("API server is listening.", zap.String("addr", lis.Addr().String())) go func() { if err := api.grpcServer.Serve(lis); err != nil { log.L().Fatal("Node failed to serve.", zap.Error(err)) } }() if err := api.bc.AddSubscriber(api.chainListener); err != nil { return errors.Wrap(err, "failed to subscribe to block creations") } if err := api.chainListener.Start(); err != nil { return errors.Wrap(err, "failed to start blockchain listener") } return nil } // Stop stops the API server func (api *Server) Stop() error { api.grpcServer.Stop() if err := api.bc.RemoveSubscriber(api.chainListener); err != nil { return errors.Wrap(err, "failed to unsubscribe blockchain listener") } return api.chainListener.Stop() } func (api *Server) readState(ctx context.Context, p protocol.Protocol, height string, methodName []byte, arguments ...[]byte) ([]byte, uint64, error) { // TODO: need to complete the context tipHeight := api.bc.TipHeight() ctx = protocol.WithBlockCtx(ctx, protocol.BlockCtx{ BlockHeight: tipHeight, }) ctx = protocol.WithBlockchainCtx( protocol.WithRegistry(ctx, api.registry), protocol.BlockchainCtx{ Genesis: api.cfg.Genesis, }, ) rp := rolldpos.FindProtocol(api.registry) if rp == nil { return nil, uint64(0), errors.New("rolldpos is not registered") } tipEpochNum := rp.GetEpochNum(tipHeight) if height != "" { inputHeight, err := strconv.ParseUint(height, 0, 64) if err != nil { return nil, uint64(0), err } inputEpochNum := rp.GetEpochNum(inputHeight) if inputEpochNum < tipEpochNum { // old data, wrap to history state reader return p.ReadState(ctx, factory.NewHistoryStateReader(api.sf, rp.GetEpochHeight(inputEpochNum)), methodName, arguments...) 
} } // TODO: need to distinguish user error and system error return p.ReadState(ctx, api.sf, methodName, arguments...) } func (api *Server) getActionsFromIndex(totalActions, start, count uint64) (*iotexapi.GetActionsResponse, error) { var actionInfo []*iotexapi.ActionInfo hashes, err := api.indexer.GetActionHashFromIndex(start, count) if err != nil { return nil, status.Error(codes.Unavailable, err.Error()) } for i := range hashes { act, err := api.getAction(hash.BytesToHash256(hashes[i]), false) if err != nil { return nil, status.Error(codes.Unavailable, err.Error()) } actionInfo = append(actionInfo, act) } return &iotexapi.GetActionsResponse{ Total: totalActions, ActionInfo: actionInfo, }, nil } // GetActions returns actions within the range func (api *Server) getActions(start uint64, count uint64) (*iotexapi.GetActionsResponse, error) { if count == 0 { return nil, status.Error(codes.InvalidArgument, "count must be greater than zero") } if count > api.cfg.API.RangeQueryLimit { return nil, status.Error(codes.InvalidArgument, "range exceeds the limit") } totalActions, err := api.indexer.GetTotalActions() if err != nil { return nil, status.Error(codes.Internal, err.Error()) } if start >= totalActions { return nil, status.Error(codes.InvalidArgument, "start exceeds the limit") } if totalActions == uint64(0) || count == 0 { return &iotexapi.GetActionsResponse{}, nil } if start+count > totalActions { count = totalActions - start } if api.hasActionIndex { return api.getActionsFromIndex(totalActions, start, count) } // Finding actions in reverse order saves time for querying most recent actions reverseStart := totalActions - (start + count) if totalActions < start+count { reverseStart = uint64(0) count = totalActions - start } var res []*iotexapi.ActionInfo var hit bool for height := api.bc.TipHeight(); height >= 1 && count > 0; height-- { blk, err := api.dao.GetBlockByHeight(height) if err != nil { return nil, status.Error(codes.NotFound, err.Error()) } if !hit && reverseStart >= uint64(len(blk.Actions)) { reverseStart -= uint64(len(blk.Actions)) continue } // now reverseStart < len(blk.Actions), we are going to fetch actions from this block hit = true act := api.reverseActionsInBlock(blk, reverseStart, count) res = append(act, res...) 
count -= uint64(len(act)) reverseStart = 0 } return &iotexapi.GetActionsResponse{ Total: totalActions, ActionInfo: res, }, nil } // getSingleAction returns action by action hash func (api *Server) getSingleAction(actionHash string, checkPending bool) (*iotexapi.GetActionsResponse, error) { actHash, err := hash.HexStringToHash256(actionHash) if err != nil { return nil, status.Error(codes.InvalidArgument, err.Error()) } act, err := api.getAction(actHash, checkPending) if err != nil { return nil, status.Error(codes.Unavailable, err.Error()) } return &iotexapi.GetActionsResponse{ Total: 1, ActionInfo: []*iotexapi.ActionInfo{act}, }, nil } // getActionsByAddress returns all actions associated with an address func (api *Server) getActionsByAddress(addrStr string, start uint64, count uint64) (*iotexapi.GetActionsResponse, error) { if count == 0 { return nil, status.Error(codes.InvalidArgument, "count must be greater than zero") } if count > api.cfg.API.RangeQueryLimit { return nil, status.Error(codes.InvalidArgument, "range exceeds the limit") } addr, err := address.FromString(addrStr) if err != nil { return nil, err } actions, err := api.indexer.GetActionsByAddress(hash.BytesToHash160(addr.Bytes()), start, count) if err != nil && (errors.Cause(err) == db.ErrBucketNotExist || errors.Cause(err) == db.ErrNotExist) { // no actions associated with address, return nil return &iotexapi.GetActionsResponse{}, nil } if err != nil { return nil, status.Error(codes.NotFound, err.Error()) } res := &iotexapi.GetActionsResponse{Total: uint64(len(actions))} for i := range actions { act, err := api.getAction(hash.BytesToHash256(actions[i]), false) if err != nil { continue } res.ActionInfo = append(res.ActionInfo, act) } return res, nil } // getBlockHashByActionHash returns block hash by action hash func (api *Server) getBlockHashByActionHash(h hash.Hash256) (hash.Hash256, error) { actIndex, err := api.indexer.GetActionIndex(h[:]) if err != nil { return hash.ZeroHash256, err } return api.dao.GetBlockHash(actIndex.BlockHeight()) } // getActionByActionHash returns action by action hash func (api *Server) getActionByActionHash(h hash.Hash256) (action.SealedEnvelope, hash.Hash256, uint64, error) { actIndex, err := api.indexer.GetActionIndex(h[:]) if err != nil { return action.SealedEnvelope{}, hash.ZeroHash256, 0, err } blk, err := api.dao.GetBlockByHeight(actIndex.BlockHeight()) if err != nil { return action.SealedEnvelope{}, hash.ZeroHash256, 0, err } selp, err := api.dao.GetActionByActionHash(h, actIndex.BlockHeight()) return selp, blk.HashBlock(), actIndex.BlockHeight(), err } // getUnconfirmedActionsByAddress returns all unconfirmed actions in actpool associated with an address func (api *Server) getUnconfirmedActionsByAddress(address string, start uint64, count uint64) (*iotexapi.GetActionsResponse, error) { if count == 0 { return nil, status.Error(codes.InvalidArgument, "count must be greater than zero") } if count > api.cfg.API.RangeQueryLimit { return nil, status.Error(codes.InvalidArgument, "range exceeds the limit") } selps := api.ap.GetUnconfirmedActs(address) if len(selps) == 0 { return &iotexapi.GetActionsResponse{}, nil } if start >= uint64(len(selps)) { return nil, status.Error(codes.InvalidArgument, "start exceeds the limit") } var res []*iotexapi.ActionInfo for i := start; i < uint64(len(selps)) && i < start+count; i++ { act, err := api.pendingAction(selps[i]) if err != nil { continue } res = append(res, act) } return &iotexapi.GetActionsResponse{ Total: uint64(len(selps)), ActionInfo: res, }, nil } 
// getActionsByBlock returns all actions in a block func (api *Server) getActionsByBlock(blkHash string, start uint64, count uint64) (*iotexapi.GetActionsResponse, error) { if count == 0 { return nil, status.Error(codes.InvalidArgument, "count must be greater than zero") } if count > api.cfg.API.RangeQueryLimit { return nil, status.Error(codes.InvalidArgument, "range exceeds the limit") } hash, err := hash.HexStringToHash256(blkHash) if err != nil { return nil, status.Error(codes.InvalidArgument, err.Error()) } blk, err := api.dao.GetBlock(hash) if err != nil { return nil, status.Error(codes.NotFound, err.Error()) } if len(blk.Actions) == 0 { return &iotexapi.GetActionsResponse{}, nil } if start >= uint64(len(blk.Actions)) { return nil, status.Error(codes.InvalidArgument, "start exceeds the limit") } res := api.actionsInBlock(blk, start, count) return &iotexapi.GetActionsResponse{ Total: uint64(len(blk.Actions)), ActionInfo: res, }, nil } // getBlockMetas returns blockmetas response within the height range func (api *Server) getBlockMetas(start uint64, count uint64) (*iotexapi.GetBlockMetasResponse, error) { if count == 0 { return nil, status.Error(codes.InvalidArgument, "count must be greater than zero") } if count > api.cfg.API.RangeQueryLimit { return nil, status.Error(codes.InvalidArgument, "range exceeds the limit") } tipHeight := api.bc.TipHeight() if start > tipHeight { return nil, status.Error(codes.InvalidArgument, "start height should not exceed tip height") } var res []*iotextypes.BlockMeta for height := start; height <= tipHeight && count > 0; height++ { blockMeta, err := api.getBlockMetasByHeader(height) if errors.Cause(err) == db.ErrNotExist { blockMeta, err = api.getBlockMetasByBlock(height) if err != nil { return nil, err } } else if err != nil { return nil, err } res = append(res, blockMeta) count-- } return &iotexapi.GetBlockMetasResponse{ Total: tipHeight, BlkMetas: res, }, nil } // getBlockMeta returns blockmetas response by block hash func (api *Server) getBlockMeta(blkHash string) (*iotexapi.GetBlockMetasResponse, error) { hash, err := hash.HexStringToHash256(blkHash) if err != nil { return nil, status.Error(codes.InvalidArgument, err.Error()) } blockMeta, err := api.getBlockMetaByHeader(hash) if errors.Cause(err) == db.ErrNotExist { blockMeta, err = api.getBlockMetaByBlock(hash) if err != nil { return nil, err } } else if err != nil { return nil, err } return &iotexapi.GetBlockMetasResponse{ Total: 1, BlkMetas: []*iotextypes.BlockMeta{blockMeta}, }, nil } // putBlockMetaUpgradeByBlock puts numActions and transferAmount for blockmeta by block func (api *Server) putBlockMetaUpgradeByBlock(blk *block.Block, blockMeta *iotextypes.BlockMeta) *iotextypes.BlockMeta { blockMeta.NumActions = int64(len(blk.Actions)) blockMeta.TransferAmount = blk.CalculateTransferAmount().String() return blockMeta } // putBlockMetaUpgradeByHeader puts numActions and transferAmount for blockmeta by header height func (api *Server) putBlockMetaUpgradeByHeader(height uint64, blockMeta *iotextypes.BlockMeta) (*iotextypes.BlockMeta, error) { index, err := api.indexer.GetBlockIndex(height) if err != nil { return nil, errors.Wrapf(err, "missing block index at height %d", height) } blockMeta.NumActions = int64(index.NumAction()) blockMeta.TransferAmount = index.TsfAmount().String() return blockMeta, nil } // getBlockMetasByHeader gets block header by height func (api *Server) getBlockMetasByHeader(height uint64) (*iotextypes.BlockMeta, error) { header, err := api.bc.BlockHeaderByHeight(height) if err 
!= nil { return nil, status.Error(codes.NotFound, err.Error()) } blockMeta := api.getCommonBlockMeta(header) blockMeta, err = api.putBlockMetaUpgradeByHeader(header.Height(), blockMeta) if err != nil { return nil, err } return blockMeta, nil } // getBlockMetasByBlock gets block by height func (api *Server) getBlockMetasByBlock(height uint64) (*iotextypes.BlockMeta, error) { blk, err := api.dao.GetBlockByHeight(height) if err != nil { return nil, status.Error(codes.NotFound, err.Error()) } blockMeta := api.getCommonBlockMeta(blk) blockMeta = api.putBlockMetaUpgradeByBlock(blk, blockMeta) return blockMeta, nil } // getBlockMetaByHeader gets block header by hash func (api *Server) getBlockMetaByHeader(h hash.Hash256) (*iotextypes.BlockMeta, error) { header, err := api.dao.Header(h) if err != nil { return nil, status.Error(codes.NotFound, err.Error()) } blockMeta := api.getCommonBlockMeta(header) blockMeta, err = api.putBlockMetaUpgradeByHeader(header.Height(), blockMeta) if err != nil { return nil, err } return blockMeta, nil } // getBlockMetaByBlock gets block by hash func (api *Server) getBlockMetaByBlock(h hash.Hash256) (*iotextypes.BlockMeta, error) { blk, err := api.dao.GetBlock(h) if err != nil { return nil, status.Error(codes.NotFound, err.Error()) } blockMeta := api.getCommonBlockMeta(blk) blockMeta = api.putBlockMetaUpgradeByBlock(blk, blockMeta) return blockMeta, nil } // getCommonBlockMeta gets blockmeta by empty interface func (api *Server) getCommonBlockMeta(common interface{}) *iotextypes.BlockMeta { header, ok := common.(*block.Header) if !ok { blk := common.(*block.Block) header = &blk.Header } hash := header.HashBlock() height := header.Height() ts, _ := ptypes.TimestampProto(header.Timestamp()) producerAddress := header.ProducerAddress() txRoot := header.TxRoot() receiptRoot := header.ReceiptRoot() deltaStateDigest := header.DeltaStateDigest() logsBloom := header.LogsBloomfilter() blockMeta := &iotextypes.BlockMeta{ Hash: hex.EncodeToString(hash[:]), Height: height, Timestamp: ts, ProducerAddress: producerAddress, TxRoot: hex.EncodeToString(txRoot[:]), ReceiptRoot: hex.EncodeToString(receiptRoot[:]), DeltaStateDigest: hex.EncodeToString(deltaStateDigest[:]), } if logsBloom != nil { blockMeta.LogsBloom = hex.EncodeToString(logsBloom.Bytes()) } return blockMeta } func (api *Server) getGravityChainStartHeight(epochHeight uint64) (uint64, error) { gravityChainStartHeight := epochHeight if pp := poll.FindProtocol(api.registry); pp != nil { methodName := []byte("GetGravityChainStartHeight") arguments := [][]byte{[]byte(strconv.FormatUint(epochHeight, 10))} data, _, err := api.readState(context.Background(), pp, "", methodName, arguments...) 
if err != nil { return 0, err } if len(data) == 0 { return 0, nil } if gravityChainStartHeight, err = strconv.ParseUint(string(data), 10, 64); err != nil { return 0, err } } return gravityChainStartHeight, nil } func (api *Server) committedAction(selp action.SealedEnvelope, blkHash hash.Hash256, blkHeight uint64) ( *iotexapi.ActionInfo, error) { actHash := selp.Hash() header, err := api.dao.Header(blkHash) if err != nil { return nil, err } sender, _ := address.FromBytes(selp.SrcPubkey().Hash()) receipt, err := api.dao.GetReceiptByActionHash(actHash, blkHeight) if err != nil { return nil, err } gas := new(big.Int) gas = gas.Mul(selp.GasPrice(), big.NewInt(int64(receipt.GasConsumed))) return &iotexapi.ActionInfo{ Action: selp.Proto(), ActHash: hex.EncodeToString(actHash[:]), BlkHash: hex.EncodeToString(blkHash[:]), BlkHeight: header.Height(), Sender: sender.String(), GasFee: gas.String(), Timestamp: header.BlockHeaderCoreProto().Timestamp, }, nil } func (api *Server) pendingAction(selp action.SealedEnvelope) (*iotexapi.ActionInfo, error) { actHash := selp.Hash() sender, _ := address.FromBytes(selp.SrcPubkey().Hash()) return &iotexapi.ActionInfo{ Action: selp.Proto(), ActHash: hex.EncodeToString(actHash[:]), BlkHash: hex.EncodeToString(hash.ZeroHash256[:]), BlkHeight: 0, Sender: sender.String(), Timestamp: nil, }, nil } func (api *Server) getAction(actHash hash.Hash256, checkPending bool) (*iotexapi.ActionInfo, error) { selp, blkHash, blkHeight, err := api.getActionByActionHash(actHash) if err == nil { return api.committedAction(selp, blkHash, blkHeight) } // Try to fetch pending action from actpool if checkPending { selp, err = api.ap.GetActionByHash(actHash) } if err != nil { return nil, err } return api.pendingAction(selp) } func (api *Server) actionsInBlock(blk *block.Block, start, count uint64) []*iotexapi.ActionInfo { h := blk.HashBlock() blkHash := hex.EncodeToString(h[:]) blkHeight := blk.Height() ts := blk.Header.BlockHeaderCoreProto().Timestamp var res []*iotexapi.ActionInfo for i := start; i < uint64(len(blk.Actions)) && i < start+count; i++ { selp := blk.Actions[i] actHash := selp.Hash() sender, _ := address.FromBytes(selp.SrcPubkey().Hash()) res = append(res, &iotexapi.ActionInfo{ Action: selp.Proto(), ActHash: hex.EncodeToString(actHash[:]), BlkHash: blkHash, BlkHeight: blkHeight, Sender: sender.String(), Timestamp: ts, }) } return res } func (api *Server) reverseActionsInBlock(blk *block.Block, reverseStart, count uint64) []*iotexapi.ActionInfo { h := blk.HashBlock() blkHash := hex.EncodeToString(h[:]) blkHeight := blk.Height() ts := blk.Header.BlockHeaderCoreProto().Timestamp var res []*iotexapi.ActionInfo for i := reverseStart; i < uint64(len(blk.Actions)) && i < reverseStart+count; i++ { ri := uint64(len(blk.Actions)) - 1 - i selp := blk.Actions[ri] actHash := selp.Hash() sender, _ := address.FromBytes(selp.SrcPubkey().Hash()) res = append([]*iotexapi.ActionInfo{ { Action: selp.Proto(), ActHash: hex.EncodeToString(actHash[:]), BlkHash: blkHash, BlkHeight: blkHeight, Sender: sender.String(), Timestamp: ts, }, }, res...) 
} return res } func (api *Server) getLogsInBlock(filter *LogFilter, start, count uint64) ([]*iotextypes.Log, error) { if count == 0 { return nil, status.Error(codes.InvalidArgument, "count must be greater than zero") } // filter logs within start --> end var logs []*iotextypes.Log end := start + count - 1 if end > api.bc.TipHeight() { end = api.bc.TipHeight() } for i := start; i <= end; i++ { receipts, err := api.dao.GetReceipts(i) if err != nil { return logs, status.Error(codes.InvalidArgument, err.Error()) } logs = append(logs, filter.MatchLogs(receipts)...) } return logs, nil } // TODO: Since GasConsumed on the receipt may not be enough for the gas limit, we use binary search for the gas estimate. Need a better way to address it later. func (api *Server) estimateActionGasConsumptionForExecution(exec *iotextypes.Execution, sender string) (*iotexapi.EstimateActionGasConsumptionResponse, error) { sc := &action.Execution{} if err := sc.LoadProto(exec); err != nil { return nil, status.Error(codes.InvalidArgument, err.Error()) } state, err := accountutil.AccountState(api.sf, sender) if err != nil { return nil, status.Error(codes.InvalidArgument, err.Error()) } nonce := state.Nonce + 1 callerAddr, err := address.FromString(sender) if err != nil { return nil, status.Error(codes.InvalidArgument, err.Error()) } sc, _ = action.NewExecution( sc.Contract(), nonce, sc.Amount(), api.cfg.Genesis.BlockGasLimit, big.NewInt(0), sc.Data(), ) ctx, err := api.bc.Context() if err != nil { return nil, err } _, receipt, err := api.sf.SimulateExecution(ctx, callerAddr, sc, api.dao.GetBlockHash) if err != nil { return nil, status.Error(codes.Internal, err.Error()) } if receipt.Status != uint64(iotextypes.ReceiptStatus_Success) { return nil, status.Error(codes.Internal, "execution simulation gets failure status") } estimatedGas := receipt.GasConsumed enough, err := api.isGasLimitEnough(callerAddr, sc, nonce, estimatedGas) if err != nil { return nil, status.Error(codes.Internal, err.Error()) } if !enough { low, high := estimatedGas, api.cfg.Genesis.BlockGasLimit estimatedGas = high for low <= high { mid := (low + high) / 2 enough, err = api.isGasLimitEnough(callerAddr, sc, nonce, mid) if err != nil { return nil, status.Error(codes.Internal, err.Error()) } if enough { estimatedGas = mid break } low = mid + 1 } } return &iotexapi.EstimateActionGasConsumptionResponse{ Gas: estimatedGas, }, nil } func (api *Server) estimateActionGasConsumptionForTransfer(transfer *iotextypes.Transfer) (*iotexapi.EstimateActionGasConsumptionResponse, error) { payloadSize := uint64(len(transfer.Payload)) return &iotexapi.EstimateActionGasConsumptionResponse{ Gas: payloadSize*action.TransferPayloadGas + action.TransferBaseIntrinsicGas, }, nil } func (api *Server) isGasLimitEnough( caller address.Address, sc *action.Execution, nonce uint64, gasLimit uint64, ) (bool, error) { sc, _ = action.NewExecution( sc.Contract(), nonce, sc.Amount(), gasLimit, big.NewInt(0), sc.Data(), ) ctx, err := api.bc.Context() if err != nil { return false, err } _, receipt, err := api.sf.SimulateExecution(ctx, caller, sc, api.dao.GetBlockHash) if err != nil { return false, err } return receipt.Status == uint64(iotextypes.ReceiptStatus_Success), nil } func (api *Server) getProductivityByEpoch( rp *rolldpos.Protocol, epochNum uint64, tipHeight uint64, abps state.CandidateList, ) (uint64, map[string]uint64, error) { num, produce, err := rp.ProductivityByEpoch(epochNum, tipHeight, func(start uint64, end uint64) (map[string]uint64, error) { return 
blockchain.Productivity(api.bc, start, end) }) if err != nil { return 0, nil, status.Error(codes.NotFound, err.Error()) } // check if there is any active block producer who didn't prodcue any block for _, abp := range abps { if _, ok := produce[abp.Address]; !ok { produce[abp.Address] = 0 } } return num, produce, nil } func (api *Server) getProtocolAccount(ctx context.Context, addr string) (ret *iotexapi.GetAccountResponse, err error) { var req *iotexapi.ReadStateRequest var balance string var out *iotexapi.ReadStateResponse switch addr { case address.RewardingPoolAddr: req = &iotexapi.ReadStateRequest{ ProtocolID: []byte("rewarding"), MethodName: []byte("TotalBalance"), } out, err = api.ReadState(ctx, req) if err != nil { return } val, ok := big.NewInt(0).SetString(string(out.GetData()), 10) if !ok { err = errors.New("balance convert error") return } balance = val.String() case address.StakingBucketPoolAddr: methodName, err := proto.Marshal(&iotexapi.ReadStakingDataMethod{ Method: iotexapi.ReadStakingDataMethod_TOTAL_STAKING_AMOUNT, }) if err != nil { return nil, err } arg, err := proto.Marshal(&iotexapi.ReadStakingDataRequest{ Request: &iotexapi.ReadStakingDataRequest_TotalStakingAmount_{ TotalStakingAmount: &iotexapi.ReadStakingDataRequest_TotalStakingAmount{}, }, }) if err != nil { return nil, err } req = &iotexapi.ReadStateRequest{ ProtocolID: []byte("staking"), MethodName: methodName, Arguments: [][]byte{arg}, } out, err = api.ReadState(ctx, req) if err != nil { return nil, err } acc := iotextypes.AccountMeta{} if err := proto.Unmarshal(out.GetData(), &acc); err != nil { return nil, errors.Wrap(err, "failed to unmarshal account meta") } balance = acc.GetBalance() } ret = &iotexapi.GetAccountResponse{ AccountMeta: &iotextypes.AccountMeta{ Address: addr, Balance: balance, }, BlockIdentifier: out.GetBlockIdentifier(), } return }
1
22274
Any chance `in == nil`? The same applies to the handlers below.
iotexproject-iotex-core
go
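The msg above asks whether `in` can be nil in these gRPC handlers. A minimal sketch of the guard being suggested, assuming the iotex-proto and gRPC packages the file already imports; the Server type is stubbed here (the real one carries the chain, dao, and indexer fields used above), and this is illustrative rather than the project's actual fix:

    package api

    import (
        "context"

        "github.com/iotexproject/iotex-proto/golang/iotexapi"
        "google.golang.org/grpc/codes"
        "google.golang.org/grpc/status"
    )

    // Server is a stub standing in for the real api.Server shown above.
    type Server struct{}

    // GetLogs rejects a nil request (or nil filter) up front instead of
    // dereferencing it in the switch that follows in the real handler.
    func (api *Server) GetLogs(ctx context.Context, in *iotexapi.GetLogsRequest) (*iotexapi.GetLogsResponse, error) {
        if in == nil || in.GetFilter() == nil {
            return nil, status.Error(codes.InvalidArgument, "empty request")
        }
        // ... the existing switch on in.GetByBlock() / in.GetByRange() would follow ...
        return nil, status.Error(codes.Unimplemented, "elided in this sketch")
    }

The same two-line guard would sit at the top of each exported handler the reviewer flags.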
@@ -150,15 +150,15 @@ static int create_spawnproc(h2o_configurator_command_t *cmd, yoml_t *node, const } /* create socket */ - if ((listen_fd = socket(AF_UNIX, SOCK_STREAM, 0)) == -1) { + if ((listen_fd = h2o_sysfn(socket, AF_UNIX, SOCK_STREAM, 0)) == -1) { h2o_configurator_errprintf(cmd, node, "socket(2) failed: %s", strerror(errno)); goto Error; } - if (bind(listen_fd, (void *)sa, sizeof(*sa)) != 0) { + if (h2o_sysfn(bind, listen_fd, (void *)sa, sizeof(*sa)) != 0) { h2o_configurator_errprintf(cmd, node, "bind(2) failed: %s", strerror(errno)); goto Error; } - if (listen(listen_fd, H2O_SOMAXCONN) != 0) { + if (h2o_sysfn(listen, listen_fd, H2O_SOMAXCONN) != 0) { h2o_configurator_errprintf(cmd, node, "listen(2) failed: %s", strerror(errno)); goto Error; }
1
/* * Copyright (c) 2015 DeNA Co., Ltd. Kazuho Oku * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include <arpa/inet.h> #include <errno.h> #include <fcntl.h> #include <inttypes.h> #include <netinet/in.h> #include <netinet/tcp.h> #include <pwd.h> #include <stdlib.h> #include <sys/stat.h> #include <sys/un.h> #include "h2o.h" #include "h2o/configurator.h" #include "h2o/serverutil.h" struct fastcgi_configurator_t { h2o_configurator_t super; h2o_fastcgi_config_vars_t *vars; h2o_fastcgi_config_vars_t _vars_stack[H2O_CONFIGURATOR_NUM_LEVELS + 1]; }; static int on_config_timeout_io(h2o_configurator_command_t *cmd, h2o_configurator_context_t *ctx, yoml_t *node) { struct fastcgi_configurator_t *self = (void *)cmd->configurator; return h2o_configurator_scanf(cmd, node, "%" SCNu64, &self->vars->io_timeout); } static int on_config_timeout_keepalive(h2o_configurator_command_t *cmd, h2o_configurator_context_t *ctx, yoml_t *node) { struct fastcgi_configurator_t *self = (void *)cmd->configurator; return h2o_configurator_scanf(cmd, node, "%" SCNu64, &self->vars->keepalive_timeout); } static int on_config_document_root(h2o_configurator_command_t *cmd, h2o_configurator_context_t *ctx, yoml_t *node) { struct fastcgi_configurator_t *self = (void *)cmd->configurator; if (node->data.scalar[0] == '\0') { /* unset */ self->vars->document_root = h2o_iovec_init(NULL, 0); } else if (node->data.scalar[0] == '/') { /* set */ self->vars->document_root = h2o_iovec_init(node->data.scalar, strlen(node->data.scalar)); } else { h2o_configurator_errprintf(cmd, node, "value does not start from `/`"); return -1; } return 0; } static int on_config_send_delegated_uri(h2o_configurator_command_t *cmd, h2o_configurator_context_t *ctx, yoml_t *node) { struct fastcgi_configurator_t *self = (void *)cmd->configurator; ssize_t v; if ((v = h2o_configurator_get_one_of(cmd, node, "OFF,ON")) == -1) return -1; self->vars->send_delegated_uri = (int)v; return 0; } static int on_config_connect(h2o_configurator_command_t *cmd, h2o_configurator_context_t *ctx, yoml_t *node) { struct fastcgi_configurator_t *self = (void *)cmd->configurator; const char *hostname = "127.0.0.1", *servname = NULL, *type = "tcp"; /* fetch servname (and hostname) */ switch (node->type) { case YOML_TYPE_SCALAR: servname = node->data.scalar; break; case YOML_TYPE_MAPPING: { yoml_t **port_node, **host_node, **type_node; if (h2o_configurator_parse_mapping(cmd, node, "port:s", "host:s,type:s", &port_node, &host_node, &type_node) != 0) return -1; servname = (*port_node)->data.scalar; 
if (host_node != NULL) hostname = (*host_node)->data.scalar; if (type_node != NULL) type = (*type_node)->data.scalar; } break; default: h2o_configurator_errprintf(cmd, node, "value must be a string or a mapping (with keys: `port` and optionally `host` and `type`)"); return -1; } h2o_url_t upstream; if (strcmp(type, "unix") == 0) { /* unix socket */ struct sockaddr_un sa; if (strlen(servname) >= sizeof(sa.sun_path)) { h2o_configurator_errprintf(cmd, node, "path:%s is too long as a unix socket name", servname); return -1; } h2o_url_init_with_sun_path(&upstream, NULL, &H2O_URL_SCHEME_FASTCGI, h2o_iovec_init(servname, strlen(servname)), h2o_iovec_init(H2O_STRLIT("/"))); } else if (strcmp(type, "tcp") == 0) { /* tcp socket */ uint16_t port; if (sscanf(servname, "%" SCNu16, &port) != 1) { h2o_configurator_errprintf(cmd, node, "invalid port number:%s", servname); return -1; } h2o_url_init_with_hostport(&upstream, NULL, &H2O_URL_SCHEME_FASTCGI, h2o_iovec_init(hostname, strlen(hostname)), port, h2o_iovec_init(H2O_STRLIT("/"))); } else { h2o_configurator_errprintf(cmd, node, "unknown listen type: %s", type); return -1; } h2o_fastcgi_register(ctx->pathconf, &upstream, self->vars); free(upstream.authority.base); return 0; } static int create_spawnproc(h2o_configurator_command_t *cmd, yoml_t *node, const char *dirname, char *const *argv, struct sockaddr_un *sa, struct passwd *pw) { int ret, listen_fd = -1, pipe_fds[2] = {-1, -1}; /* build socket path */ sa->sun_family = AF_UNIX; ret = snprintf(sa->sun_path, sizeof(sa->sun_path), "%s/_", dirname); if (ret < 0 || ret >= sizeof(sa->sun_path)) { h2o_configurator_errprintf(cmd, node, "unix socket path too long: %s", dirname); goto Error; } /* create socket */ if ((listen_fd = socket(AF_UNIX, SOCK_STREAM, 0)) == -1) { h2o_configurator_errprintf(cmd, node, "socket(2) failed: %s", strerror(errno)); goto Error; } if (bind(listen_fd, (void *)sa, sizeof(*sa)) != 0) { h2o_configurator_errprintf(cmd, node, "bind(2) failed: %s", strerror(errno)); goto Error; } if (listen(listen_fd, H2O_SOMAXCONN) != 0) { h2o_configurator_errprintf(cmd, node, "listen(2) failed: %s", strerror(errno)); goto Error; } /* change ownership of socket */ if (pw != NULL && chown(sa->sun_path, pw->pw_uid, pw->pw_gid) != 0) { h2o_configurator_errprintf(cmd, node, "chown(2) failed to change ownership of socket:%s:%s", sa->sun_path, strerror(errno)); goto Error; } /* create pipe which is used to notify the termination of the server */ if (pipe(pipe_fds) != 0) { h2o_configurator_errprintf(cmd, node, "pipe(2) failed: %s", strerror(errno)); pipe_fds[0] = -1; pipe_fds[1] = -1; goto Error; } if (fcntl(pipe_fds[1], F_SETFD, FD_CLOEXEC) < 0) goto Error; /* spawn */ int mapped_fds[] = {listen_fd, 0, /* listen_fd to 0 */ pipe_fds[0], 5, /* pipe_fds[0] to 5 */ -1}; pid_t pid = h2o_spawnp(argv[0], argv, mapped_fds, 0); if (pid == -1) { h2o_error_printf("[lib/handler/fastcgi.c] failed to launch helper program %s:%s\n", argv[0], strerror(errno)); goto Error; } close(listen_fd); listen_fd = -1; close(pipe_fds[0]); pipe_fds[0] = -1; return pipe_fds[1]; Error: if (pipe_fds[0] != -1) close(pipe_fds[0]); if (pipe_fds[1]) close(pipe_fds[1]); if (listen_fd != -1) close(listen_fd); unlink(sa->sun_path); return -1; } static void spawnproc_on_dispose(h2o_fastcgi_handler_t *handler, void *data) { int pipe_fd = (int)((char *)data - (char *)NULL); close(pipe_fd); } static int on_config_spawn(h2o_configurator_command_t *cmd, h2o_configurator_context_t *ctx, yoml_t *node) { struct fastcgi_configurator_t *self = (void 
*)cmd->configurator; char *spawn_user = ctx->globalconf->user, *spawn_cmd; char *kill_on_close_cmd_path = NULL, *setuidgid_cmd_path = NULL; char dirname[] = "/tmp/h2o.fcgisock.XXXXXX"; char *argv[10]; int spawner_fd; struct sockaddr_un sa; h2o_fastcgi_config_vars_t config_vars; int ret = -1; struct passwd h2o_user_pwbuf, *h2o_user_pw; char h2o_user_buf[65536]; memset(&sa, 0, sizeof(sa)); switch (node->type) { case YOML_TYPE_SCALAR: spawn_cmd = node->data.scalar; break; case YOML_TYPE_MAPPING: { yoml_t **command_node, **user_node; if (h2o_configurator_parse_mapping(cmd, node, "command:s", "user:s", &command_node, &user_node) != 0) return -1; spawn_cmd = (*command_node)->data.scalar; if (user_node != NULL) spawn_user = (*user_node)->data.scalar; } break; default: h2o_configurator_errprintf(cmd, node, "argument must be scalar or mapping"); return -1; } /* obtain uid & gid of the client that connects to the FastCGI daemon (i.e. H2O after dropping privileges) */ if (ctx->globalconf->user != NULL) { /* change ownership of temporary directory */ if (getpwnam_r(ctx->globalconf->user, &h2o_user_pwbuf, h2o_user_buf, sizeof(h2o_user_buf), &h2o_user_pw) != 0 || h2o_user_pw == NULL) { h2o_configurator_errprintf(cmd, node, "getpwnam_r(3) failed to obtain uid of user:%s", ctx->globalconf->user); goto Exit; } } else { h2o_user_pw = NULL; } { /* build args */ size_t i = 0; argv[i++] = kill_on_close_cmd_path = h2o_configurator_get_cmd_path("share/h2o/kill-on-close"); argv[i++] = "--rm"; argv[i++] = dirname; argv[i++] = "--"; if (spawn_user != NULL) { argv[i++] = setuidgid_cmd_path = h2o_configurator_get_cmd_path("share/h2o/setuidgid"); argv[i++] = spawn_user; } argv[i++] = "/bin/sh"; argv[i++] = "-c"; argv[i++] = spawn_cmd; argv[i++] = NULL; assert(i <= sizeof(argv) / sizeof(argv[0])); } if (ctx->dry_run) { dirname[0] = '\0'; spawner_fd = -1; sa.sun_family = AF_UNIX; strcpy(sa.sun_path, "/dry-run.nonexistent"); } else { /* create temporary directory */ if (mkdtemp(dirname) == NULL) { h2o_configurator_errprintf(cmd, node, "mkdtemp(3) failed to create temporary directory:%s:%s", dirname, strerror(errno)); dirname[0] = '\0'; goto Exit; } /* change ownership of temporary directory */ if (h2o_user_pw != NULL && chown(dirname, h2o_user_pw->pw_uid, h2o_user_pw->pw_gid) != 0) { h2o_configurator_errprintf(cmd, node, "chown(2) failed to change ownership of temporary directory:%s:%s", dirname, strerror(errno)); goto Exit; } /* launch spawnfcgi command */ if ((spawner_fd = create_spawnproc(cmd, node, dirname, argv, &sa, h2o_user_pw)) == -1) { goto Exit; } } config_vars = *self->vars; config_vars.callbacks.dispose = spawnproc_on_dispose; config_vars.callbacks.data = (char *)NULL + spawner_fd; h2o_url_t upstream; h2o_url_init_with_sun_path(&upstream, NULL, &H2O_URL_SCHEME_FASTCGI, h2o_iovec_init(sa.sun_path, strlen(sa.sun_path)), h2o_iovec_init(H2O_STRLIT("/"))); h2o_fastcgi_register(ctx->pathconf, &upstream, &config_vars); free(upstream.authority.base); ret = 0; Exit: if (dirname[0] != '\0') unlink(dirname); free(kill_on_close_cmd_path); free(setuidgid_cmd_path); return ret; } static int on_config_enter(h2o_configurator_t *_self, h2o_configurator_context_t *ctx, yoml_t *node) { struct fastcgi_configurator_t *self = (void *)_self; memcpy(self->vars + 1, self->vars, sizeof(*self->vars)); ++self->vars; return 0; } static int on_config_exit(h2o_configurator_t *_self, h2o_configurator_context_t *ctx, yoml_t *node) { struct fastcgi_configurator_t *self = (void *)_self; --self->vars; return 0; } void 
h2o_fastcgi_register_configurator(h2o_globalconf_t *conf) { struct fastcgi_configurator_t *c = (void *)h2o_configurator_create(conf, sizeof(*c)); /* set default vars */ c->vars = c->_vars_stack; c->vars->io_timeout = H2O_DEFAULT_FASTCGI_IO_TIMEOUT; c->vars->keepalive_timeout = 0; /* setup handlers */ c->super.enter = on_config_enter; c->super.exit = on_config_exit; h2o_configurator_define_command(&c->super, "fastcgi.connect", H2O_CONFIGURATOR_FLAG_PATH | H2O_CONFIGURATOR_FLAG_EXTENSION | H2O_CONFIGURATOR_FLAG_DEFERRED, on_config_connect); h2o_configurator_define_command(&c->super, "fastcgi.spawn", H2O_CONFIGURATOR_FLAG_PATH | H2O_CONFIGURATOR_FLAG_EXTENSION | H2O_CONFIGURATOR_FLAG_DEFERRED, on_config_spawn); h2o_configurator_define_command(&c->super, "fastcgi.timeout.io", H2O_CONFIGURATOR_FLAG_ALL_LEVELS | H2O_CONFIGURATOR_FLAG_EXPECT_SCALAR, on_config_timeout_io); h2o_configurator_define_command(&c->super, "fastcgi.timeout.keepalive", H2O_CONFIGURATOR_FLAG_ALL_LEVELS | H2O_CONFIGURATOR_FLAG_EXPECT_SCALAR, on_config_timeout_keepalive); h2o_configurator_define_command(&c->super, "fastcgi.document_root", H2O_CONFIGURATOR_FLAG_ALL_LEVELS | H2O_CONFIGURATOR_FLAG_EXPECT_SCALAR, on_config_document_root); h2o_configurator_define_command(&c->super, "fastcgi.send-delegated-uri", H2O_CONFIGURATOR_FLAG_ALL_LEVELS | H2O_CONFIGURATOR_FLAG_EXPECT_SCALAR, on_config_send_delegated_uri); }
1
14407
We would change the source code of h2o so that certain syscalls are invoked through the `h2o_sysfn` macro.
h2o-h2o
c
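The msg field explains the motivation for the patch above: route selected syscalls (socket, bind, listen) through an `h2o_sysfn` macro so they can be intercepted, e.g. for fault injection in tests. The macro's definition is not part of this record; a minimal sketch, assuming the production build simply forwards to the named function:

    #include <stdio.h>
    #include <sys/socket.h>
    #include <unistd.h>

    /* Hypothetical forwarding form; a test build could instead resolve the
     * name to a replaceable function pointer for fault injection. */
    #define h2o_sysfn(fn, ...) fn(__VA_ARGS__)

    int main(void)
    {
        /* same call shape as in the patch above */
        int fd = h2o_sysfn(socket, AF_UNIX, SOCK_STREAM, 0);
        if (fd == -1) {
            perror("socket");
            return 1;
        }
        close(fd);
        return 0;
    }

Because the macro takes the function name as its first argument, the diff can wrap each call site without changing its arguments or return-value handling.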
@@ -2895,7 +2895,8 @@ struct SemaphoreSubmitState { (pSemaphore->scope == kSyncScopeInternal || internal_semaphores.count(semaphore))) { if (unsignaled_semaphores.count(semaphore) || (!(signaled_semaphores.count(semaphore)) && !(pSemaphore->signaled) && !core->SemaphoreWasSignaled(semaphore))) { - const auto &vuid = GetQueueSubmitVUID(loc, SubmitError::kSemCannotBeSignalled); + auto error = core->device_extensions.vk_khr_timeline_semaphore ? SubmitError::kTimelineCannotBeSignalled : SubmitError::kBinaryCannotBeSignalled; + const auto &vuid = GetQueueSubmitVUID(loc, error); skip |= core->LogError( objlist, pSemaphore->scope == kSyncScopeInternal ? vuid : kVUID_Core_DrawState_QueueForwardProgress, "%s Queue %s is waiting on semaphore (%s) that has no way to be signaled.", loc.Message().c_str(),
1
/* Copyright (c) 2015-2021 The Khronos Group Inc. * Copyright (c) 2015-2021 Valve Corporation * Copyright (c) 2015-2021 LunarG, Inc. * Copyright (C) 2015-2021 Google Inc. * Modifications Copyright (C) 2020-2021 Advanced Micro Devices, Inc. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * Author: Cody Northrop <[email protected]> * Author: Michael Lentine <[email protected]> * Author: Tobin Ehlis <[email protected]> * Author: Chia-I Wu <[email protected]> * Author: Chris Forbes <[email protected]> * Author: Mark Lobodzinski <[email protected]> * Author: Ian Elliott <[email protected]> * Author: Dave Houlton <[email protected]> * Author: Dustin Graves <[email protected]> * Author: Jeremy Hayes <[email protected]> * Author: Jon Ashburn <[email protected]> * Author: Karl Schultz <[email protected]> * Author: Mark Young <[email protected]> * Author: Mike Schuchardt <[email protected]> * Author: Mike Weiblen <[email protected]> * Author: Tony Barbour <[email protected]> * Author: John Zulauf <[email protected]> * Author: Shannon McPherson <[email protected]> * Author: Jeremy Kniager <[email protected]> * Author: Tobias Hector <[email protected]> * Author: Jeremy Gebben <[email protected]> */ #include <algorithm> #include <array> #include <assert.h> #include <cmath> #include <iostream> #include <list> #include <map> #include <memory> #include <mutex> #include <set> #include <sstream> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <string> #include <valarray> #include "vk_loader_platform.h" #include "vk_enum_string_helper.h" #include "chassis.h" #include "convert_to_renderpass2.h" #include "core_validation.h" #include "buffer_validation.h" #include "shader_validation.h" #include "vk_layer_utils.h" #include "command_counter.h" #include "sync_utils.h" #include "sync_vuid_maps.h" // these templates are defined in buffer_validation.cpp so we need to pull in the explicit instantiations from there extern template void CoreChecks::TransitionImageLayouts(CMD_BUFFER_STATE *cb_state, uint32_t barrier_count, const VkImageMemoryBarrier *barrier); extern template void CoreChecks::TransitionImageLayouts(CMD_BUFFER_STATE *cb_state, uint32_t barrier_count, const VkImageMemoryBarrier2KHR *barrier); extern template bool CoreChecks::ValidateImageBarrierAttachment(const CoreErrorLocation &loc, CMD_BUFFER_STATE const *cb_state, const FRAMEBUFFER_STATE *framebuffer, uint32_t active_subpass, const safe_VkSubpassDescription2 &sub_desc, const VkRenderPass rp_handle, const VkImageMemoryBarrier &img_barrier, const CMD_BUFFER_STATE *primary_cb_state) const; extern template bool CoreChecks::ValidateImageBarrierAttachment(const CoreErrorLocation &loc, CMD_BUFFER_STATE const *cb_state, const FRAMEBUFFER_STATE *framebuffer, uint32_t active_subpass, const safe_VkSubpassDescription2 &sub_desc, const VkRenderPass rp_handle, const VkImageMemoryBarrier2KHR &img_barrier, const CMD_BUFFER_STATE *primary_cb_state) const; // These functions are defined *outside* the core_validation namespace as their type // 
is also defined outside that namespace size_t PipelineLayoutCompatDef::hash() const { hash_util::HashCombiner hc; // The set number is integral to the CompatDef's distinctiveness hc << set << push_constant_ranges.get(); const auto &descriptor_set_layouts = *set_layouts_id.get(); for (uint32_t i = 0; i <= set; i++) { hc << descriptor_set_layouts[i].get(); } return hc.Value(); } bool PipelineLayoutCompatDef::operator==(const PipelineLayoutCompatDef &other) const { if ((set != other.set) || (push_constant_ranges != other.push_constant_ranges)) { return false; } if (set_layouts_id == other.set_layouts_id) { // if it's the same set_layouts_id, then *any* subset will match return true; } // They aren't exactly the same PipelineLayoutSetLayouts, so we need to check if the required subsets match const auto &descriptor_set_layouts = *set_layouts_id.get(); assert(set < descriptor_set_layouts.size()); const auto &other_ds_layouts = *other.set_layouts_id.get(); assert(set < other_ds_layouts.size()); for (uint32_t i = 0; i <= set; i++) { if (descriptor_set_layouts[i] != other_ds_layouts[i]) { return false; } } return true; } using std::max; using std::string; using std::stringstream; using std::unique_ptr; using std::unordered_map; using std::unordered_set; using std::vector; static std::unique_ptr<ImageSubresourceLayoutMap> LayoutMapFactory(const IMAGE_STATE &image_state) { std::unique_ptr<ImageSubresourceLayoutMap> map(new ImageSubresourceLayoutMap(image_state)); return map; } // The const variant only need the image as it is the key for the map const ImageSubresourceLayoutMap *GetImageSubresourceLayoutMap(const CMD_BUFFER_STATE *cb_state, VkImage image) { auto it = cb_state->image_layout_map.find(image); if (it == cb_state->image_layout_map.cend()) { return nullptr; } return it->second.get(); } // The non-const variant only needs the image state, as the factory requires it to construct a new entry ImageSubresourceLayoutMap *GetImageSubresourceLayoutMap(CMD_BUFFER_STATE *cb_state, const IMAGE_STATE &image_state) { auto it = cb_state->image_layout_map.find(image_state.image); if (it == cb_state->image_layout_map.end()) { // Empty slot... fill it in. auto insert_pair = cb_state->image_layout_map.insert(std::make_pair(image_state.image, LayoutMapFactory(image_state))); assert(insert_pair.second); ImageSubresourceLayoutMap *new_map = insert_pair.first->second.get(); assert(new_map); return new_map; } return it->second.get(); } void AddInitialLayoutintoImageLayoutMap(const IMAGE_STATE &image_state, GlobalImageLayoutMap &image_layout_map) { auto *range_map = GetLayoutRangeMap(&image_layout_map, image_state); auto range_gen = subresource_adapter::RangeGenerator(image_state.subresource_encoder, image_state.full_range); for (; range_gen->non_empty(); ++range_gen) { range_map->insert(range_map->end(), std::make_pair(*range_gen, image_state.createInfo.initialLayout)); } } // Override base class, we have some extra work to do here void CoreChecks::InitDeviceValidationObject(bool add_obj, ValidationObject *inst_obj, ValidationObject *dev_obj) { if (add_obj) { if (dev_obj->device_extensions.vk_khr_performance_query) { auto command_counter = new CommandCounter(this); dev_obj->object_dispatch.emplace_back(command_counter); } ValidationStateTracker::InitDeviceValidationObject(add_obj, inst_obj, dev_obj); } } // Tracks the number of commands recorded in a command buffer. 
void CoreChecks::IncrementCommandCount(VkCommandBuffer commandBuffer) { CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); cb_state->commandCount++; } // For given mem object, verify that it is not null or UNBOUND, if it is, report error. Return skip value. template <typename T1> bool CoreChecks::VerifyBoundMemoryIsValid(const DEVICE_MEMORY_STATE *mem_state, const T1 object, const VulkanTypedHandle &typed_handle, const char *api_name, const char *error_code) const { bool result = false; auto type_name = object_string[typed_handle.type]; if (!mem_state) { result |= LogError(object, error_code, "%s: %s used with no memory bound. Memory should be bound by calling vkBind%sMemory().", api_name, report_data->FormatHandle(typed_handle).c_str(), type_name + 2); } else if (mem_state->destroyed) { result |= LogError(object, error_code, "%s: %s used with no memory bound and previously bound memory was freed. Memory must not be freed " "prior to this operation.", api_name, report_data->FormatHandle(typed_handle).c_str()); } return result; } // Check to see if memory was ever bound to this image bool CoreChecks::ValidateMemoryIsBoundToImage(const IMAGE_STATE *image_state, const char *api_name, const char *error_code) const { bool result = false; if (image_state->create_from_swapchain != VK_NULL_HANDLE) { if (image_state->bind_swapchain == VK_NULL_HANDLE) { LogObjectList objlist(image_state->image); objlist.add(image_state->create_from_swapchain); result |= LogError( objlist, error_code, "%s: %s is created by %s, and the image should be bound by calling vkBindImageMemory2(), and the pNext chain " "includes VkBindImageMemorySwapchainInfoKHR.", api_name, report_data->FormatHandle(image_state->image).c_str(), report_data->FormatHandle(image_state->create_from_swapchain).c_str()); } else if (image_state->create_from_swapchain != image_state->bind_swapchain) { LogObjectList objlist(image_state->image); objlist.add(image_state->create_from_swapchain); objlist.add(image_state->bind_swapchain); result |= LogError(objlist, error_code, "%s: %s is created by %s, but the image is bound by %s. 
The image should be created and bound by the same " "swapchain", api_name, report_data->FormatHandle(image_state->image).c_str(), report_data->FormatHandle(image_state->create_from_swapchain).c_str(), report_data->FormatHandle(image_state->bind_swapchain).c_str()); } } else if (image_state->external_ahb) { // TODO look into how to properly check for a valid bound memory for an external AHB } else if (0 == (static_cast<uint32_t>(image_state->createInfo.flags) & VK_IMAGE_CREATE_SPARSE_BINDING_BIT)) { result |= VerifyBoundMemoryIsValid(image_state->binding.mem_state.get(), image_state->image, VulkanTypedHandle(image_state->image, kVulkanObjectTypeImage), api_name, error_code); } return result; } // Check to see if memory was bound to this buffer bool CoreChecks::ValidateMemoryIsBoundToBuffer(const BUFFER_STATE *buffer_state, const char *api_name, const char *error_code) const { bool result = false; if (0 == (static_cast<uint32_t>(buffer_state->createInfo.flags) & VK_BUFFER_CREATE_SPARSE_BINDING_BIT)) { result |= VerifyBoundMemoryIsValid(buffer_state->binding.mem_state.get(), buffer_state->buffer, VulkanTypedHandle(buffer_state->buffer, kVulkanObjectTypeBuffer), api_name, error_code); } return result; } // Check to see if memory was bound to this acceleration structure bool CoreChecks::ValidateMemoryIsBoundToAccelerationStructure(const ACCELERATION_STRUCTURE_STATE *as_state, const char *api_name, const char *error_code) const { return VerifyBoundMemoryIsValid(as_state->binding.mem_state.get(), as_state->acceleration_structure, VulkanTypedHandle(as_state->acceleration_structure, kVulkanObjectTypeAccelerationStructureNV), api_name, error_code); } // Check to see if memory was bound to this acceleration structure bool CoreChecks::ValidateMemoryIsBoundToAccelerationStructure(const ACCELERATION_STRUCTURE_STATE_KHR *as_state, const char *api_name, const char *error_code) const { return VerifyBoundMemoryIsValid(as_state->binding.mem_state.get(), as_state->acceleration_structure, VulkanTypedHandle(as_state->acceleration_structure, kVulkanObjectTypeAccelerationStructureKHR), api_name, error_code); } // Valid usage checks for a call to SetMemBinding(). // For NULL mem case, output warning // Make sure given object is in global object map // IF a previous binding existed, output validation error // Otherwise, add reference from objectInfo to memoryInfo // Add reference off of objInfo // TODO: We may need to refactor or pass in multiple valid usage statements to handle multiple valid usage conditions. 
// Valid usage checks for a call to SetMemBinding().
// For NULL mem case, output warning
// Make sure given object is in global object map
//  IF a previous binding existed, output validation error
//  Otherwise, add reference from objectInfo to memoryInfo
//   Add reference off of objInfo
// TODO: We may need to refactor or pass in multiple valid usage statements to handle multiple valid usage conditions.
bool CoreChecks::ValidateSetMemBinding(VkDeviceMemory mem, const VulkanTypedHandle &typed_handle, const char *apiName) const {
    bool skip = false;
    // It's an error to bind an object to NULL memory
    if (mem != VK_NULL_HANDLE) {
        const BINDABLE *mem_binding = ValidationStateTracker::GetObjectMemBinding(typed_handle);
        assert(mem_binding);
        if (mem_binding->sparse) {
            const char *error_code = nullptr;
            const char *handle_type = nullptr;
            if (typed_handle.type == kVulkanObjectTypeBuffer) {
                handle_type = "BUFFER";
                if (strcmp(apiName, "vkBindBufferMemory()") == 0) {
                    error_code = "VUID-vkBindBufferMemory-buffer-01030";
                } else {
                    error_code = "VUID-VkBindBufferMemoryInfo-buffer-01030";
                }
            } else if (typed_handle.type == kVulkanObjectTypeImage) {
                handle_type = "IMAGE";
                if (strcmp(apiName, "vkBindImageMemory()") == 0) {
                    error_code = "VUID-vkBindImageMemory-image-01045";
                } else {
                    error_code = "VUID-VkBindImageMemoryInfo-image-01045";
                }
            } else {
                // Unsupported object type
                assert(false);
            }

            LogObjectList objlist(mem);
            objlist.add(typed_handle);
            skip |= LogError(objlist, error_code,
                             "In %s, attempting to bind %s to %s which was created with sparse memory flags "
                             "(VK_%s_CREATE_SPARSE_*_BIT).",
                             apiName, report_data->FormatHandle(mem).c_str(), report_data->FormatHandle(typed_handle).c_str(),
                             handle_type);
        }
        const DEVICE_MEMORY_STATE *mem_info = ValidationStateTracker::GetDevMemState(mem);
        if (mem_info) {
            const DEVICE_MEMORY_STATE *prev_binding = mem_binding->binding.mem_state.get();
            if (prev_binding) {
                if (!prev_binding->destroyed) {
                    const char *error_code = nullptr;
                    if (typed_handle.type == kVulkanObjectTypeBuffer) {
                        if (strcmp(apiName, "vkBindBufferMemory()") == 0) {
                            error_code = "VUID-vkBindBufferMemory-buffer-01029";
                        } else {
                            error_code = "VUID-VkBindBufferMemoryInfo-buffer-01029";
                        }
                    } else if (typed_handle.type == kVulkanObjectTypeImage) {
                        if (strcmp(apiName, "vkBindImageMemory()") == 0) {
                            error_code = "VUID-vkBindImageMemory-image-01044";
                        } else {
                            error_code = "VUID-VkBindImageMemoryInfo-image-01044";
                        }
                    } else {
                        // Unsupported object type
                        assert(false);
                    }

                    LogObjectList objlist(mem);
                    objlist.add(typed_handle);
                    objlist.add(prev_binding->mem);
                    skip |= LogError(objlist, error_code, "In %s, attempting to bind %s to %s which has already been bound to %s.",
                                     apiName, report_data->FormatHandle(mem).c_str(),
                                     report_data->FormatHandle(typed_handle).c_str(),
                                     report_data->FormatHandle(prev_binding->mem).c_str());
                } else {
                    LogObjectList objlist(mem);
                    objlist.add(typed_handle);
                    skip |= LogError(objlist, kVUID_Core_MemTrack_RebindObject,
                                     "In %s, attempting to bind %s to %s which was previously bound to memory that has "
                                     "since been freed. Memory bindings are immutable in "
                                     "Vulkan so this attempt to bind to new memory is not allowed.",
                                     apiName, report_data->FormatHandle(mem).c_str(),
                                     report_data->FormatHandle(typed_handle).c_str());
                }
            }
        }
    }
    return skip;
}

bool CoreChecks::ValidateDeviceQueueFamily(uint32_t queue_family, const char *cmd_name, const char *parameter_name,
                                           const char *error_code, bool optional = false) const {
    bool skip = false;
    if (!optional && queue_family == VK_QUEUE_FAMILY_IGNORED) {
        skip |= LogError(device, error_code,
                         "%s: %s is VK_QUEUE_FAMILY_IGNORED, but it is required to provide a valid queue family index value.",
                         cmd_name, parameter_name);
    } else if (queue_family_index_map.find(queue_family) == queue_family_index_map.end()) {
        skip |=
            LogError(device, error_code,
                     "%s: %s (= %" PRIu32
                     ") is not one of the queue families given via VkDeviceQueueCreateInfo structures when the device was created.",
                     cmd_name, parameter_name, queue_family);
    }
    return skip;
}

// Validate the specified queue families against the families supported by the physical device that owns this device
bool CoreChecks::ValidatePhysicalDeviceQueueFamilies(uint32_t queue_family_count, const uint32_t *queue_families,
                                                     const char *cmd_name, const char *array_parameter_name,
                                                     const char *vuid) const {
    bool skip = false;
    if (queue_families) {
        std::unordered_set<uint32_t> set;
        for (uint32_t i = 0; i < queue_family_count; ++i) {
            std::string parameter_name = std::string(array_parameter_name) + "[" + std::to_string(i) + "]";

            if (set.count(queue_families[i])) {
                skip |= LogError(device, vuid, "%s: %s (=%" PRIu32 ") is not unique within %s array.", cmd_name,
                                 parameter_name.c_str(), queue_families[i], array_parameter_name);
            } else {
                set.insert(queue_families[i]);
                if (queue_families[i] == VK_QUEUE_FAMILY_IGNORED) {
                    skip |= LogError(
                        device, vuid,
                        "%s: %s is VK_QUEUE_FAMILY_IGNORED, but it is required to provide a valid queue family index value.",
                        cmd_name, parameter_name.c_str());
                } else if (queue_families[i] >= physical_device_state->queue_family_known_count) {
                    LogObjectList obj_list(physical_device);
                    obj_list.add(device);
                    skip |=
                        LogError(obj_list, vuid,
                                 "%s: %s (= %" PRIu32
                                 ") is not one of the queue families supported by the parent PhysicalDevice %s of this device %s.",
                                 cmd_name, parameter_name.c_str(), queue_families[i],
                                 report_data->FormatHandle(physical_device).c_str(), report_data->FormatHandle(device).c_str());
                }
            }
        }
    }
    return skip;
}

// Check object status for selected flag state
bool CoreChecks::ValidateStatus(const CMD_BUFFER_STATE *pNode, CBStatusFlags status_mask, const char *fail_msg,
                                const char *msg_code) const {
    if (!(pNode->status & status_mask)) {
        return LogError(pNode->commandBuffer, msg_code, "%s: %s.", report_data->FormatHandle(pNode->commandBuffer).c_str(),
                        fail_msg);
    }
    return false;
}

// Return true if for a given PSO, the given state enum is dynamic, else return false
bool CoreChecks::IsDynamic(const PIPELINE_STATE *pPipeline, const VkDynamicState state) const {
    if (pPipeline && pPipeline->graphicsPipelineCI.pDynamicState) {
        for (uint32_t i = 0; i < pPipeline->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) {
            if (state == pPipeline->graphicsPipelineCI.pDynamicState->pDynamicStates[i]) return true;
        }
    }
    return false;
}
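// Illustrative sketch (not part of the validation logic): how a pipeline opts in to a
// dynamic state, which is what IsDynamic() above inspects. Names are hypothetical.
//
//   const VkDynamicState dyn_states[] = {VK_DYNAMIC_STATE_VIEWPORT, VK_DYNAMIC_STATE_LINE_WIDTH};
//   VkPipelineDynamicStateCreateInfo dyn_ci = {VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO};
//   dyn_ci.dynamicStateCount = 2;
//   dyn_ci.pDynamicStates = dyn_states;
//   // With VK_DYNAMIC_STATE_LINE_WIDTH listed, the app must call vkCmdSetLineWidth() before
//   // drawing with a line-topology pipeline, or ValidateDrawStateFlags() below flags it.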
// Validate state stored as flags at time of draw call
bool CoreChecks::ValidateDrawStateFlags(const CMD_BUFFER_STATE *pCB, const PIPELINE_STATE *pPipe, bool indexed,
                                        const char *msg_code) const {
    bool result = false;
    if (pPipe->topology_at_rasterizer == VK_PRIMITIVE_TOPOLOGY_LINE_LIST ||
        pPipe->topology_at_rasterizer == VK_PRIMITIVE_TOPOLOGY_LINE_STRIP) {
        result |=
            ValidateStatus(pCB, CBSTATUS_LINE_WIDTH_SET, "Dynamic line width state not set for this command buffer", msg_code);
    }
    if (pPipe->graphicsPipelineCI.pRasterizationState &&
        (pPipe->graphicsPipelineCI.pRasterizationState->depthBiasEnable == VK_TRUE)) {
        result |=
            ValidateStatus(pCB, CBSTATUS_DEPTH_BIAS_SET, "Dynamic depth bias state not set for this command buffer", msg_code);
    }
    if (pPipe->blendConstantsEnabled) {
        result |= ValidateStatus(pCB, CBSTATUS_BLEND_CONSTANTS_SET,
                                 "Dynamic blend constants state not set for this command buffer", msg_code);
    }
    if (pPipe->graphicsPipelineCI.pDepthStencilState &&
        (pPipe->graphicsPipelineCI.pDepthStencilState->depthBoundsTestEnable == VK_TRUE)) {
        result |=
            ValidateStatus(pCB, CBSTATUS_DEPTH_BOUNDS_SET, "Dynamic depth bounds state not set for this command buffer", msg_code);
    }
    if (pPipe->graphicsPipelineCI.pDepthStencilState &&
        (pPipe->graphicsPipelineCI.pDepthStencilState->stencilTestEnable == VK_TRUE)) {
        result |= ValidateStatus(pCB, CBSTATUS_STENCIL_READ_MASK_SET,
                                 "Dynamic stencil read mask state not set for this command buffer", msg_code);
        result |= ValidateStatus(pCB, CBSTATUS_STENCIL_WRITE_MASK_SET,
                                 "Dynamic stencil write mask state not set for this command buffer", msg_code);
        result |= ValidateStatus(pCB, CBSTATUS_STENCIL_REFERENCE_SET,
                                 "Dynamic stencil reference state not set for this command buffer", msg_code);
    }
    if (indexed) {
        result |= ValidateStatus(pCB, CBSTATUS_INDEX_BUFFER_BOUND,
                                 "Index buffer object not bound to this command buffer when Indexed Draw attempted", msg_code);
    }
    if (pPipe->topology_at_rasterizer == VK_PRIMITIVE_TOPOLOGY_LINE_LIST ||
        pPipe->topology_at_rasterizer == VK_PRIMITIVE_TOPOLOGY_LINE_STRIP) {
        const auto *line_state =
            LvlFindInChain<VkPipelineRasterizationLineStateCreateInfoEXT>(pPipe->graphicsPipelineCI.pRasterizationState->pNext);
        if (line_state && line_state->stippledLineEnable) {
            result |= ValidateStatus(pCB, CBSTATUS_LINE_STIPPLE_SET,
                                     "Dynamic line stipple state not set for this command buffer", msg_code);
        }
    }
    return result;
}

bool CoreChecks::LogInvalidAttachmentMessage(const char *type1_string, const RENDER_PASS_STATE *rp1_state, const char *type2_string,
                                             const RENDER_PASS_STATE *rp2_state, uint32_t primary_attach,
                                             uint32_t secondary_attach, const char *msg, const char *caller,
                                             const char *error_code) const {
    LogObjectList objlist(rp1_state->renderPass);
    objlist.add(rp2_state->renderPass);
    return LogError(objlist, error_code,
                    "%s: RenderPasses incompatible between %s w/ %s and %s w/ %s Attachment %u is not "
                    "compatible with %u: %s.",
                    caller, type1_string, report_data->FormatHandle(rp1_state->renderPass).c_str(), type2_string,
                    report_data->FormatHandle(rp2_state->renderPass).c_str(), primary_attach, secondary_attach, msg);
}

bool CoreChecks::ValidateAttachmentCompatibility(const char *type1_string, const RENDER_PASS_STATE *rp1_state,
                                                 const char *type2_string, const RENDER_PASS_STATE *rp2_state,
                                                 uint32_t primary_attach, uint32_t secondary_attach, const char *caller,
                                                 const char *error_code) const {
    bool skip = false;
    const auto &primary_pass_ci = rp1_state->createInfo;
    const auto &secondary_pass_ci = rp2_state->createInfo;
    if (primary_pass_ci.attachmentCount <= primary_attach) {
        primary_attach = VK_ATTACHMENT_UNUSED;
    }
    if (secondary_pass_ci.attachmentCount <= secondary_attach) {
        secondary_attach = VK_ATTACHMENT_UNUSED;
    }
    if (primary_attach == VK_ATTACHMENT_UNUSED && secondary_attach == VK_ATTACHMENT_UNUSED) {
        return skip;
    }
    if (primary_attach == VK_ATTACHMENT_UNUSED) {
        skip |= LogInvalidAttachmentMessage(type1_string, rp1_state, type2_string, rp2_state, primary_attach, secondary_attach,
                                            "The first is unused while the second is not.", caller, error_code);
        return skip;
    }
    if (secondary_attach == VK_ATTACHMENT_UNUSED) {
        skip |= LogInvalidAttachmentMessage(type1_string, rp1_state, type2_string, rp2_state, primary_attach, secondary_attach,
                                            "The second is unused while the first is not.", caller, error_code);
        return skip;
    }
    if (primary_pass_ci.pAttachments[primary_attach].format != secondary_pass_ci.pAttachments[secondary_attach].format) {
        skip |= LogInvalidAttachmentMessage(type1_string, rp1_state, type2_string, rp2_state, primary_attach, secondary_attach,
                                            "They have different formats.", caller, error_code);
    }
    if (primary_pass_ci.pAttachments[primary_attach].samples != secondary_pass_ci.pAttachments[secondary_attach].samples) {
        skip |= LogInvalidAttachmentMessage(type1_string, rp1_state, type2_string, rp2_state, primary_attach, secondary_attach,
                                            "They have different samples.", caller, error_code);
    }
    if (primary_pass_ci.pAttachments[primary_attach].flags != secondary_pass_ci.pAttachments[secondary_attach].flags) {
        skip |= LogInvalidAttachmentMessage(type1_string, rp1_state, type2_string, rp2_state, primary_attach, secondary_attach,
                                            "They have different flags.", caller, error_code);
    }
    return skip;
}
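// Illustrative sketch (not part of the validation logic): two attachment descriptions that
// ValidateAttachmentCompatibility() above would reject when used at the same attachment slot
// of two render passes. Struct contents are hypothetical.
//
//   VkAttachmentDescription a0 = {};  a0.format = VK_FORMAT_B8G8R8A8_UNORM;
//   VkAttachmentDescription a1 = {};  a1.format = VK_FORMAT_R16G16B16A16_SFLOAT;
//   // Formats differ -> "They have different formats."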
bool CoreChecks::ValidateSubpassCompatibility(const char *type1_string, const RENDER_PASS_STATE *rp1_state,
                                              const char *type2_string, const RENDER_PASS_STATE *rp2_state, const int subpass,
                                              const char *caller, const char *error_code) const {
    bool skip = false;
    const auto &primary_desc = rp1_state->createInfo.pSubpasses[subpass];
    const auto &secondary_desc = rp2_state->createInfo.pSubpasses[subpass];
    uint32_t max_input_attachment_count = std::max(primary_desc.inputAttachmentCount, secondary_desc.inputAttachmentCount);
    for (uint32_t i = 0; i < max_input_attachment_count; ++i) {
        uint32_t primary_input_attach = VK_ATTACHMENT_UNUSED, secondary_input_attach = VK_ATTACHMENT_UNUSED;
        if (i < primary_desc.inputAttachmentCount) {
            primary_input_attach = primary_desc.pInputAttachments[i].attachment;
        }
        if (i < secondary_desc.inputAttachmentCount) {
            secondary_input_attach = secondary_desc.pInputAttachments[i].attachment;
        }
        skip |= ValidateAttachmentCompatibility(type1_string, rp1_state, type2_string, rp2_state, primary_input_attach,
                                                secondary_input_attach, caller, error_code);
    }
    uint32_t max_color_attachment_count = std::max(primary_desc.colorAttachmentCount, secondary_desc.colorAttachmentCount);
    for (uint32_t i = 0; i < max_color_attachment_count; ++i) {
        uint32_t primary_color_attach = VK_ATTACHMENT_UNUSED, secondary_color_attach = VK_ATTACHMENT_UNUSED;
        if (i < primary_desc.colorAttachmentCount) {
            primary_color_attach = primary_desc.pColorAttachments[i].attachment;
        }
        if (i < secondary_desc.colorAttachmentCount) {
            secondary_color_attach = secondary_desc.pColorAttachments[i].attachment;
        }
        skip |= ValidateAttachmentCompatibility(type1_string, rp1_state, type2_string, rp2_state, primary_color_attach,
                                                secondary_color_attach, caller, error_code);
        if (rp1_state->createInfo.subpassCount > 1) {
            uint32_t primary_resolve_attach = VK_ATTACHMENT_UNUSED, secondary_resolve_attach = VK_ATTACHMENT_UNUSED;
            if (i < primary_desc.colorAttachmentCount && primary_desc.pResolveAttachments) {
                primary_resolve_attach = primary_desc.pResolveAttachments[i].attachment;
            }
            if (i < secondary_desc.colorAttachmentCount && secondary_desc.pResolveAttachments) {
                secondary_resolve_attach = secondary_desc.pResolveAttachments[i].attachment;
            }
            skip |= ValidateAttachmentCompatibility(type1_string, rp1_state, type2_string, rp2_state, primary_resolve_attach,
                                                    secondary_resolve_attach, caller, error_code);
        }
    }
    uint32_t primary_depthstencil_attach = VK_ATTACHMENT_UNUSED, secondary_depthstencil_attach = VK_ATTACHMENT_UNUSED;
    if (primary_desc.pDepthStencilAttachment) {
        primary_depthstencil_attach = primary_desc.pDepthStencilAttachment[0].attachment;
    }
    if (secondary_desc.pDepthStencilAttachment) {
        secondary_depthstencil_attach = secondary_desc.pDepthStencilAttachment[0].attachment;
    }
    skip |= ValidateAttachmentCompatibility(type1_string, rp1_state, type2_string, rp2_state, primary_depthstencil_attach,
                                            secondary_depthstencil_attach, caller, error_code);

    // Both renderpasses must agree on Multiview usage
    if (primary_desc.viewMask && secondary_desc.viewMask) {
        if (primary_desc.viewMask != secondary_desc.viewMask) {
            std::stringstream ss;
            ss << "For subpass " << subpass << ", they have a different viewMask. The first has view mask "
               << primary_desc.viewMask << " while the second has view mask " << secondary_desc.viewMask << ".";
            skip |= LogInvalidPnextMessage(type1_string, rp1_state, type2_string, rp2_state, ss.str().c_str(), caller, error_code);
        }
    } else if (primary_desc.viewMask) {
        skip |= LogInvalidPnextMessage(type1_string, rp1_state, type2_string, rp2_state,
                                       "The first uses Multiview (has non-zero viewMasks) while the second one does not.", caller,
                                       error_code);
    } else if (secondary_desc.viewMask) {
        skip |= LogInvalidPnextMessage(type1_string, rp1_state, type2_string, rp2_state,
                                       "The second uses Multiview (has non-zero viewMasks) while the first one does not.", caller,
                                       error_code);
    }
    return skip;
}

bool CoreChecks::LogInvalidPnextMessage(const char *type1_string, const RENDER_PASS_STATE *rp1_state, const char *type2_string,
                                        const RENDER_PASS_STATE *rp2_state, const char *msg, const char *caller,
                                        const char *error_code) const {
    LogObjectList objlist(rp1_state->renderPass);
    objlist.add(rp2_state->renderPass);
    return LogError(objlist, error_code, "%s: RenderPasses incompatible between %s w/ %s and %s w/ %s: %s", caller, type1_string,
                    report_data->FormatHandle(rp1_state->renderPass).c_str(), type2_string,
                    report_data->FormatHandle(rp2_state->renderPass).c_str(), msg);
}

// Verify that given renderPass CreateInfo for primary and secondary command buffers are compatible.
// This function deals directly with the CreateInfo, there are overloaded versions below that can take the renderPass handle and
// will then feed into this function
bool CoreChecks::ValidateRenderPassCompatibility(const char *type1_string, const RENDER_PASS_STATE *rp1_state,
                                                 const char *type2_string, const RENDER_PASS_STATE *rp2_state, const char *caller,
                                                 const char *error_code) const {
    bool skip = false;

    // createInfo flags must be identical for the renderpasses to be compatible.
    if (rp1_state->createInfo.flags != rp2_state->createInfo.flags) {
        LogObjectList objlist(rp1_state->renderPass);
        objlist.add(rp2_state->renderPass);
        skip |=
            LogError(objlist, error_code,
                     "%s: RenderPasses incompatible between %s w/ %s with flags of %u and %s w/ "
                     "%s with flags of %u.",
                     caller, type1_string, report_data->FormatHandle(rp1_state->renderPass).c_str(), rp1_state->createInfo.flags,
                     type2_string, report_data->FormatHandle(rp2_state->renderPass).c_str(), rp2_state->createInfo.flags);
    }

    if (rp1_state->createInfo.subpassCount != rp2_state->createInfo.subpassCount) {
        LogObjectList objlist(rp1_state->renderPass);
        objlist.add(rp2_state->renderPass);
        skip |= LogError(objlist, error_code,
                         "%s: RenderPasses incompatible between %s w/ %s with a subpassCount of %u and %s w/ "
                         "%s with a subpassCount of %u.",
                         caller, type1_string, report_data->FormatHandle(rp1_state->renderPass).c_str(),
                         rp1_state->createInfo.subpassCount, type2_string,
                         report_data->FormatHandle(rp2_state->renderPass).c_str(), rp2_state->createInfo.subpassCount);
    } else {
        for (uint32_t i = 0; i < rp1_state->createInfo.subpassCount; ++i) {
            skip |= ValidateSubpassCompatibility(type1_string, rp1_state, type2_string, rp2_state, i, caller, error_code);
        }
    }

    // Find an entry of the Fragment Density Map type in the pNext chain, if it exists
    const auto fdm1 = LvlFindInChain<VkRenderPassFragmentDensityMapCreateInfoEXT>(rp1_state->createInfo.pNext);
    const auto fdm2 = LvlFindInChain<VkRenderPassFragmentDensityMapCreateInfoEXT>(rp2_state->createInfo.pNext);

    // Both renderpasses must agree on usage of a Fragment Density Map type
    if (fdm1 && fdm2) {
        uint32_t primary_input_attach = fdm1->fragmentDensityMapAttachment.attachment;
        uint32_t secondary_input_attach = fdm2->fragmentDensityMapAttachment.attachment;
        skip |= ValidateAttachmentCompatibility(type1_string, rp1_state, type2_string, rp2_state, primary_input_attach,
                                                secondary_input_attach, caller, error_code);
    } else if (fdm1) {
        skip |= LogInvalidPnextMessage(type1_string, rp1_state, type2_string, rp2_state,
                                       "The first uses a Fragment Density Map while the second one does not.", caller, error_code);
    } else if (fdm2) {
        skip |= LogInvalidPnextMessage(type1_string, rp1_state, type2_string, rp2_state,
                                       "The second uses a Fragment Density Map while the first one does not.", caller, error_code);
    }
    return skip;
}

// For given pipeline, return number of MSAA samples, or one if MSAA disabled
static VkSampleCountFlagBits GetNumSamples(PIPELINE_STATE const *pipe) {
    if (pipe->graphicsPipelineCI.pMultisampleState != NULL &&
        VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO == pipe->graphicsPipelineCI.pMultisampleState->sType) {
        return pipe->graphicsPipelineCI.pMultisampleState->rasterizationSamples;
    }
    return VK_SAMPLE_COUNT_1_BIT;
}

static void ListBits(std::ostream &s, uint32_t bits) {
    for (int i = 0; i < 32 && bits; i++) {
        if (bits & (1 << i)) {
            s << i;
            bits &= ~(1 << i);
            if (bits) {
                s << ",";
            }
        }
    }
}

std::string DynamicStateString(CBStatusFlags input_value) {
    std::string ret;
    int index = 0;
    while (input_value) {
        if (input_value & 1) {
            if (!ret.empty()) ret.append("|");
            ret.append(string_VkDynamicState(ConvertToDynamicState(static_cast<CBStatusFlagBits>(1 << index))));
        }
        ++index;
        input_value >>= 1;
    }
    if (ret.empty()) ret.append(string_VkDynamicState(ConvertToDynamicState(static_cast<CBStatusFlagBits>(0))));
    return ret;
}
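// Illustrative sketch (not part of the validation logic): what ListBits() above produces
// for a given mask; the variable names are hypothetical.
//
//   std::stringstream ss;
//   ListBits(ss, 0b1011);  // bits 0, 1, and 3 are set, so ss now contains "0,1,3"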
// Validate draw-time state related to the PSO
bool CoreChecks::ValidatePipelineDrawtimeState(const LAST_BOUND_STATE &state, const CMD_BUFFER_STATE *pCB, CMD_TYPE cmd_type,
                                               const PIPELINE_STATE *pPipeline, const char *caller) const {
    bool skip = false;
    const auto &current_vtx_bfr_binding_info = pCB->current_vertex_buffer_binding_info.vertex_buffer_bindings;
    const DrawDispatchVuid vuid = GetDrawDispatchVuid(cmd_type);

    // Verify vertex & index buffer for unprotected command buffer.
    // Because vertex & index buffers are read only, the protected command buffer case does not need to be checked.
    if (enabled_features.core11.protectedMemory == VK_TRUE) {
        for (const auto &buffer_binding : current_vtx_bfr_binding_info) {
            if (buffer_binding.buffer_state && !buffer_binding.buffer_state->destroyed) {
                skip |= ValidateProtectedBuffer(pCB, buffer_binding.buffer_state.get(), caller, vuid.unprotected_command_buffer,
                                                "Buffer is vertex buffer");
            }
        }
        if (pCB->index_buffer_binding.buffer_state && !pCB->index_buffer_binding.buffer_state->destroyed) {
            skip |= ValidateProtectedBuffer(pCB, pCB->index_buffer_binding.buffer_state.get(), caller,
                                            vuid.unprotected_command_buffer, "Buffer is index buffer");
        }
    }

    // Verify if using dynamic state setting commands that it doesn't set up in pipeline
    CBStatusFlags invalid_status = CBSTATUS_ALL_STATE_SET & ~(pCB->dynamic_status | pCB->static_status);
    if (invalid_status) {
        std::string dynamic_states = DynamicStateString(invalid_status);
        LogObjectList objlist(pCB->commandBuffer);
        objlist.add(pPipeline->pipeline);
        skip |= LogError(objlist, vuid.dynamic_state_setting_commands,
                         "%s: %s doesn't set up %s, but it calls the related dynamic state setting commands", caller,
                         report_data->FormatHandle(state.pipeline_state->pipeline).c_str(), dynamic_states.c_str());
    }

    // Verify vertex binding
    if (pPipeline->vertex_binding_descriptions_.size() > 0) {
        for (size_t i = 0; i < pPipeline->vertex_binding_descriptions_.size(); i++) {
            const auto vertex_binding = pPipeline->vertex_binding_descriptions_[i].binding;
            if (current_vtx_bfr_binding_info.size() < (vertex_binding + 1)) {
                skip |= LogError(pCB->commandBuffer, vuid.vertex_binding,
                                 "%s: %s expects that this Command Buffer's vertex binding Index %u should be set via "
                                 "vkCmdBindVertexBuffers. This is because VkVertexInputBindingDescription struct at "
                                 "index " PRINTF_SIZE_T_SPECIFIER " of pVertexBindingDescriptions has a binding value of %u.",
                                 caller, report_data->FormatHandle(state.pipeline_state->pipeline).c_str(), vertex_binding, i,
                                 vertex_binding);
            } else if ((current_vtx_bfr_binding_info[vertex_binding].buffer_state == nullptr) &&
                       !enabled_features.robustness2_features.nullDescriptor) {
                skip |= LogError(pCB->commandBuffer, vuid.vertex_binding_null,
                                 "%s: Vertex binding %d must not be VK_NULL_HANDLE. %s expects that this Command Buffer's vertex "
                                 "binding Index %u should be set via "
                                 "vkCmdBindVertexBuffers. This is because VkVertexInputBindingDescription struct at "
                                 "index " PRINTF_SIZE_T_SPECIFIER " of pVertexBindingDescriptions has a binding value of %u.",
                                 caller, vertex_binding, report_data->FormatHandle(state.pipeline_state->pipeline).c_str(),
                                 vertex_binding, i, vertex_binding);
            }
        }

        // Verify vertex attribute address alignment
        for (size_t i = 0; i < pPipeline->vertex_attribute_descriptions_.size(); i++) {
            const auto &attribute_description = pPipeline->vertex_attribute_descriptions_[i];
            const auto vertex_binding = attribute_description.binding;
            const auto attribute_offset = attribute_description.offset;

            const auto &vertex_binding_map_it = pPipeline->vertex_binding_to_index_map_.find(vertex_binding);
            if ((vertex_binding_map_it != pPipeline->vertex_binding_to_index_map_.cend()) &&
                (vertex_binding < current_vtx_bfr_binding_info.size()) &&
                ((current_vtx_bfr_binding_info[vertex_binding].buffer_state) ||
                 enabled_features.robustness2_features.nullDescriptor)) {
                auto vertex_buffer_stride = pPipeline->vertex_binding_descriptions_[vertex_binding_map_it->second].stride;
                if (IsDynamic(pPipeline, VK_DYNAMIC_STATE_VERTEX_INPUT_BINDING_STRIDE_EXT)) {
                    vertex_buffer_stride = static_cast<uint32_t>(current_vtx_bfr_binding_info[vertex_binding].stride);
                    uint32_t attribute_binding_extent =
                        attribute_description.offset + FormatElementSize(attribute_description.format);
                    if (vertex_buffer_stride < attribute_binding_extent) {
                        skip |= LogError(pCB->commandBuffer, "VUID-vkCmdBindVertexBuffers2EXT-pStrides-03363",
                                         "The pStrides[%u] (%u) parameter in the last call to vkCmdBindVertexBuffers2EXT is less "
                                         "than the extent of the binding for attribute %u (%u).",
                                         vertex_binding, vertex_buffer_stride, i, attribute_binding_extent);
                    }
                }
                const auto vertex_buffer_offset = current_vtx_bfr_binding_info[vertex_binding].offset;

                // Use 1 as vertex/instance index to use buffer stride as well
                const auto attrib_address = vertex_buffer_offset + vertex_buffer_stride + attribute_offset;

                VkDeviceSize vtx_attrib_req_alignment = pPipeline->vertex_attribute_alignments_[i];

                if (SafeModulo(attrib_address, vtx_attrib_req_alignment) != 0) {
                    LogObjectList objlist(current_vtx_bfr_binding_info[vertex_binding].buffer_state->buffer);
                    objlist.add(state.pipeline_state->pipeline);
                    skip |= LogError(objlist, vuid.vertex_binding_attribute,
                                     "%s: Invalid attribAddress alignment for vertex attribute " PRINTF_SIZE_T_SPECIFIER
                                     ", %s, from %s and vertex %s.",
                                     caller, i, string_VkFormat(attribute_description.format),
                                     report_data->FormatHandle(state.pipeline_state->pipeline).c_str(),
                                     report_data->FormatHandle(current_vtx_bfr_binding_info[vertex_binding].buffer_state->buffer)
                                         .c_str());
                }
            } else {
                LogObjectList objlist(pCB->commandBuffer);
                objlist.add(state.pipeline_state->pipeline);
                skip |= LogError(objlist, vuid.vertex_binding_attribute,
                                 "%s: binding #%" PRIu32
                                 " in pVertexAttributeDescriptions of %s is invalid in vkCmdBindVertexBuffers of %s.",
                                 caller, vertex_binding, report_data->FormatHandle(state.pipeline_state->pipeline).c_str(),
                                 report_data->FormatHandle(pCB->commandBuffer).c_str());
            }
        }
    }

    // If Viewport or scissors are dynamic, verify that dynamic count matches PSO count.
    // Skip check if rasterization is disabled or there is no viewport.
    if ((!pPipeline->graphicsPipelineCI.pRasterizationState ||
         (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) &&
        pPipeline->graphicsPipelineCI.pViewportState) {
        bool dyn_viewport = IsDynamic(pPipeline, VK_DYNAMIC_STATE_VIEWPORT);
        bool dyn_scissor = IsDynamic(pPipeline, VK_DYNAMIC_STATE_SCISSOR);

        if (dyn_viewport) {
            const auto required_viewports_mask = (1 << pPipeline->graphicsPipelineCI.pViewportState->viewportCount) - 1;
            const auto missing_viewport_mask = ~pCB->viewportMask & required_viewports_mask;
            if (missing_viewport_mask) {
                std::stringstream ss;
                ss << caller << ": Dynamic viewport(s) ";
                ListBits(ss, missing_viewport_mask);
                ss << " are used by pipeline state object, but were not provided via calls to vkCmdSetViewport().";
                skip |= LogError(device, vuid.dynamic_state, "%s", ss.str().c_str());
            }
        }

        if (dyn_scissor) {
            const auto required_scissor_mask = (1 << pPipeline->graphicsPipelineCI.pViewportState->scissorCount) - 1;
            const auto missing_scissor_mask = ~pCB->scissorMask & required_scissor_mask;
            if (missing_scissor_mask) {
                std::stringstream ss;
                ss << caller << ": Dynamic scissor(s) ";
                ListBits(ss, missing_scissor_mask);
                ss << " are used by pipeline state object, but were not provided via calls to vkCmdSetScissor().";
                skip |= LogError(device, vuid.dynamic_state, "%s", ss.str().c_str());
            }
        }

        bool dyn_viewport_count = IsDynamic(pPipeline, VK_DYNAMIC_STATE_VIEWPORT_WITH_COUNT_EXT);
        bool dyn_scissor_count = IsDynamic(pPipeline, VK_DYNAMIC_STATE_SCISSOR_WITH_COUNT_EXT);

        // VUID {refpage}-viewportCount-03417
        if (dyn_viewport_count && !dyn_scissor_count) {
            const auto required_viewport_mask = (1 << pPipeline->graphicsPipelineCI.pViewportState->scissorCount) - 1;
            const auto missing_viewport_mask = ~pCB->viewportWithCountMask & required_viewport_mask;
            if (missing_viewport_mask) {
                std::stringstream ss;
                ss << caller << ": Dynamic viewport with count ";
                ListBits(ss, missing_viewport_mask);
                ss << " are used by pipeline state object, but were not provided via calls to vkCmdSetViewportWithCountEXT().";
                skip |= LogError(device, vuid.viewport_count, "%s", ss.str().c_str());
            }
        }

        // VUID {refpage}-scissorCount-03418
        if (dyn_scissor_count && !dyn_viewport_count) {
            const auto required_scissor_mask = (1 << pPipeline->graphicsPipelineCI.pViewportState->viewportCount) - 1;
            const auto missing_scissor_mask = ~pCB->scissorWithCountMask & required_scissor_mask;
            if (missing_scissor_mask) {
                std::stringstream ss;
                ss << caller << ": Dynamic scissor with count ";
                ListBits(ss, missing_scissor_mask);
                ss << " are used by pipeline state object, but were not provided via calls to vkCmdSetScissorWithCountEXT().";
                skip |= LogError(device, vuid.scissor_count, "%s", ss.str().c_str());
            }
        }

        // VUID {refpage}-viewportCount-03419
        if (dyn_scissor_count && dyn_viewport_count) {
            if (pCB->viewportWithCountMask != pCB->scissorWithCountMask) {
                std::stringstream ss;
                ss << caller << ": Dynamic viewport and scissor with count ";
                ListBits(ss, pCB->viewportWithCountMask ^ pCB->scissorWithCountMask);
                ss << " are used by pipeline state object, but were not provided via matching calls to "
                      "vkCmdSetViewportWithCountEXT and vkCmdSetScissorWithCountEXT().";
                skip |= LogError(device, vuid.viewport_scissor_count, "%s", ss.str().c_str());
            }
        }
    }

    // Verify that any MSAA request in PSO matches sample# in bound FB
    // Skip the check if rasterization is disabled.
    if (!pPipeline->graphicsPipelineCI.pRasterizationState ||
        (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) {
        VkSampleCountFlagBits pso_num_samples = GetNumSamples(pPipeline);
        if (pCB->activeRenderPass) {
            const auto render_pass_info = pCB->activeRenderPass->createInfo.ptr();
            const VkSubpassDescription2 *subpass_desc = &render_pass_info->pSubpasses[pCB->activeSubpass];
            uint32_t i;
            unsigned subpass_num_samples = 0;

            for (i = 0; i < subpass_desc->colorAttachmentCount; i++) {
                const auto attachment = subpass_desc->pColorAttachments[i].attachment;
                if (attachment != VK_ATTACHMENT_UNUSED) {
                    subpass_num_samples |= static_cast<unsigned>(render_pass_info->pAttachments[attachment].samples);
                }
            }

            if (subpass_desc->pDepthStencilAttachment &&
                subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
                const auto attachment = subpass_desc->pDepthStencilAttachment->attachment;
                subpass_num_samples |= static_cast<unsigned>(render_pass_info->pAttachments[attachment].samples);
            }

            if (!(device_extensions.vk_amd_mixed_attachment_samples || device_extensions.vk_nv_framebuffer_mixed_samples) &&
                ((subpass_num_samples & static_cast<unsigned>(pso_num_samples)) != subpass_num_samples)) {
                LogObjectList objlist(pPipeline->pipeline);
                objlist.add(pCB->activeRenderPass->renderPass);
                skip |= LogError(objlist, kVUID_Core_DrawState_NumSamplesMismatch,
                                 "%s: Num samples mismatch! At draw-time in %s with %u samples while current %s w/ "
                                 "%u samples!",
                                 caller, report_data->FormatHandle(pPipeline->pipeline).c_str(), pso_num_samples,
                                 report_data->FormatHandle(pCB->activeRenderPass->renderPass).c_str(), subpass_num_samples);
            }
        } else {
            skip |= LogError(pPipeline->pipeline, kVUID_Core_DrawState_NoActiveRenderpass,
                             "%s: No active render pass found at draw-time in %s!", caller,
                             report_data->FormatHandle(pPipeline->pipeline).c_str());
        }
    }

    // Verify that PSO creation renderPass is compatible with active renderPass
    if (pCB->activeRenderPass) {
        // TODO: AMD extension codes are included here, but actual function entrypoints are not yet intercepted
        if (pCB->activeRenderPass->renderPass != pPipeline->rp_state->renderPass) {
            // renderPass that PSO was created with must be compatible with active renderPass that PSO is being used with
            skip |= ValidateRenderPassCompatibility("active render pass", pCB->activeRenderPass.get(), "pipeline state object",
                                                    pPipeline->rp_state.get(), caller, vuid.render_pass_compatible);
        }
        if (pPipeline->graphicsPipelineCI.subpass != pCB->activeSubpass) {
            skip |=
                LogError(pPipeline->pipeline, vuid.subpass_index, "%s: Pipeline was built for subpass %u but used in subpass %u.",
                         caller, pPipeline->graphicsPipelineCI.subpass, pCB->activeSubpass);
        }
        // Check if depth stencil attachment was created with sample location compatible bit
        if (pPipeline->sample_location_enabled == VK_TRUE) {
            const safe_VkAttachmentReference2 *ds_attachment =
                pCB->activeRenderPass->createInfo.pSubpasses[pCB->activeSubpass].pDepthStencilAttachment;
            const FRAMEBUFFER_STATE *fb_state = pCB->activeFramebuffer.get();
            if ((ds_attachment != nullptr) && (fb_state != nullptr)) {
                const uint32_t attachment = ds_attachment->attachment;
                if (attachment != VK_ATTACHMENT_UNUSED) {
                    const auto *imageview_state = GetActiveAttachmentImageViewState(pCB, attachment);
                    if (imageview_state != nullptr) {
                        const IMAGE_STATE *image_state = GetImageState(imageview_state->create_info.image);
                        if (image_state != nullptr) {
                            if ((image_state->createInfo.flags & VK_IMAGE_CREATE_SAMPLE_LOCATIONS_COMPATIBLE_DEPTH_BIT_EXT) == 0) {
                                skip |= LogError(pPipeline->pipeline, vuid.sample_location,
                                                 "%s: sampleLocationsEnable is true for the pipeline, but the subpass (%u) depth "
                                                 "stencil attachment's VkImage was not created with "
                                                 "VK_IMAGE_CREATE_SAMPLE_LOCATIONS_COMPATIBLE_DEPTH_BIT_EXT.",
                                                 caller, pCB->activeSubpass);
                            }
                        }
                    }
                }
            }
        }
    }

    // VUID {refpage}-primitiveTopology-03420
    skip |= ValidateStatus(pCB, CBSTATUS_PRIMITIVE_TOPOLOGY_SET,
                           "Dynamic primitive topology state not set for this command buffer", vuid.primitive_topology);
    if (IsDynamic(pPipeline, VK_DYNAMIC_STATE_PRIMITIVE_TOPOLOGY_EXT)) {
        bool compatible_topology = false;
        switch (pPipeline->graphicsPipelineCI.pInputAssemblyState->topology) {
            case VK_PRIMITIVE_TOPOLOGY_POINT_LIST:
                switch (pCB->primitiveTopology) {
                    case VK_PRIMITIVE_TOPOLOGY_POINT_LIST:
                        compatible_topology = true;
                        break;
                    default:
                        break;
                }
                break;
            case VK_PRIMITIVE_TOPOLOGY_LINE_LIST:
            case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP:
            case VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY:
            case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY:
                switch (pCB->primitiveTopology) {
                    case VK_PRIMITIVE_TOPOLOGY_LINE_LIST:
                    case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP:
                        compatible_topology = true;
                        break;
                    default:
                        break;
                }
                break;
            case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST:
            case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP:
            case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN:
            case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY:
            case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY:
                switch (pCB->primitiveTopology) {
                    case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST:
                    case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP:
                    case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN:
                    case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY:
                    case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY:
                        compatible_topology = true;
                        break;
                    default:
                        break;
                }
                break;
            case VK_PRIMITIVE_TOPOLOGY_PATCH_LIST:
                switch (pCB->primitiveTopology) {
                    case VK_PRIMITIVE_TOPOLOGY_PATCH_LIST:
                        compatible_topology = true;
                        break;
                    default:
                        break;
                }
                break;
            default:
                break;
        }
        if (!compatible_topology) {
            skip |= LogError(pPipeline->pipeline, vuid.primitive_topology,
                             "%s: the last primitive topology %s state set by vkCmdSetPrimitiveTopologyEXT is "
                             "not compatible with the pipeline topology %s.",
                             caller, string_VkPrimitiveTopology(pCB->primitiveTopology),
                             string_VkPrimitiveTopology(pPipeline->graphicsPipelineCI.pInputAssemblyState->topology));
        }
    }

    if (enabled_features.fragment_shading_rate_features.primitiveFragmentShadingRate) {
        skip |= ValidateGraphicsPipelineShaderDynamicState(pPipeline, pCB, caller, vuid);
    }

    return skip;
}

// For given cvdescriptorset::DescriptorSet, verify that its Set is compatible w/ the setLayout corresponding to
// pipelineLayout[layoutIndex]
static bool VerifySetLayoutCompatibility(const debug_report_data *report_data,
                                         const cvdescriptorset::DescriptorSet *descriptor_set,
                                         PIPELINE_LAYOUT_STATE const *pipeline_layout, const uint32_t layoutIndex,
                                         string &errorMsg) {
    auto num_sets = pipeline_layout->set_layouts.size();
    if (layoutIndex >= num_sets) {
        stringstream error_str;
        error_str << report_data->FormatHandle(pipeline_layout->layout) << ") only contains " << num_sets
                  << " setLayouts corresponding to sets 0-" << num_sets - 1 << ", but you're attempting to bind set to index "
                  << layoutIndex;
        errorMsg = error_str.str();
        return false;
    }
    if (descriptor_set->IsPushDescriptor()) return true;
    auto layout_node = pipeline_layout->set_layouts[layoutIndex].get();
    return cvdescriptorset::VerifySetLayoutCompatibility(report_data, layout_node, descriptor_set->GetLayout().get(), &errorMsg);
}
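// Illustrative sketch (not part of the validation logic): the kind of mismatch that
// VerifySetLayoutCompatibility() above guards against. Names are hypothetical.
//
//   // Pipeline layout created with a single set layout, so num_sets == 1
//   vkCmdBindDescriptorSets(cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout,
//                           /*firstSet=*/1, 1, &set, 0, nullptr);
//   // Using set index 1 against a one-set layout produces the error string built above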
// Validate overall state at the time of a draw call
bool CoreChecks::ValidateCmdBufDrawState(const CMD_BUFFER_STATE *cb_node, CMD_TYPE cmd_type, const bool indexed,
                                         const VkPipelineBindPoint bind_point, const char *function) const {
    const DrawDispatchVuid vuid = GetDrawDispatchVuid(cmd_type);
    const auto lv_bind_point = ConvertToLvlBindPoint(bind_point);
    const auto &state = cb_node->lastBound[lv_bind_point];
    const auto *pipe = state.pipeline_state;
    if (nullptr == pipe) {
        return LogError(cb_node->commandBuffer, vuid.pipeline_bound,
                        "Must not call %s on this command buffer while there is no %s pipeline bound.", function,
                        bind_point == VK_PIPELINE_BIND_POINT_RAY_TRACING_KHR
                            ? "RayTracing"
                            : bind_point == VK_PIPELINE_BIND_POINT_GRAPHICS ? "Graphics" : "Compute");
    }

    bool result = false;

    if (VK_PIPELINE_BIND_POINT_GRAPHICS == bind_point) {
        // First check flag states
        result |= ValidateDrawStateFlags(cb_node, pipe, indexed, vuid.dynamic_state);

        if (cb_node->activeRenderPass && cb_node->activeFramebuffer) {
            // Verify attachments for unprotected/protected command buffer.
            if (enabled_features.core11.protectedMemory == VK_TRUE && cb_node->active_attachments) {
                uint32_t i = 0;
                for (const auto &view_state : *cb_node->active_attachments.get()) {
                    const auto &subpass = cb_node->active_subpasses->at(i);
                    if (subpass.used && view_state && !view_state->destroyed) {
                        std::string image_desc = "Image is ";
                        image_desc.append(string_VkImageUsageFlagBits(subpass.usage));
                        // Because the input attachment is read only, the protected command buffer case does not need the check.
                        // Some CMD_TYPE could not be protected. See VUID 02711.
                        if (subpass.usage != VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT &&
                            vuid.protected_command_buffer != kVUIDUndefined) {
                            result |= ValidateUnprotectedImage(cb_node, view_state->image_state.get(), function,
                                                               vuid.protected_command_buffer, image_desc.c_str());
                        }
                        result |= ValidateProtectedImage(cb_node, view_state->image_state.get(), function,
                                                         vuid.unprotected_command_buffer, image_desc.c_str());
                    }
                    ++i;
                }
            }
        }
    }

    // Now complete other state checks
    string error_string;
    auto const &pipeline_layout = pipe->pipeline_layout.get();

    // Check if the current pipeline is compatible for the maximum used set with the bound sets.
    if (pipe->active_slots.size() > 0 && !CompatForSet(pipe->max_active_slot, state, pipeline_layout->compat_for_set)) {
        LogObjectList objlist(pipe->pipeline);
        objlist.add(pipeline_layout->layout);
        objlist.add(state.pipeline_layout);
        result |= LogError(objlist, vuid.compatible_pipeline,
                           "%s(): %s defined with %s is not compatible for maximum set statically used %" PRIu32
                           " with bound descriptor sets, last bound with %s",
                           CommandTypeString(cmd_type), report_data->FormatHandle(pipe->pipeline).c_str(),
                           report_data->FormatHandle(pipeline_layout->layout).c_str(), pipe->max_active_slot,
                           report_data->FormatHandle(state.pipeline_layout).c_str());
    }

    for (const auto &set_binding_pair : pipe->active_slots) {
        uint32_t set_index = set_binding_pair.first;
        // If valid set is not bound throw an error
        if ((state.per_set.size() <= set_index) || (!state.per_set[set_index].bound_descriptor_set)) {
            result |= LogError(cb_node->commandBuffer, kVUID_Core_DrawState_DescriptorSetNotBound,
                               "%s uses set #%u but that set is not bound.", report_data->FormatHandle(pipe->pipeline).c_str(),
                               set_index);
        } else if (!VerifySetLayoutCompatibility(report_data, state.per_set[set_index].bound_descriptor_set, pipeline_layout,
                                                 set_index, error_string)) {
            // Set is bound but not compatible w/ overlapping pipeline_layout from PSO
            VkDescriptorSet set_handle = state.per_set[set_index].bound_descriptor_set->GetSet();
            LogObjectList objlist(set_handle);
            objlist.add(pipeline_layout->layout);
            result |= LogError(objlist, kVUID_Core_DrawState_PipelineLayoutsIncompatible,
                               "%s bound as set #%u is not compatible with overlapping %s due to: %s",
                               report_data->FormatHandle(set_handle).c_str(), set_index,
                               report_data->FormatHandle(pipeline_layout->layout).c_str(), error_string.c_str());
        } else {  // Valid set is bound and layout compatible, validate that it's updated
            // Pull the set node
            const cvdescriptorset::DescriptorSet *descriptor_set = state.per_set[set_index].bound_descriptor_set;
            // Validate the draw-time state for this descriptor set
            std::string err_str;
            // For the "bindless" style resource usage with many descriptors, need to optimize command <-> descriptor
            // binding validation. Take the requested binding set and prefilter it to eliminate redundant validation checks.
            // Here, the currently bound pipeline determines whether an image validation check is redundant...
            // for images are the "req" portion of the binding_req is indirectly (but tightly) coupled to the pipeline.
            cvdescriptorset::PrefilterBindRequestMap reduced_map(*descriptor_set, set_binding_pair.second);
            const auto &binding_req_map = reduced_map.FilteredMap(*cb_node, *pipe);

            // We can skip validating the descriptor set if "nothing" has changed since the last validation.
            // Same set, no image layout changes, and same "pipeline state" (binding_req_map). If there are
            // any dynamic descriptors, always revalidate rather than caching the values. We currently only
            // apply this optimization if IsManyDescriptors is true, to avoid the overhead of copying the
            // binding_req_map which could potentially be expensive.
            bool descriptor_set_changed =
                !reduced_map.IsManyDescriptors() ||
                // Revalidate each time if the set has dynamic offsets
                state.per_set[set_index].dynamicOffsets.size() > 0 ||
                // Revalidate if descriptor set (or contents) has changed
                state.per_set[set_index].validated_set != descriptor_set ||
                state.per_set[set_index].validated_set_change_count != descriptor_set->GetChangeCount() ||
                (!disabled[image_layout_validation] &&
                 state.per_set[set_index].validated_set_image_layout_change_count != cb_node->image_layout_change_count);
            bool need_validate =
                descriptor_set_changed ||
                // Revalidate if previous bindingReqMap doesn't include new bindingReqMap
                !std::includes(state.per_set[set_index].validated_set_binding_req_map.begin(),
                               state.per_set[set_index].validated_set_binding_req_map.end(), binding_req_map.begin(),
                               binding_req_map.end());

            if (need_validate) {
                if (!descriptor_set_changed && reduced_map.IsManyDescriptors()) {
                    // Only validate the bindings that haven't already been validated
                    BindingReqMap delta_reqs;
                    std::set_difference(binding_req_map.begin(), binding_req_map.end(),
                                        state.per_set[set_index].validated_set_binding_req_map.begin(),
                                        state.per_set[set_index].validated_set_binding_req_map.end(),
                                        std::inserter(delta_reqs, delta_reqs.begin()));
                    result |= ValidateDrawState(descriptor_set, delta_reqs, state.per_set[set_index].dynamicOffsets, cb_node,
                                                cb_node->active_attachments.get(), *cb_node->active_subpasses.get(), function,
                                                vuid);
                } else {
                    result |= ValidateDrawState(descriptor_set, binding_req_map, state.per_set[set_index].dynamicOffsets, cb_node,
                                                cb_node->active_attachments.get(), *cb_node->active_subpasses.get(), function,
                                                vuid);
                }
            }
        }
    }

    // Check general pipeline state that needs to be validated at drawtime
    if (VK_PIPELINE_BIND_POINT_GRAPHICS == bind_point) {
        result |= ValidatePipelineDrawtimeState(state, cb_node, cmd_type, pipe, function);
    }

    // Verify if push constants have been set
    if (cb_node->push_constant_data_ranges) {
        if (pipeline_layout->push_constant_ranges != cb_node->push_constant_data_ranges) {
            LogObjectList objlist(cb_node->commandBuffer);
            objlist.add(cb_node->push_constant_pipeline_layout_set);
            objlist.add(pipeline_layout->layout);
            objlist.add(pipe->pipeline);
            result |= LogError(objlist, vuid.push_constants_set,
                               "The active push constants of %s aren't compatible with %s of active %s.",
                               report_data->FormatHandle(cb_node->push_constant_pipeline_layout_set).c_str(),
                               report_data->FormatHandle(pipeline_layout->layout).c_str(),
                               report_data->FormatHandle(pipe->pipeline).c_str());
        } else {
            for (const auto &stage : pipe->stage_state) {
                const auto *entrypoint =
                    FindEntrypointStruct(stage.shader_state.get(), stage.entry_point_name.c_str(), stage.stage_flag);
                if (!entrypoint || !entrypoint->push_constant_used_in_shader.IsUsed()) {
                    continue;
                }

                const auto it = cb_node->push_constant_data_update.find(stage.stage_flag);
                if (it == cb_node->push_constant_data_update.end()) {
                    // This error has been printed in ValidatePushConstantUsage.
                    break;
                }

                uint32_t issue_index = 0;
                const auto ret = ValidatePushConstantSetUpdate(it->second, entrypoint->push_constant_used_in_shader, issue_index);

                // "not set" error has been printed in ValidatePushConstantUsage.
                if (ret == PC_Byte_Not_Updated) {
                    const auto loc_descr = entrypoint->push_constant_used_in_shader.GetLocationDesc(issue_index);
                    LogObjectList objlist(cb_node->commandBuffer);
                    objlist.add(pipeline_layout->layout);
                    result |= LogError(objlist, vuid.push_constants_set, "Push-constant buffer:%s in %s of %s is not updated.",
                                       loc_descr.c_str(), string_VkShaderStageFlags(stage.stage_flag).c_str(),
                                       report_data->FormatHandle(pipeline_layout->layout).c_str());
                    break;
                }
            }
        }
    }
    return result;
}

bool CoreChecks::ValidatePipelineLocked(std::vector<std::shared_ptr<PIPELINE_STATE>> const &pPipelines, int pipelineIndex) const {
    bool skip = false;

    const PIPELINE_STATE *pipeline = pPipelines[pipelineIndex].get();

    // If create derivative bit is set, check that we've specified a base
    // pipeline correctly, and that the base pipeline was created to allow
    // derivatives.
    if (pipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_DERIVATIVE_BIT) {
        const PIPELINE_STATE *base_pipeline = nullptr;
        if (!((pipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) ^
              (pipeline->graphicsPipelineCI.basePipelineIndex != -1))) {
            // TODO: This check is a superset of VUID-VkGraphicsPipelineCreateInfo-flags-00724 and
            // TODO: VUID-VkGraphicsPipelineCreateInfo-flags-00725
            skip |= LogError(device, kVUID_Core_DrawState_InvalidPipelineCreateState,
                             "Invalid Pipeline CreateInfo[%d]: exactly one of base pipeline index and handle must be specified",
                             pipelineIndex);
        } else if (pipeline->graphicsPipelineCI.basePipelineIndex != -1) {
            if (pipeline->graphicsPipelineCI.basePipelineIndex >= pipelineIndex) {
                skip |= LogError(device, "VUID-vkCreateGraphicsPipelines-flags-00720",
                                 "Invalid Pipeline CreateInfo[%d]: base pipeline must occur earlier in array than derivative "
                                 "pipeline.",
                                 pipelineIndex);
            } else {
                base_pipeline = pPipelines[pipeline->graphicsPipelineCI.basePipelineIndex].get();
            }
        } else if (pipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) {
            base_pipeline = GetPipelineState(pipeline->graphicsPipelineCI.basePipelineHandle);
        }

        if (base_pipeline && !(base_pipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT)) {
            skip |= LogError(device, kVUID_Core_DrawState_InvalidPipelineCreateState,
                             "Invalid Pipeline CreateInfo[%d]: base pipeline does not allow derivatives.", pipelineIndex);
        }
    }

    // Check for portability errors
    if (ExtEnabled::kNotEnabled != device_extensions.vk_khr_portability_subset) {
        if ((VK_FALSE == enabled_features.portability_subset_features.triangleFans) &&
            (VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN == pipeline->topology_at_rasterizer)) {
            skip |= LogError(device, "VUID-VkPipelineInputAssemblyStateCreateInfo-triangleFans-04452",
                             "Invalid Pipeline CreateInfo[%d] (portability error): VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN is not "
                             "supported",
                             pipelineIndex);
        }

        // Validate vertex inputs
        for (const auto desc : pipeline->vertex_binding_descriptions_) {
            if ((desc.stride < phys_dev_ext_props.portability_props.minVertexInputBindingStrideAlignment) ||
                ((desc.stride % phys_dev_ext_props.portability_props.minVertexInputBindingStrideAlignment) != 0)) {
                skip |= LogError(
                    device, "VUID-VkVertexInputBindingDescription-stride-04456",
                    "Invalid Pipeline CreateInfo[%d] (portability error): Vertex input stride must be at least as large as and a "
                    "multiple of VkPhysicalDevicePortabilitySubsetPropertiesKHR::minVertexInputBindingStrideAlignment.",
                    pipelineIndex);
            }
        }

        // Validate vertex attributes
        if (VK_FALSE == enabled_features.portability_subset_features.vertexAttributeAccessBeyondStride) {
            for (const auto attrib : pipeline->vertex_attribute_descriptions_) {
                const auto vertex_binding_map_it = pipeline->vertex_binding_to_index_map_.find(attrib.binding);
                if (vertex_binding_map_it != pipeline->vertex_binding_to_index_map_.cend()) {
                    const auto desc = pipeline->vertex_binding_descriptions_[vertex_binding_map_it->second];
                    if ((attrib.offset + FormatElementSize(attrib.format)) > desc.stride) {
                        skip |= LogError(device, "VUID-VkVertexInputAttributeDescription-vertexAttributeAccessBeyondStride-04457",
                                         "Invalid Pipeline CreateInfo[%d] (portability error): (attribute.offset + "
                                         "sizeof(vertex_description.format)) is larger than the vertex stride",
                                         pipelineIndex);
                    }
                }
            }
        }

        // Validate polygon mode
        auto raster_state_ci = pipeline->graphicsPipelineCI.pRasterizationState;
        if ((VK_FALSE == enabled_features.portability_subset_features.pointPolygons) && raster_state_ci &&
            (VK_FALSE == raster_state_ci->rasterizerDiscardEnable) && (VK_POLYGON_MODE_POINT == raster_state_ci->polygonMode)) {
            skip |= LogError(device, "VUID-VkPipelineRasterizationStateCreateInfo-pointPolygons-04458",
                             "Invalid Pipeline CreateInfo[%d] (portability error): point polygons are not supported",
                             pipelineIndex);
        }
    }
    return skip;
}
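// Illustrative sketch (not part of the validation logic): the derivative-pipeline rule
// checked in ValidatePipelineLocked() above. Struct contents are hypothetical.
//
//   VkGraphicsPipelineCreateInfo ci = base_ci;
//   ci.flags = VK_PIPELINE_CREATE_DERIVATIVE_BIT;
//   ci.basePipelineHandle = base_pipeline;  // base must have been created with
//                                           // VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT
//   ci.basePipelineIndex = -1;              // exactly one of handle/index may be set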
VkPipelineColorBlendAttachmentState contains // only attachment state, so memcmp is best suited for the comparison if (memcmp(static_cast<const void *>(attachments), static_cast<const void *>(&attachments[i]), sizeof(attachments[0]))) { skip |= LogError(device, "VUID-VkPipelineColorBlendStateCreateInfo-pAttachments-00605", "Invalid Pipeline CreateInfo[%u]: If independent blend feature not enabled, all elements of " "pAttachments must be identical.", pipelineIndex); break; } } } } if (!enabled_features.core.logicOp && (pPipeline->graphicsPipelineCI.pColorBlendState->logicOpEnable != VK_FALSE)) { skip |= LogError( device, "VUID-VkPipelineColorBlendStateCreateInfo-logicOpEnable-00606", "Invalid Pipeline CreateInfo[%u]: If logic operations feature not enabled, logicOpEnable must be VK_FALSE.", pipelineIndex); } for (size_t i = 0; i < pPipeline->attachments.size(); i++) { if ((pPipeline->attachments[i].srcColorBlendFactor == VK_BLEND_FACTOR_SRC1_COLOR) || (pPipeline->attachments[i].srcColorBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR) || (pPipeline->attachments[i].srcColorBlendFactor == VK_BLEND_FACTOR_SRC1_ALPHA) || (pPipeline->attachments[i].srcColorBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA)) { if (!enabled_features.core.dualSrcBlend) { skip |= LogError( device, "VUID-VkPipelineColorBlendAttachmentState-srcColorBlendFactor-00608", "vkCreateGraphicsPipelines(): pPipelines[%d].pColorBlendState.pAttachments[" PRINTF_SIZE_T_SPECIFIER "].srcColorBlendFactor uses a dual-source blend factor (%d), but this device feature is not " "enabled.", pipelineIndex, i, pPipeline->attachments[i].srcColorBlendFactor); } } if ((pPipeline->attachments[i].dstColorBlendFactor == VK_BLEND_FACTOR_SRC1_COLOR) || (pPipeline->attachments[i].dstColorBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR) || (pPipeline->attachments[i].dstColorBlendFactor == VK_BLEND_FACTOR_SRC1_ALPHA) || (pPipeline->attachments[i].dstColorBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA)) { if (!enabled_features.core.dualSrcBlend) { skip |= LogError( device, "VUID-VkPipelineColorBlendAttachmentState-dstColorBlendFactor-00609", "vkCreateGraphicsPipelines(): pPipelines[%d].pColorBlendState.pAttachments[" PRINTF_SIZE_T_SPECIFIER "].dstColorBlendFactor uses a dual-source blend factor (%d), but this device feature is not " "enabled.", pipelineIndex, i, pPipeline->attachments[i].dstColorBlendFactor); } } if ((pPipeline->attachments[i].srcAlphaBlendFactor == VK_BLEND_FACTOR_SRC1_COLOR) || (pPipeline->attachments[i].srcAlphaBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR) || (pPipeline->attachments[i].srcAlphaBlendFactor == VK_BLEND_FACTOR_SRC1_ALPHA) || (pPipeline->attachments[i].srcAlphaBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA)) { if (!enabled_features.core.dualSrcBlend) { skip |= LogError( device, "VUID-VkPipelineColorBlendAttachmentState-srcAlphaBlendFactor-00610", "vkCreateGraphicsPipelines(): pPipelines[%d].pColorBlendState.pAttachments[" PRINTF_SIZE_T_SPECIFIER "].srcAlphaBlendFactor uses a dual-source blend factor (%d), but this device feature is not " "enabled.", pipelineIndex, i, pPipeline->attachments[i].srcAlphaBlendFactor); } } if ((pPipeline->attachments[i].dstAlphaBlendFactor == VK_BLEND_FACTOR_SRC1_COLOR) || (pPipeline->attachments[i].dstAlphaBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR) || (pPipeline->attachments[i].dstAlphaBlendFactor == VK_BLEND_FACTOR_SRC1_ALPHA) || (pPipeline->attachments[i].dstAlphaBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA)) { if 
(!enabled_features.core.dualSrcBlend) { skip |= LogError( device, "VUID-VkPipelineColorBlendAttachmentState-dstAlphaBlendFactor-00611", "vkCreateGraphicsPipelines(): pPipelines[%d].pColorBlendState.pAttachments[" PRINTF_SIZE_T_SPECIFIER "].dstAlphaBlendFactor uses a dual-source blend factor (%d), but this device feature is not " "enabled.", pipelineIndex, i, pPipeline->attachments[i].dstAlphaBlendFactor); } } } } if (ValidateGraphicsPipelineShaderState(pPipeline)) { skip = true; } // Each shader's stage must be unique if (pPipeline->duplicate_shaders) { for (uint32_t stage = VK_SHADER_STAGE_VERTEX_BIT; stage & VK_SHADER_STAGE_ALL_GRAPHICS; stage <<= 1) { if (pPipeline->duplicate_shaders & stage) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-stage-00726", "Invalid Pipeline CreateInfo[%u] State: Multiple shaders provided for stage %s", pipelineIndex, string_VkShaderStageFlagBits(VkShaderStageFlagBits(stage))); } } } if (device_extensions.vk_nv_mesh_shader) { // VS or mesh is required if (!(pPipeline->active_shaders & (VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_MESH_BIT_NV))) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-stage-02096", "Invalid Pipeline CreateInfo[%u] State: Vertex Shader or Mesh Shader required.", pipelineIndex); } // Can't mix mesh and VTG if ((pPipeline->active_shaders & (VK_SHADER_STAGE_MESH_BIT_NV | VK_SHADER_STAGE_TASK_BIT_NV)) && (pPipeline->active_shaders & (VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_GEOMETRY_BIT | VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT))) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pStages-02095", "Invalid Pipeline CreateInfo[%u] State: Geometric shader stages must either be all mesh (mesh | task) " "or all VTG (vertex, tess control, tess eval, geom).", pipelineIndex); } } else { // VS is required if (!(pPipeline->active_shaders & VK_SHADER_STAGE_VERTEX_BIT)) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-stage-00727", "Invalid Pipeline CreateInfo[%u] State: Vertex Shader required.", pipelineIndex); } } if (!enabled_features.mesh_shader.meshShader && (pPipeline->active_shaders & VK_SHADER_STAGE_MESH_BIT_NV)) { skip |= LogError(device, "VUID-VkPipelineShaderStageCreateInfo-stage-02091", "Invalid Pipeline CreateInfo[%u] State: Mesh Shader not supported.", pipelineIndex); } if (!enabled_features.mesh_shader.taskShader && (pPipeline->active_shaders & VK_SHADER_STAGE_TASK_BIT_NV)) { skip |= LogError(device, "VUID-VkPipelineShaderStageCreateInfo-stage-02092", "Invalid Pipeline CreateInfo[%u] State: Task Shader not supported.", pipelineIndex); } // Either both or neither TC/TE shaders should be defined bool has_control = (pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) != 0; bool has_eval = (pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) != 0; if (has_control && !has_eval) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pStages-00729", "Invalid Pipeline CreateInfo[%u] State: TE and TC shaders must be included or excluded as a pair.", pipelineIndex); } if (!has_control && has_eval) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pStages-00730", "Invalid Pipeline CreateInfo[%u] State: TE and TC shaders must be included or excluded as a pair.", pipelineIndex); } // Compute shaders should be specified independent of Gfx shaders if (pPipeline->active_shaders & VK_SHADER_STAGE_COMPUTE_BIT) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-stage-00728", "Invalid 
Pipeline CreateInfo[%u] State: Do not specify Compute Shader for Gfx Pipeline.", pipelineIndex); } if ((pPipeline->active_shaders & VK_SHADER_STAGE_VERTEX_BIT) && !pPipeline->graphicsPipelineCI.pInputAssemblyState) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pStages-02098", "Invalid Pipeline CreateInfo[%u] State: Missing pInputAssemblyState.", pipelineIndex); } // VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive topology is only valid for tessellation pipelines. // Mismatching primitive topology and tessellation fails graphics pipeline creation. if (has_control && has_eval && (!pPipeline->graphicsPipelineCI.pInputAssemblyState || pPipeline->graphicsPipelineCI.pInputAssemblyState->topology != VK_PRIMITIVE_TOPOLOGY_PATCH_LIST)) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pStages-00736", "Invalid Pipeline CreateInfo[%u] State: VK_PRIMITIVE_TOPOLOGY_PATCH_LIST must be set as IA topology for " "tessellation pipelines.", pipelineIndex); } if (pPipeline->graphicsPipelineCI.pInputAssemblyState) { if (pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_PATCH_LIST) { if (!has_control || !has_eval) { skip |= LogError( device, "VUID-VkGraphicsPipelineCreateInfo-topology-00737", "Invalid Pipeline CreateInfo[%u] State: VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive topology is only valid " "for tessellation pipelines.", pipelineIndex); } } if ((pPipeline->graphicsPipelineCI.pInputAssemblyState->primitiveRestartEnable == VK_TRUE) && (pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_POINT_LIST || pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_LIST || pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST || pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY || pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY || pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_PATCH_LIST)) { skip |= LogError( device, "VUID-VkPipelineInputAssemblyStateCreateInfo-topology-00428", "vkCreateGraphicsPipelines() pCreateInfo[%u]: topology is %s and primitiveRestartEnable is VK_TRUE. It is invalid.", pipelineIndex, string_VkPrimitiveTopology(pPipeline->graphicsPipelineCI.pInputAssemblyState->topology)); } if ((enabled_features.core.geometryShader == VK_FALSE) && (pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY || pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY || pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY || pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY)) { skip |= LogError(device, "VUID-VkPipelineInputAssemblyStateCreateInfo-topology-00429", "vkCreateGraphicsPipelines() pCreateInfo[%u]: topology is %s and geometry shaders feature is not enabled. 
" "It is invalid.", pipelineIndex, string_VkPrimitiveTopology(pPipeline->graphicsPipelineCI.pInputAssemblyState->topology)); } if ((enabled_features.core.tessellationShader == VK_FALSE) && (pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_PATCH_LIST)) { skip |= LogError(device, "VUID-VkPipelineInputAssemblyStateCreateInfo-topology-00430", "vkCreateGraphicsPipelines() pCreateInfo[%u]: topology is %s and tessellation shaders feature is not " "enabled. It is invalid.", pipelineIndex, string_VkPrimitiveTopology(pPipeline->graphicsPipelineCI.pInputAssemblyState->topology)); } } // If a rasterization state is provided... if (pPipeline->graphicsPipelineCI.pRasterizationState) { if ((pPipeline->graphicsPipelineCI.pRasterizationState->depthClampEnable == VK_TRUE) && (!enabled_features.core.depthClamp)) { skip |= LogError(device, "VUID-VkPipelineRasterizationStateCreateInfo-depthClampEnable-00782", "vkCreateGraphicsPipelines() pCreateInfo[%u]: the depthClamp device feature is disabled: the " "depthClampEnable member " "of the VkPipelineRasterizationStateCreateInfo structure must be set to VK_FALSE.", pipelineIndex); } if (!IsDynamic(pPipeline, VK_DYNAMIC_STATE_DEPTH_BIAS) && (pPipeline->graphicsPipelineCI.pRasterizationState->depthBiasClamp != 0.0) && (!enabled_features.core.depthBiasClamp)) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-00754", "vkCreateGraphicsPipelines() pCreateInfo[%u]: the depthBiasClamp device feature is disabled: the " "depthBiasClamp member " "of the VkPipelineRasterizationStateCreateInfo structure must be set to 0.0 unless the " "VK_DYNAMIC_STATE_DEPTH_BIAS dynamic state is enabled", pipelineIndex); } // If rasterization is enabled... if (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE) { if ((pPipeline->graphicsPipelineCI.pMultisampleState->alphaToOneEnable == VK_TRUE) && (!enabled_features.core.alphaToOne)) { skip |= LogError( device, "VUID-VkPipelineMultisampleStateCreateInfo-alphaToOneEnable-00785", "vkCreateGraphicsPipelines() pCreateInfo[%u]: the alphaToOne device feature is disabled: the alphaToOneEnable " "member of the VkPipelineMultisampleStateCreateInfo structure must be set to VK_FALSE.", pipelineIndex); } // If subpass uses a depth/stencil attachment, pDepthStencilState must be a pointer to a valid structure if (subpass_desc && subpass_desc->pDepthStencilAttachment && subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) { if (!pPipeline->graphicsPipelineCI.pDepthStencilState) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-rasterizerDiscardEnable-00752", "Invalid Pipeline CreateInfo[%u] State: pDepthStencilState is NULL when rasterization is enabled " "and subpass uses a depth/stencil attachment.", pipelineIndex); } else if (pPipeline->graphicsPipelineCI.pDepthStencilState->depthBoundsTestEnable == VK_TRUE) { if (!enabled_features.core.depthBounds) { skip |= LogError(device, "VUID-VkPipelineDepthStencilStateCreateInfo-depthBoundsTestEnable-00598", "vkCreateGraphicsPipelines() pCreateInfo[%u]: the depthBounds device feature is disabled: the " "depthBoundsTestEnable member of the VkPipelineDepthStencilStateCreateInfo structure must be " "set to VK_FALSE.", pipelineIndex); } // The extension was not created with a feature bit whichs prevents displaying the 2 variations of the VUIDs if (!device_extensions.vk_ext_depth_range_unrestricted && !IsDynamic(pPipeline, VK_DYNAMIC_STATE_DEPTH_BOUNDS)) { const float minDepthBounds = 
pPipeline->graphicsPipelineCI.pDepthStencilState->minDepthBounds;
                        const float maxDepthBounds = pPipeline->graphicsPipelineCI.pDepthStencilState->maxDepthBounds;
                        // Also VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-00755
                        if (!(minDepthBounds >= 0.0) || !(minDepthBounds <= 1.0)) {
                            skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-02510",
                                             "vkCreateGraphicsPipelines() pCreateInfo[%u]: VK_EXT_depth_range_unrestricted extension "
                                             "is not enabled, VK_DYNAMIC_STATE_DEPTH_BOUNDS is not used, depthBoundsTestEnable is "
                                             "true, and pDepthStencilState::minDepthBounds (=%f) is not within the [0.0, 1.0] range.",
                                             pipelineIndex, minDepthBounds);
                        }
                        if (!(maxDepthBounds >= 0.0) || !(maxDepthBounds <= 1.0)) {
                            skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-02510",
                                             "vkCreateGraphicsPipelines() pCreateInfo[%u]: VK_EXT_depth_range_unrestricted extension "
                                             "is not enabled, VK_DYNAMIC_STATE_DEPTH_BOUNDS is not used, depthBoundsTestEnable is "
                                             "true, and pDepthStencilState::maxDepthBounds (=%f) is not within the [0.0, 1.0] range.",
                                             pipelineIndex, maxDepthBounds);
                        }
                    }
                }
            }
            // If subpass uses color attachments, pColorBlendState must be valid pointer
            if (subpass_desc) {
                uint32_t color_attachment_count = 0;
                for (uint32_t i = 0; i < subpass_desc->colorAttachmentCount; ++i) {
                    if (subpass_desc->pColorAttachments[i].attachment != VK_ATTACHMENT_UNUSED) {
                        ++color_attachment_count;
                    }
                }
                if (color_attachment_count > 0 && pPipeline->graphicsPipelineCI.pColorBlendState == nullptr) {
                    skip |= LogError(
                        device, "VUID-VkGraphicsPipelineCreateInfo-rasterizerDiscardEnable-00753",
                        "Invalid Pipeline CreateInfo[%u] State: pColorBlendState is NULL when rasterization is enabled and "
                        "subpass uses color attachments.",
                        pipelineIndex);
                }
            }
        }
    }

    if ((pPipeline->active_shaders & VK_SHADER_STAGE_VERTEX_BIT) && !pPipeline->graphicsPipelineCI.pVertexInputState) {
        skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pStages-02097",
                         "Invalid Pipeline CreateInfo[%u] State: Missing pVertexInputState.", pipelineIndex);
    }

    auto vi = pPipeline->graphicsPipelineCI.pVertexInputState;
    if (vi != NULL) {
        for (uint32_t j = 0; j < vi->vertexAttributeDescriptionCount; j++) {
            VkFormat format = vi->pVertexAttributeDescriptions[j].format;
            // Internal call to get format info. Still goes through layers, could potentially go directly to ICD.
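            // Each attribute's format must advertise VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT in its bufferFeatures;
            // e.g. common float formats such as VK_FORMAT_R32G32B32A32_SFLOAT carry it, while block-compressed
            // formats generally do not.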
VkFormatProperties properties; DispatchGetPhysicalDeviceFormatProperties(physical_device, format, &properties); if ((properties.bufferFeatures & VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT) == 0) { skip |= LogError(device, "VUID-VkVertexInputAttributeDescription-format-00623", "vkCreateGraphicsPipelines: pCreateInfo[%d].pVertexInputState->vertexAttributeDescriptions[%d].format " "(%s) is not a supported vertex buffer format.", pipelineIndex, j, string_VkFormat(format)); } } } if (subpass_desc && pPipeline->graphicsPipelineCI.pMultisampleState) { const safe_VkPipelineMultisampleStateCreateInfo *multisample_state = pPipeline->graphicsPipelineCI.pMultisampleState; auto accum_color_samples = [subpass_desc, pPipeline](uint32_t &samples) { for (uint32_t i = 0; i < subpass_desc->colorAttachmentCount; i++) { const auto attachment = subpass_desc->pColorAttachments[i].attachment; if (attachment != VK_ATTACHMENT_UNUSED) { samples |= static_cast<uint32_t>(pPipeline->rp_state->createInfo.pAttachments[attachment].samples); } } }; if (!(device_extensions.vk_amd_mixed_attachment_samples || device_extensions.vk_nv_framebuffer_mixed_samples)) { uint32_t raster_samples = static_cast<uint32_t>(GetNumSamples(pPipeline)); uint32_t subpass_num_samples = 0; accum_color_samples(subpass_num_samples); if (subpass_desc->pDepthStencilAttachment && subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) { const auto attachment = subpass_desc->pDepthStencilAttachment->attachment; subpass_num_samples |= static_cast<uint32_t>(pPipeline->rp_state->createInfo.pAttachments[attachment].samples); } // subpass_num_samples is 0 when the subpass has no attachments or if all attachments are VK_ATTACHMENT_UNUSED. // Only validate the value of subpass_num_samples if the subpass has attachments that are not VK_ATTACHMENT_UNUSED. 
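        // Example: one VK_SAMPLE_COUNT_4_BIT color attachment plus a VK_SAMPLE_COUNT_4_BIT depth attachment
        // ORs to 0x4, which must equal rasterizationSamples; a 2x color with a 4x depth attachment ORs to
        // 0x6, which fails the IsPowerOfTwo() test below.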
if (subpass_num_samples && (!IsPowerOfTwo(subpass_num_samples) || (subpass_num_samples != raster_samples))) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-subpass-00757", "vkCreateGraphicsPipelines: pCreateInfo[%d].pMultisampleState->rasterizationSamples (%u) " "does not match the number of samples of the RenderPass color and/or depth attachment.", pipelineIndex, raster_samples); } } if (device_extensions.vk_amd_mixed_attachment_samples) { VkSampleCountFlagBits max_sample_count = static_cast<VkSampleCountFlagBits>(0); for (uint32_t i = 0; i < subpass_desc->colorAttachmentCount; ++i) { if (subpass_desc->pColorAttachments[i].attachment != VK_ATTACHMENT_UNUSED) { max_sample_count = std::max( max_sample_count, pPipeline->rp_state->createInfo.pAttachments[subpass_desc->pColorAttachments[i].attachment].samples); } } if (subpass_desc->pDepthStencilAttachment && subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) { max_sample_count = std::max( max_sample_count, pPipeline->rp_state->createInfo.pAttachments[subpass_desc->pDepthStencilAttachment->attachment].samples); } if ((pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE) && (max_sample_count != static_cast<VkSampleCountFlagBits>(0)) && (multisample_state->rasterizationSamples != max_sample_count)) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-subpass-01505", "vkCreateGraphicsPipelines: pCreateInfo[%d].pMultisampleState->rasterizationSamples (%s) != max " "attachment samples (%s) used in subpass %u.", pipelineIndex, string_VkSampleCountFlagBits(multisample_state->rasterizationSamples), string_VkSampleCountFlagBits(max_sample_count), pPipeline->graphicsPipelineCI.subpass); } } if (device_extensions.vk_nv_framebuffer_mixed_samples) { uint32_t raster_samples = static_cast<uint32_t>(GetNumSamples(pPipeline)); uint32_t subpass_color_samples = 0; accum_color_samples(subpass_color_samples); if (subpass_desc->pDepthStencilAttachment && subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) { const auto attachment = subpass_desc->pDepthStencilAttachment->attachment; const uint32_t subpass_depth_samples = static_cast<uint32_t>(pPipeline->rp_state->createInfo.pAttachments[attachment].samples); if (pPipeline->graphicsPipelineCI.pDepthStencilState) { const bool ds_test_enabled = (pPipeline->graphicsPipelineCI.pDepthStencilState->depthTestEnable == VK_TRUE) || (pPipeline->graphicsPipelineCI.pDepthStencilState->depthBoundsTestEnable == VK_TRUE) || (pPipeline->graphicsPipelineCI.pDepthStencilState->stencilTestEnable == VK_TRUE); if (ds_test_enabled && (!IsPowerOfTwo(subpass_depth_samples) || (raster_samples != subpass_depth_samples))) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-subpass-01411", "vkCreateGraphicsPipelines: pCreateInfo[%d].pMultisampleState->rasterizationSamples (%u) " "does not match the number of samples of the RenderPass depth attachment (%u).", pipelineIndex, raster_samples, subpass_depth_samples); } } } if (IsPowerOfTwo(subpass_color_samples)) { if (raster_samples < subpass_color_samples) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-subpass-01412", "vkCreateGraphicsPipelines: pCreateInfo[%d].pMultisampleState->rasterizationSamples (%u) " "is not greater or equal to the number of samples of the RenderPass color attachment (%u).", pipelineIndex, raster_samples, subpass_color_samples); } if (multisample_state) { if ((raster_samples > subpass_color_samples) && (multisample_state->sampleShadingEnable == 
VK_TRUE)) { skip |= LogError(device, "VUID-VkPipelineMultisampleStateCreateInfo-rasterizationSamples-01415", "vkCreateGraphicsPipelines: pCreateInfo[%d].pMultisampleState->sampleShadingEnable must be " "VK_FALSE when " "pCreateInfo[%d].pMultisampleState->rasterizationSamples (%u) is greater than the number of " "samples of the " "subpass color attachment (%u).", pipelineIndex, pipelineIndex, raster_samples, subpass_color_samples); } const auto *coverage_modulation_state = LvlFindInChain<VkPipelineCoverageModulationStateCreateInfoNV>(multisample_state->pNext); if (coverage_modulation_state && (coverage_modulation_state->coverageModulationTableEnable == VK_TRUE)) { if (coverage_modulation_state->coverageModulationTableCount != (raster_samples / subpass_color_samples)) { skip |= LogError( device, "VUID-VkPipelineCoverageModulationStateCreateInfoNV-coverageModulationTableEnable-01405", "vkCreateGraphicsPipelines: pCreateInfos[%d] VkPipelineCoverageModulationStateCreateInfoNV " "coverageModulationTableCount of %u is invalid.", pipelineIndex, coverage_modulation_state->coverageModulationTableCount); } } } } } if (device_extensions.vk_nv_coverage_reduction_mode) { uint32_t raster_samples = static_cast<uint32_t>(GetNumSamples(pPipeline)); uint32_t subpass_color_samples = 0; uint32_t subpass_depth_samples = 0; accum_color_samples(subpass_color_samples); if (subpass_desc->pDepthStencilAttachment && subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) { const auto attachment = subpass_desc->pDepthStencilAttachment->attachment; subpass_depth_samples = static_cast<uint32_t>(pPipeline->rp_state->createInfo.pAttachments[attachment].samples); } if (multisample_state && IsPowerOfTwo(subpass_color_samples) && (subpass_depth_samples == 0 || IsPowerOfTwo(subpass_depth_samples))) { const auto *coverage_reduction_state = LvlFindInChain<VkPipelineCoverageReductionStateCreateInfoNV>(multisample_state->pNext); if (coverage_reduction_state) { const VkCoverageReductionModeNV coverage_reduction_mode = coverage_reduction_state->coverageReductionMode; uint32_t combination_count = 0; std::vector<VkFramebufferMixedSamplesCombinationNV> combinations; DispatchGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV(physical_device, &combination_count, nullptr); combinations.resize(combination_count); DispatchGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV(physical_device, &combination_count, &combinations[0]); bool combination_found = false; for (const auto &combination : combinations) { if (coverage_reduction_mode == combination.coverageReductionMode && raster_samples == combination.rasterizationSamples && subpass_depth_samples == combination.depthStencilSamples && subpass_color_samples == combination.colorSamples) { combination_found = true; break; } } if (!combination_found) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-coverageReductionMode-02722", "vkCreateGraphicsPipelines: pCreateInfos[%d] the specified combination of coverage " "reduction mode (%s), pMultisampleState->rasterizationSamples (%u), sample counts for " "the subpass color and depth/stencil attachments is not a valid combination returned by " "vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV.", pipelineIndex, string_VkCoverageReductionModeNV(coverage_reduction_mode)); } } } } if (device_extensions.vk_nv_fragment_coverage_to_color) { const auto coverage_to_color_state = LvlFindInChain<VkPipelineCoverageToColorStateCreateInfoNV>(multisample_state); if (coverage_to_color_state && 
coverage_to_color_state->coverageToColorEnable == VK_TRUE) {
                bool attachment_is_valid = false;
                std::string error_detail;

                if (coverage_to_color_state->coverageToColorLocation < subpass_desc->colorAttachmentCount) {
                    const auto color_attachment_ref =
                        subpass_desc->pColorAttachments[coverage_to_color_state->coverageToColorLocation];
                    if (color_attachment_ref.attachment != VK_ATTACHMENT_UNUSED) {
                        const auto color_attachment = pPipeline->rp_state->createInfo.pAttachments[color_attachment_ref.attachment];

                        switch (color_attachment.format) {
                            case VK_FORMAT_R8_UINT:
                            case VK_FORMAT_R8_SINT:
                            case VK_FORMAT_R16_UINT:
                            case VK_FORMAT_R16_SINT:
                            case VK_FORMAT_R32_UINT:
                            case VK_FORMAT_R32_SINT:
                                attachment_is_valid = true;
                                break;
                            default:
                                std::ostringstream str;
                                str << "references an attachment with an invalid format ("
                                    << string_VkFormat(color_attachment.format) << ").";
                                error_detail = str.str();
                                break;
                        }
                    } else {
                        std::ostringstream str;
                        str << "references an invalid attachment. The subpass pColorAttachments["
                            << coverage_to_color_state->coverageToColorLocation
                            << "].attachment has the value VK_ATTACHMENT_UNUSED.";
                        error_detail = str.str();
                    }
                } else {
                    std::ostringstream str;
                    str << "references a non-existing attachment since the subpass colorAttachmentCount is "
                        << subpass_desc->colorAttachmentCount << ".";
                    error_detail = str.str();
                }

                if (!attachment_is_valid) {
                    skip |= LogError(device, "VUID-VkPipelineCoverageToColorStateCreateInfoNV-coverageToColorEnable-01404",
                                     "vkCreateGraphicsPipelines: pCreateInfos[%" PRId32
                                     "].pMultisampleState VkPipelineCoverageToColorStateCreateInfoNV "
                                     "coverageToColorLocation = %" PRIu32 " %s",
                                     pipelineIndex, coverage_to_color_state->coverageToColorLocation, error_detail.c_str());
                }
            }
        }

        if (device_extensions.vk_ext_sample_locations) {
            const VkPipelineSampleLocationsStateCreateInfoEXT *sample_location_state =
                LvlFindInChain<VkPipelineSampleLocationsStateCreateInfoEXT>(multisample_state->pNext);
            if (sample_location_state != nullptr) {
                if ((sample_location_state->sampleLocationsEnable == VK_TRUE) &&
                    (IsDynamic(pPipeline, VK_DYNAMIC_STATE_SAMPLE_LOCATIONS_EXT) == false)) {
                    const VkSampleLocationsInfoEXT sample_location_info = sample_location_state->sampleLocationsInfo;
                    skip |= ValidateSampleLocationsInfo(&sample_location_info, "vkCreateGraphicsPipelines");
                    const VkExtent2D grid_size = sample_location_info.sampleLocationGridSize;

                    VkMultisamplePropertiesEXT multisample_prop;
                    DispatchGetPhysicalDeviceMultisamplePropertiesEXT(physical_device, multisample_state->rasterizationSamples,
                                                                      &multisample_prop);
                    const VkExtent2D max_grid_size = multisample_prop.maxSampleLocationGridSize;

                    // Note order of "divide" in "sampleLocationsInfo must evenly divide VkMultisamplePropertiesEXT"
                    if (SafeModulo(max_grid_size.width, grid_size.width) != 0) {
                        skip |= LogError(
                            device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-01521",
                            "vkCreateGraphicsPipelines() pCreateInfo[%u]: Because there is no dynamic state for Sample Location "
                            "and sampleLocationEnable is true, the "
                            "VkPipelineSampleLocationsStateCreateInfoEXT::sampleLocationsInfo::sampleLocationGridSize.width (%u) "
                            "must be evenly divided by VkMultisamplePropertiesEXT::sampleLocationGridSize.width (%u).",
                            pipelineIndex, grid_size.width, max_grid_size.width);
                    }
                    if (SafeModulo(max_grid_size.height, grid_size.height) != 0) {
                        skip |= LogError(
                            device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-01522",
                            "vkCreateGraphicsPipelines() pCreateInfo[%u]: Because there is no dynamic state for Sample Location "
                            "and sampleLocationEnable is true, the "
"VkPipelineSampleLocationsStateCreateInfoEXT::sampleLocationsInfo::sampleLocationGridSize.height (%u) " "must be evenly divided by VkMultisamplePropertiesEXT::sampleLocationGridSize.height (%u).", pipelineIndex, grid_size.height, max_grid_size.height); } if (sample_location_info.sampleLocationsPerPixel != multisample_state->rasterizationSamples) { skip |= LogError( device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-01523", "vkCreateGraphicsPipelines() pCreateInfo[%u]: Because there is no dynamic state for Sample Location " "and sampleLocationEnable is true, the " "VkPipelineSampleLocationsStateCreateInfoEXT::sampleLocationsInfo::sampleLocationsPerPixel (%s) must " "be the same as the VkPipelineMultisampleStateCreateInfo::rasterizationSamples (%s).", pipelineIndex, string_VkSampleCountFlagBits(sample_location_info.sampleLocationsPerPixel), string_VkSampleCountFlagBits(multisample_state->rasterizationSamples)); } } } } } skip |= ValidatePipelineCacheControlFlags(pPipeline->graphicsPipelineCI.flags, pipelineIndex, "vkCreateGraphicsPipelines", "VUID-VkGraphicsPipelineCreateInfo-pipelineCreationCacheControl-02878"); // VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-03378 if (!enabled_features.extended_dynamic_state_features.extendedDynamicState && (IsDynamic(pPipeline, VK_DYNAMIC_STATE_CULL_MODE_EXT) || IsDynamic(pPipeline, VK_DYNAMIC_STATE_FRONT_FACE_EXT) || IsDynamic(pPipeline, VK_DYNAMIC_STATE_PRIMITIVE_TOPOLOGY_EXT) || IsDynamic(pPipeline, VK_DYNAMIC_STATE_VIEWPORT_WITH_COUNT_EXT) || IsDynamic(pPipeline, VK_DYNAMIC_STATE_SCISSOR_WITH_COUNT_EXT) || IsDynamic(pPipeline, VK_DYNAMIC_STATE_VERTEX_INPUT_BINDING_STRIDE_EXT) || IsDynamic(pPipeline, VK_DYNAMIC_STATE_DEPTH_TEST_ENABLE_EXT) || IsDynamic(pPipeline, VK_DYNAMIC_STATE_DEPTH_WRITE_ENABLE_EXT) || IsDynamic(pPipeline, VK_DYNAMIC_STATE_DEPTH_COMPARE_OP_EXT) || IsDynamic(pPipeline, VK_DYNAMIC_STATE_DEPTH_BOUNDS_TEST_ENABLE_EXT) || IsDynamic(pPipeline, VK_DYNAMIC_STATE_STENCIL_TEST_ENABLE_EXT) || IsDynamic(pPipeline, VK_DYNAMIC_STATE_STENCIL_OP_EXT))) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-03378", "Extended dynamic state used by the extendedDynamicState feature is not enabled"); } const VkPipelineFragmentShadingRateStateCreateInfoKHR *fragment_shading_rate_state = LvlFindInChain<VkPipelineFragmentShadingRateStateCreateInfoKHR>(pPipeline->graphicsPipelineCI.pNext); if (fragment_shading_rate_state && !IsDynamic(pPipeline, VK_DYNAMIC_STATE_FRAGMENT_SHADING_RATE_KHR)) { const char *struct_name = "VkPipelineFragmentShadingRateStateCreateInfoKHR"; if (fragment_shading_rate_state->fragmentSize.width == 0) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicState-04494", "vkCreateGraphicsPipelines: Fragment width of %u has been specified in %s.", fragment_shading_rate_state->fragmentSize.width, struct_name); } if (fragment_shading_rate_state->fragmentSize.height == 0) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicState-04495", "vkCreateGraphicsPipelines: Fragment height of %u has been specified in %s.", fragment_shading_rate_state->fragmentSize.height, struct_name); } if (fragment_shading_rate_state->fragmentSize.width != 0 && !IsPowerOfTwo(fragment_shading_rate_state->fragmentSize.width)) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicState-04496", "vkCreateGraphicsPipelines: Non-power-of-two fragment width of %u has been specified in %s.", fragment_shading_rate_state->fragmentSize.width, struct_name); } if 
(fragment_shading_rate_state->fragmentSize.height != 0 && !IsPowerOfTwo(fragment_shading_rate_state->fragmentSize.height)) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicState-04497", "vkCreateGraphicsPipelines: Non-power-of-two fragment height of %u has been specified in %s.", fragment_shading_rate_state->fragmentSize.height, struct_name); } if (fragment_shading_rate_state->fragmentSize.width > 4) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicState-04498", "vkCreateGraphicsPipelines: Fragment width of %u specified in %s is too large.", fragment_shading_rate_state->fragmentSize.width, struct_name); } if (fragment_shading_rate_state->fragmentSize.height > 4) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicState-04499", "vkCreateGraphicsPipelines: Fragment height of %u specified in %s is too large", fragment_shading_rate_state->fragmentSize.height, struct_name); } if (!enabled_features.fragment_shading_rate_features.pipelineFragmentShadingRate && fragment_shading_rate_state->fragmentSize.width != 1) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicState-04500", "vkCreateGraphicsPipelines: Pipeline fragment width of %u has been specified in %s, but " "pipelineFragmentShadingRate is not enabled", fragment_shading_rate_state->fragmentSize.width, struct_name); } if (!enabled_features.fragment_shading_rate_features.pipelineFragmentShadingRate && fragment_shading_rate_state->fragmentSize.height != 1) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicState-04500", "vkCreateGraphicsPipelines: Pipeline fragment height of %u has been specified in %s, but " "pipelineFragmentShadingRate is not enabled", fragment_shading_rate_state->fragmentSize.height, struct_name); } if (!enabled_features.fragment_shading_rate_features.primitiveFragmentShadingRate && fragment_shading_rate_state->combinerOps[0] != VK_FRAGMENT_SHADING_RATE_COMBINER_OP_KEEP_KHR) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicState-04501", "vkCreateGraphicsPipelines: First combiner operation of %s has been specified in %s, but " "primitiveFragmentShadingRate is not enabled", string_VkFragmentShadingRateCombinerOpKHR(fragment_shading_rate_state->combinerOps[0]), struct_name); } if (!enabled_features.fragment_shading_rate_features.attachmentFragmentShadingRate && fragment_shading_rate_state->combinerOps[1] != VK_FRAGMENT_SHADING_RATE_COMBINER_OP_KEEP_KHR) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicState-04502", "vkCreateGraphicsPipelines: Second combiner operation of %s has been specified in %s, but " "attachmentFragmentShadingRate is not enabled", string_VkFragmentShadingRateCombinerOpKHR(fragment_shading_rate_state->combinerOps[1]), struct_name); } if (!phys_dev_ext_props.fragment_shading_rate_props.fragmentShadingRateNonTrivialCombinerOps && (fragment_shading_rate_state->combinerOps[0] != VK_FRAGMENT_SHADING_RATE_COMBINER_OP_KEEP_KHR && fragment_shading_rate_state->combinerOps[0] != VK_FRAGMENT_SHADING_RATE_COMBINER_OP_REPLACE_KHR)) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-fragmentShadingRateNonTrivialCombinerOps-04506", "vkCreateGraphicsPipelines: First combiner operation of %s has been specified in %s, but " "fragmentShadingRateNonTrivialCombinerOps is not supported", string_VkFragmentShadingRateCombinerOpKHR(fragment_shading_rate_state->combinerOps[0]), struct_name); } if 
(!phys_dev_ext_props.fragment_shading_rate_props.fragmentShadingRateNonTrivialCombinerOps && (fragment_shading_rate_state->combinerOps[1] != VK_FRAGMENT_SHADING_RATE_COMBINER_OP_KEEP_KHR && fragment_shading_rate_state->combinerOps[1] != VK_FRAGMENT_SHADING_RATE_COMBINER_OP_REPLACE_KHR)) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-fragmentShadingRateNonTrivialCombinerOps-04506", "vkCreateGraphicsPipelines: Second combiner operation of %s has been specified in %s, but " "fragmentShadingRateNonTrivialCombinerOps is not supported", string_VkFragmentShadingRateCombinerOpKHR(fragment_shading_rate_state->combinerOps[1]), struct_name); } } return skip; } // Block of code at start here specifically for managing/tracking DSs // Validate that given set is valid and that it's not being used by an in-flight CmdBuffer // func_str is the name of the calling function // Return false if no errors occur // Return true if validation error occurs and callback returns true (to skip upcoming API call down the chain) bool CoreChecks::ValidateIdleDescriptorSet(VkDescriptorSet set, const char *func_str) const { if (disabled[idle_descriptor_set]) return false; bool skip = false; auto set_node = setMap.find(set); if (set_node != setMap.end()) { // TODO : This covers various error cases so should pass error enum into this function and use passed in enum here if (set_node->second->in_use.load()) { skip |= LogError(set, "VUID-vkFreeDescriptorSets-pDescriptorSets-00309", "Cannot call %s() on %s that is in use by a command buffer.", func_str, report_data->FormatHandle(set).c_str()); } } return skip; } // If a renderpass is active, verify that the given command type is appropriate for current subpass state bool CoreChecks::ValidateCmdSubpassState(const CMD_BUFFER_STATE *pCB, const CMD_TYPE cmd_type) const { if (!pCB->activeRenderPass) return false; bool skip = false; if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS && (cmd_type != CMD_EXECUTECOMMANDS && cmd_type != CMD_NEXTSUBPASS && cmd_type != CMD_ENDRENDERPASS && cmd_type != CMD_NEXTSUBPASS2 && cmd_type != CMD_ENDRENDERPASS2)) { skip |= LogError(pCB->commandBuffer, kVUID_Core_DrawState_InvalidCommandBuffer, "Commands cannot be called in a subpass using secondary command buffers."); } else if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_INLINE && cmd_type == CMD_EXECUTECOMMANDS) { skip |= LogError(pCB->commandBuffer, kVUID_Core_DrawState_InvalidCommandBuffer, "vkCmdExecuteCommands() cannot be called in a subpass using inline commands."); } return skip; } bool CoreChecks::ValidateCmdQueueFlags(const CMD_BUFFER_STATE *cb_node, const char *caller_name, VkQueueFlags required_flags, const char *error_code) const { auto pool = cb_node->command_pool.get(); if (pool) { const uint32_t queue_family_index = pool->queueFamilyIndex; const VkQueueFlags queue_flags = GetPhysicalDeviceState()->queue_family_properties[queue_family_index].queueFlags; if (!(required_flags & queue_flags)) { string required_flags_string; for (auto flag : {VK_QUEUE_TRANSFER_BIT, VK_QUEUE_GRAPHICS_BIT, VK_QUEUE_COMPUTE_BIT, VK_QUEUE_SPARSE_BINDING_BIT, VK_QUEUE_PROTECTED_BIT}) { if (flag & required_flags) { if (required_flags_string.size()) { required_flags_string += " or "; } required_flags_string += string_VkQueueFlagBits(flag); } } return LogError(cb_node->commandBuffer, error_code, "%s(): Called in command buffer %s which was allocated from the command pool %s which was created with " "queueFamilyIndex %u which doesn't contain the required %s 
capability flags.", caller_name, report_data->FormatHandle(cb_node->commandBuffer).c_str(), report_data->FormatHandle(pool->commandPool).c_str(), queue_family_index, required_flags_string.c_str()); } } return false; } bool CoreChecks::ValidateSampleLocationsInfo(const VkSampleLocationsInfoEXT *pSampleLocationsInfo, const char *apiName) const { bool skip = false; const VkSampleCountFlagBits sample_count = pSampleLocationsInfo->sampleLocationsPerPixel; const uint32_t sample_total_size = pSampleLocationsInfo->sampleLocationGridSize.width * pSampleLocationsInfo->sampleLocationGridSize.height * SampleCountSize(sample_count); if (pSampleLocationsInfo->sampleLocationsCount != sample_total_size) { skip |= LogError(device, "VUID-VkSampleLocationsInfoEXT-sampleLocationsCount-01527", "%s: VkSampleLocationsInfoEXT::sampleLocationsCount (%u) must equal grid width * grid height * pixel " "sample rate which currently is (%u * %u * %u).", apiName, pSampleLocationsInfo->sampleLocationsCount, pSampleLocationsInfo->sampleLocationGridSize.width, pSampleLocationsInfo->sampleLocationGridSize.height, SampleCountSize(sample_count)); } if ((phys_dev_ext_props.sample_locations_props.sampleLocationSampleCounts & sample_count) == 0) { skip |= LogError(device, "VUID-VkSampleLocationsInfoEXT-sampleLocationsPerPixel-01526", "%s: VkSampleLocationsInfoEXT::sampleLocationsPerPixel of %s is not supported by the device, please check " "VkPhysicalDeviceSampleLocationsPropertiesEXT::sampleLocationSampleCounts for valid sample counts.", apiName, string_VkSampleCountFlagBits(sample_count)); } return skip; } static char const *GetCauseStr(VulkanTypedHandle obj) { if (obj.type == kVulkanObjectTypeDescriptorSet) return "destroyed or updated"; if (obj.type == kVulkanObjectTypeCommandBuffer) return "destroyed or rerecorded"; return "destroyed"; } bool CoreChecks::ReportInvalidCommandBuffer(const CMD_BUFFER_STATE *cb_state, const char *call_source) const { bool skip = false; for (auto obj : cb_state->broken_bindings) { const char *cause_str = GetCauseStr(obj); string vuid; std::ostringstream str; str << kVUID_Core_DrawState_InvalidCommandBuffer << "-" << object_string[obj.type]; vuid = str.str(); LogObjectList objlist(cb_state->commandBuffer); objlist.add(obj); skip |= LogError(objlist, vuid, "You are adding %s to %s that is invalid because bound %s was %s.", call_source, report_data->FormatHandle(cb_state->commandBuffer).c_str(), report_data->FormatHandle(obj).c_str(), cause_str); } return skip; } // 'commandBuffer must be in the recording state' valid usage error code for each command // Autogenerated as part of the vk_validation_error_message.h codegen // This accounts for the following VUIDs, enumerated here for search and tracking purposes: /* "VUID-vkCmdBeginConditionalRenderingEXT-commandBuffer-recording", "VUID-vkCmdBeginDebugUtilsLabelEXT-commandBuffer-recording", "VUID-vkCmdBeginQuery-commandBuffer-recording", "VUID-vkCmdBeginQueryIndexedEXT-commandBuffer-recording", "VUID-vkCmdBeginRenderPass-commandBuffer-recording", "VUID-vkCmdBeginRenderPass2-commandBuffer-recording", "VUID-vkCmdBeginTransformFeedbackEXT-commandBuffer-recording", "VUID-vkCmdBindDescriptorSets-commandBuffer-recording", "VUID-vkCmdBindIndexBuffer-commandBuffer-recording", "VUID-vkCmdBindPipeline-commandBuffer-recording", "VUID-vkCmdBindPipelineShaderGroupNV-commandBuffer-recording", "VUID-vkCmdBindShadingRateImageNV-commandBuffer-recording", "VUID-vkCmdBindTransformFeedbackBuffersEXT-commandBuffer-recording", 
"VUID-vkCmdBindVertexBuffers-commandBuffer-recording", "VUID-vkCmdBindVertexBuffers2EXT-commandBuffer-recording", "VUID-vkCmdBlitImage-commandBuffer-recording", "VUID-vkCmdBlitImage2KHR-commandBuffer-recording", "VUID-vkCmdBuildAccelerationStructuresIndirectKHR-commandBuffer-recording", "VUID-vkCmdBuildAccelerationStructuresKHR-commandBuffer-recording", "VUID-vkCmdBuildAccelerationStructureNV-commandBuffer-recording", "VUID-vkCmdClearAttachments-commandBuffer-recording", "VUID-vkCmdClearColorImage-commandBuffer-recording", "VUID-vkCmdClearDepthStencilImage-commandBuffer-recording", "VUID-vkCmdCopyAccelerationStructureKHR-commandBuffer-recording", "VUID-vkCmdCopyAccelerationStructureNV-commandBuffer-recording", "VUID-vkCmdCopyAccelerationStructureToMemoryKHR-commandBuffer-recording", "VUID-vkCmdCopyBuffer-commandBuffer-recording", "VUID-vkCmdCopyBuffer2KHR-commandBuffer-recording", "VUID-vkCmdCopyBufferToImage-commandBuffer-recording", "VUID-vkCmdCopyBufferToImage2KHR-commandBuffer-recording", "VUID-vkCmdCopyImage-commandBuffer-recording", "VUID-vkCmdCopyImage2KHR-commandBuffer-recording", "VUID-vkCmdCopyImageToBuffer-commandBuffer-recording", "VUID-vkCmdCopyImageToBuffer2KHR-commandBuffer-recording", "VUID-vkCmdCopyMemoryToAccelerationStructureKHR-commandBuffer-recording", "VUID-vkCmdCopyQueryPoolResults-commandBuffer-recording", "VUID-vkCmdDebugMarkerBeginEXT-commandBuffer-recording", "VUID-vkCmdDebugMarkerEndEXT-commandBuffer-recording", "VUID-vkCmdDebugMarkerInsertEXT-commandBuffer-recording", "VUID-vkCmdDispatch-commandBuffer-recording", "VUID-vkCmdDispatchBase-commandBuffer-recording", "VUID-vkCmdDispatchIndirect-commandBuffer-recording", "VUID-vkCmdDraw-commandBuffer-recording", "VUID-vkCmdDrawIndexed-commandBuffer-recording", "VUID-vkCmdDrawIndexedIndirect-commandBuffer-recording", "VUID-vkCmdDrawIndexedIndirectCount-commandBuffer-recording", "VUID-vkCmdDrawIndirect-commandBuffer-recording", "VUID-vkCmdDrawIndirectByteCountEXT-commandBuffer-recording", "VUID-vkCmdDrawIndirectCount-commandBuffer-recording", "VUID-vkCmdDrawMeshTasksIndirectCountNV-commandBuffer-recording", "VUID-vkCmdDrawMeshTasksIndirectNV-commandBuffer-recording", "VUID-vkCmdDrawMeshTasksNV-commandBuffer-recording", "VUID-vkCmdEndConditionalRenderingEXT-commandBuffer-recording", "VUID-vkCmdEndDebugUtilsLabelEXT-commandBuffer-recording", "VUID-vkCmdEndQuery-commandBuffer-recording", "VUID-vkCmdEndQueryIndexedEXT-commandBuffer-recording", "VUID-vkCmdEndRenderPass-commandBuffer-recording", "VUID-vkCmdEndRenderPass2-commandBuffer-recording", "VUID-vkCmdEndTransformFeedbackEXT-commandBuffer-recording", "VUID-vkCmdExecuteCommands-commandBuffer-recording", "VUID-vkCmdExecuteGeneratedCommandsNV-commandBuffer-recording", "VUID-vkCmdFillBuffer-commandBuffer-recording", "VUID-vkCmdInsertDebugUtilsLabelEXT-commandBuffer-recording", "VUID-vkCmdNextSubpass-commandBuffer-recording", "VUID-vkCmdNextSubpass2-commandBuffer-recording", "VUID-vkCmdPipelineBarrier-commandBuffer-recording", "VUID-vkCmdPreprocessGeneratedCommandsNV-commandBuffer-recording", "VUID-vkCmdPushConstants-commandBuffer-recording", "VUID-vkCmdPushDescriptorSetKHR-commandBuffer-recording", "VUID-vkCmdPushDescriptorSetWithTemplateKHR-commandBuffer-recording", "VUID-vkCmdResetEvent-commandBuffer-recording", "VUID-vkCmdResetQueryPool-commandBuffer-recording", "VUID-vkCmdResolveImage-commandBuffer-recording", "VUID-vkCmdResolveImage2KHR-commandBuffer-recording", "VUID-vkCmdSetBlendConstants-commandBuffer-recording", "VUID-vkCmdSetCheckpointNV-commandBuffer-recording", 
"VUID-vkCmdSetCoarseSampleOrderNV-commandBuffer-recording", "VUID-vkCmdSetCullModeEXT-commandBuffer-recording", "VUID-vkCmdSetDepthBias-commandBuffer-recording", "VUID-vkCmdSetDepthBounds-commandBuffer-recording", "VUID-vkCmdSetDepthBoundsTestEnableEXT-commandBuffer-recording", "VUID-vkCmdSetDepthCompareOpEXT-commandBuffer-recording", "VUID-vkCmdSetDepthTestEnableEXT-commandBuffer-recording", "VUID-vkCmdSetDepthWriteEnableEXT-commandBuffer-recording", "VUID-vkCmdSetDeviceMask-commandBuffer-recording", "VUID-vkCmdSetDiscardRectangleEXT-commandBuffer-recording", "VUID-vkCmdSetEvent-commandBuffer-recording", "VUID-vkCmdSetExclusiveScissorNV-commandBuffer-recording", "VUID-vkCmdSetFragmentShadingRateKHR-commandBuffer-recording", "VUID-vkCmdSetFrontFaceEXT-commandBuffer-recording", "VUID-vkCmdSetLineStippleEXT-commandBuffer-recording", "VUID-vkCmdSetLineWidth-commandBuffer-recording", "VUID-vkCmdSetPerformanceMarkerINTEL-commandBuffer-recording", "VUID-vkCmdSetPerformanceOverrideINTEL-commandBuffer-recording", "VUID-vkCmdSetPerformanceStreamMarkerINTEL-commandBuffer-recording", "VUID-vkCmdSetPrimitiveTopologyEXT-commandBuffer-recording", "VUID-vkCmdSetSampleLocationsEXT-commandBuffer-recording", "VUID-vkCmdSetScissor-commandBuffer-recording", "VUID-vkCmdSetScissorWithCountEXT-commandBuffer-recording", "VUID-vkCmdSetStencilCompareMask-commandBuffer-recording", "VUID-vkCmdSetStencilOpEXT-commandBuffer-recording", "VUID-vkCmdSetStencilReference-commandBuffer-recording", "VUID-vkCmdSetStencilTestEnableEXT-commandBuffer-recording", "VUID-vkCmdSetStencilWriteMask-commandBuffer-recording", "VUID-vkCmdSetViewport-commandBuffer-recording", "VUID-vkCmdSetViewportShadingRatePaletteNV-commandBuffer-recording", "VUID-vkCmdSetViewportWScalingNV-commandBuffer-recording", "VUID-vkCmdSetViewportWithCountEXT-commandBuffer-recording", "VUID-vkCmdTraceRaysIndirectKHR-commandBuffer-recording", "VUID-vkCmdTraceRaysKHR-commandBuffer-recording", "VUID-vkCmdTraceRaysNV-commandBuffer-recording", "VUID-vkCmdUpdateBuffer-commandBuffer-recording", "VUID-vkCmdWaitEvents-commandBuffer-recording", "VUID-vkCmdWriteAccelerationStructuresPropertiesKHR-commandBuffer-recording", "VUID-vkCmdWriteBufferMarkerAMD-commandBuffer-recording", "VUID-vkCmdWriteTimestamp-commandBuffer-recording", "VUID-vkEndCommandBuffer-commandBuffer-00059" */ // Validate the given command being added to the specified cmd buffer, flagging errors if CB is not in the recording state or if // there's an issue with the Cmd ordering bool CoreChecks::ValidateCmd(const CMD_BUFFER_STATE *cb_state, const CMD_TYPE cmd, const char *caller_name) const { switch (cb_state->state) { case CB_RECORDING: return ValidateCmdSubpassState(cb_state, cmd); case CB_INVALID_COMPLETE: case CB_INVALID_INCOMPLETE: return ReportInvalidCommandBuffer(cb_state, caller_name); default: assert(cmd != CMD_NONE); const auto error = KGeneratedMustBeRecordingList[cmd]; return LogError(cb_state->commandBuffer, error, "You must call vkBeginCommandBuffer() before this call to %s.", caller_name); } } bool CoreChecks::ValidateIndirectCmd(VkCommandBuffer command_buffer, VkBuffer buffer, CMD_TYPE cmd_type, const char *caller_name) const { bool skip = false; const DrawDispatchVuid vuid = GetDrawDispatchVuid(cmd_type); const CMD_BUFFER_STATE *cb_state = GetCBState(command_buffer); const BUFFER_STATE *buffer_state = GetBufferState(buffer); if ((cb_state != nullptr) && (buffer_state != nullptr)) { skip |= ValidateMemoryIsBoundToBuffer(buffer_state, caller_name, vuid.indirect_contiguous_memory); skip |= 
ValidateBufferUsageFlags(buffer_state, VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT, true, vuid.indirect_buffer_bit, caller_name, "VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT"); if (cb_state->unprotected == false) { skip |= LogError(cb_state->commandBuffer, vuid.indirect_protected_cb, "%s: Indirect commands can't be used in protected command buffers.", caller_name); } } return skip; } template <typename T1> bool CoreChecks::ValidateDeviceMaskToPhysicalDeviceCount(uint32_t deviceMask, const T1 object, const char *VUID) const { bool skip = false; uint32_t count = 1 << physical_device_count; if (count <= deviceMask) { skip |= LogError(object, VUID, "deviceMask(0x%" PRIx32 ") is invalid. Physical device count is %" PRIu32 ".", deviceMask, physical_device_count); } return skip; } template <typename T1> bool CoreChecks::ValidateDeviceMaskToZero(uint32_t deviceMask, const T1 object, const char *VUID) const { bool skip = false; if (deviceMask == 0) { skip |= LogError(object, VUID, "deviceMask(0x%" PRIx32 ") must be non-zero.", deviceMask); } return skip; } template <typename T1> bool CoreChecks::ValidateDeviceMaskToCommandBuffer(const CMD_BUFFER_STATE *pCB, uint32_t deviceMask, const T1 object, const char *VUID) const { bool skip = false; if ((deviceMask & pCB->initial_device_mask) != deviceMask) { skip |= LogError(object, VUID, "deviceMask(0x%" PRIx32 ") is not a subset of %s initial device mask(0x%" PRIx32 ").", deviceMask, report_data->FormatHandle(pCB->commandBuffer).c_str(), pCB->initial_device_mask); } return skip; } bool CoreChecks::ValidateDeviceMaskToRenderPass(const CMD_BUFFER_STATE *pCB, uint32_t deviceMask, const char *VUID) const { bool skip = false; if ((deviceMask & pCB->active_render_pass_device_mask) != deviceMask) { skip |= LogError(pCB->commandBuffer, VUID, "deviceMask(0x%" PRIx32 ") is not a subset of %s device mask(0x%" PRIx32 ").", deviceMask, report_data->FormatHandle(pCB->activeRenderPass->renderPass).c_str(), pCB->active_render_pass_device_mask); } return skip; } // Flags validation error if the associated call is made inside a render pass. The apiName routine should ONLY be called outside a // render pass. bool CoreChecks::InsideRenderPass(const CMD_BUFFER_STATE *pCB, const char *apiName, const char *msgCode) const { bool inside = false; if (pCB->activeRenderPass) { inside = LogError(pCB->commandBuffer, msgCode, "%s: It is invalid to issue this call inside an active %s.", apiName, report_data->FormatHandle(pCB->activeRenderPass->renderPass).c_str()); } return inside; } // Flags validation error if the associated call is made outside a render pass. The apiName // routine should ONLY be called inside a render pass. 
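// Note that a secondary command buffer begun with VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT is
// treated as being inside a render pass even though activeRenderPass is not set on it directly.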
bool CoreChecks::OutsideRenderPass(const CMD_BUFFER_STATE *pCB, const char *apiName, const char *msgCode) const { bool outside = false; if (((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) && (!pCB->activeRenderPass)) || ((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) && (!pCB->activeRenderPass) && !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT))) { outside = LogError(pCB->commandBuffer, msgCode, "%s: This call must be issued inside an active render pass.", apiName); } return outside; } bool CoreChecks::ValidateQueueFamilyIndex(const PHYSICAL_DEVICE_STATE *pd_state, uint32_t requested_queue_family, const char *err_code, const char *cmd_name, const char *queue_family_var_name) const { bool skip = false; if (requested_queue_family >= pd_state->queue_family_known_count) { const char *conditional_ext_cmd = instance_extensions.vk_khr_get_physical_device_properties_2 ? " or vkGetPhysicalDeviceQueueFamilyProperties2[KHR]" : ""; skip |= LogError(pd_state->phys_device, err_code, "%s: %s (= %" PRIu32 ") is not less than any previously obtained pQueueFamilyPropertyCount from " "vkGetPhysicalDeviceQueueFamilyProperties%s (i.e. is not less than %s).", cmd_name, queue_family_var_name, requested_queue_family, conditional_ext_cmd, std::to_string(pd_state->queue_family_known_count).c_str()); } return skip; } // Verify VkDeviceQueueCreateInfos bool CoreChecks::ValidateDeviceQueueCreateInfos(const PHYSICAL_DEVICE_STATE *pd_state, uint32_t info_count, const VkDeviceQueueCreateInfo *infos) const { bool skip = false; std::unordered_set<uint32_t> queue_family_set; for (uint32_t i = 0; i < info_count; ++i) { const auto requested_queue_family = infos[i].queueFamilyIndex; std::string queue_family_var_name = "pCreateInfo->pQueueCreateInfos[" + std::to_string(i) + "].queueFamilyIndex"; skip |= ValidateQueueFamilyIndex(pd_state, requested_queue_family, "VUID-VkDeviceQueueCreateInfo-queueFamilyIndex-00381", "vkCreateDevice", queue_family_var_name.c_str()); if (queue_family_set.insert(requested_queue_family).second == false) { skip |= LogError(pd_state->phys_device, "VUID-VkDeviceCreateInfo-queueFamilyIndex-00372", "CreateDevice(): %s (=%" PRIu32 ") is not unique within pQueueCreateInfos.", queue_family_var_name.c_str(), requested_queue_family); } // Verify that requested queue count of queue family is known to be valid at this point in time if (requested_queue_family < pd_state->queue_family_known_count) { const auto requested_queue_count = infos[i].queueCount; const bool queue_family_has_props = requested_queue_family < pd_state->queue_family_properties.size(); // spec guarantees at least one queue for each queue family const uint32_t available_queue_count = queue_family_has_props ? pd_state->queue_family_properties[requested_queue_family].queueCount : 1; const char *conditional_ext_cmd = instance_extensions.vk_khr_get_physical_device_properties_2 ? " or vkGetPhysicalDeviceQueueFamilyProperties2[KHR]" : ""; if (requested_queue_count > available_queue_count) { const std::string count_note = queue_family_has_props ? "i.e. 
is not less than or equal to " +
                          std::to_string(pd_state->queue_family_properties[requested_queue_family].queueCount)
                    : "the pQueueFamilyProperties[" + std::to_string(requested_queue_family) + "] was never obtained";
                skip |= LogError(
                    pd_state->phys_device, "VUID-VkDeviceQueueCreateInfo-queueCount-00382",
                    "vkCreateDevice: pCreateInfo->pQueueCreateInfos[%" PRIu32 "].queueCount (=%" PRIu32
                    ") is not less than or equal to available queue count for this pCreateInfo->pQueueCreateInfos[%" PRIu32
                    "].queueFamilyIndex (=%" PRIu32
                    ") obtained previously from vkGetPhysicalDeviceQueueFamilyProperties%s (%s).",
                    i, requested_queue_count, i, requested_queue_family, conditional_ext_cmd, count_note.c_str());
            }
        }
    }
    return skip;
}

bool CoreChecks::PreCallValidateCreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
                                             const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) const {
    bool skip = false;
    auto pd_state = GetPhysicalDeviceState(gpu);

    // TODO: object_tracker should perhaps do this instead
    // and it does not seem to currently work anyway -- the loader just crashes before this point
    if (!pd_state) {
        skip |= LogError(device, kVUID_Core_DevLimit_MustQueryCount,
                         "Invalid call to vkCreateDevice() w/o first calling vkEnumeratePhysicalDevices().");
    } else {
        skip |= ValidateDeviceQueueCreateInfos(pd_state, pCreateInfo->queueCreateInfoCount, pCreateInfo->pQueueCreateInfos);

        const VkPhysicalDeviceFragmentShadingRateFeaturesKHR *fragment_shading_rate_features =
            LvlFindInChain<VkPhysicalDeviceFragmentShadingRateFeaturesKHR>(pCreateInfo->pNext);

        if (fragment_shading_rate_features) {
            const VkPhysicalDeviceShadingRateImageFeaturesNV *shading_rate_image_features =
                LvlFindInChain<VkPhysicalDeviceShadingRateImageFeaturesNV>(pCreateInfo->pNext);

            if (shading_rate_image_features && shading_rate_image_features->shadingRateImage) {
                if (fragment_shading_rate_features->pipelineFragmentShadingRate) {
                    skip |= LogError(
                        pd_state->phys_device, "VUID-VkDeviceCreateInfo-shadingRateImage-04478",
                        "vkCreateDevice: Cannot enable shadingRateImage and pipelineFragmentShadingRate features simultaneously.");
                }
                if (fragment_shading_rate_features->primitiveFragmentShadingRate) {
                    skip |= LogError(
                        pd_state->phys_device, "VUID-VkDeviceCreateInfo-shadingRateImage-04479",
                        "vkCreateDevice: Cannot enable shadingRateImage and primitiveFragmentShadingRate features simultaneously.");
                }
                if (fragment_shading_rate_features->attachmentFragmentShadingRate) {
                    skip |= LogError(pd_state->phys_device, "VUID-VkDeviceCreateInfo-shadingRateImage-04480",
                                     "vkCreateDevice: Cannot enable shadingRateImage and attachmentFragmentShadingRate features "
                                     "simultaneously.");
                }
            }

            const VkPhysicalDeviceFragmentDensityMapFeaturesEXT *fragment_density_map_features =
                LvlFindInChain<VkPhysicalDeviceFragmentDensityMapFeaturesEXT>(pCreateInfo->pNext);

            if (fragment_density_map_features && fragment_density_map_features->fragmentDensityMap) {
                if (fragment_shading_rate_features->pipelineFragmentShadingRate) {
                    skip |= LogError(pd_state->phys_device, "VUID-VkDeviceCreateInfo-fragmentDensityMap-04481",
                                     "vkCreateDevice: Cannot enable fragmentDensityMap and pipelineFragmentShadingRate features "
                                     "simultaneously.");
                }
                if (fragment_shading_rate_features->primitiveFragmentShadingRate) {
                    skip |= LogError(pd_state->phys_device, "VUID-VkDeviceCreateInfo-fragmentDensityMap-04482",
                                     "vkCreateDevice: Cannot enable fragmentDensityMap and primitiveFragmentShadingRate features "
                                     "simultaneously.");
                }
                if (fragment_shading_rate_features->attachmentFragmentShadingRate) {
                    skip |=
LogError(pd_state->phys_device, "VUID-VkDeviceCreateInfo-fragmentDensityMap-04483", "vkCreateDevice: Cannot enable fragmentDensityMap and attachmentFragmentShadingRate features " "simultaneously."); } } } } return skip; } void CoreChecks::PostCallRecordCreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkDevice *pDevice, VkResult result) { // The state tracker sets up the device state StateTracker::PostCallRecordCreateDevice(gpu, pCreateInfo, pAllocator, pDevice, result); // Add the callback hooks for the functions that are either broadly or deeply used and that the ValidationStateTracker refactor // would be messier without. // TODO: Find a good way to do this hooklessly. ValidationObject *device_object = GetLayerDataPtr(get_dispatch_key(*pDevice), layer_data_map); ValidationObject *validation_data = GetValidationObject(device_object->object_dispatch, LayerObjectTypeCoreValidation); CoreChecks *core_checks = static_cast<CoreChecks *>(validation_data); core_checks->SetSetImageViewInitialLayoutCallback( [core_checks](CMD_BUFFER_STATE *cb_node, const IMAGE_VIEW_STATE &iv_state, VkImageLayout layout) -> void { core_checks->SetImageViewInitialLayout(cb_node, iv_state, layout); }); } void CoreChecks::PreCallRecordDestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator) { if (!device) return; imageLayoutMap.clear(); StateTracker::PreCallRecordDestroyDevice(device, pAllocator); } bool CoreChecks::ValidateStageMaskHost(const CoreErrorLocation &loc, VkPipelineStageFlags2KHR stageMask) const { bool skip = false; if ((stageMask & VK_PIPELINE_STAGE_HOST_BIT) != 0) { const auto &vuid = sync_vuid_maps::GetQueueSubmitVUID(loc, sync_vuid_maps::SubmitError::kHostStageMask); skip |= LogError( device, vuid, "%s stage mask must not include VK_PIPELINE_STAGE_HOST_BIT as the stage can't be invoked inside a command buffer.", loc.Message().c_str()); } return skip; } // Note: This function assumes that the global lock is held by the calling thread. // For the given queue, verify the queue state up to the given seq number. // Currently the only check is to make sure that if there are events to be waited on prior to // a QueryReset, make sure that all such events have been signalled. bool CoreChecks::VerifyQueueStateToSeq(const QUEUE_STATE *initial_queue, uint64_t initial_seq) const { bool skip = false; // sequence number we want to validate up to, per queue std::unordered_map<const QUEUE_STATE *, uint64_t> target_seqs{{initial_queue, initial_seq}}; // sequence number we've completed validation for, per queue std::unordered_map<const QUEUE_STATE *, uint64_t> done_seqs; std::vector<const QUEUE_STATE *> worklist{initial_queue}; while (worklist.size()) { auto queue = worklist.back(); worklist.pop_back(); auto target_seq = target_seqs[queue]; auto seq = std::max(done_seqs[queue], queue->seq); auto sub_it = queue->submissions.begin() + int(seq - queue->seq); // seq >= queue->seq for (; seq < target_seq; ++sub_it, ++seq) { for (auto &wait : sub_it->waitSemaphores) { auto other_queue = GetQueueState(wait.queue); if (other_queue == queue) continue; // semaphores /always/ point backwards, so no point here. auto other_target_seq = std::max(target_seqs[other_queue], wait.seq); auto other_done_seq = std::max(done_seqs[other_queue], other_queue->seq); // if this wait is for another queue, and covers new sequence // numbers beyond what we've already validated, mark the new // target seq and (possibly-re)add the queue to the worklist. 
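                // Example: if this queue's submission waits on a semaphore signaled by another queue at seq 9
                // but that queue has only been validated through seq 3, extend its target to 9 and revisit it.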
if (other_done_seq < other_target_seq) { target_seqs[other_queue] = other_target_seq; worklist.push_back(other_queue); } } } // finally mark the point we've now validated this queue to. done_seqs[queue] = seq; } return skip; } // When the given fence is retired, verify outstanding queue operations through the point of the fence bool CoreChecks::VerifyQueueStateToFence(VkFence fence) const { auto fence_state = GetFenceState(fence); if (fence_state && fence_state->scope == kSyncScopeInternal && VK_NULL_HANDLE != fence_state->signaler.first) { return VerifyQueueStateToSeq(GetQueueState(fence_state->signaler.first), fence_state->signaler.second); } return false; } bool CoreChecks::ValidateCommandBufferSimultaneousUse(const CoreErrorLocation &loc, const CMD_BUFFER_STATE *pCB, int current_submit_count) const { using sync_vuid_maps::GetQueueSubmitVUID; using sync_vuid_maps::SubmitError; bool skip = false; if ((pCB->in_use.load() || current_submit_count > 1) && !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) { const auto &vuid = sync_vuid_maps::GetQueueSubmitVUID(loc, SubmitError::kCmdNotSimultaneous); skip |= LogError(device, vuid, "%s %s is already in use and is not marked for simultaneous use.", loc.Message().c_str(), report_data->FormatHandle(pCB->commandBuffer).c_str()); } return skip; } bool CoreChecks::ValidateCommandBufferState(const CMD_BUFFER_STATE *cb_state, const char *call_source, int current_submit_count, const char *vu_id) const { bool skip = false; if (disabled[command_buffer_state]) return skip; // Validate ONE_TIME_SUBMIT_BIT CB is not being submitted more than once if ((cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT) && (cb_state->submitCount + current_submit_count > 1)) { skip |= LogError(cb_state->commandBuffer, kVUID_Core_DrawState_CommandBufferSingleSubmitViolation, "%s was begun w/ VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT set, but has been submitted 0x%" PRIxLEAST64 "times.", report_data->FormatHandle(cb_state->commandBuffer).c_str(), cb_state->submitCount + current_submit_count); } // Validate that cmd buffers have been updated switch (cb_state->state) { case CB_INVALID_INCOMPLETE: case CB_INVALID_COMPLETE: skip |= ReportInvalidCommandBuffer(cb_state, call_source); break; case CB_NEW: skip |= LogError(cb_state->commandBuffer, vu_id, "%s used in the call to %s is unrecorded and contains no commands.", report_data->FormatHandle(cb_state->commandBuffer).c_str(), call_source); break; case CB_RECORDING: skip |= LogError(cb_state->commandBuffer, kVUID_Core_DrawState_NoEndCommandBuffer, "You must call vkEndCommandBuffer() on %s before this call to %s!", report_data->FormatHandle(cb_state->commandBuffer).c_str(), call_source); break; default: /* recorded */ break; } return skip; } // Check that the queue family index of 'queue' matches one of the entries in pQueueFamilyIndices bool CoreChecks::ValidImageBufferQueue(const CMD_BUFFER_STATE *cb_node, const VulkanTypedHandle &object, uint32_t queueFamilyIndex, uint32_t count, const uint32_t *indices) const { bool found = false; bool skip = false; for (uint32_t i = 0; i < count; i++) { if (indices[i] == queueFamilyIndex) { found = true; break; } } if (!found) { LogObjectList objlist(cb_node->commandBuffer); objlist.add(object); skip = LogError(objlist, "VUID-vkQueueSubmit-pSubmits-04626", "vkQueueSubmit: %s contains %s which was not created allowing concurrent access to " "this queue family %d.", report_data->FormatHandle(cb_node->commandBuffer).c_str(), 
report_data->FormatHandle(object).c_str(), queueFamilyIndex); } return skip; } // Validate that queueFamilyIndices of primary command buffers match this queue // Secondary command buffers were previously validated in vkCmdExecuteCommands(). bool CoreChecks::ValidateQueueFamilyIndices(const CoreErrorLocation &loc, const CMD_BUFFER_STATE *pCB, VkQueue queue) const { using sync_vuid_maps::GetQueueSubmitVUID; using sync_vuid_maps::SubmitError; bool skip = false; auto pool = pCB->command_pool.get(); auto queue_state = GetQueueState(queue); if (pool && queue_state) { if (pool->queueFamilyIndex != queue_state->queueFamilyIndex) { LogObjectList objlist(pCB->commandBuffer); objlist.add(queue); const auto &vuid = GetQueueSubmitVUID(loc, SubmitError::kCmdWrongQueueFamily); skip |= LogError(objlist, vuid, "%s Primary %s created in queue family %d is being submitted on %s " "from queue family %d.", loc.Message().c_str(), report_data->FormatHandle(pCB->commandBuffer).c_str(), pool->queueFamilyIndex, report_data->FormatHandle(queue).c_str(), queue_state->queueFamilyIndex); } // Ensure that any bound images or buffers created with SHARING_MODE_CONCURRENT have access to the current queue family for (const auto &object : pCB->object_bindings) { if (object.type == kVulkanObjectTypeImage) { auto image_state = object.node ? (IMAGE_STATE *)object.node : GetImageState(object.Cast<VkImage>()); if (image_state && image_state->createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) { skip |= ValidImageBufferQueue(pCB, object, queue_state->queueFamilyIndex, image_state->createInfo.queueFamilyIndexCount, image_state->createInfo.pQueueFamilyIndices); } } else if (object.type == kVulkanObjectTypeBuffer) { auto buffer_state = object.node ? (BUFFER_STATE *)object.node : GetBufferState(object.Cast<VkBuffer>()); if (buffer_state && buffer_state->createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) { skip |= ValidImageBufferQueue(pCB, object, queue_state->queueFamilyIndex, buffer_state->createInfo.queueFamilyIndexCount, buffer_state->createInfo.pQueueFamilyIndices); } } } } return skip; } bool CoreChecks::ValidatePrimaryCommandBufferState( const CoreErrorLocation &loc, const CMD_BUFFER_STATE *pCB, int current_submit_count, QFOTransferCBScoreboards<QFOImageTransferBarrier> *qfo_image_scoreboards, QFOTransferCBScoreboards<QFOBufferTransferBarrier> *qfo_buffer_scoreboards) const { using sync_vuid_maps::GetQueueSubmitVUID; using sync_vuid_maps::SubmitError; // Track in-use for resources off of primary and any secondary CBs bool skip = false; if (pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) { const auto &vuid = GetQueueSubmitVUID(loc, SubmitError::kSecondaryCmdInSubmit); skip |= LogError(pCB->commandBuffer, vuid, "%s Command buffer %s must be allocated with VK_COMMAND_BUFFER_LEVEL_PRIMARY.", loc.Message().c_str(), report_data->FormatHandle(pCB->commandBuffer).c_str()); } else { for (auto sub_cb : pCB->linkedCommandBuffers) { skip |= ValidateQueuedQFOTransfers(sub_cb, qfo_image_scoreboards, qfo_buffer_scoreboards); // TODO: replace with InvalidateCommandBuffers() at recording. 
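            // A secondary recorded without VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT may be "owned" by only
            // one primary at a time; if it has since been recorded into a different primary, flag this submit.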
            if ((sub_cb->primaryCommandBuffer != pCB->commandBuffer) &&
                !(sub_cb->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
                LogObjectList objlist(device);
                objlist.add(pCB->commandBuffer);
                objlist.add(sub_cb->commandBuffer);
                objlist.add(sub_cb->primaryCommandBuffer);
                const auto &vuid = GetQueueSubmitVUID(loc, SubmitError::kSecondaryCmdNotSimultaneous);
                skip |= LogError(objlist, vuid,
                                 "%s %s was submitted with secondary %s but that buffer has subsequently been bound to "
                                 "primary %s and it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set.",
                                 loc.Message().c_str(), report_data->FormatHandle(pCB->commandBuffer).c_str(),
                                 report_data->FormatHandle(sub_cb->commandBuffer).c_str(),
                                 report_data->FormatHandle(sub_cb->primaryCommandBuffer).c_str());
            }
        }
    }

    // If USAGE_SIMULTANEOUS_USE_BIT not set then CB cannot already be executing on device
    skip |= ValidateCommandBufferSimultaneousUse(loc, pCB, current_submit_count);

    skip |= ValidateQueuedQFOTransfers(pCB, qfo_image_scoreboards, qfo_buffer_scoreboards);

    const char *vuid = loc.func_name == ErrFunc::vkQueueSubmit ? "VUID-vkQueueSubmit-pCommandBuffers-00072"
                                                               : "VUID-vkQueueSubmit2KHR-commandBuffer-03876";
    skip |= ValidateCommandBufferState(pCB, loc.StringFuncName().c_str(), current_submit_count, vuid);

    return skip;
}

bool CoreChecks::ValidateFenceForSubmit(const FENCE_STATE *pFence, const char *inflight_vuid, const char *retired_vuid,
                                        const char *func_name) const {
    bool skip = false;

    if (pFence && pFence->scope == kSyncScopeInternal) {
        if (pFence->state == FENCE_INFLIGHT) {
            skip |= LogError(pFence->fence, inflight_vuid, "%s: %s is already in use by another submission.", func_name,
                             report_data->FormatHandle(pFence->fence).c_str());
        } else if (pFence->state == FENCE_RETIRED) {
            skip |= LogError(pFence->fence, retired_vuid,
                             "%s: %s submitted in SIGNALED state. Fences must be reset before being submitted", func_name,
                             report_data->FormatHandle(pFence->fence).c_str());
        }
    }

    return skip;
}

void CoreChecks::PostCallRecordQueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence,
                                           VkResult result) {
    StateTracker::PostCallRecordQueueSubmit(queue, submitCount, pSubmits, fence, result);

    if (result != VK_SUCCESS) return;
    // The triply nested for-loop duplicates work done in the StateTracker, but avoids the need for two additional callbacks.
    for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
        const VkSubmitInfo *submit = &pSubmits[submit_idx];
        for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
            auto cb_node = GetCBState(submit->pCommandBuffers[i]);
            if (cb_node) {
                for (auto secondary_cmd_buffer : cb_node->linkedCommandBuffers) {
                    UpdateCmdBufImageLayouts(secondary_cmd_buffer);
                    RecordQueuedQFOTransfers(secondary_cmd_buffer);
                }
                UpdateCmdBufImageLayouts(cb_node);
                RecordQueuedQFOTransfers(cb_node);
            }
        }
    }
}

void CoreChecks::PostCallRecordQueueSubmit2KHR(VkQueue queue, uint32_t submitCount, const VkSubmitInfo2KHR *pSubmits,
                                               VkFence fence, VkResult result) {
    StateTracker::PostCallRecordQueueSubmit2KHR(queue, submitCount, pSubmits, fence, result);

    if (result != VK_SUCCESS) return;
    // The triply nested for-loop duplicates work done in the StateTracker, but avoids the need for two additional callbacks.
    for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
        const VkSubmitInfo2KHR *submit = &pSubmits[submit_idx];
        for (uint32_t i = 0; i < submit->commandBufferInfoCount; i++) {
            auto cb_node = GetCBState(submit->pCommandBufferInfos[i].commandBuffer);
            if (cb_node) {
                for (auto secondary_cmd_buffer : cb_node->linkedCommandBuffers) {
                    UpdateCmdBufImageLayouts(secondary_cmd_buffer);
                    RecordQueuedQFOTransfers(secondary_cmd_buffer);
                }
                UpdateCmdBufImageLayouts(cb_node);
                RecordQueuedQFOTransfers(cb_node);
            }
        }
    }
}

bool CoreChecks::SemaphoreWasSignaled(VkSemaphore semaphore) const {
    for (auto &pair : queueMap) {
        const QUEUE_STATE &queue_state = pair.second;
        for (const auto &submission : queue_state.submissions) {
            for (const auto &signal_semaphore : submission.signalSemaphores) {
                if (signal_semaphore.semaphore == semaphore) {
                    return true;
                }
            }
        }
    }

    return false;
}

struct SemaphoreSubmitState {
    const CoreChecks *core;
    VkQueueFlags queue_flags;
    unordered_set<VkSemaphore> signaled_semaphores;
    unordered_set<VkSemaphore> unsignaled_semaphores;
    unordered_set<VkSemaphore> internal_semaphores;

    SemaphoreSubmitState(const CoreChecks *core_, VkQueueFlags queue_flags_) : core(core_), queue_flags(queue_flags_) {}

    bool ValidateWaitSemaphore(const CoreErrorLocation &loc, VkQueue queue, VkSemaphore semaphore, uint64_t value,
                               uint32_t device_index) {
        using sync_vuid_maps::GetQueueSubmitVUID;
        using sync_vuid_maps::SubmitError;
        bool skip = false;
        LogObjectList objlist(semaphore);
        objlist.add(queue);

        const auto *pSemaphore = core->GetSemaphoreState(semaphore);
        if (pSemaphore && pSemaphore->type == VK_SEMAPHORE_TYPE_BINARY_KHR &&
            (pSemaphore->scope == kSyncScopeInternal || internal_semaphores.count(semaphore))) {
            if (unsignaled_semaphores.count(semaphore) ||
                (!(signaled_semaphores.count(semaphore)) && !(pSemaphore->signaled) && !core->SemaphoreWasSignaled(semaphore))) {
                const auto &vuid = GetQueueSubmitVUID(loc, SubmitError::kSemCannotBeSignalled);
                skip |= core->LogError(
                    objlist, pSemaphore->scope == kSyncScopeInternal ? vuid : kVUID_Core_DrawState_QueueForwardProgress,
                    "%s Queue %s is waiting on semaphore (%s) that has no way to be signaled.", loc.Message().c_str(),
                    core->report_data->FormatHandle(queue).c_str(), core->report_data->FormatHandle(semaphore).c_str());
            } else {
                signaled_semaphores.erase(semaphore);
                unsignaled_semaphores.insert(semaphore);
            }
        }
        if (pSemaphore && pSemaphore->type == VK_SEMAPHORE_TYPE_BINARY_KHR && pSemaphore->scope == kSyncScopeExternalTemporary) {
            internal_semaphores.insert(semaphore);
        }
        return skip;
    }

    bool ValidateSignalSemaphore(const CoreErrorLocation &loc, VkQueue queue, VkSemaphore semaphore, uint64_t value,
                                 uint32_t device_index) {
        using sync_vuid_maps::GetQueueSubmitVUID;
        using sync_vuid_maps::SubmitError;
        bool skip = false;
        LogObjectList objlist(semaphore);
        objlist.add(queue);

        const auto *pSemaphore = core->GetSemaphoreState(semaphore);
        if (pSemaphore && pSemaphore->type == VK_SEMAPHORE_TYPE_TIMELINE_KHR && value <= pSemaphore->payload) {
            const auto &vuid = GetQueueSubmitVUID(loc, SubmitError::kTimelineSemSmallValue);
            skip |= core->LogError(objlist, vuid,
                                   "%s signal value (0x%" PRIx64
                                   ") in %s must be greater than current timeline semaphore %s value (0x%" PRIx64 ")",
                                   loc.Message().c_str(), value, core->report_data->FormatHandle(queue).c_str(),
                                   core->report_data->FormatHandle(semaphore).c_str(), pSemaphore->payload);
        }
        if (pSemaphore && pSemaphore->type == VK_SEMAPHORE_TYPE_BINARY_KHR &&
            (pSemaphore->scope == kSyncScopeInternal || internal_semaphores.count(semaphore))) {
            if (signaled_semaphores.count(semaphore) || (!(unsignaled_semaphores.count(semaphore)) && pSemaphore->signaled)) {
                objlist.add(pSemaphore->signaler.first);
                skip |= core->LogError(objlist, kVUID_Core_DrawState_QueueForwardProgress,
                                       "%s is signaling %s (%s) that was previously "
                                       "signaled by %s but has not since been waited on by any queue.",
                                       loc.Message().c_str(), core->report_data->FormatHandle(queue).c_str(),
                                       core->report_data->FormatHandle(semaphore).c_str(),
                                       core->report_data->FormatHandle(pSemaphore->signaler.first).c_str());
            } else {
                unsignaled_semaphores.erase(semaphore);
                signaled_semaphores.insert(semaphore);
            }
        }
        return skip;
    }
};

bool CoreChecks::ValidateSemaphoresForSubmit(SemaphoreSubmitState &state, VkQueue queue, const VkSubmitInfo *submit,
                                             const CoreErrorLocation &outer_loc) const {
    bool skip = false;
    auto *timeline_semaphore_submit_info = LvlFindInChain<VkTimelineSemaphoreSubmitInfo>(submit->pNext);
    for (uint32_t i = 0; i < submit->waitSemaphoreCount; ++i) {
        uint64_t value = 0;
        uint32_t device_index = 0;  // TODO:
        VkSemaphore semaphore = submit->pWaitSemaphores[i];
        LogObjectList objlist(semaphore);
        objlist.add(queue);
        if (submit->pWaitDstStageMask) {
            auto loc = outer_loc.dot(Field::pWaitDstStageMask, i);
            skip |= ValidatePipelineStage(objlist, loc, state.queue_flags, submit->pWaitDstStageMask[i]);
            skip |= ValidateStageMaskHost(loc, submit->pWaitDstStageMask[i]);
        }
        const auto *semaphore_state = GetSemaphoreState(semaphore);
        if (!semaphore_state) {
            continue;
        }
        auto loc = outer_loc.dot(Field::pWaitSemaphores, i);
        if (semaphore_state->type == VK_SEMAPHORE_TYPE_TIMELINE) {
            if (timeline_semaphore_submit_info == nullptr) {
                skip |= LogError(semaphore, "VUID-VkSubmitInfo-pWaitSemaphores-03239",
                                 "%s (%s) is a timeline semaphore, but VkSubmitInfo does "
                                 "not include an instance of VkTimelineSemaphoreSubmitInfo",
                                 loc.Message().c_str(), report_data->FormatHandle(semaphore).c_str());
                continue;
            } else if (submit->waitSemaphoreCount != timeline_semaphore_submit_info->waitSemaphoreValueCount) {
                skip |= LogError(semaphore, "VUID-VkSubmitInfo-pNext-03240",
                                 "%s (%s) is a timeline semaphore, it contains an "
                                 "instance of VkTimelineSemaphoreSubmitInfo, but waitSemaphoreValueCount (%u) is different than "
                                 "waitSemaphoreCount (%u)",
                                 loc.Message().c_str(), report_data->FormatHandle(semaphore).c_str(),
                                 timeline_semaphore_submit_info->waitSemaphoreValueCount, submit->waitSemaphoreCount);
                continue;
            }
            value = timeline_semaphore_submit_info->pWaitSemaphoreValues[i];
        }
        skip |= state.ValidateWaitSemaphore(loc, queue, semaphore, value, device_index);
    }
    for (uint32_t i = 0; i < submit->signalSemaphoreCount; ++i) {
        VkSemaphore semaphore = submit->pSignalSemaphores[i];
        uint64_t value = 0;
        uint32_t device_index = 0;
        const auto *semaphore_state = GetSemaphoreState(semaphore);
        if (!semaphore_state) {
            continue;
        }
        auto loc = outer_loc.dot(Field::pSignalSemaphores, i);
        if (semaphore_state->type == VK_SEMAPHORE_TYPE_TIMELINE) {
            if (timeline_semaphore_submit_info == nullptr) {
                skip |= LogError(semaphore, "VUID-VkSubmitInfo-pWaitSemaphores-03239",
                                 "%s (%s) is a timeline semaphore, but VkSubmitInfo "
                                 "does not include an instance of VkTimelineSemaphoreSubmitInfo",
                                 loc.Message().c_str(), report_data->FormatHandle(semaphore).c_str());
                continue;
            } else if (submit->signalSemaphoreCount != timeline_semaphore_submit_info->signalSemaphoreValueCount) {
                skip |= LogError(semaphore, "VUID-VkSubmitInfo-pNext-03241",
                                 "%s (%s) is a timeline semaphore, it contains an "
                                 "instance of VkTimelineSemaphoreSubmitInfo, but signalSemaphoreValueCount (%u) is different than "
                                 "signalSemaphoreCount (%u)",
                                 loc.Message().c_str(), report_data->FormatHandle(semaphore).c_str(),
                                 timeline_semaphore_submit_info->signalSemaphoreValueCount, submit->signalSemaphoreCount);
                continue;
            }
            value = timeline_semaphore_submit_info->pSignalSemaphoreValues[i];
        }
        skip |= state.ValidateSignalSemaphore(loc, queue, semaphore, value, device_index);
    }
    return skip;
}

bool CoreChecks::ValidateSemaphoresForSubmit(SemaphoreSubmitState &state, VkQueue queue, const VkSubmitInfo2KHR *submit,
                                             const CoreErrorLocation &outer_loc) const {
    bool skip = false;
    for (uint32_t i = 0; i < submit->waitSemaphoreInfoCount; ++i) {
        const auto &sem_info = submit->pWaitSemaphoreInfos[i];
        CoreErrorLocation loc = outer_loc.dot(Field::pWaitSemaphoreInfos, i);
        skip |= ValidatePipelineStage(LogObjectList(sem_info.semaphore), loc.dot(Field::stageMask), state.queue_flags,
                                      sem_info.stageMask);
        skip |= ValidateStageMaskHost(loc.dot(Field::stageMask), sem_info.stageMask);
        skip |= state.ValidateWaitSemaphore(loc, queue, sem_info.semaphore, sem_info.value, sem_info.deviceIndex);
    }
    for (uint32_t i = 0; i < submit->signalSemaphoreInfoCount; ++i) {
        const auto &sem_info = submit->pSignalSemaphoreInfos[i];
        auto loc = outer_loc.dot(Field::pSignalSemaphoreInfos, i);
        skip |= ValidatePipelineStage(LogObjectList(sem_info.semaphore), loc.dot(Field::stageMask), state.queue_flags,
                                      sem_info.stageMask);
        skip |= ValidateStageMaskHost(loc.dot(Field::stageMask), sem_info.stageMask);
        skip |= state.ValidateSignalSemaphore(loc, queue, sem_info.semaphore, sem_info.value, sem_info.deviceIndex);
    }
    return skip;
}

bool CoreChecks::ValidateMaxTimelineSemaphoreValueDifference(const CoreErrorLocation &loc, VkSemaphore semaphore,
                                                             uint64_t value) const {
    using sync_vuid_maps::GetQueueSubmitVUID;
    using sync_vuid_maps::SubmitError;
    bool skip = false;
    const auto semaphore_state = GetSemaphoreState(semaphore);
    // Guard against an unknown semaphore handle; GetSemaphoreState() returns null in that case.
    if (!semaphore_state || semaphore_state->type != VK_SEMAPHORE_TYPE_TIMELINE) return false;
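    // The distance between the proposed value and the semaphore's current payload, and between the proposed
    // value and every pending signal/wait on any queue, must stay within maxTimelineSemaphoreValueDifference.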
    uint64_t diff = value > semaphore_state->payload ? value - semaphore_state->payload : semaphore_state->payload - value;
    if (diff > phys_dev_props_core12.maxTimelineSemaphoreValueDifference) {
        const auto &vuid = GetQueueSubmitVUID(loc, SubmitError::kTimelineSemMaxDiff);
        skip |= LogError(semaphore, vuid, "%s value exceeds limit regarding current semaphore %s payload", loc.Message().c_str(),
                         report_data->FormatHandle(semaphore).c_str());
    }

    for (auto &pair : queueMap) {
        const QUEUE_STATE &queue_state = pair.second;
        for (const auto &submission : queue_state.submissions) {
            for (const auto &signal_semaphore : submission.signalSemaphores) {
                if (signal_semaphore.semaphore == semaphore) {
                    diff = value > signal_semaphore.payload ? value - signal_semaphore.payload : signal_semaphore.payload - value;
                    if (diff > phys_dev_props_core12.maxTimelineSemaphoreValueDifference) {
                        const auto &vuid = GetQueueSubmitVUID(loc, SubmitError::kTimelineSemMaxDiff);
                        skip |= LogError(semaphore, vuid, "%s value exceeds limit regarding pending semaphore %s signal value",
                                         loc.Message().c_str(), report_data->FormatHandle(semaphore).c_str());
                    }
                }
            }
            for (const auto &wait_semaphore : submission.waitSemaphores) {
                if (wait_semaphore.semaphore == semaphore) {
                    diff = value > wait_semaphore.payload ? value - wait_semaphore.payload : wait_semaphore.payload - value;
                    if (diff > phys_dev_props_core12.maxTimelineSemaphoreValueDifference) {
                        const auto &vuid = GetQueueSubmitVUID(loc, SubmitError::kTimelineSemMaxDiff);
                        skip |= LogError(semaphore, vuid, "%s value exceeds limit regarding pending semaphore %s wait value",
                                         loc.Message().c_str(), report_data->FormatHandle(semaphore).c_str());
                    }
                }
            }
        }
    }

    return skip;
}

struct CommandBufferSubmitState {
    const CoreChecks *core;
    const QUEUE_STATE *queue_state;
    QFOTransferCBScoreboards<QFOImageTransferBarrier> qfo_image_scoreboards;
    QFOTransferCBScoreboards<QFOBufferTransferBarrier> qfo_buffer_scoreboards;
    vector<VkCommandBuffer> current_cmds;
    GlobalImageLayoutMap overlay_image_layout_map;
    QueryMap local_query_to_state_map;
    EventToStageMap local_event_to_stage_map;

    CommandBufferSubmitState(const CoreChecks *c, const char *func, const QUEUE_STATE *q) : core(c), queue_state(q) {}

    bool Validate(const CoreErrorLocation &loc, VkCommandBuffer cmd, uint32_t perf_pass) {
        bool skip = false;
        const auto *cb_node = core->GetCBState(cmd);
        if (cb_node == nullptr) {
            return skip;
        }
        skip |= core->ValidateCmdBufImageLayouts(cb_node, core->imageLayoutMap, &overlay_image_layout_map);
        current_cmds.push_back(cmd);
        skip |= core->ValidatePrimaryCommandBufferState(
            loc, cb_node, static_cast<int>(std::count(current_cmds.begin(), current_cmds.end(), cmd)), &qfo_image_scoreboards,
            &qfo_buffer_scoreboards);
        skip |= core->ValidateQueueFamilyIndices(loc, cb_node, queue_state->queue);

        for (auto descriptor_set : cb_node->validate_descriptorsets_in_queuesubmit) {
            const cvdescriptorset::DescriptorSet *set_node = core->GetSetNode(descriptor_set.first);
            if (!set_node) {
                continue;
            }
            for (auto cmd_info : descriptor_set.second) {
                std::string function = loc.StringFuncName();
                function += ", ";
                function += cmd_info.function;
                for (auto binding_info : cmd_info.binding_infos) {
                    std::string error;
                    std::vector<uint32_t> dynamic_offsets;
                    // Dynamic data isn't allowed in UPDATE_AFTER_BIND, so dynamicOffsets is always empty.
                    // This is the submit-time pass, not the record-time one...
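                    // Validate against the descriptor state as it exists now: UPDATE_AFTER_BIND sets may have
                    // been rewritten since the bind was recorded.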
                    const bool record_time_validate = false;
                    skip |= core->ValidateDescriptorSetBindingData(cb_node, set_node, dynamic_offsets, binding_info,
                                                                   cmd_info.framebuffer, cmd_info.attachments.get(),
                                                                   *cmd_info.subpasses.get(), record_time_validate,
                                                                   function.c_str(), core->GetDrawDispatchVuid(cmd_info.cmd_type));
                }
            }
        }

        // Potential early exit here as bad object state may crash in delayed function calls
        if (skip) {
            return true;
        }

        // Call submit-time functions to validate or update local mirrors of state (to preserve const-ness at validate time)
        for (auto &function : cb_node->queue_submit_functions) {
            skip |= function(core, queue_state);
        }
        for (auto &function : cb_node->eventUpdates) {
            skip |= function(core, /*do_validate*/ true, &local_event_to_stage_map);
        }
        VkQueryPool first_perf_query_pool = VK_NULL_HANDLE;
        for (auto &function : cb_node->queryUpdates) {
            skip |= function(core, /*do_validate*/ true, first_perf_query_pool, perf_pass, &local_query_to_state_map);
        }
        return skip;
    }
};

bool CoreChecks::PreCallValidateQueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits,
                                            VkFence fence) const {
    const auto *fence_state = GetFenceState(fence);
    bool skip =
        ValidateFenceForSubmit(fence_state, "VUID-vkQueueSubmit-fence-00064", "VUID-vkQueueSubmit-fence-00063", "vkQueueSubmit()");
    if (skip) {
        return true;
    }

    const auto queue_state = GetQueueState(queue);
    CommandBufferSubmitState cb_submit_state(this, "vkQueueSubmit()", queue_state);
    SemaphoreSubmitState sem_submit_state(
        this, GetPhysicalDeviceState()->queue_family_properties[queue_state->queueFamilyIndex].queueFlags);

    // Now verify each individual submit
    for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
        const VkSubmitInfo *submit = &pSubmits[submit_idx];
        const auto perf_submit = LvlFindInChain<VkPerformanceQuerySubmitInfoKHR>(submit->pNext);
        uint32_t perf_pass = perf_submit ? perf_submit->counterPassIndex : 0;

        CoreErrorLocation loc(ErrFunc::vkQueueSubmit, RefPage::VkSubmitInfo, Field::pSubmits, submit_idx);
        for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
            skip |= cb_submit_state.Validate(loc.dot(Field::pCommandBuffers, i), submit->pCommandBuffers[i], perf_pass);
        }
        skip |= ValidateSemaphoresForSubmit(sem_submit_state, queue, submit, loc);

        auto chained_device_group_struct = LvlFindInChain<VkDeviceGroupSubmitInfo>(submit->pNext);
        if (chained_device_group_struct && chained_device_group_struct->commandBufferCount > 0) {
            for (uint32_t i = 0; i < chained_device_group_struct->commandBufferCount; ++i) {
                skip |= ValidateDeviceMaskToPhysicalDeviceCount(chained_device_group_struct->pCommandBufferDeviceMasks[i], queue,
                                                                "VUID-VkDeviceGroupSubmitInfo-pCommandBufferDeviceMasks-00086");
            }
        }

        auto protected_submit_info = LvlFindInChain<VkProtectedSubmitInfo>(submit->pNext);
        if (protected_submit_info) {
            const bool protected_submit = protected_submit_info->protectedSubmit == VK_TRUE;
            // Only check feature once for submit
            if ((protected_submit == true) && (enabled_features.core11.protectedMemory == VK_FALSE)) {
                skip |= LogError(queue, "VUID-VkProtectedSubmitInfo-protectedSubmit-01816",
                                 "vkQueueSubmit(): The protectedMemory device feature is disabled, so a protected submission "
                                 "cannot be made to %s pSubmits[%u]",
                                 report_data->FormatHandle(queue).c_str(), submit_idx);
            }

            // Make sure command buffers are all protected or unprotected
            for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
                const CMD_BUFFER_STATE *cb_state = GetCBState(submit->pCommandBuffers[i]);
                if (cb_state != nullptr) {
                    if ((cb_state->unprotected == true) && (protected_submit == true)) {
                        LogObjectList objlist(cb_state->commandBuffer);
                        objlist.add(queue);
                        skip |= LogError(objlist, "VUID-VkSubmitInfo-pNext-04148",
                                         "vkQueueSubmit(): command buffer %s is unprotected while queue %s pSubmits[%u] has "
                                         "VkProtectedSubmitInfo::protectedSubmit set to VK_TRUE",
                                         report_data->FormatHandle(cb_state->commandBuffer).c_str(),
                                         report_data->FormatHandle(queue).c_str(), submit_idx);
                    }
                    if ((cb_state->unprotected == false) && (protected_submit == false)) {
                        LogObjectList objlist(cb_state->commandBuffer);
                        objlist.add(queue);
                        skip |= LogError(objlist, "VUID-VkSubmitInfo-pNext-04120",
                                         "vkQueueSubmit(): command buffer %s is protected while queue %s pSubmits[%u] has "
                                         "VkProtectedSubmitInfo::protectedSubmit set to VK_FALSE",
                                         report_data->FormatHandle(cb_state->commandBuffer).c_str(),
                                         report_data->FormatHandle(queue).c_str(), submit_idx);
                    }
                }
            }
        }
    }

    if (skip) return skip;

    // Now verify maxTimelineSemaphoreValueDifference
    for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
        CoreErrorLocation loc(ErrFunc::vkQueueSubmit, RefPage::VkSubmitInfo, Field::pSubmits, submit_idx);
        const VkSubmitInfo *submit = &pSubmits[submit_idx];
        auto *info = LvlFindInChain<VkTimelineSemaphoreSubmitInfo>(submit->pNext);
        if (info) {
            // The wait/signal value counts were already checked against the semaphore counts before the early
            // return above, so indexing pWaitSemaphoreValues/pSignalSemaphoreValues by i below is safe.
            if (info->waitSemaphoreValueCount) {
                for (uint32_t i = 0; i < submit->waitSemaphoreCount; ++i) {
                    VkSemaphore semaphore = submit->pWaitSemaphores[i];
                    skip |= ValidateMaxTimelineSemaphoreValueDifference(loc.dot(Field::pWaitSemaphores, i), semaphore,
                                                                        info->pWaitSemaphoreValues[i]);
                }
            }
            if (info->signalSemaphoreValueCount) {
                for (uint32_t i = 0; i < submit->signalSemaphoreCount; ++i) {
                    VkSemaphore semaphore = submit->pSignalSemaphores[i];
                    skip |= ValidateMaxTimelineSemaphoreValueDifference(loc.dot(Field::pSignalSemaphores, i), semaphore,
                                                                        info->pSignalSemaphoreValues[i]);
                }
            }
        }
    }

    return skip;
}

bool CoreChecks::PreCallValidateQueueSubmit2KHR(VkQueue queue, uint32_t submitCount, const VkSubmitInfo2KHR *pSubmits,
                                                VkFence fence) const {
    const auto *fence_state = GetFenceState(fence);
    bool skip = ValidateFenceForSubmit(fence_state, "UNASSIGNED-CoreValidation-vkQueueSubmit2KHR-fence-00064",
                                       "UNASSIGNED-CoreValidation-vkQueueSubmit2KHR-fence-00063", "vkQueueSubmit2KHR()");
    if (skip) {
        return true;
    }

    const auto queue_state = GetQueueState(queue);
    CommandBufferSubmitState cb_submit_state(this, "vkQueueSubmit2KHR()", queue_state);
    SemaphoreSubmitState sem_submit_state(
        this, GetPhysicalDeviceState()->queue_family_properties[queue_state->queueFamilyIndex].queueFlags);

    // Now verify each individual submit
    for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
        const VkSubmitInfo2KHR *submit = &pSubmits[submit_idx];
        const auto perf_submit = LvlFindInChain<VkPerformanceQuerySubmitInfoKHR>(submit->pNext);
        uint32_t perf_pass = perf_submit ? perf_submit->counterPassIndex : 0;
        CoreErrorLocation loc(ErrFunc::vkQueueSubmit2KHR, RefPage::VkSubmitInfo2KHR, Field::pSubmits, submit_idx);

        skip |= ValidateSemaphoresForSubmit(sem_submit_state, queue, submit, loc);

        const bool protected_submit = (submit->flags & VK_SUBMIT_PROTECTED_BIT_KHR) != 0;
        // Only check feature once for submit
        if ((protected_submit == true) && (enabled_features.core11.protectedMemory == VK_FALSE)) {
            skip |= LogError(queue, "VUID-VkSubmitInfo2KHR-flags-03885",
                             "vkQueueSubmit2KHR(): The protectedMemory device feature is disabled, so a protected submission "
                             "cannot be made to %s pSubmits[%u]",
                             report_data->FormatHandle(queue).c_str(), submit_idx);
        }

        for (uint32_t i = 0; i < submit->commandBufferInfoCount; i++) {
            auto info_loc = loc.dot(Field::pCommandBufferInfos, i);
            info_loc.refpage = RefPage::VkCommandBufferSubmitInfoKHR;
            skip |= cb_submit_state.Validate(info_loc.dot(Field::commandBuffer), submit->pCommandBufferInfos[i].commandBuffer,
                                             perf_pass);

            skip |= ValidateDeviceMaskToPhysicalDeviceCount(submit->pCommandBufferInfos[i].deviceMask, queue,
                                                            "VUID-VkCommandBufferSubmitInfoKHR-deviceMask-03891");

            // Make sure command buffers are all protected or unprotected
            const CMD_BUFFER_STATE *cb_state = GetCBState(submit->pCommandBufferInfos[i].commandBuffer);
            if (cb_state != nullptr) {
                if ((cb_state->unprotected == true) && (protected_submit == true)) {
                    LogObjectList objlist(cb_state->commandBuffer);
                    objlist.add(queue);
                    skip |= LogError(objlist, "VUID-VkSubmitInfo2KHR-flags-03886",
                                     "vkQueueSubmit2KHR(): command buffer %s is unprotected while queue %s pSubmits[%u] has "
                                     "VK_SUBMIT_PROTECTED_BIT_KHR set",
                                     report_data->FormatHandle(cb_state->commandBuffer).c_str(),
                                     report_data->FormatHandle(queue).c_str(), submit_idx);
                }
                if ((cb_state->unprotected == false) && (protected_submit == false)) {
                    LogObjectList objlist(cb_state->commandBuffer);
                    objlist.add(queue);
                    skip |= LogError(objlist, "VUID-VkSubmitInfo2KHR-flags-03887",
                                     "vkQueueSubmit2KHR(): command buffer %s is protected while queue %s pSubmits[%u] has "
                                     "VK_SUBMIT_PROTECTED_BIT_KHR not set",
                                     report_data->FormatHandle(cb_state->commandBuffer).c_str(),
                                     report_data->FormatHandle(queue).c_str(), submit_idx);
                }
            }
        }
    }

    if (skip) return skip;

    // Now verify maxTimelineSemaphoreValueDifference
    for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
        const VkSubmitInfo2KHR *submit = &pSubmits[submit_idx];
        CoreErrorLocation outer_loc(ErrFunc::vkQueueSubmit2KHR, RefPage::VkSubmitInfo2KHR, Field::pSubmits, submit_idx);
        // Any timeline semaphore values were already validated before the early return above.
        for (uint32_t i = 0; i < submit->waitSemaphoreInfoCount; ++i) {
            const auto *sem_info = &submit->pWaitSemaphoreInfos[i];
            auto loc = outer_loc.dot(Field::pWaitSemaphoreInfos, i);
            skip |= ValidateMaxTimelineSemaphoreValueDifference(loc.dot(Field::semaphore), sem_info->semaphore, sem_info->value);
        }
        for (uint32_t i = 0; i < submit->signalSemaphoreInfoCount; ++i) {
            const auto *sem_info = &submit->pSignalSemaphoreInfos[i];
            auto loc = outer_loc.dot(Field::pSignalSemaphoreInfos, i);
            skip |= ValidateMaxTimelineSemaphoreValueDifference(loc.dot(Field::semaphore), sem_info->semaphore, sem_info->value);
        }
    }

    return skip;
}

#ifdef AHB_VALIDATION_SUPPORT
// Android-specific validation that uses types defined only on Android and only for NDK versions
// that support the VK_ANDROID_external_memory_android_hardware_buffer extension.
// This chunk could move into a separate core_validation_android.cpp file... ?

// clang-format off

// Map external format and usage flags to/from equivalent Vulkan flags
// (Tables as of v1.1.92)

// AHardwareBuffer Format                       Vulkan Format
// ======================                       =============
// AHARDWAREBUFFER_FORMAT_R8G8B8A8_UNORM        VK_FORMAT_R8G8B8A8_UNORM
// AHARDWAREBUFFER_FORMAT_R8G8B8X8_UNORM        VK_FORMAT_R8G8B8A8_UNORM
// AHARDWAREBUFFER_FORMAT_R8G8B8_UNORM          VK_FORMAT_R8G8B8_UNORM
// AHARDWAREBUFFER_FORMAT_R5G6B5_UNORM          VK_FORMAT_R5G6B5_UNORM_PACK16
// AHARDWAREBUFFER_FORMAT_R16G16B16A16_FLOAT    VK_FORMAT_R16G16B16A16_SFLOAT
// AHARDWAREBUFFER_FORMAT_R10G10B10A2_UNORM     VK_FORMAT_A2B10G10R10_UNORM_PACK32
// AHARDWAREBUFFER_FORMAT_D16_UNORM             VK_FORMAT_D16_UNORM
// AHARDWAREBUFFER_FORMAT_D24_UNORM             VK_FORMAT_X8_D24_UNORM_PACK32
// AHARDWAREBUFFER_FORMAT_D24_UNORM_S8_UINT     VK_FORMAT_D24_UNORM_S8_UINT
// AHARDWAREBUFFER_FORMAT_D32_FLOAT             VK_FORMAT_D32_SFLOAT
// AHARDWAREBUFFER_FORMAT_D32_FLOAT_S8_UINT     VK_FORMAT_D32_SFLOAT_S8_UINT
// AHARDWAREBUFFER_FORMAT_S8_UINT               VK_FORMAT_S8_UINT

// The AHARDWAREBUFFER_FORMAT_* are an enum in the NDK headers, but get passed in to Vulkan
// as uint32_t. Casting the enums here avoids scattering casts around in the code.
std::map<uint32_t, VkFormat> ahb_format_map_a2v = {
    { (uint32_t)AHARDWAREBUFFER_FORMAT_R8G8B8A8_UNORM,        VK_FORMAT_R8G8B8A8_UNORM },
    { (uint32_t)AHARDWAREBUFFER_FORMAT_R8G8B8X8_UNORM,        VK_FORMAT_R8G8B8A8_UNORM },
    { (uint32_t)AHARDWAREBUFFER_FORMAT_R8G8B8_UNORM,          VK_FORMAT_R8G8B8_UNORM },
    { (uint32_t)AHARDWAREBUFFER_FORMAT_R5G6B5_UNORM,          VK_FORMAT_R5G6B5_UNORM_PACK16 },
    { (uint32_t)AHARDWAREBUFFER_FORMAT_R16G16B16A16_FLOAT,    VK_FORMAT_R16G16B16A16_SFLOAT },
    { (uint32_t)AHARDWAREBUFFER_FORMAT_R10G10B10A2_UNORM,     VK_FORMAT_A2B10G10R10_UNORM_PACK32 },
    { (uint32_t)AHARDWAREBUFFER_FORMAT_D16_UNORM,             VK_FORMAT_D16_UNORM },
    { (uint32_t)AHARDWAREBUFFER_FORMAT_D24_UNORM,             VK_FORMAT_X8_D24_UNORM_PACK32 },
    { (uint32_t)AHARDWAREBUFFER_FORMAT_D24_UNORM_S8_UINT,     VK_FORMAT_D24_UNORM_S8_UINT },
    { (uint32_t)AHARDWAREBUFFER_FORMAT_D32_FLOAT,             VK_FORMAT_D32_SFLOAT },
    { (uint32_t)AHARDWAREBUFFER_FORMAT_D32_FLOAT_S8_UINT,     VK_FORMAT_D32_SFLOAT_S8_UINT },
    { (uint32_t)AHARDWAREBUFFER_FORMAT_S8_UINT,               VK_FORMAT_S8_UINT }
};

// AHardwareBuffer Usage                        Vulkan Usage or Creation Flag (Intermixed - Aargh!)
// =====================                        ===================================================
// None                                         VK_IMAGE_USAGE_TRANSFER_SRC_BIT
// None                                         VK_IMAGE_USAGE_TRANSFER_DST_BIT
// AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE      VK_IMAGE_USAGE_SAMPLED_BIT
// AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE      VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT
// AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER        VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT
// AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER        VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT
// AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP           VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT
// AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE    None
// AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT      VK_IMAGE_CREATE_PROTECTED_BIT
// None                                         VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT
// None                                         VK_IMAGE_CREATE_EXTENDED_USAGE_BIT

// Same casting rationale. De-mixing the table to prevent type confusion and aliasing
std::map<uint64_t, VkImageUsageFlags> ahb_usage_map_a2v = {
    { (uint64_t)AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE,    (VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT) },
    { (uint64_t)AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER,      (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) },
    { (uint64_t)AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE,  0 },   // No equivalent
};

std::map<uint64_t, VkImageCreateFlags> ahb_create_map_a2v = {
    { (uint64_t)AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP,         VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT },
    { (uint64_t)AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT,    VK_IMAGE_CREATE_PROTECTED_BIT },
    { (uint64_t)AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE,  0 },   // No equivalent
};

std::map<VkImageUsageFlags, uint64_t> ahb_usage_map_v2a = {
    { VK_IMAGE_USAGE_SAMPLED_BIT,                   (uint64_t)AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE },
    { VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT,          (uint64_t)AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE },
    { VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,          (uint64_t)AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER },
    { VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT,  (uint64_t)AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER },
};

std::map<VkImageCreateFlags, uint64_t> ahb_create_map_v2a = {
    { VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT,  (uint64_t)AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP },
    { VK_IMAGE_CREATE_PROTECTED_BIT,        (uint64_t)AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT },
};

// clang-format on

//
// AHB-extension new APIs
//
bool CoreChecks::PreCallValidateGetAndroidHardwareBufferPropertiesANDROID(
    VkDevice device, const struct AHardwareBuffer *buffer, VkAndroidHardwareBufferPropertiesANDROID *pProperties) const {
    bool skip = false;
    //  buffer must be a valid Android hardware buffer object with at least one of the AHARDWAREBUFFER_USAGE_GPU_* usage flags.
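    //  AHardwareBuffer_describe() is the NDK query that fills in the buffer's width/height/format/usage;
    //  only the usage bits are consumed here.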
    AHardwareBuffer_Desc ahb_desc;
    AHardwareBuffer_describe(buffer, &ahb_desc);

    uint32_t required_flags = AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE | AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER |
                              AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP | AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE |
                              AHARDWAREBUFFER_USAGE_GPU_DATA_BUFFER;
    if (0 == (ahb_desc.usage & required_flags)) {
        skip |= LogError(device, "VUID-vkGetAndroidHardwareBufferPropertiesANDROID-buffer-01884",
                         "vkGetAndroidHardwareBufferPropertiesANDROID: The AHardwareBuffer's AHardwareBuffer_Desc.usage (0x%" PRIx64
                         ") does not have any AHARDWAREBUFFER_USAGE_GPU_* flags set.",
                         ahb_desc.usage);
    }

    return skip;
}

bool CoreChecks::PreCallValidateGetMemoryAndroidHardwareBufferANDROID(VkDevice device,
                                                                      const VkMemoryGetAndroidHardwareBufferInfoANDROID *pInfo,
                                                                      struct AHardwareBuffer **pBuffer) const {
    bool skip = false;
    const DEVICE_MEMORY_STATE *mem_info = GetDevMemState(pInfo->memory);
    // Nothing to validate against an unknown memory handle
    if (!mem_info) return skip;

    // VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID must have been included in
    // VkExportMemoryAllocateInfo::handleTypes when memory was created.
    if (!mem_info->is_export ||
        (0 == (mem_info->export_handle_type_flags & VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID))) {
        skip |= LogError(device, "VUID-VkMemoryGetAndroidHardwareBufferInfoANDROID-handleTypes-01882",
                         "vkGetMemoryAndroidHardwareBufferANDROID: %s was not allocated for export, or the "
                         "export handleTypes (0x%" PRIx32
                         ") did not contain VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID.",
                         report_data->FormatHandle(pInfo->memory).c_str(), mem_info->export_handle_type_flags);
    }

    // If the pNext chain of the VkMemoryAllocateInfo used to allocate memory included a VkMemoryDedicatedAllocateInfo
    // with non-NULL image member, then that image must already be bound to memory.
    if (mem_info->is_dedicated && (VK_NULL_HANDLE != mem_info->dedicated_image)) {
        const auto image_state = GetImageState(mem_info->dedicated_image);
        // count() requires a DEVICE_MEMORY_STATE* const & or a DEVICE_MEMORY_STATE*, not a const DEVICE_MEMORY_STATE*.
        // This is a const member function, so only a const DEVICE_MEMORY_STATE* is available; cast it.
        if ((nullptr == image_state) || (0 == (image_state->GetBoundMemory().count((DEVICE_MEMORY_STATE *)mem_info)))) {
            LogObjectList objlist(device);
            objlist.add(pInfo->memory);
            objlist.add(mem_info->dedicated_image);
            skip |= LogError(objlist, "VUID-VkMemoryGetAndroidHardwareBufferInfoANDROID-pNext-01883",
                             "vkGetMemoryAndroidHardwareBufferANDROID: %s was allocated using a dedicated "
                             "%s, but that image is not bound to the VkDeviceMemory object.",
                             report_data->FormatHandle(pInfo->memory).c_str(),
                             report_data->FormatHandle(mem_info->dedicated_image).c_str());
        }
    }

    return skip;
}

//
// AHB-specific validation within non-AHB APIs
//
bool CoreChecks::ValidateAllocateMemoryANDROID(const VkMemoryAllocateInfo *alloc_info) const {
    bool skip = false;
    auto import_ahb_info = LvlFindInChain<VkImportAndroidHardwareBufferInfoANDROID>(alloc_info->pNext);
    auto exp_mem_alloc_info = LvlFindInChain<VkExportMemoryAllocateInfo>(alloc_info->pNext);
    auto mem_ded_alloc_info = LvlFindInChain<VkMemoryDedicatedAllocateInfo>(alloc_info->pNext);

    if ((import_ahb_info) && (NULL != import_ahb_info->buffer)) {
        // This is an import with handleType of VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID
        AHardwareBuffer_Desc ahb_desc = {};
        AHardwareBuffer_describe(import_ahb_info->buffer, &ahb_desc);

        //  Validate AHardwareBuffer_Desc::usage is a valid usage for imported AHB
        //
        //  BLOB & GPU_DATA_BUFFER combo specifically allowed
        if ((AHARDWAREBUFFER_FORMAT_BLOB != ahb_desc.format) || (0 == (ahb_desc.usage & AHARDWAREBUFFER_USAGE_GPU_DATA_BUFFER))) {
            // Otherwise, must be a combination from the AHardwareBuffer Format and Usage Equivalence tables
            // Usage must have at least one bit from the table. It may have additional bits not in the table
            uint64_t ahb_equiv_usage_bits = AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE | AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER |
                                            AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP | AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE |
                                            AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT;
            if (0 == (ahb_desc.usage & ahb_equiv_usage_bits)) {
                skip |=
                    LogError(device, "VUID-VkImportAndroidHardwareBufferInfoANDROID-buffer-01881",
                             "vkAllocateMemory: The AHardwareBuffer_Desc's usage (0x%" PRIx64 ") is not compatible with Vulkan.",
                             ahb_desc.usage);
            }
        }

        // Collect external buffer info
        VkPhysicalDeviceExternalBufferInfo pdebi = {};
        pdebi.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_BUFFER_INFO;
        pdebi.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID;
        if (AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE & ahb_desc.usage) {
            pdebi.usage |= ahb_usage_map_a2v[AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE];
        }
        if (AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER & ahb_desc.usage) {
            pdebi.usage |= ahb_usage_map_a2v[AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER];
        }
        VkExternalBufferProperties ext_buf_props = {};
        ext_buf_props.sType = VK_STRUCTURE_TYPE_EXTERNAL_BUFFER_PROPERTIES;

        DispatchGetPhysicalDeviceExternalBufferProperties(physical_device, &pdebi, &ext_buf_props);

        //  If buffer is not NULL, Android hardware buffers must be supported for import, as reported by
        //  VkExternalImageFormatProperties or VkExternalBufferProperties.
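        //  The buffer-level query above may not report IMPORTABLE; before flagging an error, retry via the
        //  image-format path, rebuilding a plausible VkPhysicalDeviceImageFormatInfo2 from the AHB description
        //  (the 2D / optimal-tiling values below are the code's own stated guesses).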
        if (0 == (ext_buf_props.externalMemoryProperties.externalMemoryFeatures & VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT)) {
            // Collect external format info
            VkPhysicalDeviceExternalImageFormatInfo pdeifi = {};
            pdeifi.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO;
            pdeifi.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID;
            VkPhysicalDeviceImageFormatInfo2 pdifi2 = {};
            pdifi2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2;
            pdifi2.pNext = &pdeifi;
            if (0 < ahb_format_map_a2v.count(ahb_desc.format)) pdifi2.format = ahb_format_map_a2v[ahb_desc.format];
            pdifi2.type = VK_IMAGE_TYPE_2D;           // Seems likely
            pdifi2.tiling = VK_IMAGE_TILING_OPTIMAL;  // Ditto
            if (AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE & ahb_desc.usage) {
                pdifi2.usage |= ahb_usage_map_a2v[AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE];
            }
            if (AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER & ahb_desc.usage) {
                pdifi2.usage |= ahb_usage_map_a2v[AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER];
            }
            if (AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP & ahb_desc.usage) {
                pdifi2.flags |= ahb_create_map_a2v[AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP];
            }
            if (AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT & ahb_desc.usage) {
                pdifi2.flags |= ahb_create_map_a2v[AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT];
            }

            VkExternalImageFormatProperties ext_img_fmt_props = {};
            ext_img_fmt_props.sType = VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES;
            VkImageFormatProperties2 ifp2 = {};
            ifp2.sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2;
            ifp2.pNext = &ext_img_fmt_props;

            VkResult fmt_lookup_result = DispatchGetPhysicalDeviceImageFormatProperties2(physical_device, &pdifi2, &ifp2);

            if ((VK_SUCCESS != fmt_lookup_result) || (0 == (ext_img_fmt_props.externalMemoryProperties.externalMemoryFeatures &
                                                            VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT))) {
                skip |= LogError(device, "VUID-VkImportAndroidHardwareBufferInfoANDROID-buffer-01880",
                                 "vkAllocateMemory: Neither the VkExternalImageFormatProperties nor the VkExternalBufferProperties "
                                 "structs for the AHardwareBuffer include the VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT flag.");
            }
        }

        // Retrieve buffer and format properties of the provided AHardwareBuffer
        VkAndroidHardwareBufferFormatPropertiesANDROID ahb_format_props = {};
        ahb_format_props.sType = VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_FORMAT_PROPERTIES_ANDROID;
        VkAndroidHardwareBufferPropertiesANDROID ahb_props = {};
        ahb_props.sType = VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_PROPERTIES_ANDROID;
        ahb_props.pNext = &ahb_format_props;
        DispatchGetAndroidHardwareBufferPropertiesANDROID(device, import_ahb_info->buffer, &ahb_props);

        // allocationSize must be the size returned by vkGetAndroidHardwareBufferPropertiesANDROID for the Android hardware buffer
        if (alloc_info->allocationSize != ahb_props.allocationSize) {
            skip |= LogError(device, "VUID-VkMemoryAllocateInfo-allocationSize-02383",
                             "vkAllocateMemory: VkMemoryAllocateInfo struct with chained VkImportAndroidHardwareBufferInfoANDROID "
                             "struct, allocationSize (%" PRIu64
                             ") does not match the AHardwareBuffer's reported allocationSize (%" PRIu64 ").",
                             alloc_info->allocationSize, ahb_props.allocationSize);
        }

        // memoryTypeIndex must be one of those returned by vkGetAndroidHardwareBufferPropertiesANDROID for the AHardwareBuffer
        // Note: memoryTypeIndex is an index, memoryTypeBits is a bitmask
        uint32_t mem_type_bitmask = 1 << alloc_info->memoryTypeIndex;
        if (0 == (mem_type_bitmask & ahb_props.memoryTypeBits)) {
            skip |= LogError(device, "VUID-VkMemoryAllocateInfo-memoryTypeIndex-02385",
                             "vkAllocateMemory: VkMemoryAllocateInfo struct with chained VkImportAndroidHardwareBufferInfoANDROID "
                             "struct, memoryTypeIndex (%" PRIu32
                             ") does not correspond to a bit set in AHardwareBuffer's reported "
                             "memoryTypeBits bitmask (0x%" PRIx32 ").",
                             alloc_info->memoryTypeIndex, ahb_props.memoryTypeBits);
        }

        // Checks for allocations without a dedicated allocation requirement
        if ((nullptr == mem_ded_alloc_info) || (VK_NULL_HANDLE == mem_ded_alloc_info->image)) {
            // the Android hardware buffer must have a format of AHARDWAREBUFFER_FORMAT_BLOB and a usage that includes
            // AHARDWAREBUFFER_USAGE_GPU_DATA_BUFFER
            if (((uint64_t)AHARDWAREBUFFER_FORMAT_BLOB != ahb_desc.format) ||
                (0 == (ahb_desc.usage & AHARDWAREBUFFER_USAGE_GPU_DATA_BUFFER))) {
                skip |= LogError(
                    device, "VUID-VkMemoryAllocateInfo-pNext-02384",
                    "vkAllocateMemory: VkMemoryAllocateInfo struct with chained VkImportAndroidHardwareBufferInfoANDROID "
                    "struct without a dedicated allocation requirement, while the AHardwareBuffer_Desc's format ( %u ) is not "
                    "AHARDWAREBUFFER_FORMAT_BLOB or usage (0x%" PRIx64 ") does not include AHARDWAREBUFFER_USAGE_GPU_DATA_BUFFER.",
                    ahb_desc.format, ahb_desc.usage);
            }
        } else {  // Checks specific to import with a dedicated allocation requirement
            const VkImageCreateInfo *ici = &(GetImageState(mem_ded_alloc_info->image)->createInfo);

            // The Android hardware buffer's usage must include at least one of AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER or
            // AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE
            if (0 == (ahb_desc.usage & (AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER | AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE))) {
                skip |= LogError(
                    device, "VUID-VkMemoryAllocateInfo-pNext-02386",
                    "vkAllocateMemory: VkMemoryAllocateInfo struct with chained VkImportAndroidHardwareBufferInfoANDROID and a "
                    "dedicated allocation requirement, while the AHardwareBuffer's usage (0x%" PRIx64
                    ") contains neither AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER nor AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE.",
                    ahb_desc.usage);
            }

            //  the format of image must be VK_FORMAT_UNDEFINED or the format returned by
            //  vkGetAndroidHardwareBufferPropertiesANDROID
            if ((ici->format != ahb_format_props.format) && (VK_FORMAT_UNDEFINED != ici->format)) {
                skip |= LogError(device, "VUID-VkMemoryAllocateInfo-pNext-02387",
                                 "vkAllocateMemory: VkMemoryAllocateInfo struct with chained "
                                 "VkImportAndroidHardwareBufferInfoANDROID, the dedicated allocation image's "
                                 "format (%s) is not VK_FORMAT_UNDEFINED and does not match the AHardwareBuffer's format (%s).",
                                 string_VkFormat(ici->format), string_VkFormat(ahb_format_props.format));
            }

            // The width, height, and array layer dimensions of image and the Android hardware buffer must be identical
            if ((ici->extent.width != ahb_desc.width) || (ici->extent.height != ahb_desc.height) ||
                (ici->arrayLayers != ahb_desc.layers)) {
                skip |= LogError(device, "VUID-VkMemoryAllocateInfo-pNext-02388",
                                 "vkAllocateMemory: VkMemoryAllocateInfo struct with chained "
                                 "VkImportAndroidHardwareBufferInfoANDROID, the dedicated allocation image's "
                                 "width, height, and arrayLayers (%" PRId32 " %" PRId32 " %" PRId32
                                 ") do not match those of the AHardwareBuffer (%" PRId32 " %" PRId32 " %" PRId32 ").",
                                 ici->extent.width, ici->extent.height, ici->arrayLayers, ahb_desc.width, ahb_desc.height,
                                 ahb_desc.layers);
            }

            // If the Android hardware buffer's usage includes AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE, the image must
            // have either a full mipmap chain or exactly 1 mip level.
            //
            // NOTE! The language of this VUID contradicts the language in the spec (1.1.93), which says "The
            // AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE flag does not correspond to a Vulkan image usage or creation flag.
            // Instead, its presence indicates that the Android hardware buffer contains a complete mipmap chain, and its absence
            // indicates that the Android hardware buffer contains only a single mip level."
            //
            // TODO: This code implements the VUID's meaning, but it seems likely that the spec text is actually correct.
            // Clarification requested.
            if ((ahb_desc.usage & AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE) && (ici->mipLevels != 1) &&
                (ici->mipLevels != FullMipChainLevels(ici->extent))) {
                skip |= LogError(device, "VUID-VkMemoryAllocateInfo-pNext-02389",
                                 "vkAllocateMemory: VkMemoryAllocateInfo struct with chained "
                                 "VkImportAndroidHardwareBufferInfoANDROID, "
                                 "usage includes AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE but mipLevels (%" PRId32
                                 ") is neither 1 nor full mip chain levels (%" PRId32 ").",
                                 ici->mipLevels, FullMipChainLevels(ici->extent));
            }

            // each bit set in the usage of image must be listed in AHardwareBuffer Usage Equivalence, and if there is a
            // corresponding AHARDWAREBUFFER_USAGE bit listed that bit must be included in the Android hardware buffer's
            // AHardwareBuffer_Desc::usage
            if (ici->usage & ~(VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT |
                               VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
                               VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
                skip |= LogError(device, "VUID-VkMemoryAllocateInfo-pNext-02390",
                                 "vkAllocateMemory: VkMemoryAllocateInfo struct with chained "
                                 "VkImportAndroidHardwareBufferInfoANDROID, "
                                 "dedicated image usage bits (0x%" PRIx32
                                 ") include a usage bit not listed in the AHardwareBuffer Usage Equivalence table.",
                                 ici->usage);
            }

            std::vector<VkImageUsageFlags> usages = {VK_IMAGE_USAGE_SAMPLED_BIT, VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT,
                                                     VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
                                                     VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT};
            for (VkImageUsageFlags ubit : usages) {
                if (ici->usage & ubit) {
                    uint64_t ahb_usage = ahb_usage_map_v2a[ubit];
                    if (0 == (ahb_usage & ahb_desc.usage)) {
                        skip |= LogError(device, "VUID-VkMemoryAllocateInfo-pNext-02390",
                                         "vkAllocateMemory: VkMemoryAllocateInfo struct with chained "
                                         "VkImportAndroidHardwareBufferInfoANDROID, "
                                         "The dedicated image usage bit %s equivalent is not in AHardwareBuffer_Desc.usage "
                                         "(0x%" PRIx64 ") ",
                                         string_VkImageUsageFlags(ubit).c_str(), ahb_desc.usage);
                    }
                }
            }
        }
    } else {  // Not an import
        if ((exp_mem_alloc_info) && (mem_ded_alloc_info) &&
            (0 != (VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID & exp_mem_alloc_info->handleTypes)) &&
            (VK_NULL_HANDLE != mem_ded_alloc_info->image)) {
            // This is an Android HW Buffer export
            if (0 != alloc_info->allocationSize) {
                skip |= LogError(device, "VUID-VkMemoryAllocateInfo-pNext-01874",
                                 "vkAllocateMemory: pNext chain indicates a dedicated Android Hardware Buffer export allocation, "
                                 "but allocationSize is non-zero.");
            }
        } else {
            if (0 == alloc_info->allocationSize) {
                skip |= LogError(
                    device, "VUID-VkMemoryAllocateInfo-pNext-01874",
                    "vkAllocateMemory: pNext chain does not indicate a dedicated export allocation, but allocationSize is 0.");
            }
        }
    }
    return skip;
}
bool CoreChecks::ValidateGetImageMemoryRequirementsANDROID(const VkImage image, const char *func_name) const {
    bool skip = false;

    const IMAGE_STATE *image_state = GetImageState(image);
    if (image_state != nullptr) {
        if (image_state->external_ahb && (0 == image_state->GetBoundMemory().size())) {
            const char *vuid = strcmp(func_name, "vkGetImageMemoryRequirements()") == 0
                                   ? "VUID-vkGetImageMemoryRequirements-image-04004"
                                   : "VUID-VkImageMemoryRequirementsInfo2-image-01897";
            skip |= LogError(image, vuid,
                             "%s: Attempting to get image memory requirements for an image created with a "
                             "VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID handleType, which has not yet "
                             "been bound to memory.",
                             func_name);
        }
    }
    return skip;
}

bool CoreChecks::ValidateGetPhysicalDeviceImageFormatProperties2ANDROID(
    const VkPhysicalDeviceImageFormatInfo2 *pImageFormatInfo, const VkImageFormatProperties2 *pImageFormatProperties) const {
    bool skip = false;
    const VkAndroidHardwareBufferUsageANDROID *ahb_usage =
        LvlFindInChain<VkAndroidHardwareBufferUsageANDROID>(pImageFormatProperties->pNext);
    if (nullptr != ahb_usage) {
        const VkPhysicalDeviceExternalImageFormatInfo *pdeifi =
            LvlFindInChain<VkPhysicalDeviceExternalImageFormatInfo>(pImageFormatInfo->pNext);
        if ((nullptr == pdeifi) || (VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID != pdeifi->handleType)) {
            skip |= LogError(device, "VUID-vkGetPhysicalDeviceImageFormatProperties2-pNext-01868",
                             "vkGetPhysicalDeviceImageFormatProperties2: pImageFormatProperties includes a chained "
                             "VkAndroidHardwareBufferUsageANDROID struct, but pImageFormatInfo does not include a chained "
                             "VkPhysicalDeviceExternalImageFormatInfo struct with handleType "
                             "VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID.");
        }
    }
    return skip;
}

bool CoreChecks::ValidateBufferImportedHandleANDROID(const char *func_name, VkExternalMemoryHandleTypeFlags handleType,
                                                     VkDeviceMemory memory, VkBuffer buffer) const {
    bool skip = false;
    if ((handleType & VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID) == 0) {
        const char *vuid = (strcmp(func_name, "vkBindBufferMemory()") == 0) ? "VUID-vkBindBufferMemory-memory-02986"
                                                                            : "VUID-VkBindBufferMemoryInfo-memory-02986";
        LogObjectList objlist(buffer);
        objlist.add(memory);
        skip |= LogError(objlist, vuid,
                         "%s: The VkDeviceMemory (%s) was created with an AHB import operation, but "
                         "VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID is not set in the VkBuffer (%s) "
                         "VkExternalMemoryBufferCreateInfo::handleType (%s)",
                         func_name, report_data->FormatHandle(memory).c_str(), report_data->FormatHandle(buffer).c_str(),
                         string_VkExternalMemoryHandleTypeFlags(handleType).c_str());
    }
    return skip;
}
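// Counterpart of the buffer check above: an AHB-imported VkDeviceMemory may only be bound to a VkImage whose
// VkExternalMemoryImageCreateInfo::handleTypes included the AHB external-memory bit.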
"VUID-vkBindImageMemory-memory-02990" : "VUID-VkBindImageMemoryInfo-memory-02990"; LogObjectList objlist(image); objlist.add(memory); skip |= LogError(objlist, vuid, "%s: The VkDeviceMemory (%s) was created with an AHB import operation which is not set " "VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID in the VkImage (%s) " "VkExternalMemoryImageCreateInfo::handleType (%s)", func_name, report_data->FormatHandle(memory).c_str(), report_data->FormatHandle(image).c_str(), string_VkExternalMemoryHandleTypeFlags(handleType).c_str()); } return skip; } #else // !AHB_VALIDATION_SUPPORT // Case building for Android without AHB Validation #ifdef VK_USE_PLATFORM_ANDROID_KHR bool CoreChecks::PreCallValidateGetAndroidHardwareBufferPropertiesANDROID( VkDevice device, const struct AHardwareBuffer *buffer, VkAndroidHardwareBufferPropertiesANDROID *pProperties) const { return false; } bool CoreChecks::PreCallValidateGetMemoryAndroidHardwareBufferANDROID(VkDevice device, const VkMemoryGetAndroidHardwareBufferInfoANDROID *pInfo, struct AHardwareBuffer **pBuffer) const { return false; } #endif // VK_USE_PLATFORM_ANDROID_KHR bool CoreChecks::ValidateAllocateMemoryANDROID(const VkMemoryAllocateInfo *alloc_info) const { return false; } bool CoreChecks::ValidateGetPhysicalDeviceImageFormatProperties2ANDROID( const VkPhysicalDeviceImageFormatInfo2 *pImageFormatInfo, const VkImageFormatProperties2 *pImageFormatProperties) const { return false; } bool CoreChecks::ValidateGetImageMemoryRequirementsANDROID(const VkImage image, const char *func_name) const { return false; } bool CoreChecks::ValidateBufferImportedHandleANDROID(const char *func_name, VkExternalMemoryHandleTypeFlags handleType, VkDeviceMemory memory, VkBuffer buffer) const { return false; } bool CoreChecks::ValidateImageImportedHandleANDROID(const char *func_name, VkExternalMemoryHandleTypeFlags handleType, VkDeviceMemory memory, VkImage image) const { return false; } #endif // AHB_VALIDATION_SUPPORT bool CoreChecks::PreCallValidateAllocateMemory(VkDevice device, const VkMemoryAllocateInfo *pAllocateInfo, const VkAllocationCallbacks *pAllocator, VkDeviceMemory *pMemory) const { bool skip = false; if (memObjMap.size() >= phys_dev_props.limits.maxMemoryAllocationCount) { skip |= LogError(device, "VUID-vkAllocateMemory-maxMemoryAllocationCount-04101", "vkAllocateMemory: Number of currently valid memory objects is not less than the maximum allowed (%u).", phys_dev_props.limits.maxMemoryAllocationCount); } if (device_extensions.vk_android_external_memory_android_hardware_buffer) { skip |= ValidateAllocateMemoryANDROID(pAllocateInfo); } else { if (0 == pAllocateInfo->allocationSize) { skip |= LogError(device, "VUID-VkMemoryAllocateInfo-allocationSize-00638", "vkAllocateMemory: allocationSize is 0."); }; } auto chained_flags_struct = LvlFindInChain<VkMemoryAllocateFlagsInfo>(pAllocateInfo->pNext); if (chained_flags_struct && chained_flags_struct->flags == VK_MEMORY_ALLOCATE_DEVICE_MASK_BIT) { skip |= ValidateDeviceMaskToPhysicalDeviceCount(chained_flags_struct->deviceMask, device, "VUID-VkMemoryAllocateFlagsInfo-deviceMask-00675"); skip |= ValidateDeviceMaskToZero(chained_flags_struct->deviceMask, device, "VUID-VkMemoryAllocateFlagsInfo-deviceMask-00676"); } if (pAllocateInfo->memoryTypeIndex >= phys_dev_mem_props.memoryTypeCount) { skip |= LogError(device, "VUID-vkAllocateMemory-pAllocateInfo-01714", "vkAllocateMemory: attempting to allocate memory type %u, which is not a valid index. 
Device only " "advertises %u memory types.", pAllocateInfo->memoryTypeIndex, phys_dev_mem_props.memoryTypeCount); } else { const VkMemoryType memory_type = phys_dev_mem_props.memoryTypes[pAllocateInfo->memoryTypeIndex]; if (pAllocateInfo->allocationSize > phys_dev_mem_props.memoryHeaps[memory_type.heapIndex].size) { skip |= LogError(device, "VUID-vkAllocateMemory-pAllocateInfo-01713", "vkAllocateMemory: attempting to allocate %" PRIu64 " bytes from heap %u," "but size of that heap is only %" PRIu64 " bytes.", pAllocateInfo->allocationSize, memory_type.heapIndex, phys_dev_mem_props.memoryHeaps[memory_type.heapIndex].size); } if (!enabled_features.device_coherent_memory_features.deviceCoherentMemory && ((memory_type.propertyFlags & VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD) != 0)) { skip |= LogError(device, "VUID-vkAllocateMemory-deviceCoherentMemory-02790", "vkAllocateMemory: attempting to allocate memory type %u, which includes the " "VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD memory property, but the deviceCoherentMemory feature " "is not enabled.", pAllocateInfo->memoryTypeIndex); } if ((enabled_features.core11.protectedMemory == VK_FALSE) && ((memory_type.propertyFlags & VK_MEMORY_PROPERTY_PROTECTED_BIT) != 0)) { skip |= LogError(device, "VUID-VkMemoryAllocateInfo-memoryTypeIndex-01872", "vkAllocateMemory(): attempting to allocate memory type %u, which includes the " "VK_MEMORY_PROPERTY_PROTECTED_BIT memory property, but the protectedMemory feature " "is not enabled.", pAllocateInfo->memoryTypeIndex); } } bool imported_ahb = false; #ifdef AHB_VALIDATION_SUPPORT // "memory is not an imported Android Hardware Buffer" refers to VkImportAndroidHardwareBufferInfoANDROID with a non-NULL // buffer value. Memory imported has another VUID to check size and allocationSize match up auto imported_ahb_info = LvlFindInChain<VkImportAndroidHardwareBufferInfoANDROID>(pAllocateInfo->pNext); if (imported_ahb_info != nullptr) { imported_ahb = imported_ahb_info->buffer != nullptr; } #endif // AHB_VALIDATION_SUPPORT auto dedicated_allocate_info = LvlFindInChain<VkMemoryDedicatedAllocateInfo>(pAllocateInfo->pNext); if (dedicated_allocate_info) { if ((dedicated_allocate_info->buffer != VK_NULL_HANDLE) && (dedicated_allocate_info->image != VK_NULL_HANDLE)) { skip |= LogError(device, "VUID-VkMemoryDedicatedAllocateInfo-image-01432", "vkAllocateMemory: Either buffer or image has to be VK_NULL_HANDLE in VkMemoryDedicatedAllocateInfo"); } else if (dedicated_allocate_info->image != VK_NULL_HANDLE) { // Dedicated VkImage const IMAGE_STATE *image_state = GetImageState(dedicated_allocate_info->image); if (image_state->disjoint == true) { skip |= LogError( device, "VUID-VkMemoryDedicatedAllocateInfo-image-01797", "vkAllocateMemory: VkImage %s can't be used in VkMemoryDedicatedAllocateInfo because it was created with " "VK_IMAGE_CREATE_DISJOINT_BIT", report_data->FormatHandle(dedicated_allocate_info->image).c_str()); } else { if ((pAllocateInfo->allocationSize != image_state->requirements.size) && (imported_ahb == false)) { const char *vuid = (device_extensions.vk_android_external_memory_android_hardware_buffer) ? 
"VUID-VkMemoryDedicatedAllocateInfo-image-02964" : "VUID-VkMemoryDedicatedAllocateInfo-image-01433"; skip |= LogError( device, vuid, "vkAllocateMemory: Allocation Size (%u) needs to be equal to VkImage %s VkMemoryRequirements::size (%u)", pAllocateInfo->allocationSize, report_data->FormatHandle(dedicated_allocate_info->image).c_str(), image_state->requirements.size); } if ((image_state->createInfo.flags & VK_IMAGE_CREATE_SPARSE_BINDING_BIT) != 0) { skip |= LogError( device, "VUID-VkMemoryDedicatedAllocateInfo-image-01434", "vkAllocateMemory: VkImage %s can't be used in VkMemoryDedicatedAllocateInfo because it was created with " "VK_IMAGE_CREATE_SPARSE_BINDING_BIT", report_data->FormatHandle(dedicated_allocate_info->image).c_str()); } } } else if (dedicated_allocate_info->buffer != VK_NULL_HANDLE) { // Dedicated VkBuffer const BUFFER_STATE *buffer_state = GetBufferState(dedicated_allocate_info->buffer); if ((pAllocateInfo->allocationSize != buffer_state->requirements.size) && (imported_ahb == false)) { const char *vuid = (device_extensions.vk_android_external_memory_android_hardware_buffer) ? "VUID-VkMemoryDedicatedAllocateInfo-buffer-02965" : "VUID-VkMemoryDedicatedAllocateInfo-buffer-01435"; skip |= LogError( device, vuid, "vkAllocateMemory: Allocation Size (%u) needs to be equal to VkBuffer %s VkMemoryRequirements::size (%u)", pAllocateInfo->allocationSize, report_data->FormatHandle(dedicated_allocate_info->buffer).c_str(), buffer_state->requirements.size); } if ((buffer_state->createInfo.flags & VK_BUFFER_CREATE_SPARSE_BINDING_BIT) != 0) { skip |= LogError( device, "VUID-VkMemoryDedicatedAllocateInfo-buffer-01436", "vkAllocateMemory: VkBuffer %s can't be used in VkMemoryDedicatedAllocateInfo because it was created with " "VK_BUFFER_CREATE_SPARSE_BINDING_BIT", report_data->FormatHandle(dedicated_allocate_info->buffer).c_str()); } } } // TODO: VUIDs ending in 00643, 00644, 00646, 00647, 01742, 01743, 01745, 00645, 00648, 01744 return skip; } // For given obj node, if it is use, flag a validation error and return callback result, else return false bool CoreChecks::ValidateObjectNotInUse(const BASE_NODE *obj_node, const VulkanTypedHandle &obj_struct, const char *caller_name, const char *error_code) const { if (disabled[object_in_use]) return false; bool skip = false; if (obj_node->in_use.load()) { skip |= LogError(device, error_code, "Cannot call %s on %s that is currently in use by a command buffer.", caller_name, report_data->FormatHandle(obj_struct).c_str()); } return skip; } bool CoreChecks::PreCallValidateFreeMemory(VkDevice device, VkDeviceMemory mem, const VkAllocationCallbacks *pAllocator) const { const DEVICE_MEMORY_STATE *mem_info = GetDevMemState(mem); const VulkanTypedHandle obj_struct(mem, kVulkanObjectTypeDeviceMemory); bool skip = false; if (mem_info) { skip |= ValidateObjectNotInUse(mem_info, obj_struct, "vkFreeMemory", "VUID-vkFreeMemory-memory-00677"); } return skip; } // Validate that given Map memory range is valid. This means that the memory should not already be mapped, // and that the size of the map range should be: // 1. Not zero // 2. 
Within the size of the memory allocation bool CoreChecks::ValidateMapMemRange(const DEVICE_MEMORY_STATE *mem_info, VkDeviceSize offset, VkDeviceSize size) const { bool skip = false; assert(mem_info); const auto mem = mem_info->mem; if (size == 0) { skip = LogError(mem, "VUID-vkMapMemory-size-00680", "VkMapMemory: Attempting to map memory range of size zero"); } // It is an application error to call VkMapMemory on an object that is already mapped if (mem_info->mapped_range.size != 0) { skip = LogError(mem, "VUID-vkMapMemory-memory-00678", "VkMapMemory: Attempting to map memory on an already-mapped %s.", report_data->FormatHandle(mem).c_str()); } // Validate offset is not over allocation size if (offset >= mem_info->alloc_info.allocationSize) { skip = LogError(mem, "VUID-vkMapMemory-offset-00679", "VkMapMemory: Attempting to map memory with an offset of 0x%" PRIx64 " which is larger than the total array size 0x%" PRIx64, offset, mem_info->alloc_info.allocationSize); } // Validate that offset + size is within object's allocationSize if (size != VK_WHOLE_SIZE) { if ((offset + size) > mem_info->alloc_info.allocationSize) { skip = LogError(mem, "VUID-vkMapMemory-size-00681", "VkMapMemory: Mapping Memory from 0x%" PRIx64 " to 0x%" PRIx64 " oversteps total array size 0x%" PRIx64 ".", offset, size + offset, mem_info->alloc_info.allocationSize); } } return skip; } bool CoreChecks::PreCallValidateWaitForFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences, VkBool32 waitAll, uint64_t timeout) const { // Verify fence status of submitted fences bool skip = false; for (uint32_t i = 0; i < fenceCount; i++) { skip |= VerifyQueueStateToFence(pFences[i]); } return skip; } bool CoreChecks::PreCallValidateGetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex, VkQueue *pQueue) const { bool skip = false; skip |= ValidateDeviceQueueFamily(queueFamilyIndex, "vkGetDeviceQueue", "queueFamilyIndex", "VUID-vkGetDeviceQueue-queueFamilyIndex-00384"); const auto &queue_data = queue_family_index_map.find(queueFamilyIndex); if ((queue_data != queue_family_index_map.end()) && (queue_data->second <= queueIndex)) { skip |= LogError(device, "VUID-vkGetDeviceQueue-queueIndex-00385", "vkGetDeviceQueue: queueIndex (=%" PRIu32 ") is not less than the number of queues requested from queueFamilyIndex (=%" PRIu32 ") when the device was created (i.e. is not less than %" PRIu32 ").", queueIndex, queueFamilyIndex, queue_data->second); } const auto &queue_flags = queue_family_create_flags_map.find(queueFamilyIndex); if ((queue_flags != queue_family_create_flags_map.end()) && (queue_flags->second != 0)) { skip |= LogError(device, "VUID-vkGetDeviceQueue-flags-01841", "vkGetDeviceQueue: queueIndex (=%" PRIu32 ") was created with a non-zero VkDeviceQueueCreateFlags.
Need to use vkGetDeviceQueue2 instead.", queueIndex); } return skip; } bool CoreChecks::PreCallValidateQueueWaitIdle(VkQueue queue) const { const QUEUE_STATE *queue_state = GetQueueState(queue); return VerifyQueueStateToSeq(queue_state, queue_state->seq + queue_state->submissions.size()); } bool CoreChecks::PreCallValidateDeviceWaitIdle(VkDevice device) const { bool skip = false; const auto &const_queue_map = queueMap; for (auto &queue : const_queue_map) { skip |= VerifyQueueStateToSeq(&queue.second, queue.second.seq + queue.second.submissions.size()); } return skip; } bool CoreChecks::PreCallValidateCreateSemaphore(VkDevice device, const VkSemaphoreCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkSemaphore *pSemaphore) const { bool skip = false; auto *sem_type_create_info = LvlFindInChain<VkSemaphoreTypeCreateInfo>(pCreateInfo->pNext); if (sem_type_create_info && sem_type_create_info->semaphoreType == VK_SEMAPHORE_TYPE_TIMELINE && !enabled_features.core12.timelineSemaphore && !device_extensions.vk_khr_timeline_semaphore) { skip |= LogError(device, "VUID-VkSemaphoreTypeCreateInfo-timelineSemaphore-03252", "VkCreateSemaphore: timelineSemaphore feature is not enabled, can not create timeline semaphores"); } if (sem_type_create_info && sem_type_create_info->semaphoreType == VK_SEMAPHORE_TYPE_BINARY && sem_type_create_info->initialValue != 0) { skip |= LogError(device, "VUID-VkSemaphoreTypeCreateInfo-semaphoreType-03279", "vkCreateSemaphore: if semaphoreType is VK_SEMAPHORE_TYPE_BINARY, initialValue must be zero"); } return skip; } bool CoreChecks::PreCallValidateWaitSemaphores(VkDevice device, const VkSemaphoreWaitInfo *pWaitInfo, uint64_t timeout) const { return ValidateWaitSemaphores(device, pWaitInfo, timeout, "VkWaitSemaphores"); } bool CoreChecks::PreCallValidateWaitSemaphoresKHR(VkDevice device, const VkSemaphoreWaitInfo *pWaitInfo, uint64_t timeout) const { return ValidateWaitSemaphores(device, pWaitInfo, timeout, "VkWaitSemaphoresKHR"); } bool CoreChecks::ValidateWaitSemaphores(VkDevice device, const VkSemaphoreWaitInfo *pWaitInfo, uint64_t timeout, const char *apiName) const { bool skip = false; for (uint32_t i = 0; i < pWaitInfo->semaphoreCount; i++) { auto *semaphore_state = GetSemaphoreState(pWaitInfo->pSemaphores[i]); if (semaphore_state && semaphore_state->type != VK_SEMAPHORE_TYPE_TIMELINE) { skip |= LogError(pWaitInfo->pSemaphores[i], "VUID-VkSemaphoreWaitInfo-pSemaphores-03256", "%s(): all semaphores in pWaitInfo must be timeline semaphores, but %s is not", apiName, report_data->FormatHandle(pWaitInfo->pSemaphores[i]).c_str()); } } return skip; } bool CoreChecks::PreCallValidateDestroyFence(VkDevice device, VkFence fence, const VkAllocationCallbacks *pAllocator) const { const FENCE_STATE *fence_node = GetFenceState(fence); bool skip = false; if (fence_node) { if (fence_node->scope == kSyncScopeInternal && fence_node->state == FENCE_INFLIGHT) { skip |= LogError(fence, "VUID-vkDestroyFence-fence-01120", "%s is in use.", report_data->FormatHandle(fence).c_str()); } } return skip; } bool CoreChecks::PreCallValidateDestroySemaphore(VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks *pAllocator) const { const SEMAPHORE_STATE *sema_node = GetSemaphoreState(semaphore); const VulkanTypedHandle obj_struct(semaphore, kVulkanObjectTypeSemaphore); bool skip = false; if (sema_node) { skip |= ValidateObjectNotInUse(sema_node, obj_struct, "vkDestroySemaphore", "VUID-vkDestroySemaphore-semaphore-01137"); } return skip; } bool 
CoreChecks::PreCallValidateDestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks *pAllocator) const { const EVENT_STATE *event_state = GetEventState(event); const VulkanTypedHandle obj_struct(event, kVulkanObjectTypeEvent); bool skip = false; if (event_state) { skip |= ValidateObjectNotInUse(event_state, obj_struct, "vkDestroyEvent", "VUID-vkDestroyEvent-event-01145"); } return skip; } bool CoreChecks::PreCallValidateDestroyQueryPool(VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks *pAllocator) const { if (disabled[query_validation]) return false; const QUERY_POOL_STATE *qp_state = GetQueryPoolState(queryPool); const VulkanTypedHandle obj_struct(queryPool, kVulkanObjectTypeQueryPool); bool skip = false; if (qp_state) { skip |= ValidateObjectNotInUse(qp_state, obj_struct, "vkDestroyQueryPool", "VUID-vkDestroyQueryPool-queryPool-00793"); } return skip; } bool CoreChecks::ValidatePerformanceQueryResults(const char *cmd_name, const QUERY_POOL_STATE *query_pool_state, uint32_t firstQuery, uint32_t queryCount, VkQueryResultFlags flags) const { bool skip = false; if (flags & (VK_QUERY_RESULT_WITH_AVAILABILITY_BIT | VK_QUERY_RESULT_PARTIAL_BIT | VK_QUERY_RESULT_64_BIT)) { string invalid_flags_string; for (auto flag : {VK_QUERY_RESULT_WITH_AVAILABILITY_BIT, VK_QUERY_RESULT_PARTIAL_BIT, VK_QUERY_RESULT_64_BIT}) { if (flag & flags) { if (invalid_flags_string.size()) { invalid_flags_string += " and "; } invalid_flags_string += string_VkQueryResultFlagBits(flag); } } skip |= LogError(query_pool_state->pool, strcmp(cmd_name, "vkGetQueryPoolResults") == 0 ? "VUID-vkGetQueryPoolResults-queryType-03230" : "VUID-vkCmdCopyQueryPoolResults-queryType-03233", "%s: QueryPool %s was created with a queryType of " "VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR but flags contains %s.", cmd_name, report_data->FormatHandle(query_pool_state->pool).c_str(), invalid_flags_string.c_str()); } for (uint32_t query_index = firstQuery; query_index < queryCount; query_index++) { uint32_t submitted = 0; for (uint32_t pass_index = 0; pass_index < query_pool_state->n_performance_passes; pass_index++) { QueryObject obj(QueryObject(query_pool_state->pool, query_index), pass_index); auto query_pass_iter = queryToStateMap.find(obj); if (query_pass_iter != queryToStateMap.end() && query_pass_iter->second == QUERYSTATE_AVAILABLE) submitted++; } if (submitted < query_pool_state->n_performance_passes) { skip |= LogError(query_pool_state->pool, "VUID-vkGetQueryPoolResults-queryType-03231", "%s: QueryPool %s has %u performance query passes, but the query has only been " "submitted for %u of the passes.", cmd_name, report_data->FormatHandle(query_pool_state->pool).c_str(), query_pool_state->n_performance_passes, submitted); } } return skip; } bool CoreChecks::ValidateGetQueryPoolPerformanceResults(VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, void *pData, VkDeviceSize stride, VkQueryResultFlags flags, const char *apiName) const { bool skip = false; const auto query_pool_state = GetQueryPoolState(queryPool); if (!query_pool_state || query_pool_state->createInfo.queryType != VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR) return skip; if (((((uintptr_t)pData) % sizeof(VkPerformanceCounterResultKHR)) != 0 || (stride % sizeof(VkPerformanceCounterResultKHR)) != 0)) { skip |= LogError(queryPool, "VUID-vkGetQueryPoolResults-queryType-03229", "%s(): QueryPool %s was created with a queryType of " "VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR but pData & stride are not multiple of the " "size of
VkPerformanceCounterResultKHR.", apiName, report_data->FormatHandle(queryPool).c_str()); } skip |= ValidatePerformanceQueryResults(apiName, query_pool_state, firstQuery, queryCount, flags); return skip; } bool CoreChecks::PreCallValidateGetQueryPoolResults(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, size_t dataSize, void *pData, VkDeviceSize stride, VkQueryResultFlags flags) const { if (disabled[query_validation]) return false; bool skip = false; skip |= ValidateQueryPoolStride("VUID-vkGetQueryPoolResults-flags-02827", "VUID-vkGetQueryPoolResults-flags-00815", stride, "dataSize", dataSize, flags); skip |= ValidateQueryPoolIndex(queryPool, firstQuery, queryCount, "vkGetQueryPoolResults()", "VUID-vkGetQueryPoolResults-firstQuery-00813", "VUID-vkGetQueryPoolResults-firstQuery-00816"); skip |= ValidateGetQueryPoolPerformanceResults(queryPool, firstQuery, queryCount, pData, stride, flags, "vkGetQueryPoolResults"); const auto query_pool_state = GetQueryPoolState(queryPool); if (query_pool_state) { if ((query_pool_state->createInfo.queryType == VK_QUERY_TYPE_TIMESTAMP) && (flags & VK_QUERY_RESULT_PARTIAL_BIT)) { skip |= LogError( queryPool, "VUID-vkGetQueryPoolResults-queryType-00818", "%s was created with a queryType of VK_QUERY_TYPE_TIMESTAMP but flags contains VK_QUERY_RESULT_PARTIAL_BIT.", report_data->FormatHandle(queryPool).c_str()); } if (!skip) { uint32_t query_avail_data = (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) ? 1 : 0; uint32_t query_size_in_bytes = (flags & VK_QUERY_RESULT_64_BIT) ? sizeof(uint64_t) : sizeof(uint32_t); uint32_t query_items = 0; uint32_t query_size = 0; switch (query_pool_state->createInfo.queryType) { case VK_QUERY_TYPE_OCCLUSION: // Occlusion queries write one integer value - the number of samples passed. 
query_items = 1; query_size = query_size_in_bytes * (query_items + query_avail_data); break; case VK_QUERY_TYPE_PIPELINE_STATISTICS: // Pipeline statistics queries write one integer value for each bit that is enabled in the pipelineStatistics // when the pool is created { const int num_bits = sizeof(VkFlags) * CHAR_BIT; std::bitset<num_bits> pipe_stats_bits(query_pool_state->createInfo.pipelineStatistics); query_items = static_cast<uint32_t>(pipe_stats_bits.count()); query_size = query_size_in_bytes * (query_items + query_avail_data); } break; case VK_QUERY_TYPE_TIMESTAMP: // Timestamp queries write one integer query_items = 1; query_size = query_size_in_bytes * (query_items + query_avail_data); break; case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT: // Transform feedback queries write two integers query_items = 2; query_size = query_size_in_bytes * (query_items + query_avail_data); break; case VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR: // Performance queries store results in a tightly packed array of VkPerformanceCounterResultsKHR query_items = query_pool_state->perf_counter_index_count; query_size = sizeof(VkPerformanceCounterResultKHR) * query_items; if (query_size > stride) { skip |= LogError(queryPool, "VUID-vkGetQueryPoolResults-queryType-04519", "vkGetQueryPoolResults() on querypool %s specified stride %" PRIu64 " which must be at least counterIndexCount (%u) " "multiplied by sizeof(VkPerformanceCounterResultKHR) (%zu).", report_data->FormatHandle(queryPool).c_str(), stride, query_items, sizeof(VkPerformanceCounterResultKHR)); } break; // These cases intentionally fall through to the default case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR: // VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_NV case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_SIZE_KHR: case VK_QUERY_TYPE_PERFORMANCE_QUERY_INTEL: default: query_size = 0; break; } if (query_size && (((queryCount - 1) * stride + query_size) > dataSize)) { skip |= LogError(queryPool, "VUID-vkGetQueryPoolResults-dataSize-00817", "vkGetQueryPoolResults() on querypool %s specified dataSize %zu which is " "incompatible with the specified query type and options.", report_data->FormatHandle(queryPool).c_str(), dataSize); } } } return skip; } bool CoreChecks::ValidateInsertMemoryRange(const VulkanTypedHandle &typed_handle, const DEVICE_MEMORY_STATE *mem_info, VkDeviceSize memoryOffset, const char *api_name) const { bool skip = false; if (memoryOffset >= mem_info->alloc_info.allocationSize) { const char *error_code = nullptr; if (typed_handle.type == kVulkanObjectTypeBuffer) { if (strcmp(api_name, "vkBindBufferMemory()") == 0) { error_code = "VUID-vkBindBufferMemory-memoryOffset-01031"; } else { error_code = "VUID-VkBindBufferMemoryInfo-memoryOffset-01031"; } } else if (typed_handle.type == kVulkanObjectTypeImage) { if (strcmp(api_name, "vkBindImageMemory()") == 0) { error_code = "VUID-vkBindImageMemory-memoryOffset-01046"; } else { error_code = "VUID-VkBindImageMemoryInfo-memoryOffset-01046"; } } else if (typed_handle.type == kVulkanObjectTypeAccelerationStructureNV) { error_code = "VUID-VkBindAccelerationStructureMemoryInfoNV-memoryOffset-03621"; } else { // Unsupported object type assert(false); } LogObjectList objlist(mem_info->mem); objlist.add(typed_handle); skip = LogError(objlist, error_code, "In %s, attempting to bind %s to %s, memoryOffset=0x%" PRIxLEAST64 " must be less than the memory allocation size 0x%" PRIxLEAST64 ".", api_name, report_data->FormatHandle(mem_info->mem).c_str(),
report_data->FormatHandle(typed_handle).c_str(), memoryOffset, mem_info->alloc_info.allocationSize); } return skip; } bool CoreChecks::ValidateInsertImageMemoryRange(VkImage image, const DEVICE_MEMORY_STATE *mem_info, VkDeviceSize mem_offset, const char *api_name) const { return ValidateInsertMemoryRange(VulkanTypedHandle(image, kVulkanObjectTypeImage), mem_info, mem_offset, api_name); } bool CoreChecks::ValidateInsertBufferMemoryRange(VkBuffer buffer, const DEVICE_MEMORY_STATE *mem_info, VkDeviceSize mem_offset, const char *api_name) const { return ValidateInsertMemoryRange(VulkanTypedHandle(buffer, kVulkanObjectTypeBuffer), mem_info, mem_offset, api_name); } bool CoreChecks::ValidateInsertAccelerationStructureMemoryRange(VkAccelerationStructureNV as, const DEVICE_MEMORY_STATE *mem_info, VkDeviceSize mem_offset, const char *api_name) const { return ValidateInsertMemoryRange(VulkanTypedHandle(as, kVulkanObjectTypeAccelerationStructureNV), mem_info, mem_offset, api_name); } bool CoreChecks::ValidateMemoryTypes(const DEVICE_MEMORY_STATE *mem_info, const uint32_t memory_type_bits, const char *funcName, const char *msgCode) const { bool skip = false; if (((1 << mem_info->alloc_info.memoryTypeIndex) & memory_type_bits) == 0) { skip = LogError(mem_info->mem, msgCode, "%s(): MemoryRequirements->memoryTypeBits (0x%X) for this object type are not compatible with the memory " "type (0x%X) of %s.", funcName, memory_type_bits, mem_info->alloc_info.memoryTypeIndex, report_data->FormatHandle(mem_info->mem).c_str()); } return skip; } bool CoreChecks::ValidateBindBufferMemory(VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset, const char *api_name) const { const BUFFER_STATE *buffer_state = GetBufferState(buffer); bool bind_buffer_mem_2 = strcmp(api_name, "vkBindBufferMemory()") != 0; bool skip = false; if (buffer_state) { // Track objects tied to memory const VulkanTypedHandle obj_struct(buffer, kVulkanObjectTypeBuffer); skip = ValidateSetMemBinding(mem, obj_struct, api_name); const auto mem_info = GetDevMemState(mem); // Validate memory requirements alignment if (SafeModulo(memoryOffset, buffer_state->requirements.alignment) != 0) { const char *vuid = bind_buffer_mem_2 ? "VUID-VkBindBufferMemoryInfo-memoryOffset-01036" : "VUID-vkBindBufferMemory-memoryOffset-01036"; skip |= LogError(buffer, vuid, "%s: memoryOffset is 0x%" PRIxLEAST64 " but must be an integer multiple of the VkMemoryRequirements::alignment value 0x%" PRIxLEAST64 ", returned from a call to vkGetBufferMemoryRequirements with buffer.", api_name, memoryOffset, buffer_state->requirements.alignment); } if (mem_info) { // Validate bound memory range information skip |= ValidateInsertBufferMemoryRange(buffer, mem_info, memoryOffset, api_name); const char *mem_type_vuid = bind_buffer_mem_2 ? "VUID-VkBindBufferMemoryInfo-memory-01035" : "VUID-vkBindBufferMemory-memory-01035"; skip |= ValidateMemoryTypes(mem_info, buffer_state->requirements.memoryTypeBits, api_name, mem_type_vuid); // Validate memory requirements size if (buffer_state->requirements.size > (mem_info->alloc_info.allocationSize - memoryOffset)) { const char *vuid = bind_buffer_mem_2 ? 
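/* Hedged numeric example of the checks in this function: with
   VkMemoryRequirements{ size = 0x40000, alignment = 0x100 } and
   allocationSize = 0x100000, memoryOffset values 0x0, 0x100, ... up to
   0x100000 - 0x40000 pass; an offset such as 0x90 trips the alignment VUID
   above, and an offset leaving fewer than 0x40000 bytes trips the size VUID
   selected here. */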
"VUID-VkBindBufferMemoryInfo-size-01037" : "VUID-vkBindBufferMemory-size-01037"; skip |= LogError(buffer, vuid, "%s: memory size minus memoryOffset is 0x%" PRIxLEAST64 " but must be at least as large as VkMemoryRequirements::size value 0x%" PRIxLEAST64 ", returned from a call to vkGetBufferMemoryRequirements with buffer.", api_name, mem_info->alloc_info.allocationSize - memoryOffset, buffer_state->requirements.size); } // Validate dedicated allocation if (mem_info->is_dedicated && (mem_info->dedicated_buffer != VK_NULL_HANDLE) && ((mem_info->dedicated_buffer != buffer) || (memoryOffset != 0))) { const char *vuid = bind_buffer_mem_2 ? "VUID-VkBindBufferMemoryInfo-memory-01508" : "VUID-vkBindBufferMemory-memory-01508"; LogObjectList objlist(buffer); objlist.add(mem); objlist.add(mem_info->dedicated_buffer); skip |= LogError(objlist, vuid, "%s: for dedicated %s, VkMemoryDedicatedAllocateInfo::buffer %s must be equal " "to %s and memoryOffset 0x%" PRIxLEAST64 " must be zero.", api_name, report_data->FormatHandle(mem).c_str(), report_data->FormatHandle(mem_info->dedicated_buffer).c_str(), report_data->FormatHandle(buffer).c_str(), memoryOffset); } auto chained_flags_struct = LvlFindInChain<VkMemoryAllocateFlagsInfo>(mem_info->alloc_info.pNext); if (enabled_features.core12.bufferDeviceAddress && (buffer_state->createInfo.usage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT) && (!chained_flags_struct || !(chained_flags_struct->flags & VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT))) { skip |= LogError(buffer, "VUID-vkBindBufferMemory-bufferDeviceAddress-03339", "%s: If buffer was created with the VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT bit set, " "memory must have been allocated with the VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT bit set.", api_name); } // Validate export memory handles if ((mem_info->export_handle_type_flags != 0) && ((mem_info->export_handle_type_flags & buffer_state->external_memory_handle) == 0)) { const char *vuid = bind_buffer_mem_2 ? 
"VUID-VkBindBufferMemoryInfo-memory-02726" : "VUID-vkBindBufferMemory-memory-02726"; LogObjectList objlist(buffer); objlist.add(mem); skip |= LogError(objlist, vuid, "%s: The VkDeviceMemory (%s) has an external handleType of %s which does not include at least one " "handle from VkBuffer (%s) handleType %s.", api_name, report_data->FormatHandle(mem).c_str(), string_VkExternalMemoryHandleTypeFlags(mem_info->export_handle_type_flags).c_str(), report_data->FormatHandle(buffer).c_str(), string_VkExternalMemoryHandleTypeFlags(buffer_state->external_memory_handle).c_str()); } // Validate import memory handles if (mem_info->is_import_ahb == true) { skip |= ValidateBufferImportedHandleANDROID(api_name, buffer_state->external_memory_handle, mem, buffer); } else if (mem_info->is_import == true) { if ((mem_info->import_handle_type_flags & buffer_state->external_memory_handle) == 0) { const char *vuid = nullptr; if ((bind_buffer_mem_2) && (device_extensions.vk_android_external_memory_android_hardware_buffer)) { vuid = "VUID-VkBindBufferMemoryInfo-memory-02985"; } else if ((!bind_buffer_mem_2) && (device_extensions.vk_android_external_memory_android_hardware_buffer)) { vuid = "VUID-vkBindBufferMemory-memory-02985"; } else if ((bind_buffer_mem_2) && (!device_extensions.vk_android_external_memory_android_hardware_buffer)) { vuid = "VUID-VkBindBufferMemoryInfo-memory-02727"; } else if ((!bind_buffer_mem_2) && (!device_extensions.vk_android_external_memory_android_hardware_buffer)) { vuid = "VUID-vkBindBufferMemory-memory-02727"; } LogObjectList objlist(buffer); objlist.add(mem); skip |= LogError(objlist, vuid, "%s: The VkDeviceMemory (%s) was created with an import operation with handleType of %s which " "is not set in the VkBuffer (%s) VkExternalMemoryBufferCreateInfo::handleType (%s)", api_name, report_data->FormatHandle(mem).c_str(), string_VkExternalMemoryHandleTypeFlags(mem_info->import_handle_type_flags).c_str(), report_data->FormatHandle(buffer).c_str(), string_VkExternalMemoryHandleTypeFlags(buffer_state->external_memory_handle).c_str()); } } // Validate mix of protected buffer and memory if ((buffer_state->unprotected == false) && (mem_info->unprotected == true)) { const char *vuid = bind_buffer_mem_2 ? "VUID-VkBindBufferMemoryInfo-None-01898" : "VUID-vkBindBufferMemory-None-01898"; LogObjectList objlist(buffer); objlist.add(mem); skip |= LogError(objlist, vuid, "%s: The VkDeviceMemory (%s) was not created with protected memory but the VkBuffer (%s) was set " "to use protected memory.", api_name, report_data->FormatHandle(mem).c_str(), report_data->FormatHandle(buffer).c_str()); } else if ((buffer_state->unprotected == true) && (mem_info->unprotected == false)) { const char *vuid = bind_buffer_mem_2 ? 
"VUID-VkBindBufferMemoryInfo-None-01899" : "VUID-vkBindBufferMemory-None-01899"; LogObjectList objlist(buffer); objlist.add(mem); skip |= LogError(objlist, vuid, "%s: The VkDeviceMemory (%s) was created with protected memory but the VkBuffer (%s) was not set " "to use protected memory.", api_name, report_data->FormatHandle(mem).c_str(), report_data->FormatHandle(buffer).c_str()); } } } return skip; } bool CoreChecks::PreCallValidateBindBufferMemory(VkDevice device, VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset) const { const char *api_name = "vkBindBufferMemory()"; return ValidateBindBufferMemory(buffer, mem, memoryOffset, api_name); } bool CoreChecks::PreCallValidateBindBufferMemory2(VkDevice device, uint32_t bindInfoCount, const VkBindBufferMemoryInfo *pBindInfos) const { char api_name[64]; bool skip = false; for (uint32_t i = 0; i < bindInfoCount; i++) { sprintf(api_name, "vkBindBufferMemory2() pBindInfos[%u]", i); skip |= ValidateBindBufferMemory(pBindInfos[i].buffer, pBindInfos[i].memory, pBindInfos[i].memoryOffset, api_name); } return skip; } bool CoreChecks::PreCallValidateBindBufferMemory2KHR(VkDevice device, uint32_t bindInfoCount, const VkBindBufferMemoryInfo *pBindInfos) const { char api_name[64]; bool skip = false; for (uint32_t i = 0; i < bindInfoCount; i++) { sprintf(api_name, "vkBindBufferMemory2KHR() pBindInfos[%u]", i); skip |= ValidateBindBufferMemory(pBindInfos[i].buffer, pBindInfos[i].memory, pBindInfos[i].memoryOffset, api_name); } return skip; } bool CoreChecks::PreCallValidateGetImageMemoryRequirements(VkDevice device, VkImage image, VkMemoryRequirements *pMemoryRequirements) const { bool skip = false; if (device_extensions.vk_android_external_memory_android_hardware_buffer) { skip |= ValidateGetImageMemoryRequirementsANDROID(image, "vkGetImageMemoryRequirements()"); } const IMAGE_STATE *image_state = GetImageState(image); if (image_state) { // Checks for no disjoint bit if (image_state->disjoint == true) { skip |= LogError(image, "VUID-vkGetImageMemoryRequirements-image-01588", "vkGetImageMemoryRequirements(): %s must not have been created with the VK_IMAGE_CREATE_DISJOINT_BIT " "(need to use vkGetImageMemoryRequirements2).", report_data->FormatHandle(image).c_str()); } } return skip; } bool CoreChecks::ValidateGetImageMemoryRequirements2(const VkImageMemoryRequirementsInfo2 *pInfo, const char *func_name) const { bool skip = false; if (device_extensions.vk_android_external_memory_android_hardware_buffer) { skip |= ValidateGetImageMemoryRequirementsANDROID(pInfo->image, func_name); } const IMAGE_STATE *image_state = GetImageState(pInfo->image); const VkFormat image_format = image_state->createInfo.format; const VkImageTiling image_tiling = image_state->createInfo.tiling; const VkImagePlaneMemoryRequirementsInfo *image_plane_info = LvlFindInChain<VkImagePlaneMemoryRequirementsInfo>(pInfo->pNext); if ((FormatIsMultiplane(image_format)) && (image_state->disjoint == true) && (image_plane_info == nullptr)) { skip |= LogError(pInfo->image, "VUID-VkImageMemoryRequirementsInfo2-image-01589", "%s: %s image was created with a multi-planar format (%s) and " "VK_IMAGE_CREATE_DISJOINT_BIT, but the current pNext doesn't include a " "VkImagePlaneMemoryRequirementsInfo struct", func_name, report_data->FormatHandle(pInfo->image).c_str(), string_VkFormat(image_format)); } if ((image_state->disjoint == false) && (image_plane_info != nullptr)) { skip |= LogError(pInfo->image, "VUID-VkImageMemoryRequirementsInfo2-image-01590", "%s: %s image was not created with 
VK_IMAGE_CREATE_DISJOINT_BIT, " "but the current pNext includes a VkImagePlaneMemoryRequirementsInfo struct", func_name, report_data->FormatHandle(pInfo->image).c_str()); } if ((FormatIsMultiplane(image_format) == false) && (image_tiling != VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT) && (image_plane_info != nullptr)) { const char *vuid = device_extensions.vk_ext_image_drm_format_modifier ? "VUID-VkImageMemoryRequirementsInfo2-image-02280" : "VUID-VkImageMemoryRequirementsInfo2-image-01591"; skip |= LogError(pInfo->image, vuid, "%s: %s image is a single-plane format (%s) and does not have tiling of " "VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT, " "but the current pNext includes a VkImagePlaneMemoryRequirementsInfo struct", func_name, report_data->FormatHandle(pInfo->image).c_str(), string_VkFormat(image_format)); } if (image_plane_info != nullptr) { if ((image_tiling == VK_IMAGE_TILING_LINEAR) || (image_tiling == VK_IMAGE_TILING_OPTIMAL)) { // Make sure planeAspect is only a single, valid plane uint32_t planes = FormatPlaneCount(image_format); VkImageAspectFlags aspect = image_plane_info->planeAspect; if ((2 == planes) && (aspect != VK_IMAGE_ASPECT_PLANE_0_BIT) && (aspect != VK_IMAGE_ASPECT_PLANE_1_BIT)) { skip |= LogError( pInfo->image, "VUID-VkImagePlaneMemoryRequirementsInfo-planeAspect-02281", "%s: Image %s VkImagePlaneMemoryRequirementsInfo::planeAspect is %s but can only be VK_IMAGE_ASPECT_PLANE_0_BIT " "or VK_IMAGE_ASPECT_PLANE_1_BIT.", func_name, report_data->FormatHandle(image_state->image).c_str(), string_VkImageAspectFlags(aspect).c_str()); } if ((3 == planes) && (aspect != VK_IMAGE_ASPECT_PLANE_0_BIT) && (aspect != VK_IMAGE_ASPECT_PLANE_1_BIT) && (aspect != VK_IMAGE_ASPECT_PLANE_2_BIT)) { skip |= LogError( pInfo->image, "VUID-VkImagePlaneMemoryRequirementsInfo-planeAspect-02281", "%s: Image %s VkImagePlaneMemoryRequirementsInfo::planeAspect is %s but can only be VK_IMAGE_ASPECT_PLANE_0_BIT " "or VK_IMAGE_ASPECT_PLANE_1_BIT or VK_IMAGE_ASPECT_PLANE_2_BIT.", func_name, report_data->FormatHandle(image_state->image).c_str(), string_VkImageAspectFlags(aspect).c_str()); } } } return skip; } bool CoreChecks::PreCallValidateGetImageMemoryRequirements2(VkDevice device, const VkImageMemoryRequirementsInfo2 *pInfo, VkMemoryRequirements2 *pMemoryRequirements) const { return ValidateGetImageMemoryRequirements2(pInfo, "vkGetImageMemoryRequirements2()"); } bool CoreChecks::PreCallValidateGetImageMemoryRequirements2KHR(VkDevice device, const VkImageMemoryRequirementsInfo2 *pInfo, VkMemoryRequirements2 *pMemoryRequirements) const { return ValidateGetImageMemoryRequirements2(pInfo, "vkGetImageMemoryRequirements2KHR()"); } bool CoreChecks::PreCallValidateGetPhysicalDeviceImageFormatProperties2(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceImageFormatInfo2 *pImageFormatInfo, VkImageFormatProperties2 *pImageFormatProperties) const { // Can't wrap AHB-specific validation in a device extension check here, but no harm bool skip = ValidateGetPhysicalDeviceImageFormatProperties2ANDROID(pImageFormatInfo, pImageFormatProperties); return skip; } bool CoreChecks::PreCallValidateGetPhysicalDeviceImageFormatProperties2KHR(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceImageFormatInfo2 *pImageFormatInfo, VkImageFormatProperties2 *pImageFormatProperties) const { // Can't wrap AHB-specific validation in a device extension check here, but no harm bool skip = ValidateGetPhysicalDeviceImageFormatProperties2ANDROID(pImageFormatInfo, pImageFormatProperties); return skip; } bool
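/* Hedged sketch (hypothetical app-side code) of the disjoint multi-planar
   query path validated above, where each plane is queried separately:
     VkImagePlaneMemoryRequirementsInfo plane_info = {};
     plane_info.sType = VK_STRUCTURE_TYPE_IMAGE_PLANE_MEMORY_REQUIREMENTS_INFO;
     plane_info.planeAspect = VK_IMAGE_ASPECT_PLANE_0_BIT;  // exactly one valid plane
     VkImageMemoryRequirementsInfo2 info = {};
     info.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2;
     info.pNext = &plane_info;
     info.image = disjoint_ycbcr_image;  // hypothetical disjoint multi-planar VkImage
     VkMemoryRequirements2 reqs = {VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2};
     vkGetImageMemoryRequirements2(device, &info, &reqs);
*/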
CoreChecks::PreCallValidateDestroyPipeline(VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks *pAllocator) const { const PIPELINE_STATE *pipeline_state = GetPipelineState(pipeline); const VulkanTypedHandle obj_struct(pipeline, kVulkanObjectTypePipeline); bool skip = false; if (pipeline_state) { skip |= ValidateObjectNotInUse(pipeline_state, obj_struct, "vkDestroyPipeline", "VUID-vkDestroyPipeline-pipeline-00765"); } return skip; } bool CoreChecks::PreCallValidateDestroySampler(VkDevice device, VkSampler sampler, const VkAllocationCallbacks *pAllocator) const { const SAMPLER_STATE *sampler_state = GetSamplerState(sampler); const VulkanTypedHandle obj_struct(sampler, kVulkanObjectTypeSampler); bool skip = false; if (sampler_state) { skip |= ValidateObjectNotInUse(sampler_state, obj_struct, "vkDestroySampler", "VUID-vkDestroySampler-sampler-01082"); } return skip; } bool CoreChecks::PreCallValidateDestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, const VkAllocationCallbacks *pAllocator) const { const DESCRIPTOR_POOL_STATE *desc_pool_state = GetDescriptorPoolState(descriptorPool); const VulkanTypedHandle obj_struct(descriptorPool, kVulkanObjectTypeDescriptorPool); bool skip = false; if (desc_pool_state) { skip |= ValidateObjectNotInUse(desc_pool_state, obj_struct, "vkDestroyDescriptorPool", "VUID-vkDestroyDescriptorPool-descriptorPool-00303"); } return skip; } // Verify cmdBuffer in given cb_node is not in global in-flight set, and return skip result // If this is a secondary command buffer, then make sure its primary is also in-flight // If primary is not in-flight, then remove secondary from global in-flight set // This function is only valid at a point when cmdBuffer is being reset or freed bool CoreChecks::CheckCommandBufferInFlight(const CMD_BUFFER_STATE *cb_node, const char *action, const char *error_code) const { bool skip = false; if (cb_node->in_use.load()) { skip |= LogError(cb_node->commandBuffer, error_code, "Attempt to %s %s which is in use.", action, report_data->FormatHandle(cb_node->commandBuffer).c_str()); } return skip; } // Iterate over all cmdBuffers in given commandPool and verify that each is not in use bool CoreChecks::CheckCommandBuffersInFlight(const COMMAND_POOL_STATE *pPool, const char *action, const char *error_code) const { bool skip = false; for (auto cmd_buffer : pPool->commandBuffers) { skip |= CheckCommandBufferInFlight(GetCBState(cmd_buffer), action, error_code); } return skip; } bool CoreChecks::PreCallValidateFreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount, const VkCommandBuffer *pCommandBuffers) const { bool skip = false; for (uint32_t i = 0; i < commandBufferCount; i++) { const auto *cb_node = GetCBState(pCommandBuffers[i]); // Delete CB information structure, and remove from commandBufferMap if (cb_node) { skip |= CheckCommandBufferInFlight(cb_node, "free", "VUID-vkFreeCommandBuffers-pCommandBuffers-00047"); } } return skip; } bool CoreChecks::PreCallValidateCreateCommandPool(VkDevice device, const VkCommandPoolCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkCommandPool *pCommandPool) const { bool skip = false; skip |= ValidateDeviceQueueFamily(pCreateInfo->queueFamilyIndex, "vkCreateCommandPool", "pCreateInfo->queueFamilyIndex", "VUID-vkCreateCommandPool-queueFamilyIndex-01937"); if ((enabled_features.core11.protectedMemory == VK_FALSE) && ((pCreateInfo->flags & VK_COMMAND_POOL_CREATE_PROTECTED_BIT) != 0)) { skip |= LogError(device, 
"VUID-VkCommandPoolCreateInfo-flags-02860", "vkCreateCommandPool(): the protectedMemory device feature is disabled: CommandPools cannot be created " "with the VK_COMMAND_POOL_CREATE_PROTECTED_BIT set."); } return skip; } bool CoreChecks::PreCallValidateCreateQueryPool(VkDevice device, const VkQueryPoolCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkQueryPool *pQueryPool) const { if (disabled[query_validation]) return false; bool skip = false; if (pCreateInfo && pCreateInfo->queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS) { if (!enabled_features.core.pipelineStatisticsQuery) { skip |= LogError(device, "VUID-VkQueryPoolCreateInfo-queryType-00791", "vkCreateQueryPool(): Query pool with type VK_QUERY_TYPE_PIPELINE_STATISTICS created on a device with " "VkDeviceCreateInfo.pEnabledFeatures.pipelineStatisticsQuery == VK_FALSE."); } } if (pCreateInfo && pCreateInfo->queryType == VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR) { if (!enabled_features.performance_query_features.performanceCounterQueryPools) { skip |= LogError(device, "VUID-VkQueryPoolPerformanceCreateInfoKHR-performanceCounterQueryPools-03237", "vkCreateQueryPool(): Query pool with type VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR created on a device with " "VkPhysicalDevicePerformanceQueryFeaturesKHR.performanceCounterQueryPools == VK_FALSE."); } auto perf_ci = LvlFindInChain<VkQueryPoolPerformanceCreateInfoKHR>(pCreateInfo->pNext); if (!perf_ci) { skip |= LogError( device, "VUID-VkQueryPoolCreateInfo-queryType-03222", "vkCreateQueryPool(): Query pool with type VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR created but the pNext chain of " "pCreateInfo does not contain in instance of VkQueryPoolPerformanceCreateInfoKHR."); } else { const auto &perf_counter_iter = physical_device_state->perf_counters.find(perf_ci->queueFamilyIndex); if (perf_counter_iter == physical_device_state->perf_counters.end()) { skip |= LogError( device, "VUID-VkQueryPoolPerformanceCreateInfoKHR-queueFamilyIndex-03236", "vkCreateQueryPool(): VkQueryPerformanceCreateInfoKHR::queueFamilyIndex is not a valid queue family index."); } else { const QUEUE_FAMILY_PERF_COUNTERS *perf_counters = perf_counter_iter->second.get(); for (uint32_t idx = 0; idx < perf_ci->counterIndexCount; idx++) { if (perf_ci->pCounterIndices[idx] >= perf_counters->counters.size()) { skip |= LogError( device, "VUID-VkQueryPoolPerformanceCreateInfoKHR-pCounterIndices-03321", "vkCreateQueryPool(): VkQueryPerformanceCreateInfoKHR::pCounterIndices[%u] = %u is not a valid " "counter index.", idx, perf_ci->pCounterIndices[idx]); } } } } } return skip; } bool CoreChecks::PreCallValidateDestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks *pAllocator) const { const COMMAND_POOL_STATE *cp_state = GetCommandPoolState(commandPool); bool skip = false; if (cp_state) { // Verify that command buffers in pool are complete (not in-flight) skip |= CheckCommandBuffersInFlight(cp_state, "destroy command pool with", "VUID-vkDestroyCommandPool-commandPool-00041"); } return skip; } bool CoreChecks::PreCallValidateResetCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags) const { const auto *command_pool_state = GetCommandPoolState(commandPool); return CheckCommandBuffersInFlight(command_pool_state, "reset command pool with", "VUID-vkResetCommandPool-commandPool-00040"); } bool CoreChecks::PreCallValidateResetFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences) const { bool skip = false; for (uint32_t i = 0; i < fenceCount; ++i) { const 
auto fence_state = GetFenceState(pFences[i]); if (fence_state && fence_state->scope == kSyncScopeInternal && fence_state->state == FENCE_INFLIGHT) { skip |= LogError(pFences[i], "VUID-vkResetFences-pFences-01123", "%s is in use.", report_data->FormatHandle(pFences[i]).c_str()); } } return skip; } bool CoreChecks::PreCallValidateDestroyFramebuffer(VkDevice device, VkFramebuffer framebuffer, const VkAllocationCallbacks *pAllocator) const { const FRAMEBUFFER_STATE *framebuffer_state = GetFramebufferState(framebuffer); const VulkanTypedHandle obj_struct(framebuffer, kVulkanObjectTypeFramebuffer); bool skip = false; if (framebuffer_state) { skip |= ValidateObjectNotInUse(framebuffer_state, obj_struct, "vkDestroyFramebuffer", "VUID-vkDestroyFramebuffer-framebuffer-00892"); } return skip; } bool CoreChecks::PreCallValidateDestroyRenderPass(VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks *pAllocator) const { const RENDER_PASS_STATE *rp_state = GetRenderPassState(renderPass); const VulkanTypedHandle obj_struct(renderPass, kVulkanObjectTypeRenderPass); bool skip = false; if (rp_state) { skip |= ValidateObjectNotInUse(rp_state, obj_struct, "vkDestroyRenderPass", "VUID-vkDestroyRenderPass-renderPass-00873"); } return skip; } // Access helper functions for external modules VkFormatProperties CoreChecks::GetPDFormatProperties(const VkFormat format) const { VkFormatProperties format_properties; DispatchGetPhysicalDeviceFormatProperties(physical_device, format, &format_properties); return format_properties; } bool CoreChecks::ValidatePipelineVertexDivisors(std::vector<std::shared_ptr<PIPELINE_STATE>> const &pipe_state_vec, const uint32_t count, const VkGraphicsPipelineCreateInfo *pipe_cis) const { bool skip = false; const VkPhysicalDeviceLimits *device_limits = &phys_dev_props.limits; for (uint32_t i = 0; i < count; i++) { auto pvids_ci = LvlFindInChain<VkPipelineVertexInputDivisorStateCreateInfoEXT>(pipe_cis[i].pVertexInputState->pNext); if (nullptr == pvids_ci) continue; const PIPELINE_STATE *pipe_state = pipe_state_vec[i].get(); for (uint32_t j = 0; j < pvids_ci->vertexBindingDivisorCount; j++) { const VkVertexInputBindingDivisorDescriptionEXT *vibdd = &(pvids_ci->pVertexBindingDivisors[j]); if (vibdd->binding >= device_limits->maxVertexInputBindings) { skip |= LogError( device, "VUID-VkVertexInputBindingDivisorDescriptionEXT-binding-01869", "vkCreateGraphicsPipelines(): Pipeline[%1u] with chained VkPipelineVertexInputDivisorStateCreateInfoEXT, " "pVertexBindingDivisors[%1u] binding index of (%1u) exceeds device maxVertexInputBindings (%1u).", i, j, vibdd->binding, device_limits->maxVertexInputBindings); } if (vibdd->divisor > phys_dev_ext_props.vtx_attrib_divisor_props.maxVertexAttribDivisor) { skip |= LogError( device, "VUID-VkVertexInputBindingDivisorDescriptionEXT-divisor-01870", "vkCreateGraphicsPipelines(): Pipeline[%1u] with chained VkPipelineVertexInputDivisorStateCreateInfoEXT, " "pVertexBindingDivisors[%1u] divisor of (%1u) exceeds extension maxVertexAttribDivisor (%1u).", i, j, vibdd->divisor, phys_dev_ext_props.vtx_attrib_divisor_props.maxVertexAttribDivisor); } if ((0 == vibdd->divisor) && !enabled_features.vtx_attrib_divisor_features.vertexAttributeInstanceRateZeroDivisor) { skip |= LogError( device, "VUID-VkVertexInputBindingDivisorDescriptionEXT-vertexAttributeInstanceRateZeroDivisor-02228", "vkCreateGraphicsPipelines(): Pipeline[%1u] with chained VkPipelineVertexInputDivisorStateCreateInfoEXT, " "pVertexBindingDivisors[%1u] divisor must not be 0 when 
vertexAttributeInstanceRateZeroDivisor feature is not " "enabled.", i, j); } if ((1 != vibdd->divisor) && !enabled_features.vtx_attrib_divisor_features.vertexAttributeInstanceRateDivisor) { skip |= LogError( device, "VUID-VkVertexInputBindingDivisorDescriptionEXT-vertexAttributeInstanceRateDivisor-02229", "vkCreateGraphicsPipelines(): Pipeline[%1u] with chained VkPipelineVertexInputDivisorStateCreateInfoEXT, " "pVertexBindingDivisors[%1u] divisor (%1u) must be 1 when vertexAttributeInstanceRateDivisor feature is not " "enabled.", i, j, vibdd->divisor); } // Find the corresponding binding description and validate input rate setting bool failed_01871 = true; for (size_t k = 0; k < pipe_state->vertex_binding_descriptions_.size(); k++) { if ((vibdd->binding == pipe_state->vertex_binding_descriptions_[k].binding) && (VK_VERTEX_INPUT_RATE_INSTANCE == pipe_state->vertex_binding_descriptions_[k].inputRate)) { failed_01871 = false; break; } } if (failed_01871) { // Description not found, or has incorrect inputRate value skip |= LogError( device, "VUID-VkVertexInputBindingDivisorDescriptionEXT-inputRate-01871", "vkCreateGraphicsPipelines(): Pipeline[%1u] with chained VkPipelineVertexInputDivisorStateCreateInfoEXT, " "pVertexBindingDivisors[%1u] specifies binding index (%1u), but that binding index's " "VkVertexInputBindingDescription.inputRate member is not VK_VERTEX_INPUT_RATE_INSTANCE.", i, j, vibdd->binding); } } } return skip; } bool CoreChecks::ValidatePipelineCacheControlFlags(VkPipelineCreateFlags flags, uint32_t index, const char *caller_name, const char *vuid) const { bool skip = false; if (enabled_features.pipeline_creation_cache_control_features.pipelineCreationCacheControl == VK_FALSE) { const VkPipelineCreateFlags invalid_flags = VK_PIPELINE_CREATE_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT_EXT | VK_PIPELINE_CREATE_EARLY_RETURN_ON_FAILURE_BIT_EXT; if ((flags & invalid_flags) != 0) { skip |= LogError(device, vuid, "%s(): pipelineCreationCacheControl is turned off but pipeline[%u] has VkPipelineCreateFlags " "containing VK_PIPELINE_CREATE_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT_EXT or " "VK_PIPELINE_CREATE_EARLY_RETURN_ON_FAILURE_BIT_EXT", caller_name, index); } } return skip; } bool CoreChecks::PreCallValidateCreatePipelineCache(VkDevice device, const VkPipelineCacheCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkPipelineCache *pPipelineCache) const { bool skip = false; if (enabled_features.pipeline_creation_cache_control_features.pipelineCreationCacheControl == VK_FALSE) { if ((pCreateInfo->flags & VK_PIPELINE_CACHE_CREATE_EXTERNALLY_SYNCHRONIZED_BIT_EXT) != 0) { skip |= LogError(device, "VUID-VkPipelineCacheCreateInfo-pipelineCreationCacheControl-02892", "vkCreatePipelineCache(): pipelineCreationCacheControl is turned off but pCreateInfo::flags contains " "VK_PIPELINE_CACHE_CREATE_EXTERNALLY_SYNCHRONIZED_BIT_EXT"); } } return skip; } bool CoreChecks::PreCallValidateCreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count, const VkGraphicsPipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines, void *cgpl_state_data) const { bool skip = StateTracker::PreCallValidateCreateGraphicsPipelines(device, pipelineCache, count, pCreateInfos, pAllocator, pPipelines, cgpl_state_data); create_graphics_pipeline_api_state *cgpl_state = reinterpret_cast<create_graphics_pipeline_api_state *>(cgpl_state_data); for (uint32_t i = 0; i < count; i++) { skip |= ValidatePipelineLocked(cgpl_state->pipe_state, i); } for 
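/* Hedged sketch of a divisor setup that passes the checks above (hypothetical
   app-side code; the binding must use VK_VERTEX_INPUT_RATE_INSTANCE and the
   divisor must not exceed maxVertexAttribDivisor):
     VkVertexInputBindingDivisorDescriptionEXT divisor = {};
     divisor.binding = 1;   // an instance-rate vertex binding
     divisor.divisor = 4;   // advance the binding once every 4 instances
     VkPipelineVertexInputDivisorStateCreateInfoEXT div_ci = {};
     div_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT;
     div_ci.vertexBindingDivisorCount = 1;
     div_ci.pVertexBindingDivisors = &divisor;
     vertex_input_ci.pNext = &div_ci;  // hypothetical VkPipelineVertexInputStateCreateInfo
*/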
(uint32_t i = 0; i < count; i++) { skip |= ValidatePipelineUnlocked(cgpl_state->pipe_state[i].get(), i); } if (device_extensions.vk_ext_vertex_attribute_divisor) { skip |= ValidatePipelineVertexDivisors(cgpl_state->pipe_state, count, pCreateInfos); } if (ExtEnabled::kNotEnabled != device_extensions.vk_khr_portability_subset) { for (uint32_t i = 0; i < count; ++i) { // Validate depth-stencil state auto raster_state_ci = pCreateInfos[i].pRasterizationState; if ((VK_FALSE == enabled_features.portability_subset_features.separateStencilMaskRef) && raster_state_ci && (VK_CULL_MODE_NONE == raster_state_ci->cullMode)) { auto depth_stencil_ci = pCreateInfos[i].pDepthStencilState; // pDepthStencilState may be NULL; guard before dereferencing if (depth_stencil_ci && (VK_TRUE == depth_stencil_ci->stencilTestEnable) && (depth_stencil_ci->front.reference != depth_stencil_ci->back.reference)) { skip |= LogError(device, "VUID-VkPipelineDepthStencilStateCreateInfo-separateStencilMaskRef-04453", "Invalid Pipeline CreateInfo[%d] (portability error): VkStencilOpState::reference must be the " "same for front and back", i); } } // Validate color attachments auto color_blend_state = pCreateInfos[i].pColorBlendState; if ((VK_FALSE == enabled_features.portability_subset_features.constantAlphaColorBlendFactors) && color_blend_state) { const auto attachments = color_blend_state->pAttachments; for (uint32_t color_attachment_index = 0; color_attachment_index < color_blend_state->attachmentCount; ++color_attachment_index) { if ((VK_BLEND_FACTOR_CONSTANT_ALPHA == attachments[color_attachment_index].srcColorBlendFactor) || (VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA == attachments[color_attachment_index].srcColorBlendFactor)) { skip |= LogError( device, "VUID-VkPipelineColorBlendAttachmentState-constantAlphaColorBlendFactors-04454", "Invalid Pipeline CreateInfo[%d] (portability error): srcColorBlendFactor for color attachment %d must " "not be VK_BLEND_FACTOR_CONSTANT_ALPHA or VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA", i, color_attachment_index); } if ((VK_BLEND_FACTOR_CONSTANT_ALPHA == attachments[color_attachment_index].dstColorBlendFactor) || (VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA == attachments[color_attachment_index].dstColorBlendFactor)) { skip |= LogError( device, "VUID-VkPipelineColorBlendAttachmentState-constantAlphaColorBlendFactors-04455", "Invalid Pipeline CreateInfo[%d] (portability error): dstColorBlendFactor for color attachment %d must " "not be VK_BLEND_FACTOR_CONSTANT_ALPHA or VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA", i, color_attachment_index); } } } } } return skip; } void CoreChecks::PostCallRecordCreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count, const VkGraphicsPipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines, VkResult result, void *cgpl_state_data) { ValidationStateTracker::PostCallRecordCreateGraphicsPipelines(device, pipelineCache, count, pCreateInfos, pAllocator, pPipelines, result, cgpl_state_data); if (enabled_features.fragment_shading_rate_features.primitiveFragmentShadingRate) { for (uint32_t i = 0; i < count; i++) { PIPELINE_STATE *pipeline_state = GetPipelineState(pPipelines[i]); RecordGraphicsPipelineShaderDynamicState(pipeline_state); } } } bool CoreChecks::PreCallValidateCreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count, const VkComputePipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines, void *ccpl_state_data) const { bool skip = StateTracker::PreCallValidateCreateComputePipelines(device, pipelineCache, count, pCreateInfos, pAllocator,
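/* Hedged portability note on the constant-alpha checks above: when
   constantAlphaColorBlendFactors is unavailable, a common workaround is to
   replicate the alpha value into the RGB blend constants and switch the factor
   (illustrative only, hypothetical app-side values):
     att.srcColorBlendFactor = VK_BLEND_FACTOR_CONSTANT_COLOR;  // instead of CONSTANT_ALPHA
     color_blend_ci.blendConstants[0] = alpha;  // r
     color_blend_ci.blendConstants[1] = alpha;  // g
     color_blend_ci.blendConstants[2] = alpha;  // b
*/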
pPipelines, ccpl_state_data); auto *ccpl_state = reinterpret_cast<create_compute_pipeline_api_state *>(ccpl_state_data); for (uint32_t i = 0; i < count; i++) { // TODO: Add Compute Pipeline Verification skip |= ValidateComputePipelineShaderState(ccpl_state->pipe_state[i].get()); skip |= ValidatePipelineCacheControlFlags(pCreateInfos[i].flags, i, "vkCreateComputePipelines", "VUID-VkComputePipelineCreateInfo-pipelineCreationCacheControl-02875"); } return skip; } bool CoreChecks::PreCallValidateCreateRayTracingPipelinesNV(VkDevice device, VkPipelineCache pipelineCache, uint32_t count, const VkRayTracingPipelineCreateInfoNV *pCreateInfos, const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines, void *crtpl_state_data) const { bool skip = StateTracker::PreCallValidateCreateRayTracingPipelinesNV(device, pipelineCache, count, pCreateInfos, pAllocator, pPipelines, crtpl_state_data); auto *crtpl_state = reinterpret_cast<create_ray_tracing_pipeline_api_state *>(crtpl_state_data); for (uint32_t i = 0; i < count; i++) { PIPELINE_STATE *pipeline = crtpl_state->pipe_state[i].get(); if (pipeline->raytracingPipelineCI.flags & VK_PIPELINE_CREATE_DERIVATIVE_BIT) { const PIPELINE_STATE *base_pipeline = nullptr; if (pipeline->raytracingPipelineCI.basePipelineIndex != -1) { base_pipeline = crtpl_state->pipe_state[pipeline->raytracingPipelineCI.basePipelineIndex].get(); } else if (pipeline->raytracingPipelineCI.basePipelineHandle != VK_NULL_HANDLE) { base_pipeline = GetPipelineState(pipeline->raytracingPipelineCI.basePipelineHandle); } if (!base_pipeline || !(base_pipeline->getPipelineCreateFlags() & VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT)) { skip |= LogError( device, "VUID-vkCreateRayTracingPipelinesNV-flags-03416", "vkCreateRayTracingPipelinesNV: If the flags member of any element of pCreateInfos contains the " "VK_PIPELINE_CREATE_DERIVATIVE_BIT flag, " "the base pipeline must have been created with the VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT flag set."); } } skip |= ValidateRayTracingPipeline(pipeline, pCreateInfos[i].flags, /*isKHR*/ false); skip |= ValidatePipelineCacheControlFlags(pCreateInfos[i].flags, i, "vkCreateRayTracingPipelinesNV", "VUID-VkRayTracingPipelineCreateInfoNV-pipelineCreationCacheControl-02905"); } return skip; } bool CoreChecks::PreCallValidateCreateRayTracingPipelinesKHR(VkDevice device, VkDeferredOperationKHR deferredOperation, VkPipelineCache pipelineCache, uint32_t count, const VkRayTracingPipelineCreateInfoKHR *pCreateInfos, const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines, void *crtpl_state_data) const { bool skip = StateTracker::PreCallValidateCreateRayTracingPipelinesKHR(device, deferredOperation, pipelineCache, count, pCreateInfos, pAllocator, pPipelines, crtpl_state_data); auto *crtpl_state = reinterpret_cast<create_ray_tracing_pipeline_khr_api_state *>(crtpl_state_data); for (uint32_t i = 0; i < count; i++) { PIPELINE_STATE *pipeline = crtpl_state->pipe_state[i].get(); if (pipeline->raytracingPipelineCI.flags & VK_PIPELINE_CREATE_DERIVATIVE_BIT) { const PIPELINE_STATE *base_pipeline = nullptr; if (pipeline->raytracingPipelineCI.basePipelineIndex != -1) { base_pipeline = crtpl_state->pipe_state[pipeline->raytracingPipelineCI.basePipelineIndex].get(); } else if (pipeline->raytracingPipelineCI.basePipelineHandle != VK_NULL_HANDLE) { base_pipeline = GetPipelineState(pipeline->raytracingPipelineCI.basePipelineHandle); } if (!base_pipeline || !(base_pipeline->getPipelineCreateFlags() & VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT)) { skip |= LogError( device,
"VUID-vkCreateRayTracingPipelinesKHR-flags-03416", "vkCreateRayTracingPipelinesKHR: If the flags member of any element of pCreateInfos contains the " "VK_PIPELINE_CREATE_DERIVATIVE_BIT flag," "the base pipeline must have been created with the VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT flag set."); } } skip |= ValidateRayTracingPipeline(pipeline, pCreateInfos[i].flags, /*isKHR*/ true); skip |= ValidatePipelineCacheControlFlags(pCreateInfos[i].flags, i, "vkCreateRayTracingPipelinesKHR", "VUID-VkRayTracingPipelineCreateInfoKHR-pipelineCreationCacheControl-02905"); } return skip; } bool CoreChecks::PreCallValidateGetPipelineExecutablePropertiesKHR(VkDevice device, const VkPipelineInfoKHR *pPipelineInfo, uint32_t *pExecutableCount, VkPipelineExecutablePropertiesKHR *pProperties) const { bool skip = false; if (!enabled_features.pipeline_exe_props_features.pipelineExecutableInfo) { skip |= LogError(device, "VUID-vkGetPipelineExecutablePropertiesKHR-pipelineExecutableInfo-03270", "vkGetPipelineExecutablePropertiesKHR called when pipelineExecutableInfo feature is not enabled."); } return skip; } bool CoreChecks::ValidatePipelineExecutableInfo(VkDevice device, const VkPipelineExecutableInfoKHR *pExecutableInfo) const { bool skip = false; if (!enabled_features.pipeline_exe_props_features.pipelineExecutableInfo) { skip |= LogError(device, "VUID-vkGetPipelineExecutableStatisticsKHR-pipelineExecutableInfo-03272", "vkGetPipelineExecutableStatisticsKHR called when pipelineExecutableInfo feature is not enabled."); } VkPipelineInfoKHR pi = {}; pi.sType = VK_STRUCTURE_TYPE_PIPELINE_INFO_KHR; pi.pipeline = pExecutableInfo->pipeline; // We could probably cache this instead of fetching it every time uint32_t executable_count = 0; DispatchGetPipelineExecutablePropertiesKHR(device, &pi, &executable_count, NULL); if (pExecutableInfo->executableIndex >= executable_count) { skip |= LogError(pExecutableInfo->pipeline, "VUID-VkPipelineExecutableInfoKHR-executableIndex-03275", "VkPipelineExecutableInfo::executableIndex (%1u) must be less than the number of executables associated with " "the pipeline (%1u) as returned by vkGetPipelineExecutablePropertiessKHR", pExecutableInfo->executableIndex, executable_count); } return skip; } bool CoreChecks::PreCallValidateGetPipelineExecutableStatisticsKHR(VkDevice device, const VkPipelineExecutableInfoKHR *pExecutableInfo, uint32_t *pStatisticCount, VkPipelineExecutableStatisticKHR *pStatistics) const { bool skip = ValidatePipelineExecutableInfo(device, pExecutableInfo); const PIPELINE_STATE *pipeline_state = GetPipelineState(pExecutableInfo->pipeline); if (!(pipeline_state->getPipelineCreateFlags() & VK_PIPELINE_CREATE_CAPTURE_STATISTICS_BIT_KHR)) { skip |= LogError(pExecutableInfo->pipeline, "VUID-vkGetPipelineExecutableStatisticsKHR-pipeline-03274", "vkGetPipelineExecutableStatisticsKHR called on a pipeline created without the " "VK_PIPELINE_CREATE_CAPTURE_STATISTICS_BIT_KHR flag set"); } return skip; } bool CoreChecks::PreCallValidateGetPipelineExecutableInternalRepresentationsKHR( VkDevice device, const VkPipelineExecutableInfoKHR *pExecutableInfo, uint32_t *pInternalRepresentationCount, VkPipelineExecutableInternalRepresentationKHR *pStatistics) const { bool skip = ValidatePipelineExecutableInfo(device, pExecutableInfo); const PIPELINE_STATE *pipeline_state = GetPipelineState(pExecutableInfo->pipeline); if (!(pipeline_state->getPipelineCreateFlags() & VK_PIPELINE_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR)) { skip |= LogError(pExecutableInfo->pipeline, 
"VUID-vkGetPipelineExecutableInternalRepresentationsKHR-pipeline-03278", "vkGetPipelineExecutableInternalRepresentationsKHR called on a pipeline created without the " "VK_PIPELINE_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR flag set"); } return skip; } bool CoreChecks::PreCallValidateCreateDescriptorSetLayout(VkDevice device, const VkDescriptorSetLayoutCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkDescriptorSetLayout *pSetLayout) const { return cvdescriptorset::ValidateDescriptorSetLayoutCreateInfo( this, pCreateInfo, IsExtEnabled(device_extensions.vk_khr_push_descriptor), phys_dev_ext_props.max_push_descriptors, IsExtEnabled(device_extensions.vk_ext_descriptor_indexing), &enabled_features.core12, &enabled_features.inline_uniform_block, &phys_dev_ext_props.inline_uniform_block_props, &device_extensions); } // Used by CreatePipelineLayout and CmdPushConstants. // Note that the index argument is optional and only used by CreatePipelineLayout. bool CoreChecks::ValidatePushConstantRange(const uint32_t offset, const uint32_t size, const char *caller_name, uint32_t index = 0) const { if (disabled[push_constant_range]) return false; uint32_t const max_push_constants_size = phys_dev_props.limits.maxPushConstantsSize; bool skip = false; // Check that offset + size don't exceed the max. // Prevent arithetic overflow here by avoiding addition and testing in this order. if ((offset >= max_push_constants_size) || (size > max_push_constants_size - offset)) { // This is a pain just to adapt the log message to the caller, but better to sort it out only when there is a problem. if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) { if (offset >= max_push_constants_size) { skip |= LogError( device, "VUID-VkPushConstantRange-offset-00294", "%s call has push constants index %u with offset %u that exceeds this device's maxPushConstantSize of %u.", caller_name, index, offset, max_push_constants_size); } if (size > max_push_constants_size - offset) { skip |= LogError(device, "VUID-VkPushConstantRange-size-00298", "%s call has push constants index %u with offset %u and size %u that exceeds this device's " "maxPushConstantSize of %u.", caller_name, index, offset, size, max_push_constants_size); } } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) { if (offset >= max_push_constants_size) { skip |= LogError( device, "VUID-vkCmdPushConstants-offset-00370", "%s call has push constants index %u with offset %u that exceeds this device's maxPushConstantSize of %u.", caller_name, index, offset, max_push_constants_size); } if (size > max_push_constants_size - offset) { skip |= LogError(device, "VUID-vkCmdPushConstants-size-00371", "%s call has push constants index %u with offset %u and size %u that exceeds this device's " "maxPushConstantSize of %u.", caller_name, index, offset, size, max_push_constants_size); } } else { skip |= LogError(device, kVUID_Core_DrawState_InternalError, "%s caller not supported.", caller_name); } } // size needs to be non-zero and a multiple of 4. if ((size == 0) || ((size & 0x3) != 0)) { if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) { if (size == 0) { skip |= LogError(device, "VUID-VkPushConstantRange-size-00296", "%s call has push constants index %u with size %u. Size must be greater than zero.", caller_name, index, size); } if (size & 0x3) { skip |= LogError(device, "VUID-VkPushConstantRange-size-00297", "%s call has push constants index %u with size %u. 
Size must be a multiple of 4.", caller_name, index, size); } } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) { if (size == 0) { skip |= LogError(device, "VUID-vkCmdPushConstants-size-arraylength", "%s call has push constants index %u with size %u. Size must be greater than zero.", caller_name, index, size); } if (size & 0x3) { skip |= LogError(device, "VUID-vkCmdPushConstants-size-00369", "%s call has push constants index %u with size %u. Size must be a multiple of 4.", caller_name, index, size); } } else { skip |= LogError(device, kVUID_Core_DrawState_InternalError, "%s caller not supported.", caller_name); } } // offset needs to be a multiple of 4. if ((offset & 0x3) != 0) { if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) { skip |= LogError(device, "VUID-VkPushConstantRange-offset-00295", "%s call has push constants index %u with offset %u. Offset must be a multiple of 4.", caller_name, index, offset); } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) { skip |= LogError(device, "VUID-vkCmdPushConstants-offset-00368", "%s call has push constants with offset %u. Offset must be a multiple of 4.", caller_name, offset); } else { skip |= LogError(device, kVUID_Core_DrawState_InternalError, "%s caller not supported.", caller_name); } } return skip; } enum DSL_DESCRIPTOR_GROUPS { DSL_TYPE_SAMPLERS = 0, DSL_TYPE_UNIFORM_BUFFERS, DSL_TYPE_STORAGE_BUFFERS, DSL_TYPE_SAMPLED_IMAGES, DSL_TYPE_STORAGE_IMAGES, DSL_TYPE_INPUT_ATTACHMENTS, DSL_TYPE_INLINE_UNIFORM_BLOCK, DSL_NUM_DESCRIPTOR_GROUPS }; // Used by PreCallValidateCreatePipelineLayout. // Returns an array of size DSL_NUM_DESCRIPTOR_GROUPS of the maximum number of descriptors used in any single pipeline stage std::valarray<uint32_t> GetDescriptorCountMaxPerStage( const DeviceFeatures *enabled_features, const std::vector<std::shared_ptr<cvdescriptorset::DescriptorSetLayout const>> &set_layouts, bool skip_update_after_bind) { // Identify active pipeline stages std::vector<VkShaderStageFlags> stage_flags = {VK_SHADER_STAGE_VERTEX_BIT, VK_SHADER_STAGE_FRAGMENT_BIT, VK_SHADER_STAGE_COMPUTE_BIT}; if (enabled_features->core.geometryShader) { stage_flags.push_back(VK_SHADER_STAGE_GEOMETRY_BIT); } if (enabled_features->core.tessellationShader) { stage_flags.push_back(VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT); stage_flags.push_back(VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT); } // Allow iteration over enum values std::vector<DSL_DESCRIPTOR_GROUPS> dsl_groups = { DSL_TYPE_SAMPLERS, DSL_TYPE_UNIFORM_BUFFERS, DSL_TYPE_STORAGE_BUFFERS, DSL_TYPE_SAMPLED_IMAGES, DSL_TYPE_STORAGE_IMAGES, DSL_TYPE_INPUT_ATTACHMENTS, DSL_TYPE_INLINE_UNIFORM_BLOCK}; // Sum by layouts per stage, then pick max of stages per type std::valarray<uint32_t> max_sum(0U, DSL_NUM_DESCRIPTOR_GROUPS); // max descriptor sum among all pipeline stages for (auto stage : stage_flags) { std::valarray<uint32_t> stage_sum(0U, DSL_NUM_DESCRIPTOR_GROUPS); // per-stage sums for (auto dsl : set_layouts) { if (skip_update_after_bind && (dsl->GetCreateFlags() & VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT)) { continue; } for (uint32_t binding_idx = 0; binding_idx < dsl->GetBindingCount(); binding_idx++) { const VkDescriptorSetLayoutBinding *binding = dsl->GetDescriptorSetLayoutBindingPtrFromIndex(binding_idx); // Bindings with a descriptorCount of 0 are "reserved" and should be skipped if (0 != (stage & binding->stageFlags) && binding->descriptorCount > 0) { switch (binding->descriptorType) { case VK_DESCRIPTOR_TYPE_SAMPLER: stage_sum[DSL_TYPE_SAMPLERS] += 
binding->descriptorCount; break; case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER: case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC: stage_sum[DSL_TYPE_UNIFORM_BUFFERS] += binding->descriptorCount; break; case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER: case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: stage_sum[DSL_TYPE_STORAGE_BUFFERS] += binding->descriptorCount; break; case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE: case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER: stage_sum[DSL_TYPE_SAMPLED_IMAGES] += binding->descriptorCount; break; case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: stage_sum[DSL_TYPE_STORAGE_IMAGES] += binding->descriptorCount; break; case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER: stage_sum[DSL_TYPE_SAMPLED_IMAGES] += binding->descriptorCount; stage_sum[DSL_TYPE_SAMPLERS] += binding->descriptorCount; break; case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT: stage_sum[DSL_TYPE_INPUT_ATTACHMENTS] += binding->descriptorCount; break; case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT: // count one block per binding. descriptorCount is number of bytes stage_sum[DSL_TYPE_INLINE_UNIFORM_BLOCK]++; break; default: break; } } } } for (auto type : dsl_groups) { max_sum[type] = std::max(stage_sum[type], max_sum[type]); } } return max_sum; } // Used by PreCallValidateCreatePipelineLayout. // Returns a map indexed by VK_DESCRIPTOR_TYPE_* enum of the summed descriptors by type. // Note: descriptors only count against the limit once even if used by multiple stages. std::map<uint32_t, uint32_t> GetDescriptorSum( const std::vector<std::shared_ptr<cvdescriptorset::DescriptorSetLayout const>> &set_layouts, bool skip_update_after_bind) { std::map<uint32_t, uint32_t> sum_by_type; for (auto dsl : set_layouts) { if (skip_update_after_bind && (dsl->GetCreateFlags() & VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT)) { continue; } for (uint32_t binding_idx = 0; binding_idx < dsl->GetBindingCount(); binding_idx++) { const VkDescriptorSetLayoutBinding *binding = dsl->GetDescriptorSetLayoutBindingPtrFromIndex(binding_idx); // Bindings with a descriptorCount of 0 are "reserved" and should be skipped if (binding->descriptorCount > 0) { if (binding->descriptorType == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT) { // count one block per binding. 
descriptorCount is number of bytes sum_by_type[binding->descriptorType]++; } else { sum_by_type[binding->descriptorType] += binding->descriptorCount; } } } } return sum_by_type; } bool CoreChecks::PreCallValidateCreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkPipelineLayout *pPipelineLayout) const { bool skip = false; // Validate layout count against device physical limit if (pCreateInfo->setLayoutCount > phys_dev_props.limits.maxBoundDescriptorSets) { skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-setLayoutCount-00286", "vkCreatePipelineLayout(): setLayoutCount (%d) exceeds physical device maxBoundDescriptorSets limit (%d).", pCreateInfo->setLayoutCount, phys_dev_props.limits.maxBoundDescriptorSets); } // Validate Push Constant ranges uint32_t i, j; for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) { skip |= ValidatePushConstantRange(pCreateInfo->pPushConstantRanges[i].offset, pCreateInfo->pPushConstantRanges[i].size, "vkCreatePipelineLayout()", i); if (0 == pCreateInfo->pPushConstantRanges[i].stageFlags) { skip |= LogError(device, "VUID-VkPushConstantRange-stageFlags-requiredbitmask", "vkCreatePipelineLayout() call has no stageFlags set."); } } // As of 1.0.28, there is a VU that states that a stage flag cannot appear more than once in the list of push constant ranges. for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) { for (j = i + 1; j < pCreateInfo->pushConstantRangeCount; ++j) { if (0 != (pCreateInfo->pPushConstantRanges[i].stageFlags & pCreateInfo->pPushConstantRanges[j].stageFlags)) { skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-pPushConstantRanges-00292", "vkCreatePipelineLayout() Duplicate stage flags found in ranges %d and %d.", i, j); } } } // Early-out if (skip) return skip; std::vector<std::shared_ptr<cvdescriptorset::DescriptorSetLayout const>> set_layouts(pCreateInfo->setLayoutCount, nullptr); unsigned int push_descriptor_set_count = 0; { for (i = 0; i < pCreateInfo->setLayoutCount; ++i) { set_layouts[i] = GetDescriptorSetLayoutShared(pCreateInfo->pSetLayouts[i]); if (set_layouts[i]->IsPushDescriptor()) ++push_descriptor_set_count; } } if (push_descriptor_set_count > 1) { skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00293", "vkCreatePipelineLayout() Multiple push descriptor sets found."); } // Max descriptors by type, within a single pipeline stage std::valarray<uint32_t> max_descriptors_per_stage = GetDescriptorCountMaxPerStage(&enabled_features, set_layouts, true); // Samplers if (max_descriptors_per_stage[DSL_TYPE_SAMPLERS] > phys_dev_props.limits.maxPerStageDescriptorSamplers) { const char *vuid = (device_extensions.vk_ext_descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03016" : "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00287"; skip |= LogError(device, vuid, "vkCreatePipelineLayout(): max per-stage sampler bindings count (%d) exceeds device " "maxPerStageDescriptorSamplers limit (%d).", max_descriptors_per_stage[DSL_TYPE_SAMPLERS], phys_dev_props.limits.maxPerStageDescriptorSamplers); } // Uniform buffers if (max_descriptors_per_stage[DSL_TYPE_UNIFORM_BUFFERS] > phys_dev_props.limits.maxPerStageDescriptorUniformBuffers) { const char *vuid = (device_extensions.vk_ext_descriptor_indexing) ? 
"VUID-VkPipelineLayoutCreateInfo-descriptorType-03017" : "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00288"; skip |= LogError(device, vuid, "vkCreatePipelineLayout(): max per-stage uniform buffer bindings count (%d) exceeds device " "maxPerStageDescriptorUniformBuffers limit (%d).", max_descriptors_per_stage[DSL_TYPE_UNIFORM_BUFFERS], phys_dev_props.limits.maxPerStageDescriptorUniformBuffers); } // Storage buffers if (max_descriptors_per_stage[DSL_TYPE_STORAGE_BUFFERS] > phys_dev_props.limits.maxPerStageDescriptorStorageBuffers) { const char *vuid = (device_extensions.vk_ext_descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03018" : "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00289"; skip |= LogError(device, vuid, "vkCreatePipelineLayout(): max per-stage storage buffer bindings count (%d) exceeds device " "maxPerStageDescriptorStorageBuffers limit (%d).", max_descriptors_per_stage[DSL_TYPE_STORAGE_BUFFERS], phys_dev_props.limits.maxPerStageDescriptorStorageBuffers); } // Sampled images if (max_descriptors_per_stage[DSL_TYPE_SAMPLED_IMAGES] > phys_dev_props.limits.maxPerStageDescriptorSampledImages) { const char *vuid = (device_extensions.vk_ext_descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03019" : "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00290"; skip |= LogError(device, vuid, "vkCreatePipelineLayout(): max per-stage sampled image bindings count (%d) exceeds device " "maxPerStageDescriptorSampledImages limit (%d).", max_descriptors_per_stage[DSL_TYPE_SAMPLED_IMAGES], phys_dev_props.limits.maxPerStageDescriptorSampledImages); } // Storage images if (max_descriptors_per_stage[DSL_TYPE_STORAGE_IMAGES] > phys_dev_props.limits.maxPerStageDescriptorStorageImages) { const char *vuid = (device_extensions.vk_ext_descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03020" : "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00291"; skip |= LogError(device, vuid, "vkCreatePipelineLayout(): max per-stage storage image bindings count (%d) exceeds device " "maxPerStageDescriptorStorageImages limit (%d).", max_descriptors_per_stage[DSL_TYPE_STORAGE_IMAGES], phys_dev_props.limits.maxPerStageDescriptorStorageImages); } // Input attachments if (max_descriptors_per_stage[DSL_TYPE_INPUT_ATTACHMENTS] > phys_dev_props.limits.maxPerStageDescriptorInputAttachments) { const char *vuid = (device_extensions.vk_ext_descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03021" : "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01676"; skip |= LogError(device, vuid, "vkCreatePipelineLayout(): max per-stage input attachment bindings count (%d) exceeds device " "maxPerStageDescriptorInputAttachments limit (%d).", max_descriptors_per_stage[DSL_TYPE_INPUT_ATTACHMENTS], phys_dev_props.limits.maxPerStageDescriptorInputAttachments); } // Inline uniform blocks if (max_descriptors_per_stage[DSL_TYPE_INLINE_UNIFORM_BLOCK] > phys_dev_ext_props.inline_uniform_block_props.maxPerStageDescriptorInlineUniformBlocks) { const char *vuid = (device_extensions.vk_ext_descriptor_indexing) ? 
"VUID-VkPipelineLayoutCreateInfo-descriptorType-02214" : "VUID-VkPipelineLayoutCreateInfo-descriptorType-02212"; skip |= LogError(device, vuid, "vkCreatePipelineLayout(): max per-stage inline uniform block bindings count (%d) exceeds device " "maxPerStageDescriptorInlineUniformBlocks limit (%d).", max_descriptors_per_stage[DSL_TYPE_INLINE_UNIFORM_BLOCK], phys_dev_ext_props.inline_uniform_block_props.maxPerStageDescriptorInlineUniformBlocks); } // Total descriptors by type // std::map<uint32_t, uint32_t> sum_all_stages = GetDescriptorSum(set_layouts, true); // Samplers uint32_t sum = sum_all_stages[VK_DESCRIPTOR_TYPE_SAMPLER] + sum_all_stages[VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER]; if (sum > phys_dev_props.limits.maxDescriptorSetSamplers) { const char *vuid = (device_extensions.vk_ext_descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03028" : "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01677"; skip |= LogError(device, vuid, "vkCreatePipelineLayout(): sum of sampler bindings among all stages (%d) exceeds device " "maxDescriptorSetSamplers limit (%d).", sum, phys_dev_props.limits.maxDescriptorSetSamplers); } // Uniform buffers if (sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER] > phys_dev_props.limits.maxDescriptorSetUniformBuffers) { const char *vuid = (device_extensions.vk_ext_descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03029" : "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01678"; skip |= LogError(device, vuid, "vkCreatePipelineLayout(): sum of uniform buffer bindings among all stages (%d) exceeds device " "maxDescriptorSetUniformBuffers limit (%d).", sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER], phys_dev_props.limits.maxDescriptorSetUniformBuffers); } // Dynamic uniform buffers if (sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC] > phys_dev_props.limits.maxDescriptorSetUniformBuffersDynamic) { const char *vuid = (device_extensions.vk_ext_descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03030" : "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01679"; skip |= LogError(device, vuid, "vkCreatePipelineLayout(): sum of dynamic uniform buffer bindings among all stages (%d) exceeds device " "maxDescriptorSetUniformBuffersDynamic limit (%d).", sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC], phys_dev_props.limits.maxDescriptorSetUniformBuffersDynamic); } // Storage buffers if (sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER] > phys_dev_props.limits.maxDescriptorSetStorageBuffers) { const char *vuid = (device_extensions.vk_ext_descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03031" : "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01680"; skip |= LogError(device, vuid, "vkCreatePipelineLayout(): sum of storage buffer bindings among all stages (%d) exceeds device " "maxDescriptorSetStorageBuffers limit (%d).", sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER], phys_dev_props.limits.maxDescriptorSetStorageBuffers); } // Dynamic storage buffers if (sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC] > phys_dev_props.limits.maxDescriptorSetStorageBuffersDynamic) { const char *vuid = (device_extensions.vk_ext_descriptor_indexing) ? 
"VUID-VkPipelineLayoutCreateInfo-descriptorType-03032" : "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01681"; skip |= LogError(device, vuid, "vkCreatePipelineLayout(): sum of dynamic storage buffer bindings among all stages (%d) exceeds device " "maxDescriptorSetStorageBuffersDynamic limit (%d).", sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC], phys_dev_props.limits.maxDescriptorSetStorageBuffersDynamic); } // Sampled images sum = sum_all_stages[VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE] + sum_all_stages[VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER] + sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER]; if (sum > phys_dev_props.limits.maxDescriptorSetSampledImages) { const char *vuid = (device_extensions.vk_ext_descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03033" : "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01682"; skip |= LogError(device, vuid, "vkCreatePipelineLayout(): sum of sampled image bindings among all stages (%d) exceeds device " "maxDescriptorSetSampledImages limit (%d).", sum, phys_dev_props.limits.maxDescriptorSetSampledImages); } // Storage images sum = sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_IMAGE] + sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER]; if (sum > phys_dev_props.limits.maxDescriptorSetStorageImages) { const char *vuid = (device_extensions.vk_ext_descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03034" : "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01683"; skip |= LogError(device, vuid, "vkCreatePipelineLayout(): sum of storage image bindings among all stages (%d) exceeds device " "maxDescriptorSetStorageImages limit (%d).", sum, phys_dev_props.limits.maxDescriptorSetStorageImages); } // Input attachments if (sum_all_stages[VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT] > phys_dev_props.limits.maxDescriptorSetInputAttachments) { const char *vuid = (device_extensions.vk_ext_descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03035" : "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01684"; skip |= LogError(device, vuid, "vkCreatePipelineLayout(): sum of input attachment bindings among all stages (%d) exceeds device " "maxDescriptorSetInputAttachments limit (%d).", sum_all_stages[VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT], phys_dev_props.limits.maxDescriptorSetInputAttachments); } // Inline uniform blocks if (sum_all_stages[VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT] > phys_dev_ext_props.inline_uniform_block_props.maxDescriptorSetInlineUniformBlocks) { const char *vuid = (device_extensions.vk_ext_descriptor_indexing) ? 
"VUID-VkPipelineLayoutCreateInfo-descriptorType-02216" : "VUID-VkPipelineLayoutCreateInfo-descriptorType-02213"; skip |= LogError(device, vuid, "vkCreatePipelineLayout(): sum of inline uniform block bindings among all stages (%d) exceeds device " "maxDescriptorSetInlineUniformBlocks limit (%d).", sum_all_stages[VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT], phys_dev_ext_props.inline_uniform_block_props.maxDescriptorSetInlineUniformBlocks); } if (device_extensions.vk_ext_descriptor_indexing) { // XXX TODO: replace with correct VU messages // Max descriptors by type, within a single pipeline stage std::valarray<uint32_t> max_descriptors_per_stage_update_after_bind = GetDescriptorCountMaxPerStage(&enabled_features, set_layouts, false); // Samplers if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_SAMPLERS] > phys_dev_props_core12.maxPerStageDescriptorUpdateAfterBindSamplers) { skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-descriptorType-03022", "vkCreatePipelineLayout(): max per-stage sampler bindings count (%d) exceeds device " "maxPerStageDescriptorUpdateAfterBindSamplers limit (%d).", max_descriptors_per_stage_update_after_bind[DSL_TYPE_SAMPLERS], phys_dev_props_core12.maxPerStageDescriptorUpdateAfterBindSamplers); } // Uniform buffers if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_UNIFORM_BUFFERS] > phys_dev_props_core12.maxPerStageDescriptorUpdateAfterBindUniformBuffers) { skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-descriptorType-03023", "vkCreatePipelineLayout(): max per-stage uniform buffer bindings count (%d) exceeds device " "maxPerStageDescriptorUpdateAfterBindUniformBuffers limit (%d).", max_descriptors_per_stage_update_after_bind[DSL_TYPE_UNIFORM_BUFFERS], phys_dev_props_core12.maxPerStageDescriptorUpdateAfterBindUniformBuffers); } // Storage buffers if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_STORAGE_BUFFERS] > phys_dev_props_core12.maxPerStageDescriptorUpdateAfterBindStorageBuffers) { skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-descriptorType-03024", "vkCreatePipelineLayout(): max per-stage storage buffer bindings count (%d) exceeds device " "maxPerStageDescriptorUpdateAfterBindStorageBuffers limit (%d).", max_descriptors_per_stage_update_after_bind[DSL_TYPE_STORAGE_BUFFERS], phys_dev_props_core12.maxPerStageDescriptorUpdateAfterBindStorageBuffers); } // Sampled images if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_SAMPLED_IMAGES] > phys_dev_props_core12.maxPerStageDescriptorUpdateAfterBindSampledImages) { skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-descriptorType-03025", "vkCreatePipelineLayout(): max per-stage sampled image bindings count (%d) exceeds device " "maxPerStageDescriptorUpdateAfterBindSampledImages limit (%d).", max_descriptors_per_stage_update_after_bind[DSL_TYPE_SAMPLED_IMAGES], phys_dev_props_core12.maxPerStageDescriptorUpdateAfterBindSampledImages); } // Storage images if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_STORAGE_IMAGES] > phys_dev_props_core12.maxPerStageDescriptorUpdateAfterBindStorageImages) { skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-descriptorType-03026", "vkCreatePipelineLayout(): max per-stage storage image bindings count (%d) exceeds device " "maxPerStageDescriptorUpdateAfterBindStorageImages limit (%d).", max_descriptors_per_stage_update_after_bind[DSL_TYPE_STORAGE_IMAGES], phys_dev_props_core12.maxPerStageDescriptorUpdateAfterBindStorageImages); } // Input attachments if 
(max_descriptors_per_stage_update_after_bind[DSL_TYPE_INPUT_ATTACHMENTS] > phys_dev_props_core12.maxPerStageDescriptorUpdateAfterBindInputAttachments) { skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-descriptorType-03027", "vkCreatePipelineLayout(): max per-stage input attachment bindings count (%d) exceeds device " "maxPerStageDescriptorUpdateAfterBindInputAttachments limit (%d).", max_descriptors_per_stage_update_after_bind[DSL_TYPE_INPUT_ATTACHMENTS], phys_dev_props_core12.maxPerStageDescriptorUpdateAfterBindInputAttachments); } // Inline uniform blocks if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_INLINE_UNIFORM_BLOCK] > phys_dev_ext_props.inline_uniform_block_props.maxPerStageDescriptorUpdateAfterBindInlineUniformBlocks) { skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-descriptorType-02215", "vkCreatePipelineLayout(): max per-stage inline uniform block bindings count (%d) exceeds device " "maxPerStageDescriptorUpdateAfterBindInlineUniformBlocks limit (%d).", max_descriptors_per_stage_update_after_bind[DSL_TYPE_INLINE_UNIFORM_BLOCK], phys_dev_ext_props.inline_uniform_block_props.maxPerStageDescriptorUpdateAfterBindInlineUniformBlocks); } // Total descriptors by type, summed across all pipeline stages // std::map<uint32_t, uint32_t> sum_all_stages_update_after_bind = GetDescriptorSum(set_layouts, false); // Samplers sum = sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_SAMPLER] + sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER]; if (sum > phys_dev_props_core12.maxDescriptorSetUpdateAfterBindSamplers) { skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03036", "vkCreatePipelineLayout(): sum of sampler bindings among all stages (%d) exceeds device " "maxDescriptorSetUpdateAfterBindSamplers limit (%d).", sum, phys_dev_props_core12.maxDescriptorSetUpdateAfterBindSamplers); } // Uniform buffers if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER] > phys_dev_props_core12.maxDescriptorSetUpdateAfterBindUniformBuffers) { skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03037", "vkCreatePipelineLayout(): sum of uniform buffer bindings among all stages (%d) exceeds device " "maxDescriptorSetUpdateAfterBindUniformBuffers limit (%d).", sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER], phys_dev_props_core12.maxDescriptorSetUpdateAfterBindUniformBuffers); } // Dynamic uniform buffers if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC] > phys_dev_props_core12.maxDescriptorSetUpdateAfterBindUniformBuffersDynamic) { skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03038", "vkCreatePipelineLayout(): sum of dynamic uniform buffer bindings among all stages (%d) exceeds device " "maxDescriptorSetUpdateAfterBindUniformBuffersDynamic limit (%d).", sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC], phys_dev_props_core12.maxDescriptorSetUpdateAfterBindUniformBuffersDynamic); } // Storage buffers if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER] > phys_dev_props_core12.maxDescriptorSetUpdateAfterBindStorageBuffers) { skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03039", "vkCreatePipelineLayout(): sum of storage buffer bindings among all stages (%d) exceeds device " "maxDescriptorSetUpdateAfterBindStorageBuffers limit (%d).", sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER], 
phys_dev_props_core12.maxDescriptorSetUpdateAfterBindStorageBuffers); } // Dynamic storage buffers if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC] > phys_dev_props_core12.maxDescriptorSetUpdateAfterBindStorageBuffersDynamic) { skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03040", "vkCreatePipelineLayout(): sum of dynamic storage buffer bindings among all stages (%d) exceeds device " "maxDescriptorSetUpdateAfterBindStorageBuffersDynamic limit (%d).", sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC], phys_dev_props_core12.maxDescriptorSetUpdateAfterBindStorageBuffersDynamic); } // Sampled images sum = sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE] + sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER] + sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER]; if (sum > phys_dev_props_core12.maxDescriptorSetUpdateAfterBindSampledImages) { skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03041", "vkCreatePipelineLayout(): sum of sampled image bindings among all stages (%d) exceeds device " "maxDescriptorSetUpdateAfterBindSampledImages limit (%d).", sum, phys_dev_props_core12.maxDescriptorSetUpdateAfterBindSampledImages); } // Storage images sum = sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_IMAGE] + sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER]; if (sum > phys_dev_props_core12.maxDescriptorSetUpdateAfterBindStorageImages) { skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03042", "vkCreatePipelineLayout(): sum of storage image bindings among all stages (%d) exceeds device " "maxDescriptorSetUpdateAfterBindStorageImages limit (%d).", sum, phys_dev_props_core12.maxDescriptorSetUpdateAfterBindStorageImages); } // Input attachments if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT] > phys_dev_props_core12.maxDescriptorSetUpdateAfterBindInputAttachments) { skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03043", "vkCreatePipelineLayout(): sum of input attachment bindings among all stages (%d) exceeds device " "maxDescriptorSetUpdateAfterBindInputAttachments limit (%d).", sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT], phys_dev_props_core12.maxDescriptorSetUpdateAfterBindInputAttachments); } // Inline uniform blocks if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT] > phys_dev_ext_props.inline_uniform_block_props.maxDescriptorSetUpdateAfterBindInlineUniformBlocks) { skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-descriptorType-02217", "vkCreatePipelineLayout(): sum of inline uniform block bindings among all stages (%d) exceeds device " "maxDescriptorSetUpdateAfterBindInlineUniformBlocks limit (%d).", sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT], phys_dev_ext_props.inline_uniform_block_props.maxDescriptorSetUpdateAfterBindInlineUniformBlocks); } } if (device_extensions.vk_ext_fragment_density_map_2) { uint32_t sum_subsampled_samplers = 0; for (auto dsl : set_layouts) { // find the number of subsampled samplers across all stages // NOTE: this does not use the GetDescriptorSum pattern because it needs the GetSamplerState method if ((dsl->GetCreateFlags() & VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT)) { continue; } for (uint32_t binding_idx = 0; binding_idx < dsl->GetBindingCount(); binding_idx++) { const
VkDescriptorSetLayoutBinding *binding = dsl->GetDescriptorSetLayoutBindingPtrFromIndex(binding_idx); // Bindings with a descriptorCount of 0 are "reserved" and should be skipped if (binding->descriptorCount > 0) { if (((binding->descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER) || (binding->descriptorType == VK_DESCRIPTOR_TYPE_SAMPLER)) && (binding->pImmutableSamplers != nullptr)) { for (uint32_t sampler_idx = 0; sampler_idx < binding->descriptorCount; sampler_idx++) { const SAMPLER_STATE *state = GetSamplerState(binding->pImmutableSamplers[sampler_idx]); if (state->createInfo.flags & (VK_SAMPLER_CREATE_SUBSAMPLED_BIT_EXT | VK_SAMPLER_CREATE_SUBSAMPLED_COARSE_RECONSTRUCTION_BIT_EXT)) { sum_subsampled_samplers++; } } } } } } if (sum_subsampled_samplers > phys_dev_ext_props.fragment_density_map2_props.maxDescriptorSetSubsampledSamplers) { skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-pImmutableSamplers-03566", "vkCreatePipelineLayout(): sum of sampler bindings with flags containing " "VK_SAMPLER_CREATE_SUBSAMPLED_BIT_EXT or " "VK_SAMPLER_CREATE_SUBSAMPLED_COARSE_RECONSTRUCTION_BIT_EXT among all stages (%d) " "exceeds device maxDescriptorSetSubsampledSamplers limit (%d).", sum_subsampled_samplers, phys_dev_ext_props.fragment_density_map2_props.maxDescriptorSetSubsampledSamplers); } } return skip; } bool CoreChecks::PreCallValidateResetDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorPoolResetFlags flags) const { // Make sure sets being destroyed are not currently in-use if (disabled[idle_descriptor_set]) return false; bool skip = false; const DESCRIPTOR_POOL_STATE *pool = GetDescriptorPoolState(descriptorPool); if (pool != nullptr) { for (auto ds : pool->sets) { if (ds && ds->in_use.load()) { skip |= LogError(descriptorPool, "VUID-vkResetDescriptorPool-descriptorPool-00313", "It is invalid to call vkResetDescriptorPool() with descriptor sets in use by a command buffer."); if (skip) break; } } } return skip; } // Ensure the pool contains enough descriptors and descriptor sets to satisfy // an allocation request. Fills common_data with the total number of descriptors of each type required, // as well as DescriptorSetLayout ptrs used for later update.
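// Illustrative note (hypothetical numbers, comment only): if a pool was created with
// maxSets = 2 and room for 4 UNIFORM_BUFFER descriptors, a request to allocate two sets
// whose layouts together require 5 UNIFORM_BUFFER descriptors can be flagged, since the
// per-type totals gathered here are compared against the pool's remaining capacity
// (the exact behavior depends on which extensions, e.g. VK_KHR_maintenance1, are enabled).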
bool CoreChecks::PreCallValidateAllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAllocateInfo, VkDescriptorSet *pDescriptorSets, void *ads_state_data) const { StateTracker::PreCallValidateAllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets, ads_state_data); cvdescriptorset::AllocateDescriptorSetsData *ads_state = reinterpret_cast<cvdescriptorset::AllocateDescriptorSetsData *>(ads_state_data); // All state checks for AllocateDescriptorSets are done in a single function return ValidateAllocateDescriptorSets(pAllocateInfo, ads_state); } bool CoreChecks::PreCallValidateFreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t count, const VkDescriptorSet *pDescriptorSets) const { // Make sure that no sets being destroyed are in-flight bool skip = false; // First make sure sets being destroyed are not currently in-use for (uint32_t i = 0; i < count; ++i) { if (pDescriptorSets[i] != VK_NULL_HANDLE) { skip |= ValidateIdleDescriptorSet(pDescriptorSets[i], "vkFreeDescriptorSets"); } } const DESCRIPTOR_POOL_STATE *pool_state = GetDescriptorPoolState(descriptorPool); if (pool_state && !(VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT & pool_state->createInfo.flags)) { // Can't Free from a NON_FREE pool skip |= LogError(descriptorPool, "VUID-vkFreeDescriptorSets-descriptorPool-00312", "It is invalid to call vkFreeDescriptorSets() with a pool created without setting " "VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT."); } return skip; } bool CoreChecks::PreCallValidateUpdateDescriptorSets(VkDevice device, uint32_t descriptorWriteCount, const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount, const VkCopyDescriptorSet *pDescriptorCopies) const { // First thing to do is perform map look-ups. // NOTE : UpdateDescriptorSets is somewhat unique in that it's operating on a number of DescriptorSets // so we can't just do a single map look-up up-front, but do them individually in functions below // Now make call(s) that validate state, but don't perform state updates in this function // Note, here DescriptorSets is unique in that we don't yet have an instance. Using a helper function in the // namespace which will parse params and make calls into specific class instances return ValidateUpdateDescriptorSets(descriptorWriteCount, pDescriptorWrites, descriptorCopyCount, pDescriptorCopies, "vkUpdateDescriptorSets()"); } bool CoreChecks::PreCallValidateBeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo) const { const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); if (!cb_state) return false; bool skip = false; if (cb_state->in_use.load()) { skip |= LogError(commandBuffer, "VUID-vkBeginCommandBuffer-commandBuffer-00049", "Calling vkBeginCommandBuffer() on active %s before it has completed. 
You must check " "command buffer fence before this call.", report_data->FormatHandle(commandBuffer).c_str()); } if (cb_state->createInfo.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) { // Primary Command Buffer const VkCommandBufferUsageFlags invalid_usage = (VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT | VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT); if ((pBeginInfo->flags & invalid_usage) == invalid_usage) { skip |= LogError(commandBuffer, "VUID-vkBeginCommandBuffer-commandBuffer-02840", "vkBeginCommandBuffer(): Primary %s can't have both VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT and " "VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set.", report_data->FormatHandle(commandBuffer).c_str()); } } else { // Secondary Command Buffer const VkCommandBufferInheritanceInfo *info = pBeginInfo->pInheritanceInfo; if (!info) { skip |= LogError(commandBuffer, "VUID-vkBeginCommandBuffer-commandBuffer-00051", "vkBeginCommandBuffer(): Secondary %s must have inheritance info.", report_data->FormatHandle(commandBuffer).c_str()); } else { if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) { assert(info->renderPass); const auto *framebuffer = GetFramebufferState(info->framebuffer); if (framebuffer) { if (framebuffer->createInfo.renderPass != info->renderPass) { const auto *render_pass = GetRenderPassState(info->renderPass); // renderPass that framebuffer was created with must be compatible with local renderPass skip |= ValidateRenderPassCompatibility("framebuffer", framebuffer->rp_state.get(), "command buffer", render_pass, "vkBeginCommandBuffer()", "VUID-VkCommandBufferBeginInfo-flags-00055"); } } } if ((info->occlusionQueryEnable == VK_FALSE || enabled_features.core.occlusionQueryPrecise == VK_FALSE) && (info->queryFlags & VK_QUERY_CONTROL_PRECISE_BIT)) { skip |= LogError(commandBuffer, "VUID-vkBeginCommandBuffer-commandBuffer-00052", "vkBeginCommandBuffer(): Secondary %s must not have VK_QUERY_CONTROL_PRECISE_BIT if " "occlusionQuery is disabled or the device does not support precise occlusion queries.", report_data->FormatHandle(commandBuffer).c_str()); } } if (info && info->renderPass != VK_NULL_HANDLE) { const auto *render_pass = GetRenderPassState(info->renderPass); if (render_pass) { if (info->subpass >= render_pass->createInfo.subpassCount) { skip |= LogError(commandBuffer, "VUID-VkCommandBufferBeginInfo-flags-00054", "vkBeginCommandBuffer(): Secondary %s must have a subpass index (%d) that is " "less than the number of subpasses (%d).", report_data->FormatHandle(commandBuffer).c_str(), info->subpass, render_pass->createInfo.subpassCount); } } } } if (CB_RECORDING == cb_state->state) { skip |= LogError(commandBuffer, "VUID-vkBeginCommandBuffer-commandBuffer-00049", "vkBeginCommandBuffer(): Cannot call Begin on %s in the RECORDING state. 
Must first call " "vkEndCommandBuffer().", report_data->FormatHandle(commandBuffer).c_str()); } else if (CB_RECORDED == cb_state->state || CB_INVALID_COMPLETE == cb_state->state) { VkCommandPool cmd_pool = cb_state->createInfo.commandPool; const auto *pool = cb_state->command_pool.get(); if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & pool->createFlags)) { LogObjectList objlist(commandBuffer); objlist.add(cmd_pool); skip |= LogError(objlist, "VUID-vkBeginCommandBuffer-commandBuffer-00050", "Call to vkBeginCommandBuffer() on %s attempts to implicitly reset cmdBuffer created from " "%s that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.", report_data->FormatHandle(commandBuffer).c_str(), report_data->FormatHandle(cmd_pool).c_str()); } } auto chained_device_group_struct = LvlFindInChain<VkDeviceGroupCommandBufferBeginInfo>(pBeginInfo->pNext); if (chained_device_group_struct) { skip |= ValidateDeviceMaskToPhysicalDeviceCount(chained_device_group_struct->deviceMask, commandBuffer, "VUID-VkDeviceGroupCommandBufferBeginInfo-deviceMask-00106"); skip |= ValidateDeviceMaskToZero(chained_device_group_struct->deviceMask, commandBuffer, "VUID-VkDeviceGroupCommandBufferBeginInfo-deviceMask-00107"); } return skip; } bool CoreChecks::PreCallValidateEndCommandBuffer(VkCommandBuffer commandBuffer) const { const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); if (!cb_state) return false; bool skip = false; if ((VK_COMMAND_BUFFER_LEVEL_PRIMARY == cb_state->createInfo.level) || !(cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) { // This needs spec clarification to update valid usage, see comments in PR: // https://github.com/KhronosGroup/Vulkan-ValidationLayers/issues/165 skip |= InsideRenderPass(cb_state, "vkEndCommandBuffer()", "VUID-vkEndCommandBuffer-commandBuffer-00060"); } skip |= ValidateCmd(cb_state, CMD_ENDCOMMANDBUFFER, "vkEndCommandBuffer()"); for (auto query : cb_state->activeQueries) { skip |= LogError(commandBuffer, "VUID-vkEndCommandBuffer-commandBuffer-00061", "vkEndCommandBuffer(): Ending command buffer with in progress query: %s, query %d.", report_data->FormatHandle(query.pool).c_str(), query.query); } return skip; } bool CoreChecks::PreCallValidateResetCommandBuffer(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags) const { bool skip = false; const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); if (!cb_state) return false; VkCommandPool cmd_pool = cb_state->createInfo.commandPool; const auto *pool = cb_state->command_pool.get(); if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & pool->createFlags)) { LogObjectList objlist(commandBuffer); objlist.add(cmd_pool); skip |= LogError(objlist, "VUID-vkResetCommandBuffer-commandBuffer-00046", "vkResetCommandBuffer(): Attempt to reset %s created from %s that does NOT have the " "VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.", report_data->FormatHandle(commandBuffer).c_str(), report_data->FormatHandle(cmd_pool).c_str()); } skip |= CheckCommandBufferInFlight(cb_state, "reset", "VUID-vkResetCommandBuffer-commandBuffer-00045"); return skip; } static const char *GetPipelineTypeName(VkPipelineBindPoint pipelineBindPoint) { switch (pipelineBindPoint) { case VK_PIPELINE_BIND_POINT_GRAPHICS: return "graphics"; case VK_PIPELINE_BIND_POINT_COMPUTE: return "compute"; case VK_PIPELINE_BIND_POINT_RAY_TRACING_NV: return "ray-tracing"; default: return "unknown"; } } bool CoreChecks::ValidateGraphicsPipelineBindPoint(const CMD_BUFFER_STATE *cb_state, const 
PIPELINE_STATE *pipeline_state) const { bool skip = false; const FRAMEBUFFER_STATE *fb_state = cb_state->activeFramebuffer.get(); if (fb_state) { auto subpass_desc = &pipeline_state->rp_state->createInfo.pSubpasses[pipeline_state->graphicsPipelineCI.subpass]; for (size_t i = 0; i < pipeline_state->attachments.size() && i < subpass_desc->colorAttachmentCount; i++) { const auto attachment = subpass_desc->pColorAttachments[i].attachment; if (attachment == VK_ATTACHMENT_UNUSED) continue; const auto *imageview_state = GetActiveAttachmentImageViewState(cb_state, attachment); if (!imageview_state) continue; const IMAGE_STATE *image_state = GetImageState(imageview_state->create_info.image); if (!image_state) continue; const VkFormat format = pipeline_state->rp_state->createInfo.pAttachments[attachment].format; const VkFormatFeatureFlags format_features = GetPotentialFormatFeatures(format); if (pipeline_state->graphicsPipelineCI.pRasterizationState && !pipeline_state->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable && pipeline_state->attachments[i].blendEnable && !(format_features & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BLEND_BIT)) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-blendEnable-04717", "vkCreateGraphicsPipelines(): pipeline.pColorBlendState.pAttachments[" PRINTF_SIZE_T_SPECIFIER "].blendEnable is VK_TRUE but format %s associated with this attached image (%s) does " "not support VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BLEND_BIT.", i, string_VkFormat(format), report_data->FormatHandle(image_state->image).c_str()); } } } return skip; } bool CoreChecks::PreCallValidateCmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline) const { const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdBindPipeline()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, "VUID-vkCmdBindPipeline-commandBuffer-cmdpool"); skip |= ValidateCmd(cb_state, CMD_BINDPIPELINE, "vkCmdBindPipeline()"); static const std::map<VkPipelineBindPoint, std::string> bindpoint_errors = { std::make_pair(VK_PIPELINE_BIND_POINT_GRAPHICS, "VUID-vkCmdBindPipeline-pipelineBindPoint-00777"), std::make_pair(VK_PIPELINE_BIND_POINT_COMPUTE, "VUID-vkCmdBindPipeline-pipelineBindPoint-00778"), std::make_pair(VK_PIPELINE_BIND_POINT_RAY_TRACING_NV, "VUID-vkCmdBindPipeline-pipelineBindPoint-02391")}; skip |= ValidatePipelineBindPoint(cb_state, pipelineBindPoint, "vkCmdBindPipeline()", bindpoint_errors); const auto *pipeline_state = GetPipelineState(pipeline); assert(pipeline_state); const auto &pipeline_state_bind_point = pipeline_state->getPipelineType(); if (pipelineBindPoint != pipeline_state_bind_point) { if (pipelineBindPoint == VK_PIPELINE_BIND_POINT_GRAPHICS) { skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdBindPipeline-pipelineBindPoint-00779", "Cannot bind a pipeline of type %s to the graphics pipeline bind point", GetPipelineTypeName(pipeline_state_bind_point)); } else if (pipelineBindPoint == VK_PIPELINE_BIND_POINT_COMPUTE) { skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdBindPipeline-pipelineBindPoint-00780", "Cannot bind a pipeline of type %s to the compute pipeline bind point", GetPipelineTypeName(pipeline_state_bind_point)); } else if (pipelineBindPoint == VK_PIPELINE_BIND_POINT_RAY_TRACING_NV) { skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdBindPipeline-pipelineBindPoint-02392", "Cannot bind a pipeline of type %s to the ray-tracing pipeline bind point",
GetPipelineTypeName(pipeline_state_bind_point)); } } else { if (pipelineBindPoint == VK_PIPELINE_BIND_POINT_GRAPHICS) { skip |= ValidateGraphicsPipelineBindPoint(cb_state, pipeline_state); } } return skip; } bool CoreChecks::PreCallValidateCmdSetViewport(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkViewport *pViewports) const { const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetViewport()", VK_QUEUE_GRAPHICS_BIT, "VUID-vkCmdSetViewport-commandBuffer-cmdpool"); skip |= ValidateCmd(cb_state, CMD_SETVIEWPORT, "vkCmdSetViewport()"); return skip; } bool CoreChecks::PreCallValidateCmdSetScissor(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount, const VkRect2D *pScissors) const { const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetScissor()", VK_QUEUE_GRAPHICS_BIT, "VUID-vkCmdSetScissor-commandBuffer-cmdpool"); skip |= ValidateCmd(cb_state, CMD_SETSCISSOR, "vkCmdSetScissor()"); return skip; } bool CoreChecks::PreCallValidateCmdSetExclusiveScissorNV(VkCommandBuffer commandBuffer, uint32_t firstExclusiveScissor, uint32_t exclusiveScissorCount, const VkRect2D *pExclusiveScissors) const { const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetExclusiveScissorNV()", VK_QUEUE_GRAPHICS_BIT, "VUID-vkCmdSetExclusiveScissorNV-commandBuffer-cmdpool"); skip |= ValidateCmd(cb_state, CMD_SETEXCLUSIVESCISSORNV, "vkCmdSetExclusiveScissorNV()"); if (!enabled_features.exclusive_scissor.exclusiveScissor) { skip |= LogError(commandBuffer, "VUID-vkCmdSetExclusiveScissorNV-None-02031", "vkCmdSetExclusiveScissorNV: The exclusiveScissor feature is disabled."); } return skip; } bool CoreChecks::PreCallValidateCmdBindShadingRateImageNV(VkCommandBuffer commandBuffer, VkImageView imageView, VkImageLayout imageLayout) const { const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdBindShadingRateImageNV()", VK_QUEUE_GRAPHICS_BIT, "VUID-vkCmdBindShadingRateImageNV-commandBuffer-cmdpool"); skip |= ValidateCmd(cb_state, CMD_BINDSHADINGRATEIMAGENV, "vkCmdBindShadingRateImageNV()"); if (!enabled_features.shading_rate_image.shadingRateImage) { skip |= LogError(commandBuffer, "VUID-vkCmdBindShadingRateImageNV-None-02058", "vkCmdBindShadingRateImageNV: The shadingRateImage feature is disabled."); } if (imageView != VK_NULL_HANDLE) { const auto view_state = GetImageViewState(imageView); if (!view_state || (view_state->create_info.viewType != VK_IMAGE_VIEW_TYPE_2D && view_state->create_info.viewType != VK_IMAGE_VIEW_TYPE_2D_ARRAY)) { skip |= LogError(imageView, "VUID-vkCmdBindShadingRateImageNV-imageView-02059", "vkCmdBindShadingRateImageNV: If imageView is not VK_NULL_HANDLE, it must be a valid " "VkImageView handle of type VK_IMAGE_VIEW_TYPE_2D or VK_IMAGE_VIEW_TYPE_2D_ARRAY."); } if (view_state && view_state->create_info.format != VK_FORMAT_R8_UINT) { skip |= LogError( imageView, "VUID-vkCmdBindShadingRateImageNV-imageView-02060", "vkCmdBindShadingRateImageNV: If imageView is not VK_NULL_HANDLE, it must have a format of VK_FORMAT_R8_UINT."); } const VkImageCreateInfo *ici = view_state ? 
&GetImageState(view_state->create_info.image)->createInfo : nullptr; if (ici && !(ici->usage & VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV)) { skip |= LogError(imageView, "VUID-vkCmdBindShadingRateImageNV-imageView-02061", "vkCmdBindShadingRateImageNV: If imageView is not VK_NULL_HANDLE, the image must have been " "created with VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV set."); } if (view_state) { const auto image_state = GetImageState(view_state->create_info.image); bool hit_error = false; // XXX TODO: While the VUID says "each subresource", only the base mip level is // actually used. Since we don't have an existing convenience function to iterate // over all mip levels, just don't bother with non-base levels. const VkImageSubresourceRange &range = view_state->create_info.subresourceRange; VkImageSubresourceLayers subresource = {range.aspectMask, range.baseMipLevel, range.baseArrayLayer, range.layerCount}; if (image_state) { skip |= VerifyImageLayout(cb_state, image_state, subresource, imageLayout, VK_IMAGE_LAYOUT_SHADING_RATE_OPTIMAL_NV, "vkCmdBindShadingRateImageNV()", "VUID-vkCmdBindShadingRateImageNV-imageLayout-02063", "VUID-vkCmdBindShadingRateImageNV-imageView-02062", &hit_error); } } } return skip; } bool CoreChecks::PreCallValidateCmdSetViewportShadingRatePaletteNV(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkShadingRatePaletteNV *pShadingRatePalettes) const { const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetViewportShadingRatePaletteNV()", VK_QUEUE_GRAPHICS_BIT, "VUID-vkCmdSetViewportShadingRatePaletteNV-commandBuffer-cmdpool"); skip |= ValidateCmd(cb_state, CMD_SETVIEWPORTSHADINGRATEPALETTENV, "vkCmdSetViewportShadingRatePaletteNV()"); if (!enabled_features.shading_rate_image.shadingRateImage) { skip |= LogError(commandBuffer, "VUID-vkCmdSetViewportShadingRatePaletteNV-None-02064", "vkCmdSetViewportShadingRatePaletteNV: The shadingRateImage feature is disabled."); } for (uint32_t i = 0; i < viewportCount; ++i) { auto *palette = &pShadingRatePalettes[i]; if (palette->shadingRatePaletteEntryCount == 0 || palette->shadingRatePaletteEntryCount > phys_dev_ext_props.shading_rate_image_props.shadingRatePaletteSize) { skip |= LogError( commandBuffer, "VUID-VkShadingRatePaletteNV-shadingRatePaletteEntryCount-02071", "vkCmdSetViewportShadingRatePaletteNV: shadingRatePaletteEntryCount must be between 1 and shadingRatePaletteSize."); } } return skip; } bool CoreChecks::ValidateGeometryTrianglesNV(const VkGeometryTrianglesNV &triangles, const char *func_name) const { bool skip = false; const BUFFER_STATE *vb_state = GetBufferState(triangles.vertexData); if (vb_state != nullptr && vb_state->createInfo.size <= triangles.vertexOffset) { skip |= LogError(device, "VUID-VkGeometryTrianglesNV-vertexOffset-02428", "%s", func_name); } const BUFFER_STATE *ib_state = GetBufferState(triangles.indexData); if (ib_state != nullptr && ib_state->createInfo.size <= triangles.indexOffset) { skip |= LogError(device, "VUID-VkGeometryTrianglesNV-indexOffset-02431", "%s", func_name); } const BUFFER_STATE *td_state = GetBufferState(triangles.transformData); if (td_state != nullptr && td_state->createInfo.size <= triangles.transformOffset) { skip |= LogError(device, "VUID-VkGeometryTrianglesNV-transformOffset-02437", "%s", func_name); } return skip; } bool CoreChecks::ValidateGeometryAABBNV(const VkGeometryAABBNV &aabbs, const char *func_name) const { bool skip = false; const BUFFER_STATE *aabb_state = 
GetBufferState(aabbs.aabbData); if (aabb_state != nullptr && aabb_state->createInfo.size > 0 && aabb_state->createInfo.size <= aabbs.offset) { skip |= LogError(device, "VUID-VkGeometryAABBNV-offset-02439", "%s", func_name); } return skip; } bool CoreChecks::ValidateGeometryNV(const VkGeometryNV &geometry, const char *func_name) const { bool skip = false; if (geometry.geometryType == VK_GEOMETRY_TYPE_TRIANGLES_NV) { skip = ValidateGeometryTrianglesNV(geometry.geometry.triangles, func_name); } else if (geometry.geometryType == VK_GEOMETRY_TYPE_AABBS_NV) { skip = ValidateGeometryAABBNV(geometry.geometry.aabbs, func_name); } return skip; } bool CoreChecks::PreCallValidateCreateAccelerationStructureNV(VkDevice device, const VkAccelerationStructureCreateInfoNV *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkAccelerationStructureNV *pAccelerationStructure) const { bool skip = false; if (pCreateInfo != nullptr && pCreateInfo->info.type == VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_NV) { for (uint32_t i = 0; i < pCreateInfo->info.geometryCount; i++) { skip |= ValidateGeometryNV(pCreateInfo->info.pGeometries[i], "vkCreateAccelerationStructureNV():"); } } return skip; } bool CoreChecks::PreCallValidateCreateAccelerationStructureKHR(VkDevice device, const VkAccelerationStructureCreateInfoKHR *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkAccelerationStructureKHR *pAccelerationStructure) const { bool skip = false; if (pCreateInfo) { const BUFFER_STATE *buffer_state = GetBufferState(pCreateInfo->buffer); if (buffer_state) { if (!(buffer_state->createInfo.usage & VK_BUFFER_USAGE_ACCELERATION_STRUCTURE_STORAGE_BIT_KHR)) { skip |= LogError(device, "VUID-VkAccelerationStructureCreateInfoKHR-buffer-03614", "VkAccelerationStructureCreateInfoKHR(): buffer must have been created with a usage value containing " "VK_BUFFER_USAGE_ACCELERATION_STRUCTURE_STORAGE_BIT_KHR."); } if (buffer_state->createInfo.flags & VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT) { skip |= LogError(device, "VUID-VkAccelerationStructureCreateInfoKHR-buffer-03615", "VkAccelerationStructureCreateInfoKHR(): buffer must not have been created with " "VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT."); } if (pCreateInfo->offset + pCreateInfo->size > buffer_state->createInfo.size) { skip |= LogError( device, "VUID-VkAccelerationStructureCreateInfoKHR-offset-03616", "VkAccelerationStructureCreateInfoKHR(): The sum of offset and size must be less than the size of buffer."); } } } return skip; } bool CoreChecks::ValidateBindAccelerationStructureMemory(VkDevice device, const VkBindAccelerationStructureMemoryInfoNV &info) const { bool skip = false; const ACCELERATION_STRUCTURE_STATE *as_state = GetAccelerationStructureStateNV(info.accelerationStructure); if (!as_state) { return skip; } if (!as_state->GetBoundMemory().empty()) { skip |= LogError(info.accelerationStructure, "VUID-VkBindAccelerationStructureMemoryInfoNV-accelerationStructure-03620", "vkBindAccelerationStructureMemoryNV(): accelerationStructure must not already be backed by a memory object."); } // Validate bound memory range information const auto mem_info = GetDevMemState(info.memory); if (mem_info) { skip |= ValidateInsertAccelerationStructureMemoryRange(info.accelerationStructure, mem_info, info.memoryOffset, "vkBindAccelerationStructureMemoryNV()"); skip |= ValidateMemoryTypes(mem_info, as_state->memory_requirements.memoryRequirements.memoryTypeBits, "vkBindAccelerationStructureMemoryNV()", "VUID-VkBindAccelerationStructureMemoryInfoNV-memory-03622"); } // Validate memory 
requirements alignment if (SafeModulo(info.memoryOffset, as_state->memory_requirements.memoryRequirements.alignment) != 0) { skip |= LogError(info.accelerationStructure, "VUID-VkBindAccelerationStructureMemoryInfoNV-memoryOffset-03623", "vkBindAccelerationStructureMemoryNV(): memoryOffset 0x%" PRIxLEAST64 " must be an integer multiple of the alignment 0x%" PRIxLEAST64 " member of the VkMemoryRequirements structure returned from " "a call to vkGetAccelerationStructureMemoryRequirementsNV with accelerationStructure and type of " "VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_OBJECT_NV", info.memoryOffset, as_state->memory_requirements.memoryRequirements.alignment); } if (mem_info) { // Validate memory requirements size if (as_state->memory_requirements.memoryRequirements.size > (mem_info->alloc_info.allocationSize - info.memoryOffset)) { skip |= LogError(info.accelerationStructure, "VUID-VkBindAccelerationStructureMemoryInfoNV-size-03624", "vkBindAccelerationStructureMemoryNV(): The size 0x%" PRIxLEAST64 " member of the VkMemoryRequirements structure returned from a call to " "vkGetAccelerationStructureMemoryRequirementsNV with accelerationStructure and type of " "VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_OBJECT_NV must be less than or equal to the size " "of memory minus memoryOffset 0x%" PRIxLEAST64 ".", as_state->memory_requirements.memoryRequirements.size, mem_info->alloc_info.allocationSize - info.memoryOffset); } } return skip; } bool CoreChecks::PreCallValidateBindAccelerationStructureMemoryNV(VkDevice device, uint32_t bindInfoCount, const VkBindAccelerationStructureMemoryInfoNV *pBindInfos) const { bool skip = false; for (uint32_t i = 0; i < bindInfoCount; i++) { skip |= ValidateBindAccelerationStructureMemory(device, pBindInfos[i]); } return skip; } bool CoreChecks::PreCallValidateGetAccelerationStructureHandleNV(VkDevice device, VkAccelerationStructureNV accelerationStructure, size_t dataSize, void *pData) const { bool skip = false; const ACCELERATION_STRUCTURE_STATE *as_state = GetAccelerationStructureStateNV(accelerationStructure); if (as_state != nullptr) { // TODO: update the fake VUID below once the real one is generated. 
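// The "UNASSIGNED-" prefix below appears to be this layer's convention for a
// placeholder ID used while the spec has no published VUID for a check; it is
// reported through the same LogError path as real VUID strings.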
skip = ValidateMemoryIsBoundToAccelerationStructure( as_state, "vkGetAccelerationStructureHandleNV", "UNASSIGNED-vkGetAccelerationStructureHandleNV-accelerationStructure-XXXX"); } return skip; } bool CoreChecks::PreCallValidateCmdBuildAccelerationStructuresKHR( VkCommandBuffer commandBuffer, uint32_t infoCount, const VkAccelerationStructureBuildGeometryInfoKHR *pInfos, const VkAccelerationStructureBuildRangeInfoKHR *const *ppBuildRangeInfos) const { bool skip = false; const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); skip = ValidateCmdQueueFlags(cb_state, "vkCmdBuildAccelerationStructuresKHR()", VK_QUEUE_COMPUTE_BIT, "VUID-vkCmdBuildAccelerationStructuresKHR-commandBuffer-cmdpool"); skip |= ValidateCmd(cb_state, CMD_BUILDACCELERATIONSTRUCTURESKHR, "vkCmdBuildAccelerationStructuresKHR()"); skip |= InsideRenderPass(cb_state, "vkCmdBuildAccelerationStructuresKHR()", "VUID-vkCmdBuildAccelerationStructuresKHR-renderpass"); if (pInfos != NULL) { for (uint32_t info_index = 0; info_index < infoCount; ++info_index) { const ACCELERATION_STRUCTURE_STATE_KHR *src_as_state = GetAccelerationStructureStateKHR(pInfos[info_index].srcAccelerationStructure); const ACCELERATION_STRUCTURE_STATE_KHR *dst_as_state = GetAccelerationStructureStateKHR(pInfos[info_index].dstAccelerationStructure); if (pInfos[info_index].mode == VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR) { if (!src_as_state || (src_as_state && src_as_state->acceleration_structure == VK_NULL_HANDLE)) { skip |= LogError(device, "VUID-vkCmdBuildAccelerationStructuresKHR-pInfos-03666", "vkCmdBuildAccelerationStructuresKHR(): For each element of pInfos, if its mode member is " "VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR, its srcAccelerationStructure member must not be " "VK_NULL_HANDLE."); } if (src_as_state == nullptr || !src_as_state->built || !(src_as_state->build_info_khr.flags & VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_KHR)) { skip |= LogError(device, "VUID-vkCmdBuildAccelerationStructuresKHR-pInfos-03667", "vkCmdBuildAccelerationStructuresKHR(): For each element of pInfos, if its mode member is " "VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR, its srcAccelerationStructure member must " "have been built before with VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_KHR set in " "VkAccelerationStructureBuildGeometryInfoKHR::flags."); } if (pInfos[info_index].geometryCount != src_as_state->build_info_khr.geometryCount) { skip |= LogError(device, "VUID-vkCmdBuildAccelerationStructuresKHR-pInfos-03758", "vkCmdBuildAccelerationStructuresKHR(): For each element of pInfos, if its mode member is " "VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR," " its geometryCount member must have the same value which was specified when " "srcAccelerationStructure was last built."); } if (pInfos[info_index].flags != src_as_state->build_info_khr.flags) { skip |= LogError(device, "VUID-vkCmdBuildAccelerationStructuresKHR-pInfos-03759", "vkCmdBuildAccelerationStructuresKHR(): For each element of pInfos, if its mode member is" " VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR, its flags member must have the same value which" " was specified when srcAccelerationStructure was last built."); } if (pInfos[info_index].type != src_as_state->build_info_khr.type) { skip |= LogError(device, "VUID-vkCmdBuildAccelerationStructuresKHR-pInfos-03760", "vkCmdBuildAccelerationStructuresKHR(): For each element of pInfos, if its mode member is" " VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR, its type member must have the same value which" " 
was specified when srcAccelerationStructure was last built."); } } if (pInfos[info_index].type == VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR) { if (!dst_as_state || (dst_as_state && dst_as_state->create_infoKHR.type != VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR && dst_as_state->create_infoKHR.type != VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR)) { skip |= LogError(device, "VUID-vkCmdBuildAccelerationStructuresKHR-pInfos-03700", "vkCmdBuildAccelerationStructuresKHR(): For each element of pInfos, if its type member is " "VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR, its dstAccelerationStructure member must have " "been created with a value of VkAccelerationStructureCreateInfoKHR::type equal to either " "VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR or VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR."); } } if (pInfos[info_index].type == VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR) { if (!dst_as_state || (dst_as_state && dst_as_state->create_infoKHR.type != VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR && dst_as_state->create_infoKHR.type != VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR)) { skip |= LogError(device, "VUID-vkCmdBuildAccelerationStructuresKHR-pInfos-03699", "vkCmdBuildAccelerationStructuresKHR(): For each element of pInfos, if its type member is " "VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR, its dstAccelerationStructure member must have been " "created with a value of VkAccelerationStructureCreateInfoKHR::type equal to either " "VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR or VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR."); } } } } return skip; } bool CoreChecks::PreCallValidateBuildAccelerationStructuresKHR( VkDevice device, VkDeferredOperationKHR deferredOperation, uint32_t infoCount, const VkAccelerationStructureBuildGeometryInfoKHR *pInfos, const VkAccelerationStructureBuildRangeInfoKHR *const *ppBuildRangeInfos) const { bool skip = false; for (uint32_t i = 0; i < infoCount; ++i) { const ACCELERATION_STRUCTURE_STATE_KHR *src_as_state = GetAccelerationStructureStateKHR(pInfos[i].srcAccelerationStructure); const ACCELERATION_STRUCTURE_STATE_KHR *dst_as_state = GetAccelerationStructureStateKHR(pInfos[i].dstAccelerationStructure); if (pInfos[i].mode == VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR) { if (!src_as_state || (src_as_state && !src_as_state->acceleration_structure)) { skip |= LogError(device, "VUID-vkBuildAccelerationStructuresKHR-pInfos-03666", "vkBuildAccelerationStructuresKHR(): For each element of pInfos, if its mode member is " "VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR, its srcAccelerationStructure member must not be " "VK_NULL_HANDLE."); } if (src_as_state == nullptr || !src_as_state->built || !(src_as_state->build_info_khr.flags & VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_KHR)) { skip |= LogError(device, "VUID-vkBuildAccelerationStructuresKHR-pInfos-03667", "vkBuildAccelerationStructuresKHR(): For each element of pInfos, if its mode member is " "VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR, its srcAccelerationStructure member must have " "been built before with VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_KHR set in " "VkAccelerationStructureBuildGeometryInfoKHR::flags."); } if (src_as_state && pInfos[i].geometryCount != src_as_state->build_info_khr.geometryCount) { skip |= LogError(device, "VUID-vkBuildAccelerationStructuresKHR-pInfos-03758", "vkBuildAccelerationStructuresKHR(): For each element of pInfos, if its mode member is " "VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR," " its geometryCount member must have the same value which was
specified when " "srcAccelerationStructure was last built."); } if (pInfos[i].flags != src_as_state->build_info_khr.flags) { skip |= LogError(device, "VUID-vkBuildAccelerationStructuresKHR-pInfos-03759", "vkBuildAccelerationStructuresKHR(): For each element of pInfos, if its mode member is" " VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR, its flags member must have the same value which" " was specified when srcAccelerationStructure was last built."); } if (pInfos[i].type != src_as_state->build_info_khr.type) { skip |= LogError(device, "VUID-vkBuildAccelerationStructuresKHR-pInfos-03760", "vkBuildAccelerationStructuresKHR(): For each element of pInfos, if its mode member is" " VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR, its type member must have the same value which" " was specified when srcAccelerationStructure was last built."); } } if (pInfos[i].type == VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR) { if (!dst_as_state || (dst_as_state && dst_as_state->create_infoKHR.type != VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR && dst_as_state->create_infoKHR.type != VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR)) { skip |= LogError(device, "VUID-vkBuildAccelerationStructuresKHR-pInfos-03700", "vkBuildAccelerationStructuresKHR(): For each element of pInfos, if its type member is " "VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR, its dstAccelerationStructure member must have " "been created with a value of VkAccelerationStructureCreateInfoKHR::type equal to either " "VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR or VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR."); } } if (pInfos[i].type == VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR) { if (!dst_as_state || (dst_as_state && dst_as_state->create_infoKHR.type != VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR && dst_as_state->create_infoKHR.type != VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR)) { skip |= LogError(device, "VUID-vkBuildAccelerationStructuresKHR-pInfos-03699", "vkBuildAccelerationStructuresKHR(): For each element of pInfos, if its type member is " "VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR, its dstAccelerationStructure member must have been " "created with a value of VkAccelerationStructureCreateInfoKHR::type equal to either " "VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR or VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR."); } } } return skip; } bool CoreChecks::PreCallValidateCmdBuildAccelerationStructureNV(VkCommandBuffer commandBuffer, const VkAccelerationStructureInfoNV *pInfo, VkBuffer instanceData, VkDeviceSize instanceOffset, VkBool32 update, VkAccelerationStructureNV dst, VkAccelerationStructureNV src, VkBuffer scratch, VkDeviceSize scratchOffset) const { const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdBuildAccelerationStructureNV()", VK_QUEUE_COMPUTE_BIT, "VUID-vkCmdBuildAccelerationStructureNV-commandBuffer-cmdpool"); skip |= ValidateCmd(cb_state, CMD_BUILDACCELERATIONSTRUCTURENV, "vkCmdBuildAccelerationStructureNV()"); skip |= InsideRenderPass(cb_state, "vkCmdBuildAccelerationStructureNV()", "VUID-vkCmdBuildAccelerationStructureNV-renderpass"); if (pInfo != nullptr && pInfo->type == VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_NV) { for (uint32_t i = 0; i < pInfo->geometryCount; i++) { skip |= ValidateGeometryNV(pInfo->pGeometries[i], "vkCmdBuildAccelerationStructureNV():"); } } if (pInfo != nullptr && pInfo->geometryCount > phys_dev_ext_props.ray_tracing_propsNV.maxGeometryCount) { skip |= LogError(commandBuffer, 
"VUID-vkCmdBuildAccelerationStructureNV-geometryCount-02241", "vkCmdBuildAccelerationStructureNV(): geometryCount [%d] must be less than or equal to " "VkPhysicalDeviceRayTracingPropertiesNV::maxGeometryCount.", pInfo->geometryCount); } const ACCELERATION_STRUCTURE_STATE *dst_as_state = GetAccelerationStructureStateNV(dst); const ACCELERATION_STRUCTURE_STATE *src_as_state = GetAccelerationStructureStateNV(src); const BUFFER_STATE *scratch_buffer_state = GetBufferState(scratch); if (dst_as_state != nullptr && pInfo != nullptr) { if (dst_as_state->create_infoNV.info.type != pInfo->type) { skip |= LogError(commandBuffer, "VUID-vkCmdBuildAccelerationStructureNV-dst-02488", "vkCmdBuildAccelerationStructureNV(): create info VkAccelerationStructureInfoNV::type" "[%s] must be identical to build info VkAccelerationStructureInfoNV::type [%s].", string_VkAccelerationStructureTypeNV(dst_as_state->create_infoNV.info.type), string_VkAccelerationStructureTypeNV(pInfo->type)); } if (dst_as_state->create_infoNV.info.flags != pInfo->flags) { skip |= LogError(commandBuffer, "VUID-vkCmdBuildAccelerationStructureNV-dst-02488", "vkCmdBuildAccelerationStructureNV(): create info VkAccelerationStructureInfoNV::flags" "[0x%X] must be identical to build info VkAccelerationStructureInfoNV::flags [0x%X].", dst_as_state->create_infoNV.info.flags, pInfo->flags); } if (dst_as_state->create_infoNV.info.instanceCount < pInfo->instanceCount) { skip |= LogError(commandBuffer, "VUID-vkCmdBuildAccelerationStructureNV-dst-02488", "vkCmdBuildAccelerationStructureNV(): create info VkAccelerationStructureInfoNV::instanceCount " "[%d] must be greater than or equal to build info VkAccelerationStructureInfoNV::instanceCount [%d].", dst_as_state->create_infoNV.info.instanceCount, pInfo->instanceCount); } if (dst_as_state->create_infoNV.info.geometryCount < pInfo->geometryCount) { skip |= LogError(commandBuffer, "VUID-vkCmdBuildAccelerationStructureNV-dst-02488", "vkCmdBuildAccelerationStructureNV(): create info VkAccelerationStructureInfoNV::geometryCount" "[%d] must be greater than or equal to build info VkAccelerationStructureInfoNV::geometryCount [%d].", dst_as_state->create_infoNV.info.geometryCount, pInfo->geometryCount); } else { for (uint32_t i = 0; i < pInfo->geometryCount; i++) { const VkGeometryDataNV &create_geometry_data = dst_as_state->create_infoNV.info.pGeometries[i].geometry; const VkGeometryDataNV &build_geometry_data = pInfo->pGeometries[i].geometry; if (create_geometry_data.triangles.vertexCount < build_geometry_data.triangles.vertexCount) { skip |= LogError( commandBuffer, "VUID-vkCmdBuildAccelerationStructureNV-dst-02488", "vkCmdBuildAccelerationStructureNV(): create info pGeometries[%d].geometry.triangles.vertexCount [%d]" "must be greater than or equal to build info pGeometries[%d].geometry.triangles.vertexCount [%d].", i, create_geometry_data.triangles.vertexCount, i, build_geometry_data.triangles.vertexCount); break; } if (create_geometry_data.triangles.indexCount < build_geometry_data.triangles.indexCount) { skip |= LogError( commandBuffer, "VUID-vkCmdBuildAccelerationStructureNV-dst-02488", "vkCmdBuildAccelerationStructureNV(): create info pGeometries[%d].geometry.triangles.indexCount [%d]" "must be greater than or equal to build info pGeometries[%d].geometry.triangles.indexCount [%d].", i, create_geometry_data.triangles.indexCount, i, build_geometry_data.triangles.indexCount); break; } if (create_geometry_data.aabbs.numAABBs < build_geometry_data.aabbs.numAABBs) { skip |= LogError(commandBuffer, 
"VUID-vkCmdBuildAccelerationStructureNV-dst-02488", "vkCmdBuildAccelerationStructureNV(): create info pGeometries[%d].geometry.aabbs.numAABBs [%d]" "must be greater than or equal to build info pGeometries[%d].geometry.aabbs.numAABBs [%d].", i, create_geometry_data.aabbs.numAABBs, i, build_geometry_data.aabbs.numAABBs); break; } } } } if (dst_as_state != nullptr) { skip |= ValidateMemoryIsBoundToAccelerationStructure( dst_as_state, "vkCmdBuildAccelerationStructureNV()", "UNASSIGNED-CoreValidation-DrawState-InvalidCommandBuffer-VkAccelerationStructureNV"); } if (update == VK_TRUE) { if (src == VK_NULL_HANDLE) { skip |= LogError(commandBuffer, "VUID-vkCmdBuildAccelerationStructureNV-update-02489", "vkCmdBuildAccelerationStructureNV(): If update is VK_TRUE, src must not be VK_NULL_HANDLE."); } else { if (src_as_state == nullptr || !src_as_state->built || !(src_as_state->build_info.flags & VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_NV)) { skip |= LogError(commandBuffer, "VUID-vkCmdBuildAccelerationStructureNV-update-02490", "vkCmdBuildAccelerationStructureNV(): If update is VK_TRUE, src must have been built before " "with VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_NV set in " "VkAccelerationStructureInfoNV::flags."); } } if (dst_as_state != nullptr && !dst_as_state->update_scratch_memory_requirements_checked) { skip |= LogWarning(dst, kVUID_Core_CmdBuildAccelNV_NoUpdateMemReqQuery, "vkCmdBuildAccelerationStructureNV(): Updating %s but vkGetAccelerationStructureMemoryRequirementsNV() " "has not been called for update scratch memory.", report_data->FormatHandle(dst_as_state->acceleration_structure).c_str()); // Use requirements fetched at create time } if (scratch_buffer_state != nullptr && dst_as_state != nullptr && dst_as_state->update_scratch_memory_requirements.memoryRequirements.size > (scratch_buffer_state->createInfo.size - scratchOffset)) { skip |= LogError(commandBuffer, "VUID-vkCmdBuildAccelerationStructureNV-update-02492", "vkCmdBuildAccelerationStructureNV(): If update is VK_TRUE, The size member of the " "VkMemoryRequirements structure returned from a call to " "vkGetAccelerationStructureMemoryRequirementsNV with " "VkAccelerationStructureMemoryRequirementsInfoNV::accelerationStructure set to dst and " "VkAccelerationStructureMemoryRequirementsInfoNV::type set to " "VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_UPDATE_SCRATCH_NV must be less than " "or equal to the size of scratch minus scratchOffset"); } } else { if (dst_as_state != nullptr && !dst_as_state->build_scratch_memory_requirements_checked) { skip |= LogWarning(dst, kVUID_Core_CmdBuildAccelNV_NoScratchMemReqQuery, "vkCmdBuildAccelerationStructureNV(): Assigning scratch buffer to %s but " "vkGetAccelerationStructureMemoryRequirementsNV() has not been called for scratch memory.", report_data->FormatHandle(dst_as_state->acceleration_structure).c_str()); // Use requirements fetched at create time } if (scratch_buffer_state != nullptr && dst_as_state != nullptr && dst_as_state->build_scratch_memory_requirements.memoryRequirements.size > (scratch_buffer_state->createInfo.size - scratchOffset)) { skip |= LogError(commandBuffer, "VUID-vkCmdBuildAccelerationStructureNV-update-02491", "vkCmdBuildAccelerationStructureNV(): If update is VK_FALSE, The size member of the " "VkMemoryRequirements structure returned from a call to " "vkGetAccelerationStructureMemoryRequirementsNV with " "VkAccelerationStructureMemoryRequirementsInfoNV::accelerationStructure set to dst and " 
"VkAccelerationStructureMemoryRequirementsInfoNV::type set to " "VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_BUILD_SCRATCH_NV must be less than " "or equal to the size of scratch minus scratchOffset"); } } if (instanceData != VK_NULL_HANDLE) { const auto buffer_state = GetBufferState(instanceData); if (buffer_state != nullptr) { skip |= ValidateBufferUsageFlags(buffer_state, VK_BUFFER_USAGE_RAY_TRACING_BIT_NV, true, "VUID-VkAccelerationStructureInfoNV-instanceData-02782", "vkCmdBuildAccelerationStructureNV()", "VK_BUFFER_USAGE_RAY_TRACING_BIT_NV"); } } if (scratch_buffer_state != nullptr) { skip |= ValidateBufferUsageFlags(scratch_buffer_state, VK_BUFFER_USAGE_RAY_TRACING_BIT_NV, true, "VUID-VkAccelerationStructureInfoNV-scratch-02781", "vkCmdBuildAccelerationStructureNV()", "VK_BUFFER_USAGE_RAY_TRACING_BIT_NV"); } return skip; } bool CoreChecks::PreCallValidateCmdCopyAccelerationStructureNV(VkCommandBuffer commandBuffer, VkAccelerationStructureNV dst, VkAccelerationStructureNV src, VkCopyAccelerationStructureModeNV mode) const { const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdCopyAccelerationStructureNV()", VK_QUEUE_COMPUTE_BIT, "VUID-vkCmdCopyAccelerationStructureNV-commandBuffer-cmdpool"); skip |= ValidateCmd(cb_state, CMD_COPYACCELERATIONSTRUCTURENV, "vkCmdCopyAccelerationStructureNV()"); skip |= InsideRenderPass(cb_state, "vkCmdCopyAccelerationStructureNV()", "VUID-vkCmdCopyAccelerationStructureNV-renderpass"); const ACCELERATION_STRUCTURE_STATE *dst_as_state = GetAccelerationStructureStateNV(dst); const ACCELERATION_STRUCTURE_STATE *src_as_state = GetAccelerationStructureStateNV(src); if (dst_as_state != nullptr) { skip |= ValidateMemoryIsBoundToAccelerationStructure( dst_as_state, "vkCmdBuildAccelerationStructureNV()", "UNASSIGNED-CoreValidation-DrawState-InvalidCommandBuffer-VkAccelerationStructureNV"); } if (mode == VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_NV) { if (src_as_state != nullptr && (!src_as_state->built || !(src_as_state->build_info.flags & VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_NV))) { skip |= LogError(commandBuffer, "VUID-vkCmdCopyAccelerationStructureNV-src-03411", "vkCmdCopyAccelerationStructureNV(): src must have been built with " "VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_NV if mode is " "VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_NV."); } } if (!(mode == VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_NV || mode == VK_COPY_ACCELERATION_STRUCTURE_MODE_CLONE_KHR)) { skip |= LogError(commandBuffer, "VUID-vkCmdCopyAccelerationStructureNV-mode-03410", "vkCmdCopyAccelerationStructureNV():mode must be VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_KHR" "or VK_COPY_ACCELERATION_STRUCTURE_MODE_CLONE_KHR."); } return skip; } bool CoreChecks::PreCallValidateDestroyAccelerationStructureNV(VkDevice device, VkAccelerationStructureNV accelerationStructure, const VkAllocationCallbacks *pAllocator) const { const ACCELERATION_STRUCTURE_STATE *as_state = GetAccelerationStructureStateNV(accelerationStructure); const VulkanTypedHandle obj_struct(accelerationStructure, kVulkanObjectTypeAccelerationStructureNV); bool skip = false; if (as_state) { skip |= ValidateObjectNotInUse(as_state, obj_struct, "vkDestroyAccelerationStructureNV", "VUID-vkDestroyAccelerationStructureKHR-accelerationStructure-02442"); } return skip; } bool CoreChecks::PreCallValidateDestroyAccelerationStructureKHR(VkDevice device, VkAccelerationStructureKHR accelerationStructure, const VkAllocationCallbacks 
*pAllocator) const { const ACCELERATION_STRUCTURE_STATE_KHR *as_state = GetAccelerationStructureStateKHR(accelerationStructure); const VulkanTypedHandle obj_struct(accelerationStructure, kVulkanObjectTypeAccelerationStructureKHR); bool skip = false; if (as_state) { skip |= ValidateObjectNotInUse(as_state, obj_struct, "vkDestroyAccelerationStructureKHR", "VUID-vkDestroyAccelerationStructureKHR-accelerationStructure-02442"); } if (as_state && pAllocator && !as_state->allocator) { skip |= LogError(device, "VUID-vkDestroyAccelerationStructureKHR-accelerationStructure-02444", "vkDestroyAccelerationStructureKHR(): If no VkAllocationCallbacks were provided when accelerationStructure " "was created, pAllocator must be NULL."); } return skip; } bool CoreChecks::PreCallValidateCmdSetViewportWScalingNV(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkViewportWScalingNV *pViewportWScalings) const { const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetViewportWScalingNV()", VK_QUEUE_GRAPHICS_BIT, "VUID-vkCmdSetViewportWScalingNV-commandBuffer-cmdpool"); skip |= ValidateCmd(cb_state, CMD_SETVIEWPORTWSCALINGNV, "vkCmdSetViewportWScalingNV()"); return skip; } bool CoreChecks::PreCallValidateCmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth) const { const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetLineWidth()", VK_QUEUE_GRAPHICS_BIT, "VUID-vkCmdSetLineWidth-commandBuffer-cmdpool"); skip |= ValidateCmd(cb_state, CMD_SETLINEWIDTH, "vkCmdSetLineWidth()"); return skip; } bool CoreChecks::PreCallValidateCmdSetLineStippleEXT(VkCommandBuffer commandBuffer, uint32_t lineStippleFactor, uint16_t lineStipplePattern) const { const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetLineStippleEXT()", VK_QUEUE_GRAPHICS_BIT, "VUID-vkCmdSetLineStippleEXT-commandBuffer-cmdpool"); skip |= ValidateCmd(cb_state, CMD_SETLINESTIPPLEEXT, "vkCmdSetLineStippleEXT()"); return skip; } bool CoreChecks::PreCallValidateCmdSetDepthBias(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp, float depthBiasSlopeFactor) const { const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetDepthBias()", VK_QUEUE_GRAPHICS_BIT, "VUID-vkCmdSetDepthBias-commandBuffer-cmdpool"); skip |= ValidateCmd(cb_state, CMD_SETDEPTHBIAS, "vkCmdSetDepthBias()"); if ((depthBiasClamp != 0.0) && (!enabled_features.core.depthBiasClamp)) { skip |= LogError(commandBuffer, "VUID-vkCmdSetDepthBias-depthBiasClamp-00790", "vkCmdSetDepthBias(): the depthBiasClamp device feature is disabled: the depthBiasClamp parameter must " "be set to 0.0."); } return skip; } bool CoreChecks::PreCallValidateCmdSetBlendConstants(VkCommandBuffer commandBuffer, const float blendConstants[4]) const { const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetBlendConstants()", VK_QUEUE_GRAPHICS_BIT, "VUID-vkCmdSetBlendConstants-commandBuffer-cmdpool"); skip |= ValidateCmd(cb_state, CMD_SETBLENDCONSTANTS, "vkCmdSetBlendConstants()"); return skip; } bool CoreChecks::PreCallValidateCmdSetDepthBounds(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds) const { const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
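// minDepthBounds and maxDepthBounds are only range-checked against [0.0, 1.0] below when VK_EXT_depth_range_unrestricted is not enabled.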
assert(cb_state); bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetDepthBounds()", VK_QUEUE_GRAPHICS_BIT, "VUID-vkCmdSetDepthBounds-commandBuffer-cmdpool"); skip |= ValidateCmd(cb_state, CMD_SETDEPTHBOUNDS, "vkCmdSetDepthBounds()"); // The extension was not created with a feature bit whichs prevents displaying the 2 variations of the VUIDs if (!device_extensions.vk_ext_depth_range_unrestricted) { if (!(minDepthBounds >= 0.0) || !(minDepthBounds <= 1.0)) { // Also VUID-vkCmdSetDepthBounds-minDepthBounds-00600 skip |= LogError(commandBuffer, "VUID-vkCmdSetDepthBounds-minDepthBounds-02508", "vkCmdSetDepthBounds(): VK_EXT_depth_range_unrestricted extension is not enabled and minDepthBounds " "(=%f) is not within the [0.0, 1.0] range.", minDepthBounds); } if (!(maxDepthBounds >= 0.0) || !(maxDepthBounds <= 1.0)) { // Also VUID-vkCmdSetDepthBounds-maxDepthBounds-00601 skip |= LogError(commandBuffer, "VUID-vkCmdSetDepthBounds-maxDepthBounds-02509", "vkCmdSetDepthBounds(): VK_EXT_depth_range_unrestricted extension is not enabled and maxDepthBounds " "(=%f) is not within the [0.0, 1.0] range.", maxDepthBounds); } } return skip; } bool CoreChecks::PreCallValidateCmdSetStencilCompareMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t compareMask) const { const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetStencilCompareMask()", VK_QUEUE_GRAPHICS_BIT, "VUID-vkCmdSetStencilCompareMask-commandBuffer-cmdpool"); skip |= ValidateCmd(cb_state, CMD_SETSTENCILCOMPAREMASK, "vkCmdSetStencilCompareMask()"); return skip; } bool CoreChecks::PreCallValidateCmdSetStencilWriteMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t writeMask) const { const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetStencilWriteMask()", VK_QUEUE_GRAPHICS_BIT, "VUID-vkCmdSetStencilWriteMask-commandBuffer-cmdpool"); skip |= ValidateCmd(cb_state, CMD_SETSTENCILWRITEMASK, "vkCmdSetStencilWriteMask()"); return skip; } bool CoreChecks::PreCallValidateCmdSetStencilReference(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t reference) const { const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetStencilReference()", VK_QUEUE_GRAPHICS_BIT, "VUID-vkCmdSetStencilReference-commandBuffer-cmdpool"); skip |= ValidateCmd(cb_state, CMD_SETSTENCILREFERENCE, "vkCmdSetStencilReference()"); return skip; } bool CoreChecks::ValidateDynamicOffsetAlignment(VkCommandBuffer command_buffer, const VkDescriptorSetLayoutBinding *binding, VkDescriptorType test_type, VkDeviceSize alignment, const uint32_t *pDynamicOffsets, const char *err_msg, const char *limit_name, uint32_t *offset_idx) const { bool skip = false; if (binding->descriptorType == test_type) { const auto end_idx = *offset_idx + binding->descriptorCount; for (uint32_t current_idx = *offset_idx; current_idx < end_idx; current_idx++) { if (SafeModulo(pDynamicOffsets[current_idx], alignment) != 0) { skip |= LogError( command_buffer, err_msg, "vkCmdBindDescriptorSets(): pDynamicOffsets[%d] is %d but must be a multiple of device limit %s 0x%" PRIxLEAST64 ".", current_idx, pDynamicOffsets[current_idx], limit_name, alignment); } } *offset_idx = end_idx; } return skip; } bool CoreChecks::PreCallValidateCmdBindDescriptorSets(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout 
layout, uint32_t firstSet, uint32_t setCount, const VkDescriptorSet *pDescriptorSets, uint32_t dynamicOffsetCount, const uint32_t *pDynamicOffsets) const { const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); bool skip = false; skip |= ValidateCmdQueueFlags(cb_state, "vkCmdBindDescriptorSets()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, "VUID-vkCmdBindDescriptorSets-commandBuffer-cmdpool"); skip |= ValidateCmd(cb_state, CMD_BINDDESCRIPTORSETS, "vkCmdBindDescriptorSets()"); // Track total count of dynamic descriptor types to make sure we have an offset for each one uint32_t total_dynamic_descriptors = 0; string error_string = ""; const auto *pipeline_layout = GetPipelineLayout(layout); for (uint32_t set_idx = 0; set_idx < setCount; set_idx++) { const cvdescriptorset::DescriptorSet *descriptor_set = GetSetNode(pDescriptorSets[set_idx]); if (descriptor_set) { // Verify that set being bound is compatible with overlapping setLayout of pipelineLayout if (!VerifySetLayoutCompatibility(report_data, descriptor_set, pipeline_layout, set_idx + firstSet, error_string)) { skip |= LogError(pDescriptorSets[set_idx], "VUID-vkCmdBindDescriptorSets-pDescriptorSets-00358", "vkCmdBindDescriptorSets(): descriptorSet #%u being bound is not compatible with overlapping " "descriptorSetLayout at index %u of " "%s due to: %s.", set_idx, set_idx + firstSet, report_data->FormatHandle(layout).c_str(), error_string.c_str()); } auto set_dynamic_descriptor_count = descriptor_set->GetDynamicDescriptorCount(); if (set_dynamic_descriptor_count) { // First make sure we won't overstep bounds of pDynamicOffsets array if ((total_dynamic_descriptors + set_dynamic_descriptor_count) > dynamicOffsetCount) { // Test/report this here, such that we don't run past the end of pDynamicOffsets in the else clause skip |= LogError(pDescriptorSets[set_idx], "VUID-vkCmdBindDescriptorSets-dynamicOffsetCount-00359", "vkCmdBindDescriptorSets(): descriptorSet #%u (%s) requires %u dynamicOffsets, but only %u " "dynamicOffsets are left in " "pDynamicOffsets array. There must be one dynamic offset for each dynamic descriptor being bound.", set_idx, report_data->FormatHandle(pDescriptorSets[set_idx]).c_str(), descriptor_set->GetDynamicDescriptorCount(), (dynamicOffsetCount - total_dynamic_descriptors)); // Set the number found to the maximum to prevent duplicate messages, or subsequent descriptor sets from // testing against the "short tail" we're skipping below.
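// Clamping the running total to dynamicOffsetCount here also keeps the final count-mismatch check at the end of this function from reporting the same problem a second time.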
total_dynamic_descriptors = dynamicOffsetCount; } else { // Validate dynamic offsets and Dynamic Offset Minimums uint32_t cur_dyn_offset = total_dynamic_descriptors; const auto dsl = descriptor_set->GetLayout(); const auto binding_count = dsl->GetBindingCount(); const auto &limits = phys_dev_props.limits; for (uint32_t binding_idx = 0; binding_idx < binding_count; binding_idx++) { const auto *binding = dsl->GetDescriptorSetLayoutBindingPtrFromIndex(binding_idx); skip |= ValidateDynamicOffsetAlignment(commandBuffer, binding, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC, limits.minUniformBufferOffsetAlignment, pDynamicOffsets, "VUID-vkCmdBindDescriptorSets-pDynamicOffsets-01971", "minUniformBufferOffsetAlignment", &cur_dyn_offset); skip |= ValidateDynamicOffsetAlignment(commandBuffer, binding, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC, limits.minStorageBufferOffsetAlignment, pDynamicOffsets, "VUID-vkCmdBindDescriptorSets-pDynamicOffsets-01972", "minStorageBufferOffsetAlignment", &cur_dyn_offset); } // Keep running total of dynamic descriptor count to verify at the end total_dynamic_descriptors += set_dynamic_descriptor_count; } } } else { skip |= LogError(pDescriptorSets[set_idx], kVUID_Core_DrawState_InvalidSet, "vkCmdBindDescriptorSets(): Attempt to bind %s that doesn't exist!", report_data->FormatHandle(pDescriptorSets[set_idx]).c_str()); } } // dynamicOffsetCount must equal the total number of dynamic descriptors in the sets being bound if (total_dynamic_descriptors != dynamicOffsetCount) { skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdBindDescriptorSets-dynamicOffsetCount-00359", "vkCmdBindDescriptorSets(): Attempting to bind %u descriptorSets with %u dynamic descriptors, but " "dynamicOffsetCount is %u. It should " "exactly match the number of dynamic descriptors.", setCount, total_dynamic_descriptors, dynamicOffsetCount); } // firstSet and descriptorSetCount sum must be less than setLayoutCount if ((firstSet + setCount) > static_cast<uint32_t>(pipeline_layout->set_layouts.size())) { skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdBindDescriptorSets-firstSet-00360", "vkCmdBindDescriptorSets(): Sum of firstSet (%u) and descriptorSetCount (%u) is greater than " "VkPipelineLayoutCreateInfo::setLayoutCount " "(%zu) when pipeline layout was created", firstSet, setCount, pipeline_layout->set_layouts.size()); } return skip; } // Validates that the supplied bind point is supported for the command buffer (vis. the command pool) // Takes array of error codes as some of the VUID's (e.g. vkCmdBindPipeline) are written per bindpoint // TODO add vkCmdBindPipeline bind_point validation using this call. 
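// The queue capability required for each bind point comes from the static flag_mask table inside the function; VK_PIPELINE_BIND_POINT_RAY_TRACING_NV is accepted on either a graphics or a compute queue.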
bool CoreChecks::ValidatePipelineBindPoint(const CMD_BUFFER_STATE *cb_state, VkPipelineBindPoint bind_point, const char *func_name, const std::map<VkPipelineBindPoint, std::string> &bind_errors) const { bool skip = false; auto pool = cb_state->command_pool.get(); if (pool) { // The loss of a pool in a recording cmd is reported in DestroyCommandPool static const std::map<VkPipelineBindPoint, VkQueueFlags> flag_mask = { std::make_pair(VK_PIPELINE_BIND_POINT_GRAPHICS, static_cast<VkQueueFlags>(VK_QUEUE_GRAPHICS_BIT)), std::make_pair(VK_PIPELINE_BIND_POINT_COMPUTE, static_cast<VkQueueFlags>(VK_QUEUE_COMPUTE_BIT)), std::make_pair(VK_PIPELINE_BIND_POINT_RAY_TRACING_NV, static_cast<VkQueueFlags>(VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT)), }; const auto &qfp = GetPhysicalDeviceState()->queue_family_properties[pool->queueFamilyIndex]; if (0 == (qfp.queueFlags & flag_mask.at(bind_point))) { const std::string &error = bind_errors.at(bind_point); LogObjectList objlist(cb_state->commandBuffer); objlist.add(cb_state->createInfo.commandPool); skip |= LogError(objlist, error, "%s: %s was allocated from %s that does not support bindpoint %s.", func_name, report_data->FormatHandle(cb_state->commandBuffer).c_str(), report_data->FormatHandle(cb_state->createInfo.commandPool).c_str(), string_VkPipelineBindPoint(bind_point)); } } return skip; } bool CoreChecks::PreCallValidateCmdPushDescriptorSetKHR(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, uint32_t set, uint32_t descriptorWriteCount, const VkWriteDescriptorSet *pDescriptorWrites) const { const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); const char *func_name = "vkCmdPushDescriptorSetKHR()"; bool skip = false; skip |= ValidateCmd(cb_state, CMD_PUSHDESCRIPTORSETKHR, func_name); skip |= ValidateCmdQueueFlags(cb_state, func_name, (VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT), "VUID-vkCmdPushDescriptorSetKHR-commandBuffer-cmdpool"); static const std::map<VkPipelineBindPoint, std::string> bind_errors = { std::make_pair(VK_PIPELINE_BIND_POINT_GRAPHICS, "VUID-vkCmdPushDescriptorSetKHR-pipelineBindPoint-00363"), std::make_pair(VK_PIPELINE_BIND_POINT_COMPUTE, "VUID-vkCmdPushDescriptorSetKHR-pipelineBindPoint-00363"), std::make_pair(VK_PIPELINE_BIND_POINT_RAY_TRACING_NV, "VUID-vkCmdPushDescriptorSetKHR-pipelineBindPoint-00363")}; skip |= ValidatePipelineBindPoint(cb_state, pipelineBindPoint, func_name, bind_errors); const auto layout_data = GetPipelineLayout(layout); // Validate the set index points to a push descriptor set and is in range if (layout_data) { const auto &set_layouts = layout_data->set_layouts; if (set < set_layouts.size()) { const auto dsl = set_layouts[set]; if (dsl) { if (!dsl->IsPushDescriptor()) { skip = LogError(layout, "VUID-vkCmdPushDescriptorSetKHR-set-00365", "%s: Set index %" PRIu32 " does not match push descriptor set layout index for %s.", func_name, set, report_data->FormatHandle(layout).c_str()); } else { // Create an empty proxy in order to use the existing descriptor set update validation // TODO move the validation (like this) that doesn't need descriptor set state to the DSL object so we // don't have to do this. 
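// The proxy set below is never allocated from a descriptor pool; it only wraps the push descriptor set layout so the shared write-update validation can run against it.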
cvdescriptorset::DescriptorSet proxy_ds(VK_NULL_HANDLE, nullptr, dsl, 0, this); skip |= ValidatePushDescriptorsUpdate(&proxy_ds, descriptorWriteCount, pDescriptorWrites, func_name); } } } else { skip = LogError(layout, "VUID-vkCmdPushDescriptorSetKHR-set-00364", "%s: Set index %" PRIu32 " is outside of range for %s (set < %" PRIu32 ").", func_name, set, report_data->FormatHandle(layout).c_str(), static_cast<uint32_t>(set_layouts.size())); } } return skip; } bool CoreChecks::PreCallValidateCmdBindIndexBuffer(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkIndexType indexType) const { const auto buffer_state = GetBufferState(buffer); const auto cb_node = GetCBState(commandBuffer); assert(buffer_state); assert(cb_node); bool skip = ValidateBufferUsageFlags(buffer_state, VK_BUFFER_USAGE_INDEX_BUFFER_BIT, true, "VUID-vkCmdBindIndexBuffer-buffer-00433", "vkCmdBindIndexBuffer()", "VK_BUFFER_USAGE_INDEX_BUFFER_BIT"); skip |= ValidateCmdQueueFlags(cb_node, "vkCmdBindIndexBuffer()", VK_QUEUE_GRAPHICS_BIT, "VUID-vkCmdBindIndexBuffer-commandBuffer-cmdpool"); skip |= ValidateCmd(cb_node, CMD_BINDINDEXBUFFER, "vkCmdBindIndexBuffer()"); skip |= ValidateMemoryIsBoundToBuffer(buffer_state, "vkCmdBindIndexBuffer()", "VUID-vkCmdBindIndexBuffer-buffer-00434"); const auto offset_align = GetIndexAlignment(indexType); if (offset % offset_align) { skip |= LogError(commandBuffer, "VUID-vkCmdBindIndexBuffer-offset-00432", "vkCmdBindIndexBuffer() offset (0x%" PRIxLEAST64 ") does not fall on alignment (%s) boundary.", offset, string_VkIndexType(indexType)); } if (offset >= buffer_state->requirements.size) { skip |= LogError(commandBuffer, "VUID-vkCmdBindIndexBuffer-offset-00431", "vkCmdBindIndexBuffer() offset (0x%" PRIxLEAST64 ") is not less than the size (0x%" PRIxLEAST64 ") of buffer (%s).", offset, buffer_state->requirements.size, report_data->FormatHandle(buffer_state->buffer).c_str()); } return skip; } bool CoreChecks::PreCallValidateCmdBindVertexBuffers(VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer *pBuffers, const VkDeviceSize *pOffsets) const { const auto cb_state = GetCBState(commandBuffer); assert(cb_state); bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdBindVertexBuffers()", VK_QUEUE_GRAPHICS_BIT, "VUID-vkCmdBindVertexBuffers-commandBuffer-cmdpool"); skip |= ValidateCmd(cb_state, CMD_BINDVERTEXBUFFERS, "vkCmdBindVertexBuffers()"); for (uint32_t i = 0; i < bindingCount; ++i) { const auto buffer_state = GetBufferState(pBuffers[i]); if (buffer_state) { skip |= ValidateBufferUsageFlags(buffer_state, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT, true, "VUID-vkCmdBindVertexBuffers-pBuffers-00627", "vkCmdBindVertexBuffers()", "VK_BUFFER_USAGE_VERTEX_BUFFER_BIT"); skip |= ValidateMemoryIsBoundToBuffer(buffer_state, "vkCmdBindVertexBuffers()", "VUID-vkCmdBindVertexBuffers-pBuffers-00628"); if (pOffsets[i] >= buffer_state->createInfo.size) { skip |= LogError(buffer_state->buffer, "VUID-vkCmdBindVertexBuffers-pOffsets-00626", "vkCmdBindVertexBuffers() offset (0x%" PRIxLEAST64 ") is beyond the end of the buffer.", pOffsets[i]); } } } return skip; } // Validate that an image's sampleCount matches the requirement for a specific API call bool CoreChecks::ValidateImageSampleCount(const IMAGE_STATE *image_state, VkSampleCountFlagBits sample_count, const char *location, const std::string &msgCode) const { bool skip = false; if (image_state->createInfo.samples != sample_count) { skip = LogError(image_state->image, msgCode, "%s for %s was created with a sample 
count of %s but must be %s.", location, report_data->FormatHandle(image_state->image).c_str(), string_VkSampleCountFlagBits(image_state->createInfo.samples), string_VkSampleCountFlagBits(sample_count)); } return skip; } bool CoreChecks::PreCallValidateCmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize dataSize, const void *pData) const { const auto cb_state = GetCBState(commandBuffer); assert(cb_state); const auto dst_buffer_state = GetBufferState(dstBuffer); assert(dst_buffer_state); bool skip = false; skip |= ValidateMemoryIsBoundToBuffer(dst_buffer_state, "vkCmdUpdateBuffer()", "VUID-vkCmdUpdateBuffer-dstBuffer-00035"); // Validate that DST buffer has correct usage flags set skip |= ValidateBufferUsageFlags(dst_buffer_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, "VUID-vkCmdUpdateBuffer-dstBuffer-00034", "vkCmdUpdateBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT"); skip |= ValidateCmdQueueFlags(cb_state, "vkCmdUpdateBuffer()", VK_QUEUE_TRANSFER_BIT | VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, "VUID-vkCmdUpdateBuffer-commandBuffer-cmdpool"); skip |= ValidateCmd(cb_state, CMD_UPDATEBUFFER, "vkCmdUpdateBuffer()"); skip |= InsideRenderPass(cb_state, "vkCmdUpdateBuffer()", "VUID-vkCmdUpdateBuffer-renderpass"); skip |= ValidateProtectedBuffer(cb_state, dst_buffer_state, "vkCmdUpdateBuffer()", "VUID-vkCmdUpdateBuffer-commandBuffer-01813"); skip |= ValidateUnprotectedBuffer(cb_state, dst_buffer_state, "vkCmdUpdateBuffer()", "VUID-vkCmdUpdateBuffer-commandBuffer-01814"); return skip; } bool CoreChecks::PreCallValidateCmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) const { const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetEvent()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, "VUID-vkCmdSetEvent-commandBuffer-cmdpool"); skip |= ValidateCmd(cb_state, CMD_SETEVENT, "vkCmdSetEvent()"); skip |= InsideRenderPass(cb_state, "vkCmdSetEvent()", "VUID-vkCmdSetEvent-renderpass"); CoreErrorLocation loc(ErrFunc::vkCmdSetEvent, RefPage::vkCmdSetEvent, Field::stageMask); LogObjectList objects(commandBuffer); skip |= ValidatePipelineStage(objects, loc, GetQueueFlags(*cb_state), stageMask); skip |= ValidateStageMaskHost(loc, stageMask); return skip; } bool CoreChecks::PreCallValidateCmdSetEvent2KHR(VkCommandBuffer commandBuffer, VkEvent event, const VkDependencyInfoKHR *pDependencyInfo) const { const char *func = "vkCmdSetEvent2KHR()"; LogObjectList objects(commandBuffer); objects.add(event); const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); bool skip = ValidateCmdQueueFlags(cb_state, func, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, "VUID-vkCmdSetEvent2KHR-commandBuffer-cmdpool"); skip |= ValidateCmd(cb_state, CMD_SETEVENT, func); skip |= InsideRenderPass(cb_state, func, "VUID-vkCmdSetEvent2KHR-renderpass"); CoreErrorLocation loc(ErrFunc::vkCmdSetEvent2KHR, RefPage::vkCmdSetEvent2KHR, Field::pDependencyInfo); if (pDependencyInfo->dependencyFlags != 0) { skip |= LogError(objects, "VUID-vkCmdSetEvent2KHR-dependencyFlags-03825", "%s (%s) must be 0", loc.dot(Field::dependencyFlags).Message().c_str(), string_VkDependencyFlags(pDependencyInfo->dependencyFlags).c_str()); } skip |= ValidateDependencyInfo(objects, loc, cb_state, kGeneral, pDependencyInfo); return skip; } bool CoreChecks::PreCallValidateCmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) const { const 
CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); LogObjectList objects(commandBuffer); CoreErrorLocation loc(ErrFunc::vkCmdResetEvent, RefPage::vkCmdResetEvent, Field::stageMask); bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdResetEvent()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, "VUID-vkCmdResetEvent-commandBuffer-cmdpool"); skip |= ValidateCmd(cb_state, CMD_RESETEVENT, "vkCmdResetEvent()"); skip |= InsideRenderPass(cb_state, "vkCmdResetEvent()", "VUID-vkCmdResetEvent-renderpass"); skip |= ValidatePipelineStage(objects, loc, GetQueueFlags(*cb_state), stageMask); skip |= ValidateStageMaskHost(loc, stageMask); return skip; } bool CoreChecks::PreCallValidateCmdResetEvent2KHR(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags2KHR stageMask) const { const char *func = "vkCmdResetEvent2KHR()"; const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); LogObjectList objects(commandBuffer); CoreErrorLocation loc(ErrFunc::vkCmdResetEvent2KHR, RefPage::vkCmdResetEvent2KHR, Field::stageMask); bool skip = ValidateCmdQueueFlags(cb_state, func, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, "VUID-vkCmdResetEvent2KHR-commandBuffer-cmdpool"); skip |= ValidateCmd(cb_state, CMD_RESETEVENT, func); skip |= InsideRenderPass(cb_state, func, "VUID-vkCmdResetEvent2KHR-renderpass"); skip |= ValidatePipelineStage(objects, loc, GetQueueFlags(*cb_state), stageMask); skip |= ValidateStageMaskHost(loc, stageMask); return skip; } static bool HasNonFramebufferStagePipelineStageFlags(VkPipelineStageFlags2KHR inflags) { return (inflags & ~(VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT)) != 0; } // transient helper struct for checking parts of VUID 02285 struct RenderPassDepState { const CoreChecks *core; const std::string func_name; const std::string vuid; uint32_t active_subpass; const VkRenderPass rp_handle; const VkPipelineStageFlags2KHR disabled_features; const std::vector<uint32_t> &self_dependencies; const safe_VkSubpassDependency2 *dependencies; RenderPassDepState(const CoreChecks *c, const std::string &f, const std::string &v, uint32_t subpass, const VkRenderPass handle, const DeviceFeatures &features, const std::vector<uint32_t> &self_deps, const safe_VkSubpassDependency2 *deps) : core(c), func_name(f), vuid(v), active_subpass(subpass), rp_handle(handle), disabled_features(sync_utils::DisabledPipelineStages(features)), self_dependencies(self_deps), dependencies(deps) {} VkMemoryBarrier2KHR GetSubPassDepBarrier(const safe_VkSubpassDependency2 &dep) { VkMemoryBarrier2KHR result; const auto *barrier = LvlFindInChain<VkMemoryBarrier2KHR>(dep.pNext); if (barrier) { result = *barrier; } else { result.srcStageMask = dep.srcStageMask; result.dstStageMask = dep.dstStageMask; result.srcAccessMask = dep.srcAccessMask; result.dstAccessMask = dep.dstAccessMask; } return result; } bool ValidateStage(const CoreErrorLocation &loc, VkPipelineStageFlags2KHR src_stage_mask, VkPipelineStageFlags2KHR dst_stage_mask) { // Look for matching mask in any self-dependency bool match = false; for (const auto self_dep_index : self_dependencies) { const auto sub_dep = GetSubPassDepBarrier(dependencies[self_dep_index]); auto sub_src_stage_mask = sync_utils::ExpandPipelineStages(sub_dep.srcStageMask, sync_utils::kAllQueueTypes, disabled_features); auto sub_dst_stage_mask = sync_utils::ExpandPipelineStages(sub_dep.dstStageMask, 
sync_utils::kAllQueueTypes, disabled_features); match = ((sub_src_stage_mask == VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) || (src_stage_mask == (sub_src_stage_mask & src_stage_mask))) && ((sub_dst_stage_mask == VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) || (dst_stage_mask == (sub_dst_stage_mask & dst_stage_mask))); if (match) break; } if (!match) { std::stringstream self_dep_ss; stream_join(self_dep_ss, ", ", self_dependencies); core->LogError(rp_handle, vuid, "%s (0x%" PRIx64 ") is not a subset of VkSubpassDependency srcAccessMask " "for any self-dependency of subpass %d of %s for which dstAccessMask is also a subset. " "Candidate VkSubpassDependency are pDependencies entries [%s].", loc.dot(Field::srcStageMask).Message().c_str(), src_stage_mask, active_subpass, core->report_data->FormatHandle(rp_handle).c_str(), self_dep_ss.str().c_str()); core->LogError(rp_handle, vuid, "%s (0x%" PRIx64 ") is not a subset of VkSubpassDependency dstAccessMask " "for any self-dependency of subpass %d of %s for which srcAccessMask is also a subset. " "Candidate VkSubpassDependency are pDependencies entries [%s].", loc.dot(Field::dstStageMask).Message().c_str(), dst_stage_mask, active_subpass, core->report_data->FormatHandle(rp_handle).c_str(), self_dep_ss.str().c_str()); } return !match; } bool ValidateAccess(const CoreErrorLocation &loc, VkAccessFlags2KHR src_access_mask, VkAccessFlags2KHR dst_access_mask) { bool match = false; for (const auto self_dep_index : self_dependencies) { const auto sub_dep = GetSubPassDepBarrier(dependencies[self_dep_index]); match = (src_access_mask == (sub_dep.srcAccessMask & src_access_mask)) && (dst_access_mask == (sub_dep.dstAccessMask & dst_access_mask)); if (match) break; } if (!match) { std::stringstream self_dep_ss; stream_join(self_dep_ss, ", ", self_dependencies); core->LogError(rp_handle, vuid, "%s (0x%" PRIx64 ") is not a subset of VkSubpassDependency " "srcAccessMask of subpass %d of %s. Candidate VkSubpassDependency are pDependencies entries [%s].", loc.dot(Field::srcAccessMask).Message().c_str(), src_access_mask, active_subpass, core->report_data->FormatHandle(rp_handle).c_str(), self_dep_ss.str().c_str()); core->LogError(rp_handle, vuid, "%s (0x%" PRIx64 ") is not a subset of VkSubpassDependency " "dstAccessMask of subpass %d of %s. Candidate VkSubpassDependency are pDependencies entries [%s].", loc.dot(Field::dstAccessMask).Message().c_str(), dst_access_mask, active_subpass, core->report_data->FormatHandle(rp_handle).c_str(), self_dep_ss.str().c_str()); } return !match; } bool ValidateDependencyFlag(VkDependencyFlags dependency_flags) { bool match = false; for (const auto self_dep_index : self_dependencies) { const auto &sub_dep = dependencies[self_dep_index]; match = sub_dep.dependencyFlags == dependency_flags; if (match) break; } if (!match) { std::stringstream self_dep_ss; stream_join(self_dep_ss, ", ", self_dependencies); core->LogError(rp_handle, vuid, "%s: dependencyFlags param (0x%X) does not equal VkSubpassDependency dependencyFlags value for any " "self-dependency of subpass %d of %s. 
Candidate VkSubpassDependency are pDependencies entries [%s].", func_name.c_str(), dependency_flags, active_subpass, core->report_data->FormatHandle(rp_handle).c_str(), self_dep_ss.str().c_str()); } return !match; } }; // Validate VUs for Pipeline Barriers that are within a renderPass // Pre: cb_state->activeRenderPass must be a pointer to valid renderPass state bool CoreChecks::ValidateRenderPassPipelineBarriers(const CoreErrorLocation &outer_loc, const CMD_BUFFER_STATE *cb_state, VkPipelineStageFlags src_stage_mask, VkPipelineStageFlags dst_stage_mask, VkDependencyFlags dependency_flags, uint32_t mem_barrier_count, const VkMemoryBarrier *mem_barriers, uint32_t buffer_mem_barrier_count, const VkBufferMemoryBarrier *buffer_mem_barriers, uint32_t image_mem_barrier_count, const VkImageMemoryBarrier *image_barriers) const { bool skip = false; const auto rp_state = cb_state->activeRenderPass; RenderPassDepState state(this, outer_loc.StringFuncName().c_str(), "VUID-vkCmdPipelineBarrier-pDependencies-02285", cb_state->activeSubpass, rp_state->renderPass, enabled_features, rp_state->self_dependencies[cb_state->activeSubpass], rp_state->createInfo.pDependencies); if (state.self_dependencies.size() == 0) { skip |= LogError(state.rp_handle, "VUID-vkCmdPipelineBarrier-pDependencies-02285", "%s Barriers cannot be set during subpass %d of %s with no self-dependency specified.", outer_loc.Message().c_str(), state.active_subpass, report_data->FormatHandle(state.rp_handle).c_str()); return skip; } // Grab ref to current subpassDescription up-front for use below const auto &sub_desc = rp_state->createInfo.pSubpasses[state.active_subpass]; skip |= state.ValidateStage(outer_loc, src_stage_mask, dst_stage_mask); if (0 != buffer_mem_barrier_count) { skip |= LogError(state.rp_handle, "VUID-vkCmdPipelineBarrier-bufferMemoryBarrierCount-01178", "%s: bufferMemoryBarrierCount is non-zero (%d) for subpass %d of %s.", state.func_name.c_str(), buffer_mem_barrier_count, state.active_subpass, report_data->FormatHandle(rp_state->renderPass).c_str()); } for (uint32_t i = 0; i < mem_barrier_count; ++i) { const auto &mem_barrier = mem_barriers[i]; CoreErrorLocation loc(outer_loc.func_name, RefPage::VkMemoryBarrier, Field::pMemoryBarriers, i); skip |= state.ValidateAccess(loc, mem_barrier.srcAccessMask, mem_barrier.dstAccessMask); } for (uint32_t i = 0; i < image_mem_barrier_count; ++i) { const auto &img_barrier = image_barriers[i]; CoreErrorLocation loc(outer_loc.func_name, RefPage::VkImageMemoryBarrier, Field::pImageMemoryBarriers, i); skip |= state.ValidateAccess(loc, img_barrier.srcAccessMask, img_barrier.dstAccessMask); if (VK_QUEUE_FAMILY_IGNORED != img_barrier.srcQueueFamilyIndex || VK_QUEUE_FAMILY_IGNORED != img_barrier.dstQueueFamilyIndex) { skip |= LogError(state.rp_handle, "VUID-vkCmdPipelineBarrier-srcQueueFamilyIndex-01182", "%s is %d and dstQueueFamilyIndex is %d but both must be VK_QUEUE_FAMILY_IGNORED.", loc.dot(Field::srcQueueFamilyIndex).Message().c_str(), img_barrier.srcQueueFamilyIndex, img_barrier.dstQueueFamilyIndex); } // Secondary CBs can have null framebuffer so record will queue up validation in that case 'til FB is known if (VK_NULL_HANDLE != cb_state->activeFramebuffer) { skip |= ValidateImageBarrierAttachment(loc, cb_state, cb_state->activeFramebuffer.get(), state.active_subpass, sub_desc, state.rp_handle, img_barrier); } } skip |= state.ValidateDependencyFlag(dependency_flags); return skip; } bool CoreChecks::ValidateRenderPassPipelineBarriers(const CoreErrorLocation &outer_loc, const 
CMD_BUFFER_STATE *cb_state, const VkDependencyInfoKHR *dep_info) const { bool skip = false; const auto rp_state = cb_state->activeRenderPass; RenderPassDepState state(this, outer_loc.StringFuncName().c_str(), "VUID-vkCmdPipelineBarrier2KHR-pDependencies-02285", cb_state->activeSubpass, rp_state->renderPass, enabled_features, rp_state->self_dependencies[cb_state->activeSubpass], rp_state->createInfo.pDependencies); if (state.self_dependencies.size() == 0) { skip |= LogError(state.rp_handle, state.vuid, "%s: Barriers cannot be set during subpass %d of %s with no self-dependency specified.", state.func_name.c_str(), state.active_subpass, report_data->FormatHandle(rp_state->renderPass).c_str()); return skip; } // Grab ref to current subpassDescription up-front for use below const auto &sub_desc = rp_state->createInfo.pSubpasses[state.active_subpass]; for (uint32_t i = 0; i < dep_info->memoryBarrierCount; ++i) { const auto &mem_barrier = dep_info->pMemoryBarriers[i]; CoreErrorLocation loc(outer_loc.func_name, RefPage::VkMemoryBarrier2KHR, Field::pMemoryBarriers, i); skip |= state.ValidateStage(loc, mem_barrier.srcStageMask, mem_barrier.dstStageMask); skip |= state.ValidateAccess(loc, mem_barrier.srcAccessMask, mem_barrier.dstAccessMask); } if (0 != dep_info->bufferMemoryBarrierCount) { skip |= LogError(state.rp_handle, "VUID-vkCmdPipelineBarrier2KHR-bufferMemoryBarrierCount-01178", "%s: bufferMemoryBarrierCount is non-zero (%d) for subpass %d of %s.", state.func_name.c_str(), dep_info->bufferMemoryBarrierCount, state.active_subpass, report_data->FormatHandle(state.rp_handle).c_str()); } for (uint32_t i = 0; i < dep_info->imageMemoryBarrierCount; ++i) { const auto &img_barrier = dep_info->pImageMemoryBarriers[i]; CoreErrorLocation loc(outer_loc.func_name, RefPage::VkImageMemoryBarrier2KHR, Field::pImageMemoryBarriers, i); skip |= state.ValidateStage(loc, img_barrier.srcStageMask, img_barrier.dstStageMask); skip |= state.ValidateAccess(loc, img_barrier.srcAccessMask, img_barrier.dstAccessMask); if (VK_QUEUE_FAMILY_IGNORED != img_barrier.srcQueueFamilyIndex || VK_QUEUE_FAMILY_IGNORED != img_barrier.dstQueueFamilyIndex) { skip |= LogError(state.rp_handle, "VUID-vkCmdPipelineBarrier2KHR-srcQueueFamilyIndex-01182", "%s is %d and dstQueueFamilyIndex is %d but both must be VK_QUEUE_FAMILY_IGNORED.", loc.dot(Field::srcQueueFamilyIndex).Message().c_str(), img_barrier.srcQueueFamilyIndex, img_barrier.dstQueueFamilyIndex); } // Secondary CBs can have null framebuffer so record will queue up validation in that case 'til FB is known if (VK_NULL_HANDLE != cb_state->activeFramebuffer) { skip |= ValidateImageBarrierAttachment(loc, cb_state, cb_state->activeFramebuffer.get(), state.active_subpass, sub_desc, state.rp_handle, img_barrier); } } skip |= state.ValidateDependencyFlag(dep_info->dependencyFlags); return skip; } bool CoreChecks::ValidateStageMasksAgainstQueueCapabilities(const LogObjectList &objects, const CoreErrorLocation &loc, VkQueueFlags queue_flags, VkPipelineStageFlags2KHR stage_mask) const { bool skip = false; // these are always allowed. 
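// ALL_COMMANDS, TOP_OF_PIPE, BOTTOM_OF_PIPE and HOST are legal on every queue type, so they are masked off before the per-queue capability checks below.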
stage_mask &= ~(VK_PIPELINE_STAGE_2_ALL_COMMANDS_BIT_KHR | VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT_KHR | VK_PIPELINE_STAGE_2_BOTTOM_OF_PIPE_BIT_KHR | VK_PIPELINE_STAGE_2_HOST_BIT_KHR); if (stage_mask == 0) { return skip; } static const std::map<VkPipelineStageFlags2KHR, VkQueueFlags> metaFlags{ {VK_PIPELINE_STAGE_2_ALL_GRAPHICS_BIT_KHR, VK_QUEUE_GRAPHICS_BIT}, {VK_PIPELINE_STAGE_2_ALL_TRANSFER_BIT_KHR, VK_QUEUE_TRANSFER_BIT}, {VK_PIPELINE_STAGE_2_PRE_RASTERIZATION_SHADERS_BIT_KHR, VK_QUEUE_GRAPHICS_BIT}, {VK_PIPELINE_STAGE_2_VERTEX_INPUT_BIT_KHR, VK_QUEUE_GRAPHICS_BIT}, }; for (const auto &entry : metaFlags) { if (((entry.first & stage_mask) != 0) && ((entry.second & queue_flags) == 0)) { auto vuid = sync_vuid_maps::GetStageQueueCapVUID(loc, entry.first); skip |= LogError(objects, vuid, "%s flag %s is not compatible with the queue family properties (%s) of this command buffer.", loc.Message().c_str(), sync_utils::StringPipelineStageFlags(entry.first).c_str(), string_VkQueueFlags(queue_flags).c_str()); } stage_mask &= ~entry.first; } if (stage_mask == 0) { return skip; } auto supported_flags = sync_utils::ExpandPipelineStages(VK_PIPELINE_STAGE_2_ALL_COMMANDS_BIT_KHR, queue_flags); auto bad_flags = stage_mask & ~supported_flags; // Lookup each bit in the stagemask and check for overlap between its table bits and queue_flags for (size_t i = 0; i < sizeof(bad_flags) * 8; i++) { VkPipelineStageFlags2KHR bit = (1ULL << i) & bad_flags; if (bit) { auto vuid = sync_vuid_maps::GetStageQueueCapVUID(loc, bit); skip |= LogError( objects, vuid, "%s flag %s is not compatible with the queue family properties (%s) of this command buffer.", loc.Message().c_str(), sync_utils::StringPipelineStageFlags(bit).c_str(), string_VkQueueFlags(queue_flags).c_str()); } } return skip; } bool CoreChecks::ValidatePipelineStageFeatureEnables(const LogObjectList &objects, const CoreErrorLocation &loc, VkPipelineStageFlags2KHR stage_mask) const { bool skip = false; if (!enabled_features.synchronization2_features.synchronization2 && stage_mask == 0) { auto vuid = sync_vuid_maps::GetBadFeatureVUID(loc, 0); std::stringstream msg; msg << loc.Message() << " must not be 0 unless synchronization2 is enabled."; skip |= LogError(objects, vuid, "%s", msg.str().c_str()); } auto disabled_stages = sync_utils::DisabledPipelineStages(enabled_features); auto bad_bits = stage_mask & disabled_stages; if (bad_bits == 0) { return skip; } for (size_t i = 0; i < sizeof(bad_bits) * 8; i++) { VkPipelineStageFlags2KHR bit = 1ULL << i; if (bit & bad_bits) { auto vuid = sync_vuid_maps::GetBadFeatureVUID(loc, bit); std::stringstream msg; msg << loc.Message() << " includes " << sync_utils::StringPipelineStageFlags(bit) << " when the device does not have " << sync_vuid_maps::kFeatureNameMap.at(bit) << " feature enabled."; skip |= LogError(objects, vuid, "%s", msg.str().c_str()); } } return skip; } bool CoreChecks::ValidatePipelineStage(const LogObjectList &objects, const CoreErrorLocation &loc, VkQueueFlags queue_flags, VkPipelineStageFlags2KHR stage_mask) const { bool skip = false; skip |= ValidateStageMasksAgainstQueueCapabilities(objects, loc, queue_flags, stage_mask); skip |= ValidatePipelineStageFeatureEnables(objects, loc, stage_mask); return skip; } bool CoreChecks::ValidateAccessMask(const LogObjectList &objects, const CoreErrorLocation &loc, VkQueueFlags queue_flags, VkAccessFlags2KHR access_mask, VkPipelineStageFlags2KHR stage_mask) const { bool skip = false; // Early out if all commands set if ((stage_mask & 
VK_PIPELINE_STAGE_2_ALL_COMMANDS_BIT_KHR) != 0) return skip; // or if only generic memory accesses are specified (or we got a 0 mask) access_mask &= ~(VK_ACCESS_2_MEMORY_READ_BIT_KHR | VK_ACCESS_2_MEMORY_WRITE_BIT_KHR); if (access_mask == 0) return skip; auto expanded_stages = sync_utils::ExpandPipelineStages(stage_mask, queue_flags); // TODO: auto valid_accesses = sync_utils::CompatibleAccessMask(expanded_stages); auto bad_accesses = (access_mask & ~valid_accesses); if (bad_accesses == 0) { return skip; } for (size_t i = 0; i < sizeof(bad_accesses) * 8; i++) { VkAccessFlags2KHR bit = (1ULL << i); if (bad_accesses & bit) { auto vuid = sync_vuid_maps::GetBadAccessFlagsVUID(loc, bit); std::stringstream msg; msg << loc.Message() << " bit " << sync_utils::StringAccessFlags(bit) << " is not supported by stage mask (" << sync_utils::StringPipelineStageFlags(stage_mask) << ")."; skip |= LogError(objects, vuid, "%s", msg.str().c_str()); } } return skip; } bool CoreChecks::ValidateEventStageMask(const ValidationStateTracker *state_data, const CMD_BUFFER_STATE *pCB, size_t eventCount, size_t firstEventIndex, VkPipelineStageFlags2KHR sourceStageMask, EventToStageMap *localEventToStageMap) { bool skip = false; VkPipelineStageFlags2KHR stage_mask = 0; const auto max_event = std::min((firstEventIndex + eventCount), pCB->events.size()); for (size_t event_index = firstEventIndex; event_index < max_event; ++event_index) { auto event = pCB->events[event_index]; auto event_data = localEventToStageMap->find(event); if (event_data != localEventToStageMap->end()) { stage_mask |= event_data->second; } else { auto global_event_data = state_data->GetEventState(event); if (!global_event_data) { skip |= state_data->LogError(event, kVUID_Core_DrawState_InvalidEvent, "%s cannot be waited on if it has never been set.", state_data->report_data->FormatHandle(event).c_str()); } else { stage_mask |= global_event_data->stageMask; } } } // TODO: Need to validate that host_bit is only set if set event is called // but set event can be called at any time. if (sourceStageMask != stage_mask && sourceStageMask != (stage_mask | VK_PIPELINE_STAGE_HOST_BIT)) { skip |= state_data->LogError( pCB->commandBuffer, "VUID-vkCmdWaitEvents-srcStageMask-parameter", "Submitting cmdbuffer with call to VkCmdWaitEvents using srcStageMask 0x%" PRIx64 " which must be the bitwise OR of " "the stageMask parameters used in calls to vkCmdSetEvent and VK_PIPELINE_STAGE_HOST_BIT if used with " "vkSetEvent but instead is 0x%" PRIx64 ".", sourceStageMask, stage_mask); } return skip; } // Check if all barriers are of a given operation type. 
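// AllTransferOp() is a small helper for ComputeBarrierOperationsType() below: it returns true only
// when every barrier in the array satisfies op_check (i.e. is a queue family ownership release or
// acquire with respect to this command pool). Note it vacuously returns true for count == 0, which
// the caller guards against before using the result.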
template <typename Barrier, typename OpCheck>
bool AllTransferOp(const COMMAND_POOL_STATE *pool, OpCheck &op_check, uint32_t count, const Barrier *barriers) {
    if (!pool) return false;

    for (uint32_t b = 0; b < count; b++) {
        if (!op_check(pool, barriers[b])) return false;
    }
    return true;
}

// Look at the barriers to see if they are all release or all acquire; the result impacts queue properties validation
template <typename BufBarrier, typename ImgBarrier>
BarrierOperationsType CoreChecks::ComputeBarrierOperationsType(const CMD_BUFFER_STATE *cb_state, uint32_t buffer_barrier_count,
                                                               const BufBarrier *buffer_barriers, uint32_t image_barrier_count,
                                                               const ImgBarrier *image_barriers) const {
    auto pool = cb_state->command_pool.get();
    BarrierOperationsType op_type = kGeneral;

    // Look at the barrier details only if they exist
    // Note: AllTransferOp returns true for count == 0
    if ((buffer_barrier_count + image_barrier_count) != 0) {
        if (AllTransferOp(pool, TempIsReleaseOp<BufBarrier>, buffer_barrier_count, buffer_barriers) &&
            AllTransferOp(pool, TempIsReleaseOp<ImgBarrier>, image_barrier_count, image_barriers)) {
            op_type = kAllRelease;
        } else if (AllTransferOp(pool, IsAcquireOp<BufBarrier>, buffer_barrier_count, buffer_barriers) &&
                   AllTransferOp(pool, IsAcquireOp<ImgBarrier>, image_barrier_count, image_barriers)) {
            op_type = kAllAcquire;
        }
    }
    return op_type;
}

bool CoreChecks::PreCallValidateCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
                                              VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
                                              uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
                                              uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                                              uint32_t imageMemoryBarrierCount,
                                              const VkImageMemoryBarrier *pImageMemoryBarriers) const {
    bool skip = false;
    const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    assert(cb_state);

    auto queue_flags = GetQueueFlags(*cb_state);
    LogObjectList objects(commandBuffer);
    CoreErrorLocation loc(ErrFunc::vkCmdWaitEvents, RefPage::vkCmdWaitEvents);
    skip |= ValidatePipelineStage(objects, loc.dot(Field::srcStageMask), queue_flags, srcStageMask);
    skip |= ValidatePipelineStage(objects, loc.dot(Field::dstStageMask), queue_flags, dstStageMask);
    skip |= ValidateCmdQueueFlags(cb_state, "vkCmdWaitEvents()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
                                  "VUID-vkCmdWaitEvents-commandBuffer-cmdpool");
    skip |= ValidateCmd(cb_state, CMD_WAITEVENTS, "vkCmdWaitEvents()");
    skip |= ValidateBarriers(loc, cb_state, srcStageMask, dstStageMask, memoryBarrierCount, pMemoryBarriers,
                             bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
    return skip;
}

bool CoreChecks::PreCallValidateCmdWaitEvents2KHR(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
                                                  const VkDependencyInfoKHR *pDependencyInfos) const {
    const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    assert(cb_state);

    bool skip = false;
    for (uint32_t i = 0; (i < eventCount) && !skip; i++) {
        LogObjectList objects(commandBuffer);
        objects.add(pEvents[i]);
        CoreErrorLocation loc(ErrFunc::vkCmdWaitEvents2KHR, RefPage::vkCmdWaitEvents2KHR, Field::pDependencyInfos, i);
        if (pDependencyInfos[i].dependencyFlags != 0) {
            skip |= LogError(objects, "VUID-vkCmdWaitEvents2KHR-dependencyFlags-03844", "%s (%s) must be 0.",
                             loc.dot(Field::dependencyFlags).Message().c_str(),
                             string_VkDependencyFlags(pDependencyInfos[i].dependencyFlags).c_str());
        }
        skip |= ValidateDependencyInfo(objects, loc, cb_state, kGeneral, &pDependencyInfos[i]);
    }
    skip |= ValidateCmdQueueFlags(cb_state, "vkCmdWaitEvents2KHR()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
                                  "VUID-vkCmdWaitEvents-commandBuffer-cmdpool");
    skip |= ValidateCmd(cb_state, CMD_WAITEVENTS, "vkCmdWaitEvents2KHR()");
    return skip;
}

void CoreChecks::PreCallRecordCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
                                            VkPipelineStageFlags sourceStageMask, VkPipelineStageFlags dstStageMask,
                                            uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
                                            uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                                            uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
    CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    // The StateTracker call below will add to the events vector.
    auto first_event_index = cb_state->events.size();
    StateTracker::PreCallRecordCmdWaitEvents(commandBuffer, eventCount, pEvents, sourceStageMask, dstStageMask,
                                             memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers,
                                             imageMemoryBarrierCount, pImageMemoryBarriers);
    auto event_added_count = cb_state->events.size() - first_event_index;

    const CMD_BUFFER_STATE *cb_state_const = cb_state;
    cb_state->eventUpdates.emplace_back(
        [cb_state_const, event_added_count, first_event_index, sourceStageMask](
            const ValidationStateTracker *device_data, bool do_validate, EventToStageMap *localEventToStageMap) {
            if (!do_validate) return false;
            return ValidateEventStageMask(device_data, cb_state_const, event_added_count, first_event_index, sourceStageMask,
                                          localEventToStageMap);
        });
    TransitionImageLayouts(cb_state, imageMemoryBarrierCount, pImageMemoryBarriers);
}

void CoreChecks::PreCallRecordCmdWaitEvents2KHR(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
                                                const VkDependencyInfoKHR *pDependencyInfos) {
    CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    // The StateTracker call below will add to the events vector.
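    // Note: the source stage masks cannot be fully validated at record time, because the matching
    // vkCmdSetEvent calls may be recorded into other command buffers; the lambdas queued onto
    // eventUpdates below therefore run at submit time, once the accumulated stage masks are known.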
auto first_event_index = cb_state->events.size(); StateTracker::PreCallRecordCmdWaitEvents2KHR(commandBuffer, eventCount, pEvents, pDependencyInfos); auto event_added_count = cb_state->events.size() - first_event_index; const CMD_BUFFER_STATE *cb_state_const = cb_state; for (uint32_t i = 0; i < eventCount; i++) { const auto &dep_info = pDependencyInfos[i]; auto stage_masks = sync_utils::GetGlobalStageMasks(dep_info); cb_state->eventUpdates.emplace_back( [cb_state_const, event_added_count, first_event_index, stage_masks]( const ValidationStateTracker *device_data, bool do_validate, EventToStageMap *localEventToStageMap) { if (!do_validate) return false; return ValidateEventStageMask(device_data, cb_state_const, event_added_count, first_event_index, stage_masks.src, localEventToStageMap); }); TransitionImageLayouts(cb_state, dep_info.imageMemoryBarrierCount, dep_info.pImageMemoryBarriers); } } void CoreChecks::PostCallRecordCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents, VkPipelineStageFlags sourceStageMask, VkPipelineStageFlags dstStageMask, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers, uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) { CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); RecordBarriers(ErrFunc::vkCmdWaitEvents, cb_state, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers); } void CoreChecks::PostCallRecordCmdWaitEvents2KHR(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents, const VkDependencyInfoKHR *pDependencyInfos) { CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); for (uint32_t i = 0; i < eventCount; i++) { const auto &dep_info = pDependencyInfos[i]; RecordBarriers(ErrFunc::vkCmdWaitEvents2KHR, cb_state, dep_info); } } bool CoreChecks::PreCallValidateCmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers, uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) const { bool skip = false; const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); LogObjectList objects(commandBuffer); auto queue_flags = GetQueueFlags(*cb_state); CoreErrorLocation loc(ErrFunc::vkCmdPipelineBarrier, RefPage::vkCmdPipelineBarrier); auto op_type = ComputeBarrierOperationsType(cb_state, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers); if (op_type == kAllRelease || op_type == kGeneral) { skip |= ValidatePipelineStage(objects, loc.dot(Field::srcStageMask), queue_flags, srcStageMask); } if (op_type == kAllAcquire || op_type == kGeneral) { skip |= ValidatePipelineStage(objects, loc.dot(Field::dstStageMask), queue_flags, dstStageMask); } skip |= ValidateCmdQueueFlags(cb_state, "vkCmdPipelineBarrier()", VK_QUEUE_TRANSFER_BIT | VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, "VUID-vkCmdPipelineBarrier-commandBuffer-cmdpool"); skip |= ValidateCmd(cb_state, CMD_PIPELINEBARRIER, "vkCmdPipelineBarrier()"); if (cb_state->activeRenderPass) { skip |= ValidateRenderPassPipelineBarriers(loc, cb_state, srcStageMask, dstStageMask, dependencyFlags, memoryBarrierCount, pMemoryBarriers, 
                                                       bufferMemoryBarrierCount, pBufferMemoryBarriers,
                                                       imageMemoryBarrierCount, pImageMemoryBarriers);
        if (skip) return true;  // Early return to avoid redundant errors from below calls
    } else {
        if (dependencyFlags & VK_DEPENDENCY_VIEW_LOCAL_BIT) {
            skip |= LogError(objects, "VUID-vkCmdPipelineBarrier-dependencyFlags-01186",
                             "%s VK_DEPENDENCY_VIEW_LOCAL_BIT must not be set outside of a render pass instance",
                             loc.dot(Field::dependencyFlags).Message().c_str());
        }
    }
    skip |= ValidateBarriers(loc, cb_state, srcStageMask, dstStageMask, memoryBarrierCount, pMemoryBarriers,
                             bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
    return skip;
}

bool CoreChecks::PreCallValidateCmdPipelineBarrier2KHR(VkCommandBuffer commandBuffer,
                                                       const VkDependencyInfoKHR *pDependencyInfo) const {
    bool skip = false;
    const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    assert(cb_state);
    LogObjectList objects(commandBuffer);
    auto op_type =
        ComputeBarrierOperationsType(cb_state, pDependencyInfo->bufferMemoryBarrierCount, pDependencyInfo->pBufferMemoryBarriers,
                                     pDependencyInfo->imageMemoryBarrierCount, pDependencyInfo->pImageMemoryBarriers);

    CoreErrorLocation loc(ErrFunc::vkCmdPipelineBarrier2KHR, RefPage::vkCmdPipelineBarrier2KHR, Field::pDependencyInfo);
    skip |= ValidateCmdQueueFlags(cb_state, "vkCmdPipelineBarrier2KHR()",
                                  VK_QUEUE_TRANSFER_BIT | VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
                                  "VUID-vkCmdPipelineBarrier-commandBuffer-cmdpool");
    skip |= ValidateCmd(cb_state, CMD_PIPELINEBARRIER, "vkCmdPipelineBarrier2KHR()");
    if (cb_state->activeRenderPass) {
        skip |= ValidateRenderPassPipelineBarriers(loc, cb_state, pDependencyInfo);
        if (skip) return true;  // Early return to avoid redundant errors from below calls
    } else {
        if (pDependencyInfo->dependencyFlags & VK_DEPENDENCY_VIEW_LOCAL_BIT) {
            skip |= LogError(objects, "VUID-vkCmdPipelineBarrier2KHR-dependencyFlags-01186",
                             "%s VK_DEPENDENCY_VIEW_LOCAL_BIT must not be set outside of a render pass instance",
                             loc.dot(Field::dependencyFlags).Message().c_str());
        }
    }
    skip |= ValidateDependencyInfo(objects, loc, cb_state, op_type, pDependencyInfo);
    return skip;
}

void CoreChecks::PreCallRecordCmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask,
                                                 VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
                                                 uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
                                                 uint32_t bufferMemoryBarrierCount,
                                                 const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                                                 uint32_t imageMemoryBarrierCount,
                                                 const VkImageMemoryBarrier *pImageMemoryBarriers) {
    CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);

    RecordBarriers(ErrFunc::vkCmdPipelineBarrier, cb_state, bufferMemoryBarrierCount, pBufferMemoryBarriers,
                   imageMemoryBarrierCount, pImageMemoryBarriers);

    TransitionImageLayouts(cb_state, imageMemoryBarrierCount, pImageMemoryBarriers);
}

void CoreChecks::PreCallRecordCmdPipelineBarrier2KHR(VkCommandBuffer commandBuffer, const VkDependencyInfoKHR *pDependencyInfo) {
    CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);

    RecordBarriers(ErrFunc::vkCmdPipelineBarrier2KHR, cb_state, *pDependencyInfo);
    TransitionImageLayouts(cb_state, pDependencyInfo->imageMemoryBarrierCount, pDependencyInfo->pImageMemoryBarriers);
}

bool CoreChecks::ValidateBeginQuery(const CMD_BUFFER_STATE *cb_state, const QueryObject &query_obj, VkFlags flags, CMD_TYPE cmd,
                                    const char *cmd_name, const ValidateBeginQueryVuids *vuids) const {
    bool skip = false;
    const auto *query_pool_state = GetQueryPoolState(query_obj.pool);
    const auto &query_pool_ci = query_pool_state->createInfo;

    if (query_pool_ci.queryType == VK_QUERY_TYPE_TIMESTAMP) {
        skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdBeginQuery-queryType-02804",
                         "%s: The query pool's query type must not be VK_QUERY_TYPE_TIMESTAMP.", cmd_name);
    }

    // Check for nested queries
    if (cb_state->activeQueries.size()) {
        for (auto a_query : cb_state->activeQueries) {
            auto active_query_pool_state = GetQueryPoolState(a_query.pool);
            if (active_query_pool_state->createInfo.queryType == query_pool_ci.queryType) {
                LogObjectList obj_list(cb_state->commandBuffer);
                obj_list.add(query_obj.pool);
                obj_list.add(a_query.pool);
                skip |= LogError(obj_list, vuids->vuid_dup_query_type,
                                 "%s: Within the same command buffer %s, query %d from pool %s has same queryType as active query "
                                 "%d from pool %s.",
                                 cmd_name, report_data->FormatHandle(cb_state->commandBuffer).c_str(), query_obj.index,
                                 report_data->FormatHandle(query_obj.pool).c_str(), a_query.index,
                                 report_data->FormatHandle(a_query.pool).c_str());
            }
        }
    }

    // There are tighter queue constraints to test for certain query pools
    if (query_pool_ci.queryType == VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT) {
        skip |= ValidateCmdQueueFlags(cb_state, cmd_name, VK_QUEUE_GRAPHICS_BIT, vuids->vuid_queue_feedback);
    }
    if (query_pool_ci.queryType == VK_QUERY_TYPE_OCCLUSION) {
        skip |= ValidateCmdQueueFlags(cb_state, cmd_name, VK_QUEUE_GRAPHICS_BIT, vuids->vuid_queue_occlusion);
    }
    if (query_pool_ci.queryType == VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR) {
        if (!cb_state->performance_lock_acquired) {
            skip |= LogError(cb_state->commandBuffer, vuids->vuid_profile_lock,
                             "%s: profiling lock must be held before vkBeginCommandBuffer is called on "
                             "a command buffer where performance queries are recorded.",
                             cmd_name);
        }

        if (query_pool_state->has_perf_scope_command_buffer && cb_state->commandCount > 0) {
            skip |= LogError(cb_state->commandBuffer, vuids->vuid_scope_not_first,
                             "%s: Query pool %s was created with a counter of scope "
                             "VK_QUERY_SCOPE_COMMAND_BUFFER_KHR but %s is not the first recorded "
                             "command in the command buffer.",
                             cmd_name, report_data->FormatHandle(query_obj.pool).c_str(), cmd_name);
        }

        if (query_pool_state->has_perf_scope_render_pass && cb_state->activeRenderPass) {
            skip |= LogError(cb_state->commandBuffer, vuids->vuid_scope_in_rp,
                             "%s: Query pool %s was created with a counter of scope "
                             "VK_QUERY_SCOPE_RENDER_PASS_KHR but %s is inside a render pass.",
                             cmd_name, report_data->FormatHandle(query_obj.pool).c_str(), cmd_name);
        }
    }

    skip |= ValidateCmdQueueFlags(cb_state, cmd_name, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, vuids->vuid_queue_flags);

    if (flags & VK_QUERY_CONTROL_PRECISE_BIT) {
        if (!enabled_features.core.occlusionQueryPrecise) {
            skip |= LogError(cb_state->commandBuffer, vuids->vuid_precise,
                             "%s: VK_QUERY_CONTROL_PRECISE_BIT provided, but precise occlusion queries not enabled on the device.",
                             cmd_name);
        }

        if (query_pool_ci.queryType != VK_QUERY_TYPE_OCCLUSION) {
            skip |= LogError(cb_state->commandBuffer, vuids->vuid_precise,
                             "%s: VK_QUERY_CONTROL_PRECISE_BIT provided, but pool query type is not VK_QUERY_TYPE_OCCLUSION",
                             cmd_name);
        }
    }

    if (query_obj.query >= query_pool_ci.queryCount) {
        skip |= LogError(cb_state->commandBuffer, vuids->vuid_query_count,
                         "%s: Query index %" PRIu32 " must be less than query count %" PRIu32 " of %s.", cmd_name,
                         query_obj.query, query_pool_ci.queryCount, report_data->FormatHandle(query_obj.pool).c_str());
    }

    if (cb_state->unprotected == false) {
        skip |= LogError(cb_state->commandBuffer, vuids->vuid_protected_cb,
                         "%s: command can't be used in protected command buffers.", cmd_name);
    }

    skip |= ValidateCmd(cb_state, cmd, cmd_name);
    return skip;
}

bool CoreChecks::PreCallValidateCmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot,
                                              VkFlags flags) const {
    if (disabled[query_validation]) return false;
    const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    assert(cb_state);
    QueryObject query_obj(queryPool, slot);
    struct BeginQueryVuids : ValidateBeginQueryVuids {
        BeginQueryVuids() : ValidateBeginQueryVuids() {
            vuid_queue_flags = "VUID-vkCmdBeginQuery-commandBuffer-cmdpool";
            vuid_queue_feedback = "VUID-vkCmdBeginQuery-queryType-02327";
            vuid_queue_occlusion = "VUID-vkCmdBeginQuery-queryType-00803";
            vuid_precise = "VUID-vkCmdBeginQuery-queryType-00800";
            vuid_query_count = "VUID-vkCmdBeginQuery-query-00802";
            vuid_profile_lock = "VUID-vkCmdBeginQuery-queryPool-03223";
            vuid_scope_not_first = "VUID-vkCmdBeginQuery-queryPool-03224";
            vuid_scope_in_rp = "VUID-vkCmdBeginQuery-queryPool-03225";
            vuid_dup_query_type = "VUID-vkCmdBeginQuery-queryPool-01922";
            vuid_protected_cb = "VUID-vkCmdBeginQuery-commandBuffer-01885";
        }
    };
    BeginQueryVuids vuids;
    return ValidateBeginQuery(cb_state, query_obj, flags, CMD_BEGINQUERY, "vkCmdBeginQuery()", &vuids);
}

bool CoreChecks::VerifyQueryIsReset(const ValidationStateTracker *state_data, VkCommandBuffer commandBuffer, QueryObject query_obj,
                                    const char *func_name, VkQueryPool &firstPerfQueryPool, uint32_t perfPass,
                                    QueryMap *localQueryToStateMap) {
    bool skip = false;

    const auto *query_pool_state = state_data->GetQueryPoolState(query_obj.pool);
    const auto &query_pool_ci = query_pool_state->createInfo;

    QueryState state = state_data->GetQueryState(localQueryToStateMap, query_obj.pool, query_obj.query, perfPass);
    // If reset was in another command buffer, check the global map
    if (state == QUERYSTATE_UNKNOWN) {
        state = state_data->GetQueryState(&state_data->queryToStateMap, query_obj.pool, query_obj.query, perfPass);
    }
    // Performance queries have limitations on when they can be reset.
    if (query_pool_ci.queryType == VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR && state == QUERYSTATE_UNKNOWN &&
        perfPass >= query_pool_state->n_performance_passes) {
        // If the pass is invalid, assume RESET state, another error
        // will be raised in ValidatePerformanceQuery().
        state = QUERYSTATE_RESET;
    }

    if (state != QUERYSTATE_RESET) {
        skip |= state_data->LogError(commandBuffer, kVUID_Core_DrawState_QueryNotReset,
                                     "%s: %s and query %" PRIu32
                                     ": query not reset. "
                                     "After query pool creation, each query must be reset before it is used. "
                                     "Queries must also be reset between uses.",
                                     func_name, state_data->report_data->FormatHandle(query_obj.pool).c_str(), query_obj.query);
    }

    return skip;
}

bool CoreChecks::ValidatePerformanceQuery(const ValidationStateTracker *state_data, VkCommandBuffer commandBuffer,
                                          QueryObject query_obj, const char *func_name, VkQueryPool &firstPerfQueryPool,
                                          uint32_t perfPass, QueryMap *localQueryToStateMap) {
    const auto *query_pool_state = state_data->GetQueryPoolState(query_obj.pool);
    const auto &query_pool_ci = query_pool_state->createInfo;

    if (query_pool_ci.queryType != VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR) return false;

    const CMD_BUFFER_STATE *cb_state = state_data->GetCBState(commandBuffer);
    bool skip = false;

    if (perfPass >= query_pool_state->n_performance_passes) {
        skip |= state_data->LogError(commandBuffer, "VUID-VkPerformanceQuerySubmitInfoKHR-counterPassIndex-03221",
                                     "Invalid counterPassIndex (%u, maximum allowed %u) value for query pool %s.", perfPass,
                                     query_pool_state->n_performance_passes,
                                     state_data->report_data->FormatHandle(query_obj.pool).c_str());
    }

    if (!cb_state->performance_lock_acquired || cb_state->performance_lock_released) {
        skip |= state_data->LogError(commandBuffer, "VUID-vkQueueSubmit-pCommandBuffers-03220",
                                     "Commandbuffer %s was submitted and contains a performance query but the "
                                     "profiling lock was not held continuously throughout the recording of commands.",
                                     state_data->report_data->FormatHandle(commandBuffer).c_str());
    }

    QueryState command_buffer_state = state_data->GetQueryState(localQueryToStateMap, query_obj.pool, query_obj.query, perfPass);
    if (command_buffer_state == QUERYSTATE_RESET) {
        skip |= state_data->LogError(
            commandBuffer, query_obj.indexed ? "VUID-vkCmdBeginQueryIndexedEXT-None-02863" : "VUID-vkCmdBeginQuery-None-02863",
            "VkQuery begin command recorded in a command buffer that, either directly or "
            "through secondary command buffers, also contains a vkCmdResetQueryPool command "
            "affecting the same query.");
    }

    if (firstPerfQueryPool != VK_NULL_HANDLE) {
        if (firstPerfQueryPool != query_obj.pool &&
            !state_data->enabled_features.performance_query_features.performanceCounterMultipleQueryPools) {
            skip |= state_data->LogError(
                commandBuffer,
                query_obj.indexed ? "VUID-vkCmdBeginQueryIndexedEXT-queryPool-03226" : "VUID-vkCmdBeginQuery-queryPool-03226",
                "Commandbuffer %s contains more than one performance query pool but "
                "performanceCounterMultipleQueryPools is not enabled.",
                state_data->report_data->FormatHandle(commandBuffer).c_str());
        }
    } else {
        firstPerfQueryPool = query_obj.pool;
    }

    return skip;
}

void CoreChecks::EnqueueVerifyBeginQuery(VkCommandBuffer command_buffer, const QueryObject &query_obj, const char *func_name) {
    CMD_BUFFER_STATE *cb_state = GetCBState(command_buffer);

    // Enqueue the submit time validation here, ahead of the submit time state update in the StateTracker's PostCallRecord
    cb_state->queryUpdates.emplace_back([command_buffer, query_obj, func_name](const ValidationStateTracker *device_data,
                                                                               bool do_validate, VkQueryPool &firstPerfQueryPool,
                                                                               uint32_t perfPass, QueryMap *localQueryToStateMap) {
        if (!do_validate) return false;
        bool skip = false;
        skip |= ValidatePerformanceQuery(device_data, command_buffer, query_obj, func_name, firstPerfQueryPool, perfPass,
                                         localQueryToStateMap);
        skip |= VerifyQueryIsReset(device_data, command_buffer, query_obj, func_name, firstPerfQueryPool, perfPass,
                                   localQueryToStateMap);
        return skip;
    });
}

void CoreChecks::PreCallRecordCmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot, VkFlags flags) {
    if (disabled[query_validation]) return;
    QueryObject query_obj = {queryPool, slot};
    EnqueueVerifyBeginQuery(commandBuffer, query_obj, "vkCmdBeginQuery()");
}

void CoreChecks::EnqueueVerifyEndQuery(VkCommandBuffer command_buffer, const QueryObject &query_obj) {
    CMD_BUFFER_STATE *cb_state = GetCBState(command_buffer);

    // Enqueue the submit time validation here, ahead of the submit time state update in the StateTracker's PostCallRecord
    cb_state->queryUpdates.emplace_back([command_buffer, query_obj](const ValidationStateTracker *device_data, bool do_validate,
                                                                    VkQueryPool &firstPerfQueryPool, uint32_t perfPass,
                                                                    QueryMap *localQueryToStateMap) {
        if (!do_validate) return false;
        bool skip = false;
        const CMD_BUFFER_STATE *cb_state = device_data->GetCBState(command_buffer);
        const auto *query_pool_state = device_data->GetQueryPoolState(query_obj.pool);
        if (query_pool_state->has_perf_scope_command_buffer && (cb_state->commandCount - 1) != query_obj.endCommandIndex) {
            skip |= device_data->LogError(command_buffer, "VUID-vkCmdEndQuery-queryPool-03227",
                                          "vkCmdEndQuery: Query pool %s was created with a counter of scope "
                                          "VK_QUERY_SCOPE_COMMAND_BUFFER_KHR but the end of the query is not the last "
                                          "command in the command buffer %s.",
                                          device_data->report_data->FormatHandle(query_obj.pool).c_str(),
                                          device_data->report_data->FormatHandle(command_buffer).c_str());
        }
        return skip;
    });
}

bool CoreChecks::ValidateCmdEndQuery(const CMD_BUFFER_STATE *cb_state, const QueryObject &query_obj, CMD_TYPE cmd,
                                     const char *cmd_name, const ValidateEndQueryVuids *vuids) const {
    bool skip = false;
    if (!cb_state->activeQueries.count(query_obj)) {
        skip |= LogError(cb_state->commandBuffer, vuids->vuid_active_queries,
                         "%s: Ending a query before it was started: %s, index %d.", cmd_name,
                         report_data->FormatHandle(query_obj.pool).c_str(), query_obj.query);
    }
    const auto *query_pool_state = GetQueryPoolState(query_obj.pool);
    const auto &query_pool_ci = query_pool_state->createInfo;
    if (query_pool_ci.queryType == VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR) {
        if (query_pool_state->has_perf_scope_render_pass && cb_state->activeRenderPass) {
            skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdEndQuery-queryPool-03228",
                             "%s: Query pool %s was created with a counter of scope "
                             "VK_QUERY_SCOPE_RENDER_PASS_KHR but %s is inside a render pass.",
                             cmd_name, report_data->FormatHandle(query_obj.pool).c_str(), cmd_name);
        }
    }
    skip |= ValidateCmdQueueFlags(cb_state, cmd_name, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, vuids->vuid_queue_flags);
    skip |= ValidateCmd(cb_state, cmd, cmd_name);

    if (cb_state->unprotected == false) {
        skip |= LogError(cb_state->commandBuffer, vuids->vuid_protected_cb,
                         "%s: command can't be used in protected command buffers.", cmd_name);
    }
    return skip;
}

bool CoreChecks::PreCallValidateCmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot) const {
    if (disabled[query_validation]) return false;
    bool skip = false;
    QueryObject query_obj = {queryPool, slot};
    const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    assert(cb_state);

    const QUERY_POOL_STATE *query_pool_state = GetQueryPoolState(queryPool);
    if (query_pool_state) {
        const uint32_t available_query_count = query_pool_state->createInfo.queryCount;
        // Only continue validating if the slot is even within range
        if (slot >= available_query_count) {
            skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdEndQuery-query-00810",
                             "vkCmdEndQuery(): query index (%u) is greater or equal to the queryPool size (%u).", slot,
                             available_query_count);
        } else {
            struct EndQueryVuids : ValidateEndQueryVuids {
                EndQueryVuids() : ValidateEndQueryVuids() {
                    vuid_queue_flags = "VUID-vkCmdEndQuery-commandBuffer-cmdpool";
                    vuid_active_queries = "VUID-vkCmdEndQuery-None-01923";
                    vuid_protected_cb = "VUID-vkCmdEndQuery-commandBuffer-01886";
                }
            };
            EndQueryVuids vuids;
            skip |= ValidateCmdEndQuery(cb_state, query_obj, CMD_ENDQUERY, "vkCmdEndQuery()", &vuids);
        }
    }
    return skip;
}

void CoreChecks::PreCallRecordCmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot) {
    if (disabled[query_validation]) return;
    const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    QueryObject query_obj = {queryPool, slot};
    query_obj.endCommandIndex = cb_state->commandCount - 1;
    EnqueueVerifyEndQuery(commandBuffer, query_obj);
}

bool CoreChecks::ValidateQueryPoolIndex(VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, const char *func_name,
                                        const char *first_vuid, const char *sum_vuid) const {
    bool skip = false;
    const QUERY_POOL_STATE *query_pool_state = GetQueryPoolState(queryPool);
    if (query_pool_state) {
        const uint32_t available_query_count = query_pool_state->createInfo.queryCount;
        if (firstQuery >= available_query_count) {
            skip |= LogError(queryPool, first_vuid,
                             "%s: In Query %s the firstQuery (%u) is greater or equal to the queryPool size (%u).", func_name,
                             report_data->FormatHandle(queryPool).c_str(), firstQuery, available_query_count);
        }
        if ((firstQuery + queryCount) > available_query_count) {
            skip |= LogError(queryPool, sum_vuid,
                             "%s: In Query %s the sum of firstQuery (%u) + queryCount (%u) is greater than the queryPool size (%u).",
                             func_name, report_data->FormatHandle(queryPool).c_str(), firstQuery, queryCount,
                             available_query_count);
        }
    }
    return skip;
}

bool CoreChecks::PreCallValidateCmdResetQueryPool(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery,
                                                  uint32_t queryCount) const {
    if (disabled[query_validation]) return false;
    const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    assert(cb_state);

    bool skip = InsideRenderPass(cb_state, "vkCmdResetQueryPool()", "VUID-vkCmdResetQueryPool-renderpass");
    skip |= ValidateCmd(cb_state, CMD_RESETQUERYPOOL, "vkCmdResetQueryPool()");
    skip |= ValidateCmdQueueFlags(cb_state, "vkCmdResetQueryPool()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
                                  "VUID-vkCmdResetQueryPool-commandBuffer-cmdpool");
    skip |= ValidateQueryPoolIndex(queryPool, firstQuery, queryCount, "vkCmdResetQueryPool()",
                                   "VUID-vkCmdResetQueryPool-firstQuery-00796", "VUID-vkCmdResetQueryPool-firstQuery-00797");

    return skip;
}

static QueryResultType GetQueryResultType(QueryState state, VkQueryResultFlags flags) {
    switch (state) {
        case QUERYSTATE_UNKNOWN:
            return QUERYRESULT_UNKNOWN;
        case QUERYSTATE_RESET:
        case QUERYSTATE_RUNNING:
            if (flags & VK_QUERY_RESULT_WAIT_BIT) {
                return ((state == QUERYSTATE_RESET) ? QUERYRESULT_WAIT_ON_RESET : QUERYRESULT_WAIT_ON_RUNNING);
            } else if ((flags & VK_QUERY_RESULT_PARTIAL_BIT) || (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT)) {
                return QUERYRESULT_SOME_DATA;
            } else {
                return QUERYRESULT_NO_DATA;
            }
        case QUERYSTATE_ENDED:
            if ((flags & VK_QUERY_RESULT_WAIT_BIT) || (flags & VK_QUERY_RESULT_PARTIAL_BIT) ||
                (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT)) {
                return QUERYRESULT_SOME_DATA;
            } else {
                return QUERYRESULT_UNKNOWN;
            }
        case QUERYSTATE_AVAILABLE:
            return QUERYRESULT_SOME_DATA;
    }
    assert(false);
    return QUERYRESULT_UNKNOWN;
}

bool CoreChecks::ValidateCopyQueryPoolResults(const ValidationStateTracker *state_data, VkCommandBuffer commandBuffer,
                                              VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, uint32_t perfPass,
                                              VkQueryResultFlags flags, QueryMap *localQueryToStateMap) {
    bool skip = false;
    for (uint32_t i = 0; i < queryCount; i++) {
        QueryState state = state_data->GetQueryState(localQueryToStateMap, queryPool, firstQuery + i, perfPass);
        QueryResultType result_type = GetQueryResultType(state, flags);
        if (result_type != QUERYRESULT_SOME_DATA && result_type != QUERYRESULT_UNKNOWN) {
            skip |= state_data->LogError(
                commandBuffer, kVUID_Core_DrawState_InvalidQuery,
                "vkCmdCopyQueryPoolResults(): Requesting a copy from query to buffer on %s query %" PRIu32 ": %s",
                state_data->report_data->FormatHandle(queryPool).c_str(), firstQuery + i, string_QueryResultType(result_type));
        }
    }
    return skip;
}

bool CoreChecks::PreCallValidateCmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery,
                                                        uint32_t queryCount, VkBuffer dstBuffer, VkDeviceSize dstOffset,
                                                        VkDeviceSize stride, VkQueryResultFlags flags) const {
    if (disabled[query_validation]) return false;
    const auto cb_state = GetCBState(commandBuffer);
    const auto dst_buff_state = GetBufferState(dstBuffer);
    assert(cb_state);
    assert(dst_buff_state);
    bool skip = ValidateMemoryIsBoundToBuffer(dst_buff_state, "vkCmdCopyQueryPoolResults()",
                                              "VUID-vkCmdCopyQueryPoolResults-dstBuffer-00826");
    skip |= ValidateQueryPoolStride("VUID-vkCmdCopyQueryPoolResults-flags-00822", "VUID-vkCmdCopyQueryPoolResults-flags-00823",
                                    stride, "dstOffset", dstOffset, flags);
    // Validate that DST buffer has correct usage flags set
    skip |= ValidateBufferUsageFlags(dst_buff_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
                                     "VUID-vkCmdCopyQueryPoolResults-dstBuffer-00825", "vkCmdCopyQueryPoolResults()",
                                     "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
    skip |= ValidateCmdQueueFlags(cb_state, "vkCmdCopyQueryPoolResults()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
                                  "VUID-vkCmdCopyQueryPoolResults-commandBuffer-cmdpool");
    skip |= ValidateCmd(cb_state, CMD_COPYQUERYPOOLRESULTS, "vkCmdCopyQueryPoolResults()");
    skip |= InsideRenderPass(cb_state, "vkCmdCopyQueryPoolResults()", "VUID-vkCmdCopyQueryPoolResults-renderpass");
    skip |= ValidateQueryPoolIndex(queryPool, firstQuery, queryCount,
"vkCmdCopyQueryPoolResults()", "VUID-vkCmdCopyQueryPoolResults-firstQuery-00820", "VUID-vkCmdCopyQueryPoolResults-firstQuery-00821"); if (dstOffset >= dst_buff_state->requirements.size) { skip |= LogError(commandBuffer, "VUID-vkCmdCopyQueryPoolResults-dstOffset-00819", "vkCmdCopyQueryPoolResults() dstOffset (0x%" PRIxLEAST64 ") is not less than the size (0x%" PRIxLEAST64 ") of buffer (%s).", dstOffset, dst_buff_state->requirements.size, report_data->FormatHandle(dst_buff_state->buffer).c_str()); } else if (dstOffset + (queryCount * stride) > dst_buff_state->requirements.size) { skip |= LogError(commandBuffer, "VUID-vkCmdCopyQueryPoolResults-dstBuffer-00824", "vkCmdCopyQueryPoolResults() storage required (0x%" PRIxLEAST64 ") equal to dstOffset + (queryCount * stride) is greater than the size (0x%" PRIxLEAST64 ") of buffer (%s).", dstOffset + (queryCount * stride), dst_buff_state->requirements.size, report_data->FormatHandle(dst_buff_state->buffer).c_str()); } auto query_pool_state_iter = queryPoolMap.find(queryPool); if (query_pool_state_iter != queryPoolMap.end()) { auto query_pool_state = query_pool_state_iter->second.get(); if (query_pool_state->createInfo.queryType == VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR) { skip |= ValidatePerformanceQueryResults("vkCmdCopyQueryPoolResults", query_pool_state, firstQuery, queryCount, flags); if (!phys_dev_ext_props.performance_query_props.allowCommandBufferQueryCopies) { skip |= LogError(commandBuffer, "VUID-vkCmdCopyQueryPoolResults-queryType-03232", "vkCmdCopyQueryPoolResults called with query pool %s but " "VkPhysicalDevicePerformanceQueryPropertiesKHR::allowCommandBufferQueryCopies " "is not set.", report_data->FormatHandle(queryPool).c_str()); } } if ((query_pool_state->createInfo.queryType == VK_QUERY_TYPE_TIMESTAMP) && ((flags & VK_QUERY_RESULT_PARTIAL_BIT) != 0)) { skip |= LogError(commandBuffer, "VUID-vkCmdCopyQueryPoolResults-queryType-00827", "vkCmdCopyQueryPoolResults() query pool %s was created with VK_QUERY_TYPE_TIMESTAMP so flags must not " "contain VK_QUERY_RESULT_PARTIAL_BIT.", report_data->FormatHandle(queryPool).c_str()); } if (query_pool_state->createInfo.queryType == VK_QUERY_TYPE_PERFORMANCE_QUERY_INTEL) { skip |= LogError(queryPool, "VUID-vkCmdCopyQueryPoolResults-queryType-02734", "vkCmdCopyQueryPoolResults() called but QueryPool %s was created with queryType " "VK_QUERY_TYPE_PERFORMANCE_QUERY_INTEL.", report_data->FormatHandle(queryPool).c_str()); } } return skip; } void CoreChecks::PreCallRecordCmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize stride, VkQueryResultFlags flags) { if (disabled[query_validation]) return; auto cb_state = GetCBState(commandBuffer); cb_state->queryUpdates.emplace_back([commandBuffer, queryPool, firstQuery, queryCount, flags]( const ValidationStateTracker *device_data, bool do_validate, VkQueryPool &firstPerfQueryPool, uint32_t perfPass, QueryMap *localQueryToStateMap) { if (!do_validate) return false; return ValidateCopyQueryPoolResults(device_data, commandBuffer, queryPool, firstQuery, queryCount, perfPass, flags, localQueryToStateMap); }); } bool CoreChecks::PreCallValidateCmdPushConstants(VkCommandBuffer commandBuffer, VkPipelineLayout layout, VkShaderStageFlags stageFlags, uint32_t offset, uint32_t size, const void *pValues) const { bool skip = false; const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); skip |= ValidateCmdQueueFlags(cb_state, 
"vkCmdPushConstants()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, "VUID-vkCmdPushConstants-commandBuffer-cmdpool"); skip |= ValidateCmd(cb_state, CMD_PUSHCONSTANTS, "vkCmdPushConstants()"); skip |= ValidatePushConstantRange(offset, size, "vkCmdPushConstants()"); if (0 == stageFlags) { skip |= LogError(commandBuffer, "VUID-vkCmdPushConstants-stageFlags-requiredbitmask", "vkCmdPushConstants() call has no stageFlags set."); } // Check if pipeline_layout VkPushConstantRange(s) overlapping offset, size have stageFlags set for each stage in the command // stageFlags argument, *and* that the command stageFlags argument has bits set for the stageFlags in each overlapping range. if (!skip) { const auto &ranges = *GetPipelineLayout(layout)->push_constant_ranges; VkShaderStageFlags found_stages = 0; for (const auto &range : ranges) { if ((offset >= range.offset) && (offset + size <= range.offset + range.size)) { VkShaderStageFlags matching_stages = range.stageFlags & stageFlags; if (matching_stages != range.stageFlags) { skip |= LogError(commandBuffer, "VUID-vkCmdPushConstants-offset-01796", "vkCmdPushConstants(): stageFlags (%s, offset (%" PRIu32 "), and size (%" PRIu32 "), must contain all stages in overlapping VkPushConstantRange stageFlags (%s), offset (%" PRIu32 "), and size (%" PRIu32 ") in %s.", string_VkShaderStageFlags(stageFlags).c_str(), offset, size, string_VkShaderStageFlags(range.stageFlags).c_str(), range.offset, range.size, report_data->FormatHandle(layout).c_str()); } // Accumulate all stages we've found found_stages = matching_stages | found_stages; } } if (found_stages != stageFlags) { uint32_t missing_stages = ~found_stages & stageFlags; skip |= LogError( commandBuffer, "VUID-vkCmdPushConstants-offset-01795", "vkCmdPushConstants(): %s, VkPushConstantRange in %s overlapping offset = %d and size = %d, do not contain %s.", string_VkShaderStageFlags(stageFlags).c_str(), report_data->FormatHandle(layout).c_str(), offset, size, string_VkShaderStageFlags(missing_stages).c_str()); } } return skip; } bool CoreChecks::PreCallValidateCmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage, VkQueryPool queryPool, uint32_t slot) const { if (disabled[query_validation]) return false; const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdWriteTimestamp()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT, "VUID-vkCmdWriteTimestamp-commandBuffer-cmdpool"); skip |= ValidateCmd(cb_state, CMD_WRITETIMESTAMP, "vkCmdWriteTimestamp()"); const QUERY_POOL_STATE *query_pool_state = GetQueryPoolState(queryPool); if ((query_pool_state != nullptr) && (query_pool_state->createInfo.queryType != VK_QUERY_TYPE_TIMESTAMP)) { skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdWriteTimestamp-queryPool-01416", "vkCmdWriteTimestamp(): Query Pool %s was not created with VK_QUERY_TYPE_TIMESTAMP.", report_data->FormatHandle(queryPool).c_str()); } const uint32_t timestamp_valid_bits = GetPhysicalDeviceState()->queue_family_properties[cb_state->command_pool->queueFamilyIndex].timestampValidBits; if (timestamp_valid_bits == 0) { skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdWriteTimestamp-timestampValidBits-00829", "vkCmdWriteTimestamp(): Query Pool %s has a timestampValidBits value of zero.", report_data->FormatHandle(queryPool).c_str()); } return skip; } bool CoreChecks::PreCallValidateCmdWriteTimestamp2KHR(VkCommandBuffer commandBuffer, VkPipelineStageFlags2KHR stage, VkQueryPool 
queryPool, uint32_t slot) const { if (disabled[query_validation]) return false; const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdWriteTimestamp2KHR()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT, "VUID-vkCmdWriteTimestamp2KHR-commandBuffer-cmdpool"); skip |= ValidateCmd(cb_state, CMD_WRITETIMESTAMP, "vkCmdWriteTimestamp2KHR()"); CoreErrorLocation loc(ErrFunc::vkCmdWriteTimestamp2KHR, RefPage::vkCmdWriteTimestamp2KHR, Field::stage); if ((stage & (stage - 1)) != 0) { skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdWriteTimestamp2KHR-stage-03859", "%s (%s) must only set a single pipeline stage.", loc.Message().c_str(), string_VkPipelineStageFlags2KHR(stage).c_str()); } skip |= ValidatePipelineStage(LogObjectList(cb_state->commandBuffer), loc, GetQueueFlags(*cb_state), stage); loc.field_name = Field::queryPool; const QUERY_POOL_STATE *query_pool_state = GetQueryPoolState(queryPool); if ((query_pool_state != nullptr) && (query_pool_state->createInfo.queryType != VK_QUERY_TYPE_TIMESTAMP)) { skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdWriteTimestamp2KHR-queryPool-03861", "%s Query Pool %s was not created with VK_QUERY_TYPE_TIMESTAMP.", loc.Message().c_str(), report_data->FormatHandle(queryPool).c_str()); } const uint32_t timestampValidBits = GetPhysicalDeviceState()->queue_family_properties[cb_state->command_pool->queueFamilyIndex].timestampValidBits; if (timestampValidBits == 0) { skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdWriteTimestamp2KHR-timestampValidBits-03863", "%s Query Pool %s has a timestampValidBits value of zero.", loc.Message().c_str(), report_data->FormatHandle(queryPool).c_str()); } return skip; } void CoreChecks::PreCallRecordCmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage, VkQueryPool queryPool, uint32_t slot) { if (disabled[query_validation]) return; // Enqueue the submit time validation check here, before the submit time state update in StateTracker::PostCall... CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); QueryObject query = {queryPool, slot}; const char *func_name = "vkCmdWriteTimestamp()"; cb_state->queryUpdates.emplace_back([commandBuffer, query, func_name](const ValidationStateTracker *device_data, bool do_validate, VkQueryPool &firstPerfQueryPool, uint32_t perfPass, QueryMap *localQueryToStateMap) { if (!do_validate) return false; return VerifyQueryIsReset(device_data, commandBuffer, query, func_name, firstPerfQueryPool, perfPass, localQueryToStateMap); }); } void CoreChecks::PreCallRecordCmdWriteTimestamp2KHR(VkCommandBuffer commandBuffer, VkPipelineStageFlags2KHR pipelineStage, VkQueryPool queryPool, uint32_t slot) { if (disabled[query_validation]) return; // Enqueue the submit time validation check here, before the submit time state update in StateTracker::PostCall... 
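    // As with the non-KHR path above, the reset-state check for the query is deferred to submit
    // time, since the query may legitimately be reset by a vkCmdResetQueryPool recorded in a
    // different command buffer that executes first.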
    CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    QueryObject query = {queryPool, slot};
    const char *func_name = "vkCmdWriteTimestamp2KHR()";
    cb_state->queryUpdates.emplace_back([commandBuffer, query, func_name](const ValidationStateTracker *device_data,
                                                                          bool do_validate, VkQueryPool &firstPerfQueryPool,
                                                                          uint32_t perfPass, QueryMap *localQueryToStateMap) {
        if (!do_validate) return false;
        return VerifyQueryIsReset(device_data, commandBuffer, query, func_name, firstPerfQueryPool, perfPass,
                                  localQueryToStateMap);
    });
}

void CoreChecks::PreCallRecordCmdWriteAccelerationStructuresPropertiesKHR(VkCommandBuffer commandBuffer,
                                                                          uint32_t accelerationStructureCount,
                                                                          const VkAccelerationStructureKHR *pAccelerationStructures,
                                                                          VkQueryType queryType, VkQueryPool queryPool,
                                                                          uint32_t firstQuery) {
    if (disabled[query_validation]) return;
    // Enqueue the submit time validation check here, before the submit time state update in StateTracker::PostCall...
    CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    const char *func_name = "vkCmdWriteAccelerationStructuresPropertiesKHR()";
    cb_state->queryUpdates.emplace_back([accelerationStructureCount, commandBuffer, firstQuery, func_name, queryPool](
                                            const ValidationStateTracker *device_data, bool do_validate,
                                            VkQueryPool &firstPerfQueryPool, uint32_t perfPass,
                                            QueryMap *localQueryToStateMap) {
        if (!do_validate) return false;
        bool skip = false;
        for (uint32_t i = 0; i < accelerationStructureCount; i++) {
            QueryObject query = {{queryPool, firstQuery + i}, perfPass};
            skip |= VerifyQueryIsReset(device_data, commandBuffer, query, func_name, firstPerfQueryPool, perfPass,
                                       localQueryToStateMap);
        }
        return skip;
    });
}

bool CoreChecks::MatchUsage(uint32_t count, const VkAttachmentReference2 *attachments, const VkFramebufferCreateInfo *fbci,
                            VkImageUsageFlagBits usage_flag, const char *error_code) const {
    bool skip = false;

    if (attachments) {
        for (uint32_t attach = 0; attach < count; attach++) {
            if (attachments[attach].attachment != VK_ATTACHMENT_UNUSED) {
                // Attachment counts are verified elsewhere, but prevent an invalid access
                if (attachments[attach].attachment < fbci->attachmentCount) {
                    if ((fbci->flags & VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT) == 0) {
                        const VkImageView *image_view = &fbci->pAttachments[attachments[attach].attachment];
                        auto view_state = GetImageViewState(*image_view);
                        if (view_state) {
                            const VkImageCreateInfo *ici = &GetImageState(view_state->create_info.image)->createInfo;
                            if (ici != nullptr) {
                                auto creation_usage = ici->usage;
                                const auto stencil_usage_info = LvlFindInChain<VkImageStencilUsageCreateInfo>(ici->pNext);
                                if (stencil_usage_info) {
                                    creation_usage |= stencil_usage_info->stencilUsage;
                                }
                                if ((creation_usage & usage_flag) == 0) {
                                    skip |= LogError(device, error_code,
                                                     "vkCreateFramebuffer: Framebuffer Attachment (%d) conflicts with the image's "
                                                     "IMAGE_USAGE flags (%s).",
                                                     attachments[attach].attachment, string_VkImageUsageFlagBits(usage_flag));
                                }
                            }
                        }
                    } else {
                        const VkFramebufferAttachmentsCreateInfo *fbaci =
                            LvlFindInChain<VkFramebufferAttachmentsCreateInfo>(fbci->pNext);
                        if (fbaci != nullptr && fbaci->pAttachmentImageInfos != nullptr &&
                            fbaci->attachmentImageInfoCount > attachments[attach].attachment) {
                            uint32_t image_usage = fbaci->pAttachmentImageInfos[attachments[attach].attachment].usage;
                            if ((image_usage & usage_flag) == 0) {
                                skip |= LogError(device, error_code,
                                                 "vkCreateFramebuffer: Framebuffer attachment info (%d) conflicts with the image's "
                                                 "IMAGE_USAGE flags (%s).",
                                                 attachments[attach].attachment, string_VkImageUsageFlagBits(usage_flag));
                            }
                        }
                    }
                }
            }
        }
    }
    return skip;
}

bool CoreChecks::ValidateFramebufferCreateInfo(const VkFramebufferCreateInfo *pCreateInfo) const {
    bool skip = false;

    const VkFramebufferAttachmentsCreateInfo *framebuffer_attachments_create_info =
        LvlFindInChain<VkFramebufferAttachmentsCreateInfo>(pCreateInfo->pNext);
    if ((pCreateInfo->flags & VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT) != 0) {
        if (!enabled_features.core12.imagelessFramebuffer) {
            skip |= LogError(device, "VUID-VkFramebufferCreateInfo-flags-03189",
                             "vkCreateFramebuffer(): VkFramebufferCreateInfo flags includes VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT, "
                             "but the imagelessFramebuffer feature is not enabled.");
        }

        if (framebuffer_attachments_create_info == nullptr) {
            skip |= LogError(device, "VUID-VkFramebufferCreateInfo-flags-03190",
                             "vkCreateFramebuffer(): VkFramebufferCreateInfo flags includes VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT, "
                             "but no instance of VkFramebufferAttachmentsCreateInfo is present in the pNext chain.");
        } else {
            if (framebuffer_attachments_create_info->attachmentImageInfoCount != 0 &&
                framebuffer_attachments_create_info->attachmentImageInfoCount != pCreateInfo->attachmentCount) {
                skip |= LogError(device, "VUID-VkFramebufferCreateInfo-flags-03191",
                                 "vkCreateFramebuffer(): VkFramebufferCreateInfo attachmentCount is %u, but "
                                 "VkFramebufferAttachmentsCreateInfo attachmentImageInfoCount is %u.",
                                 pCreateInfo->attachmentCount, framebuffer_attachments_create_info->attachmentImageInfoCount);
            }
        }
    }

    auto rp_state = GetRenderPassState(pCreateInfo->renderPass);
    if (rp_state) {
        const VkRenderPassCreateInfo2 *rpci = rp_state->createInfo.ptr();
        if (rpci->attachmentCount != pCreateInfo->attachmentCount) {
            skip |= LogError(pCreateInfo->renderPass, "VUID-VkFramebufferCreateInfo-attachmentCount-00876",
                             "vkCreateFramebuffer(): VkFramebufferCreateInfo attachmentCount of %u does not match attachmentCount "
                             "of %u of %s being used to create Framebuffer.",
                             pCreateInfo->attachmentCount, rpci->attachmentCount,
                             report_data->FormatHandle(pCreateInfo->renderPass).c_str());
        } else {
            // attachmentCounts match, so make sure corresponding attachment details line up
            if ((pCreateInfo->flags & VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT) == 0) {
                const VkImageView *image_views = pCreateInfo->pAttachments;
                for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
                    auto view_state = GetImageViewState(image_views[i]);
                    if (view_state == nullptr) {
                        skip |= LogError(
                            image_views[i], "VUID-VkFramebufferCreateInfo-flags-02778",
                            "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u is not a valid VkImageView.", i);
                    } else {
                        auto &ivci = view_state->create_info;
                        if (ivci.format != rpci->pAttachments[i].format) {
                            skip |= LogError(
                                pCreateInfo->renderPass, "VUID-VkFramebufferCreateInfo-pAttachments-00880",
                                "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has format of %s that does not "
                                "match the format of %s used by the corresponding attachment for %s.",
                                i, string_VkFormat(ivci.format), string_VkFormat(rpci->pAttachments[i].format),
                                report_data->FormatHandle(pCreateInfo->renderPass).c_str());
                        }
                        const VkImageCreateInfo *ici = &GetImageState(ivci.image)->createInfo;
                        if (ici->samples != rpci->pAttachments[i].samples) {
                            skip |= LogError(pCreateInfo->renderPass, "VUID-VkFramebufferCreateInfo-pAttachments-00881",
                                             "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has %s samples that do not "
                                             "match the %s "
                                             "samples used by the corresponding attachment for %s.",
                                             i, string_VkSampleCountFlagBits(ici->samples),
string_VkSampleCountFlagBits(rpci->pAttachments[i].samples), report_data->FormatHandle(pCreateInfo->renderPass).c_str()); } // Verify that image memory is valid auto image_data = GetImageState(ivci.image); skip |= ValidateMemoryIsBoundToImage(image_data, "vkCreateFramebuffer()", "UNASSIGNED-CoreValidation-BoundResourceFreedMemoryAccess"); // Verify that view only has a single mip level if (ivci.subresourceRange.levelCount != 1) { skip |= LogError( device, "VUID-VkFramebufferCreateInfo-pAttachments-00883", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has mip levelCount of %u but " "only a single mip level (levelCount == 1) is allowed when creating a Framebuffer.", i, ivci.subresourceRange.levelCount); } const uint32_t mip_level = ivci.subresourceRange.baseMipLevel; uint32_t mip_width = max(1u, ici->extent.width >> mip_level); uint32_t mip_height = max(1u, ici->extent.height >> mip_level); bool used_as_input_color_resolve_depth_stencil_attachment = false; bool used_as_fragment_shading_rate_attachment = false; bool fsr_non_zero_viewmasks = false; for (uint32_t j = 0; j < rpci->subpassCount; ++j) { const VkSubpassDescription2 &subpass = rpci->pSubpasses[j]; uint32_t highest_view_bit = 0; for (uint32_t k = 0; k < 32; ++k) { if (((subpass.viewMask >> k) & 1) != 0) { highest_view_bit = k; } } for (uint32_t k = 0; k < rpci->pSubpasses[j].inputAttachmentCount; ++k) { if (subpass.pInputAttachments[k].attachment == i) { used_as_input_color_resolve_depth_stencil_attachment = true; break; } } for (uint32_t k = 0; k < rpci->pSubpasses[j].colorAttachmentCount; ++k) { if (subpass.pColorAttachments[k].attachment == i || (subpass.pResolveAttachments && subpass.pResolveAttachments[k].attachment == i)) { used_as_input_color_resolve_depth_stencil_attachment = true; break; } } if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment == i) { used_as_input_color_resolve_depth_stencil_attachment = true; } if (used_as_input_color_resolve_depth_stencil_attachment) { if (ivci.subresourceRange.layerCount <= highest_view_bit) { skip |= LogError( device, "VUID-VkFramebufferCreateInfo-renderPass-04536", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has a layer count (%u) " "less than or equal to the highest bit in the view mask (%u) of subpass %u.", i, ivci.subresourceRange.layerCount, highest_view_bit, j); } } if (enabled_features.fragment_shading_rate_features.attachmentFragmentShadingRate) { const VkFragmentShadingRateAttachmentInfoKHR *fsr_attachment; fsr_attachment = LvlFindInChain<VkFragmentShadingRateAttachmentInfoKHR>(subpass.pNext); if (fsr_attachment && fsr_attachment->pFragmentShadingRateAttachment->attachment == i) { used_as_fragment_shading_rate_attachment = true; if ((mip_width * fsr_attachment->shadingRateAttachmentTexelSize.width) < pCreateInfo->width) { skip |= LogError(device, "VUID-VkFramebufferCreateInfo-flags-04539", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u mip level " "%u is used as a " "fragment shading rate attachment in subpass %u, but the product of its " "width (%u) and the " "specified shading rate texel width (%u) are smaller than the " "corresponding framebuffer width (%u).", i, ivci.subresourceRange.baseMipLevel, j, mip_width, fsr_attachment->shadingRateAttachmentTexelSize.width, pCreateInfo->width); } if ((mip_height * fsr_attachment->shadingRateAttachmentTexelSize.height) < pCreateInfo->height) { skip |= LogError(device, "VUID-VkFramebufferCreateInfo-flags-04540", "vkCreateFramebuffer(): 
VkFramebufferCreateInfo attachment #%u mip level %u " "is used as a " "fragment shading rate attachment in subpass %u, but the product of its " "height (%u) and the " "specified shading rate texel height (%u) are smaller than the corresponding " "framebuffer height (%u).", i, ivci.subresourceRange.baseMipLevel, j, mip_height, fsr_attachment->shadingRateAttachmentTexelSize.height, pCreateInfo->height);
}
if (highest_view_bit != 0) { fsr_non_zero_viewmasks = true; }
if (ivci.subresourceRange.layerCount <= highest_view_bit) {
skip |= LogError( device, "VUID-VkFramebufferCreateInfo-flags-04537", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has a layer count (%u) " "less than or equal to the highest bit in the view mask (%u) of subpass %u.", i, ivci.subresourceRange.layerCount, highest_view_bit, j);
}
} } }
if (enabled_features.fragment_density_map_features.fragmentDensityMap) {
const VkRenderPassFragmentDensityMapCreateInfoEXT *fdm_attachment;
fdm_attachment = LvlFindInChain<VkRenderPassFragmentDensityMapCreateInfoEXT>(rpci->pNext);
if (fdm_attachment && fdm_attachment->fragmentDensityMapAttachment.attachment == i) {
uint32_t ceiling_width = static_cast<uint32_t>(ceil( static_cast<float>(pCreateInfo->width) / std::max(static_cast<float>( phys_dev_ext_props.fragment_density_map_props.maxFragmentDensityTexelSize.width), 1.0f)));
if (mip_width < ceiling_width) {
skip |= LogError( device, "VUID-VkFramebufferCreateInfo-pAttachments-02555", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u mip level %u has width (%u) " "smaller than the ceiling (%u) of framebuffer width / maxFragmentDensityTexelSize.width.", i, ivci.subresourceRange.baseMipLevel, mip_width, ceiling_width);
}
uint32_t ceiling_height = static_cast<uint32_t>(ceil( static_cast<float>(pCreateInfo->height) / std::max(static_cast<float>( phys_dev_ext_props.fragment_density_map_props.maxFragmentDensityTexelSize.height), 1.0f)));
if (mip_height < ceiling_height) {
skip |= LogError( device, "VUID-VkFramebufferCreateInfo-pAttachments-02556", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u mip level %u has height (%u) " "smaller than the ceiling (%u) of framebuffer height / maxFragmentDensityTexelSize.height.", i, ivci.subresourceRange.baseMipLevel, mip_height, ceiling_height);
}
} }
if (used_as_input_color_resolve_depth_stencil_attachment) {
if (mip_width < pCreateInfo->width) {
skip |= LogError(device, "VUID-VkFramebufferCreateInfo-flags-04533", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u mip level %u has width (%u) " "smaller than the corresponding framebuffer width (%u).", i, mip_level, mip_width, pCreateInfo->width);
}
if (mip_height < pCreateInfo->height) {
skip |= LogError( device, "VUID-VkFramebufferCreateInfo-flags-04534", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u mip level %u has height (%u) " "smaller than the corresponding framebuffer height (%u).", i, mip_level, mip_height, pCreateInfo->height);
}
if (ivci.subresourceRange.layerCount < pCreateInfo->layers) {
skip |= LogError(device, "VUID-VkFramebufferCreateInfo-flags-04535", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has a layer count (%u) " "smaller than the corresponding framebuffer layer count (%u).", i, ivci.subresourceRange.layerCount, pCreateInfo->layers);
}
}
if (used_as_fragment_shading_rate_attachment && !fsr_non_zero_viewmasks) {
if (ivci.subresourceRange.layerCount != 1 && ivci.subresourceRange.layerCount < pCreateInfo->layers) {
skip |= LogError(device, "VUID-VkFramebufferCreateInfo-flags-04538", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has a layer count (%u) " "smaller than the corresponding framebuffer layer count (%u).", i, ivci.subresourceRange.layerCount, pCreateInfo->layers);
}
}
if (IsIdentitySwizzle(ivci.components) == false) {
skip |= LogError( device, "VUID-VkFramebufferCreateInfo-pAttachments-00884", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has a non-identity swizzle. All " "framebuffer attachments must have been created with the identity swizzle. Here are the actual " "swizzle values:\n" "r swizzle = %s\n" "g swizzle = %s\n" "b swizzle = %s\n" "a swizzle = %s\n", i, string_VkComponentSwizzle(ivci.components.r), string_VkComponentSwizzle(ivci.components.g), string_VkComponentSwizzle(ivci.components.b), string_VkComponentSwizzle(ivci.components.a));
}
if ((ivci.viewType == VK_IMAGE_VIEW_TYPE_2D) || (ivci.viewType == VK_IMAGE_VIEW_TYPE_2D_ARRAY)) {
const auto image_state = GetImageState(ivci.image);
if (image_state && image_state->createInfo.imageType == VK_IMAGE_TYPE_3D) {
if (FormatIsDepthOrStencil(ivci.format)) {
LogObjectList objlist(device);
objlist.add(ivci.image);
skip |= LogError( objlist, "VUID-VkFramebufferCreateInfo-pAttachments-00891", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has an image view type of " "%s " "which was taken from image %s of type VK_IMAGE_TYPE_3D, but the image view format is a " "depth/stencil format %s", i, string_VkImageViewType(ivci.viewType), report_data->FormatHandle(ivci.image).c_str(), string_VkFormat(ivci.format));
}
} }
if (ivci.viewType == VK_IMAGE_VIEW_TYPE_3D) {
LogObjectList objlist(device);
objlist.add(image_views[i]);
skip |= LogError(objlist, "VUID-VkFramebufferCreateInfo-flags-04113", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has an image view type " "of VK_IMAGE_VIEW_TYPE_3D", i);
}
} } } else if (framebuffer_attachments_create_info) {
// VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT is set
for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
auto &aii = framebuffer_attachments_create_info->pAttachmentImageInfos[i];
bool format_found = false;
for (uint32_t j = 0; j < aii.viewFormatCount; ++j) { if (aii.pViewFormats[j] == rpci->pAttachments[i].format) { format_found = true; } }
if (!format_found) {
skip |= LogError(pCreateInfo->renderPass, "VUID-VkFramebufferCreateInfo-flags-03205", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment info #%u does not include " "format %s used " "by the corresponding attachment for renderPass (%s).", i, string_VkFormat(rpci->pAttachments[i].format), report_data->FormatHandle(pCreateInfo->renderPass).c_str());
}
bool used_as_input_color_resolve_depth_stencil_attachment = false;
bool used_as_fragment_shading_rate_attachment = false;
bool fsr_non_zero_viewmasks = false;
for (uint32_t j = 0; j < rpci->subpassCount; ++j) {
const VkSubpassDescription2 &subpass = rpci->pSubpasses[j];
uint32_t highest_view_bit = 0;
for (uint32_t k = 0; k < 32; ++k) { if (((subpass.viewMask >> k) & 1) != 0) { highest_view_bit = k; } }
for (uint32_t k = 0; k < rpci->pSubpasses[j].inputAttachmentCount; ++k) {
if (subpass.pInputAttachments[k].attachment == i) {
used_as_input_color_resolve_depth_stencil_attachment = true;
break; } } for (uint32_t k = 0; k < rpci->pSubpasses[j].colorAttachmentCount; ++k) { if (subpass.pColorAttachments[k].attachment == i || (subpass.pResolveAttachments && subpass.pResolveAttachments[k].attachment == i)) { used_as_input_color_resolve_depth_stencil_attachment = true; break; } } if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment == i) { used_as_input_color_resolve_depth_stencil_attachment = true; } if (enabled_features.fragment_shading_rate_features.attachmentFragmentShadingRate) { const VkFragmentShadingRateAttachmentInfoKHR *fsr_attachment; fsr_attachment = LvlFindInChain<VkFragmentShadingRateAttachmentInfoKHR>(subpass.pNext); if (fsr_attachment && fsr_attachment->pFragmentShadingRateAttachment->attachment == i) { used_as_fragment_shading_rate_attachment = true; if ((aii.width * fsr_attachment->shadingRateAttachmentTexelSize.width) < pCreateInfo->width) { skip |= LogError( device, "VUID-VkFramebufferCreateInfo-flags-04543", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u is used as a " "fragment shading rate attachment in subpass %u, but the product of its width (%u) and the " "specified shading rate texel width (%u) are smaller than the corresponding framebuffer " "width (%u).", i, j, aii.width, fsr_attachment->shadingRateAttachmentTexelSize.width, pCreateInfo->width); } if ((aii.height * fsr_attachment->shadingRateAttachmentTexelSize.height) < pCreateInfo->height) { skip |= LogError(device, "VUID-VkFramebufferCreateInfo-flags-04544", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u is used as a " "fragment shading rate attachment in subpass %u, but the product of its " "height (%u) and the " "specified shading rate texel height (%u) are smaller than the corresponding " "framebuffer height (%u).", i, j, aii.height, fsr_attachment->shadingRateAttachmentTexelSize.height, pCreateInfo->height); } if (highest_view_bit != 0) { fsr_non_zero_viewmasks = true; } if (aii.layerCount != 1 && aii.layerCount <= highest_view_bit) { skip |= LogError( device, kVUIDUndefined, "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has a layer count (%u) " "less than or equal to the highest bit in the view mask (%u) of subpass %u.", i, aii.layerCount, highest_view_bit, j); } } } } if (used_as_input_color_resolve_depth_stencil_attachment) { if (aii.width < pCreateInfo->width) { skip |= LogError( device, "VUID-VkFramebufferCreateInfo-flags-04541", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment info #%u has a width of only #%u, " "but framebuffer has a width of #%u.", i, aii.width, pCreateInfo->width); } if (aii.height < pCreateInfo->height) { skip |= LogError( device, "VUID-VkFramebufferCreateInfo-flags-04542", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment info #%u has a height of only #%u, " "but framebuffer has a height of #%u.", i, aii.height, pCreateInfo->height); } const char *mismatched_layers_no_multiview_vuid = device_extensions.vk_khr_multiview ? 
"VUID-VkFramebufferCreateInfo-renderPass-04546" : "VUID-VkFramebufferCreateInfo-flags-04547"; if ((rpci->subpassCount == 0) || (rpci->pSubpasses[0].viewMask == 0)) { if (aii.layerCount < pCreateInfo->layers) { skip |= LogError( device, mismatched_layers_no_multiview_vuid, "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment info #%u has only #%u layers, " "but framebuffer has #%u layers.", i, aii.layerCount, pCreateInfo->layers); } } } if (used_as_fragment_shading_rate_attachment && !fsr_non_zero_viewmasks) { if (aii.layerCount != 1 && aii.layerCount < pCreateInfo->layers) { skip |= LogError(device, "VUID-VkFramebufferCreateInfo-flags-04545", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has a layer count (%u) " "smaller than the corresponding framebuffer layer count (%u).", i, aii.layerCount, pCreateInfo->layers); } } } // Validate image usage uint32_t attachment_index = VK_ATTACHMENT_UNUSED; for (uint32_t i = 0; i < rpci->subpassCount; ++i) { skip |= MatchUsage(rpci->pSubpasses[i].colorAttachmentCount, rpci->pSubpasses[i].pColorAttachments, pCreateInfo, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, "VUID-VkFramebufferCreateInfo-flags-03201"); skip |= MatchUsage(rpci->pSubpasses[i].colorAttachmentCount, rpci->pSubpasses[i].pResolveAttachments, pCreateInfo, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, "VUID-VkFramebufferCreateInfo-flags-03201"); skip |= MatchUsage(1, rpci->pSubpasses[i].pDepthStencilAttachment, pCreateInfo, VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, "VUID-VkFramebufferCreateInfo-flags-03202"); skip |= MatchUsage(rpci->pSubpasses[i].inputAttachmentCount, rpci->pSubpasses[i].pInputAttachments, pCreateInfo, VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT, "VUID-VkFramebufferCreateInfo-flags-03204"); const VkSubpassDescriptionDepthStencilResolve *depth_stencil_resolve = LvlFindInChain<VkSubpassDescriptionDepthStencilResolve>(rpci->pSubpasses[i].pNext); if (device_extensions.vk_khr_depth_stencil_resolve && depth_stencil_resolve != nullptr) { skip |= MatchUsage(1, depth_stencil_resolve->pDepthStencilResolveAttachment, pCreateInfo, VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, "VUID-VkFramebufferCreateInfo-flags-03203"); } const VkFragmentShadingRateAttachmentInfoKHR *fragment_shading_rate_attachment_info = LvlFindInChain<VkFragmentShadingRateAttachmentInfoKHR>(rpci->pSubpasses[i].pNext); if (enabled_features.fragment_shading_rate_features.attachmentFragmentShadingRate && fragment_shading_rate_attachment_info != nullptr) { skip |= MatchUsage(1, fragment_shading_rate_attachment_info->pFragmentShadingRateAttachment, pCreateInfo, VK_IMAGE_USAGE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR, "VUID-VkFramebufferCreateInfo-flags-04549"); } } if (device_extensions.vk_khr_multiview) { if ((rpci->subpassCount > 0) && (rpci->pSubpasses[0].viewMask != 0)) { for (uint32_t i = 0; i < rpci->subpassCount; ++i) { const VkSubpassDescriptionDepthStencilResolve *depth_stencil_resolve = LvlFindInChain<VkSubpassDescriptionDepthStencilResolve>(rpci->pSubpasses[i].pNext); uint32_t view_bits = rpci->pSubpasses[i].viewMask; uint32_t highest_view_bit = 0; for (int j = 0; j < 32; ++j) { if (((view_bits >> j) & 1) != 0) { highest_view_bit = j; } } for (uint32_t j = 0; j < rpci->pSubpasses[i].colorAttachmentCount; ++j) { attachment_index = rpci->pSubpasses[i].pColorAttachments[j].attachment; if (attachment_index != VK_ATTACHMENT_UNUSED) { uint32_t layer_count = framebuffer_attachments_create_info->pAttachmentImageInfos[attachment_index].layerCount; if (layer_count <= highest_view_bit) { skip |= LogError( 
pCreateInfo->renderPass, "VUID-VkFramebufferCreateInfo-renderPass-03198", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment info %u " "only specifies %u layers, but the view mask for subpass %u in renderPass (%s) " "includes layer %u, with that attachment specified as a color attachment %u.", attachment_index, layer_count, i, report_data->FormatHandle(pCreateInfo->renderPass).c_str(), highest_view_bit, j); } } if (rpci->pSubpasses[i].pResolveAttachments) { attachment_index = rpci->pSubpasses[i].pResolveAttachments[j].attachment; if (attachment_index != VK_ATTACHMENT_UNUSED) { uint32_t layer_count = framebuffer_attachments_create_info->pAttachmentImageInfos[attachment_index].layerCount; if (layer_count <= highest_view_bit) { skip |= LogError( pCreateInfo->renderPass, "VUID-VkFramebufferCreateInfo-renderPass-03198", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment info %u " "only specifies %u layers, but the view mask for subpass %u in renderPass (%s) " "includes layer %u, with that attachment specified as a resolve attachment %u.", attachment_index, layer_count, i, report_data->FormatHandle(pCreateInfo->renderPass).c_str(), highest_view_bit, j); } } } } for (uint32_t j = 0; j < rpci->pSubpasses[i].inputAttachmentCount; ++j) { attachment_index = rpci->pSubpasses[i].pInputAttachments[j].attachment; if (attachment_index != VK_ATTACHMENT_UNUSED) { uint32_t layer_count = framebuffer_attachments_create_info->pAttachmentImageInfos[attachment_index].layerCount; if (layer_count <= highest_view_bit) { skip |= LogError( pCreateInfo->renderPass, "VUID-VkFramebufferCreateInfo-renderPass-03198", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment info %u " "only specifies %u layers, but the view mask for subpass %u in renderPass (%s) " "includes layer %u, with that attachment specified as an input attachment %u.", attachment_index, layer_count, i, report_data->FormatHandle(pCreateInfo->renderPass).c_str(), highest_view_bit, j); } } } if (rpci->pSubpasses[i].pDepthStencilAttachment != nullptr) { attachment_index = rpci->pSubpasses[i].pDepthStencilAttachment->attachment; if (attachment_index != VK_ATTACHMENT_UNUSED) { uint32_t layer_count = framebuffer_attachments_create_info->pAttachmentImageInfos[attachment_index].layerCount; if (layer_count <= highest_view_bit) { skip |= LogError( pCreateInfo->renderPass, "VUID-VkFramebufferCreateInfo-renderPass-03198", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment info %u " "only specifies %u layers, but the view mask for subpass %u in renderPass (%s) " "includes layer %u, with that attachment specified as a depth/stencil attachment.", attachment_index, layer_count, i, report_data->FormatHandle(pCreateInfo->renderPass).c_str(), highest_view_bit); } } if (device_extensions.vk_khr_depth_stencil_resolve && depth_stencil_resolve != nullptr && depth_stencil_resolve->pDepthStencilResolveAttachment != nullptr) { attachment_index = depth_stencil_resolve->pDepthStencilResolveAttachment->attachment; if (attachment_index != VK_ATTACHMENT_UNUSED) { uint32_t layer_count = framebuffer_attachments_create_info->pAttachmentImageInfos[attachment_index].layerCount; if (layer_count <= highest_view_bit) { skip |= LogError( pCreateInfo->renderPass, "VUID-VkFramebufferCreateInfo-renderPass-03198", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment info %u " "only specifies %u layers, but the view mask for subpass %u in renderPass (%s) " "includes layer %u, with that attachment specified as a depth/stencil resolve " "attachment.", 
attachment_index, layer_count, i, report_data->FormatHandle(pCreateInfo->renderPass).c_str(), highest_view_bit); } } } } } } } } if ((pCreateInfo->flags & VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT) == 0) { // Verify correct attachment usage flags for (uint32_t subpass = 0; subpass < rpci->subpassCount; subpass++) { const VkSubpassDescription2 &subpass_description = rpci->pSubpasses[subpass]; // Verify input attachments: skip |= MatchUsage(subpass_description.inputAttachmentCount, subpass_description.pInputAttachments, pCreateInfo, VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT, "VUID-VkFramebufferCreateInfo-pAttachments-00879"); // Verify color attachments: skip |= MatchUsage(subpass_description.colorAttachmentCount, subpass_description.pColorAttachments, pCreateInfo, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, "VUID-VkFramebufferCreateInfo-pAttachments-00877"); // Verify depth/stencil attachments: skip |= MatchUsage(1, subpass_description.pDepthStencilAttachment, pCreateInfo, VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, "VUID-VkFramebufferCreateInfo-pAttachments-02633"); // Verify depth/stecnil resolve if (device_extensions.vk_khr_depth_stencil_resolve) { const VkSubpassDescriptionDepthStencilResolve *ds_resolve = LvlFindInChain<VkSubpassDescriptionDepthStencilResolve>(subpass_description.pNext); if (ds_resolve) { skip |= MatchUsage(1, ds_resolve->pDepthStencilResolveAttachment, pCreateInfo, VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, "VUID-VkFramebufferCreateInfo-pAttachments-02634"); } } // Verify fragment shading rate attachments if (enabled_features.fragment_shading_rate_features.attachmentFragmentShadingRate) { const VkFragmentShadingRateAttachmentInfoKHR *fragment_shading_rate_attachment_info = LvlFindInChain<VkFragmentShadingRateAttachmentInfoKHR>(subpass_description.pNext); if (fragment_shading_rate_attachment_info) { skip |= MatchUsage(1, fragment_shading_rate_attachment_info->pFragmentShadingRateAttachment, pCreateInfo, VK_IMAGE_USAGE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR, "VUID-VkFramebufferCreateInfo-flags-04548"); } } } } bool b_has_non_zero_view_masks = false; for (uint32_t i = 0; i < rpci->subpassCount; ++i) { if (rpci->pSubpasses[i].viewMask != 0) { b_has_non_zero_view_masks = true; break; } } if (b_has_non_zero_view_masks && pCreateInfo->layers != 1) { skip |= LogError(pCreateInfo->renderPass, "VUID-VkFramebufferCreateInfo-renderPass-02531", "vkCreateFramebuffer(): VkFramebufferCreateInfo has #%u layers but " "renderPass (%s) was specified with non-zero view masks\n", pCreateInfo->layers, report_data->FormatHandle(pCreateInfo->renderPass).c_str()); } } } // Verify FB dimensions are within physical device limits if (pCreateInfo->width > phys_dev_props.limits.maxFramebufferWidth) { skip |= LogError(device, "VUID-VkFramebufferCreateInfo-width-00886", "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo width exceeds physical device limits. Requested " "width: %u, device max: %u\n", pCreateInfo->width, phys_dev_props.limits.maxFramebufferWidth); } if (pCreateInfo->height > phys_dev_props.limits.maxFramebufferHeight) { skip |= LogError(device, "VUID-VkFramebufferCreateInfo-height-00888", "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo height exceeds physical device limits. 
Requested " "height: %u, device max: %u\n", pCreateInfo->height, phys_dev_props.limits.maxFramebufferHeight); } if (pCreateInfo->layers > phys_dev_props.limits.maxFramebufferLayers) { skip |= LogError(device, "VUID-VkFramebufferCreateInfo-layers-00890", "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo layers exceeds physical device limits. Requested " "layers: %u, device max: %u\n", pCreateInfo->layers, phys_dev_props.limits.maxFramebufferLayers); } // Verify FB dimensions are greater than zero if (pCreateInfo->width <= 0) { skip |= LogError(device, "VUID-VkFramebufferCreateInfo-width-00885", "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo width must be greater than zero."); } if (pCreateInfo->height <= 0) { skip |= LogError(device, "VUID-VkFramebufferCreateInfo-height-00887", "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo height must be greater than zero."); } if (pCreateInfo->layers <= 0) { skip |= LogError(device, "VUID-VkFramebufferCreateInfo-layers-00889", "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo layers must be greater than zero."); } return skip; } bool CoreChecks::PreCallValidateCreateFramebuffer(VkDevice device, const VkFramebufferCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkFramebuffer *pFramebuffer) const { // TODO : Verify that renderPass FB is created with is compatible with FB bool skip = false; skip |= ValidateFramebufferCreateInfo(pCreateInfo); return skip; } static bool FindDependency(const uint32_t index, const uint32_t dependent, const std::vector<DAGNode> &subpass_to_node, std::unordered_set<uint32_t> &processed_nodes) { // If we have already checked this node we have not found a dependency path so return false. if (processed_nodes.count(index)) return false; processed_nodes.insert(index); const DAGNode &node = subpass_to_node[index]; // Look for a dependency path. If one exists return true else recurse on the previous nodes. if (std::find(node.prev.begin(), node.prev.end(), dependent) == node.prev.end()) { for (auto elem : node.prev) { if (FindDependency(elem, dependent, subpass_to_node, processed_nodes)) return true; } } else { return true; } return false; } bool CoreChecks::IsImageLayoutReadOnly(VkImageLayout layout) const { if ((layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL) || (layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) || (layout == VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL) || (layout == VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL)) { return true; } return false; } bool CoreChecks::CheckDependencyExists(const VkRenderPass renderpass, const uint32_t subpass, const VkImageLayout layout, const std::vector<SubpassLayout> &dependent_subpasses, const std::vector<DAGNode> &subpass_to_node, bool &skip) const { bool result = true; bool b_image_layout_read_only = IsImageLayoutReadOnly(layout); // Loop through all subpasses that share the same attachment and make sure a dependency exists for (uint32_t k = 0; k < dependent_subpasses.size(); ++k) { const SubpassLayout &sp = dependent_subpasses[k]; if (subpass == sp.index) continue; if (b_image_layout_read_only && IsImageLayoutReadOnly(sp.layout)) continue; const DAGNode &node = subpass_to_node[subpass]; // Check for a specified dependency between the two nodes. If one exists we are done. 
auto prev_elem = std::find(node.prev.begin(), node.prev.end(), sp.index);
auto next_elem = std::find(node.next.begin(), node.next.end(), sp.index);
if (prev_elem == node.prev.end() && next_elem == node.next.end()) {
// If no dependency exists an implicit dependency still might. If not, throw an error.
std::unordered_set<uint32_t> processed_nodes;
if (!(FindDependency(subpass, sp.index, subpass_to_node, processed_nodes) || FindDependency(sp.index, subpass, subpass_to_node, processed_nodes))) {
skip |= LogError(renderpass, kVUID_Core_DrawState_InvalidRenderpass, "A dependency between subpasses %d and %d must exist but one is not specified.", subpass, sp.index);
result = false;
}
} } return result; }
bool CoreChecks::CheckPreserved(const VkRenderPass renderpass, const VkRenderPassCreateInfo2 *pCreateInfo, const int index, const uint32_t attachment, const std::vector<DAGNode> &subpass_to_node, int depth, bool &skip) const {
const DAGNode &node = subpass_to_node[index];
// If this node writes to the attachment, return true, as subsequent nodes need to preserve the attachment.
const VkSubpassDescription2 &subpass = pCreateInfo->pSubpasses[index];
for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) { if (attachment == subpass.pColorAttachments[j].attachment) return true; }
for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) { if (attachment == subpass.pInputAttachments[j].attachment) return true; }
if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) { if (attachment == subpass.pDepthStencilAttachment->attachment) return true; }
bool result = false;
// Loop through previous nodes and see if any of them write to the attachment.
for (auto elem : node.prev) { result |= CheckPreserved(renderpass, pCreateInfo, elem, attachment, subpass_to_node, depth + 1, skip); }
// If the attachment was written to by a previous node then this node needs to preserve it.
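// Only intermediate subpasses on the path need an entry in pPreserveAttachments; depth == 0 is the subpass that reads the attachment itself.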
if (result && depth > 0) {
bool has_preserved = false;
for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
if (subpass.pPreserveAttachments[j] == attachment) { has_preserved = true; break; }
}
if (!has_preserved) {
skip |= LogError(renderpass, kVUID_Core_DrawState_InvalidRenderpass, "Attachment %d is used by a later subpass and must be preserved in subpass %d.", attachment, index);
}
} return result; }
template <class T> bool IsRangeOverlapping(T offset1, T size1, T offset2, T size2) {
// Two half-open ranges [offset, offset + size) overlap exactly when each one starts before the
// other ends; this also covers the cases where one range contains or exactly equals the other.
return (offset1 < (offset2 + size2)) && (offset2 < (offset1 + size1));
}
bool IsRegionOverlapping(VkImageSubresourceRange range1, VkImageSubresourceRange range2) {
return (IsRangeOverlapping(range1.baseMipLevel, range1.levelCount, range2.baseMipLevel, range2.levelCount) && IsRangeOverlapping(range1.baseArrayLayer, range1.layerCount, range2.baseArrayLayer, range2.layerCount));
}
bool CoreChecks::ValidateDependencies(FRAMEBUFFER_STATE const *framebuffer, RENDER_PASS_STATE const *renderPass) const {
bool skip = false;
auto const framebuffer_info = framebuffer->createInfo.ptr();
auto const create_info = renderPass->createInfo.ptr();
auto const &subpass_to_node = renderPass->subpassToNode;
struct Attachment { std::vector<SubpassLayout> outputs; std::vector<SubpassLayout> inputs; std::vector<uint32_t> overlapping; };
std::vector<Attachment> attachments(create_info->attachmentCount);
if (!(framebuffer_info->flags & VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT)) {
// Find overlapping attachments
for (uint32_t i = 0; i < create_info->attachmentCount; ++i) {
for (uint32_t j = i + 1; j < create_info->attachmentCount; ++j) {
VkImageView viewi = framebuffer_info->pAttachments[i];
VkImageView viewj = framebuffer_info->pAttachments[j];
if (viewi == viewj) { attachments[i].overlapping.emplace_back(j); attachments[j].overlapping.emplace_back(i); continue; }
auto view_state_i = GetImageViewState(viewi);
auto view_state_j = GetImageViewState(viewj);
if (!view_state_i || !view_state_j) { continue; }
auto view_ci_i = view_state_i->create_info;
auto view_ci_j = view_state_j->create_info;
if (view_ci_i.image == view_ci_j.image && IsRegionOverlapping(view_ci_i.subresourceRange, view_ci_j.subresourceRange)) { attachments[i].overlapping.emplace_back(j); attachments[j].overlapping.emplace_back(i); continue; }
auto image_data_i = GetImageState(view_ci_i.image);
auto image_data_j = GetImageState(view_ci_j.image);
if (!image_data_i || !image_data_j) { continue; }
if (image_data_i->binding.mem_state == image_data_j->binding.mem_state && IsRangeOverlapping(image_data_i->binding.offset, image_data_i->binding.size, image_data_j->binding.offset, image_data_j->binding.size)) { attachments[i].overlapping.emplace_back(j); attachments[j].overlapping.emplace_back(i); }
} } }
// For each attachment, find the subpasses that use it.
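// Uses recorded below are mirrored onto every overlapping attachment, so aliased views and aliased memory are treated conservatively.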
unordered_set<uint32_t> attachment_indices; for (uint32_t i = 0; i < create_info->subpassCount; ++i) { const VkSubpassDescription2 &subpass = create_info->pSubpasses[i]; attachment_indices.clear(); for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) { uint32_t attachment = subpass.pInputAttachments[j].attachment; if (attachment == VK_ATTACHMENT_UNUSED) continue; SubpassLayout sp = {i, subpass.pInputAttachments[j].layout}; attachments[attachment].inputs.emplace_back(sp); for (auto overlapping_attachment : attachments[attachment].overlapping) { attachments[overlapping_attachment].inputs.emplace_back(sp); } } for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) { uint32_t attachment = subpass.pColorAttachments[j].attachment; if (attachment == VK_ATTACHMENT_UNUSED) continue; SubpassLayout sp = {i, subpass.pColorAttachments[j].layout}; attachments[attachment].outputs.emplace_back(sp); for (auto overlapping_attachment : attachments[attachment].overlapping) { attachments[overlapping_attachment].outputs.emplace_back(sp); } attachment_indices.insert(attachment); } if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) { uint32_t attachment = subpass.pDepthStencilAttachment->attachment; SubpassLayout sp = {i, subpass.pDepthStencilAttachment->layout}; attachments[attachment].outputs.emplace_back(sp); for (auto overlapping_attachment : attachments[attachment].overlapping) { attachments[overlapping_attachment].outputs.emplace_back(sp); } if (attachment_indices.count(attachment)) { skip |= LogError(renderPass->renderPass, kVUID_Core_DrawState_InvalidRenderpass, "Cannot use same attachment (%u) as both color and depth output in same subpass (%u).", attachment, i); } } } // If there is a dependency needed make sure one exists for (uint32_t i = 0; i < create_info->subpassCount; ++i) { const VkSubpassDescription2 &subpass = create_info->pSubpasses[i]; // If the attachment is an input then all subpasses that output must have a dependency relationship for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) { uint32_t attachment = subpass.pInputAttachments[j].attachment; if (attachment == VK_ATTACHMENT_UNUSED) continue; CheckDependencyExists(renderPass->renderPass, i, subpass.pInputAttachments[j].layout, attachments[attachment].outputs, subpass_to_node, skip); } // If the attachment is an output then all subpasses that use the attachment must have a dependency relationship for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) { uint32_t attachment = subpass.pColorAttachments[j].attachment; if (attachment == VK_ATTACHMENT_UNUSED) continue; CheckDependencyExists(renderPass->renderPass, i, subpass.pColorAttachments[j].layout, attachments[attachment].outputs, subpass_to_node, skip); CheckDependencyExists(renderPass->renderPass, i, subpass.pColorAttachments[j].layout, attachments[attachment].inputs, subpass_to_node, skip); } if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) { const uint32_t &attachment = subpass.pDepthStencilAttachment->attachment; CheckDependencyExists(renderPass->renderPass, i, subpass.pDepthStencilAttachment->layout, attachments[attachment].outputs, subpass_to_node, skip); CheckDependencyExists(renderPass->renderPass, i, subpass.pDepthStencilAttachment->layout, attachments[attachment].inputs, subpass_to_node, skip); } } // Loop through implicit dependencies, if this pass reads make sure the attachment is preserved for all passes after it was // written. 
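// CheckPreserved recurses backwards through node.prev from each reading subpass and reports intermediate subpasses that follow a writer but do not preserve the attachment.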
for (uint32_t i = 0; i < create_info->subpassCount; ++i) {
const VkSubpassDescription2 &subpass = create_info->pSubpasses[i];
for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
CheckPreserved(renderPass->renderPass, create_info, i, subpass.pInputAttachments[j].attachment, subpass_to_node, 0, skip);
}
} return skip; }
bool CoreChecks::ValidateRenderPassDAG(RenderPassCreateVersion rp_version, const VkRenderPassCreateInfo2 *pCreateInfo) const {
bool skip = false;
const char *vuid;
const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2);
for (uint32_t i = 0; i < pCreateInfo->dependencyCount; ++i) {
const VkSubpassDependency2 &dependency = pCreateInfo->pDependencies[i];
auto latest_src_stage = sync_utils::GetLogicallyLatestGraphicsPipelineStage(dependency.srcStageMask);
auto earliest_dst_stage = sync_utils::GetLogicallyEarliestGraphicsPipelineStage(dependency.dstStageMask);
// The first subpass here serves as a good proxy for "is multiview enabled" - since all view masks need to be non-zero if any are, which enables multiview.
if (use_rp2 && (dependency.dependencyFlags & VK_DEPENDENCY_VIEW_LOCAL_BIT) && (pCreateInfo->pSubpasses[0].viewMask == 0)) {
skip |= LogError( device, "VUID-VkRenderPassCreateInfo2-viewMask-03059", "Dependency %u specifies the VK_DEPENDENCY_VIEW_LOCAL_BIT, but multiview is not enabled for this render pass.", i);
} else if (use_rp2 && !(dependency.dependencyFlags & VK_DEPENDENCY_VIEW_LOCAL_BIT) && dependency.viewOffset != 0) {
skip |= LogError(device, "VUID-VkSubpassDependency2-dependencyFlags-03092", "Dependency %u does not specify VK_DEPENDENCY_VIEW_LOCAL_BIT, but specifies a non-zero view offset of %u.", i, dependency.viewOffset);
} else if (dependency.srcSubpass == VK_SUBPASS_EXTERNAL || dependency.dstSubpass == VK_SUBPASS_EXTERNAL) {
if (dependency.srcSubpass == dependency.dstSubpass) {
vuid = use_rp2 ? "VUID-VkSubpassDependency2-srcSubpass-03085" : "VUID-VkSubpassDependency-srcSubpass-00865";
skip |= LogError(device, vuid, "The src and dst subpasses in dependency %u are both external.", i);
} else if (dependency.dependencyFlags & VK_DEPENDENCY_VIEW_LOCAL_BIT) {
if (dependency.srcSubpass == VK_SUBPASS_EXTERNAL) {
vuid = "VUID-VkSubpassDependency-dependencyFlags-02520";
} else { // dependency.dstSubpass == VK_SUBPASS_EXTERNAL
vuid = "VUID-VkSubpassDependency-dependencyFlags-02521";
}
if (use_rp2) {
// Create render pass 2 distinguishes between source and destination external dependencies.
if (dependency.srcSubpass == VK_SUBPASS_EXTERNAL) {
vuid = "VUID-VkSubpassDependency2-dependencyFlags-03090";
} else {
vuid = "VUID-VkSubpassDependency2-dependencyFlags-03091";
}
}
skip |= LogError(device, vuid, "Dependency %u specifies an external dependency but also specifies VK_DEPENDENCY_VIEW_LOCAL_BIT.", i);
}
} else if (dependency.srcSubpass > dependency.dstSubpass) {
vuid = use_rp2 ? "VUID-VkSubpassDependency2-srcSubpass-03084" : "VUID-VkSubpassDependency-srcSubpass-00864";
skip |= LogError(device, vuid, "Dependency %u specifies a dependency from a later subpass (%u) to an earlier subpass (%u), which is " "disallowed to prevent cyclic dependencies.", i, dependency.srcSubpass, dependency.dstSubpass);
} else if (dependency.srcSubpass == dependency.dstSubpass) {
if (dependency.viewOffset != 0) {
vuid = use_rp2 ?
"VUID-VkSubpassDependency2-viewOffset-02530" : "VUID-VkRenderPassCreateInfo-pNext-01930"; skip |= LogError(device, vuid, "Dependency %u specifies a self-dependency but has a non-zero view offset of %u", i, dependency.viewOffset); } else if ((dependency.dependencyFlags | VK_DEPENDENCY_VIEW_LOCAL_BIT) != dependency.dependencyFlags && pCreateInfo->pSubpasses[dependency.srcSubpass].viewMask > 1) { vuid = use_rp2 ? "VUID-VkRenderPassCreateInfo2-pDependencies-03060" : "VUID-VkSubpassDependency-srcSubpass-00872"; skip |= LogError(device, vuid, "Dependency %u specifies a self-dependency for subpass %u with a non-zero view mask, but does not " "specify VK_DEPENDENCY_VIEW_LOCAL_BIT.", i, dependency.srcSubpass); } else if ((HasNonFramebufferStagePipelineStageFlags(dependency.srcStageMask) || HasNonFramebufferStagePipelineStageFlags(dependency.dstStageMask)) && (sync_utils::GetGraphicsPipelineStageLogicalOrdinal(latest_src_stage) > sync_utils::GetGraphicsPipelineStageLogicalOrdinal(earliest_dst_stage))) { vuid = use_rp2 ? "VUID-VkSubpassDependency2-srcSubpass-03087" : "VUID-VkSubpassDependency-srcSubpass-00867"; skip |= LogError( device, vuid, "Dependency %u specifies a self-dependency from logically-later stage (%s) to a logically-earlier stage (%s).", i, sync_utils::StringPipelineStageFlags(latest_src_stage).c_str(), sync_utils::StringPipelineStageFlags(earliest_dst_stage).c_str()); } else if ((HasNonFramebufferStagePipelineStageFlags(dependency.srcStageMask) == false) && (HasNonFramebufferStagePipelineStageFlags(dependency.dstStageMask) == false) && ((dependency.dependencyFlags & VK_DEPENDENCY_BY_REGION_BIT) == 0)) { vuid = use_rp2 ? "VUID-VkSubpassDependency2-srcSubpass-02245" : "VUID-VkSubpassDependency-srcSubpass-02243"; skip |= LogError(device, vuid, "Dependency %u specifies a self-dependency for subpass %u with both stages including a " "framebuffer-space stage, but does not specify VK_DEPENDENCY_BY_REGION_BIT in dependencyFlags.", i, dependency.srcSubpass); } } } return skip; } bool CoreChecks::ValidateAttachmentIndex(RenderPassCreateVersion rp_version, uint32_t attachment, uint32_t attachment_count, const char *error_type, const char *function_name) const { bool skip = false; const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2); assert(attachment != VK_ATTACHMENT_UNUSED); if (attachment >= attachment_count) { const char *vuid = use_rp2 ? 
"VUID-VkRenderPassCreateInfo2-attachment-03051" : "VUID-VkRenderPassCreateInfo-attachment-00834"; skip |= LogError(device, vuid, "%s: %s attachment %d must be less than the total number of attachments %d.", function_name, error_type, attachment, attachment_count); } return skip; } enum AttachmentType { ATTACHMENT_COLOR = 1, ATTACHMENT_DEPTH = 2, ATTACHMENT_INPUT = 4, ATTACHMENT_PRESERVE = 8, ATTACHMENT_RESOLVE = 16, }; char const *StringAttachmentType(uint8_t type) { switch (type) { case ATTACHMENT_COLOR: return "color"; case ATTACHMENT_DEPTH: return "depth"; case ATTACHMENT_INPUT: return "input"; case ATTACHMENT_PRESERVE: return "preserve"; case ATTACHMENT_RESOLVE: return "resolve"; default: return "(multiple)"; } } bool CoreChecks::AddAttachmentUse(RenderPassCreateVersion rp_version, uint32_t subpass, std::vector<uint8_t> &attachment_uses, std::vector<VkImageLayout> &attachment_layouts, uint32_t attachment, uint8_t new_use, VkImageLayout new_layout) const { if (attachment >= attachment_uses.size()) return false; /* out of range, but already reported */ bool skip = false; auto &uses = attachment_uses[attachment]; const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2); const char *vuid; const char *const function_name = use_rp2 ? "vkCreateRenderPass2()" : "vkCreateRenderPass()"; if (uses & new_use) { if (attachment_layouts[attachment] != new_layout) { vuid = use_rp2 ? "VUID-VkSubpassDescription2-layout-02528" : "VUID-VkSubpassDescription-layout-02519"; skip |= LogError(device, vuid, "%s: subpass %u already uses attachment %u with a different image layout (%s vs %s).", function_name, subpass, attachment, string_VkImageLayout(attachment_layouts[attachment]), string_VkImageLayout(new_layout)); } } else if (uses & ~ATTACHMENT_INPUT || (uses && (new_use == ATTACHMENT_RESOLVE || new_use == ATTACHMENT_PRESERVE))) { /* Note: input attachments are assumed to be done first. */ vuid = use_rp2 ? 
"VUID-VkSubpassDescription2-pPreserveAttachments-03074" : "VUID-VkSubpassDescription-pPreserveAttachments-00854"; skip |= LogError(device, vuid, "%s: subpass %u uses attachment %u as both %s and %s attachment.", function_name, subpass, attachment, StringAttachmentType(uses), StringAttachmentType(new_use)); } else { attachment_layouts[attachment] = new_layout; uses |= new_use; } return skip; } // Handles attachment references regardless of type (input, color, depth, etc) // Input attachments have extra VUs associated with them bool CoreChecks::ValidateAttachmentReference(RenderPassCreateVersion rp_version, VkAttachmentReference2 reference, bool input, const char *error_type, const char *function_name) const { bool skip = false; // Currently all VUs require attachment to not be UNUSED assert(reference.attachment != VK_ATTACHMENT_UNUSED); // currently VkAttachmentReference and VkAttachmentReference2 have no overlapping VUs if (rp_version == RENDER_PASS_VERSION_1) { switch (reference.layout) { case VK_IMAGE_LAYOUT_UNDEFINED: case VK_IMAGE_LAYOUT_PREINITIALIZED: case VK_IMAGE_LAYOUT_PRESENT_SRC_KHR: case VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL: case VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL: case VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL: case VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL: skip |= LogError(device, "VUID-VkAttachmentReference-layout-00857", "%s: Layout for %s is %s but must not be " "VK_IMAGE_LAYOUT_[UNDEFINED|PREINITIALIZED|PRESENT_SRC_KHR|DEPTH_ATTACHMENT_OPTIMAL|DEPTH_READ_" "ONLY_OPTIMAL|STENCIL_ATTACHMENT_OPTIMAL|STENCIL_READ_ONLY_OPTIMAL].", function_name, error_type, string_VkImageLayout(reference.layout)); break; default: break; } } else { const auto *attachment_reference_stencil_layout = LvlFindInChain<VkAttachmentReferenceStencilLayout>(reference.pNext); switch (reference.layout) { case VK_IMAGE_LAYOUT_UNDEFINED: case VK_IMAGE_LAYOUT_PREINITIALIZED: case VK_IMAGE_LAYOUT_PRESENT_SRC_KHR: skip |= LogError(device, "VUID-VkAttachmentReference2-layout-03077", "%s: Layout for %s is %s but must not be VK_IMAGE_LAYOUT_[UNDEFINED|PREINITIALIZED|PRESENT_SRC_KHR].", function_name, error_type, string_VkImageLayout(reference.layout)); break; case VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL: case VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL: // This check doesn't rely on the aspect mask value if (attachment_reference_stencil_layout) { const VkImageLayout stencil_layout = attachment_reference_stencil_layout->stencilLayout; // clang-format off if (stencil_layout == VK_IMAGE_LAYOUT_UNDEFINED || stencil_layout == VK_IMAGE_LAYOUT_PREINITIALIZED || stencil_layout == VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL || stencil_layout == VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL || stencil_layout == VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL || stencil_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL || stencil_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL || stencil_layout == VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL || stencil_layout == VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL || stencil_layout == VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) { skip |= LogError(device, "VUID-VkAttachmentReferenceStencilLayout-stencilLayout-03318", "%s: In %s with pNext chain instance VkAttachmentReferenceStencilLayout, " "the stencilLayout (%s) must not be " "VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_PREINITIALIZED, " "VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, " "VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL, " "VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL, " 
"VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, " "VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL, " "VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL, " "VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL, or " "VK_IMAGE_LAYOUT_PRESENT_SRC_KHR.", function_name, error_type, string_VkImageLayout(stencil_layout)); } } // clang-format on break; default: break; } // Extra case to check for all 4 seperate depth/stencil layout // This makes the above switch case much easier to read switch (reference.layout) { case VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL: case VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL: case VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL: case VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL: if (!enabled_features.core12.separateDepthStencilLayouts) { skip |= LogError(device, "VUID-VkAttachmentReference2-separateDepthStencilLayouts-03313", "%s: Layout for %s is %s but without separateDepthStencilLayouts enabled the layout must not " "be VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL, VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL, " "VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL, or VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL.", function_name, error_type, string_VkImageLayout(reference.layout)); } default: break; } } return skip; } bool CoreChecks::ValidateRenderpassAttachmentUsage(RenderPassCreateVersion rp_version, const VkRenderPassCreateInfo2 *pCreateInfo, const char *function_name) const { bool skip = false; const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2); const char *vuid; for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) { VkFormat format = pCreateInfo->pAttachments[i].format; if (pCreateInfo->pAttachments[i].initialLayout == VK_IMAGE_LAYOUT_UNDEFINED) { if ((FormatIsColor(format) || FormatHasDepth(format)) && pCreateInfo->pAttachments[i].loadOp == VK_ATTACHMENT_LOAD_OP_LOAD) { skip |= LogWarning(device, kVUID_Core_DrawState_InvalidRenderpass, "%s: Render pass pAttachment[%u] has loadOp == VK_ATTACHMENT_LOAD_OP_LOAD and initialLayout == " "VK_IMAGE_LAYOUT_UNDEFINED. This is probably not what you intended. Consider using " "VK_ATTACHMENT_LOAD_OP_DONT_CARE instead if the image truely is undefined at the start of the " "render pass.", function_name, i); } if (FormatHasStencil(format) && pCreateInfo->pAttachments[i].stencilLoadOp == VK_ATTACHMENT_LOAD_OP_LOAD) { skip |= LogWarning(device, kVUID_Core_DrawState_InvalidRenderpass, "%s: Render pass pAttachment[%u] has stencilLoadOp == VK_ATTACHMENT_LOAD_OP_LOAD and initialLayout " "== VK_IMAGE_LAYOUT_UNDEFINED. This is probably not what you intended. Consider using " "VK_ATTACHMENT_LOAD_OP_DONT_CARE instead if the image truely is undefined at the start of the " "render pass.", function_name, i); } } } // Track when we're observing the first use of an attachment std::vector<bool> attach_first_use(pCreateInfo->attachmentCount, true); // Track if attachments are used as input as well as another type std::unordered_set<uint32_t> input_attachments; for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) { const VkSubpassDescription2 &subpass = pCreateInfo->pSubpasses[i]; std::vector<uint8_t> attachment_uses(pCreateInfo->attachmentCount); std::vector<VkImageLayout> attachment_layouts(pCreateInfo->attachmentCount); if (subpass.pipelineBindPoint != VK_PIPELINE_BIND_POINT_GRAPHICS) { vuid = use_rp2 ? 
"VUID-VkSubpassDescription2-pipelineBindPoint-03062" : "VUID-VkSubpassDescription-pipelineBindPoint-00844"; skip |= LogError(device, vuid, "%s: Pipeline bind point for pSubpasses[%d] must be VK_PIPELINE_BIND_POINT_GRAPHICS.", function_name, i); } // Check input attachments first // - so we can detect first-use-as-input for VU #00349 // - if other color or depth/stencil is also input, it limits valid layouts for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) { auto const &attachment_ref = subpass.pInputAttachments[j]; const uint32_t attachment_index = attachment_ref.attachment; const VkImageAspectFlags aspect_mask = attachment_ref.aspectMask; if (attachment_index != VK_ATTACHMENT_UNUSED) { input_attachments.insert(attachment_index); std::string error_type = "pSubpasses[" + std::to_string(i) + "].pInputAttachments[" + std::to_string(j) + "]"; skip |= ValidateAttachmentReference(rp_version, attachment_ref, true, error_type.c_str(), function_name); skip |= ValidateAttachmentIndex(rp_version, attachment_index, pCreateInfo->attachmentCount, error_type.c_str(), function_name); if (aspect_mask & VK_IMAGE_ASPECT_METADATA_BIT) { vuid = use_rp2 ? "VUID-VkSubpassDescription2-attachment-02801" : "VUID-VkInputAttachmentAspectReference-aspectMask-01964"; skip |= LogError( device, vuid, "%s: Aspect mask for input attachment reference %d in subpass %d includes VK_IMAGE_ASPECT_METADATA_BIT.", function_name, j, i); } else if (aspect_mask & (VK_IMAGE_ASPECT_MEMORY_PLANE_0_BIT_EXT | VK_IMAGE_ASPECT_MEMORY_PLANE_1_BIT_EXT | VK_IMAGE_ASPECT_MEMORY_PLANE_2_BIT_EXT | VK_IMAGE_ASPECT_MEMORY_PLANE_3_BIT_EXT)) { vuid = use_rp2 ? "VUID-VkSubpassDescription2-attachment-04563" : "VUID-VkInputAttachmentAspectReference-aspectMask-02250"; skip |= LogError(device, vuid, "%s: Aspect mask for input attachment reference %d in subpass %d includes " "VK_IMAGE_ASPECT_MEMORY_PLANE_*_BIT_EXT bit.", function_name, j, i); } if (attachment_index < pCreateInfo->attachmentCount) { skip |= AddAttachmentUse(rp_version, i, attachment_uses, attachment_layouts, attachment_index, ATTACHMENT_INPUT, attachment_ref.layout); vuid = use_rp2 ? "VUID-VkRenderPassCreateInfo2-attachment-02525" : "VUID-VkRenderPassCreateInfo-pNext-01963"; skip |= ValidateImageAspectMask(VK_NULL_HANDLE, pCreateInfo->pAttachments[attachment_index].format, aspect_mask, function_name, vuid); if (attach_first_use[attachment_index]) { skip |= ValidateLayoutVsAttachmentDescription(report_data, rp_version, subpass.pInputAttachments[j].layout, attachment_index, pCreateInfo->pAttachments[attachment_index]); bool used_as_depth = (subpass.pDepthStencilAttachment != NULL && subpass.pDepthStencilAttachment->attachment == attachment_index); bool used_as_color = false; for (uint32_t k = 0; !used_as_depth && !used_as_color && k < subpass.colorAttachmentCount; ++k) { used_as_color = (subpass.pColorAttachments[k].attachment == attachment_index); } if (!used_as_depth && !used_as_color && pCreateInfo->pAttachments[attachment_index].loadOp == VK_ATTACHMENT_LOAD_OP_CLEAR) { vuid = use_rp2 ? 
"VUID-VkSubpassDescription2-loadOp-03064" : "VUID-VkSubpassDescription-loadOp-00846"; skip |= LogError(device, vuid, "%s: attachment %u is first used as an input attachment in %s with loadOp set to " "VK_ATTACHMENT_LOAD_OP_CLEAR.", function_name, attachment_index, error_type.c_str()); } } attach_first_use[attachment_index] = false; } if (rp_version == RENDER_PASS_VERSION_2) { // These are validated automatically as part of parameter validation for create renderpass 1 // as they are in a struct that only applies to input attachments - not so for v2. // Check for 0 if (aspect_mask == 0) { skip |= LogError(device, "VUID-VkSubpassDescription2-attachment-02800", "%s: Input attachment %s aspect mask must not be 0.", function_name, error_type.c_str()); } else { const VkImageAspectFlags valid_bits = (VK_IMAGE_ASPECT_COLOR_BIT | VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT | VK_IMAGE_ASPECT_METADATA_BIT | VK_IMAGE_ASPECT_PLANE_0_BIT | VK_IMAGE_ASPECT_PLANE_1_BIT | VK_IMAGE_ASPECT_PLANE_2_BIT | VK_IMAGE_ASPECT_MEMORY_PLANE_0_BIT_EXT | VK_IMAGE_ASPECT_MEMORY_PLANE_1_BIT_EXT | VK_IMAGE_ASPECT_MEMORY_PLANE_2_BIT_EXT | VK_IMAGE_ASPECT_MEMORY_PLANE_3_BIT_EXT); // Check for valid aspect mask bits if (aspect_mask & ~valid_bits) { skip |= LogError(device, "VUID-VkSubpassDescription2-attachment-02799", "%s: Input attachment %s aspect mask (0x%" PRIx32 ")is invalid.", function_name, error_type.c_str(), aspect_mask); } } } const VkFormatFeatureFlags valid_flags = VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT | VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT; const VkFormat attachment_format = pCreateInfo->pAttachments[attachment_index].format; const VkFormatFeatureFlags format_features = GetPotentialFormatFeatures(attachment_format); if ((format_features & valid_flags) == 0) { vuid = use_rp2 ? "VUID-VkSubpassDescription2-pInputAttachments-02897" : "VUID-VkSubpassDescription-pInputAttachments-02647"; skip |= LogError(device, vuid, "%s: Input attachment %s format (%s) does not contain VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT " "| VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT.", function_name, error_type.c_str(), string_VkFormat(attachment_format)); } // Validate layout vuid = use_rp2 ? "VUID-VkSubpassDescription2-None-04439" : "VUID-VkSubpassDescription-None-04437"; switch (attachment_ref.layout) { case VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR: case VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL: case VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL: case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL: case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL: case VK_IMAGE_LAYOUT_GENERAL: case VK_IMAGE_LAYOUT_READ_ONLY_OPTIMAL_KHR: break; // valid layouts default: skip |= LogError(device, vuid, "%s: %s layout is %s but input attachments must be " "VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR, " "VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL, " "VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL, " "VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL, " "VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, VK_IMAGE_LAYOUT_GENERAL, or " "VK_IMAGE_LAYOUT_READ_ONLY_OPTIMAL_KHR", function_name, error_type.c_str(), string_VkImageLayout(attachment_ref.layout)); break; } } } for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) { std::string error_type = "pSubpasses[" + std::to_string(i) + "].pPreserveAttachments[" + std::to_string(j) + "]"; uint32_t attachment = subpass.pPreserveAttachments[j]; if (attachment == VK_ATTACHMENT_UNUSED) { vuid = use_rp2 ? 
"VUID-VkSubpassDescription2-attachment-03073" : "VUID-VkSubpassDescription-attachment-00853"; skip |= LogError(device, vuid, "%s: Preserve attachment (%d) must not be VK_ATTACHMENT_UNUSED.", function_name, j); } else { skip |= ValidateAttachmentIndex(rp_version, attachment, pCreateInfo->attachmentCount, error_type.c_str(), function_name); if (attachment < pCreateInfo->attachmentCount) { skip |= AddAttachmentUse(rp_version, i, attachment_uses, attachment_layouts, attachment, ATTACHMENT_PRESERVE, VkImageLayout(0) /* preserve doesn't have any layout */); } } } bool subpass_performs_resolve = false; for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) { if (subpass.pResolveAttachments) { std::string error_type = "pSubpasses[" + std::to_string(i) + "].pResolveAttachments[" + std::to_string(j) + "]"; auto const &attachment_ref = subpass.pResolveAttachments[j]; if (attachment_ref.attachment != VK_ATTACHMENT_UNUSED) { skip |= ValidateAttachmentReference(rp_version, attachment_ref, false, error_type.c_str(), function_name); skip |= ValidateAttachmentIndex(rp_version, attachment_ref.attachment, pCreateInfo->attachmentCount, error_type.c_str(), function_name); if (attachment_ref.attachment < pCreateInfo->attachmentCount) { skip |= AddAttachmentUse(rp_version, i, attachment_uses, attachment_layouts, attachment_ref.attachment, ATTACHMENT_RESOLVE, attachment_ref.layout); subpass_performs_resolve = true; if (pCreateInfo->pAttachments[attachment_ref.attachment].samples != VK_SAMPLE_COUNT_1_BIT) { vuid = use_rp2 ? "VUID-VkSubpassDescription2-pResolveAttachments-03067" : "VUID-VkSubpassDescription-pResolveAttachments-00849"; skip |= LogError( device, vuid, "%s: Subpass %u requests multisample resolve into attachment %u, which must " "have VK_SAMPLE_COUNT_1_BIT but has %s.", function_name, i, attachment_ref.attachment, string_VkSampleCountFlagBits(pCreateInfo->pAttachments[attachment_ref.attachment].samples)); } } const VkFormat attachment_format = pCreateInfo->pAttachments[attachment_ref.attachment].format; const VkFormatFeatureFlags format_features = GetPotentialFormatFeatures(attachment_format); if ((format_features & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT) == 0) { vuid = use_rp2 ? 
"VUID-VkSubpassDescription2-pResolveAttachments-02899" : "VUID-VkSubpassDescription-pResolveAttachments-02649"; skip |= LogError(device, vuid, "%s: Resolve attachment %s format (%s) does not contain " "VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT.", function_name, error_type.c_str(), string_VkFormat(attachment_format)); } } } } if (subpass.pDepthStencilAttachment) { std::string error_type = "pSubpasses[" + std::to_string(i) + "].pDepthStencilAttachment"; const uint32_t attachment = subpass.pDepthStencilAttachment->attachment; const VkImageLayout image_layout = subpass.pDepthStencilAttachment->layout; if (attachment != VK_ATTACHMENT_UNUSED) { skip |= ValidateAttachmentReference(rp_version, *subpass.pDepthStencilAttachment, false, error_type.c_str(), function_name); skip |= ValidateAttachmentIndex(rp_version, attachment, pCreateInfo->attachmentCount, error_type.c_str(), function_name); if (attachment < pCreateInfo->attachmentCount) { skip |= AddAttachmentUse(rp_version, i, attachment_uses, attachment_layouts, attachment, ATTACHMENT_DEPTH, image_layout); if (attach_first_use[attachment]) { skip |= ValidateLayoutVsAttachmentDescription(report_data, rp_version, image_layout, attachment, pCreateInfo->pAttachments[attachment]); } attach_first_use[attachment] = false; } const VkFormat attachment_format = pCreateInfo->pAttachments[attachment].format; const VkFormatFeatureFlags format_features = GetPotentialFormatFeatures(attachment_format); if ((format_features & VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT) == 0) { vuid = use_rp2 ? "VUID-VkSubpassDescription2-pDepthStencilAttachment-02900" : "VUID-VkSubpassDescription-pDepthStencilAttachment-02650"; skip |= LogError(device, vuid, "%s: Depth Stencil %s format (%s) does not contain " "VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT.", function_name, error_type.c_str(), string_VkFormat(attachment_format)); } // Check for valid imageLayout vuid = use_rp2 ? 
"VUID-VkSubpassDescription2-None-04439" : "VUID-VkSubpassDescription-None-04437"; switch (image_layout) { case VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR: case VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL: case VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL: case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL: case VK_IMAGE_LAYOUT_GENERAL: break; // valid layouts case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL: case VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL: case VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL: case VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL: case VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL: case VK_IMAGE_LAYOUT_READ_ONLY_OPTIMAL_KHR: case VK_IMAGE_LAYOUT_ATTACHMENT_OPTIMAL_KHR: if (input_attachments.find(attachment) != input_attachments.end()) { skip |= LogError( device, vuid, "%s: %s is also an input attachment so the layout (%s) must not be " "VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL, " "VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL, VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL, " "VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL, VK_IMAGE_LAYOUT_READ_ONLY_OPTIMAL_KHR " "or VK_IMAGE_LAYOUT_ATTACHMENT_OPTIMAL_KHR.", function_name, error_type.c_str(), string_VkImageLayout(image_layout)); } break; default: skip |= LogError(device, vuid, "%s: %s layout is %s but depth/stencil attachments must be " "VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, " "VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL, " "VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL, " "VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL, " "VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL, " "VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR, " "VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL, " "VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL, " "VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL, " "VK_IMAGE_LAYOUT_GENERAL, ", "VK_IMAGE_LAYOUT_READ_ONLY_OPTIMAL_KHR or" "VK_IMAGE_LAYOUT_ATTACHMENT_OPTIMAL_KHR.", function_name, error_type.c_str(), string_VkImageLayout(image_layout)); break; } } } uint32_t last_sample_count_attachment = VK_ATTACHMENT_UNUSED; for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) { std::string error_type = "pSubpasses[" + std::to_string(i) + "].pColorAttachments[" + std::to_string(j) + "]"; auto const &attachment_ref = subpass.pColorAttachments[j]; const uint32_t attachment_index = attachment_ref.attachment; if (attachment_index != VK_ATTACHMENT_UNUSED) { skip |= ValidateAttachmentReference(rp_version, attachment_ref, false, error_type.c_str(), function_name); skip |= ValidateAttachmentIndex(rp_version, attachment_index, pCreateInfo->attachmentCount, error_type.c_str(), function_name); if (attachment_index < pCreateInfo->attachmentCount) { skip |= AddAttachmentUse(rp_version, i, attachment_uses, attachment_layouts, attachment_index, ATTACHMENT_COLOR, attachment_ref.layout); VkSampleCountFlagBits current_sample_count = pCreateInfo->pAttachments[attachment_index].samples; if (last_sample_count_attachment != VK_ATTACHMENT_UNUSED) { VkSampleCountFlagBits last_sample_count = pCreateInfo->pAttachments[subpass.pColorAttachments[last_sample_count_attachment].attachment].samples; if (current_sample_count != last_sample_count) { vuid = use_rp2 ? "VUID-VkSubpassDescription2-pColorAttachments-03069" : "VUID-VkSubpassDescription-pColorAttachments-01417"; skip |= LogError( device, vuid, "%s: Subpass %u attempts to render to color attachments with inconsistent sample counts." 
"Color attachment ref %u has sample count %s, whereas previous color attachment ref %u has " "sample count %s.", function_name, i, j, string_VkSampleCountFlagBits(current_sample_count), last_sample_count_attachment, string_VkSampleCountFlagBits(last_sample_count)); } } last_sample_count_attachment = j; if (subpass_performs_resolve && current_sample_count == VK_SAMPLE_COUNT_1_BIT) { vuid = use_rp2 ? "VUID-VkSubpassDescription2-pResolveAttachments-03066" : "VUID-VkSubpassDescription-pResolveAttachments-00848"; skip |= LogError(device, vuid, "%s: Subpass %u requests multisample resolve from attachment %u which has " "VK_SAMPLE_COUNT_1_BIT.", function_name, i, attachment_index); } if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED && subpass.pDepthStencilAttachment->attachment < pCreateInfo->attachmentCount) { const auto depth_stencil_sample_count = pCreateInfo->pAttachments[subpass.pDepthStencilAttachment->attachment].samples; if (device_extensions.vk_amd_mixed_attachment_samples) { if (pCreateInfo->pAttachments[attachment_index].samples > depth_stencil_sample_count) { vuid = use_rp2 ? "VUID-VkSubpassDescription2-pColorAttachments-03070" : "VUID-VkSubpassDescription-pColorAttachments-01506"; skip |= LogError(device, vuid, "%s: %s has %s which is larger than depth/stencil attachment %s.", function_name, error_type.c_str(), string_VkSampleCountFlagBits(pCreateInfo->pAttachments[attachment_index].samples), string_VkSampleCountFlagBits(depth_stencil_sample_count)); break; } } if (!device_extensions.vk_amd_mixed_attachment_samples && !device_extensions.vk_nv_framebuffer_mixed_samples && current_sample_count != depth_stencil_sample_count) { vuid = use_rp2 ? "VUID-VkSubpassDescription2-pDepthStencilAttachment-03071" : "VUID-VkSubpassDescription-pDepthStencilAttachment-01418"; skip |= LogError(device, vuid, "%s: Subpass %u attempts to render to use a depth/stencil attachment with sample " "count that differs " "from color attachment %u." "The depth attachment ref has sample count %s, whereas color attachment ref %u has " "sample count %s.", function_name, i, j, string_VkSampleCountFlagBits(depth_stencil_sample_count), j, string_VkSampleCountFlagBits(current_sample_count)); break; } } const VkFormat attachment_format = pCreateInfo->pAttachments[attachment_index].format; const VkFormatFeatureFlags format_features = GetPotentialFormatFeatures(attachment_format); if ((format_features & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT) == 0) { vuid = use_rp2 ? "VUID-VkSubpassDescription2-pColorAttachments-02898" : "VUID-VkSubpassDescription-pColorAttachments-02648"; skip |= LogError(device, vuid, "%s: Color attachment %s format (%s) does not contain " "VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT.", function_name, error_type.c_str(), string_VkFormat(attachment_format)); } if (attach_first_use[attachment_index]) { skip |= ValidateLayoutVsAttachmentDescription(report_data, rp_version, subpass.pColorAttachments[j].layout, attachment_index, pCreateInfo->pAttachments[attachment_index]); } attach_first_use[attachment_index] = false; } // Check for valid imageLayout vuid = use_rp2 ? 
"VUID-VkSubpassDescription2-None-04439" : "VUID-VkSubpassDescription-None-04437"; switch (attachment_ref.layout) { case VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR: case VK_IMAGE_LAYOUT_GENERAL: break; // valid layouts case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL: case VK_IMAGE_LAYOUT_ATTACHMENT_OPTIMAL_KHR: if (input_attachments.find(attachment_index) != input_attachments.end()) { skip |= LogError(device, vuid, "%s: %s is also an input attachment so the layout (%s) must not be " "VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL or VK_IMAGE_LAYOUT_ATTACHMENT_OPTIMAL_KHR.", function_name, error_type.c_str(), string_VkImageLayout(attachment_ref.layout)); } break; default: skip |= LogError(device, vuid, "%s: %s layout is %s but color attachments must be " "VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, " "VK_IMAGE_LAYOUT_ATTACHMENT_OPTIMAL_KHR, " "VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR or " "VK_IMAGE_LAYOUT_GENERAL.", function_name, error_type.c_str(), string_VkImageLayout(attachment_ref.layout)); break; } } if (subpass_performs_resolve && subpass.pResolveAttachments[j].attachment != VK_ATTACHMENT_UNUSED && subpass.pResolveAttachments[j].attachment < pCreateInfo->attachmentCount) { if (attachment_index == VK_ATTACHMENT_UNUSED) { vuid = use_rp2 ? "VUID-VkSubpassDescription2-pResolveAttachments-03065" : "VUID-VkSubpassDescription-pResolveAttachments-00847"; skip |= LogError(device, vuid, "%s: Subpass %u requests multisample resolve from attachment %u which has " "attachment=VK_ATTACHMENT_UNUSED.", function_name, i, attachment_index); } else { const auto &color_desc = pCreateInfo->pAttachments[attachment_index]; const auto &resolve_desc = pCreateInfo->pAttachments[subpass.pResolveAttachments[j].attachment]; if (color_desc.format != resolve_desc.format) { vuid = use_rp2 ? "VUID-VkSubpassDescription2-pResolveAttachments-03068" : "VUID-VkSubpassDescription-pResolveAttachments-00850"; skip |= LogError(device, vuid, "%s: %s resolves to an attachment with a " "different format. color format: %u, resolve format: %u.", function_name, error_type.c_str(), color_desc.format, resolve_desc.format); } } } } } return skip; } bool CoreChecks::ValidateCreateRenderPass(VkDevice device, RenderPassCreateVersion rp_version, const VkRenderPassCreateInfo2 *pCreateInfo, const char *function_name) const { bool skip = false; const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2); const char *vuid; skip |= ValidateRenderpassAttachmentUsage(rp_version, pCreateInfo, function_name); skip |= ValidateRenderPassDAG(rp_version, pCreateInfo); // Validate multiview correlation and view masks bool view_mask_zero = false; bool view_mask_non_zero = false; for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) { const VkSubpassDescription2 &subpass = pCreateInfo->pSubpasses[i]; if (subpass.viewMask != 0) { view_mask_non_zero = true; } else { view_mask_zero = true; } if ((subpass.flags & VK_SUBPASS_DESCRIPTION_PER_VIEW_POSITION_X_ONLY_BIT_NVX) != 0 && (subpass.flags & VK_SUBPASS_DESCRIPTION_PER_VIEW_ATTRIBUTES_BIT_NVX) == 0) { vuid = use_rp2 ? 
"VUID-VkSubpassDescription2-flags-03076" : "VUID-VkSubpassDescription-flags-00856"; skip |= LogError(device, vuid, "%s: The flags parameter of subpass description %u includes " "VK_SUBPASS_DESCRIPTION_PER_VIEW_POSITION_X_ONLY_BIT_NVX but does not also include " "VK_SUBPASS_DESCRIPTION_PER_VIEW_ATTRIBUTES_BIT_NVX.", function_name, i); } } if (rp_version == RENDER_PASS_VERSION_2) { if (view_mask_non_zero && view_mask_zero) { skip |= LogError(device, "VUID-VkRenderPassCreateInfo2-viewMask-03058", "%s: Some view masks are non-zero whilst others are zero.", function_name); } if (view_mask_zero && pCreateInfo->correlatedViewMaskCount != 0) { skip |= LogError(device, "VUID-VkRenderPassCreateInfo2-viewMask-03057", "%s: Multiview is not enabled but correlation masks are still provided", function_name); } } uint32_t aggregated_cvms = 0; for (uint32_t i = 0; i < pCreateInfo->correlatedViewMaskCount; ++i) { if (aggregated_cvms & pCreateInfo->pCorrelatedViewMasks[i]) { vuid = use_rp2 ? "VUID-VkRenderPassCreateInfo2-pCorrelatedViewMasks-03056" : "VUID-VkRenderPassMultiviewCreateInfo-pCorrelationMasks-00841"; skip |= LogError(device, vuid, "%s: pCorrelatedViewMasks[%u] contains a previously appearing view bit.", function_name, i); } aggregated_cvms |= pCreateInfo->pCorrelatedViewMasks[i]; } LogObjectList objects(device); auto queue_flags = sync_utils::kAllQueueTypes; auto func_name = use_rp2 ? ErrFunc::vkCreateRenderPass2 : ErrFunc::vkCreateRenderPass; auto refpage = use_rp2 ? RefPage::VkSubpassDependency2 : RefPage::VkSubpassDependency; for (uint32_t i = 0; i < pCreateInfo->dependencyCount; ++i) { auto const &dependency = pCreateInfo->pDependencies[i]; CoreErrorLocation loc(func_name, refpage, Field::pDependencies, i); skip |= ValidateSubpassBarrier(objects, loc, queue_flags, dependency); } return skip; } bool CoreChecks::PreCallValidateCreateRenderPass(VkDevice device, const VkRenderPassCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass) const { bool skip = false; // Handle extension structs from KHR_multiview and KHR_maintenance2 that can only be validated for RP1 (indices out of bounds) const VkRenderPassMultiviewCreateInfo *multiview_info = LvlFindInChain<VkRenderPassMultiviewCreateInfo>(pCreateInfo->pNext); if (multiview_info) { if (multiview_info->subpassCount && multiview_info->subpassCount != pCreateInfo->subpassCount) { skip |= LogError(device, "VUID-VkRenderPassCreateInfo-pNext-01928", "vkCreateRenderPass(): Subpass count is %u but multiview info has a subpass count of %u.", pCreateInfo->subpassCount, multiview_info->subpassCount); } else if (multiview_info->dependencyCount && multiview_info->dependencyCount != pCreateInfo->dependencyCount) { skip |= LogError(device, "VUID-VkRenderPassCreateInfo-pNext-01929", "vkCreateRenderPass(): Dependency count is %u but multiview info has a dependency count of %u.", pCreateInfo->dependencyCount, multiview_info->dependencyCount); } } const VkRenderPassInputAttachmentAspectCreateInfo *input_attachment_aspect_info = LvlFindInChain<VkRenderPassInputAttachmentAspectCreateInfo>(pCreateInfo->pNext); if (input_attachment_aspect_info) { for (uint32_t i = 0; i < input_attachment_aspect_info->aspectReferenceCount; ++i) { uint32_t subpass = input_attachment_aspect_info->pAspectReferences[i].subpass; uint32_t attachment = input_attachment_aspect_info->pAspectReferences[i].inputAttachmentIndex; if (subpass >= pCreateInfo->subpassCount) { skip |= LogError(device, "VUID-VkRenderPassCreateInfo-pNext-01926", "vkCreateRenderPass(): 
Subpass index %u specified by input attachment aspect info %u is greater "
                                 "than the subpass count of %u for this render pass.",
                                 subpass, i, pCreateInfo->subpassCount);
            } else if (pCreateInfo->pSubpasses && attachment >= pCreateInfo->pSubpasses[subpass].inputAttachmentCount) {
                skip |= LogError(device, "VUID-VkRenderPassCreateInfo-pNext-01927",
                                 "vkCreateRenderPass(): Input attachment index %u specified by input attachment aspect info %u is "
                                 "greater than the input attachment count of %u for this subpass.",
                                 attachment, i, pCreateInfo->pSubpasses[subpass].inputAttachmentCount);
            }
        }
    }
    const VkRenderPassFragmentDensityMapCreateInfoEXT *fragment_density_map_info =
        LvlFindInChain<VkRenderPassFragmentDensityMapCreateInfoEXT>(pCreateInfo->pNext);
    if (fragment_density_map_info) {
        if (fragment_density_map_info->fragmentDensityMapAttachment.attachment != VK_ATTACHMENT_UNUSED) {
            if (fragment_density_map_info->fragmentDensityMapAttachment.attachment >= pCreateInfo->attachmentCount) {
                skip |= LogError(device, "VUID-VkRenderPassFragmentDensityMapCreateInfoEXT-fragmentDensityMapAttachment-02547",
                                 "vkCreateRenderPass(): fragmentDensityMapAttachment %u must be less than attachmentCount %u "
                                 "for this render pass.",
                                 fragment_density_map_info->fragmentDensityMapAttachment.attachment, pCreateInfo->attachmentCount);
            } else {
                if (!(fragment_density_map_info->fragmentDensityMapAttachment.layout ==
                          VK_IMAGE_LAYOUT_FRAGMENT_DENSITY_MAP_OPTIMAL_EXT ||
                      fragment_density_map_info->fragmentDensityMapAttachment.layout == VK_IMAGE_LAYOUT_GENERAL)) {
                    skip |= LogError(device, "VUID-VkRenderPassFragmentDensityMapCreateInfoEXT-fragmentDensityMapAttachment-02549",
                                     "vkCreateRenderPass(): Layout of fragmentDensityMapAttachment %u must be equal to "
                                     "VK_IMAGE_LAYOUT_FRAGMENT_DENSITY_MAP_OPTIMAL_EXT or VK_IMAGE_LAYOUT_GENERAL.",
                                     fragment_density_map_info->fragmentDensityMapAttachment.attachment);
                }
                if (!(pCreateInfo->pAttachments[fragment_density_map_info->fragmentDensityMapAttachment.attachment].loadOp ==
                          VK_ATTACHMENT_LOAD_OP_LOAD ||
                      pCreateInfo->pAttachments[fragment_density_map_info->fragmentDensityMapAttachment.attachment].loadOp ==
                          VK_ATTACHMENT_LOAD_OP_DONT_CARE)) {
                    skip |= LogError(
                        device, "VUID-VkRenderPassFragmentDensityMapCreateInfoEXT-fragmentDensityMapAttachment-02550",
                        "vkCreateRenderPass(): fragmentDensityMapAttachment %u must reference an attachment with a loadOp "
                        "equal to VK_ATTACHMENT_LOAD_OP_LOAD or VK_ATTACHMENT_LOAD_OP_DONT_CARE.",
                        fragment_density_map_info->fragmentDensityMapAttachment.attachment);
                }
                if (pCreateInfo->pAttachments[fragment_density_map_info->fragmentDensityMapAttachment.attachment].storeOp !=
                    VK_ATTACHMENT_STORE_OP_DONT_CARE) {
                    skip |= LogError(
                        device, "VUID-VkRenderPassFragmentDensityMapCreateInfoEXT-fragmentDensityMapAttachment-02551",
                        "vkCreateRenderPass(): fragmentDensityMapAttachment %u must reference an attachment with a storeOp "
                        "equal to VK_ATTACHMENT_STORE_OP_DONT_CARE.",
                        fragment_density_map_info->fragmentDensityMapAttachment.attachment);
                }
            }
        }
    }

    if (!skip) {
        safe_VkRenderPassCreateInfo2 create_info_2;
        ConvertVkRenderPassCreateInfoToV2KHR(*pCreateInfo, &create_info_2);
        skip |= ValidateCreateRenderPass(device, RENDER_PASS_VERSION_1, create_info_2.ptr(), "vkCreateRenderPass()");
    }

    return skip;
}

bool CoreChecks::ValidateDepthStencilResolve(const VkPhysicalDeviceVulkan12Properties &core12_props,
                                             const VkRenderPassCreateInfo2 *pCreateInfo, const char *function_name) const {
    bool skip = false;

    // If the pNext list of VkSubpassDescription2 includes a
VkSubpassDescriptionDepthStencilResolve structure, // then that structure describes depth/stencil resolve operations for the subpass. for (uint32_t i = 0; i < pCreateInfo->subpassCount; i++) { const VkSubpassDescription2 &subpass = pCreateInfo->pSubpasses[i]; const auto *resolve = LvlFindInChain<VkSubpassDescriptionDepthStencilResolve>(subpass.pNext); if (resolve == nullptr) { continue; } const bool resolve_attachment_not_unused = (resolve->pDepthStencilResolveAttachment != nullptr && resolve->pDepthStencilResolveAttachment->attachment != VK_ATTACHMENT_UNUSED); const bool valid_resolve_attachment_index = (resolve_attachment_not_unused && resolve->pDepthStencilResolveAttachment->attachment < pCreateInfo->attachmentCount); const bool ds_attachment_not_unused = (subpass.pDepthStencilAttachment != nullptr && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED); const bool valid_ds_attachment_index = (ds_attachment_not_unused && subpass.pDepthStencilAttachment->attachment < pCreateInfo->attachmentCount); if (resolve_attachment_not_unused && subpass.pDepthStencilAttachment != nullptr && subpass.pDepthStencilAttachment->attachment == VK_ATTACHMENT_UNUSED) { skip |= LogError(device, "VUID-VkSubpassDescriptionDepthStencilResolve-pDepthStencilResolveAttachment-03177", "%s: Subpass %u includes a VkSubpassDescriptionDepthStencilResolve " "structure with resolve attachment %u, but pDepthStencilAttachment=VK_ATTACHMENT_UNUSED.", function_name, i, resolve->pDepthStencilResolveAttachment->attachment); } if (resolve_attachment_not_unused && resolve->depthResolveMode == VK_RESOLVE_MODE_NONE && resolve->stencilResolveMode == VK_RESOLVE_MODE_NONE) { skip |= LogError(device, "VUID-VkSubpassDescriptionDepthStencilResolve-pDepthStencilResolveAttachment-03178", "%s: Subpass %u includes a VkSubpassDescriptionDepthStencilResolve " "structure with resolve attachment %u, but both depth and stencil resolve modes are " "VK_RESOLVE_MODE_NONE.", function_name, i, resolve->pDepthStencilResolveAttachment->attachment); } if (resolve_attachment_not_unused && valid_ds_attachment_index && pCreateInfo->pAttachments[subpass.pDepthStencilAttachment->attachment].samples == VK_SAMPLE_COUNT_1_BIT) { skip |= LogError( device, "VUID-VkSubpassDescriptionDepthStencilResolve-pDepthStencilResolveAttachment-03179", "%s: Subpass %u includes a VkSubpassDescriptionDepthStencilResolve " "structure with resolve attachment %u. However pDepthStencilAttachment has sample count=VK_SAMPLE_COUNT_1_BIT.", function_name, i, resolve->pDepthStencilResolveAttachment->attachment); } if (valid_resolve_attachment_index && pCreateInfo->pAttachments[resolve->pDepthStencilResolveAttachment->attachment].samples != VK_SAMPLE_COUNT_1_BIT) { skip |= LogError(device, "VUID-VkSubpassDescriptionDepthStencilResolve-pDepthStencilResolveAttachment-03180", "%s: Subpass %u includes a VkSubpassDescriptionDepthStencilResolve " "structure with resolve attachment %u which has sample count=VK_SAMPLE_COUNT_1_BIT.", function_name, i, resolve->pDepthStencilResolveAttachment->attachment); } VkFormat depth_stencil_attachment_format = (valid_ds_attachment_index ? pCreateInfo->pAttachments[subpass.pDepthStencilAttachment->attachment].format : VK_FORMAT_UNDEFINED); VkFormat depth_stencil_resolve_attachment_format = (valid_resolve_attachment_index ? 
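        // VK_FORMAT_UNDEFINED serves as a sentinel for an invalid attachment index; the format comparisons below
        // only run when both indices are valid, so the sentinel value is never actually compared.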
pCreateInfo->pAttachments[resolve->pDepthStencilResolveAttachment->attachment].format : VK_FORMAT_UNDEFINED); if (valid_ds_attachment_index && valid_resolve_attachment_index) { const auto resolve_depth_size = FormatDepthSize(depth_stencil_resolve_attachment_format); const auto resolve_stencil_size = FormatStencilSize(depth_stencil_resolve_attachment_format); if (resolve_depth_size > 0 && ((FormatDepthSize(depth_stencil_attachment_format) != resolve_depth_size) || (FormatDepthNumericalType(depth_stencil_attachment_format) != FormatDepthNumericalType(depth_stencil_resolve_attachment_format)))) { skip |= LogError( device, "VUID-VkSubpassDescriptionDepthStencilResolve-pDepthStencilResolveAttachment-03181", "%s: Subpass %u includes a VkSubpassDescriptionDepthStencilResolve " "structure with resolve attachment %u which has a depth component (size %u). The depth component " "of pDepthStencilAttachment must have the same number of bits (currently %u) and the same numerical type.", function_name, i, resolve->pDepthStencilResolveAttachment->attachment, resolve_depth_size, FormatDepthSize(depth_stencil_attachment_format)); } if (resolve_stencil_size > 0 && ((FormatStencilSize(depth_stencil_attachment_format) != resolve_stencil_size) || (FormatStencilNumericalType(depth_stencil_attachment_format) != FormatStencilNumericalType(depth_stencil_resolve_attachment_format)))) { skip |= LogError( device, "VUID-VkSubpassDescriptionDepthStencilResolve-pDepthStencilResolveAttachment-03182", "%s: Subpass %u includes a VkSubpassDescriptionDepthStencilResolve " "structure with resolve attachment %u which has a stencil component (size %u). The stencil component " "of pDepthStencilAttachment must have the same number of bits (currently %u) and the same numerical type.", function_name, i, resolve->pDepthStencilResolveAttachment->attachment, resolve_stencil_size, FormatStencilSize(depth_stencil_attachment_format)); } } if (!(resolve->depthResolveMode == VK_RESOLVE_MODE_NONE || resolve->depthResolveMode & core12_props.supportedDepthResolveModes)) { skip |= LogError(device, "VUID-VkSubpassDescriptionDepthStencilResolve-depthResolveMode-03183", "%s: Subpass %u includes a VkSubpassDescriptionDepthStencilResolve " "structure with invalid depthResolveMode=%u.", function_name, i, resolve->depthResolveMode); } if (!(resolve->stencilResolveMode == VK_RESOLVE_MODE_NONE || resolve->stencilResolveMode & core12_props.supportedStencilResolveModes)) { skip |= LogError(device, "VUID-VkSubpassDescriptionDepthStencilResolve-stencilResolveMode-03184", "%s: Subpass %u includes a VkSubpassDescriptionDepthStencilResolve " "structure with invalid stencilResolveMode=%u.", function_name, i, resolve->stencilResolveMode); } if (valid_resolve_attachment_index && FormatIsDepthAndStencil(depth_stencil_resolve_attachment_format) && core12_props.independentResolve == VK_FALSE && core12_props.independentResolveNone == VK_FALSE && !(resolve->depthResolveMode == resolve->stencilResolveMode)) { skip |= LogError(device, "VUID-VkSubpassDescriptionDepthStencilResolve-pDepthStencilResolveAttachment-03185", "%s: Subpass %u includes a VkSubpassDescriptionDepthStencilResolve " "structure. 
The values of depthResolveMode (%u) and stencilResolveMode (%u) must be identical.", function_name, i, resolve->depthResolveMode, resolve->stencilResolveMode); } if (valid_resolve_attachment_index && FormatIsDepthAndStencil(depth_stencil_resolve_attachment_format) && core12_props.independentResolve == VK_FALSE && core12_props.independentResolveNone == VK_TRUE && !(resolve->depthResolveMode == resolve->stencilResolveMode || resolve->depthResolveMode == VK_RESOLVE_MODE_NONE || resolve->stencilResolveMode == VK_RESOLVE_MODE_NONE)) { skip |= LogError(device, "VUID-VkSubpassDescriptionDepthStencilResolve-pDepthStencilResolveAttachment-03186", "%s: Subpass %u includes a VkSubpassDescriptionDepthStencilResolve " "structure. The values of depthResolveMode (%u) and stencilResolveMode (%u) must be identical, or " "one of them must be %u.", function_name, i, resolve->depthResolveMode, resolve->stencilResolveMode, VK_RESOLVE_MODE_NONE); } } return skip; } bool CoreChecks::ValidateCreateRenderPass2(VkDevice device, const VkRenderPassCreateInfo2 *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass, const char *function_name) const { bool skip = false; if (device_extensions.vk_khr_depth_stencil_resolve) { skip |= ValidateDepthStencilResolve(phys_dev_props_core12, pCreateInfo, function_name); } skip |= ValidateFragmentShadingRateAttachments(device, pCreateInfo); safe_VkRenderPassCreateInfo2 create_info_2(pCreateInfo); skip |= ValidateCreateRenderPass(device, RENDER_PASS_VERSION_2, create_info_2.ptr(), function_name); return skip; } bool CoreChecks::ValidateFragmentShadingRateAttachments(VkDevice device, const VkRenderPassCreateInfo2 *pCreateInfo) const { bool skip = false; if (enabled_features.fragment_shading_rate_features.attachmentFragmentShadingRate) { for (uint32_t attachment_description = 0; attachment_description < pCreateInfo->attachmentCount; ++attachment_description) { std::vector<uint32_t> used_as_fragment_shading_rate_attachment; // Prepass to find any use as a fragment shading rate attachment structures and validate them independently for (uint32_t subpass = 0; subpass < pCreateInfo->subpassCount; ++subpass) { const VkFragmentShadingRateAttachmentInfoKHR *fragment_shading_rate_attachment = LvlFindInChain<VkFragmentShadingRateAttachmentInfoKHR>(pCreateInfo->pSubpasses[subpass].pNext); if (fragment_shading_rate_attachment && fragment_shading_rate_attachment->pFragmentShadingRateAttachment) { const VkAttachmentReference2 &attachment_reference = *(fragment_shading_rate_attachment->pFragmentShadingRateAttachment); if (attachment_reference.attachment == attachment_description) { used_as_fragment_shading_rate_attachment.push_back(subpass); } if (((pCreateInfo->flags & VK_RENDER_PASS_CREATE_TRANSFORM_BIT_QCOM) != 0) && (attachment_reference.attachment != VK_ATTACHMENT_UNUSED)) { skip |= LogError(device, "VUID-VkRenderPassCreateInfo2-flags-04521", "vkCreateRenderPass2: Render pass includes VK_RENDER_PASS_CREATE_TRANSFORM_BIT_QCOM but " "a fragment shading rate attachment is specified in subpass %u.", subpass); } if (attachment_reference.attachment != VK_ATTACHMENT_UNUSED) { const VkFormatFeatureFlags potential_format_features = GetPotentialFormatFeatures(pCreateInfo->pAttachments[attachment_reference.attachment].format); if (!(potential_format_features & VK_FORMAT_FEATURE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR)) { skip |= LogError(device, "VUID-VkRenderPassCreateInfo2-pAttachments-04586", "vkCreateRenderPass2: Attachment description %u is used in subpass %u as a fragment 
" "shading rate attachment, but specifies format %s, which does not support " "VK_FORMAT_FEATURE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR.", attachment_reference.attachment, subpass, string_VkFormat(pCreateInfo->pAttachments[attachment_reference.attachment].format)); } if (attachment_reference.layout != VK_IMAGE_LAYOUT_GENERAL && attachment_reference.layout != VK_IMAGE_LAYOUT_FRAGMENT_SHADING_RATE_ATTACHMENT_OPTIMAL_KHR) { skip |= LogError( device, "VUID-VkFragmentShadingRateAttachmentInfoKHR-pFragmentShadingRateAttachment-04524", "vkCreateRenderPass2: Fragment shading rate attachment in subpass %u specifies a layout of %s.", subpass, string_VkImageLayout(attachment_reference.layout)); } if (!IsPowerOfTwo(fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.width)) { skip |= LogError(device, "VUID-VkFragmentShadingRateAttachmentInfoKHR-pFragmentShadingRateAttachment-04525", "vkCreateRenderPass2: Fragment shading rate attachment in subpass %u has a " "non-power-of-two texel width of %u.", subpass, fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.width); } if (fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.width < phys_dev_ext_props.fragment_shading_rate_props.minFragmentShadingRateAttachmentTexelSize.width) { LogError( device, "VUID-VkFragmentShadingRateAttachmentInfoKHR-pFragmentShadingRateAttachment-04526", "vkCreateRenderPass2: Fragment shading rate attachment in subpass %u has a texel width of %u which " "is lower than the advertised minimum width %u.", subpass, fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.width, phys_dev_ext_props.fragment_shading_rate_props.minFragmentShadingRateAttachmentTexelSize.width); } if (fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.width > phys_dev_ext_props.fragment_shading_rate_props.maxFragmentShadingRateAttachmentTexelSize.width) { LogError( device, "VUID-VkFragmentShadingRateAttachmentInfoKHR-pFragmentShadingRateAttachment-04527", "vkCreateRenderPass2: Fragment shading rate attachment in subpass %u has a texel width of %u which " "is higher than the advertised maximum width %u.", subpass, fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.width, phys_dev_ext_props.fragment_shading_rate_props.maxFragmentShadingRateAttachmentTexelSize.width); } if (!IsPowerOfTwo(fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.height)) { skip |= LogError(device, "VUID-VkFragmentShadingRateAttachmentInfoKHR-pFragmentShadingRateAttachment-04528", "vkCreateRenderPass2: Fragment shading rate attachment in subpass %u has a " "non-power-of-two texel height of %u.", subpass, fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.height); } if (fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.height < phys_dev_ext_props.fragment_shading_rate_props.minFragmentShadingRateAttachmentTexelSize.height) { LogError( device, "VUID-VkFragmentShadingRateAttachmentInfoKHR-pFragmentShadingRateAttachment-04529", "vkCreateRenderPass2: Fragment shading rate attachment in subpass %u has a texel height of %u " "which is lower than the advertised minimum height %u.", subpass, fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.height, phys_dev_ext_props.fragment_shading_rate_props.minFragmentShadingRateAttachmentTexelSize.height); } if (fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.height > phys_dev_ext_props.fragment_shading_rate_props.maxFragmentShadingRateAttachmentTexelSize.height) { LogError( device, 
"VUID-VkFragmentShadingRateAttachmentInfoKHR-pFragmentShadingRateAttachment-04530", "vkCreateRenderPass2: Fragment shading rate attachment in subpass %u has a texel height of %u " "which is higher than the advertised maximum height %u.", subpass, fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.height, phys_dev_ext_props.fragment_shading_rate_props.maxFragmentShadingRateAttachmentTexelSize.height); } uint32_t aspect_ratio = fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.width / fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.height; uint32_t inverse_aspect_ratio = fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.height / fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.width; if (aspect_ratio > phys_dev_ext_props.fragment_shading_rate_props.maxFragmentShadingRateAttachmentTexelSizeAspectRatio) { LogError( device, "VUID-VkFragmentShadingRateAttachmentInfoKHR-pFragmentShadingRateAttachment-04531", "vkCreateRenderPass2: Fragment shading rate attachment in subpass %u has a texel size of %u by %u, " "which has an aspect ratio %u, which is higher than the advertised maximum aspect ratio %u.", subpass, fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.width, fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.height, aspect_ratio, phys_dev_ext_props.fragment_shading_rate_props .maxFragmentShadingRateAttachmentTexelSizeAspectRatio); } if (inverse_aspect_ratio > phys_dev_ext_props.fragment_shading_rate_props.maxFragmentShadingRateAttachmentTexelSizeAspectRatio) { LogError( device, "VUID-VkFragmentShadingRateAttachmentInfoKHR-pFragmentShadingRateAttachment-04532", "vkCreateRenderPass2: Fragment shading rate attachment in subpass %u has a texel size of %u by %u, " "which has an inverse aspect ratio of %u, which is higher than the advertised maximum aspect ratio " "%u.", subpass, fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.width, fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.height, inverse_aspect_ratio, phys_dev_ext_props.fragment_shading_rate_props .maxFragmentShadingRateAttachmentTexelSizeAspectRatio); } } } } // Lambda function turning a vector of integers into a string auto vector_to_string = [&](std::vector<uint32_t> vector) { std::stringstream ss; size_t size = vector.size(); for (size_t i = 0; i < used_as_fragment_shading_rate_attachment.size(); i++) { if (size == 2 && i == 1) { ss << " and "; } else if (size > 2 && i == size - 2) { ss << ", and "; } else if (i != 0) { ss << ", "; } ss << vector[i]; } return ss.str(); }; // Search for other uses of the same attachment if (!used_as_fragment_shading_rate_attachment.empty()) { for (uint32_t subpass = 0; subpass < pCreateInfo->subpassCount; ++subpass) { const VkSubpassDescription2 &subpass_info = pCreateInfo->pSubpasses[subpass]; const VkSubpassDescriptionDepthStencilResolve *depth_stencil_resolve_attachment = LvlFindInChain<VkSubpassDescriptionDepthStencilResolve>(subpass_info.pNext); std::string fsr_attachment_subpasses_string = vector_to_string(used_as_fragment_shading_rate_attachment); for (uint32_t attachment = 0; attachment < subpass_info.colorAttachmentCount; ++attachment) { if (subpass_info.pColorAttachments[attachment].attachment == attachment_description) { skip |= LogError( device, "VUID-VkRenderPassCreateInfo2-pAttachments-04585", "vkCreateRenderPass2: Attachment description %u is used as a fragment shading rate attachment in " "subpass(es) %s but also as color attachment %u in subpass 
%u", attachment_description, fsr_attachment_subpasses_string.c_str(), attachment, subpass); } } for (uint32_t attachment = 0; attachment < subpass_info.colorAttachmentCount; ++attachment) { if (subpass_info.pResolveAttachments && subpass_info.pResolveAttachments[attachment].attachment == attachment_description) { skip |= LogError( device, "VUID-VkRenderPassCreateInfo2-pAttachments-04585", "vkCreateRenderPass2: Attachment description %u is used as a fragment shading rate attachment in " "subpass(es) %s but also as color resolve attachment %u in subpass %u", attachment_description, fsr_attachment_subpasses_string.c_str(), attachment, subpass); } } for (uint32_t attachment = 0; attachment < subpass_info.inputAttachmentCount; ++attachment) { if (subpass_info.pInputAttachments[attachment].attachment == attachment_description) { skip |= LogError( device, "VUID-VkRenderPassCreateInfo2-pAttachments-04585", "vkCreateRenderPass2: Attachment description %u is used as a fragment shading rate attachment in " "subpass(es) %s but also as input attachment %u in subpass %u", attachment_description, fsr_attachment_subpasses_string.c_str(), attachment, subpass); } } if (subpass_info.pDepthStencilAttachment) { if (subpass_info.pDepthStencilAttachment->attachment == attachment_description) { skip |= LogError( device, "VUID-VkRenderPassCreateInfo2-pAttachments-04585", "vkCreateRenderPass2: Attachment description %u is used as a fragment shading rate attachment in " "subpass(es) %s but also as the depth/stencil attachment in subpass %u", attachment_description, fsr_attachment_subpasses_string.c_str(), subpass); } } if (depth_stencil_resolve_attachment && depth_stencil_resolve_attachment->pDepthStencilResolveAttachment) { if (depth_stencil_resolve_attachment->pDepthStencilResolveAttachment->attachment == attachment_description) { skip |= LogError( device, "VUID-VkRenderPassCreateInfo2-pAttachments-04585", "vkCreateRenderPass2: Attachment description %u is used as a fragment shading rate attachment in " "subpass(es) %s but also as the depth/stencil resolve attachment in subpass %u", attachment_description, fsr_attachment_subpasses_string.c_str(), subpass); } } } } } } return skip; } bool CoreChecks::PreCallValidateCreateRenderPass2KHR(VkDevice device, const VkRenderPassCreateInfo2 *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass) const { return ValidateCreateRenderPass2(device, pCreateInfo, pAllocator, pRenderPass, "vkCreateRenderPass2KHR()"); } bool CoreChecks::PreCallValidateCreateRenderPass2(VkDevice device, const VkRenderPassCreateInfo2 *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass) const { return ValidateCreateRenderPass2(device, pCreateInfo, pAllocator, pRenderPass, "vkCreateRenderPass2()"); } bool CoreChecks::ValidatePrimaryCommandBuffer(const CMD_BUFFER_STATE *pCB, char const *cmd_name, const char *error_code) const { bool skip = false; if (pCB->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) { skip |= LogError(pCB->commandBuffer, error_code, "Cannot execute command %s on a secondary command buffer.", cmd_name); } return skip; } bool CoreChecks::VerifyRenderAreaBounds(const VkRenderPassBeginInfo *pRenderPassBegin) const { bool skip = false; const safe_VkFramebufferCreateInfo *framebuffer_info = &GetFramebufferState(pRenderPassBegin->framebuffer)->createInfo; if (pRenderPassBegin->renderArea.offset.x < 0 || (pRenderPassBegin->renderArea.offset.x + pRenderPassBegin->renderArea.extent.width) > framebuffer_info->width || 
pRenderPassBegin->renderArea.offset.y < 0 ||
        (pRenderPassBegin->renderArea.offset.y + pRenderPassBegin->renderArea.extent.height) > framebuffer_info->height) {
        skip |= LogError(
            pRenderPassBegin->renderPass, kVUID_Core_DrawState_InvalidRenderArea,
            "Cannot execute a render pass with a renderArea that is not within the bounds of the framebuffer. RenderArea: "
            "x %d, y %d, width %d, height %d. Framebuffer: width %d, height %d.",
            pRenderPassBegin->renderArea.offset.x, pRenderPassBegin->renderArea.offset.y, pRenderPassBegin->renderArea.extent.width,
            pRenderPassBegin->renderArea.extent.height, framebuffer_info->width, framebuffer_info->height);
    }
    return skip;
}

bool CoreChecks::VerifyFramebufferAndRenderPassImageViews(const VkRenderPassBeginInfo *pRenderPassBeginInfo,
                                                          const char *func_name) const {
    bool skip = false;
    const VkRenderPassAttachmentBeginInfo *render_pass_attachment_begin_info =
        LvlFindInChain<VkRenderPassAttachmentBeginInfo>(pRenderPassBeginInfo->pNext);

    if (render_pass_attachment_begin_info && render_pass_attachment_begin_info->attachmentCount != 0) {
        const safe_VkFramebufferCreateInfo *framebuffer_create_info =
            &GetFramebufferState(pRenderPassBeginInfo->framebuffer)->createInfo;
        const VkFramebufferAttachmentsCreateInfo *framebuffer_attachments_create_info =
            LvlFindInChain<VkFramebufferAttachmentsCreateInfo>(framebuffer_create_info->pNext);
        if ((framebuffer_create_info->flags & VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT) == 0) {
            skip |= LogError(pRenderPassBeginInfo->renderPass, "VUID-VkRenderPassBeginInfo-framebuffer-03207",
                             "%s: Image views specified at render pass begin, but framebuffer was not created with "
                             "VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT.",
                             func_name);
        } else if (framebuffer_attachments_create_info) {
            if (framebuffer_attachments_create_info->attachmentImageInfoCount !=
                render_pass_attachment_begin_info->attachmentCount) {
                skip |= LogError(pRenderPassBeginInfo->renderPass, "VUID-VkRenderPassBeginInfo-framebuffer-03208",
                                 "%s: %u image views specified at render pass begin, but framebuffer was "
                                 "created expecting %u attachments.",
                                 func_name, render_pass_attachment_begin_info->attachmentCount,
                                 framebuffer_attachments_create_info->attachmentImageInfoCount);
            } else {
                const safe_VkRenderPassCreateInfo2 *render_pass_create_info =
                    &GetRenderPassState(pRenderPassBeginInfo->renderPass)->createInfo;
                for (uint32_t i = 0; i < render_pass_attachment_begin_info->attachmentCount; ++i) {
                    const auto image_view_state = GetImageViewState(render_pass_attachment_begin_info->pAttachments[i]);
                    const VkImageViewCreateInfo *image_view_create_info = &image_view_state->create_info;
                    const VkFramebufferAttachmentImageInfo *framebuffer_attachment_image_info =
                        &framebuffer_attachments_create_info->pAttachmentImageInfos[i];
                    const VkImageCreateInfo *image_create_info = &GetImageState(image_view_create_info->image)->createInfo;

                    if (framebuffer_attachment_image_info->flags != image_create_info->flags) {
                        skip |= LogError(pRenderPassBeginInfo->renderPass, "VUID-VkRenderPassBeginInfo-framebuffer-03209",
                                         "%s: Image view #%u created from an image with flags set as 0x%X, "
                                         "but image info #%u used to create the framebuffer had flags set as 0x%X",
                                         func_name, i, image_create_info->flags, i, framebuffer_attachment_image_info->flags);
                    }

                    if (framebuffer_attachment_image_info->usage != image_view_state->inherited_usage) {
                        // Give clearer message if this error is due to the "inherited" part or not
                        if (image_create_info->usage == image_view_state->inherited_usage) {
                            skip |= LogError(pRenderPassBeginInfo->renderPass,
"VUID-VkRenderPassBeginInfo-framebuffer-04627", "%s: Image view #%u created from an image with usage set as 0x%X, " "but image info #%u used to create the framebuffer had usage set as 0x%X", func_name, i, image_create_info->usage, i, framebuffer_attachment_image_info->usage); } else { skip |= LogError(pRenderPassBeginInfo->renderPass, "VUID-VkRenderPassBeginInfo-framebuffer-04627", "%s: Image view #%u created from an image with usage set as 0x%X but using " "VkImageViewUsageCreateInfo the inherited usage is the subset 0x%X " "and the image info #%u used to create the framebuffer had usage set as 0x%X", func_name, i, image_create_info->usage, image_view_state->inherited_usage, i, framebuffer_attachment_image_info->usage); } } uint32_t view_width = image_create_info->extent.width >> image_view_create_info->subresourceRange.baseMipLevel; if (framebuffer_attachment_image_info->width != view_width) { skip |= LogError(pRenderPassBeginInfo->renderPass, "VUID-VkRenderPassBeginInfo-framebuffer-03211", "%s: Image view #%u created from an image subresource with width set as %u, " "but image info #%u used to create the framebuffer had width set as %u", func_name, i, view_width, i, framebuffer_attachment_image_info->width); } uint32_t view_height = image_create_info->extent.width >> image_view_create_info->subresourceRange.baseMipLevel; if (framebuffer_attachment_image_info->height != view_height) { skip |= LogError(pRenderPassBeginInfo->renderPass, "VUID-VkRenderPassBeginInfo-framebuffer-03212", "%s: Image view #%u created from an image subresource with height set as %u, " "but image info #%u used to create the framebuffer had height set as %u", func_name, i, view_height, i, framebuffer_attachment_image_info->height); } if (framebuffer_attachment_image_info->layerCount != image_view_create_info->subresourceRange.layerCount) { skip |= LogError(pRenderPassBeginInfo->renderPass, "VUID-VkRenderPassBeginInfo-framebuffer-03213", "%s: Image view #%u created with a subresource range with a layerCount of %u, " "but image info #%u used to create the framebuffer had layerCount set as %u", func_name, i, image_view_create_info->subresourceRange.layerCount, i, framebuffer_attachment_image_info->layerCount); } const VkImageFormatListCreateInfo *image_format_list_create_info = LvlFindInChain<VkImageFormatListCreateInfo>(image_create_info->pNext); if (image_format_list_create_info) { if (image_format_list_create_info->viewFormatCount != framebuffer_attachment_image_info->viewFormatCount) { skip |= LogError( pRenderPassBeginInfo->renderPass, "VUID-VkRenderPassBeginInfo-framebuffer-03214", "VkRenderPassBeginInfo: Image view #%u created with an image with a viewFormatCount of %u, " "but image info #%u used to create the framebuffer had viewFormatCount set as %u", i, image_format_list_create_info->viewFormatCount, i, framebuffer_attachment_image_info->viewFormatCount); } for (uint32_t j = 0; j < image_format_list_create_info->viewFormatCount; ++j) { bool format_found = false; for (uint32_t k = 0; k < framebuffer_attachment_image_info->viewFormatCount; ++k) { if (image_format_list_create_info->pViewFormats[j] == framebuffer_attachment_image_info->pViewFormats[k]) { format_found = true; } } if (!format_found) { skip |= LogError(pRenderPassBeginInfo->renderPass, "VUID-VkRenderPassBeginInfo-framebuffer-03215", "VkRenderPassBeginInfo: Image view #%u created with an image including the format " "%s in its view format list, " "but image info #%u used to create the framebuffer does not include this format", i, 
string_VkFormat(image_format_list_create_info->pViewFormats[j]), i); } } } if (render_pass_create_info->pAttachments[i].format != image_view_create_info->format) { skip |= LogError(pRenderPassBeginInfo->renderPass, "VUID-VkRenderPassBeginInfo-framebuffer-03216", "%s: Image view #%u created with a format of %s, " "but render pass attachment description #%u created with a format of %s", func_name, i, string_VkFormat(image_view_create_info->format), i, string_VkFormat(render_pass_create_info->pAttachments[i].format)); } if (render_pass_create_info->pAttachments[i].samples != image_create_info->samples) { skip |= LogError(pRenderPassBeginInfo->renderPass, "VUID-VkRenderPassBeginInfo-framebuffer-03217", "%s: Image view #%u created with an image with %s samples, " "but render pass attachment description #%u created with %s samples", func_name, i, string_VkSampleCountFlagBits(image_create_info->samples), i, string_VkSampleCountFlagBits(render_pass_create_info->pAttachments[i].samples)); } if (image_view_create_info->subresourceRange.levelCount != 1) { skip |= LogError(render_pass_attachment_begin_info->pAttachments[i], "VUID-VkRenderPassAttachmentBeginInfo-pAttachments-03218", "%s: Image view #%u created with multiple (%u) mip levels.", func_name, i, image_view_create_info->subresourceRange.levelCount); } if (IsIdentitySwizzle(image_view_create_info->components) == false) { skip |= LogError( render_pass_attachment_begin_info->pAttachments[i], "VUID-VkRenderPassAttachmentBeginInfo-pAttachments-03219", "%s: Image view #%u created with non-identity swizzle. All " "framebuffer attachments must have been created with the identity swizzle. Here are the actual " "swizzle values:\n" "r swizzle = %s\n" "g swizzle = %s\n" "b swizzle = %s\n" "a swizzle = %s\n", func_name, i, string_VkComponentSwizzle(image_view_create_info->components.r), string_VkComponentSwizzle(image_view_create_info->components.g), string_VkComponentSwizzle(image_view_create_info->components.b), string_VkComponentSwizzle(image_view_create_info->components.a)); } if (image_view_create_info->viewType == VK_IMAGE_VIEW_TYPE_3D) { skip |= LogError(render_pass_attachment_begin_info->pAttachments[i], "VUID-VkRenderPassAttachmentBeginInfo-pAttachments-04114", "%s: Image view #%u created with type VK_IMAGE_VIEW_TYPE_3D", func_name, i); } } } } } return skip; } // If this is a stencil format, make sure the stencil[Load|Store]Op flag is checked, while if it is a depth/color attachment the // [load|store]Op flag must be checked // TODO: The memory valid flag in DEVICE_MEMORY_STATE should probably be split to track the validity of stencil memory separately. template <typename T> static bool FormatSpecificLoadAndStoreOpSettings(VkFormat format, T color_depth_op, T stencil_op, T op) { if (color_depth_op != op && stencil_op != op) { return false; } bool check_color_depth_load_op = !FormatIsStencilOnly(format); bool check_stencil_load_op = FormatIsDepthAndStencil(format) || !check_color_depth_load_op; return ((check_color_depth_load_op && (color_depth_op == op)) || (check_stencil_load_op && (stencil_op == op))); } bool CoreChecks::ValidateCmdBeginRenderPass(VkCommandBuffer commandBuffer, RenderPassCreateVersion rp_version, const VkRenderPassBeginInfo *pRenderPassBegin) const { const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); auto render_pass_state = pRenderPassBegin ? GetRenderPassState(pRenderPassBegin->renderPass) : nullptr; auto framebuffer = pRenderPassBegin ? 
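    // These may be null if the application passed invalid handles; all render-pass-specific checks below are
    // gated on render_pass_state being valid.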
GetFramebufferState(pRenderPassBegin->framebuffer) : nullptr;

    bool skip = false;
    const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2);
    const char *vuid;
    const char *const function_name = use_rp2 ? "vkCmdBeginRenderPass2()" : "vkCmdBeginRenderPass()";

    if (render_pass_state) {
        uint32_t clear_op_size = 0;  // Make sure pClearValues is at least as large as last LOAD_OP_CLEAR

        // Handle extension struct from EXT_sample_locations
        const VkRenderPassSampleLocationsBeginInfoEXT *sample_locations_begin_info =
            LvlFindInChain<VkRenderPassSampleLocationsBeginInfoEXT>(pRenderPassBegin->pNext);
        if (sample_locations_begin_info) {
            for (uint32_t i = 0; i < sample_locations_begin_info->attachmentInitialSampleLocationsCount; ++i) {
                const VkAttachmentSampleLocationsEXT &sample_location =
                    sample_locations_begin_info->pAttachmentInitialSampleLocations[i];
                skip |= ValidateSampleLocationsInfo(&sample_location.sampleLocationsInfo, function_name);
                if (sample_location.attachmentIndex >= render_pass_state->createInfo.attachmentCount) {
                    skip |= LogError(device, "VUID-VkAttachmentSampleLocationsEXT-attachmentIndex-01531",
                                     "%s: Attachment index %u specified by attachment sample locations %u is greater than the "
                                     "attachment count of %u for the render pass being begun.",
                                     function_name, sample_location.attachmentIndex, i,
                                     render_pass_state->createInfo.attachmentCount);
                }
            }

            for (uint32_t i = 0; i < sample_locations_begin_info->postSubpassSampleLocationsCount; ++i) {
                const VkSubpassSampleLocationsEXT &sample_location = sample_locations_begin_info->pPostSubpassSampleLocations[i];
                skip |= ValidateSampleLocationsInfo(&sample_location.sampleLocationsInfo, function_name);
                if (sample_location.subpassIndex >= render_pass_state->createInfo.subpassCount) {
                    skip |= LogError(device, "VUID-VkSubpassSampleLocationsEXT-subpassIndex-01532",
                                     "%s: Subpass index %u specified by subpass sample locations %u is greater than the subpass "
                                     "count of %u for the render pass being begun.",
                                     function_name, sample_location.subpassIndex, i,
                                     render_pass_state->createInfo.subpassCount);
                }
            }
        }

        for (uint32_t i = 0; i < render_pass_state->createInfo.attachmentCount; ++i) {
            auto attachment = &render_pass_state->createInfo.pAttachments[i];
            if (FormatSpecificLoadAndStoreOpSettings(attachment->format, attachment->loadOp, attachment->stencilLoadOp,
                                                     VK_ATTACHMENT_LOAD_OP_CLEAR)) {
                clear_op_size = static_cast<uint32_t>(i) + 1;

                if (FormatHasDepth(attachment->format)) {
                    skip |= ValidateClearDepthStencilValue(commandBuffer, pRenderPassBegin->pClearValues[i].depthStencil,
                                                           function_name);
                }
            }
        }

        if (clear_op_size > pRenderPassBegin->clearValueCount) {
            skip |= LogError(render_pass_state->renderPass, "VUID-VkRenderPassBeginInfo-clearValueCount-00902",
                             "In %s the VkRenderPassBeginInfo struct has a clearValueCount of %u, but there must be at least %u "
                             "entries in the pClearValues array, because the highest indexed attachment in %s that uses "
                             "VK_ATTACHMENT_LOAD_OP_CLEAR requires %u entries. "
"Note that the pClearValues array is indexed by attachment number, so even if some pClearValues "
                             "entries between 0 and %u correspond to attachments that aren't cleared they will be ignored.",
                             function_name, pRenderPassBegin->clearValueCount, clear_op_size,
                             report_data->FormatHandle(render_pass_state->renderPass).c_str(), clear_op_size, clear_op_size - 1);
        }
        skip |= VerifyFramebufferAndRenderPassImageViews(pRenderPassBegin, function_name);
        skip |= VerifyRenderAreaBounds(pRenderPassBegin);
        skip |= VerifyFramebufferAndRenderPassLayouts(rp_version, cb_state, pRenderPassBegin,
                                                      GetFramebufferState(pRenderPassBegin->framebuffer));
        if (framebuffer->rp_state->renderPass != render_pass_state->renderPass) {
            skip |= ValidateRenderPassCompatibility("render pass", render_pass_state, "framebuffer", framebuffer->rp_state.get(),
                                                    function_name, "VUID-VkRenderPassBeginInfo-renderPass-00904");
        }

        vuid = use_rp2 ? "VUID-vkCmdBeginRenderPass2-renderpass" : "VUID-vkCmdBeginRenderPass-renderpass";
        skip |= InsideRenderPass(cb_state, function_name, vuid);
        skip |= ValidateDependencies(framebuffer, render_pass_state);

        vuid = use_rp2 ? "VUID-vkCmdBeginRenderPass2-bufferlevel" : "VUID-vkCmdBeginRenderPass-bufferlevel";
        skip |= ValidatePrimaryCommandBuffer(cb_state, function_name, vuid);

        vuid = use_rp2 ? "VUID-vkCmdBeginRenderPass2-commandBuffer-cmdpool" : "VUID-vkCmdBeginRenderPass-commandBuffer-cmdpool";
        skip |= ValidateCmdQueueFlags(cb_state, function_name, VK_QUEUE_GRAPHICS_BIT, vuid);

        const CMD_TYPE cmd_type = use_rp2 ? CMD_BEGINRENDERPASS2 : CMD_BEGINRENDERPASS;
        skip |= ValidateCmd(cb_state, cmd_type, function_name);
    }

    auto chained_device_group_struct = LvlFindInChain<VkDeviceGroupRenderPassBeginInfo>(pRenderPassBegin->pNext);
    if (chained_device_group_struct) {
        skip |= ValidateDeviceMaskToPhysicalDeviceCount(chained_device_group_struct->deviceMask, pRenderPassBegin->renderPass,
                                                        "VUID-VkDeviceGroupRenderPassBeginInfo-deviceMask-00905");
        skip |= ValidateDeviceMaskToZero(chained_device_group_struct->deviceMask, pRenderPassBegin->renderPass,
                                         "VUID-VkDeviceGroupRenderPassBeginInfo-deviceMask-00906");
        skip |= ValidateDeviceMaskToCommandBuffer(cb_state, chained_device_group_struct->deviceMask, pRenderPassBegin->renderPass,
                                                  "VUID-VkDeviceGroupRenderPassBeginInfo-deviceMask-00907");

        if (chained_device_group_struct->deviceRenderAreaCount != 0 &&
            chained_device_group_struct->deviceRenderAreaCount != physical_device_count) {
            skip |= LogError(pRenderPassBegin->renderPass, "VUID-VkDeviceGroupRenderPassBeginInfo-deviceRenderAreaCount-00908",
                             "%s: deviceRenderAreaCount of %" PRIu32 " is invalid. 
Physical device count is %" PRIu32 ".", function_name, chained_device_group_struct->deviceRenderAreaCount, physical_device_count); } } return skip; } bool CoreChecks::PreCallValidateCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin, VkSubpassContents contents) const { bool skip = ValidateCmdBeginRenderPass(commandBuffer, RENDER_PASS_VERSION_1, pRenderPassBegin); return skip; } bool CoreChecks::PreCallValidateCmdBeginRenderPass2KHR(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin, const VkSubpassBeginInfo *pSubpassBeginInfo) const { bool skip = ValidateCmdBeginRenderPass(commandBuffer, RENDER_PASS_VERSION_2, pRenderPassBegin); return skip; } bool CoreChecks::PreCallValidateCmdBeginRenderPass2(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin, const VkSubpassBeginInfo *pSubpassBeginInfo) const { bool skip = ValidateCmdBeginRenderPass(commandBuffer, RENDER_PASS_VERSION_2, pRenderPassBegin); return skip; } void CoreChecks::RecordCmdBeginRenderPassLayouts(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin, const VkSubpassContents contents) { CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); auto render_pass_state = pRenderPassBegin ? GetRenderPassState(pRenderPassBegin->renderPass) : nullptr; auto framebuffer = pRenderPassBegin ? GetFramebufferState(pRenderPassBegin->framebuffer) : nullptr; if (render_pass_state) { // transition attachments to the correct layouts for beginning of renderPass and first subpass TransitionBeginRenderPassLayouts(cb_state, render_pass_state, framebuffer); } } void CoreChecks::PreCallRecordCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin, VkSubpassContents contents) { StateTracker::PreCallRecordCmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents); RecordCmdBeginRenderPassLayouts(commandBuffer, pRenderPassBegin, contents); } void CoreChecks::PreCallRecordCmdBeginRenderPass2KHR(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin, const VkSubpassBeginInfo *pSubpassBeginInfo) { StateTracker::PreCallRecordCmdBeginRenderPass2KHR(commandBuffer, pRenderPassBegin, pSubpassBeginInfo); RecordCmdBeginRenderPassLayouts(commandBuffer, pRenderPassBegin, pSubpassBeginInfo->contents); } void CoreChecks::PreCallRecordCmdBeginRenderPass2(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin, const VkSubpassBeginInfo *pSubpassBeginInfo) { StateTracker::PreCallRecordCmdBeginRenderPass2(commandBuffer, pRenderPassBegin, pSubpassBeginInfo); RecordCmdBeginRenderPassLayouts(commandBuffer, pRenderPassBegin, pSubpassBeginInfo->contents); } bool CoreChecks::ValidateCmdNextSubpass(RenderPassCreateVersion rp_version, VkCommandBuffer commandBuffer) const { const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); bool skip = false; const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2); const char *vuid; const char *const function_name = use_rp2 ? "vkCmdNextSubpass2()" : "vkCmdNextSubpass()"; vuid = use_rp2 ? "VUID-vkCmdNextSubpass2-bufferlevel" : "VUID-vkCmdNextSubpass-bufferlevel"; skip |= ValidatePrimaryCommandBuffer(cb_state, function_name, vuid); vuid = use_rp2 ? "VUID-vkCmdNextSubpass2-commandBuffer-cmdpool" : "VUID-vkCmdNextSubpass-commandBuffer-cmdpool"; skip |= ValidateCmdQueueFlags(cb_state, function_name, VK_QUEUE_GRAPHICS_BIT, vuid); const CMD_TYPE cmd_type = use_rp2 ? 
CMD_NEXTSUBPASS2 : CMD_NEXTSUBPASS; skip |= ValidateCmd(cb_state, cmd_type, function_name); vuid = use_rp2 ? "VUID-vkCmdNextSubpass2-renderpass" : "VUID-vkCmdNextSubpass-renderpass"; skip |= OutsideRenderPass(cb_state, function_name, vuid); auto subpass_count = cb_state->activeRenderPass->createInfo.subpassCount; if (cb_state->activeSubpass == subpass_count - 1) { vuid = use_rp2 ? "VUID-vkCmdNextSubpass2-None-03102" : "VUID-vkCmdNextSubpass-None-00909"; skip |= LogError(commandBuffer, vuid, "%s: Attempted to advance beyond final subpass.", function_name); } return skip; } bool CoreChecks::PreCallValidateCmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) const { return ValidateCmdNextSubpass(RENDER_PASS_VERSION_1, commandBuffer); } bool CoreChecks::PreCallValidateCmdNextSubpass2KHR(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo, const VkSubpassEndInfo *pSubpassEndInfo) const { return ValidateCmdNextSubpass(RENDER_PASS_VERSION_2, commandBuffer); } bool CoreChecks::PreCallValidateCmdNextSubpass2(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo, const VkSubpassEndInfo *pSubpassEndInfo) const { return ValidateCmdNextSubpass(RENDER_PASS_VERSION_2, commandBuffer); } void CoreChecks::RecordCmdNextSubpassLayouts(VkCommandBuffer commandBuffer, VkSubpassContents contents) { CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); TransitionSubpassLayouts(cb_state, cb_state->activeRenderPass.get(), cb_state->activeSubpass, Get<FRAMEBUFFER_STATE>(cb_state->activeRenderPassBeginInfo.framebuffer)); } void CoreChecks::PostCallRecordCmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) { StateTracker::PostCallRecordCmdNextSubpass(commandBuffer, contents); RecordCmdNextSubpassLayouts(commandBuffer, contents); } void CoreChecks::PostCallRecordCmdNextSubpass2KHR(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo, const VkSubpassEndInfo *pSubpassEndInfo) { StateTracker::PostCallRecordCmdNextSubpass2KHR(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo); RecordCmdNextSubpassLayouts(commandBuffer, pSubpassBeginInfo->contents); } void CoreChecks::PostCallRecordCmdNextSubpass2(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo, const VkSubpassEndInfo *pSubpassEndInfo) { StateTracker::PostCallRecordCmdNextSubpass2(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo); RecordCmdNextSubpassLayouts(commandBuffer, pSubpassBeginInfo->contents); } bool CoreChecks::ValidateCmdEndRenderPass(RenderPassCreateVersion rp_version, VkCommandBuffer commandBuffer) const { const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); bool skip = false; const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2); const char *vuid; const char *const function_name = use_rp2 ? "vkCmdEndRenderPass2KHR()" : "vkCmdEndRenderPass()"; RENDER_PASS_STATE *rp_state = cb_state->activeRenderPass.get(); if (rp_state) { if (cb_state->activeSubpass != rp_state->createInfo.subpassCount - 1) { vuid = use_rp2 ? "VUID-vkCmdEndRenderPass2-None-03103" : "VUID-vkCmdEndRenderPass-None-00910"; skip |= LogError(commandBuffer, vuid, "%s: Called before reaching final subpass.", function_name); } } vuid = use_rp2 ? "VUID-vkCmdEndRenderPass2-renderpass" : "VUID-vkCmdEndRenderPass-renderpass"; skip |= OutsideRenderPass(cb_state, function_name, vuid); vuid = use_rp2 ? 
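    // Render-pass-scope commands may only be recorded in primary command buffers on queues with graphics support;
    // both constraints are checked below.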
"VUID-vkCmdEndRenderPass2-bufferlevel" : "VUID-vkCmdEndRenderPass-bufferlevel"; skip |= ValidatePrimaryCommandBuffer(cb_state, function_name, vuid); vuid = use_rp2 ? "VUID-vkCmdEndRenderPass2-commandBuffer-cmdpool" : "VUID-vkCmdEndRenderPass-commandBuffer-cmdpool"; skip |= ValidateCmdQueueFlags(cb_state, function_name, VK_QUEUE_GRAPHICS_BIT, vuid); const CMD_TYPE cmd_type = use_rp2 ? CMD_ENDRENDERPASS2 : CMD_ENDRENDERPASS; skip |= ValidateCmd(cb_state, cmd_type, function_name); return skip; } bool CoreChecks::PreCallValidateCmdEndRenderPass(VkCommandBuffer commandBuffer) const { bool skip = ValidateCmdEndRenderPass(RENDER_PASS_VERSION_1, commandBuffer); return skip; } bool CoreChecks::PreCallValidateCmdEndRenderPass2KHR(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo) const { bool skip = ValidateCmdEndRenderPass(RENDER_PASS_VERSION_2, commandBuffer); return skip; } bool CoreChecks::PreCallValidateCmdEndRenderPass2(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo) const { bool skip = ValidateCmdEndRenderPass(RENDER_PASS_VERSION_2, commandBuffer); return skip; } void CoreChecks::RecordCmdEndRenderPassLayouts(VkCommandBuffer commandBuffer) { CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); TransitionFinalSubpassLayouts(cb_state, cb_state->activeRenderPassBeginInfo.ptr(), cb_state->activeFramebuffer.get()); } void CoreChecks::PostCallRecordCmdEndRenderPass(VkCommandBuffer commandBuffer) { // Record the end at the CoreLevel to ensure StateTracker cleanup doesn't step on anything we need. RecordCmdEndRenderPassLayouts(commandBuffer); StateTracker::PostCallRecordCmdEndRenderPass(commandBuffer); } void CoreChecks::PostCallRecordCmdEndRenderPass2KHR(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo) { // Record the end at the CoreLevel to ensure StateTracker cleanup doesn't step on anything we need. RecordCmdEndRenderPassLayouts(commandBuffer); StateTracker::PostCallRecordCmdEndRenderPass2KHR(commandBuffer, pSubpassEndInfo); } void CoreChecks::PostCallRecordCmdEndRenderPass2(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo) { RecordCmdEndRenderPassLayouts(commandBuffer); StateTracker::PostCallRecordCmdEndRenderPass2(commandBuffer, pSubpassEndInfo); } bool CoreChecks::ValidateFramebuffer(VkCommandBuffer primaryBuffer, const CMD_BUFFER_STATE *pCB, VkCommandBuffer secondaryBuffer, const CMD_BUFFER_STATE *pSubCB, const char *caller) const { bool skip = false; if (!pSubCB->beginInfo.pInheritanceInfo) { return skip; } VkFramebuffer primary_fb = pCB->activeFramebuffer ? 
pCB->activeFramebuffer->framebuffer : VK_NULL_HANDLE; VkFramebuffer secondary_fb = pSubCB->beginInfo.pInheritanceInfo->framebuffer; if (secondary_fb != VK_NULL_HANDLE) { if (primary_fb != secondary_fb) { LogObjectList objlist(primaryBuffer); objlist.add(secondaryBuffer); objlist.add(secondary_fb); objlist.add(primary_fb); skip |= LogError(objlist, "VUID-vkCmdExecuteCommands-pCommandBuffers-00099", "vkCmdExecuteCommands() called w/ invalid secondary %s which has a %s" " that is not the same as the primary command buffer's current active %s.", report_data->FormatHandle(secondaryBuffer).c_str(), report_data->FormatHandle(secondary_fb).c_str(), report_data->FormatHandle(primary_fb).c_str()); } auto fb = GetFramebufferState(secondary_fb); if (!fb) { LogObjectList objlist(primaryBuffer); objlist.add(secondaryBuffer); objlist.add(secondary_fb); skip |= LogError(objlist, kVUID_Core_DrawState_InvalidSecondaryCommandBuffer, "vkCmdExecuteCommands() called w/ invalid %s which has invalid %s.", report_data->FormatHandle(secondaryBuffer).c_str(), report_data->FormatHandle(secondary_fb).c_str()); return skip; } } return skip; } bool CoreChecks::ValidateSecondaryCommandBufferState(const CMD_BUFFER_STATE *pCB, const CMD_BUFFER_STATE *pSubCB) const { bool skip = false; unordered_set<int> active_types; if (!disabled[query_validation]) { for (auto query_object : pCB->activeQueries) { auto query_pool_state = GetQueryPoolState(query_object.pool); if (query_pool_state) { if (query_pool_state->createInfo.queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS && pSubCB->beginInfo.pInheritanceInfo) { VkQueryPipelineStatisticFlags cmd_buf_statistics = pSubCB->beginInfo.pInheritanceInfo->pipelineStatistics; if ((cmd_buf_statistics & query_pool_state->createInfo.pipelineStatistics) != cmd_buf_statistics) { LogObjectList objlist(pCB->commandBuffer); objlist.add(query_object.pool); skip |= LogError( objlist, "VUID-vkCmdExecuteCommands-commandBuffer-00104", "vkCmdExecuteCommands() called w/ invalid %s which has invalid active %s" ". 
Pipeline statistics are being queried, so the command buffer must have all bits set on the queryPool.", report_data->FormatHandle(pCB->commandBuffer).c_str(), report_data->FormatHandle(query_object.pool).c_str()); } } active_types.insert(query_pool_state->createInfo.queryType); } } for (auto query_object : pSubCB->startedQueries) { auto query_pool_state = GetQueryPoolState(query_object.pool); if (query_pool_state && active_types.count(query_pool_state->createInfo.queryType)) { LogObjectList objlist(pCB->commandBuffer); objlist.add(query_object.pool); skip |= LogError(objlist, kVUID_Core_DrawState_InvalidSecondaryCommandBuffer, "vkCmdExecuteCommands() called w/ invalid %s which has invalid active %s" " of type %d but a query of that type has been started on secondary %s.", report_data->FormatHandle(pCB->commandBuffer).c_str(), report_data->FormatHandle(query_object.pool).c_str(), query_pool_state->createInfo.queryType, report_data->FormatHandle(pSubCB->commandBuffer).c_str()); } } } auto primary_pool = pCB->command_pool.get(); auto secondary_pool = pSubCB->command_pool.get(); if (primary_pool && secondary_pool && (primary_pool->queueFamilyIndex != secondary_pool->queueFamilyIndex)) { LogObjectList objlist(pSubCB->commandBuffer); objlist.add(pCB->commandBuffer); skip |= LogError(objlist, "VUID-vkCmdExecuteCommands-pCommandBuffers-00094", "vkCmdExecuteCommands(): Primary %s created in queue family %d has secondary " "%s created in queue family %d.", report_data->FormatHandle(pCB->commandBuffer).c_str(), primary_pool->queueFamilyIndex, report_data->FormatHandle(pSubCB->commandBuffer).c_str(), secondary_pool->queueFamilyIndex); } return skip; } bool CoreChecks::PreCallValidateCmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBuffersCount, const VkCommandBuffer *pCommandBuffers) const { const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); bool skip = false; const CMD_BUFFER_STATE *sub_cb_state = nullptr; std::unordered_set<const CMD_BUFFER_STATE *> linked_command_buffers; for (uint32_t i = 0; i < commandBuffersCount; i++) { sub_cb_state = GetCBState(pCommandBuffers[i]); assert(sub_cb_state); if (VK_COMMAND_BUFFER_LEVEL_PRIMARY == sub_cb_state->createInfo.level) { skip |= LogError(pCommandBuffers[i], "VUID-vkCmdExecuteCommands-pCommandBuffers-00088", "vkCmdExecuteCommands() called w/ Primary %s in element %u of pCommandBuffers array. All " "cmd buffers in pCommandBuffers array must be secondary.", report_data->FormatHandle(pCommandBuffers[i]).c_str(), i); } else if (VK_COMMAND_BUFFER_LEVEL_SECONDARY == sub_cb_state->createInfo.level) { if (sub_cb_state->beginInfo.pInheritanceInfo != nullptr) { const auto secondary_rp_state = GetRenderPassState(sub_cb_state->beginInfo.pInheritanceInfo->renderPass); if (cb_state->activeRenderPass && !(sub_cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) { LogObjectList objlist(pCommandBuffers[i]); objlist.add(cb_state->activeRenderPass->renderPass); skip |= LogError(objlist, "VUID-vkCmdExecuteCommands-pCommandBuffers-00096", "vkCmdExecuteCommands(): Secondary %s is executed within a %s " "instance scope, but the Secondary Command Buffer does not have the " "VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT set in VkCommandBufferBeginInfo::flags when " "the vkBeginCommandBuffer() was called.", report_data->FormatHandle(pCommandBuffers[i]).c_str(), report_data->FormatHandle(cb_state->activeRenderPass->renderPass).c_str()); } else if (!cb_state->activeRenderPass && (sub_cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) { skip |= LogError(pCommandBuffers[i], "VUID-vkCmdExecuteCommands-pCommandBuffers-00100", "vkCmdExecuteCommands(): Secondary %s is executed outside a render pass " "instance scope, but the Secondary Command Buffer does have the " "VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT set in VkCommandBufferBeginInfo::flags when " "the vkBeginCommandBuffer() was called.", report_data->FormatHandle(pCommandBuffers[i]).c_str()); } else if (cb_state->activeRenderPass && (sub_cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) { // Make sure render pass is compatible with parent command buffer pass if has continue if (cb_state->activeRenderPass->renderPass != secondary_rp_state->renderPass) { skip |= ValidateRenderPassCompatibility( "primary command buffer", cb_state->activeRenderPass.get(), "secondary command buffer", secondary_rp_state, "vkCmdExecuteCommands()", "VUID-vkCmdExecuteCommands-pInheritanceInfo-00098"); } // If framebuffer for secondary CB is not NULL, then it must match active FB from primaryCB skip |= ValidateFramebuffer(commandBuffer, cb_state, pCommandBuffers[i], sub_cb_state, "vkCmdExecuteCommands()"); if (!sub_cb_state->cmd_execute_commands_functions.empty()) { // Inherit primary's activeFramebuffer while running the validate functions for (auto &function : sub_cb_state->cmd_execute_commands_functions) { skip |= function(cb_state, cb_state->activeFramebuffer.get()); } } } } } // TODO(mlentine): Move more logic into this method skip |= ValidateSecondaryCommandBufferState(cb_state, sub_cb_state); skip |= ValidateCommandBufferState(sub_cb_state, "vkCmdExecuteCommands()", 0, "VUID-vkCmdExecuteCommands-pCommandBuffers-00089"); if (!(sub_cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) { if (sub_cb_state->in_use.load()) { skip |= LogError( cb_state->commandBuffer, "VUID-vkCmdExecuteCommands-pCommandBuffers-00091", "vkCmdExecuteCommands(): Cannot execute pending %s without VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set.", report_data->FormatHandle(sub_cb_state->commandBuffer).c_str()); } // We use a const_cast, because one cannot query a container keyed on a non-const pointer using a const pointer if (cb_state->linkedCommandBuffers.count(const_cast<CMD_BUFFER_STATE *>(sub_cb_state))) { LogObjectList objlist(cb_state->commandBuffer); objlist.add(sub_cb_state->commandBuffer); skip |= LogError(objlist, "VUID-vkCmdExecuteCommands-pCommandBuffers-00092", "vkCmdExecuteCommands(): Cannot execute %s without VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT " "set if previously executed in %s.", report_data->FormatHandle(sub_cb_state->commandBuffer).c_str(), report_data->FormatHandle(cb_state->commandBuffer).c_str()); } const auto insert_pair = linked_command_buffers.insert(sub_cb_state); if (!insert_pair.second) { skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdExecuteCommands-pCommandBuffers-00093", "vkCmdExecuteCommands(): Cannot duplicate %s in pCommandBuffers without " "VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set.", report_data->FormatHandle(sub_cb_state->commandBuffer).c_str()); } if (cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT) { // Warn that a non-simultaneous secondary cmd buffer renders the primary non-simultaneous LogObjectList objlist(pCommandBuffers[i]); objlist.add(cb_state->commandBuffer); skip |= LogWarning(objlist, kVUID_Core_DrawState_InvalidCommandBufferSimultaneousUse, "vkCmdExecuteCommands(): Secondary %s does not have " "VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set and will cause primary " "%s to be treated as if it does not have " "VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set, even though it does.", report_data->FormatHandle(pCommandBuffers[i]).c_str(), report_data->FormatHandle(cb_state->commandBuffer).c_str()); } } if (!cb_state->activeQueries.empty() && !enabled_features.core.inheritedQueries) { skip |= LogError(pCommandBuffers[i], "VUID-vkCmdExecuteCommands-commandBuffer-00101", "vkCmdExecuteCommands(): Secondary %s cannot be submitted with a query in flight and " "inherited queries not supported on this device.", report_data->FormatHandle(pCommandBuffers[i]).c_str()); } // Validate initial layout uses vs. the primary cmd buffer state // Novel Valid usage: "UNASSIGNED-vkCmdExecuteCommands-commandBuffer-00001" // initial layout usage of secondary command buffer resources must match parent command buffer const auto *const_cb_state = static_cast<const CMD_BUFFER_STATE *>(cb_state); for (const auto &sub_layout_map_entry : sub_cb_state->image_layout_map) { const auto image = sub_layout_map_entry.first; const auto *image_state = GetImageState(image); if (!image_state) continue; // Can't set layouts of a dead image const auto *cb_subres_map = GetImageSubresourceLayoutMap(const_cb_state, image); // Const getter can be null in which case we have nothing to check against for this image... if (!cb_subres_map) continue; const auto &sub_cb_subres_map = sub_layout_map_entry.second; // Validate the initial_uses, that they match the current state of the primary cb, or absent a current state, // that they match any initial_layout.
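// Illustrative example (editorial note, not from the spec text): if a secondary command buffer recorded a copy that
// expects subresource S of image I to be in VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, then at this vkCmdExecuteCommands
// call site the primary's tracked layout for (I, S), meaning its current layout, or its initial layout when no
// current layout has been recorded, must also be TRANSFER_DST_OPTIMAL. A secondary initial layout of
// VK_IMAGE_LAYOUT_UNDEFINED places no requirement and is skipped by the loop below.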
for (const auto &subres_layout : *sub_cb_subres_map) { const auto &sub_layout = subres_layout.initial_layout; const auto &subresource = subres_layout.subresource; if (VK_IMAGE_LAYOUT_UNDEFINED == sub_layout) continue; // secondary doesn't care about current or initial // Look up the layout to compare to the initial layout of the sub command buffer (current else initial) auto cb_layouts = cb_subres_map->GetSubresourceLayouts(subresource); auto cb_layout = cb_layouts.current_layout; const char *layout_type = "current"; if (cb_layouts.current_layout == kInvalidLayout) { cb_layout = cb_layouts.initial_layout; layout_type = "initial"; } if ((cb_layout != kInvalidLayout) && (cb_layout != sub_layout)) { skip |= LogError(pCommandBuffers[i], "UNASSIGNED-vkCmdExecuteCommands-commandBuffer-00001", "%s: Executed secondary command buffer using %s (subresource: aspectMask 0x%X array layer %u, " "mip level %u) which expects layout %s--instead, image %s layout is %s.", "vkCmdExecuteCommands():", report_data->FormatHandle(image).c_str(), subresource.aspectMask, subresource.arrayLayer, subresource.mipLevel, string_VkImageLayout(sub_layout), layout_type, string_VkImageLayout(cb_layout)); } } } // All command buffers involved must be protected or unprotected if ((cb_state->unprotected == false) && (sub_cb_state->unprotected == true)) { LogObjectList objlist(cb_state->commandBuffer); objlist.add(sub_cb_state->commandBuffer); skip |= LogError( objlist, "VUID-vkCmdExecuteCommands-commandBuffer-01820", "vkCmdExecuteCommands(): command buffer %s is protected while secondary command buffer %s is unprotected", report_data->FormatHandle(cb_state->commandBuffer).c_str(), report_data->FormatHandle(sub_cb_state->commandBuffer).c_str()); } else if ((cb_state->unprotected == true) && (sub_cb_state->unprotected == false)) { LogObjectList objlist(cb_state->commandBuffer); objlist.add(sub_cb_state->commandBuffer); skip |= LogError( objlist, "VUID-vkCmdExecuteCommands-commandBuffer-01821", "vkCmdExecuteCommands(): command buffer %s is unprotected while secondary command buffer %s is protected", report_data->FormatHandle(cb_state->commandBuffer).c_str(), report_data->FormatHandle(sub_cb_state->commandBuffer).c_str()); } } skip |= ValidatePrimaryCommandBuffer(cb_state, "vkCmdExecuteCommands()", "VUID-vkCmdExecuteCommands-bufferlevel"); skip |= ValidateCmdQueueFlags(cb_state, "vkCmdExecuteCommands()", VK_QUEUE_TRANSFER_BIT | VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, "VUID-vkCmdExecuteCommands-commandBuffer-cmdpool"); skip |= ValidateCmd(cb_state, CMD_EXECUTECOMMANDS, "vkCmdExecuteCommands()"); return skip; } bool CoreChecks::PreCallValidateMapMemory(VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size, VkFlags flags, void **ppData) const { bool skip = false; const DEVICE_MEMORY_STATE *mem_info = GetDevMemState(mem); if (mem_info) { if ((phys_dev_mem_props.memoryTypes[mem_info->alloc_info.memoryTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) { skip = LogError(mem, "VUID-vkMapMemory-memory-00682", "Mapping Memory without VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set: %s.", report_data->FormatHandle(mem).c_str()); } if (mem_info->multi_instance) { skip |= LogError(mem, "VUID-vkMapMemory-memory-00683", "Memory (%s) must not have been allocated with multiple instances -- either by supplying a deviceMask " "with more than one bit set, or by allocation from a heap with the MULTI_INSTANCE heap flag set.", report_data->FormatHandle(mem).c_str()); } skip |= ValidateMapMemRange(mem_info, offset, size); } return skip; } bool CoreChecks::PreCallValidateUnmapMemory(VkDevice device, VkDeviceMemory mem) const { bool skip = false; const auto mem_info = GetDevMemState(mem); if (mem_info && !mem_info->mapped_range.size) { // Valid Usage: memory must currently be mapped skip |= LogError(mem, "VUID-vkUnmapMemory-memory-00689", "Unmapping Memory without memory being mapped: %s.", report_data->FormatHandle(mem).c_str()); } return skip; } bool CoreChecks::ValidateMemoryIsMapped(const char *funcName, uint32_t memRangeCount, const VkMappedMemoryRange *pMemRanges) const { bool skip = false; for (uint32_t i = 0; i < memRangeCount; ++i) { auto mem_info = GetDevMemState(pMemRanges[i].memory); if (mem_info) { // Makes sure the memory is already mapped if (mem_info->mapped_range.size == 0) { skip |= LogError(pMemRanges[i].memory, "VUID-VkMappedMemoryRange-memory-00684", "%s: Attempting to use memory (%s) that is not currently host mapped.", funcName, report_data->FormatHandle(pMemRanges[i].memory).c_str()); } if (pMemRanges[i].size == VK_WHOLE_SIZE) { if (mem_info->mapped_range.offset > pMemRanges[i].offset) { skip |= LogError(pMemRanges[i].memory, "VUID-VkMappedMemoryRange-size-00686", "%s: Flush/Invalidate offset (" PRINTF_SIZE_T_SPECIFIER ") is less than Memory Object's offset (" PRINTF_SIZE_T_SPECIFIER ").", funcName, static_cast<size_t>(pMemRanges[i].offset), static_cast<size_t>(mem_info->mapped_range.offset)); } } else { const uint64_t data_end = (mem_info->mapped_range.size == VK_WHOLE_SIZE) ? mem_info->alloc_info.allocationSize : (mem_info->mapped_range.offset + mem_info->mapped_range.size); if ((mem_info->mapped_range.offset > pMemRanges[i].offset) || (data_end < (pMemRanges[i].offset + pMemRanges[i].size))) { skip |= LogError(pMemRanges[i].memory, "VUID-VkMappedMemoryRange-size-00685", "%s: Flush/Invalidate size or offset (" PRINTF_SIZE_T_SPECIFIER ", " PRINTF_SIZE_T_SPECIFIER ") exceeds the Memory Object's upper-bound (" PRINTF_SIZE_T_SPECIFIER ").", funcName, static_cast<size_t>(pMemRanges[i].offset + pMemRanges[i].size), static_cast<size_t>(pMemRanges[i].offset), static_cast<size_t>(data_end)); } } } } return skip; } bool CoreChecks::ValidateMappedMemoryRangeDeviceLimits(const char *func_name, uint32_t mem_range_count, const VkMappedMemoryRange *mem_ranges) const { bool skip = false; for (uint32_t i = 0; i < mem_range_count; ++i) { const uint64_t atom_size = phys_dev_props.limits.nonCoherentAtomSize; const VkDeviceSize offset = mem_ranges[i].offset; const VkDeviceSize size = mem_ranges[i].size; if (SafeModulo(offset, atom_size) != 0) { skip |= LogError(mem_ranges->memory, "VUID-VkMappedMemoryRange-offset-00687", "%s: Offset in pMemRanges[%d] is 0x%" PRIxLEAST64 ", which is not a multiple of VkPhysicalDeviceLimits::nonCoherentAtomSize (0x%" PRIxLEAST64 ").", func_name, i, offset, atom_size); } auto mem_info = GetDevMemState(mem_ranges[i].memory); if (mem_info) { const VkDeviceSize allocation_size = mem_info->alloc_info.allocationSize; if ((size != VK_WHOLE_SIZE) && (size + offset != allocation_size) && (SafeModulo(size, atom_size) != 0)) { skip |= LogError(mem_ranges->memory, "VUID-VkMappedMemoryRange-size-01390", "%s: Size in pMemRanges[%d] is 0x%" PRIxLEAST64 ", which is not a multiple of VkPhysicalDeviceLimits::nonCoherentAtomSize (0x%" PRIxLEAST64 ").", func_name, i, size, atom_size); } else if ((size == VK_WHOLE_SIZE) && SafeModulo(allocation_size - offset, atom_size) != 0) { skip |= LogError(mem_ranges->memory, "VUID-VkMappedMemoryRange-size-01389", "%s: Size in pMemRanges[%d] is VK_WHOLE_SIZE and allocationSize minus offset (0x%" PRIxLEAST64 " - 0x%" PRIxLEAST64 ") is not a multiple of VkPhysicalDeviceLimits::nonCoherentAtomSize (0x%" PRIxLEAST64 ").", func_name, i, allocation_size, offset, atom_size); } } } return skip; } bool CoreChecks::PreCallValidateFlushMappedMemoryRanges(VkDevice device, uint32_t memRangeCount, const VkMappedMemoryRange *pMemRanges) const { bool skip = false; skip |= ValidateMappedMemoryRangeDeviceLimits("vkFlushMappedMemoryRanges", memRangeCount, pMemRanges); skip |= ValidateMemoryIsMapped("vkFlushMappedMemoryRanges", memRangeCount, pMemRanges); return skip; } bool CoreChecks::PreCallValidateInvalidateMappedMemoryRanges(VkDevice device, uint32_t memRangeCount, const VkMappedMemoryRange *pMemRanges) const { bool skip = false; skip |= ValidateMappedMemoryRangeDeviceLimits("vkInvalidateMappedMemoryRanges", memRangeCount, pMemRanges); skip |= ValidateMemoryIsMapped("vkInvalidateMappedMemoryRanges", memRangeCount, pMemRanges); return skip; } bool CoreChecks::PreCallValidateGetDeviceMemoryCommitment(VkDevice device, VkDeviceMemory mem, VkDeviceSize *pCommittedMem) const { bool skip = false; const auto mem_info = GetDevMemState(mem); if (mem_info) { if ((phys_dev_mem_props.memoryTypes[mem_info->alloc_info.memoryTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) == 0) { skip = LogError(mem, "VUID-vkGetDeviceMemoryCommitment-memory-00690", "vkGetDeviceMemoryCommitment(): Querying commitment for memory without " "VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT set: %s.", report_data->FormatHandle(mem).c_str()); } } return skip; } bool CoreChecks::ValidateBindImageMemory(uint32_t bindInfoCount, const VkBindImageMemoryInfo *pBindInfos, const char *api_name) const { bool skip = false; bool bind_image_mem_2 = strcmp(api_name, "vkBindImageMemory()") != 0; char error_prefix[128]; strcpy(error_prefix, api_name); // Track all image sub resources if they are bound for bind_image_mem_2 // uint32_t[3] is which index in pBindInfos for max 3 planes // Non disjoint images act as a single plane std::unordered_map<VkImage, std::array<uint32_t, 3>> resources_bound; for (uint32_t i = 0; i < bindInfoCount; i++) { if (bind_image_mem_2 == true) { sprintf(error_prefix, "%s pBindInfos[%u]", api_name, i); } const VkBindImageMemoryInfo &bind_info = pBindInfos[i]; const IMAGE_STATE *image_state = GetImageState(bind_info.image); if (image_state) { // Track objects tied to memory skip |= ValidateSetMemBinding(bind_info.memory, VulkanTypedHandle(bind_info.image, kVulkanObjectTypeImage), error_prefix); const auto plane_info = LvlFindInChain<VkBindImagePlaneMemoryInfo>(bind_info.pNext); const auto mem_info = GetDevMemState(bind_info.memory); // Need extra check for disjoint flag in case called without bindImage2 and don't want false positive errors // no 'else' case as if that happens another VUID is already being triggered for it being invalid if ((plane_info == nullptr) && (image_state->disjoint == false)) { // Check non-disjoint images' VkMemoryRequirements // All validation using the image_state->requirements for external AHB is checked in the Android-only section if (image_state->external_ahb == false) { const VkMemoryRequirements mem_req = image_state->requirements; // Validate memory requirements alignment if (SafeModulo(bind_info.memoryOffset, mem_req.alignment) != 0) { const char *validation_error; if (bind_image_mem_2 == false) { validation_error = "VUID-vkBindImageMemory-memoryOffset-01048"; } else if (device_extensions.vk_khr_sampler_ycbcr_conversion) { validation_error = "VUID-VkBindImageMemoryInfo-pNext-01616"; } else { validation_error = "VUID-VkBindImageMemoryInfo-memoryOffset-01613"; } skip |= LogError(bind_info.image, validation_error, "%s: memoryOffset is 0x%" PRIxLEAST64 " but must be an integer multiple of the VkMemoryRequirements::alignment value 0x%" PRIxLEAST64 ", returned from a call to vkGetImageMemoryRequirements with image.", error_prefix, bind_info.memoryOffset, mem_req.alignment); } if (mem_info) { safe_VkMemoryAllocateInfo alloc_info = mem_info->alloc_info; // Validate memory requirements size if (mem_req.size > alloc_info.allocationSize - bind_info.memoryOffset) { const char *validation_error; if (bind_image_mem_2 == false) { validation_error = "VUID-vkBindImageMemory-size-01049"; } else if (device_extensions.vk_khr_sampler_ycbcr_conversion) { validation_error = "VUID-VkBindImageMemoryInfo-pNext-01617"; } else { validation_error = "VUID-VkBindImageMemoryInfo-memory-01614"; } skip |= LogError(bind_info.image, validation_error, "%s: memory size minus memoryOffset is 0x%" PRIxLEAST64 " but must be at least as large as VkMemoryRequirements::size value 0x%" PRIxLEAST64 ", returned from a call to vkGetImageMemoryRequirements with image.", error_prefix, alloc_info.allocationSize - bind_info.memoryOffset, mem_req.size); } // Validate memory type used { const char *validation_error; if (bind_image_mem_2 == false) { validation_error = "VUID-vkBindImageMemory-memory-01047"; } else if (device_extensions.vk_khr_sampler_ycbcr_conversion) { validation_error = "VUID-VkBindImageMemoryInfo-pNext-01615"; } else { validation_error = "VUID-VkBindImageMemoryInfo-memory-01612"; } skip |= ValidateMemoryTypes(mem_info, mem_req.memoryTypeBits, error_prefix, validation_error); } } } if (bind_image_mem_2 == true) { // since it's a non-disjoint image, finding the VkImage in the map means it is a duplicate auto it = resources_bound.find(image_state->image); if (it == resources_bound.end()) { std::array<uint32_t, 3> bound_index = {i, UINT32_MAX, UINT32_MAX}; resources_bound.emplace(image_state->image, bound_index); } else { skip |= LogError( bind_info.image, "VUID-vkBindImageMemory2-pBindInfos-04006", "%s: The same non-disjoint image resource is being bound twice at pBindInfos[%d] and pBindInfos[%d]", error_prefix, it->second[0], i); } } } else if ((plane_info != nullptr) && (image_state->disjoint == true)) { // Check disjoint images' VkMemoryRequirements for the given plane int plane = 0; // All validation using the image_state->plane*_requirements for external AHB is checked in the Android-only section if (image_state->external_ahb == false) { VkMemoryRequirements disjoint_mem_req = {}; const VkImageAspectFlagBits aspect = plane_info->planeAspect; switch (aspect) { case VK_IMAGE_ASPECT_PLANE_0_BIT: plane = 0; disjoint_mem_req = image_state->plane0_requirements; break; case VK_IMAGE_ASPECT_PLANE_1_BIT: plane = 1; disjoint_mem_req = image_state->plane1_requirements; break; case VK_IMAGE_ASPECT_PLANE_2_BIT: plane = 2; disjoint_mem_req = image_state->plane2_requirements; break; default: assert(false); // parameter validation should have caught this break; } // Validate memory requirements alignment if (SafeModulo(bind_info.memoryOffset, disjoint_mem_req.alignment) != 0) { skip |= LogError( bind_info.image, "VUID-VkBindImageMemoryInfo-pNext-01620", "%s: memoryOffset is 0x%" PRIxLEAST64 " but must be an integer multiple of the VkMemoryRequirements::alignment value 0x%" PRIxLEAST64 ", returned from a call to vkGetImageMemoryRequirements2 with disjoint image for aspect plane %s.", error_prefix, bind_info.memoryOffset, disjoint_mem_req.alignment, string_VkImageAspectFlagBits(aspect)); } if (mem_info) { safe_VkMemoryAllocateInfo alloc_info = mem_info->alloc_info; // Validate memory requirements size if (disjoint_mem_req.size > alloc_info.allocationSize - bind_info.memoryOffset) { skip |= LogError( bind_info.image, "VUID-VkBindImageMemoryInfo-pNext-01621", "%s: memory size minus memoryOffset is 0x%" PRIxLEAST64 " but must be at least as large as VkMemoryRequirements::size value 0x%" PRIxLEAST64 ", returned from a call to vkGetImageMemoryRequirements with disjoint image for aspect plane %s.", error_prefix, alloc_info.allocationSize - bind_info.memoryOffset, disjoint_mem_req.size, string_VkImageAspectFlagBits(aspect)); } // Validate memory type used { skip |= ValidateMemoryTypes(mem_info, disjoint_mem_req.memoryTypeBits, error_prefix, "VUID-VkBindImageMemoryInfo-pNext-01619"); } } } auto it = resources_bound.find(image_state->image); if (it == resources_bound.end()) { std::array<uint32_t, 3> bound_index = {UINT32_MAX, UINT32_MAX, UINT32_MAX}; bound_index[plane] = i; resources_bound.emplace(image_state->image, bound_index); } else { if (it->second[plane] == UINT32_MAX) { it->second[plane] = i; } else { skip |= LogError(bind_info.image, "VUID-vkBindImageMemory2-pBindInfos-04006", "%s: The same disjoint image sub-resource for plane %d is being bound twice at " "pBindInfos[%d] and pBindInfos[%d]", error_prefix, plane, it->second[plane], i); } } } if (mem_info) { // Validate bound memory range information // if memory is exported to an AHB then the mem_info->allocationSize must be zero and this check is not needed if ((mem_info->is_export == false) || ((mem_info->export_handle_type_flags & VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID) == 0)) { skip |= ValidateInsertImageMemoryRange(bind_info.image, mem_info, bind_info.memoryOffset, error_prefix); } // Validate dedicated allocation if (mem_info->is_dedicated) { if (enabled_features.dedicated_allocation_image_aliasing_features.dedicatedAllocationImageAliasing) { const auto orig_image_state = GetImageState(mem_info->dedicated_image); const auto current_image_state = GetImageState(bind_info.image); if ((bind_info.memoryOffset != 0) || !orig_image_state || !current_image_state || !current_image_state->IsCreateInfoDedicatedAllocationImageAliasingCompatible( orig_image_state->createInfo)) { const char *validation_error; if (bind_image_mem_2 == false) { validation_error = "VUID-vkBindImageMemory-memory-02629"; } else { validation_error = "VUID-VkBindImageMemoryInfo-memory-02629"; } LogObjectList objlist(bind_info.image); objlist.add(bind_info.memory); objlist.add(mem_info->dedicated_image); skip |= LogError( objlist, validation_error, "%s: for dedicated memory allocation %s, VkMemoryDedicatedAllocateInfo:: %s must be compatible " "with %s and memoryOffset 0x%" PRIxLEAST64 " must be zero.", error_prefix, report_data->FormatHandle(bind_info.memory).c_str(), report_data->FormatHandle(mem_info->dedicated_image).c_str(), report_data->FormatHandle(bind_info.image).c_str(), bind_info.memoryOffset); } } else { if ((mem_info->dedicated_image != VK_NULL_HANDLE) && ((bind_info.memoryOffset != 0) || (mem_info->dedicated_image != bind_info.image))) { const char *validation_error; if (bind_image_mem_2 == false) { validation_error = "VUID-vkBindImageMemory-memory-01509"; } else { validation_error = "VUID-VkBindImageMemoryInfo-memory-01509"; } LogObjectList objlist(bind_info.image); objlist.add(bind_info.memory);
objlist.add(mem_info->dedicated_image); skip |= LogError(objlist, validation_error, "%s: for dedicated memory allocation %s, VkMemoryDedicatedAllocateInfo:: %s must be equal " "to %s and memoryOffset 0x%" PRIxLEAST64 " must be zero.", error_prefix, report_data->FormatHandle(bind_info.memory).c_str(), report_data->FormatHandle(mem_info->dedicated_image).c_str(), report_data->FormatHandle(bind_info.image).c_str(), bind_info.memoryOffset); } } } // Validate export memory handles if ((mem_info->export_handle_type_flags != 0) && ((mem_info->export_handle_type_flags & image_state->external_memory_handle) == 0)) { const char *vuid = bind_image_mem_2 ? "VUID-VkBindImageMemoryInfo-memory-02728" : "VUID-vkBindImageMemory-memory-02728"; LogObjectList objlist(bind_info.image); objlist.add(bind_info.memory); skip |= LogError(objlist, vuid, "%s: The VkDeviceMemory (%s) has an external handleType of %s which does not include at least " "one handle from VkImage (%s) handleType %s.", error_prefix, report_data->FormatHandle(bind_info.memory).c_str(), string_VkExternalMemoryHandleTypeFlags(mem_info->export_handle_type_flags).c_str(), report_data->FormatHandle(bind_info.image).c_str(), string_VkExternalMemoryHandleTypeFlags(image_state->external_memory_handle).c_str()); } // Validate import memory handles if (mem_info->is_import_ahb == true) { skip |= ValidateImageImportedHandleANDROID(api_name, image_state->external_memory_handle, bind_info.memory, bind_info.image); } else if (mem_info->is_import == true) { if ((mem_info->import_handle_type_flags & image_state->external_memory_handle) == 0) { const char *vuid = nullptr; if ((bind_image_mem_2) && (device_extensions.vk_android_external_memory_android_hardware_buffer)) { vuid = "VUID-VkBindImageMemoryInfo-memory-02989"; } else if ((!bind_image_mem_2) && (device_extensions.vk_android_external_memory_android_hardware_buffer)) { vuid = "VUID-vkBindImageMemory-memory-02989"; } else if ((bind_image_mem_2) && (!device_extensions.vk_android_external_memory_android_hardware_buffer)) { vuid = "VUID-VkBindImageMemoryInfo-memory-02729"; } else if ((!bind_image_mem_2) && (!device_extensions.vk_android_external_memory_android_hardware_buffer)) { vuid = "VUID-vkBindImageMemory-memory-02729"; } LogObjectList objlist(bind_info.image); objlist.add(bind_info.memory); skip |= LogError(objlist, vuid, "%s: The VkDeviceMemory (%s) was created with an import operation with handleType of %s " "which is not set in the VkImage (%s) VkExternalMemoryImageCreateInfo::handleType (%s)", api_name, report_data->FormatHandle(bind_info.memory).c_str(), string_VkExternalMemoryHandleTypeFlags(mem_info->import_handle_type_flags).c_str(), report_data->FormatHandle(bind_info.image).c_str(), string_VkExternalMemoryHandleTypeFlags(image_state->external_memory_handle).c_str()); } } // Validate mix of protected buffer and memory if ((image_state->unprotected == false) && (mem_info->unprotected == true)) { const char *vuid = bind_image_mem_2 ? "VUID-VkBindImageMemoryInfo-None-01901" : "VUID-vkBindImageMemory-None-01901"; LogObjectList objlist(bind_info.image); objlist.add(bind_info.memory); skip |= LogError(objlist, vuid, "%s: The VkDeviceMemory (%s) was not created with protected memory but the VkImage (%s) was " "set to use protected memory.", api_name, report_data->FormatHandle(bind_info.memory).c_str(), report_data->FormatHandle(bind_info.image).c_str()); } else if ((image_state->unprotected == true) && (mem_info->unprotected == false)) { const char *vuid = bind_image_mem_2 ? 
"VUID-VkBindImageMemoryInfo-None-01902" : "VUID-vkBindImageMemory-None-01902"; LogObjectList objlist(bind_info.image); objlist.add(bind_info.memory); skip |= LogError(objlist, vuid, "%s: The VkDeviceMemory (%s) was created with protected memory but the VkImage (%s) was not " "set to use protected memory.", api_name, report_data->FormatHandle(bind_info.memory).c_str(), report_data->FormatHandle(bind_info.image).c_str()); } } const auto swapchain_info = LvlFindInChain<VkBindImageMemorySwapchainInfoKHR>(bind_info.pNext); if (swapchain_info) { if (bind_info.memory != VK_NULL_HANDLE) { skip |= LogError(bind_info.image, "VUID-VkBindImageMemoryInfo-pNext-01631", "%s: %s is not VK_NULL_HANDLE.", error_prefix, report_data->FormatHandle(bind_info.memory).c_str()); } if (image_state->create_from_swapchain != swapchain_info->swapchain) { LogObjectList objlist(image_state->image); objlist.add(image_state->create_from_swapchain); objlist.add(swapchain_info->swapchain); skip |= LogError( objlist, kVUID_Core_BindImageMemory_Swapchain, "%s: %s is created by %s, but the image is bound by %s. The image should be created and bound by the same " "swapchain", error_prefix, report_data->FormatHandle(image_state->image).c_str(), report_data->FormatHandle(image_state->create_from_swapchain).c_str(), report_data->FormatHandle(swapchain_info->swapchain).c_str()); } const auto swapchain_state = GetSwapchainState(swapchain_info->swapchain); if (swapchain_state && swapchain_state->images.size() <= swapchain_info->imageIndex) { skip |= LogError(bind_info.image, "VUID-VkBindImageMemorySwapchainInfoKHR-imageIndex-01644", "%s: imageIndex (%i) is out of bounds of %s images (size: %i)", error_prefix, swapchain_info->imageIndex, report_data->FormatHandle(swapchain_info->swapchain).c_str(), static_cast<int>(swapchain_state->images.size())); } } else { if (image_state->create_from_swapchain) { skip |= LogError(bind_info.image, "VUID-VkBindImageMemoryInfo-image-01630", "%s: pNext of VkBindImageMemoryInfo doesn't include VkBindImageMemorySwapchainInfoKHR.", error_prefix); } if (!mem_info) { skip |= LogError(bind_info.image, "VUID-VkBindImageMemoryInfo-pNext-01632", "%s: %s is invalid.", error_prefix, report_data->FormatHandle(bind_info.memory).c_str()); } } if (plane_info) { // Checks for disjoint bit in image if (image_state->disjoint == false) { skip |= LogError( bind_info.image, "VUID-VkBindImageMemoryInfo-pNext-01618", "%s: pNext of VkBindImageMemoryInfo contains VkBindImagePlaneMemoryInfo and %s is not created with " "VK_IMAGE_CREATE_DISJOINT_BIT.", error_prefix, report_data->FormatHandle(image_state->image).c_str()); } // Make sure planeAspect is only a single, valid plane uint32_t planes = FormatPlaneCount(image_state->createInfo.format); VkImageAspectFlags aspect = plane_info->planeAspect; if ((2 == planes) && (aspect != VK_IMAGE_ASPECT_PLANE_0_BIT) && (aspect != VK_IMAGE_ASPECT_PLANE_1_BIT)) { skip |= LogError( bind_info.image, "VUID-VkBindImagePlaneMemoryInfo-planeAspect-02283", "%s: Image %s VkBindImagePlaneMemoryInfo::planeAspect is %s but can only be VK_IMAGE_ASPECT_PLANE_0_BIT" "or VK_IMAGE_ASPECT_PLANE_1_BIT.", error_prefix, report_data->FormatHandle(image_state->image).c_str(), string_VkImageAspectFlags(aspect).c_str()); } if ((3 == planes) && (aspect != VK_IMAGE_ASPECT_PLANE_0_BIT) && (aspect != VK_IMAGE_ASPECT_PLANE_1_BIT) && (aspect != VK_IMAGE_ASPECT_PLANE_2_BIT)) { skip |= LogError( bind_info.image, "VUID-VkBindImagePlaneMemoryInfo-planeAspect-02283", "%s: Image %s VkBindImagePlaneMemoryInfo::planeAspect is 
%s but can only be VK_IMAGE_ASPECT_PLANE_0_BIT" "or VK_IMAGE_ASPECT_PLANE_1_BIT or VK_IMAGE_ASPECT_PLANE_2_BIT.", error_prefix, report_data->FormatHandle(image_state->image).c_str(), string_VkImageAspectFlags(aspect).c_str()); } } } } // Check to make sure all disjoint planes were bound for (std::pair<const VkImage, std::array<uint32_t, 3>> &resource : resources_bound) { const IMAGE_STATE *image_state = GetImageState(resource.first); if (image_state->disjoint == true) { uint32_t total_planes = FormatPlaneCount(image_state->createInfo.format); for (uint32_t i = 0; i < total_planes; i++) { if (resource.second[i] == UINT32_MAX) { skip |= LogError(resource.first, "VUID-vkBindImageMemory2-pBindInfos-02858", "%s: Plane %u of the disjoint image was not bound. All %d planes need to bound individually " "in separate pBindInfos in a single call.", api_name, i, total_planes); } } } } return skip; } bool CoreChecks::PreCallValidateBindImageMemory(VkDevice device, VkImage image, VkDeviceMemory mem, VkDeviceSize memoryOffset) const { bool skip = false; const IMAGE_STATE *image_state = GetImageState(image); if (image_state) { // Checks for no disjoint bit if (image_state->disjoint == true) { skip |= LogError(image, "VUID-vkBindImageMemory-image-01608", "%s must not have been created with the VK_IMAGE_CREATE_DISJOINT_BIT (need to use vkBindImageMemory2).", report_data->FormatHandle(image).c_str()); } } VkBindImageMemoryInfo bind_info = {}; bind_info.sType = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO; bind_info.pNext = nullptr; bind_info.image = image; bind_info.memory = mem; bind_info.memoryOffset = memoryOffset; skip |= ValidateBindImageMemory(1, &bind_info, "vkBindImageMemory()"); return skip; } bool CoreChecks::PreCallValidateBindImageMemory2(VkDevice device, uint32_t bindInfoCount, const VkBindImageMemoryInfo *pBindInfos) const { return ValidateBindImageMemory(bindInfoCount, pBindInfos, "vkBindImageMemory2()"); } bool CoreChecks::PreCallValidateBindImageMemory2KHR(VkDevice device, uint32_t bindInfoCount, const VkBindImageMemoryInfo *pBindInfos) const { return ValidateBindImageMemory(bindInfoCount, pBindInfos, "vkBindImageMemory2KHR()"); } bool CoreChecks::PreCallValidateSetEvent(VkDevice device, VkEvent event) const { bool skip = false; const auto event_state = GetEventState(event); if (event_state) { if (event_state->write_in_use) { skip |= LogError(event, kVUID_Core_DrawState_QueueForwardProgress, "vkSetEvent(): %s that is already in use by a command buffer.", report_data->FormatHandle(event).c_str()); } if (event_state->flags & VK_EVENT_CREATE_DEVICE_ONLY_BIT_KHR) { skip |= LogError(event, "VUID-vkSetEvent-event-03941", "vkSetEvent(): %s was created with VK_EVENT_CREATE_DEVICE_ONLY_BIT_KHR.", report_data->FormatHandle(event).c_str()); } } return skip; } bool CoreChecks::PreCallValidateResetEvent(VkDevice device, VkEvent event) const { bool skip = false; const auto event_state = GetEventState(event); if (event_state) { if (event_state->flags & VK_EVENT_CREATE_DEVICE_ONLY_BIT_KHR) { skip |= LogError(event, "VUID-vkResetEvent-event-03823", "vkResetEvent(): %s was created with VK_EVENT_CREATE_DEVICE_ONLY_BIT_KHR.", report_data->FormatHandle(event).c_str()); } } return skip; } bool CoreChecks::PreCallValidateGetEventStatus(VkDevice device, VkEvent event) const { bool skip = false; const auto event_state = GetEventState(event); if (event_state) { if (event_state->flags & VK_EVENT_CREATE_DEVICE_ONLY_BIT_KHR) { skip |= LogError(event, "VUID-vkGetEventStatus-event-03940", "vkGetEventStatus(): %s was created 
with VK_EVENT_CREATE_DEVICE_ONLY_BIT_KHR.", report_data->FormatHandle(event).c_str()); } } return skip; } bool CoreChecks::PreCallValidateQueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo *pBindInfo, VkFence fence) const { const auto queue_data = GetQueueState(queue); const auto fence_state = GetFenceState(fence); bool skip = ValidateFenceForSubmit(fence_state, "VUID-vkQueueBindSparse-fence-01114", "VUID-vkQueueBindSparse-fence-01113", "VkQueueBindSparse()"); if (skip) { return true; } const auto queue_flags = GetPhysicalDeviceState()->queue_family_properties[queue_data->queueFamilyIndex].queueFlags; if (!(queue_flags & VK_QUEUE_SPARSE_BINDING_BIT)) { skip |= LogError(queue, "VUID-vkQueueBindSparse-queuetype", "vkQueueBindSparse(): a non-memory-management capable queue -- VK_QUEUE_SPARSE_BINDING_BIT not set."); } unordered_set<VkSemaphore> signaled_semaphores; unordered_set<VkSemaphore> unsignaled_semaphores; unordered_set<VkSemaphore> internal_semaphores; auto *vuid_error = device_extensions.vk_khr_timeline_semaphore ? "VUID-vkQueueBindSparse-pWaitSemaphores-03245" : kVUID_Core_DrawState_QueueForwardProgress; for (uint32_t bind_idx = 0; bind_idx < bindInfoCount; ++bind_idx) { const VkBindSparseInfo &bind_info = pBindInfo[bind_idx]; auto timeline_semaphore_submit_info = LvlFindInChain<VkTimelineSemaphoreSubmitInfo>(pBindInfo->pNext); std::vector<SEMAPHORE_WAIT> semaphore_waits; std::vector<VkSemaphore> semaphore_signals; for (uint32_t i = 0; i < bind_info.waitSemaphoreCount; ++i) { VkSemaphore semaphore = bind_info.pWaitSemaphores[i]; const auto semaphore_state = GetSemaphoreState(semaphore); if (semaphore_state && semaphore_state->type == VK_SEMAPHORE_TYPE_TIMELINE && !timeline_semaphore_submit_info) { skip |= LogError(semaphore, "VUID-VkBindSparseInfo-pWaitSemaphores-03246", "VkQueueBindSparse: pBindInfo[%u].pWaitSemaphores[%u] (%s) is a timeline semaphore, but " "pBindInfo[%u] does not include an instance of VkTimelineSemaphoreSubmitInfo", bind_idx, i, report_data->FormatHandle(semaphore).c_str(), bind_idx); } if (semaphore_state && semaphore_state->type == VK_SEMAPHORE_TYPE_TIMELINE && timeline_semaphore_submit_info && bind_info.waitSemaphoreCount != timeline_semaphore_submit_info->waitSemaphoreValueCount) { skip |= LogError(semaphore, "VUID-VkBindSparseInfo-pNext-03247", "VkQueueBindSparse: pBindInfo[%u].pWaitSemaphores[%u] (%s) is a timeline semaphore, it contains " "an instance of VkTimelineSemaphoreSubmitInfo, but waitSemaphoreValueCount (%u) is different " "than pBindInfo[%u].waitSemaphoreCount (%u)", bind_idx, i, report_data->FormatHandle(semaphore).c_str(), timeline_semaphore_submit_info->waitSemaphoreValueCount, bind_idx, bind_info.waitSemaphoreCount); } if (semaphore_state && semaphore_state->type == VK_SEMAPHORE_TYPE_BINARY && (semaphore_state->scope == kSyncScopeInternal || internal_semaphores.count(semaphore))) { if (unsignaled_semaphores.count(semaphore) || (!(signaled_semaphores.count(semaphore)) && !(semaphore_state->signaled) && !SemaphoreWasSignaled(semaphore))) { LogObjectList objlist(semaphore); objlist.add(queue); skip |= LogError( objlist, semaphore_state->scope == kSyncScopeInternal ? 
vuid_error : kVUID_Core_DrawState_QueueForwardProgress, "vkQueueBindSparse(): Queue %s is waiting on pBindInfo[%u].pWaitSemaphores[%u] (%s) that has no way to be " "signaled.", report_data->FormatHandle(queue).c_str(), bind_idx, i, report_data->FormatHandle(semaphore).c_str()); } else { signaled_semaphores.erase(semaphore); unsignaled_semaphores.insert(semaphore); } } if (semaphore_state && semaphore_state->type == VK_SEMAPHORE_TYPE_BINARY && semaphore_state->scope == kSyncScopeExternalTemporary) { internal_semaphores.insert(semaphore); } } for (uint32_t i = 0; i < bind_info.signalSemaphoreCount; ++i) { VkSemaphore semaphore = bind_info.pSignalSemaphores[i]; const auto semaphore_state = GetSemaphoreState(semaphore); if (semaphore_state && semaphore_state->type == VK_SEMAPHORE_TYPE_TIMELINE && !timeline_semaphore_submit_info) { skip |= LogError(semaphore, "VUID-VkBindSparseInfo-pWaitSemaphores-03246", "VkQueueBindSparse: pBindInfo[%u].pSignalSemaphores[%u] (%s) is a timeline semaphore, but " "pBindInfo[%u] does not include an instance of VkTimelineSemaphoreSubmitInfo", bind_idx, i, report_data->FormatHandle(semaphore).c_str(), bind_idx); } if (semaphore_state && semaphore_state->type == VK_SEMAPHORE_TYPE_TIMELINE && timeline_semaphore_submit_info && timeline_semaphore_submit_info->pSignalSemaphoreValues[i] <= semaphore_state->payload) { LogObjectList objlist(semaphore); objlist.add(queue); skip |= LogError(objlist, "VUID-VkBindSparseInfo-pSignalSemaphores-03249", "VkQueueBindSparse: signal value (0x%" PRIx64 ") in %s must be greater than current timeline semaphore %s value (0x%" PRIx64 ") in pBindInfo[%u].pSignalSemaphores[%u]", semaphore_state->payload, report_data->FormatHandle(queue).c_str(), report_data->FormatHandle(semaphore).c_str(), timeline_semaphore_submit_info->pSignalSemaphoreValues[i], bind_idx, i); } if (semaphore_state && semaphore_state->type == VK_SEMAPHORE_TYPE_TIMELINE && timeline_semaphore_submit_info && bind_info.signalSemaphoreCount != timeline_semaphore_submit_info->signalSemaphoreValueCount) { skip |= LogError(semaphore, "VUID-VkBindSparseInfo-pNext-03248", "VkQueueBindSparse: pBindInfo[%u].pSignalSemaphores[%u] (%s) is a timeline semaphore, it contains " "an instance of VkTimelineSemaphoreSubmitInfo, but signalSemaphoreValueCount (%u) is different " "than pBindInfo[%u].signalSemaphoreCount (%u)", bind_idx, i, report_data->FormatHandle(semaphore).c_str(), timeline_semaphore_submit_info->signalSemaphoreValueCount, bind_idx, bind_info.signalSemaphoreCount); } if (semaphore_state && semaphore_state->type == VK_SEMAPHORE_TYPE_BINARY && semaphore_state->scope == kSyncScopeInternal) { if (signaled_semaphores.count(semaphore) || (!(unsignaled_semaphores.count(semaphore)) && semaphore_state->signaled)) { LogObjectList objlist(semaphore); objlist.add(queue); objlist.add(semaphore_state->signaler.first); skip |= LogError(objlist, kVUID_Core_DrawState_QueueForwardProgress, "vkQueueBindSparse(): %s is signaling pBindInfo[%u].pSignalSemaphores[%u] (%s) that was " "previously signaled by %s but has not since been waited on by any queue.", report_data->FormatHandle(queue).c_str(), bind_idx, i, report_data->FormatHandle(semaphore).c_str(), report_data->FormatHandle(semaphore_state->signaler.first).c_str()); } else { unsignaled_semaphores.erase(semaphore); signaled_semaphores.insert(semaphore); } } } for (uint32_t image_idx = 0; image_idx < bind_info.imageBindCount; ++image_idx) { const VkSparseImageMemoryBindInfo &image_bind = bind_info.pImageBinds[image_idx]; const auto image_state 
= GetImageState(image_bind.image); if (image_state && !(image_state->createInfo.flags & VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT)) { skip |= LogError(image_bind.image, "VUID-VkSparseImageMemoryBindInfo-image-02901", "vkQueueBindSparse(): pBindInfo[%u].pImageBinds[%u]: image must have been created with " "VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT set", bind_idx, image_idx); } } } if (skip) return skip; // Now verify maxTimelineSemaphoreValueDifference for (uint32_t bind_idx = 0; bind_idx < bindInfoCount; ++bind_idx) { CoreErrorLocation outer_loc(ErrFunc::vkQueueBindSparse, RefPage::VkBindSparseInfo); const VkBindSparseInfo *bind_info = &pBindInfo[bind_idx]; auto *info = LvlFindInChain<VkTimelineSemaphoreSubmitInfo>(bind_info->pNext); if (info) { // If there are any timeline semaphores, this condition gets checked before the early return above if (info->waitSemaphoreValueCount) { for (uint32_t i = 0; i < bind_info->waitSemaphoreCount; ++i) { auto loc = outer_loc.dot(Field::pWaitSemaphoreValues, i); VkSemaphore semaphore = bind_info->pWaitSemaphores[i]; skip |= ValidateMaxTimelineSemaphoreValueDifference(loc, semaphore, info->pWaitSemaphoreValues[i]); } } // If there are any timeline semaphores, this condition gets checked before the early return above if (info->signalSemaphoreValueCount) { for (uint32_t i = 0; i < bind_info->signalSemaphoreCount; ++i) { auto loc = outer_loc.dot(Field::pSignalSemaphoreValues, i); VkSemaphore semaphore = bind_info->pSignalSemaphores[i]; skip |= ValidateMaxTimelineSemaphoreValueDifference(loc, semaphore, info->pSignalSemaphoreValues[i]); } } } } return skip; } bool CoreChecks::ValidateSignalSemaphore(VkDevice device, const VkSemaphoreSignalInfo *pSignalInfo, const char *api_name) const { bool skip = false; const auto semaphore_state = GetSemaphoreState(pSignalInfo->semaphore); if (semaphore_state && semaphore_state->type != VK_SEMAPHORE_TYPE_TIMELINE) { skip |= LogError(pSignalInfo->semaphore, "VUID-VkSemaphoreSignalInfo-semaphore-03257", "%s(): semaphore %s must be of VK_SEMAPHORE_TYPE_TIMELINE type", api_name, report_data->FormatHandle(pSignalInfo->semaphore).c_str()); return skip; } if (semaphore_state && semaphore_state->payload >= pSignalInfo->value) { skip |= LogError(pSignalInfo->semaphore, "VUID-VkSemaphoreSignalInfo-value-03258", "%s(): value must be greater than current semaphore %s value", api_name, report_data->FormatHandle(pSignalInfo->semaphore).c_str()); } for (auto &pair : queueMap) { const QUEUE_STATE &queue_state = pair.second; for (const auto &submission : queue_state.submissions) { for (const auto &signal_semaphore : submission.signalSemaphores) { if (signal_semaphore.semaphore == pSignalInfo->semaphore && pSignalInfo->value >= signal_semaphore.payload) { skip |= LogError(pSignalInfo->semaphore, "VUID-VkSemaphoreSignalInfo-value-03259", "%s(): value must be greater than value of pending signal operation " "for semaphore %s", api_name, report_data->FormatHandle(pSignalInfo->semaphore).c_str()); } } } } if (!skip) { CoreErrorLocation loc(ErrFunc::vkSignalSemaphore, RefPage::VkSemaphoreSignalInfo, Field::value); skip |= ValidateMaxTimelineSemaphoreValueDifference(loc, pSignalInfo->semaphore, pSignalInfo->value); } return skip; } bool CoreChecks::PreCallValidateSignalSemaphore(VkDevice device, const VkSemaphoreSignalInfo *pSignalInfo) const { return ValidateSignalSemaphore(device, pSignalInfo, "vkSignalSemaphore"); } bool CoreChecks::PreCallValidateSignalSemaphoreKHR(VkDevice device, const VkSemaphoreSignalInfo *pSignalInfo) const { return 
ValidateSignalSemaphore(device, pSignalInfo, "vkSignalSemaphoreKHR"); } bool CoreChecks::ValidateImportSemaphore(VkSemaphore semaphore, const char *caller_name) const { bool skip = false; const SEMAPHORE_STATE *sema_node = GetSemaphoreState(semaphore); if (sema_node) { const VulkanTypedHandle obj_struct(semaphore, kVulkanObjectTypeSemaphore); skip |= ValidateObjectNotInUse(sema_node, obj_struct, caller_name, kVUIDUndefined); } return skip; } #ifdef VK_USE_PLATFORM_WIN32_KHR bool CoreChecks::PreCallValidateImportSemaphoreWin32HandleKHR( VkDevice device, const VkImportSemaphoreWin32HandleInfoKHR *pImportSemaphoreWin32HandleInfo) const { return ValidateImportSemaphore(pImportSemaphoreWin32HandleInfo->semaphore, "vkImportSemaphoreWin32HandleKHR"); } #endif // VK_USE_PLATFORM_WIN32_KHR bool CoreChecks::PreCallValidateImportSemaphoreFdKHR(VkDevice device, const VkImportSemaphoreFdInfoKHR *pImportSemaphoreFdInfo) const { return ValidateImportSemaphore(pImportSemaphoreFdInfo->semaphore, "vkImportSemaphoreFdKHR"); } bool CoreChecks::ValidateImportFence(VkFence fence, const char *vuid, const char *caller_name) const { const FENCE_STATE *fence_node = GetFenceState(fence); bool skip = false; if (fence_node && fence_node->scope == kSyncScopeInternal && fence_node->state == FENCE_INFLIGHT) { skip |= LogError(fence, vuid, "%s: Fence %s is currently in use.", caller_name, report_data->FormatHandle(fence).c_str()); } return skip; } #ifdef VK_USE_PLATFORM_WIN32_KHR bool CoreChecks::PreCallValidateImportFenceWin32HandleKHR( VkDevice device, const VkImportFenceWin32HandleInfoKHR *pImportFenceWin32HandleInfo) const { return ValidateImportFence(pImportFenceWin32HandleInfo->fence, "VUID-vkImportFenceWin32HandleKHR-fence-04448", "vkImportFenceWin32HandleKHR()"); } #endif // VK_USE_PLATFORM_WIN32_KHR bool CoreChecks::PreCallValidateImportFenceFdKHR(VkDevice device, const VkImportFenceFdInfoKHR *pImportFenceFdInfo) const { return ValidateImportFence(pImportFenceFdInfo->fence, "VUID-vkImportFenceFdKHR-fence-01463", "vkImportFenceFdKHR()"); } static VkImageCreateInfo GetSwapchainImpliedImageCreateInfo(VkSwapchainCreateInfoKHR const *pCreateInfo) { VkImageCreateInfo result = {}; result.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; result.pNext = nullptr; if (pCreateInfo->flags & VK_SWAPCHAIN_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT_KHR) { result.flags |= VK_IMAGE_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT; } if (pCreateInfo->flags & VK_SWAPCHAIN_CREATE_PROTECTED_BIT_KHR) { result.flags |= VK_IMAGE_CREATE_PROTECTED_BIT; } if (pCreateInfo->flags & VK_SWAPCHAIN_CREATE_MUTABLE_FORMAT_BIT_KHR) { result.flags |= VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT | VK_IMAGE_CREATE_EXTENDED_USAGE_BIT; } result.imageType = VK_IMAGE_TYPE_2D; result.format = pCreateInfo->imageFormat; result.extent.width = pCreateInfo->imageExtent.width; result.extent.height = pCreateInfo->imageExtent.height; result.extent.depth = 1; result.mipLevels = 1; result.arrayLayers = pCreateInfo->imageArrayLayers; result.samples = VK_SAMPLE_COUNT_1_BIT; result.tiling = VK_IMAGE_TILING_OPTIMAL; result.usage = pCreateInfo->imageUsage; result.sharingMode = pCreateInfo->imageSharingMode; result.queueFamilyIndexCount = pCreateInfo->queueFamilyIndexCount; result.pQueueFamilyIndices = pCreateInfo->pQueueFamilyIndices; result.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; return result; } bool CoreChecks::ValidateCreateSwapchain(const char *func_name, VkSwapchainCreateInfoKHR const *pCreateInfo, const SURFACE_STATE *surface_state, const SWAPCHAIN_NODE *old_swapchain_state) const { // All
physical devices and queue families are required to be able to present to any native window on Android; require the // application to have established support on any other platform. if (!instance_extensions.vk_khr_android_surface) { auto support_predicate = [this](decltype(surface_state->gpu_queue_support)::value_type qs) -> bool { // TODO: should restrict search only to queue families of VkDeviceQueueCreateInfos, not whole phys. device return (qs.first.gpu == physical_device) && qs.second; }; const auto &support = surface_state->gpu_queue_support; bool is_supported = std::any_of(support.begin(), support.end(), support_predicate); if (!is_supported) { if (LogError( device, "VUID-VkSwapchainCreateInfoKHR-surface-01270", "%s: pCreateInfo->surface is not known at this time to be supported for presentation by this device. The " "vkGetPhysicalDeviceSurfaceSupportKHR() must be called beforehand, and it must return VK_TRUE support with " "this surface for at least one queue family of this device.", func_name)) { return true; } } } if (old_swapchain_state) { if (old_swapchain_state->createInfo.surface != pCreateInfo->surface) { if (LogError(pCreateInfo->oldSwapchain, "VUID-VkSwapchainCreateInfoKHR-oldSwapchain-01933", "%s: pCreateInfo->oldSwapchain's surface is not pCreateInfo->surface", func_name)) { return true; } } if (old_swapchain_state->retired) { if (LogError(pCreateInfo->oldSwapchain, "VUID-VkSwapchainCreateInfoKHR-oldSwapchain-01933", "%s: pCreateInfo->oldSwapchain is retired", func_name)) { return true; } } } if ((pCreateInfo->imageExtent.width == 0) || (pCreateInfo->imageExtent.height == 0)) { if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageExtent-01689", "%s: pCreateInfo->imageExtent = (%d, %d) which is illegal.", func_name, pCreateInfo->imageExtent.width, pCreateInfo->imageExtent.height)) { return true; } } auto physical_device_state = GetPhysicalDeviceState(); bool skip = false; VkSurfaceTransformFlagBitsKHR current_transform = physical_device_state->surfaceCapabilities.currentTransform; if ((pCreateInfo->preTransform & current_transform) != pCreateInfo->preTransform) { skip |= LogPerformanceWarning(physical_device, kVUID_Core_Swapchain_PreTransform, "%s: pCreateInfo->preTransform (%s) doesn't match the currentTransform (%s) returned by " "vkGetPhysicalDeviceSurfaceCapabilitiesKHR, the presentation engine will transform the image " "content as part of the presentation operation.", func_name, string_VkSurfaceTransformFlagBitsKHR(pCreateInfo->preTransform), string_VkSurfaceTransformFlagBitsKHR(current_transform)); } VkSurfaceCapabilitiesKHR capabilities{}; DispatchGetPhysicalDeviceSurfaceCapabilitiesKHR(physical_device_state->phys_device, pCreateInfo->surface, &capabilities); // Validate pCreateInfo->minImageCount against VkSurfaceCapabilitiesKHR::{min|max}ImageCount: if (pCreateInfo->minImageCount < capabilities.minImageCount) { if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-minImageCount-01271", "%s called with minImageCount = %d, which is outside the bounds returned by " "vkGetPhysicalDeviceSurfaceCapabilitiesKHR() (i.e. 
minImageCount = %d, maxImageCount = %d).", func_name, pCreateInfo->minImageCount, capabilities.minImageCount, capabilities.maxImageCount)) { return true; } } if ((capabilities.maxImageCount > 0) && (pCreateInfo->minImageCount > capabilities.maxImageCount)) { if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-minImageCount-01272", "%s called with minImageCount = %d, which is outside the bounds returned by " "vkGetPhysicalDeviceSurfaceCapabilitiesKHR() (i.e. minImageCount = %d, maxImageCount = %d).", func_name, pCreateInfo->minImageCount, capabilities.minImageCount, capabilities.maxImageCount)) { return true; } } // Validate pCreateInfo->imageExtent against VkSurfaceCapabilitiesKHR::{current|min|max}ImageExtent: if ((pCreateInfo->imageExtent.width < capabilities.minImageExtent.width) || (pCreateInfo->imageExtent.width > capabilities.maxImageExtent.width) || (pCreateInfo->imageExtent.height < capabilities.minImageExtent.height) || (pCreateInfo->imageExtent.height > capabilities.maxImageExtent.height)) { if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageExtent-01274", "%s called with imageExtent = (%d,%d), which is outside the bounds returned by " "vkGetPhysicalDeviceSurfaceCapabilitiesKHR(): currentExtent = (%d,%d), minImageExtent = (%d,%d), " "maxImageExtent = (%d,%d).", func_name, pCreateInfo->imageExtent.width, pCreateInfo->imageExtent.height, capabilities.currentExtent.width, capabilities.currentExtent.height, capabilities.minImageExtent.width, capabilities.minImageExtent.height, capabilities.maxImageExtent.width, capabilities.maxImageExtent.height)) { return true; } } // pCreateInfo->preTransform should have exactly one bit set, and that bit must also be set in // VkSurfaceCapabilitiesKHR::supportedTransforms. if (!pCreateInfo->preTransform || (pCreateInfo->preTransform & (pCreateInfo->preTransform - 1)) || !(pCreateInfo->preTransform & capabilities.supportedTransforms)) { // This is an error situation; one for which we'd like to give the developer a helpful, multi-line error message. Build // it up a little at a time, and then log it: std::string error_string = ""; char str[1024]; // Here's the first part of the message: sprintf(str, "%s called with a non-supported pCreateInfo->preTransform (i.e. %s). Supported values are:\n", func_name, string_VkSurfaceTransformFlagBitsKHR(pCreateInfo->preTransform)); error_string += str; for (int i = 0; i < 32; i++) { // Build up the rest of the message: if ((1 << i) & capabilities.supportedTransforms) { const char *new_str = string_VkSurfaceTransformFlagBitsKHR(static_cast<VkSurfaceTransformFlagBitsKHR>(1 << i)); sprintf(str, " %s\n", new_str); error_string += str; } } // Log the message that we've built up: if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-preTransform-01279", "%s.", error_string.c_str())) return true; } // pCreateInfo->compositeAlpha should have exactly one bit set, and that bit must also be set in // VkSurfaceCapabilitiesKHR::supportedCompositeAlpha if (!pCreateInfo->compositeAlpha || (pCreateInfo->compositeAlpha & (pCreateInfo->compositeAlpha - 1)) || !((pCreateInfo->compositeAlpha) & capabilities.supportedCompositeAlpha)) { // This is an error situation; one for which we'd like to give the developer a helpful, multi-line error message. Build // it up a little at a time, and then log it: std::string error_string = ""; char str[1024]; // Here's the first part of the message: sprintf(str, "%s called with a non-supported pCreateInfo->compositeAlpha (i.e. %s). 
Supported values are:\n", func_name, string_VkCompositeAlphaFlagBitsKHR(pCreateInfo->compositeAlpha)); error_string += str; for (int i = 0; i < 32; i++) { // Build up the rest of the message: if ((1 << i) & capabilities.supportedCompositeAlpha) { const char *new_str = string_VkCompositeAlphaFlagBitsKHR(static_cast<VkCompositeAlphaFlagBitsKHR>(1 << i)); sprintf(str, " %s\n", new_str); error_string += str; } } // Log the message that we've built up: if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-compositeAlpha-01280", "%s.", error_string.c_str())) return true; } // Validate pCreateInfo->imageArrayLayers against VkSurfaceCapabilitiesKHR::maxImageArrayLayers: if (pCreateInfo->imageArrayLayers > capabilities.maxImageArrayLayers) { if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageArrayLayers-01275", "%s called with a non-supported imageArrayLayers (i.e. %d). Maximum value is %d.", func_name, pCreateInfo->imageArrayLayers, capabilities.maxImageArrayLayers)) { return true; } } // Validate pCreateInfo->imageUsage against VkSurfaceCapabilitiesKHR::supportedUsageFlags: if (pCreateInfo->imageUsage != (pCreateInfo->imageUsage & capabilities.supportedUsageFlags)) { const char *validation_error = "VUID-VkSwapchainCreateInfoKHR-imageUsage-01276"; if ((IsExtEnabled(device_extensions.vk_khr_shared_presentable_image) == true) && ((pCreateInfo->presentMode == VK_PRESENT_MODE_IMMEDIATE_KHR) || (pCreateInfo->presentMode == VK_PRESENT_MODE_MAILBOX_KHR) || (pCreateInfo->presentMode == VK_PRESENT_MODE_FIFO_KHR) || (pCreateInfo->presentMode == VK_PRESENT_MODE_FIFO_RELAXED_KHR))) { validation_error = "VUID-VkSwapchainCreateInfoKHR-presentMode-01427"; } if (LogError(device, validation_error, "%s called with a non-supported pCreateInfo->imageUsage (i.e. 0x%08x). Supported flag bits are 0x%08x.", func_name, pCreateInfo->imageUsage, capabilities.supportedUsageFlags)) { return true; } } if (device_extensions.vk_khr_surface_protected_capabilities && (pCreateInfo->flags & VK_SWAPCHAIN_CREATE_PROTECTED_BIT_KHR)) { VkPhysicalDeviceSurfaceInfo2KHR surface_info = {VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SURFACE_INFO_2_KHR}; surface_info.surface = pCreateInfo->surface; VkSurfaceProtectedCapabilitiesKHR surface_protected_capabilities = {VK_STRUCTURE_TYPE_SURFACE_PROTECTED_CAPABILITIES_KHR}; VkSurfaceCapabilities2KHR surface_capabilities = {VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_KHR}; surface_capabilities.pNext = &surface_protected_capabilities; DispatchGetPhysicalDeviceSurfaceCapabilities2KHR(physical_device_state->phys_device, &surface_info, &surface_capabilities); if (!surface_protected_capabilities.supportsProtected) { if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-flags-03187", "%s: pCreateInfo->flags contains VK_SWAPCHAIN_CREATE_PROTECTED_BIT_KHR but the surface " "capabilities does not have VkSurfaceProtectedCapabilitiesKHR.supportsProtected set to VK_TRUE.", func_name)) { return true; } } } std::vector<VkSurfaceFormatKHR> surface_formats; const auto *surface_formats_ref = &surface_formats; // Validate pCreateInfo values with the results of vkGetPhysicalDeviceSurfaceFormatsKHR(): if (physical_device_state->surface_formats.empty()) { uint32_t surface_format_count = 0; DispatchGetPhysicalDeviceSurfaceFormatsKHR(physical_device, pCreateInfo->surface, &surface_format_count, nullptr); surface_formats.resize(surface_format_count); DispatchGetPhysicalDeviceSurfaceFormatsKHR(physical_device, pCreateInfo->surface, &surface_format_count, &surface_formats[0]); } else { surface_formats_ref = 
&physical_device_state->surface_formats; } { // Validate pCreateInfo->imageFormat against VkSurfaceFormatKHR::format: bool found_format = false; bool found_color_space = false; bool found_match = false; for (auto const &format : *surface_formats_ref) { if (pCreateInfo->imageFormat == format.format) { // Validate pCreateInfo->imageColorSpace against VkSurfaceFormatKHR::colorSpace: found_format = true; if (pCreateInfo->imageColorSpace == format.colorSpace) { found_match = true; break; } } else { if (pCreateInfo->imageColorSpace == format.colorSpace) { found_color_space = true; } } } if (!found_match) { if (!found_format) { if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageFormat-01273", "%s called with a non-supported pCreateInfo->imageFormat (%s).", func_name, string_VkFormat(pCreateInfo->imageFormat))) { return true; } } if (!found_color_space) { if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageFormat-01273", "%s called with a non-supported pCreateInfo->imageColorSpace (%s).", func_name, string_VkColorSpaceKHR(pCreateInfo->imageColorSpace))) { return true; } } } } std::vector<VkPresentModeKHR> present_modes; const auto *present_modes_ref = &present_modes; // Validate pCreateInfo values with the results of vkGetPhysicalDeviceSurfacePresentModesKHR(): if (physical_device_state->present_modes.empty()) { uint32_t present_mode_count = 0; DispatchGetPhysicalDeviceSurfacePresentModesKHR(physical_device_state->phys_device, pCreateInfo->surface, &present_mode_count, nullptr); present_modes.resize(present_mode_count); DispatchGetPhysicalDeviceSurfacePresentModesKHR(physical_device_state->phys_device, pCreateInfo->surface, &present_mode_count, &present_modes[0]); } else { present_modes_ref = &physical_device_state->present_modes; } // Validate pCreateInfo->presentMode against vkGetPhysicalDeviceSurfacePresentModesKHR(): bool found_match = std::find(present_modes_ref->begin(), present_modes_ref->end(), pCreateInfo->presentMode) != present_modes_ref->end(); if (!found_match) { if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-presentMode-01281", "%s called with a non-supported presentMode (i.e. %s).", func_name, string_VkPresentModeKHR(pCreateInfo->presentMode))) { return true; } } // Validate state for shared presentable case if (VK_PRESENT_MODE_SHARED_DEMAND_REFRESH_KHR == pCreateInfo->presentMode || VK_PRESENT_MODE_SHARED_CONTINUOUS_REFRESH_KHR == pCreateInfo->presentMode) { if (!device_extensions.vk_khr_shared_presentable_image) { if (LogError( device, kVUID_Core_DrawState_ExtensionNotEnabled, "%s called with presentMode %s which requires the VK_KHR_shared_presentable_image extension, which has not " "been enabled.", func_name, string_VkPresentModeKHR(pCreateInfo->presentMode))) { return true; } } else if (pCreateInfo->minImageCount != 1) { if (LogError( device, "VUID-VkSwapchainCreateInfoKHR-minImageCount-01383", "%s called with presentMode %s, but minImageCount value is %d. 
For shared presentable image, minImageCount " "must be 1.", func_name, string_VkPresentModeKHR(pCreateInfo->presentMode), pCreateInfo->minImageCount)) { return true; } } } if (pCreateInfo->flags & VK_SWAPCHAIN_CREATE_MUTABLE_FORMAT_BIT_KHR) { if (!device_extensions.vk_khr_swapchain_mutable_format) { if (LogError(device, kVUID_Core_DrawState_ExtensionNotEnabled, "%s: pCreateInfo->flags contains VK_SWAPCHAIN_CREATE_MUTABLE_FORMAT_BIT_KHR which requires the " "VK_KHR_swapchain_mutable_format extension, which has not been enabled.", func_name)) { return true; } } else { const auto *image_format_list = LvlFindInChain<VkImageFormatListCreateInfo>(pCreateInfo->pNext); if (image_format_list == nullptr) { if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-flags-03168", "%s: pCreateInfo->flags contains VK_SWAPCHAIN_CREATE_MUTABLE_FORMAT_BIT_KHR but the pNext chain of " "pCreateInfo does not contain an instance of VkImageFormatListCreateInfo.", func_name)) { return true; } } else if (image_format_list->viewFormatCount == 0) { if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-flags-03168", "%s: pCreateInfo->flags contains VK_SWAPCHAIN_CREATE_MUTABLE_FORMAT_BIT_KHR but the viewFormatCount " "member of VkImageFormatListCreateInfo in the pNext chain is zero.", func_name)) { return true; } } else { bool found_base_format = false; for (uint32_t i = 0; i < image_format_list->viewFormatCount; ++i) { if (image_format_list->pViewFormats[i] == pCreateInfo->imageFormat) { found_base_format = true; break; } } if (!found_base_format) { if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-flags-03168", "%s: pCreateInfo->flags contains VK_SWAPCHAIN_CREATE_MUTABLE_FORMAT_BIT_KHR but none of the " "elements of the pViewFormats member of VkImageFormatListCreateInfo match " "pCreateInfo->imageFormat.", func_name)) { return true; } } } } } if ((pCreateInfo->imageSharingMode == VK_SHARING_MODE_CONCURRENT) && pCreateInfo->pQueueFamilyIndices) { bool skip1 = ValidatePhysicalDeviceQueueFamilies(pCreateInfo->queueFamilyIndexCount, pCreateInfo->pQueueFamilyIndices, "vkCreateBuffer", "pCreateInfo->pQueueFamilyIndices", "VUID-VkSwapchainCreateInfoKHR-imageSharingMode-01428"); if (skip1) return true; } // Validate pCreateInfo->imageUsage against GetPhysicalDeviceFormatProperties const VkFormatProperties format_properties = GetPDFormatProperties(pCreateInfo->imageFormat); const VkFormatFeatureFlags tiling_features = format_properties.optimalTilingFeatures; if (tiling_features == 0) { if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageFormat-01778", "%s: pCreateInfo->imageFormat %s with tiling VK_IMAGE_TILING_OPTIMAL has no supported format features on this " "physical device.", func_name, string_VkFormat(pCreateInfo->imageFormat))) { return true; } } else if ((pCreateInfo->imageUsage & VK_IMAGE_USAGE_SAMPLED_BIT) && !(tiling_features & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT)) { if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageFormat-01778", "%s: pCreateInfo->imageFormat %s with tiling VK_IMAGE_TILING_OPTIMAL does not support usage that includes " "VK_IMAGE_USAGE_SAMPLED_BIT.", func_name, string_VkFormat(pCreateInfo->imageFormat))) { return true; } } else if ((pCreateInfo->imageUsage & VK_IMAGE_USAGE_STORAGE_BIT) && !(tiling_features & VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT)) { if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageFormat-01778", "%s: pCreateInfo->imageFormat %s with tiling VK_IMAGE_TILING_OPTIMAL does not support usage that includes " "VK_IMAGE_USAGE_STORAGE_BIT.", func_name, 
string_VkFormat(pCreateInfo->imageFormat))) { return true; } } else if ((pCreateInfo->imageUsage & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT) && !(tiling_features & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT)) { if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageFormat-01778", "%s: pCreateInfo->imageFormat %s with tiling VK_IMAGE_TILING_OPTIMAL does not support usage that includes " "VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT.", func_name, string_VkFormat(pCreateInfo->imageFormat))) { return true; } } else if ((pCreateInfo->imageUsage & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) && !(tiling_features & VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT)) { if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageFormat-01778", "%s: pCreateInfo->imageFormat %s with tiling VK_IMAGE_TILING_OPTIMAL does not support usage that includes " "VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT.", func_name, string_VkFormat(pCreateInfo->imageFormat))) { return true; } } else if ((pCreateInfo->imageUsage & VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT) && !(tiling_features & (VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT | VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT))) { if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageFormat-01778", "%s: pCreateInfo->imageFormat %s with tiling VK_IMAGE_TILING_OPTIMAL does not support usage that includes " "VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT or VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT.", func_name, string_VkFormat(pCreateInfo->imageFormat))) { return true; } } const VkImageCreateInfo image_create_info = GetSwapchainImpliedImageCreateInfo(pCreateInfo); VkImageFormatProperties image_properties = {}; const VkResult image_properties_result = DispatchGetPhysicalDeviceImageFormatProperties( physical_device, image_create_info.format, image_create_info.imageType, image_create_info.tiling, image_create_info.usage, image_create_info.flags, &image_properties); if (image_properties_result != VK_SUCCESS) { if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageFormat-01778", "vkGetPhysicalDeviceImageFormatProperties() unexpectedly failed, " "when called for %s validation with following params: " "format: %s, imageType: %s, " "tiling: %s, usage: %s, " "flags: %s.", func_name, string_VkFormat(image_create_info.format), string_VkImageType(image_create_info.imageType), string_VkImageTiling(image_create_info.tiling), string_VkImageUsageFlags(image_create_info.usage).c_str(), string_VkImageCreateFlags(image_create_info.flags).c_str())) { return true; } } // Validate pCreateInfo->imageArrayLayers against VkImageFormatProperties::maxArrayLayers if (pCreateInfo->imageArrayLayers > image_properties.maxArrayLayers) { if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageFormat-01778", "%s called with a non-supported imageArrayLayers (i.e. %d). 
" "Maximum value returned by vkGetPhysicalDeviceImageFormatProperties() is %d " "for imageFormat %s with tiling VK_IMAGE_TILING_OPTIMAL", func_name, pCreateInfo->imageArrayLayers, image_properties.maxArrayLayers, string_VkFormat(pCreateInfo->imageFormat))) { return true; } } // Validate pCreateInfo->imageExtent against VkImageFormatProperties::maxExtent if ((pCreateInfo->imageExtent.width > image_properties.maxExtent.width) || (pCreateInfo->imageExtent.height > image_properties.maxExtent.height)) { if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageFormat-01778", "%s called with imageExtent = (%d,%d), which is bigger than max extent (%d,%d)" "returned by vkGetPhysicalDeviceImageFormatProperties(): " "for imageFormat %s with tiling VK_IMAGE_TILING_OPTIMAL", func_name, pCreateInfo->imageExtent.width, pCreateInfo->imageExtent.height, image_properties.maxExtent.width, image_properties.maxExtent.height, string_VkFormat(pCreateInfo->imageFormat))) { return true; } } if ((pCreateInfo->flags & VK_SWAPCHAIN_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT_KHR) && device_group_create_info.physicalDeviceCount == 1) { if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-physicalDeviceCount-01429", "%s called with flags containing VK_SWAPCHAIN_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT_KHR" "but logical device was created with VkDeviceGroupDeviceCreateInfo::physicalDeviceCount equal to 1", func_name)) { return true; } } return skip; } bool CoreChecks::PreCallValidateCreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchain) const { const auto surface_state = GetSurfaceState(pCreateInfo->surface); const auto old_swapchain_state = GetSwapchainState(pCreateInfo->oldSwapchain); return ValidateCreateSwapchain("vkCreateSwapchainKHR()", pCreateInfo, surface_state, old_swapchain_state); } void CoreChecks::PreCallRecordDestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks *pAllocator) { if (swapchain) { auto swapchain_data = GetSwapchainState(swapchain); if (swapchain_data) { for (const auto &swapchain_image : swapchain_data->images) { imageLayoutMap.erase(swapchain_image.image); qfo_release_image_barrier_map.erase(swapchain_image.image); } } } StateTracker::PreCallRecordDestroySwapchainKHR(device, swapchain, pAllocator); } bool CoreChecks::PreCallValidateGetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pSwapchainImageCount, VkImage *pSwapchainImages) const { auto swapchain_state = GetSwapchainState(swapchain); bool skip = false; if (swapchain_state && pSwapchainImages) { if (*pSwapchainImageCount > swapchain_state->get_swapchain_image_count) { skip |= LogError(device, kVUID_Core_Swapchain_InvalidCount, "vkGetSwapchainImagesKHR() called with non-NULL pSwapchainImages, and with pSwapchainImageCount set to a " "value (%d) that is greater than the value (%d) that was returned when pSwapchainImages was NULL.", *pSwapchainImageCount, swapchain_state->get_swapchain_image_count); } } return skip; } void CoreChecks::PostCallRecordGetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pSwapchainImageCount, VkImage *pSwapchainImages, VkResult result) { // This function will run twice. The first is to get pSwapchainImageCount. The second is to get pSwapchainImages. // The first time in StateTracker::PostCallRecordGetSwapchainImagesKHR only generates the container's size. 
    // The second pass through StateTracker::PostCallRecordGetSwapchainImagesKHR creates the VkImage handles and
    // IMAGE_STATE entries, so recording the new IMAGE_STATEs into the global image layout map must also happen on
    // the second pass (pSwapchainImages is non-null only then), after StateTracker::PostCallRecordGetSwapchainImagesKHR runs.
    uint32_t new_swapchain_image_index = 0;
    if (((result == VK_SUCCESS) || (result == VK_INCOMPLETE)) && pSwapchainImages) {
        auto swapchain_state = GetSwapchainState(swapchain);
        const auto image_vector_size = swapchain_state->images.size();
        for (; new_swapchain_image_index < *pSwapchainImageCount; ++new_swapchain_image_index) {
            if ((new_swapchain_image_index >= image_vector_size) ||
                (swapchain_state->images[new_swapchain_image_index].image == VK_NULL_HANDLE)) {
                break;
            }
        }
    }
    StateTracker::PostCallRecordGetSwapchainImagesKHR(device, swapchain, pSwapchainImageCount, pSwapchainImages, result);

    if (((result == VK_SUCCESS) || (result == VK_INCOMPLETE)) && pSwapchainImages) {
        for (; new_swapchain_image_index < *pSwapchainImageCount; ++new_swapchain_image_index) {
            auto image_state = Get<IMAGE_STATE>(pSwapchainImages[new_swapchain_image_index]);
            AddInitialLayoutintoImageLayoutMap(*image_state, imageLayoutMap);
        }
    }
}

bool CoreChecks::PreCallValidateQueuePresentKHR(VkQueue queue, const VkPresentInfoKHR *pPresentInfo) const {
    bool skip = false;
    const auto queue_state = GetQueueState(queue);
    for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) {
        const auto semaphore_state = GetSemaphoreState(pPresentInfo->pWaitSemaphores[i]);
        if (semaphore_state && semaphore_state->type != VK_SEMAPHORE_TYPE_BINARY) {
            skip |= LogError(pPresentInfo->pWaitSemaphores[i], "VUID-vkQueuePresentKHR-pWaitSemaphores-03267",
                             "vkQueuePresentKHR: pWaitSemaphores[%u] (%s) is not a VK_SEMAPHORE_TYPE_BINARY semaphore.", i,
                             report_data->FormatHandle(pPresentInfo->pWaitSemaphores[i]).c_str());
        }
        if (semaphore_state && !semaphore_state->signaled && !SemaphoreWasSignaled(pPresentInfo->pWaitSemaphores[i])) {
            LogObjectList objlist(queue);
            objlist.add(pPresentInfo->pWaitSemaphores[i]);
            skip |= LogError(objlist, "VUID-vkQueuePresentKHR-pWaitSemaphores-03873",
                             "vkQueuePresentKHR: Queue %s is waiting on pWaitSemaphores[%u] (%s) that has no way to be signaled.",
                             report_data->FormatHandle(queue).c_str(), i,
                             report_data->FormatHandle(pPresentInfo->pWaitSemaphores[i]).c_str());
        }
    }

    for (uint32_t i = 0; i < pPresentInfo->swapchainCount; ++i) {
        const auto swapchain_data = GetSwapchainState(pPresentInfo->pSwapchains[i]);
        if (swapchain_data) {
            // The VU is currently 2-in-1: it covers both being a valid index and being in a valid layout
            const char *validation_error = (device_extensions.vk_khr_shared_presentable_image)
                                               ? "VUID-VkPresentInfoKHR-pImageIndices-01430"
                                               : "VUID-VkPresentInfoKHR-pImageIndices-01296";
            // Check whether the index could even have been acquired, to give a better error message
            if (pPresentInfo->pImageIndices[i] >= swapchain_data->images.size()) {
                skip |= LogError(
                    pPresentInfo->pSwapchains[i], validation_error,
                    "vkQueuePresentKHR: pSwapchains[%u] image index is too large (%u). 
There are only %u images in this swapchain.", i, pPresentInfo->pImageIndices[i], static_cast<uint32_t>(swapchain_data->images.size())); } else { auto image = swapchain_data->images[pPresentInfo->pImageIndices[i]].image; const auto image_state = GetImageState(image); if (!image_state->acquired) { skip |= LogError(pPresentInfo->pSwapchains[i], validation_error, "vkQueuePresentKHR: pSwapchains[%u] image index %u has not been acquired.", i, pPresentInfo->pImageIndices[i]); } vector<VkImageLayout> layouts; if (FindLayouts(image, layouts)) { for (auto layout : layouts) { if ((layout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) && (!device_extensions.vk_khr_shared_presentable_image || (layout != VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR))) { skip |= LogError(queue, validation_error, "vkQueuePresentKHR(): pSwapchains[%u] images passed to present must be in layout " "VK_IMAGE_LAYOUT_PRESENT_SRC_KHR or " "VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR but is in %s.", i, string_VkImageLayout(layout)); } } } } // All physical devices and queue families are required to be able to present to any native window on Android; require // the application to have established support on any other platform. if (!instance_extensions.vk_khr_android_surface) { const auto surface_state = GetSurfaceState(swapchain_data->createInfo.surface); auto support_it = surface_state->gpu_queue_support.find({physical_device, queue_state->queueFamilyIndex}); if (support_it == surface_state->gpu_queue_support.end()) { skip |= LogError( pPresentInfo->pSwapchains[i], kVUID_Core_DrawState_SwapchainUnsupportedQueue, "vkQueuePresentKHR: Presenting pSwapchains[%u] image without calling vkGetPhysicalDeviceSurfaceSupportKHR", i); } else if (!support_it->second) { skip |= LogError( pPresentInfo->pSwapchains[i], "VUID-vkQueuePresentKHR-pSwapchains-01292", "vkQueuePresentKHR: Presenting pSwapchains[%u] image on queue that cannot present to this surface.", i); } } } } if (pPresentInfo->pNext) { // Verify ext struct const auto *present_regions = LvlFindInChain<VkPresentRegionsKHR>(pPresentInfo->pNext); if (present_regions) { for (uint32_t i = 0; i < present_regions->swapchainCount; ++i) { const auto swapchain_data = GetSwapchainState(pPresentInfo->pSwapchains[i]); assert(swapchain_data); VkPresentRegionKHR region = present_regions->pRegions[i]; for (uint32_t j = 0; j < region.rectangleCount; ++j) { VkRectLayerKHR rect = region.pRectangles[j]; if ((rect.offset.x + rect.extent.width) > swapchain_data->createInfo.imageExtent.width) { skip |= LogError(pPresentInfo->pSwapchains[i], "VUID-VkRectLayerKHR-offset-01261", "vkQueuePresentKHR(): For VkPresentRegionKHR down pNext chain, " "pRegion[%i].pRectangles[%i], the sum of offset.x (%i) and extent.width (%i) is greater " "than the corresponding swapchain's imageExtent.width (%i).", i, j, rect.offset.x, rect.extent.width, swapchain_data->createInfo.imageExtent.width); } if ((rect.offset.y + rect.extent.height) > swapchain_data->createInfo.imageExtent.height) { skip |= LogError(pPresentInfo->pSwapchains[i], "VUID-VkRectLayerKHR-offset-01261", "vkQueuePresentKHR(): For VkPresentRegionKHR down pNext chain, " "pRegion[%i].pRectangles[%i], the sum of offset.y (%i) and extent.height (%i) is greater " "than the corresponding swapchain's imageExtent.height (%i).", i, j, rect.offset.y, rect.extent.height, swapchain_data->createInfo.imageExtent.height); } if (rect.layer > swapchain_data->createInfo.imageArrayLayers) { skip |= LogError( pPresentInfo->pSwapchains[i], "VUID-VkRectLayerKHR-layer-01262", "vkQueuePresentKHR(): For 
VkPresentRegionKHR down pNext chain, pRegion[%i].pRectangles[%i], the layer " "(%i) is greater than the corresponding swapchain's imageArrayLayers (%i).", i, j, rect.layer, swapchain_data->createInfo.imageArrayLayers); } } } } const auto *present_times_info = LvlFindInChain<VkPresentTimesInfoGOOGLE>(pPresentInfo->pNext); if (present_times_info) { if (pPresentInfo->swapchainCount != present_times_info->swapchainCount) { skip |= LogError(pPresentInfo->pSwapchains[0], "VUID-VkPresentTimesInfoGOOGLE-swapchainCount-01247", "vkQueuePresentKHR(): VkPresentTimesInfoGOOGLE.swapchainCount is %i but pPresentInfo->swapchainCount " "is %i. For VkPresentTimesInfoGOOGLE down pNext chain of VkPresentInfoKHR, " "VkPresentTimesInfoGOOGLE.swapchainCount must equal VkPresentInfoKHR.swapchainCount.", present_times_info->swapchainCount, pPresentInfo->swapchainCount); } } } return skip; } bool CoreChecks::PreCallValidateCreateSharedSwapchainsKHR(VkDevice device, uint32_t swapchainCount, const VkSwapchainCreateInfoKHR *pCreateInfos, const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchains) const { bool skip = false; if (pCreateInfos) { for (uint32_t i = 0; i < swapchainCount; i++) { const auto surface_state = GetSurfaceState(pCreateInfos[i].surface); const auto old_swapchain_state = GetSwapchainState(pCreateInfos[i].oldSwapchain); std::stringstream func_name; func_name << "vkCreateSharedSwapchainsKHR[" << swapchainCount << "]()"; skip |= ValidateCreateSwapchain(func_name.str().c_str(), &pCreateInfos[i], surface_state, old_swapchain_state); } } return skip; } bool CoreChecks::ValidateAcquireNextImage(VkDevice device, const CommandVersion cmd_version, VkSwapchainKHR swapchain, uint64_t timeout, VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex, const char *func_name, const char *semaphore_type_vuid) const { bool skip = false; auto semaphore_state = GetSemaphoreState(semaphore); if (semaphore_state && semaphore_state->type != VK_SEMAPHORE_TYPE_BINARY) { skip |= LogError(semaphore, semaphore_type_vuid, "%s: %s is not a VK_SEMAPHORE_TYPE_BINARY", func_name, report_data->FormatHandle(semaphore).c_str()); } if (semaphore_state && semaphore_state->scope == kSyncScopeInternal && semaphore_state->signaled) { skip |= LogError(semaphore, "VUID-vkAcquireNextImageKHR-semaphore-01286", "%s: Semaphore must not be currently signaled or in a wait state.", func_name); } auto fence_state = GetFenceState(fence); if (fence_state) { skip |= ValidateFenceForSubmit(fence_state, "VUID-vkAcquireNextImageKHR-fence-01287", "VUID-vkAcquireNextImageKHR-fence-01287", "vkAcquireNextImageKHR()"); } const auto swapchain_data = GetSwapchainState(swapchain); if (swapchain_data) { if (swapchain_data->retired) { skip |= LogError(swapchain, "VUID-vkAcquireNextImageKHR-swapchain-01285", "%s: This swapchain has been retired. 
The application can still present any images it " "has acquired, but cannot acquire any more.", func_name); } auto physical_device_state = GetPhysicalDeviceState(); // TODO: this is technically wrong on many levels, but requires massive cleanup if (physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHR_called) { const uint32_t acquired_images = static_cast<uint32_t>( std::count_if(swapchain_data->images.begin(), swapchain_data->images.end(), [this](SWAPCHAIN_IMAGE image) { auto const state = GetImageState(image.image); return (state && state->acquired); })); const uint32_t swapchain_image_count = static_cast<uint32_t>(swapchain_data->images.size()); const auto min_image_count = physical_device_state->surfaceCapabilities.minImageCount; const bool too_many_already_acquired = acquired_images > swapchain_image_count - min_image_count; if (timeout == UINT64_MAX && too_many_already_acquired) { const char *vuid = "INVALID-vuid"; if (cmd_version == CMD_VERSION_1) { vuid = "VUID-vkAcquireNextImageKHR-swapchain-01802"; } else if (cmd_version == CMD_VERSION_2) { vuid = "VUID-vkAcquireNextImage2KHR-swapchain-01803"; } else { assert(false); } const uint32_t acquirable = swapchain_image_count - min_image_count + 1; skip |= LogError(swapchain, vuid, "%s: Application has already previously acquired %" PRIu32 " image%s from swapchain. Only %" PRIu32 " %s available to be acquired using a timeout of UINT64_MAX (given the swapchain has %" PRIu32 ", and VkSurfaceCapabilitiesKHR::minImageCount is %" PRIu32 ").", func_name, acquired_images, acquired_images > 1 ? "s" : "", acquirable, acquirable > 1 ? "are" : "is", swapchain_image_count, min_image_count); } } } return skip; } bool CoreChecks::PreCallValidateAcquireNextImageKHR(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout, VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex) const { return ValidateAcquireNextImage(device, CMD_VERSION_1, swapchain, timeout, semaphore, fence, pImageIndex, "vkAcquireNextImageKHR", "VUID-vkAcquireNextImageKHR-semaphore-03265"); } bool CoreChecks::PreCallValidateAcquireNextImage2KHR(VkDevice device, const VkAcquireNextImageInfoKHR *pAcquireInfo, uint32_t *pImageIndex) const { bool skip = false; skip |= ValidateDeviceMaskToPhysicalDeviceCount(pAcquireInfo->deviceMask, pAcquireInfo->swapchain, "VUID-VkAcquireNextImageInfoKHR-deviceMask-01290"); skip |= ValidateDeviceMaskToZero(pAcquireInfo->deviceMask, pAcquireInfo->swapchain, "VUID-VkAcquireNextImageInfoKHR-deviceMask-01291"); skip |= ValidateAcquireNextImage(device, CMD_VERSION_2, pAcquireInfo->swapchain, pAcquireInfo->timeout, pAcquireInfo->semaphore, pAcquireInfo->fence, pImageIndex, "vkAcquireNextImage2KHR", "VUID-VkAcquireNextImageInfoKHR-semaphore-03266"); return skip; } bool CoreChecks::PreCallValidateDestroySurfaceKHR(VkInstance instance, VkSurfaceKHR surface, const VkAllocationCallbacks *pAllocator) const { const auto surface_state = GetSurfaceState(surface); bool skip = false; if ((surface_state) && (surface_state->swapchain)) { skip |= LogError(instance, "VUID-vkDestroySurfaceKHR-surface-01266", "vkDestroySurfaceKHR() called before its associated VkSwapchainKHR was destroyed."); } return skip; } #ifdef VK_USE_PLATFORM_WAYLAND_KHR bool CoreChecks::PreCallValidateGetPhysicalDeviceWaylandPresentationSupportKHR(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, struct wl_display *display) const { const auto pd_state = GetPhysicalDeviceState(physicalDevice); return ValidateQueueFamilyIndex(pd_state, queueFamilyIndex, 
"VUID-vkGetPhysicalDeviceWaylandPresentationSupportKHR-queueFamilyIndex-01306", "vkGetPhysicalDeviceWaylandPresentationSupportKHR", "queueFamilyIndex"); } #endif // VK_USE_PLATFORM_WAYLAND_KHR #ifdef VK_USE_PLATFORM_WIN32_KHR bool CoreChecks::PreCallValidateGetPhysicalDeviceWin32PresentationSupportKHR(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex) const { const auto pd_state = GetPhysicalDeviceState(physicalDevice); return ValidateQueueFamilyIndex(pd_state, queueFamilyIndex, "VUID-vkGetPhysicalDeviceWin32PresentationSupportKHR-queueFamilyIndex-01309", "vkGetPhysicalDeviceWin32PresentationSupportKHR", "queueFamilyIndex"); } #endif // VK_USE_PLATFORM_WIN32_KHR #ifdef VK_USE_PLATFORM_XCB_KHR bool CoreChecks::PreCallValidateGetPhysicalDeviceXcbPresentationSupportKHR(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, xcb_connection_t *connection, xcb_visualid_t visual_id) const { const auto pd_state = GetPhysicalDeviceState(physicalDevice); return ValidateQueueFamilyIndex(pd_state, queueFamilyIndex, "VUID-vkGetPhysicalDeviceXcbPresentationSupportKHR-queueFamilyIndex-01312", "vkGetPhysicalDeviceXcbPresentationSupportKHR", "queueFamilyIndex"); } #endif // VK_USE_PLATFORM_XCB_KHR #ifdef VK_USE_PLATFORM_XLIB_KHR bool CoreChecks::PreCallValidateGetPhysicalDeviceXlibPresentationSupportKHR(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, Display *dpy, VisualID visualID) const { const auto pd_state = GetPhysicalDeviceState(physicalDevice); return ValidateQueueFamilyIndex(pd_state, queueFamilyIndex, "VUID-vkGetPhysicalDeviceXlibPresentationSupportKHR-queueFamilyIndex-01315", "vkGetPhysicalDeviceXlibPresentationSupportKHR", "queueFamilyIndex"); } #endif // VK_USE_PLATFORM_XLIB_KHR bool CoreChecks::PreCallValidateGetPhysicalDeviceSurfaceSupportKHR(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, VkSurfaceKHR surface, VkBool32 *pSupported) const { const auto physical_device_state = GetPhysicalDeviceState(physicalDevice); return ValidateQueueFamilyIndex(physical_device_state, queueFamilyIndex, "VUID-vkGetPhysicalDeviceSurfaceSupportKHR-queueFamilyIndex-01269", "vkGetPhysicalDeviceSurfaceSupportKHR", "queueFamilyIndex"); } bool CoreChecks::ValidateDescriptorUpdateTemplate(const char *func_name, const VkDescriptorUpdateTemplateCreateInfo *pCreateInfo) const { bool skip = false; const auto layout = GetDescriptorSetLayoutShared(pCreateInfo->descriptorSetLayout); if (VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET == pCreateInfo->templateType && !layout) { skip |= LogError(pCreateInfo->descriptorSetLayout, "VUID-VkDescriptorUpdateTemplateCreateInfo-templateType-00350", "%s: Invalid pCreateInfo->descriptorSetLayout (%s)", func_name, report_data->FormatHandle(pCreateInfo->descriptorSetLayout).c_str()); } else if (VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR == pCreateInfo->templateType) { auto bind_point = pCreateInfo->pipelineBindPoint; bool valid_bp = (bind_point == VK_PIPELINE_BIND_POINT_GRAPHICS) || (bind_point == VK_PIPELINE_BIND_POINT_COMPUTE) || (bind_point == VK_PIPELINE_BIND_POINT_RAY_TRACING_NV); if (!valid_bp) { skip |= LogError(device, "VUID-VkDescriptorUpdateTemplateCreateInfo-templateType-00351", "%s: Invalid pCreateInfo->pipelineBindPoint (%" PRIu32 ").", func_name, static_cast<uint32_t>(bind_point)); } const auto pipeline_layout = GetPipelineLayout(pCreateInfo->pipelineLayout); if (!pipeline_layout) { skip |= LogError(pCreateInfo->pipelineLayout, "VUID-VkDescriptorUpdateTemplateCreateInfo-templateType-00352", "%s: Invalid 
pCreateInfo->pipelineLayout (%s)", func_name, report_data->FormatHandle(pCreateInfo->pipelineLayout).c_str()); } else { const uint32_t pd_set = pCreateInfo->set; if ((pd_set >= pipeline_layout->set_layouts.size()) || !pipeline_layout->set_layouts[pd_set] || !pipeline_layout->set_layouts[pd_set]->IsPushDescriptor()) { skip |= LogError(pCreateInfo->pipelineLayout, "VUID-VkDescriptorUpdateTemplateCreateInfo-templateType-00353", "%s: pCreateInfo->set (%" PRIu32 ") does not refer to the push descriptor set layout for pCreateInfo->pipelineLayout (%s).", func_name, pd_set, report_data->FormatHandle(pCreateInfo->pipelineLayout).c_str()); } } } return skip; } bool CoreChecks::PreCallValidateCreateDescriptorUpdateTemplate(VkDevice device, const VkDescriptorUpdateTemplateCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkDescriptorUpdateTemplate *pDescriptorUpdateTemplate) const { bool skip = ValidateDescriptorUpdateTemplate("vkCreateDescriptorUpdateTemplate()", pCreateInfo); return skip; } bool CoreChecks::PreCallValidateCreateDescriptorUpdateTemplateKHR(VkDevice device, const VkDescriptorUpdateTemplateCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkDescriptorUpdateTemplate *pDescriptorUpdateTemplate) const { bool skip = ValidateDescriptorUpdateTemplate("vkCreateDescriptorUpdateTemplateKHR()", pCreateInfo); return skip; } bool CoreChecks::ValidateUpdateDescriptorSetWithTemplate(VkDescriptorSet descriptorSet, VkDescriptorUpdateTemplate descriptorUpdateTemplate, const void *pData) const { bool skip = false; auto const template_map_entry = desc_template_map.find(descriptorUpdateTemplate); if ((template_map_entry == desc_template_map.end()) || (template_map_entry->second.get() == nullptr)) { // Object tracker will report errors for invalid descriptorUpdateTemplate values, avoiding a crash in release builds // but retaining the assert as template support is new enough to want to investigate these in debug builds. 
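        // NOTE: assert(0) compiles out under NDEBUG, so in a release build this branch simply falls through with
        // skip == false: core checks silently skip validation for the unknown handle rather than crash, relying on
        // the object tracker to have reported it.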
assert(0); } else { const TEMPLATE_STATE *template_state = template_map_entry->second.get(); // TODO: Validate template push descriptor updates if (template_state->create_info.templateType == VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET) { skip = ValidateUpdateDescriptorSetsWithTemplateKHR(descriptorSet, template_state, pData); } } return skip; } bool CoreChecks::PreCallValidateUpdateDescriptorSetWithTemplate(VkDevice device, VkDescriptorSet descriptorSet, VkDescriptorUpdateTemplate descriptorUpdateTemplate, const void *pData) const { return ValidateUpdateDescriptorSetWithTemplate(descriptorSet, descriptorUpdateTemplate, pData); } bool CoreChecks::PreCallValidateUpdateDescriptorSetWithTemplateKHR(VkDevice device, VkDescriptorSet descriptorSet, VkDescriptorUpdateTemplate descriptorUpdateTemplate, const void *pData) const { return ValidateUpdateDescriptorSetWithTemplate(descriptorSet, descriptorUpdateTemplate, pData); } bool CoreChecks::PreCallValidateCmdPushDescriptorSetWithTemplateKHR(VkCommandBuffer commandBuffer, VkDescriptorUpdateTemplate descriptorUpdateTemplate, VkPipelineLayout layout, uint32_t set, const void *pData) const { const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); const char *const func_name = "vkPushDescriptorSetWithTemplateKHR()"; bool skip = false; skip |= ValidateCmd(cb_state, CMD_PUSHDESCRIPTORSETWITHTEMPLATEKHR, func_name); const auto layout_data = GetPipelineLayout(layout); const auto dsl = GetDslFromPipelineLayout(layout_data, set); // Validate the set index points to a push descriptor set and is in range if (dsl) { if (!dsl->IsPushDescriptor()) { skip = LogError(layout, "VUID-vkCmdPushDescriptorSetKHR-set-00365", "%s: Set index %" PRIu32 " does not match push descriptor set layout index for %s.", func_name, set, report_data->FormatHandle(layout).c_str()); } } else if (layout_data && (set >= layout_data->set_layouts.size())) { skip = LogError(layout, "VUID-vkCmdPushDescriptorSetKHR-set-00364", "%s: Set index %" PRIu32 " is outside of range for %s (set < %" PRIu32 ").", func_name, set, report_data->FormatHandle(layout).c_str(), static_cast<uint32_t>(layout_data->set_layouts.size())); } const auto template_state = GetDescriptorTemplateState(descriptorUpdateTemplate); if (template_state) { const auto &template_ci = template_state->create_info; static const std::map<VkPipelineBindPoint, std::string> bind_errors = { std::make_pair(VK_PIPELINE_BIND_POINT_GRAPHICS, "VUID-vkCmdPushDescriptorSetWithTemplateKHR-commandBuffer-00366"), std::make_pair(VK_PIPELINE_BIND_POINT_COMPUTE, "VUID-vkCmdPushDescriptorSetWithTemplateKHR-commandBuffer-00366"), std::make_pair(VK_PIPELINE_BIND_POINT_RAY_TRACING_NV, "VUID-vkCmdPushDescriptorSetWithTemplateKHR-commandBuffer-00366")}; skip |= ValidatePipelineBindPoint(cb_state, template_ci.pipelineBindPoint, func_name, bind_errors); if (template_ci.templateType != VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR) { skip |= LogError(cb_state->commandBuffer, kVUID_Core_PushDescriptorUpdate_TemplateType, "%s: descriptorUpdateTemplate %s was not created with flag " "VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR.", func_name, report_data->FormatHandle(descriptorUpdateTemplate).c_str()); } if (template_ci.set != set) { skip |= LogError(cb_state->commandBuffer, kVUID_Core_PushDescriptorUpdate_Template_SetMismatched, "%s: descriptorUpdateTemplate %s created with set %" PRIu32 " does not match command parameter set %" PRIu32 ".", func_name, report_data->FormatHandle(descriptorUpdateTemplate).c_str(), 
template_ci.set, set); } if (!CompatForSet(set, layout_data, GetPipelineLayout(template_ci.pipelineLayout))) { LogObjectList objlist(cb_state->commandBuffer); objlist.add(descriptorUpdateTemplate); objlist.add(template_ci.pipelineLayout); objlist.add(layout); skip |= LogError(objlist, kVUID_Core_PushDescriptorUpdate_Template_LayoutMismatched, "%s: descriptorUpdateTemplate %s created with %s is incompatible with command parameter " "%s for set %" PRIu32, func_name, report_data->FormatHandle(descriptorUpdateTemplate).c_str(), report_data->FormatHandle(template_ci.pipelineLayout).c_str(), report_data->FormatHandle(layout).c_str(), set); } } if (dsl && template_state) { // Create an empty proxy in order to use the existing descriptor set update validation cvdescriptorset::DescriptorSet proxy_ds(VK_NULL_HANDLE, nullptr, dsl, 0, this); // Decode the template into a set of write updates cvdescriptorset::DecodedTemplateUpdate decoded_template(this, VK_NULL_HANDLE, template_state, pData, dsl->GetDescriptorSetLayout()); // Validate the decoded update against the proxy_ds skip |= ValidatePushDescriptorsUpdate(&proxy_ds, static_cast<uint32_t>(decoded_template.desc_writes.size()), decoded_template.desc_writes.data(), func_name); } return skip; } bool CoreChecks::ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(VkPhysicalDevice physicalDevice, uint32_t planeIndex, const char *api_name) const { bool skip = false; const auto physical_device_state = GetPhysicalDeviceState(physicalDevice); if (physical_device_state->vkGetPhysicalDeviceDisplayPlanePropertiesKHR_called) { if (planeIndex >= physical_device_state->display_plane_property_count) { skip |= LogError(physicalDevice, "VUID-vkGetDisplayPlaneSupportedDisplaysKHR-planeIndex-01249", "%s(): planeIndex (%u) must be in the range [0, %d] that was returned by " "vkGetPhysicalDeviceDisplayPlanePropertiesKHR " "or vkGetPhysicalDeviceDisplayPlaneProperties2KHR. 
Do you have the plane index hardcoded?", api_name, planeIndex, physical_device_state->display_plane_property_count - 1); } } return skip; } bool CoreChecks::PreCallValidateGetDisplayPlaneSupportedDisplaysKHR(VkPhysicalDevice physicalDevice, uint32_t planeIndex, uint32_t *pDisplayCount, VkDisplayKHR *pDisplays) const { bool skip = false; skip |= ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(physicalDevice, planeIndex, "vkGetDisplayPlaneSupportedDisplaysKHR"); return skip; } bool CoreChecks::PreCallValidateGetDisplayPlaneCapabilitiesKHR(VkPhysicalDevice physicalDevice, VkDisplayModeKHR mode, uint32_t planeIndex, VkDisplayPlaneCapabilitiesKHR *pCapabilities) const { bool skip = false; skip |= ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(physicalDevice, planeIndex, "vkGetDisplayPlaneCapabilitiesKHR"); return skip; } bool CoreChecks::PreCallValidateGetDisplayPlaneCapabilities2KHR(VkPhysicalDevice physicalDevice, const VkDisplayPlaneInfo2KHR *pDisplayPlaneInfo, VkDisplayPlaneCapabilities2KHR *pCapabilities) const { bool skip = false; skip |= ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(physicalDevice, pDisplayPlaneInfo->planeIndex, "vkGetDisplayPlaneCapabilities2KHR"); return skip; } bool CoreChecks::PreCallValidateCreateDisplayPlaneSurfaceKHR(VkInstance instance, const VkDisplaySurfaceCreateInfoKHR *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) const { bool skip = false; const VkDisplayModeKHR display_mode = pCreateInfo->displayMode; const uint32_t plane_index = pCreateInfo->planeIndex; if (pCreateInfo->alphaMode == VK_DISPLAY_PLANE_ALPHA_GLOBAL_BIT_KHR) { const float global_alpha = pCreateInfo->globalAlpha; if ((global_alpha > 1.0f) || (global_alpha < 0.0f)) { skip |= LogError( display_mode, "VUID-VkDisplaySurfaceCreateInfoKHR-alphaMode-01254", "vkCreateDisplayPlaneSurfaceKHR(): alphaMode is VK_DISPLAY_PLANE_ALPHA_GLOBAL_BIT_KHR but globalAlpha is %f.", global_alpha); } } const DISPLAY_MODE_STATE *dm_state = GetDisplayModeState(display_mode); if (dm_state != nullptr) { // Get physical device from VkDisplayModeKHR state tracking const VkPhysicalDevice physical_device = dm_state->physical_device; const auto physical_device_state = GetPhysicalDeviceState(physical_device); VkPhysicalDeviceProperties device_properties = {}; DispatchGetPhysicalDeviceProperties(physical_device, &device_properties); const uint32_t width = pCreateInfo->imageExtent.width; const uint32_t height = pCreateInfo->imageExtent.height; if (width >= device_properties.limits.maxImageDimension2D) { skip |= LogError(display_mode, "VUID-VkDisplaySurfaceCreateInfoKHR-width-01256", "vkCreateDisplayPlaneSurfaceKHR(): width (%" PRIu32 ") exceeds device limit maxImageDimension2D (%" PRIu32 ").", width, device_properties.limits.maxImageDimension2D); } if (height >= device_properties.limits.maxImageDimension2D) { skip |= LogError(display_mode, "VUID-VkDisplaySurfaceCreateInfoKHR-width-01256", "vkCreateDisplayPlaneSurfaceKHR(): height (%" PRIu32 ") exceeds device limit maxImageDimension2D (%" PRIu32 ").", height, device_properties.limits.maxImageDimension2D); } if (physical_device_state->vkGetPhysicalDeviceDisplayPlanePropertiesKHR_called) { if (plane_index >= physical_device_state->display_plane_property_count) { skip |= LogError(display_mode, "VUID-VkDisplaySurfaceCreateInfoKHR-planeIndex-01252", "vkCreateDisplayPlaneSurfaceKHR(): planeIndex (%u) must be in the range [0, %d] that was returned by " "vkGetPhysicalDeviceDisplayPlanePropertiesKHR " "or 
vkGetPhysicalDeviceDisplayPlaneProperties2KHR. Do you have the plane index hardcoded?", plane_index, physical_device_state->display_plane_property_count - 1); } else { // call here once we know the plane index used is a valid plane index VkDisplayPlaneCapabilitiesKHR plane_capabilities; DispatchGetDisplayPlaneCapabilitiesKHR(physical_device, display_mode, plane_index, &plane_capabilities); if ((pCreateInfo->alphaMode & plane_capabilities.supportedAlpha) == 0) { skip |= LogError(display_mode, "VUID-VkDisplaySurfaceCreateInfoKHR-alphaMode-01255", "vkCreateDisplayPlaneSurfaceKHR(): alphaMode is %s but planeIndex %u supportedAlpha (0x%x) " "does not support the mode.", string_VkDisplayPlaneAlphaFlagBitsKHR(pCreateInfo->alphaMode), plane_index, plane_capabilities.supportedAlpha); } } } } return skip; } bool CoreChecks::PreCallValidateCmdDebugMarkerBeginEXT(VkCommandBuffer commandBuffer, const VkDebugMarkerMarkerInfoEXT *pMarkerInfo) const { const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); return ValidateCmd(cb_state, CMD_DEBUGMARKERBEGINEXT, "vkCmdDebugMarkerBeginEXT()"); } bool CoreChecks::PreCallValidateCmdDebugMarkerEndEXT(VkCommandBuffer commandBuffer) const { const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); return ValidateCmd(cb_state, CMD_DEBUGMARKERENDEXT, "vkCmdDebugMarkerEndEXT()"); } bool CoreChecks::PreCallValidateCmdBeginQueryIndexedEXT(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query, VkQueryControlFlags flags, uint32_t index) const { if (disabled[query_validation]) return false; const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); QueryObject query_obj(queryPool, query, index); const char *cmd_name = "vkCmdBeginQueryIndexedEXT()"; struct BeginQueryIndexedVuids : ValidateBeginQueryVuids { BeginQueryIndexedVuids() : ValidateBeginQueryVuids() { vuid_queue_flags = "VUID-vkCmdBeginQueryIndexedEXT-commandBuffer-cmdpool"; vuid_queue_feedback = "VUID-vkCmdBeginQueryIndexedEXT-queryType-02338"; vuid_queue_occlusion = "VUID-vkCmdBeginQueryIndexedEXT-queryType-00803"; vuid_precise = "VUID-vkCmdBeginQueryIndexedEXT-queryType-00800"; vuid_query_count = "VUID-vkCmdBeginQueryIndexedEXT-query-00802"; vuid_profile_lock = "VUID-vkCmdBeginQueryIndexedEXT-queryPool-03223"; vuid_scope_not_first = "VUID-vkCmdBeginQueryIndexedEXT-queryPool-03224"; vuid_scope_in_rp = "VUID-vkCmdBeginQueryIndexedEXT-queryPool-03225"; vuid_dup_query_type = "VUID-vkCmdBeginQueryIndexedEXT-queryPool-01922"; vuid_protected_cb = "VUID-vkCmdBeginQueryIndexedEXT-commandBuffer-01885"; } }; BeginQueryIndexedVuids vuids; bool skip = ValidateBeginQuery(cb_state, query_obj, flags, CMD_BEGINQUERYINDEXEDEXT, cmd_name, &vuids); // Extension specific VU's const auto &query_pool_ci = GetQueryPoolState(query_obj.pool)->createInfo; if (query_pool_ci.queryType == VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT) { if (device_extensions.vk_ext_transform_feedback && (index >= phys_dev_ext_props.transform_feedback_props.maxTransformFeedbackStreams)) { skip |= LogError( cb_state->commandBuffer, "VUID-vkCmdBeginQueryIndexedEXT-queryType-02339", "%s: index %" PRIu32 " must be less than VkPhysicalDeviceTransformFeedbackPropertiesEXT::maxTransformFeedbackStreams %" PRIu32 ".", cmd_name, index, phys_dev_ext_props.transform_feedback_props.maxTransformFeedbackStreams); } } else if (index != 0) { skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdBeginQueryIndexedEXT-queryType-02340", "%s: index %" PRIu32 " must be zero if %s was not created with 
type VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT.", cmd_name, index, report_data->FormatHandle(queryPool).c_str()); } return skip; } void CoreChecks::PreCallRecordCmdBeginQueryIndexedEXT(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query, VkQueryControlFlags flags, uint32_t index) { if (disabled[query_validation]) return; QueryObject query_obj = {queryPool, query, index}; EnqueueVerifyBeginQuery(commandBuffer, query_obj, "vkCmdBeginQueryIndexedEXT()"); } void CoreChecks::PreCallRecordCmdEndQueryIndexedEXT(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query, uint32_t index) { if (disabled[query_validation]) return; const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); QueryObject query_obj = {queryPool, query, index}; query_obj.endCommandIndex = cb_state->commandCount - 1; EnqueueVerifyEndQuery(commandBuffer, query_obj); } bool CoreChecks::PreCallValidateCmdEndQueryIndexedEXT(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query, uint32_t index) const { if (disabled[query_validation]) return false; QueryObject query_obj = {queryPool, query, index}; const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); struct EndQueryIndexedVuids : ValidateEndQueryVuids { EndQueryIndexedVuids() : ValidateEndQueryVuids() { vuid_queue_flags = "VUID-vkCmdEndQueryIndexedEXT-commandBuffer-cmdpool"; vuid_active_queries = "VUID-vkCmdEndQueryIndexedEXT-None-02342"; vuid_protected_cb = "VUID-vkCmdEndQueryIndexedEXT-commandBuffer-02344"; } }; EndQueryIndexedVuids vuids; return ValidateCmdEndQuery(cb_state, query_obj, CMD_ENDQUERYINDEXEDEXT, "vkCmdEndQueryIndexedEXT()", &vuids); } bool CoreChecks::PreCallValidateCmdSetDiscardRectangleEXT(VkCommandBuffer commandBuffer, uint32_t firstDiscardRectangle, uint32_t discardRectangleCount, const VkRect2D *pDiscardRectangles) const { const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); // Minimal validation for command buffer state return ValidateCmd(cb_state, CMD_SETDISCARDRECTANGLEEXT, "vkCmdSetDiscardRectangleEXT()"); } bool CoreChecks::PreCallValidateCmdSetSampleLocationsEXT(VkCommandBuffer commandBuffer, const VkSampleLocationsInfoEXT *pSampleLocationsInfo) const { bool skip = false; const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); // Minimal validation for command buffer state skip |= ValidateCmd(cb_state, CMD_SETSAMPLELOCATIONSEXT, "vkCmdSetSampleLocationsEXT()"); skip |= ValidateSampleLocationsInfo(pSampleLocationsInfo, "vkCmdSetSampleLocationsEXT"); const auto lv_bind_point = ConvertToLvlBindPoint(VK_PIPELINE_BIND_POINT_GRAPHICS); const auto *pipe = cb_state->lastBound[lv_bind_point].pipeline_state; if (pipe != nullptr) { // Check same error with different log messages const safe_VkPipelineMultisampleStateCreateInfo *multisample_state = pipe->graphicsPipelineCI.pMultisampleState; if (multisample_state == nullptr) { skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdSetSampleLocationsEXT-sampleLocationsPerPixel-01529", "vkCmdSetSampleLocationsEXT(): pSampleLocationsInfo->sampleLocationsPerPixel must be equal to " "rasterizationSamples, but the bound graphics pipeline was created without a multisample state"); } else if (multisample_state->rasterizationSamples != pSampleLocationsInfo->sampleLocationsPerPixel) { skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdSetSampleLocationsEXT-sampleLocationsPerPixel-01529", "vkCmdSetSampleLocationsEXT(): pSampleLocationsInfo->sampleLocationsPerPixel (%s) is not equal to " "the last bound pipeline's rasterizationSamples 
(%s)", string_VkSampleCountFlagBits(pSampleLocationsInfo->sampleLocationsPerPixel), string_VkSampleCountFlagBits(multisample_state->rasterizationSamples)); } } return skip; } bool CoreChecks::ValidateCreateSamplerYcbcrConversion(const char *func_name, const VkSamplerYcbcrConversionCreateInfo *create_info) const { bool skip = false; const VkFormat conversion_format = create_info->format; // Need to check for external format conversion first as it allows for non-UNORM format bool external_format = false; #ifdef VK_USE_PLATFORM_ANDROID_KHR const VkExternalFormatANDROID *ext_format_android = LvlFindInChain<VkExternalFormatANDROID>(create_info->pNext); if ((nullptr != ext_format_android) && (0 != ext_format_android->externalFormat)) { external_format = true; if (VK_FORMAT_UNDEFINED != create_info->format) { return LogError(device, "VUID-VkSamplerYcbcrConversionCreateInfo-format-01904", "%s: CreateInfo format is not VK_FORMAT_UNDEFINED while " "there is a chained VkExternalFormatANDROID struct with a non-zero externalFormat.", func_name); } } #endif // VK_USE_PLATFORM_ANDROID_KHR if ((external_format == false) && (FormatIsUNorm(conversion_format) == false)) { const char *vuid = (device_extensions.vk_android_external_memory_android_hardware_buffer) ? "VUID-VkSamplerYcbcrConversionCreateInfo-format-04061" : "VUID-VkSamplerYcbcrConversionCreateInfo-format-04060"; skip |= LogError(device, vuid, "%s: CreateInfo format (%s) is not an UNORM format and there is no external format conversion being created.", func_name, string_VkFormat(conversion_format)); } // Gets VkFormatFeatureFlags according to Sampler Ycbcr Conversion Format Features // (vkspec.html#potential-format-features) VkFormatFeatureFlags format_features = VK_FORMAT_FEATURE_FLAG_BITS_MAX_ENUM; if (conversion_format == VK_FORMAT_UNDEFINED) { #ifdef VK_USE_PLATFORM_ANDROID_KHR // only check for external format inside VK_FORMAT_UNDEFINED check to prevent unnecessary extra errors from no format // features being supported if (external_format == true) { auto it = ahb_ext_formats_map.find(ext_format_android->externalFormat); if (it != ahb_ext_formats_map.end()) { format_features = it->second; } } #endif // VK_USE_PLATFORM_ANDROID_KHR } else { format_features = GetPotentialFormatFeatures(conversion_format); } // Check all VUID that are based off of VkFormatFeatureFlags // These can't be in StatelessValidation due to needing possible External AHB state for feature support if (((format_features & VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT) == 0) && ((format_features & VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT) == 0)) { skip |= LogError(device, "VUID-VkSamplerYcbcrConversionCreateInfo-format-01650", "%s: Format %s does not support either VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT or " "VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT", func_name, string_VkFormat(conversion_format)); } if ((format_features & VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT) == 0) { if (FormatIsXChromaSubsampled(conversion_format) && create_info->xChromaOffset == VK_CHROMA_LOCATION_COSITED_EVEN) { skip |= LogError(device, "VUID-VkSamplerYcbcrConversionCreateInfo-xChromaOffset-01651", "%s: Format %s does not support VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT so xChromaOffset can't " "be VK_CHROMA_LOCATION_COSITED_EVEN", func_name, string_VkFormat(conversion_format)); } if (FormatIsYChromaSubsampled(conversion_format) && create_info->yChromaOffset == VK_CHROMA_LOCATION_COSITED_EVEN) { skip |= LogError(device, 
"VUID-VkSamplerYcbcrConversionCreateInfo-xChromaOffset-01651", "%s: Format %s does not support VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT so yChromaOffset can't " "be VK_CHROMA_LOCATION_COSITED_EVEN", func_name, string_VkFormat(conversion_format)); } } if ((format_features & VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT) == 0) { if (FormatIsXChromaSubsampled(conversion_format) && create_info->xChromaOffset == VK_CHROMA_LOCATION_MIDPOINT) { skip |= LogError(device, "VUID-VkSamplerYcbcrConversionCreateInfo-xChromaOffset-01652", "%s: Format %s does not support VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT so xChromaOffset can't " "be VK_CHROMA_LOCATION_MIDPOINT", func_name, string_VkFormat(conversion_format)); } if (FormatIsYChromaSubsampled(conversion_format) && create_info->yChromaOffset == VK_CHROMA_LOCATION_MIDPOINT) { skip |= LogError(device, "VUID-VkSamplerYcbcrConversionCreateInfo-xChromaOffset-01652", "%s: Format %s does not support VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT so yChromaOffset can't " "be VK_CHROMA_LOCATION_MIDPOINT", func_name, string_VkFormat(conversion_format)); } } if (((format_features & VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_FORCEABLE_BIT) == 0) && (create_info->forceExplicitReconstruction == VK_TRUE)) { skip |= LogError(device, "VUID-VkSamplerYcbcrConversionCreateInfo-forceExplicitReconstruction-01656", "%s: Format %s does not support " "VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_FORCEABLE_BIT so " "forceExplicitReconstruction must be VK_FALSE", func_name, string_VkFormat(conversion_format)); } if (((format_features & VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_LINEAR_FILTER_BIT) == 0) && (create_info->chromaFilter == VK_FILTER_LINEAR)) { skip |= LogError(device, "VUID-VkSamplerYcbcrConversionCreateInfo-chromaFilter-01657", "%s: Format %s does not support VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_LINEAR_FILTER_BIT so " "chromaFilter must not be VK_FILTER_LINEAR", func_name, string_VkFormat(conversion_format)); } return skip; } bool CoreChecks::PreCallValidateCreateSamplerYcbcrConversion(VkDevice device, const VkSamplerYcbcrConversionCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkSamplerYcbcrConversion *pYcbcrConversion) const { return ValidateCreateSamplerYcbcrConversion("vkCreateSamplerYcbcrConversion()", pCreateInfo); } bool CoreChecks::PreCallValidateCreateSamplerYcbcrConversionKHR(VkDevice device, const VkSamplerYcbcrConversionCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkSamplerYcbcrConversion *pYcbcrConversion) const { return ValidateCreateSamplerYcbcrConversion("vkCreateSamplerYcbcrConversionKHR()", pCreateInfo); } bool CoreChecks::PreCallValidateCreateSampler(VkDevice device, const VkSamplerCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkSampler *pSampler) const { bool skip = false; if (samplerMap.size() >= phys_dev_props.limits.maxSamplerAllocationCount) { skip |= LogError( device, "VUID-vkCreateSampler-maxSamplerAllocationCount-04110", "vkCreateSampler(): Number of currently valid sampler objects (%zu) is not less than the maximum allowed (%u).", samplerMap.size(), phys_dev_props.limits.maxSamplerAllocationCount); } if (enabled_features.core11.samplerYcbcrConversion == VK_TRUE) { const VkSamplerYcbcrConversionInfo *conversion_info = LvlFindInChain<VkSamplerYcbcrConversionInfo>(pCreateInfo->pNext); if (conversion_info != nullptr) { const VkSamplerYcbcrConversion sampler_ycbcr_conversion = 
                conversion_info->conversion;
            const SAMPLER_YCBCR_CONVERSION_STATE *ycbcr_state = GetSamplerYcbcrConversionState(sampler_ycbcr_conversion);
            if ((ycbcr_state->format_features &
                 VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER_BIT) == 0) {
                const VkFilter chroma_filter = ycbcr_state->chromaFilter;
                if (pCreateInfo->minFilter != chroma_filter) {
                    skip |= LogError(
                        device, "VUID-VkSamplerCreateInfo-minFilter-01645",
                        "VkCreateSampler: VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER_BIT is "
                        "not supported for SamplerYcbcrConversion's (%s) format %s so minFilter (%s) needs to be equal to "
                        "chromaFilter (%s)",
                        report_data->FormatHandle(sampler_ycbcr_conversion).c_str(), string_VkFormat(ycbcr_state->format),
                        string_VkFilter(pCreateInfo->minFilter), string_VkFilter(chroma_filter));
                }
                if (pCreateInfo->magFilter != chroma_filter) {
                    skip |= LogError(
                        device, "VUID-VkSamplerCreateInfo-minFilter-01645",
                        "VkCreateSampler: VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER_BIT is "
                        "not supported for SamplerYcbcrConversion's (%s) format %s so magFilter (%s) needs to be equal to "
                        "chromaFilter (%s)",
                        report_data->FormatHandle(sampler_ycbcr_conversion).c_str(), string_VkFormat(ycbcr_state->format),
                        string_VkFilter(pCreateInfo->magFilter), string_VkFilter(chroma_filter));
                }
            }
            // At this point there is a known sampler YCbCr conversion enabled
            const auto *sampler_reduction = LvlFindInChain<VkSamplerReductionModeCreateInfo>(pCreateInfo->pNext);
            if (sampler_reduction != nullptr) {
                if (sampler_reduction->reductionMode != VK_SAMPLER_REDUCTION_MODE_WEIGHTED_AVERAGE) {
                    skip |= LogError(device, "VUID-VkSamplerCreateInfo-None-01647",
                                     "A sampler YCbCr Conversion is being used creating this sampler so the sampler reduction mode "
                                     "must be VK_SAMPLER_REDUCTION_MODE_WEIGHTED_AVERAGE.");
                }
            }
        }
    }

    if (pCreateInfo->borderColor == VK_BORDER_COLOR_INT_CUSTOM_EXT ||
        pCreateInfo->borderColor == VK_BORDER_COLOR_FLOAT_CUSTOM_EXT) {
        if (!enabled_features.custom_border_color_features.customBorderColors) {
            skip |= LogError(device, "VUID-VkSamplerCreateInfo-customBorderColors-04085",
                             "vkCreateSampler(): A custom border color was specified without enabling the custom border color feature");
        }
        auto custom_create_info = LvlFindInChain<VkSamplerCustomBorderColorCreateInfoEXT>(pCreateInfo->pNext);
        if (custom_create_info) {
            if (custom_create_info->format == VK_FORMAT_UNDEFINED &&
                !enabled_features.custom_border_color_features.customBorderColorWithoutFormat) {
                skip |= LogError(device, "VUID-VkSamplerCustomBorderColorCreateInfoEXT-format-04014",
                                 "vkCreateSampler(): A custom border color was specified as VK_FORMAT_UNDEFINED without the "
                                 "customBorderColorWithoutFormat feature being enabled");
            }
        }
        if (custom_border_color_sampler_count >= phys_dev_ext_props.custom_border_color_props.maxCustomBorderColorSamplers) {
            skip |= LogError(device, "VUID-VkSamplerCreateInfo-None-04012",
                             "vkCreateSampler(): Creating a sampler with a custom border color will exceed the "
                             "maxCustomBorderColorSamplers limit of %d",
                             phys_dev_ext_props.custom_border_color_props.maxCustomBorderColorSamplers);
        }
    }

    if (ExtEnabled::kNotEnabled != device_extensions.vk_khr_portability_subset) {
        if ((VK_FALSE == enabled_features.portability_subset_features.samplerMipLodBias) && pCreateInfo->mipLodBias != 0) {
            skip |= LogError(device, "VUID-VkSamplerCreateInfo-samplerMipLodBias-04467",
                             "vkCreateSampler (portability error): mip LOD bias not supported.");
        }
    }

    return skip;
}

bool
CoreChecks::ValidateGetBufferDeviceAddress(VkDevice device, const VkBufferDeviceAddressInfo *pInfo, const char *apiName) const { bool skip = false; if (!enabled_features.core12.bufferDeviceAddress && !enabled_features.buffer_device_address_ext.bufferDeviceAddress) { skip |= LogError(pInfo->buffer, "VUID-vkGetBufferDeviceAddress-bufferDeviceAddress-03324", "%s: The bufferDeviceAddress feature must: be enabled.", apiName); } if (physical_device_count > 1 && !enabled_features.core12.bufferDeviceAddressMultiDevice && !enabled_features.buffer_device_address_ext.bufferDeviceAddressMultiDevice) { skip |= LogError(pInfo->buffer, "VUID-vkGetBufferDeviceAddress-device-03325", "%s: If device was created with multiple physical devices, then the " "bufferDeviceAddressMultiDevice feature must: be enabled.", apiName); } const auto buffer_state = GetBufferState(pInfo->buffer); if (buffer_state) { if (!(buffer_state->createInfo.flags & VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT)) { skip |= ValidateMemoryIsBoundToBuffer(buffer_state, apiName, "VUID-VkBufferDeviceAddressInfo-buffer-02600"); } skip |= ValidateBufferUsageFlags(buffer_state, VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT, true, "VUID-VkBufferDeviceAddressInfo-buffer-02601", apiName, "VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT"); } return skip; } bool CoreChecks::PreCallValidateGetBufferDeviceAddressEXT(VkDevice device, const VkBufferDeviceAddressInfo *pInfo) const { return ValidateGetBufferDeviceAddress(device, static_cast<const VkBufferDeviceAddressInfo *>(pInfo), "vkGetBufferDeviceAddressEXT"); } bool CoreChecks::PreCallValidateGetBufferDeviceAddressKHR(VkDevice device, const VkBufferDeviceAddressInfo *pInfo) const { return ValidateGetBufferDeviceAddress(device, static_cast<const VkBufferDeviceAddressInfo *>(pInfo), "vkGetBufferDeviceAddressKHR"); } bool CoreChecks::PreCallValidateGetBufferDeviceAddress(VkDevice device, const VkBufferDeviceAddressInfo *pInfo) const { return ValidateGetBufferDeviceAddress(device, static_cast<const VkBufferDeviceAddressInfo *>(pInfo), "vkGetBufferDeviceAddress"); } bool CoreChecks::ValidateGetBufferOpaqueCaptureAddress(VkDevice device, const VkBufferDeviceAddressInfo *pInfo, const char *apiName) const { bool skip = false; if (!enabled_features.core12.bufferDeviceAddress) { skip |= LogError(pInfo->buffer, "VUID-vkGetBufferOpaqueCaptureAddress-None-03326", "%s(): The bufferDeviceAddress feature must: be enabled.", apiName); } if (physical_device_count > 1 && !enabled_features.core12.bufferDeviceAddressMultiDevice) { skip |= LogError(pInfo->buffer, "VUID-vkGetBufferOpaqueCaptureAddress-device-03327", "%s(): If device was created with multiple physical devices, then the " "bufferDeviceAddressMultiDevice feature must: be enabled.", apiName); } return skip; } bool CoreChecks::PreCallValidateGetBufferOpaqueCaptureAddressKHR(VkDevice device, const VkBufferDeviceAddressInfo *pInfo) const { return ValidateGetBufferOpaqueCaptureAddress(device, static_cast<const VkBufferDeviceAddressInfo *>(pInfo), "vkGetBufferOpaqueCaptureAddressKHR"); } bool CoreChecks::PreCallValidateGetBufferOpaqueCaptureAddress(VkDevice device, const VkBufferDeviceAddressInfo *pInfo) const { return ValidateGetBufferOpaqueCaptureAddress(device, static_cast<const VkBufferDeviceAddressInfo *>(pInfo), "vkGetBufferOpaqueCaptureAddress"); } bool CoreChecks::ValidateGetDeviceMemoryOpaqueCaptureAddress(VkDevice device, const VkDeviceMemoryOpaqueCaptureAddressInfo *pInfo, const char *apiName) const { bool skip = false; if 
(!enabled_features.core12.bufferDeviceAddress) { skip |= LogError(pInfo->memory, "VUID-vkGetDeviceMemoryOpaqueCaptureAddress-None-03334", "%s(): The bufferDeviceAddress feature must: be enabled.", apiName); } if (physical_device_count > 1 && !enabled_features.core12.bufferDeviceAddressMultiDevice) { skip |= LogError(pInfo->memory, "VUID-vkGetDeviceMemoryOpaqueCaptureAddress-device-03335", "%s(): If device was created with multiple physical devices, then the " "bufferDeviceAddressMultiDevice feature must: be enabled.", apiName); } const DEVICE_MEMORY_STATE *mem_info = GetDevMemState(pInfo->memory); if (mem_info) { auto chained_flags_struct = LvlFindInChain<VkMemoryAllocateFlagsInfo>(mem_info->alloc_info.pNext); if (!chained_flags_struct || !(chained_flags_struct->flags & VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT)) { skip |= LogError(pInfo->memory, "VUID-VkDeviceMemoryOpaqueCaptureAddressInfo-memory-03336", "%s(): memory must have been allocated with VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT.", apiName); } } return skip; } bool CoreChecks::PreCallValidateGetDeviceMemoryOpaqueCaptureAddressKHR(VkDevice device, const VkDeviceMemoryOpaqueCaptureAddressInfo *pInfo) const { return ValidateGetDeviceMemoryOpaqueCaptureAddress(device, static_cast<const VkDeviceMemoryOpaqueCaptureAddressInfo *>(pInfo), "vkGetDeviceMemoryOpaqueCaptureAddressKHR"); } bool CoreChecks::PreCallValidateGetDeviceMemoryOpaqueCaptureAddress(VkDevice device, const VkDeviceMemoryOpaqueCaptureAddressInfo *pInfo) const { return ValidateGetDeviceMemoryOpaqueCaptureAddress(device, static_cast<const VkDeviceMemoryOpaqueCaptureAddressInfo *>(pInfo), "vkGetDeviceMemoryOpaqueCaptureAddress"); } bool CoreChecks::ValidateQueryRange(VkDevice device, VkQueryPool queryPool, uint32_t totalCount, uint32_t firstQuery, uint32_t queryCount, const char *vuid_badfirst, const char *vuid_badrange, const char *apiName) const { bool skip = false; if (firstQuery >= totalCount) { skip |= LogError(device, vuid_badfirst, "%s(): firstQuery (%" PRIu32 ") greater than or equal to query pool count (%" PRIu32 ") for %s", apiName, firstQuery, totalCount, report_data->FormatHandle(queryPool).c_str()); } if ((firstQuery + queryCount) > totalCount) { skip |= LogError(device, vuid_badrange, "%s(): Query range [%" PRIu32 ", %" PRIu32 ") goes beyond query pool count (%" PRIu32 ") for %s", apiName, firstQuery, firstQuery + queryCount, totalCount, report_data->FormatHandle(queryPool).c_str()); } return skip; } bool CoreChecks::ValidateResetQueryPool(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, const char *apiName) const { if (disabled[query_validation]) return false; bool skip = false; if (!enabled_features.core12.hostQueryReset) { skip |= LogError(device, "VUID-vkResetQueryPool-None-02665", "%s(): Host query reset not enabled for device", apiName); } const auto query_pool_state = GetQueryPoolState(queryPool); if (query_pool_state) { skip |= ValidateQueryRange(device, queryPool, query_pool_state->createInfo.queryCount, firstQuery, queryCount, "VUID-vkResetQueryPool-firstQuery-02666", "VUID-vkResetQueryPool-firstQuery-02667", apiName); } return skip; } bool CoreChecks::PreCallValidateResetQueryPoolEXT(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount) const { return ValidateResetQueryPool(device, queryPool, firstQuery, queryCount, "vkResetQueryPoolEXT"); } bool CoreChecks::PreCallValidateResetQueryPool(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount) const { return 
        ValidateResetQueryPool(device, queryPool, firstQuery, queryCount, "vkResetQueryPool");
}

VkResult CoreChecks::CoreLayerCreateValidationCacheEXT(VkDevice device, const VkValidationCacheCreateInfoEXT *pCreateInfo,
                                                       const VkAllocationCallbacks *pAllocator,
                                                       VkValidationCacheEXT *pValidationCache) {
    *pValidationCache = ValidationCache::Create(pCreateInfo);
    return *pValidationCache ? VK_SUCCESS : VK_ERROR_INITIALIZATION_FAILED;
}

void CoreChecks::CoreLayerDestroyValidationCacheEXT(VkDevice device, VkValidationCacheEXT validationCache,
                                                    const VkAllocationCallbacks *pAllocator) {
    delete CastFromHandle<ValidationCache *>(validationCache);
}

VkResult CoreChecks::CoreLayerGetValidationCacheDataEXT(VkDevice device, VkValidationCacheEXT validationCache, size_t *pDataSize,
                                                        void *pData) {
    size_t in_size = *pDataSize;
    CastFromHandle<ValidationCache *>(validationCache)->Write(pDataSize, pData);
    return (pData && *pDataSize != in_size) ? VK_INCOMPLETE : VK_SUCCESS;
}

VkResult CoreChecks::CoreLayerMergeValidationCachesEXT(VkDevice device, VkValidationCacheEXT dstCache, uint32_t srcCacheCount,
                                                       const VkValidationCacheEXT *pSrcCaches) {
    bool skip = false;
    auto dst = CastFromHandle<ValidationCache *>(dstCache);
    VkResult result = VK_SUCCESS;

    for (uint32_t i = 0; i < srcCacheCount; i++) {
        auto src = CastFromHandle<const ValidationCache *>(pSrcCaches[i]);
        if (src == dst) {
            skip |= LogError(device, "VUID-vkMergeValidationCachesEXT-dstCache-01536",
                             "vkMergeValidationCachesEXT: dstCache (0x%" PRIx64 ") must not appear in pSrcCaches array.",
                             HandleToUint64(dstCache));
            result = VK_ERROR_VALIDATION_FAILED_EXT;
        }
        if (!skip) {
            dst->Merge(src);
        }
    }

    return result;
}

bool CoreChecks::ValidateCmdSetDeviceMask(VkCommandBuffer commandBuffer, uint32_t deviceMask, const char *func_name) const {
    bool skip = false;
    const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    skip |= ValidateCmd(cb_state, CMD_SETDEVICEMASK, func_name);
    skip |= ValidateDeviceMaskToPhysicalDeviceCount(deviceMask, commandBuffer, "VUID-vkCmdSetDeviceMask-deviceMask-00108");
    skip |= ValidateDeviceMaskToZero(deviceMask, commandBuffer, "VUID-vkCmdSetDeviceMask-deviceMask-00109");
    skip |= ValidateDeviceMaskToCommandBuffer(cb_state, deviceMask, commandBuffer, "VUID-vkCmdSetDeviceMask-deviceMask-00110");
    if (cb_state->activeRenderPass) {
        skip |= ValidateDeviceMaskToRenderPass(cb_state, deviceMask, "VUID-vkCmdSetDeviceMask-deviceMask-00111");
    }
    return skip;
}

bool CoreChecks::PreCallValidateCmdSetDeviceMask(VkCommandBuffer commandBuffer, uint32_t deviceMask) const {
    return ValidateCmdSetDeviceMask(commandBuffer, deviceMask, "vkCmdSetDeviceMask()");
}

bool CoreChecks::PreCallValidateCmdSetDeviceMaskKHR(VkCommandBuffer commandBuffer, uint32_t deviceMask) const {
    return ValidateCmdSetDeviceMask(commandBuffer, deviceMask, "vkCmdSetDeviceMaskKHR()");
}

bool CoreChecks::ValidateGetSemaphoreCounterValue(VkDevice device, VkSemaphore semaphore, uint64_t *pValue,
                                                  const char *apiName) const {
    bool skip = false;
    const auto *semaphore_state = GetSemaphoreState(semaphore);
    if (semaphore_state && semaphore_state->type != VK_SEMAPHORE_TYPE_TIMELINE) {
        skip |= LogError(semaphore, "VUID-vkGetSemaphoreCounterValue-semaphore-03255",
                         "%s(): semaphore %s must be of VK_SEMAPHORE_TYPE_TIMELINE type", apiName,
                         report_data->FormatHandle(semaphore).c_str());
    }
    return skip;
}

bool CoreChecks::PreCallValidateGetSemaphoreCounterValueKHR(VkDevice device, VkSemaphore semaphore, uint64_t *pValue) const {
    return ValidateGetSemaphoreCounterValue(device, semaphore, pValue, "vkGetSemaphoreCounterValueKHR");
}
"vkGetSemaphoreCounterValueKHR"); } bool CoreChecks::PreCallValidateGetSemaphoreCounterValue(VkDevice device, VkSemaphore semaphore, uint64_t *pValue) const { return ValidateGetSemaphoreCounterValue(device, semaphore, pValue, "vkGetSemaphoreCounterValue"); } bool CoreChecks::ValidateQueryPoolStride(const std::string &vuid_not_64, const std::string &vuid_64, const VkDeviceSize stride, const char *parameter_name, const uint64_t parameter_value, const VkQueryResultFlags flags) const { bool skip = false; if (flags & VK_QUERY_RESULT_64_BIT) { static const int condition_multiples = 0b0111; if ((stride & condition_multiples) || (parameter_value & condition_multiples)) { skip |= LogError(device, vuid_64, "stride %" PRIx64 " or %s %" PRIx64 " is invalid.", stride, parameter_name, parameter_value); } } else { static const int condition_multiples = 0b0011; if ((stride & condition_multiples) || (parameter_value & condition_multiples)) { skip |= LogError(device, vuid_not_64, "stride %" PRIx64 " or %s %" PRIx64 " is invalid.", stride, parameter_name, parameter_value); } } return skip; } bool CoreChecks::ValidateCmdDrawStrideWithStruct(VkCommandBuffer commandBuffer, const std::string &vuid, const uint32_t stride, const char *struct_name, const uint32_t struct_size) const { bool skip = false; static const int condition_multiples = 0b0011; if ((stride & condition_multiples) || (stride < struct_size)) { skip |= LogError(commandBuffer, vuid, "stride %d is invalid or less than sizeof(%s) %d.", stride, struct_name, struct_size); } return skip; } bool CoreChecks::ValidateCmdDrawStrideWithBuffer(VkCommandBuffer commandBuffer, const std::string &vuid, const uint32_t stride, const char *struct_name, const uint32_t struct_size, const uint32_t drawCount, const VkDeviceSize offset, const BUFFER_STATE *buffer_state) const { bool skip = false; uint64_t validation_value = stride * (drawCount - 1) + offset + struct_size; if (validation_value > buffer_state->createInfo.size) { skip |= LogError(commandBuffer, vuid, "stride[%d] * (drawCount[%d] - 1) + offset[%" PRIx64 "] + sizeof(%s)[%d] = %" PRIx64 " is greater than the size[%" PRIx64 "] of %s.", stride, drawCount, offset, struct_name, struct_size, validation_value, buffer_state->createInfo.size, report_data->FormatHandle(buffer_state->buffer).c_str()); } return skip; } bool CoreChecks::PreCallValidateReleaseProfilingLockKHR(VkDevice device) const { bool skip = false; if (!performance_lock_acquired) { skip |= LogError(device, "VUID-vkReleaseProfilingLockKHR-device-03235", "vkReleaseProfilingLockKHR(): The profiling lock of device must have been held via a previous successful " "call to vkAcquireProfilingLockKHR."); } return skip; } bool CoreChecks::PreCallValidateCmdSetCheckpointNV(VkCommandBuffer commandBuffer, const void *pCheckpointMarker) const { { const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetCheckpointNV()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT, "VUID-vkCmdSetCheckpointNV-commandBuffer-cmdpool"); skip |= ValidateCmd(cb_state, CMD_SETCHECKPOINTNV, "vkCmdSetCheckpointNV()"); return skip; } } bool CoreChecks::PreCallValidateWriteAccelerationStructuresPropertiesKHR(VkDevice device, uint32_t accelerationStructureCount, const VkAccelerationStructureKHR *pAccelerationStructures, VkQueryType queryType, size_t dataSize, void *pData, size_t stride) const { bool skip = false; for (uint32_t i = 0; i < accelerationStructureCount; ++i) { const 
        const ACCELERATION_STRUCTURE_STATE_KHR *as_state = GetAccelerationStructureStateKHR(pAccelerationStructures[i]);
        const auto &as_info = as_state->build_info_khr;
        if (queryType == VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR) {
            if (!(as_info.flags & VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_KHR)) {
                skip |= LogError(device, "VUID-vkWriteAccelerationStructuresPropertiesKHR-accelerationStructures-03431",
                                 "vkWriteAccelerationStructuresPropertiesKHR: All acceleration structures (%s) in "
                                 "pAccelerationStructures must have been built with "
                                 "VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_KHR if queryType is "
                                 "VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR.",
                                 report_data->FormatHandle(as_state->acceleration_structure).c_str());
            }
        }
    }
    return skip;
}

bool CoreChecks::PreCallValidateCmdWriteAccelerationStructuresPropertiesKHR(
    VkCommandBuffer commandBuffer, uint32_t accelerationStructureCount,
    const VkAccelerationStructureKHR *pAccelerationStructures, VkQueryType queryType, VkQueryPool queryPool,
    uint32_t firstQuery) const {
    bool skip = false;
    const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    skip |= ValidateCmdQueueFlags(cb_state, "vkCmdWriteAccelerationStructuresPropertiesKHR()", VK_QUEUE_COMPUTE_BIT,
                                  "VUID-vkCmdWriteAccelerationStructuresPropertiesKHR-commandBuffer-cmdpool");
    skip |= ValidateCmd(cb_state, CMD_WRITEACCELERATIONSTRUCTURESPROPERTIESKHR, "vkCmdWriteAccelerationStructuresPropertiesKHR()");
    // This command must only be called outside of a render pass instance
    skip |= InsideRenderPass(cb_state, "vkCmdWriteAccelerationStructuresPropertiesKHR()",
                             "VUID-vkCmdWriteAccelerationStructuresPropertiesKHR-renderpass");
    const auto *query_pool_state = GetQueryPoolState(queryPool);
    const auto &query_pool_ci = query_pool_state->createInfo;
    if (query_pool_ci.queryType != queryType) {
        skip |= LogError(
            device, "VUID-vkCmdWriteAccelerationStructuresPropertiesKHR-queryPool-02493",
            "vkCmdWriteAccelerationStructuresPropertiesKHR: queryPool must have been created with a queryType matching queryType.");
    }
    for (uint32_t i = 0; i < accelerationStructureCount; ++i) {
        if (queryType == VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR) {
            const ACCELERATION_STRUCTURE_STATE_KHR *as_state = GetAccelerationStructureStateKHR(pAccelerationStructures[i]);
            if (!(as_state->build_info_khr.flags & VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_KHR)) {
                skip |= LogError(
                    device, "VUID-vkCmdWriteAccelerationStructuresPropertiesKHR-accelerationStructures-03431",
                    "vkCmdWriteAccelerationStructuresPropertiesKHR: All acceleration structures in pAccelerationStructures "
                    "must have been built with VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_KHR if queryType is "
                    "VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR.");
            }
        }
    }
    return skip;
}

bool CoreChecks::PreCallValidateCmdWriteAccelerationStructuresPropertiesNV(VkCommandBuffer commandBuffer,
                                                                           uint32_t accelerationStructureCount,
                                                                           const VkAccelerationStructureNV *pAccelerationStructures,
                                                                           VkQueryType queryType, VkQueryPool queryPool,
                                                                           uint32_t firstQuery) const {
    bool skip = false;
    const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    skip |= ValidateCmdQueueFlags(cb_state, "vkCmdWriteAccelerationStructuresPropertiesNV()", VK_QUEUE_COMPUTE_BIT,
                                  "VUID-vkCmdWriteAccelerationStructuresPropertiesNV-commandBuffer-cmdpool");
    skip |= ValidateCmd(cb_state, CMD_WRITEACCELERATIONSTRUCTURESPROPERTIESNV, "vkCmdWriteAccelerationStructuresPropertiesNV()");
    // This command must only be called outside of a render pass instance
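    // InsideRenderPass reports an error if this command buffer currently has an active render pass instance.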
    skip |= InsideRenderPass(cb_state, "vkCmdWriteAccelerationStructuresPropertiesNV()",
                             "VUID-vkCmdWriteAccelerationStructuresPropertiesNV-renderpass");
    const auto *query_pool_state = GetQueryPoolState(queryPool);
    const auto &query_pool_ci = query_pool_state->createInfo;
    if (query_pool_ci.queryType != queryType) {
        skip |= LogError(
            device, "VUID-vkCmdWriteAccelerationStructuresPropertiesNV-queryPool-03755",
            "vkCmdWriteAccelerationStructuresPropertiesNV: queryPool must have been created with a queryType matching queryType.");
    }
    for (uint32_t i = 0; i < accelerationStructureCount; ++i) {
        if (queryType == VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_NV) {
            const ACCELERATION_STRUCTURE_STATE *as_state = GetAccelerationStructureStateNV(pAccelerationStructures[i]);
            if (!(as_state->build_info.flags & VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_KHR)) {
                skip |= LogError(device, "VUID-vkCmdWriteAccelerationStructuresPropertiesNV-accelerationStructures-03431",
                                 "vkCmdWriteAccelerationStructuresPropertiesNV: All acceleration structures in "
                                 "pAccelerationStructures must have been built with "
                                 "VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_KHR if queryType is "
                                 "VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_NV.");
            }
        }
    }
    return skip;
}

uint32_t CoreChecks::CalcTotalShaderGroupCount(const PIPELINE_STATE *pipelineState) const {
    uint32_t total = pipelineState->raytracingPipelineCI.groupCount;

    if (pipelineState->raytracingPipelineCI.pLibraryInfo) {
        for (uint32_t i = 0; i < pipelineState->raytracingPipelineCI.pLibraryInfo->libraryCount; ++i) {
            const PIPELINE_STATE *library_pipeline_state =
                GetPipelineState(pipelineState->raytracingPipelineCI.pLibraryInfo->pLibraries[i]);
            total += CalcTotalShaderGroupCount(library_pipeline_state);
        }
    }

    return total;
}

bool CoreChecks::PreCallValidateGetRayTracingShaderGroupHandlesKHR(VkDevice device, VkPipeline pipeline, uint32_t firstGroup,
                                                                   uint32_t groupCount, size_t dataSize, void *pData) const {
    bool skip = false;
    const PIPELINE_STATE *pipeline_state = GetPipelineState(pipeline);
    if (pipeline_state->getPipelineCreateFlags() & VK_PIPELINE_CREATE_LIBRARY_BIT_KHR) {
        skip |= LogError(
            device, "VUID-vkGetRayTracingShaderGroupHandlesKHR-pipeline-03482",
            "vkGetRayTracingShaderGroupHandlesKHR: pipeline must not have been created with VK_PIPELINE_CREATE_LIBRARY_BIT_KHR.");
    }
    if (dataSize < (phys_dev_ext_props.ray_tracing_propsKHR.shaderGroupHandleSize * groupCount)) {
        skip |= LogError(device, "VUID-vkGetRayTracingShaderGroupHandlesKHR-dataSize-02420",
                         "vkGetRayTracingShaderGroupHandlesKHR: dataSize (%zu) must be at least "
                         "VkPhysicalDeviceRayTracingPipelinePropertiesKHR::shaderGroupHandleSize * groupCount.",
                         dataSize);
    }

    uint32_t total_group_count = CalcTotalShaderGroupCount(pipeline_state);

    if (firstGroup >= total_group_count) {
        skip |= LogError(
            device, "VUID-vkGetRayTracingShaderGroupHandlesKHR-firstGroup-04050",
            "vkGetRayTracingShaderGroupHandlesKHR: firstGroup must be less than the number of shader groups in pipeline.");
    }
    if ((firstGroup + groupCount) > total_group_count) {
        skip |= LogError(device, "VUID-vkGetRayTracingShaderGroupHandlesKHR-firstGroup-02419",
                         "vkGetRayTracingShaderGroupHandlesKHR: The sum of firstGroup and groupCount must be less than or equal "
                         "to the number of shader groups in pipeline.");
    }
    return skip;
}

bool CoreChecks::PreCallValidateGetRayTracingCaptureReplayShaderGroupHandlesKHR(VkDevice device, VkPipeline pipeline,
                                                                                uint32_t firstGroup, uint32_t groupCount,
                                                                                size_t dataSize, void *pData) const {
    bool skip = false;
    if (dataSize < (phys_dev_ext_props.ray_tracing_propsKHR.shaderGroupHandleCaptureReplaySize * groupCount)) {
        skip |= LogError(device, "VUID-vkGetRayTracingCaptureReplayShaderGroupHandlesKHR-dataSize-03484",
                         "vkGetRayTracingCaptureReplayShaderGroupHandlesKHR: dataSize (%zu) must be at least "
                         "VkPhysicalDeviceRayTracingPipelinePropertiesKHR::shaderGroupHandleCaptureReplaySize * groupCount.",
                         dataSize);
    }
    const PIPELINE_STATE *pipeline_state = GetPipelineState(pipeline);
    if (!pipeline_state) {
        return skip;
    }
    if (firstGroup >= pipeline_state->raytracingPipelineCI.groupCount) {
        skip |= LogError(device, "VUID-vkGetRayTracingCaptureReplayShaderGroupHandlesKHR-firstGroup-04051",
                         "vkGetRayTracingCaptureReplayShaderGroupHandlesKHR: firstGroup must be less than the number of shader "
                         "groups in pipeline.");
    }
    if ((firstGroup + groupCount) > pipeline_state->raytracingPipelineCI.groupCount) {
        skip |= LogError(device, "VUID-vkGetRayTracingCaptureReplayShaderGroupHandlesKHR-firstGroup-03483",
                         "vkGetRayTracingCaptureReplayShaderGroupHandlesKHR: The sum of firstGroup and groupCount must be less "
                         "than or equal to the number of shader groups in pipeline.");
    }
    if (!(pipeline_state->raytracingPipelineCI.flags &
          VK_PIPELINE_CREATE_RAY_TRACING_SHADER_GROUP_HANDLE_CAPTURE_REPLAY_BIT_KHR)) {
        skip |= LogError(device, "VUID-vkGetRayTracingCaptureReplayShaderGroupHandlesKHR-pipeline-03607",
                         "pipeline must have been created with flags that included "
                         "VK_PIPELINE_CREATE_RAY_TRACING_SHADER_GROUP_HANDLE_CAPTURE_REPLAY_BIT_KHR.");
    }
    return skip;
}

bool CoreChecks::PreCallValidateCmdBuildAccelerationStructuresIndirectKHR(VkCommandBuffer commandBuffer, uint32_t infoCount,
                                                                          const VkAccelerationStructureBuildGeometryInfoKHR *pInfos,
                                                                          const VkDeviceAddress *pIndirectDeviceAddresses,
                                                                          const uint32_t *pIndirectStrides,
                                                                          const uint32_t *const *ppMaxPrimitiveCounts) const {
    const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    assert(cb_state);
    bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdBuildAccelerationStructuresIndirectKHR()", VK_QUEUE_COMPUTE_BIT,
                                      "VUID-vkCmdBuildAccelerationStructuresIndirectKHR-commandBuffer-cmdpool");
    skip |= ValidateCmd(cb_state, CMD_BUILDACCELERATIONSTRUCTURESINDIRECTKHR, "vkCmdBuildAccelerationStructuresIndirectKHR()");
    skip |= InsideRenderPass(cb_state, "vkCmdBuildAccelerationStructuresIndirectKHR()",
                             "VUID-vkCmdBuildAccelerationStructuresIndirectKHR-renderpass");
    for (uint32_t i = 0; i < infoCount; ++i) {
        const ACCELERATION_STRUCTURE_STATE_KHR *src_as_state =
            GetAccelerationStructureStateKHR(pInfos[i].srcAccelerationStructure);
        const ACCELERATION_STRUCTURE_STATE_KHR *dst_as_state =
            GetAccelerationStructureStateKHR(pInfos[i].dstAccelerationStructure);
        if (pInfos[i].mode == VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR) {
            if (src_as_state == nullptr || !src_as_state->built ||
                !(src_as_state->build_info_khr.flags & VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_KHR)) {
                skip |= LogError(device, "VUID-vkCmdBuildAccelerationStructuresIndirectKHR-pInfos-03667",
                                 "vkCmdBuildAccelerationStructuresIndirectKHR(): For each element of pInfos, if its mode member is "
                                 "VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR, its srcAccelerationStructure member must have "
                                 "been built before with VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_KHR set in "
                                 "VkAccelerationStructureBuildGeometryInfoKHR::flags.");
            }
            if (pInfos[i].geometryCount != src_as_state->build_info_khr.geometryCount) {
                skip |= LogError(device, "VUID-vkCmdBuildAccelerationStructuresIndirectKHR-pInfos-03758",
"vkCmdBuildAccelerationStructuresIndirectKHR(): For each element of pInfos, if its mode member is " "VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR," " its geometryCount member must have the same value which was specified when " "srcAccelerationStructure was last built."); } if (pInfos[i].flags != src_as_state->build_info_khr.flags) { skip |= LogError(device, "VUID-vkCmdBuildAccelerationStructuresIndirectKHR-pInfos-03759", "vkCmdBuildAccelerationStructuresIndirectKHR(): For each element of pInfos, if its mode member is" " VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR, its flags member must have the same value which" " was specified when srcAccelerationStructure was last built."); } if (pInfos[i].type != src_as_state->build_info_khr.type) { skip |= LogError(device, "VUID-vkCmdBuildAccelerationStructuresIndirectKHR-pInfos-03760", "vkCmdBuildAccelerationStructuresIndirectKHR(): For each element of pInfos, if its mode member is" " VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR, its type member must have the same value which" " was specified when srcAccelerationStructure was last built."); } } if (pInfos[i].type == VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR) { if (!dst_as_state || (dst_as_state && dst_as_state->create_infoKHR.type != VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR && dst_as_state->create_infoKHR.type != VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR)) { skip |= LogError(device, "VUID-vkCmdBuildAccelerationStructuresIndirectKHR-pInfos-03700", "vkCmdBuildAccelerationStructuresIndirectKHR(): For each element of pInfos, if its type member is " "VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR, its dstAccelerationStructure member must have " "been created with a value of VkAccelerationStructureCreateInfoKHR::type equal to either " "VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR or VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR."); } } if (pInfos[i].type == VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR) { if (!dst_as_state || (dst_as_state && dst_as_state->create_infoKHR.type != VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR && dst_as_state->create_infoKHR.type != VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR)) { skip |= LogError(device, "VUID-vkCmdBuildAccelerationStructuresIndirectKHR-pInfos-03699", "vkCmdBuildAccelerationStructuresIndirectKHR():For each element of pInfos, if its type member is " "VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR, its dstAccelerationStructure member must have been " "created with a value of VkAccelerationStructureCreateInfoKHR::type equal to either " "VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR or VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR."); } } } return skip; } bool CoreChecks::ValidateCopyAccelerationStructureInfoKHR(const VkCopyAccelerationStructureInfoKHR *pInfo, const char *api_name) const { bool skip = false; if (pInfo->mode == VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_KHR) { const ACCELERATION_STRUCTURE_STATE_KHR *src_as_state = GetAccelerationStructureStateKHR(pInfo->src); if (!(src_as_state->build_info_khr.flags & VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_KHR)) { skip |= LogError(device, "VUID-VkCopyAccelerationStructureInfoKHR-src-03411", "(%s): src must have been built with VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_KHR" "if mode is VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_KHR.", api_name); } } return skip; } bool CoreChecks::PreCallValidateCmdCopyAccelerationStructureKHR(VkCommandBuffer commandBuffer, const VkCopyAccelerationStructureInfoKHR *pInfo) const { const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); 
    assert(cb_state);
    bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdCopyAccelerationStructureKHR()", VK_QUEUE_COMPUTE_BIT,
                                      "VUID-vkCmdCopyAccelerationStructureKHR-commandBuffer-cmdpool");
    skip |= ValidateCmd(cb_state, CMD_COPYACCELERATIONSTRUCTUREKHR, "vkCmdCopyAccelerationStructureKHR()");
    skip |= InsideRenderPass(cb_state, "vkCmdCopyAccelerationStructureKHR()", "VUID-vkCmdCopyAccelerationStructureKHR-renderpass");
    skip |= ValidateCopyAccelerationStructureInfoKHR(pInfo, "vkCmdCopyAccelerationStructureKHR");
    return skip;
}

bool CoreChecks::PreCallValidateCopyAccelerationStructureKHR(VkDevice device, VkDeferredOperationKHR deferredOperation,
                                                             const VkCopyAccelerationStructureInfoKHR *pInfo) const {
    bool skip = false;
    skip |= ValidateCopyAccelerationStructureInfoKHR(pInfo, "vkCopyAccelerationStructureKHR");
    return skip;
}

bool CoreChecks::PreCallValidateCmdCopyAccelerationStructureToMemoryKHR(
    VkCommandBuffer commandBuffer, const VkCopyAccelerationStructureToMemoryInfoKHR *pInfo) const {
    const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    assert(cb_state);
    bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdCopyAccelerationStructureToMemoryKHR()", VK_QUEUE_COMPUTE_BIT,
                                      "VUID-vkCmdCopyAccelerationStructureToMemoryKHR-commandBuffer-cmdpool");
    skip |= ValidateCmd(cb_state, CMD_COPYACCELERATIONSTRUCTURETOMEMORYKHR, "vkCmdCopyAccelerationStructureToMemoryKHR()");
    skip |= InsideRenderPass(cb_state, "vkCmdCopyAccelerationStructureToMemoryKHR()",
                             "VUID-vkCmdCopyAccelerationStructureToMemoryKHR-renderpass");

    const auto *accel_state = GetAccelerationStructureStateKHR(pInfo->src);
    if (accel_state) {
        const auto *buffer_state = GetBufferState(accel_state->create_infoKHR.buffer);
        skip |= ValidateMemoryIsBoundToBuffer(buffer_state, "vkCmdCopyAccelerationStructureToMemoryKHR",
                                              "VUID-vkCmdCopyAccelerationStructureToMemoryKHR-None-03559");
    }
    return skip;
}

bool CoreChecks::PreCallValidateCmdCopyMemoryToAccelerationStructureKHR(
    VkCommandBuffer commandBuffer, const VkCopyMemoryToAccelerationStructureInfoKHR *pInfo) const {
    const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    assert(cb_state);
    bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdCopyMemoryToAccelerationStructureKHR()", VK_QUEUE_COMPUTE_BIT,
                                      "VUID-vkCmdCopyMemoryToAccelerationStructureKHR-commandBuffer-cmdpool");
    skip |= ValidateCmd(cb_state, CMD_COPYMEMORYTOACCELERATIONSTRUCTUREKHR, "vkCmdCopyMemoryToAccelerationStructureKHR()");
    // This command must only be called outside of a render pass instance
    skip |= InsideRenderPass(cb_state, "vkCmdCopyMemoryToAccelerationStructureKHR()",
                             "VUID-vkCmdCopyMemoryToAccelerationStructureKHR-renderpass");
    return skip;
}

bool CoreChecks::PreCallValidateCmdBindTransformFeedbackBuffersEXT(VkCommandBuffer commandBuffer, uint32_t firstBinding,
                                                                   uint32_t bindingCount, const VkBuffer *pBuffers,
                                                                   const VkDeviceSize *pOffsets,
                                                                   const VkDeviceSize *pSizes) const {
    bool skip = false;
    char const *const cmd_name = "CmdBindTransformFeedbackBuffersEXT";
    if (!enabled_features.transform_feedback_features.transformFeedback) {
        skip |= LogError(commandBuffer, "VUID-vkCmdBindTransformFeedbackBuffersEXT-transformFeedback-02355",
                         "%s: transformFeedback feature is not enabled.", cmd_name);
    }

    {
        auto const cb_state = GetCBState(commandBuffer);
        if (cb_state->transform_feedback_active) {
            skip |= LogError(commandBuffer, "VUID-vkCmdBindTransformFeedbackBuffersEXT-None-02365",
                             "%s: transform feedback is active.", cmd_name);
        }
    }

    for (uint32_t i = 0; i < bindingCount; ++i) {
        auto const buffer_state = GetBufferState(pBuffers[i]);
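        // Each binding below is checked for offset range, the transform feedback usage flag,
        // the optional pSizes range, and bound device memory.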
        assert(buffer_state != nullptr);

        if (pOffsets[i] >= buffer_state->createInfo.size) {
            skip |= LogError(buffer_state->buffer, "VUID-vkCmdBindTransformFeedbackBuffersEXT-pOffsets-02358",
                             "%s: pOffsets[%" PRIu32 "](0x%" PRIxLEAST64
                             ") is greater than or equal to the size of pBuffers[%" PRIu32 "](0x%" PRIxLEAST64 ").",
                             cmd_name, i, pOffsets[i], i, buffer_state->createInfo.size);
        }

        if ((buffer_state->createInfo.usage & VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_BUFFER_BIT_EXT) == 0) {
            skip |= LogError(buffer_state->buffer, "VUID-vkCmdBindTransformFeedbackBuffersEXT-pBuffers-02360",
                             "%s: pBuffers[%" PRIu32 "] (0x%" PRIxLEAST64
                             ") was not created with the VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_BUFFER_BIT_EXT flag.",
                             cmd_name, i, pBuffers[i]);
        }

        // pSizes is optional and may be nullptr. Also might be VK_WHOLE_SIZE which VU don't apply
        if ((pSizes != nullptr) && (pSizes[i] != VK_WHOLE_SIZE)) {
            // only report one to prevent redundant error if the size is larger since adding offset will be as well
            if (pSizes[i] > buffer_state->createInfo.size) {
                skip |= LogError(buffer_state->buffer, "VUID-vkCmdBindTransformFeedbackBuffersEXT-pSizes-02362",
                                 "%s: pSizes[%" PRIu32 "](0x%" PRIxLEAST64 ") is greater than the size of pBuffers[%" PRIu32
                                 "](0x%" PRIxLEAST64 ").",
                                 cmd_name, i, pSizes[i], i, buffer_state->createInfo.size);
            } else if (pOffsets[i] + pSizes[i] > buffer_state->createInfo.size) {
                skip |= LogError(buffer_state->buffer, "VUID-vkCmdBindTransformFeedbackBuffersEXT-pOffsets-02363",
                                 "%s: The sum of pOffsets[%" PRIu32 "](0x%" PRIxLEAST64 ") and pSizes[%" PRIu32 "](0x%" PRIxLEAST64
                                 ") is greater than the size of pBuffers[%" PRIu32 "](0x%" PRIxLEAST64 ").",
                                 cmd_name, i, pOffsets[i], i, pSizes[i], i, buffer_state->createInfo.size);
            }
        }

        skip |= ValidateMemoryIsBoundToBuffer(buffer_state, cmd_name, "VUID-vkCmdBindTransformFeedbackBuffersEXT-pBuffers-02364");
    }

    return skip;
}

bool CoreChecks::PreCallValidateCmdBeginTransformFeedbackEXT(VkCommandBuffer commandBuffer, uint32_t firstCounterBuffer,
                                                             uint32_t counterBufferCount, const VkBuffer *pCounterBuffers,
                                                             const VkDeviceSize *pCounterBufferOffsets) const {
    bool skip = false;
    char const *const cmd_name = "CmdBeginTransformFeedbackEXT";
    if (!enabled_features.transform_feedback_features.transformFeedback) {
        skip |= LogError(commandBuffer, "VUID-vkCmdBeginTransformFeedbackEXT-transformFeedback-02366",
                         "%s: transformFeedback feature is not enabled.", cmd_name);
    }

    {
        auto const cb_state = GetCBState(commandBuffer);
        if (cb_state->transform_feedback_active) {
            skip |= LogError(commandBuffer, "VUID-vkCmdBeginTransformFeedbackEXT-None-02367",
                             "%s: transform feedback is active.", cmd_name);
        }
    }

    // pCounterBuffers and pCounterBufferOffsets are optional and may be nullptr. Additionally, pCounterBufferOffsets must be
    // nullptr if pCounterBuffers is nullptr.
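    // A VK_NULL_HANDLE entry in pCounterBuffers means no counter is loaded for that binding,
    // so only non-null handles are validated in the loop below.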
    if (pCounterBuffers == nullptr) {
        if (pCounterBufferOffsets != nullptr) {
            skip |= LogError(commandBuffer, "VUID-vkCmdBeginTransformFeedbackEXT-pCounterBuffer-02371",
                             "%s: pCounterBuffers is NULL and pCounterBufferOffsets is not NULL.", cmd_name);
        }
    } else {
        for (uint32_t i = 0; i < counterBufferCount; ++i) {
            if (pCounterBuffers[i] != VK_NULL_HANDLE) {
                auto const buffer_state = GetBufferState(pCounterBuffers[i]);
                assert(buffer_state != nullptr);

                if (pCounterBufferOffsets != nullptr && pCounterBufferOffsets[i] + 4 > buffer_state->createInfo.size) {
                    skip |= LogError(buffer_state->buffer, "VUID-vkCmdBeginTransformFeedbackEXT-pCounterBufferOffsets-02370",
                                     "%s: pCounterBuffers[%" PRIu32 "](0x%" PRIxLEAST64
                                     ") is not large enough to hold 4 bytes at pCounterBufferOffsets[%" PRIu32 "](0x%" PRIxLEAST64
                                     ").",
                                     cmd_name, i, pCounterBuffers[i], i, pCounterBufferOffsets[i]);
                }

                if ((buffer_state->createInfo.usage & VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_COUNTER_BUFFER_BIT_EXT) == 0) {
                    skip |= LogError(buffer_state->buffer, "VUID-vkCmdBeginTransformFeedbackEXT-pCounterBuffers-02372",
                                     "%s: pCounterBuffers[%" PRIu32 "] (0x%" PRIxLEAST64
                                     ") was not created with the VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_COUNTER_BUFFER_BIT_EXT flag.",
                                     cmd_name, i, pCounterBuffers[i]);
                }
            }
        }
    }

    return skip;
}

bool CoreChecks::PreCallValidateCmdEndTransformFeedbackEXT(VkCommandBuffer commandBuffer, uint32_t firstCounterBuffer,
                                                           uint32_t counterBufferCount, const VkBuffer *pCounterBuffers,
                                                           const VkDeviceSize *pCounterBufferOffsets) const {
    bool skip = false;
    char const *const cmd_name = "CmdEndTransformFeedbackEXT";
    if (!enabled_features.transform_feedback_features.transformFeedback) {
        skip |= LogError(commandBuffer, "VUID-vkCmdEndTransformFeedbackEXT-transformFeedback-02374",
                         "%s: transformFeedback feature is not enabled.", cmd_name);
    }

    {
        auto const cb_state = GetCBState(commandBuffer);
        if (!cb_state->transform_feedback_active) {
            skip |= LogError(commandBuffer, "VUID-vkCmdEndTransformFeedbackEXT-None-02375",
                             "%s: transform feedback is not active.", cmd_name);
        }
    }

    // pCounterBuffers and pCounterBufferOffsets are optional and may be nullptr. Additionally, pCounterBufferOffsets must be
    // nullptr if pCounterBuffers is nullptr.
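    // Counter values are 4-byte words, so each non-null counter buffer must have at least 4 bytes
    // available at its offset; these checks mirror vkCmdBeginTransformFeedbackEXT above.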
if (pCounterBuffers == nullptr) { if (pCounterBufferOffsets != nullptr) { skip |= LogError(commandBuffer, "VUID-vkCmdEndTransformFeedbackEXT-pCounterBuffer-02379", "%s: pCounterBuffers is NULL and pCounterBufferOffsets is not NULL.", cmd_name); } } else { for (uint32_t i = 0; i < counterBufferCount; ++i) { if (pCounterBuffers[i] != VK_NULL_HANDLE) { auto const buffer_state = GetBufferState(pCounterBuffers[i]); assert(buffer_state != nullptr); if (pCounterBufferOffsets != nullptr && pCounterBufferOffsets[i] + 4 > buffer_state->createInfo.size) { skip |= LogError(buffer_state->buffer, "VUID-vkCmdEndTransformFeedbackEXT-pCounterBufferOffsets-02378", "%s: pCounterBuffers[%" PRIu32 "](0x%" PRIxLEAST64 ") is not large enough to hold 4 bytes at pCounterBufferOffsets[%" PRIu32 "](0x%" PRIxLEAST64 ").", cmd_name, i, pCounterBuffers[i], i, pCounterBufferOffsets[i]); } if ((buffer_state->createInfo.usage & VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_COUNTER_BUFFER_BIT_EXT) == 0) { skip |= LogError(buffer_state->buffer, "VUID-vkCmdEndTransformFeedbackEXT-pCounterBuffers-02380", "%s: pCounterBuffers[%" PRIu32 "] (0x%" PRIxLEAST64 ") was not created with the VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_COUNTER_BUFFER_BIT_EXT flag.", cmd_name, i, pCounterBuffers[i]); } } } } return skip; } bool CoreChecks::PreCallValidateCmdSetCullModeEXT(VkCommandBuffer commandBuffer, VkCullModeFlags cullMode) const { const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetCullModeEXT()", VK_QUEUE_GRAPHICS_BIT, "VUID-vkCmdSetCullModeEXT-commandBuffer-cmdpool"); skip |= ValidateCmd(cb_state, CMD_SETCULLMODEEXT, "vkCmdSetCullModeEXT()"); if (!enabled_features.extended_dynamic_state_features.extendedDynamicState) { skip |= LogError(commandBuffer, "VUID-vkCmdSetCullModeEXT-None-03384", "vkCmdSetCullModeEXT: extendedDynamicState feature is not enabled."); } return skip; } bool CoreChecks::PreCallValidateCmdSetFrontFaceEXT(VkCommandBuffer commandBuffer, VkFrontFace frontFace) const { const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetFrontFaceEXT()", VK_QUEUE_GRAPHICS_BIT, "VUID-vkCmdSetFrontFaceEXT-commandBuffer-cmdpool"); skip |= ValidateCmd(cb_state, CMD_SETFRONTFACEEXT, "vkCmdSetFrontFaceEXT()"); if (!enabled_features.extended_dynamic_state_features.extendedDynamicState) { skip |= LogError(commandBuffer, "VUID-vkCmdSetFrontFaceEXT-None-03383", "vkCmdSetFrontFaceEXT: extendedDynamicState feature is not enabled."); } return skip; } bool CoreChecks::PreCallValidateCmdSetPrimitiveTopologyEXT(VkCommandBuffer commandBuffer, VkPrimitiveTopology primitiveTopology) const { const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetPrimitiveTopologyEXT()", VK_QUEUE_GRAPHICS_BIT, "VUID-vkCmdSetPrimitiveTopologyEXT-commandBuffer-cmdpool"); skip |= ValidateCmd(cb_state, CMD_SETPRIMITIVETOPOLOGYEXT, "vkCmdSetPrimitiveTopologyEXT()"); if (!enabled_features.extended_dynamic_state_features.extendedDynamicState) { skip |= LogError(commandBuffer, "VUID-vkCmdSetPrimitiveTopologyEXT-None-03347", "vkCmdSetPrimitiveTopologyEXT: extendedDynamicState feature is not enabled."); } return skip; } bool CoreChecks::PreCallValidateCmdSetViewportWithCountEXT(VkCommandBuffer commandBuffer, uint32_t viewportCount, const VkViewport *pViewports) const { const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetViewportWithCountEXT()", 
VK_QUEUE_GRAPHICS_BIT, "VUID-vkCmdSetViewportWithCountEXT-commandBuffer-cmdpool"); skip |= ValidateCmd(cb_state, CMD_SETVIEWPORTWITHCOUNTEXT, "vkCmdSetViewportWithCountEXT()"); if (!enabled_features.extended_dynamic_state_features.extendedDynamicState) { skip |= LogError(commandBuffer, "VUID-vkCmdSetViewportWithCountEXT-None-03393", "vkCmdSetViewportWithCountEXT: extendedDynamicState feature is not enabled."); } return skip; } bool CoreChecks::PreCallValidateCmdSetScissorWithCountEXT(VkCommandBuffer commandBuffer, uint32_t scissorCount, const VkRect2D *pScissors) const { const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetScissorWithCountEXT()", VK_QUEUE_GRAPHICS_BIT, "VUID-vkCmdSetScissorWithCountEXT-commandBuffer-cmdpool"); skip |= ValidateCmd(cb_state, CMD_SETSCISSORWITHCOUNTEXT, "vkCmdSetScissorWithCountEXT()"); if (!enabled_features.extended_dynamic_state_features.extendedDynamicState) { skip |= LogError(commandBuffer, "VUID-vkCmdSetScissorWithCountEXT-None-03396", "vkCmdSetScissorWithCountEXT: extendedDynamicState feature is not enabled."); } return skip; } bool CoreChecks::PreCallValidateCmdBindVertexBuffers2EXT(VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer *pBuffers, const VkDeviceSize *pOffsets, const VkDeviceSize *pSizes, const VkDeviceSize *pStrides) const { const auto cb_state = GetCBState(commandBuffer); assert(cb_state); bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdBindVertexBuffers2EXT()", VK_QUEUE_GRAPHICS_BIT, "VUID-vkCmdBindVertexBuffers2EXT-commandBuffer-cmdpool"); skip |= ValidateCmd(cb_state, CMD_BINDVERTEXBUFFERS2EXT, "vkCmdBindVertexBuffers2EXT()"); for (uint32_t i = 0; i < bindingCount; ++i) { const auto buffer_state = GetBufferState(pBuffers[i]); if (buffer_state) { skip |= ValidateBufferUsageFlags(buffer_state, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT, true, "VUID-vkCmdBindVertexBuffers2EXT-pBuffers-03359", "vkCmdBindVertexBuffers2EXT()", "VK_BUFFER_USAGE_VERTEX_BUFFER_BIT"); skip |= ValidateMemoryIsBoundToBuffer(buffer_state, "vkCmdBindVertexBuffers2EXT()", "VUID-vkCmdBindVertexBuffers2EXT-pBuffers-03360"); if (pOffsets[i] >= buffer_state->createInfo.size) { skip |= LogError(buffer_state->buffer, "VUID-vkCmdBindVertexBuffers2EXT-pOffsets-03357", "vkCmdBindVertexBuffers2EXT() offset (0x%" PRIxLEAST64 ") is beyond the end of the buffer.", pOffsets[i]); } if (pSizes && pOffsets[i] + pSizes[i] > buffer_state->createInfo.size) { skip |= LogError(buffer_state->buffer, "VUID-vkCmdBindVertexBuffers2EXT-pSizes-03358", "vkCmdBindVertexBuffers2EXT() size (0x%" PRIxLEAST64 ") is beyond the end of the buffer.", pSizes[i]); } } } const auto lv_bind_point = ConvertToLvlBindPoint(VK_PIPELINE_BIND_POINT_GRAPHICS); const auto *pipe = cb_state->lastBound[lv_bind_point].pipeline_state; auto vibs = IsDynamic(pipe, VK_DYNAMIC_STATE_VERTEX_INPUT_BINDING_STRIDE_EXT); if (vibs && (pStrides == nullptr)) { skip |= LogError(commandBuffer, "VUID-vkCmdBindVertexBuffers2EXT-pStrides-03361", "vkCmdBindVertexBuffers2EXT(): pStrides is NULL but the bound pipeline was created with " "VK_DYNAMIC_STATE_VERTEX_INPUT_BINDING_STRIDE_EXT dynamic state enabled."); } else if (!vibs && (pStrides != nullptr)) { skip |= LogError(commandBuffer, "VUID-vkCmdBindVertexBuffers2EXT-pStrides-03361", "vkCmdBindVertexBuffers2EXT(): pStrides is non-NULL, but the bound pipeline was not created with " "VK_DYNAMIC_STATE_VERTEX_INPUT_BINDING_STRIDE_EXT dynamic state enabled."); } return skip; } bool 
CoreChecks::PreCallValidateCmdSetDepthTestEnableEXT(VkCommandBuffer commandBuffer, VkBool32 depthTestEnable) const { const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetDepthTestEnableEXT()", VK_QUEUE_GRAPHICS_BIT, "VUID-vkCmdSetDepthTestEnableEXT-commandBuffer-cmdpool"); skip |= ValidateCmd(cb_state, CMD_SETDEPTHTESTENABLEEXT, "vkCmdSetDepthTestEnableEXT()"); if (!enabled_features.extended_dynamic_state_features.extendedDynamicState) { skip |= LogError(commandBuffer, "VUID-vkCmdSetDepthTestEnableEXT-None-03352", "vkCmdSetDepthTestEnableEXT: extendedDynamicState feature is not enabled."); } return skip; } bool CoreChecks::PreCallValidateCmdSetDepthWriteEnableEXT(VkCommandBuffer commandBuffer, VkBool32 depthWriteEnable) const { const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetDepthWriteEnableEXT()", VK_QUEUE_GRAPHICS_BIT, "VUID-vkCmdSetDepthWriteEnableEXT-commandBuffer-cmdpool"); skip |= ValidateCmd(cb_state, CMD_SETDEPTHWRITEENABLEEXT, "vkCmdSetDepthWriteEnableEXT()"); if (!enabled_features.extended_dynamic_state_features.extendedDynamicState) { skip |= LogError(commandBuffer, "VUID-vkCmdSetDepthWriteEnableEXT-None-03354", "vkCmdSetDepthWriteEnableEXT: extendedDynamicState feature is not enabled."); } return skip; } bool CoreChecks::PreCallValidateCmdSetDepthCompareOpEXT(VkCommandBuffer commandBuffer, VkCompareOp depthCompareOp) const { const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetDepthCompareOpEXT()", VK_QUEUE_GRAPHICS_BIT, "VUID-vkCmdSetDepthCompareOpEXT-commandBuffer-cmdpool"); skip |= ValidateCmd(cb_state, CMD_SETDEPTHCOMPAREOPEXT, "vkCmdSetDepthCompareOpEXT()"); if (!enabled_features.extended_dynamic_state_features.extendedDynamicState) { skip |= LogError(commandBuffer, "VUID-vkCmdSetDepthCompareOpEXT-None-03353", "vkCmdSetDepthCompareOpEXT: extendedDynamicState feature is not enabled."); } return skip; } bool CoreChecks::PreCallValidateCmdSetDepthBoundsTestEnableEXT(VkCommandBuffer commandBuffer, VkBool32 depthBoundsTestEnable) const { const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetDepthBoundsTestEnableEXT()", VK_QUEUE_GRAPHICS_BIT, "VUID-vkCmdSetDepthBoundsTestEnableEXT-commandBuffer-cmdpool"); skip |= ValidateCmd(cb_state, CMD_SETDEPTHBOUNDSTESTENABLEEXT, "vkCmdSetDepthBoundsTestEnableEXT()"); if (!enabled_features.extended_dynamic_state_features.extendedDynamicState) { skip |= LogError(commandBuffer, "VUID-vkCmdSetDepthBoundsTestEnableEXT-None-03349", "vkCmdSetDepthBoundsTestEnableEXT: extendedDynamicState feature is not enabled."); } return skip; } bool CoreChecks::PreCallValidateCmdSetStencilTestEnableEXT(VkCommandBuffer commandBuffer, VkBool32 stencilTestEnable) const { const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetStencilTestEnableEXT()", VK_QUEUE_GRAPHICS_BIT, "VUID-vkCmdSetStencilTestEnableEXT-commandBuffer-cmdpool"); skip |= ValidateCmd(cb_state, CMD_SETSTENCILTESTENABLEEXT, "vkCmdSetStencilTestEnableEXT()"); if (!enabled_features.extended_dynamic_state_features.extendedDynamicState) { skip |= LogError(commandBuffer, "VUID-vkCmdSetStencilTestEnableEXT-None-03350", "vkCmdSetStencilTestEnableEXT: extendedDynamicState feature is not enabled."); } return skip; } bool CoreChecks::PreCallValidateCmdSetStencilOpEXT(VkCommandBuffer commandBuffer, 
VkStencilFaceFlags faceMask, VkStencilOp failOp, VkStencilOp passOp, VkStencilOp depthFailOp, VkCompareOp compareOp) const { const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetStencilOpEXT()", VK_QUEUE_GRAPHICS_BIT, "VUID-vkCmdSetStencilOpEXT-commandBuffer-cmdpool"); skip |= ValidateCmd(cb_state, CMD_SETSTENCILOPEXT, "vkCmdSetStencilOpEXT()"); if (!enabled_features.extended_dynamic_state_features.extendedDynamicState) { skip |= LogError(commandBuffer, "VUID-vkCmdSetStencilOpEXT-None-03351", "vkCmdSetStencilOpEXT: extendedDynamicState feature is not enabled."); } return skip; } bool CoreChecks::PreCallValidateCreateEvent(VkDevice device, const VkEventCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkEvent *pEvent) const { bool skip = false; if (device_extensions.vk_khr_portability_subset != ExtEnabled::kNotEnabled) { if (VK_FALSE == enabled_features.portability_subset_features.events) { skip |= LogError(device, "VUID-vkCreateEvent-events-04468", "vkCreateEvent: events are not supported via VK_KHR_portability_subset"); } } return skip; } bool CoreChecks::PreCallValidateCmdSetRayTracingPipelineStackSizeKHR(VkCommandBuffer commandBuffer, uint32_t pipelineStackSize) const { bool skip = false; const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetRayTracingPipelineStackSizeKHR()", VK_QUEUE_COMPUTE_BIT, "VUID-vkCmdSetRayTracingPipelineStackSizeKHR-commandBuffer-cmdpool"); skip |= ValidateCmd(cb_state, CMD_SETRAYTRACINGPIPELINESTACKSIZEKHR, "vkCmdSetRayTracingPipelineStackSizeKHR()"); skip |= InsideRenderPass(cb_state, "vkCmdSetRayTracingPipelineStackSizeKHR()", "VUID-vkCmdSetRayTracingPipelineStackSizeKHR-renderpass"); return skip; } bool CoreChecks::PreCallValidateGetRayTracingShaderGroupStackSizeKHR(VkDevice device, VkPipeline pipeline, uint32_t group, VkShaderGroupShaderKHR groupShader) const { bool skip = false; const PIPELINE_STATE *pipeline_state = GetPipelineState(pipeline); if (group >= pipeline_state->raytracingPipelineCI.groupCount) { skip |= LogError(device, "VUID-vkGetRayTracingShaderGroupStackSizeKHR-group-03608", "vkGetRayTracingShaderGroupStackSizeKHR: The value of group must be less than the number of shader groups " "in pipeline."); } return skip; } void PIPELINE_STATE::initGraphicsPipeline(const ValidationStateTracker *state_data, const VkGraphicsPipelineCreateInfo *pCreateInfo, std::shared_ptr<const RENDER_PASS_STATE> &&rpstate) { reset(); bool uses_color_attachment = false; bool uses_depthstencil_attachment = false; if (pCreateInfo->subpass < rpstate->createInfo.subpassCount) { const auto &subpass = rpstate->createInfo.pSubpasses[pCreateInfo->subpass]; for (uint32_t i = 0; i < subpass.colorAttachmentCount; ++i) { if (subpass.pColorAttachments[i].attachment != VK_ATTACHMENT_UNUSED) { uses_color_attachment = true; break; } } if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) { uses_depthstencil_attachment = true; } } graphicsPipelineCI.initialize(pCreateInfo, uses_color_attachment, uses_depthstencil_attachment); if (graphicsPipelineCI.pInputAssemblyState) { topology_at_rasterizer = graphicsPipelineCI.pInputAssemblyState->topology; } stage_state.resize(pCreateInfo->stageCount); for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) { const VkPipelineShaderStageCreateInfo *pssci = &pCreateInfo->pStages[i]; this->duplicate_shaders |= this->active_shaders & pssci->stage; 
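        // A stage bit already present in active_shaders means pStages repeats that stage, so it is
        // recorded in duplicate_shaders before the bit is accumulated below.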
this->active_shaders |= pssci->stage; state_data->RecordPipelineShaderStage(pssci, this, &stage_state[i]); } if (graphicsPipelineCI.pVertexInputState) { const auto vici = graphicsPipelineCI.pVertexInputState; if (vici->vertexBindingDescriptionCount) { this->vertex_binding_descriptions_ = std::vector<VkVertexInputBindingDescription>( vici->pVertexBindingDescriptions, vici->pVertexBindingDescriptions + vici->vertexBindingDescriptionCount); this->vertex_binding_to_index_map_.reserve(vici->vertexBindingDescriptionCount); for (uint32_t i = 0; i < vici->vertexBindingDescriptionCount; ++i) { this->vertex_binding_to_index_map_[vici->pVertexBindingDescriptions[i].binding] = i; } } if (vici->vertexAttributeDescriptionCount) { this->vertex_attribute_descriptions_ = std::vector<VkVertexInputAttributeDescription>( vici->pVertexAttributeDescriptions, vici->pVertexAttributeDescriptions + vici->vertexAttributeDescriptionCount); for (uint32_t i = 0; i < vici->vertexAttributeDescriptionCount; ++i) { const auto attribute_format = vici->pVertexAttributeDescriptions[i].format; VkDeviceSize vtx_attrib_req_alignment = FormatElementSize(attribute_format); if (FormatElementIsTexel(attribute_format)) { vtx_attrib_req_alignment = SafeDivision(vtx_attrib_req_alignment, FormatChannelCount(attribute_format)); } this->vertex_attribute_alignments_.push_back(vtx_attrib_req_alignment); } } } if (graphicsPipelineCI.pColorBlendState) { const auto cbci = graphicsPipelineCI.pColorBlendState; if (cbci->attachmentCount) { this->attachments = std::vector<VkPipelineColorBlendAttachmentState>(cbci->pAttachments, cbci->pAttachments + cbci->attachmentCount); } } rp_state = rpstate; } void PIPELINE_STATE::initComputePipeline(const ValidationStateTracker *state_data, const VkComputePipelineCreateInfo *pCreateInfo) { reset(); computePipelineCI.initialize(pCreateInfo); switch (computePipelineCI.stage.stage) { case VK_SHADER_STAGE_COMPUTE_BIT: this->active_shaders |= VK_SHADER_STAGE_COMPUTE_BIT; stage_state.resize(1); state_data->RecordPipelineShaderStage(&pCreateInfo->stage, this, &stage_state[0]); break; default: // TODO : Flag error break; } } template <typename CreateInfo> void PIPELINE_STATE::initRayTracingPipeline(const ValidationStateTracker *state_data, const CreateInfo *pCreateInfo) { reset(); raytracingPipelineCI.initialize(pCreateInfo); stage_state.resize(pCreateInfo->stageCount); for (uint32_t stage_index = 0; stage_index < pCreateInfo->stageCount; stage_index++) { const auto &shader_stage = pCreateInfo->pStages[stage_index]; switch (shader_stage.stage) { case VK_SHADER_STAGE_RAYGEN_BIT_NV: this->active_shaders |= VK_SHADER_STAGE_RAYGEN_BIT_NV; break; case VK_SHADER_STAGE_ANY_HIT_BIT_NV: this->active_shaders |= VK_SHADER_STAGE_ANY_HIT_BIT_NV; break; case VK_SHADER_STAGE_CLOSEST_HIT_BIT_NV: this->active_shaders |= VK_SHADER_STAGE_CLOSEST_HIT_BIT_NV; break; case VK_SHADER_STAGE_MISS_BIT_NV: this->active_shaders |= VK_SHADER_STAGE_MISS_BIT_NV; break; case VK_SHADER_STAGE_INTERSECTION_BIT_NV: this->active_shaders |= VK_SHADER_STAGE_INTERSECTION_BIT_NV; break; case VK_SHADER_STAGE_CALLABLE_BIT_NV: this->active_shaders |= VK_SHADER_STAGE_CALLABLE_BIT_NV; break; default: // TODO : Flag error break; } state_data->RecordPipelineShaderStage(&shader_stage, this, &stage_state[stage_index]); } } template void PIPELINE_STATE::initRayTracingPipeline(const ValidationStateTracker *state_data, const VkRayTracingPipelineCreateInfoNV *pCreateInfo); template void PIPELINE_STATE::initRayTracingPipeline(const ValidationStateTracker *state_data, 
const VkRayTracingPipelineCreateInfoKHR *pCreateInfo); bool CoreChecks::PreCallValidateCmdSetFragmentShadingRateKHR(VkCommandBuffer commandBuffer, const VkExtent2D *pFragmentSize, const VkFragmentShadingRateCombinerOpKHR combinerOps[2]) const { const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); const char *cmd_name = "vkCmdSetFragmentShadingRateKHR()"; bool skip = ValidateCmdQueueFlags(cb_state, cmd_name, VK_QUEUE_GRAPHICS_BIT, "VUID-vkCmdSetFragmentShadingRateKHR-commandBuffer-cmdpool"); skip |= ValidateCmd(cb_state, CMD_SETFRAGMENTSHADINGRATEKHR, cmd_name); if (!enabled_features.fragment_shading_rate_features.pipelineFragmentShadingRate && !enabled_features.fragment_shading_rate_features.primitiveFragmentShadingRate && !enabled_features.fragment_shading_rate_features.attachmentFragmentShadingRate) { skip |= LogError( cb_state->commandBuffer, "VUID-vkCmdSetFragmentShadingRateKHR-pipelineFragmentShadingRate-04509", "vkCmdSetFragmentShadingRateKHR: Application called %s, but no fragment shading rate features have been enabled.", cmd_name); } if (!enabled_features.fragment_shading_rate_features.pipelineFragmentShadingRate && pFragmentSize->width != 1) { skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdSetFragmentShadingRateKHR-pipelineFragmentShadingRate-04507", "vkCmdSetFragmentShadingRateKHR: Pipeline fragment width of %u has been specified in %s, but " "pipelineFragmentShadingRate is not enabled", pFragmentSize->width, cmd_name); } if (!enabled_features.fragment_shading_rate_features.pipelineFragmentShadingRate && pFragmentSize->height != 1) { skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdSetFragmentShadingRateKHR-pipelineFragmentShadingRate-04508", "vkCmdSetFragmentShadingRateKHR: Pipeline fragment height of %u has been specified in %s, but " "pipelineFragmentShadingRate is not enabled", pFragmentSize->height, cmd_name); } if (!enabled_features.fragment_shading_rate_features.primitiveFragmentShadingRate && combinerOps[0] != VK_FRAGMENT_SHADING_RATE_COMBINER_OP_KEEP_KHR) { skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdSetFragmentShadingRateKHR-primitiveFragmentShadingRate-04510", "vkCmdSetFragmentShadingRateKHR: First combiner operation of %s has been specified in %s, but " "primitiveFragmentShadingRate is not enabled", string_VkFragmentShadingRateCombinerOpKHR(combinerOps[0]), cmd_name); } if (!enabled_features.fragment_shading_rate_features.attachmentFragmentShadingRate && combinerOps[1] != VK_FRAGMENT_SHADING_RATE_COMBINER_OP_KEEP_KHR) { skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdSetFragmentShadingRateKHR-attachmentFragmentShadingRate-04511", "vkCmdSetFragmentShadingRateKHR: Second combiner operation of %s has been specified in %s, but " "attachmentFragmentShadingRate is not enabled", string_VkFragmentShadingRateCombinerOpKHR(combinerOps[1]), cmd_name); } if (!phys_dev_ext_props.fragment_shading_rate_props.fragmentShadingRateNonTrivialCombinerOps && (combinerOps[0] != VK_FRAGMENT_SHADING_RATE_COMBINER_OP_KEEP_KHR && combinerOps[0] != VK_FRAGMENT_SHADING_RATE_COMBINER_OP_REPLACE_KHR)) { skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdSetFragmentShadingRateKHR-fragmentSizeNonTrivialCombinerOps-04512", "vkCmdSetFragmentShadingRateKHR: First combiner operation of %s has been specified in %s, but " "fragmentShadingRateNonTrivialCombinerOps is " "not supported", string_VkFragmentShadingRateCombinerOpKHR(combinerOps[0]), cmd_name); } if (!phys_dev_ext_props.fragment_shading_rate_props.fragmentShadingRateNonTrivialCombinerOps && 
(combinerOps[1] != VK_FRAGMENT_SHADING_RATE_COMBINER_OP_KEEP_KHR && combinerOps[1] != VK_FRAGMENT_SHADING_RATE_COMBINER_OP_REPLACE_KHR)) { skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdSetFragmentShadingRateKHR-fragmentSizeNonTrivialCombinerOps-04512", "vkCmdSetFragmentShadingRateKHR: Second combiner operation of %s has been specified in %s, but " "fragmentShadingRateNonTrivialCombinerOps " "is not supported", string_VkFragmentShadingRateCombinerOpKHR(combinerOps[1]), cmd_name); } if (pFragmentSize->width == 0) { skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdSetFragmentShadingRateKHR-pFragmentSize-04513", "vkCmdSetFragmentShadingRateKHR: Fragment width of %u has been specified in %s.", pFragmentSize->width, cmd_name); } if (pFragmentSize->height == 0) { skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdSetFragmentShadingRateKHR-pFragmentSize-04514", "vkCmdSetFragmentShadingRateKHR: Fragment height of %u has been specified in %s.", pFragmentSize->height, cmd_name); } if (pFragmentSize->width != 0 && !IsPowerOfTwo(pFragmentSize->width)) { skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdSetFragmentShadingRateKHR-pFragmentSize-04515", "vkCmdSetFragmentShadingRateKHR: Non-power-of-two fragment width of %u has been specified in %s.", pFragmentSize->width, cmd_name); } if (pFragmentSize->height != 0 && !IsPowerOfTwo(pFragmentSize->height)) { skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdSetFragmentShadingRateKHR-pFragmentSize-04516", "vkCmdSetFragmentShadingRateKHR: Non-power-of-two fragment height of %u has been specified in %s.", pFragmentSize->height, cmd_name); } if (pFragmentSize->width > 4) { skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdSetFragmentShadingRateKHR-pFragmentSize-04517", "vkCmdSetFragmentShadingRateKHR: Fragment width of %u specified in %s is too large.", pFragmentSize->width, cmd_name); } if (pFragmentSize->height > 4) { skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdSetFragmentShadingRateKHR-pFragmentSize-04518", "vkCmdSetFragmentShadingRateKHR: Fragment height of %u specified in %s is too large", pFragmentSize->height, cmd_name); } return skip; }
1
15,230
The name kTimelineCannotBeSignalled is confusing. It's not that a TimelineSemaphore cannot be signalled; it's the "VK_KHR_timeline_semaphore is enabled" *variant* of the "binary cannot be signalled" message. We should probably adopt a consistent naming scheme to clarify that, e.g. kBinaryCannotBeSignalledAltTimeline, or hide the complexity in Get*VUID functions as needed somehow (pass in the extension structure?)
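A minimal sketch of the Get*VUID idea suggested in the comment above (every name and VUID string here is a hypothetical placeholder, not the actual ValidationLayers API): the call site asks for "the binary-semaphore cannot-be-signalled VUID" and the extension-dependent choice is made in one place.

// Hypothetical helper (illustrative only): selects the extension-dependent
// variant of the "binary semaphore cannot be signalled" VUID, so call sites
// never need to know about kBinaryCannotBeSignalledAltTimeline-style names.
// The VUID strings below are placeholders, not real VUIDs.
static constexpr const char *kBinaryCannotBeSignalled = "VUID-...";
static constexpr const char *kBinaryCannotBeSignalledAltTimeline = "VUID-...";

const char *GetBinaryCannotBeSignalledVUID(bool timeline_semaphore_enabled) {
    // With VK_KHR_timeline_semaphore enabled, the spec assigns a different
    // VUID to the same logical error, hence the two variants.
    return timeline_semaphore_enabled ? kBinaryCannotBeSignalledAltTimeline
                                      : kBinaryCannotBeSignalled;
}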
KhronosGroup-Vulkan-ValidationLayers
cpp
@@ -2541,7 +2541,7 @@ defaultdict(<class 'list'>, {'col..., 'col...})] return DataFrame(sdf, self._metadata.copy()) # TODO: percentiles, include, and exclude should be implemented. - def describe(self) -> 'DataFrame': + def describe(self, percentiles=None) -> 'DataFrame': """ Generate descriptive statistics that summarize the central tendency, dispersion and shape of a dataset's distribution, excluding
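A minimal usage sketch of the patched describe signature (illustrative; assumes pandas-like percentiles semantics, i.e. a list of fractions between 0 and 1 — the diff only shows the new parameter, not its handling):

import databricks.koalas as ks

kdf = ks.DataFrame({'a': [1, 2, 3, 4], 'b': [10.0, 20.0, 30.0, 40.0]})

# Default summary statistics (count, mean, std, min, quartiles, max)
kdf.describe()

# With the new parameter: request custom percentiles (assumed to follow
# pandas, where each value is a fraction between 0 and 1)
kdf.describe(percentiles=[0.1, 0.9])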
1
# # Copyright (C) 2019 Databricks, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ A wrapper class for Spark DataFrame to behave similar to pandas DataFrame. """ import re import warnings from functools import partial, reduce from typing import Any, Optional, List, Tuple, Union import numpy as np import pandas as pd from pandas.api.types import is_datetime64_dtype, is_datetime64tz_dtype, is_list_like, \ is_dict_like from pyspark import sql as spark from pyspark.sql import functions as F, Column from pyspark.sql.types import (BooleanType, ByteType, DecimalType, DoubleType, FloatType, IntegerType, LongType, NumericType, ShortType, StructField, StructType, to_arrow_type) from pyspark.sql.utils import AnalysisException from databricks import koalas as ks # For running doctests and reference resolution in PyCharm. from databricks.koalas.utils import default_session, validate_arguments_and_invoke_function from databricks.koalas.generic import _Frame, max_display_count from databricks.koalas.metadata import Metadata from databricks.koalas.missing.frame import _MissingPandasLikeDataFrame from databricks.koalas.ml import corr from databricks.koalas.typedef import infer_pd_series_spark_type # These regular expression patterns are complied and defined here to avoid to compile the same # pattern every time it is used in _repr_ and _repr_html_ in DataFrame. # Two patterns basically seek the footer string from Pandas' REPR_PATTERN = re.compile(r"\n\n\[(?P<rows>[0-9]+) rows x (?P<columns>[0-9]+) columns\]$") REPR_HTML_PATTERN = re.compile( r"\n\<p\>(?P<rows>[0-9]+) rows × (?P<columns>[0-9]+) columns\<\/p\>\n\<\/div\>$") class DataFrame(_Frame): """ Koala DataFrame that corresponds to Pandas DataFrame logically. This holds Spark DataFrame internally. :ivar _sdf: Spark Column instance :ivar _metadata: Metadata related to column names and index information. Parameters ---------- data : numpy ndarray (structured or homogeneous), dict, Pandas DataFrame or Spark DataFrame Dict can contain Series, arrays, constants, or list-like objects If data is a dict, argument order is maintained for Python 3.6 and later. Note that if `data` is a Pandas DataFrame, other arguments should not be used. If `data` is a Spark DataFrame, all other arguments except `index` should not be used. index : Index or array-like Index to use for resulting frame. Will default to RangeIndex if no indexing information part of input data and no index provided If `data` is a Spark DataFrame, `index` is expected to be `Metadata`. columns : Index or array-like Column labels to use for resulting frame. Will default to RangeIndex (0, 1, 2, ..., n) if no column labels are provided dtype : dtype, default None Data type to force. Only a single dtype is allowed. If None, infer copy : boolean, default False Copy data from inputs. Only affects DataFrame / 2d ndarray input Examples -------- Constructing DataFrame from a dictionary. 
>>> d = {'col1': [1, 2], 'col2': [3, 4]} >>> df = ks.DataFrame(data=d, columns=['col1', 'col2']) >>> df col1 col2 0 1 3 1 2 4 Constructing DataFrame from Pandas DataFrame >>> df = ks.DataFrame(pd.DataFrame(data=d, columns=['col1', 'col2'])) >>> df col1 col2 0 1 3 1 2 4 Notice that the inferred dtype is int64. >>> df.dtypes col1 int64 col2 int64 dtype: object To enforce a single dtype: >>> df = ks.DataFrame(data=d, dtype=np.int8) >>> df.dtypes col1 int8 col2 int8 dtype: object Constructing DataFrame from numpy ndarray: >>> df2 = ks.DataFrame(np.random.randint(low=0, high=10, size=(5, 5)), ... columns=['a', 'b', 'c', 'd', 'e']) >>> df2 # doctest: +SKIP a b c d e 0 3 1 4 9 8 1 4 8 4 8 4 2 7 6 5 6 7 3 8 7 9 1 0 4 2 5 4 3 9 """ def __init__(self, data=None, index=None, columns=None, dtype=None, copy=False): if isinstance(data, pd.DataFrame): assert index is None assert columns is None assert dtype is None assert not copy self._init_from_pandas(data) elif isinstance(data, spark.DataFrame): assert columns is None assert dtype is None assert not copy self._init_from_spark(data, index) else: pdf = pd.DataFrame(data=data, index=index, columns=columns, dtype=dtype, copy=copy) self._init_from_pandas(pdf) def _init_from_pandas(self, pdf): metadata = Metadata.from_pandas(pdf) reset_index = pdf.reset_index() reset_index.columns = metadata.columns schema = StructType([StructField(name, infer_pd_series_spark_type(col), nullable=bool(col.isnull().any())) for name, col in reset_index.iteritems()]) for name, col in reset_index.iteritems(): dt = col.dtype if is_datetime64_dtype(dt) or is_datetime64tz_dtype(dt): continue reset_index[name] = col.replace({np.nan: None}) self._init_from_spark(default_session().createDataFrame(reset_index, schema=schema), metadata) def _init_from_spark(self, sdf, metadata=None): self._sdf = sdf if metadata is None: self._metadata = Metadata(data_columns=self._sdf.schema.fieldNames()) else: self._metadata = metadata def _reduce_for_stat_function(self, sfun): """ Applies sfun to each column and returns a pd.Series where the number of rows equal the number of columns. :param sfun: either an 1-arg function that takes a Column and returns a Column, or a 2-arg function that takes a Column and its DataType and returns a Column. """ from inspect import signature exprs = [] num_args = len(signature(sfun).parameters) for col in self.columns: col_sdf = self._sdf[col] col_type = self._sdf.schema[col].dataType if isinstance(col_type, BooleanType) and sfun.__name__ not in ('min', 'max'): # Stat functions cannot be used with boolean values by default # Thus, cast to integer (true to 1 and false to 0) # Exclude the min and max methods though since those work with booleans col_sdf = col_sdf.cast('integer') if num_args == 1: # Only pass in the column if sfun accepts only one arg col_sdf = sfun(col_sdf) else: # must be 2 assert num_args == 2 # Pass in both the column and its data type if sfun accepts two args col_sdf = sfun(col_sdf, col_type) exprs.append(col_sdf.alias(col)) sdf = self._sdf.select(*exprs) pdf = sdf.toPandas() assert len(pdf) == 1, (sdf, pdf) row = pdf.iloc[0] row.name = None return row # Return first row as a Series def corr(self, method='pearson'): """ Compute pairwise correlation of columns, excluding NA/null values. 
Parameters ---------- method : {'pearson', 'spearman'} * pearson : standard correlation coefficient * spearman : Spearman rank correlation Returns ------- y : pandas.DataFrame See Also -------- Series.corr Examples -------- >>> df = ks.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)], ... columns=['dogs', 'cats']) >>> df.corr('pearson') dogs cats dogs 1.000000 -0.851064 cats -0.851064 1.000000 >>> df.corr('spearman') dogs cats dogs 1.000000 -0.948683 cats -0.948683 1.000000 Notes ----- There are behavior differences between Koalas and pandas. * the `method` argument only accepts 'pearson', 'spearman' * the data should not contain NaNs. Koalas will return an error. * Koalas doesn't support the following argument(s). * `min_periods` argument is not supported """ return corr(self, method) def iteritems(self): """ Iterator over (column name, Series) pairs. Iterates over the DataFrame columns, returning a tuple with the column name and the content as a Series. Returns ------- label : object The column names for the DataFrame being iterated over. content : Series The column entries belonging to each label, as a Series. Examples -------- >>> df = ks.DataFrame({'species': ['bear', 'bear', 'marsupial'], ... 'population': [1864, 22000, 80000]}, ... index=['panda', 'polar', 'koala'], ... columns=['species', 'population']) >>> df species population panda bear 1864 polar bear 22000 koala marsupial 80000 >>> for label, content in df.iteritems(): ... print('label:', label) ... print('content:', content.to_string()) ... label: species content: panda bear polar bear koala marsupial label: population content: panda 1864 polar 22000 koala 80000 """ cols = list(self.columns) return list((col_name, self[col_name]) for col_name in cols) def to_clipboard(self, excel=True, sep=None, **kwargs): """ Copy object to the system clipboard. Write a text representation of object to the system clipboard. This can be pasted into Excel, for example. .. note:: This method should only be used if the resulting DataFrame is expected to be small, as all the data is loaded into the driver's memory. Parameters ---------- excel : bool, default True - True, use the provided separator, writing in a csv format for allowing easy pasting into excel. - False, write a string representation of the object to the clipboard. sep : str, default ``'\\t'`` Field delimiter. **kwargs These parameters will be passed to DataFrame.to_csv. Notes ----- Requirements for your platform. - Linux : `xclip`, or `xsel` (with `gtk` or `PyQt4` modules) - Windows : none - OS X : none Examples -------- Copy the contents of a DataFrame to the clipboard. >>> df = ks.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C']) # doctest: +SKIP >>> df.to_clipboard(sep=',') # doctest: +SKIP ... # Wrote the following to the system clipboard: ... # ,A,B,C ... # 0,1,2,3 ... # 1,4,5,6 We can omit the the index by passing the keyword `index` and setting it to false. >>> df.to_clipboard(sep=',', index=False) # doctest: +SKIP ... # Wrote the following to the system clipboard: ... # A,B,C ... # 1,2,3 ... # 4,5,6 This function also works for Series: >>> df = ks.Series([1, 2, 3, 4, 5, 6, 7], name='x') # doctest: +SKIP >>> df.to_clipboard(sep=',') # doctest: +SKIP ... # Wrote the following to the system clipboard: ... # 0, 1 ... # 1, 2 ... # 2, 3 ... # 3, 4 ... # 4, 5 ... # 5, 6 ... 
# 6, 7 """ args = locals() kdf = self return validate_arguments_and_invoke_function( kdf.to_pandas(), self.to_clipboard, pd.DataFrame.to_clipboard, args) def to_html(self, buf=None, columns=None, col_space=None, header=True, index=True, na_rep='NaN', formatters=None, float_format=None, sparsify=None, index_names=True, justify=None, max_rows=None, max_cols=None, show_dimensions=False, decimal='.', bold_rows=True, classes=None, escape=True, notebook=False, border=None, table_id=None, render_links=False): """ Render a DataFrame as an HTML table. .. note:: This method should only be used if the resulting Pandas object is expected to be small, as all the data is loaded into the driver's memory. If the input is large, set max_rows parameter. Parameters ---------- buf : StringIO-like, optional Buffer to write to. columns : sequence, optional, default None The subset of columns to write. Writes all columns by default. col_space : int, optional The minimum width of each column. header : bool, optional Write out the column names. If a list of strings is given, it is assumed to be aliases for the column names index : bool, optional, default True Whether to print index (row) labels. na_rep : str, optional, default 'NaN' String representation of NAN to use. formatters : list or dict of one-param. functions, optional Formatter functions to apply to columns' elements by position or name. The result of each function must be a unicode string. List must be of length equal to the number of columns. float_format : one-parameter function, optional, default None Formatter function to apply to columns' elements if they are floats. The result of this function must be a unicode string. sparsify : bool, optional, default True Set to False for a DataFrame with a hierarchical index to print every multiindex key at each row. index_names : bool, optional, default True Prints the names of the indexes. justify : str, default None How to justify the column labels. If None uses the option from the print configuration (controlled by set_option), 'right' out of the box. Valid values are * left * right * center * justify * justify-all * start * end * inherit * match-parent * initial * unset. max_rows : int, optional Maximum number of rows to display in the console. max_cols : int, optional Maximum number of columns to display in the console. show_dimensions : bool, default False Display DataFrame dimensions (number of rows by number of columns). decimal : str, default '.' Character recognized as decimal separator, e.g. ',' in Europe. bold_rows : bool, default True Make the row labels bold in the output. classes : str or list or tuple, default None CSS class(es) to apply to the resulting html table. escape : bool, default True Convert the characters <, >, and & to HTML-safe sequences. notebook : {True, False}, default False Whether the generated HTML is for IPython Notebook. border : int A ``border=border`` attribute is included in the opening `<table>` tag. Default ``pd.options.html.border``. table_id : str, optional A css id is included in the opening `<table>` tag if specified. render_links : bool, default False Convert URLs to HTML links (only works with Pandas 0.24+). Returns ------- str (or unicode, depending on data and options) String representation of the dataframe. See Also -------- to_string : Convert DataFrame to a string. """ # Make sure locals() call is at the top of the function so we don't capture local variables. 
args = locals() if max_rows is not None: kdf = self.head(max_rows) else: kdf = self return validate_arguments_and_invoke_function( kdf.to_pandas(), self.to_html, pd.DataFrame.to_html, args) def to_string(self, buf=None, columns=None, col_space=None, header=True, index=True, na_rep='NaN', formatters=None, float_format=None, sparsify=None, index_names=True, justify=None, max_rows=None, max_cols=None, show_dimensions=False, decimal='.', line_width=None): """ Render a DataFrame to a console-friendly tabular output. .. note:: This method should only be used if the resulting Pandas object is expected to be small, as all the data is loaded into the driver's memory. If the input is large, set max_rows parameter. Parameters ---------- buf : StringIO-like, optional Buffer to write to. columns : sequence, optional, default None The subset of columns to write. Writes all columns by default. col_space : int, optional The minimum width of each column. header : bool, optional Write out the column names. If a list of strings is given, it is assumed to be aliases for the column names index : bool, optional, default True Whether to print index (row) labels. na_rep : str, optional, default 'NaN' String representation of NAN to use. formatters : list or dict of one-param. functions, optional Formatter functions to apply to columns' elements by position or name. The result of each function must be a unicode string. List must be of length equal to the number of columns. float_format : one-parameter function, optional, default None Formatter function to apply to columns' elements if they are floats. The result of this function must be a unicode string. sparsify : bool, optional, default True Set to False for a DataFrame with a hierarchical index to print every multiindex key at each row. index_names : bool, optional, default True Prints the names of the indexes. justify : str, default None How to justify the column labels. If None uses the option from the print configuration (controlled by set_option), 'right' out of the box. Valid values are * left * right * center * justify * justify-all * start * end * inherit * match-parent * initial * unset. max_rows : int, optional Maximum number of rows to display in the console. max_cols : int, optional Maximum number of columns to display in the console. show_dimensions : bool, default False Display DataFrame dimensions (number of rows by number of columns). decimal : str, default '.' Character recognized as decimal separator, e.g. ',' in Europe. line_width : int, optional Width to wrap a line in characters. Returns ------- str (or unicode, depending on data and options) String representation of the dataframe. See Also -------- to_html : Convert DataFrame to HTML. Examples -------- >>> df = ks.DataFrame({'col1': [1, 2, 3], 'col2': [4, 5, 6]}, columns=['col1', 'col2']) >>> print(df.to_string()) col1 col2 0 1 4 1 2 5 2 3 6 >>> print(df.to_string(max_rows=2)) col1 col2 0 1 4 1 2 5 """ # Make sure locals() call is at the top of the function so we don't capture local variables. args = locals() if max_rows is not None: kdf = self.head(max_rows) else: kdf = self return validate_arguments_and_invoke_function( kdf.to_pandas(), self.to_string, pd.DataFrame.to_string, args) def to_dict(self, orient='dict', into=dict): """ Convert the DataFrame to a dictionary. The type of the key-value pairs can be customized with the parameters (see below). .. 
note:: This method should only be used if the resulting Pandas DataFrame is expected to be small, as all the data is loaded into the driver's memory. Parameters ---------- orient : str {'dict', 'list', 'series', 'split', 'records', 'index'} Determines the type of the values of the dictionary. - 'dict' (default) : dict like {column -> {index -> value}} - 'list' : dict like {column -> [values]} - 'series' : dict like {column -> Series(values)} - 'split' : dict like {'index' -> [index], 'columns' -> [columns], 'data' -> [values]} - 'records' : list like [{column -> value}, ... , {column -> value}] - 'index' : dict like {index -> {column -> value}} Abbreviations are allowed. `s` indicates `series` and `sp` indicates `split`. into : class, default dict The collections.abc.Mapping subclass used for all Mappings in the return value. Can be the actual class or an empty instance of the mapping type you want. If you want a collections.defaultdict, you must pass it initialized. Returns ------- dict, list or collections.abc.Mapping Return a collections.abc.Mapping object representing the DataFrame. The resulting transformation depends on the `orient` parameter. Examples -------- >>> df = ks.DataFrame({'col1': [1, 2], ... 'col2': [0.5, 0.75]}, ... index=['row1', 'row2'], ... columns=['col1', 'col2']) >>> df col1 col2 row1 1 0.50 row2 2 0.75 >>> df_dict = df.to_dict() >>> sorted([(key, sorted(values.items())) for key, values in df_dict.items()]) [('col1', [('row1', 1), ('row2', 2)]), ('col2', [('row1', 0.5), ('row2', 0.75)])] You can specify the return orientation. >>> df_dict = df.to_dict('series') >>> sorted(df_dict.items()) [('col1', row1 1 row2 2 Name: col1, dtype: int64), ('col2', row1 0.50 row2 0.75 Name: col2, dtype: float64)] >>> df_dict = df.to_dict('split') >>> sorted(df_dict.items()) # doctest: +ELLIPSIS [('columns', ['col1', 'col2']), ('data', [[1..., 0.75]]), ('index', ['row1', 'row2'])] >>> df_dict = df.to_dict('records') >>> [sorted(values.items()) for values in df_dict] # doctest: +ELLIPSIS [[('col1', 1...), ('col2', 0.5)], [('col1', 2...), ('col2', 0.75)]] >>> df_dict = df.to_dict('index') >>> sorted([(key, sorted(values.items())) for key, values in df_dict.items()]) [('row1', [('col1', 1), ('col2', 0.5)]), ('row2', [('col1', 2), ('col2', 0.75)])] You can also specify the mapping type. >>> from collections import OrderedDict, defaultdict >>> df.to_dict(into=OrderedDict) OrderedDict([('col1', OrderedDict([('row1', 1), ('row2', 2)])), \ ('col2', OrderedDict([('row1', 0.5), ('row2', 0.75)]))]) If you want a `defaultdict`, you need to initialize it: >>> dd = defaultdict(list) >>> df.to_dict('records', into=dd) # doctest: +ELLIPSIS [defaultdict(<class 'list'>, {'col..., 'col...}), \ defaultdict(<class 'list'>, {'col..., 'col...})] """ # Make sure locals() call is at the top of the function so we don't capture local variables. args = locals() kdf = self return validate_arguments_and_invoke_function( kdf.to_pandas(), self.to_dict, pd.DataFrame.to_dict, args) def to_latex(self, buf=None, columns=None, col_space=None, header=True, index=True, na_rep='NaN', formatters=None, float_format=None, sparsify=None, index_names=True, bold_rows=False, column_format=None, longtable=None, escape=None, encoding=None, decimal='.', multicolumn=None, multicolumn_format=None, multirow=None): r""" Render an object to a LaTeX tabular environment table. Render an object to a tabular environment table. You can splice this into a LaTeX document. Requires usepackage{booktabs}. .. 
note:: This method should only be used if the resulting Pandas object is expected to be small, as all the data is loaded into the driver's memory. If the input is large, consider alternative formats. Parameters ---------- buf : file descriptor or None Buffer to write to. If None, the output is returned as a string. columns : list of label, optional The subset of columns to write. Writes all columns by default. col_space : int, optional The minimum width of each column. header : bool or list of str, default True Write out the column names. If a list of strings is given, it is assumed to be aliases for the column names. index : bool, default True Write row names (index). na_rep : str, default ‘NaN’ Missing data representation. formatters : list of functions or dict of {str: function}, optional Formatter functions to apply to columns’ elements by position or name. The result of each function must be a unicode string. List must be of length equal to the number of columns. float_format : str, optional Format string for floating point numbers. sparsify : bool, optional Set to False for a DataFrame with a hierarchical index to print every multiindex key at each row. By default, the value will be read from the config module. index_names : bool, default True Prints the names of the indexes. bold_rows : bool, default False Make the row labels bold in the output. column_format : str, optional The columns format as specified in LaTeX table format e.g. ‘rcl’ for 3 columns. By default, ‘l’ will be used for all columns except columns of numbers, which default to ‘r’. longtable : bool, optional By default, the value will be read from the pandas config module. Use a longtable environment instead of tabular. Requires adding a usepackage{longtable} to your LaTeX preamble. escape : bool, optional By default, the value will be read from the pandas config module. When set to False prevents from escaping latex special characters in column names. encoding : str, optional A string representing the encoding to use in the output file, defaults to ‘ascii’ on Python 2 and ‘utf-8’ on Python 3. decimal : str, default ‘.’ Character recognized as decimal separator, e.g. ‘,’ in Europe. multicolumn : bool, default True Use multicolumn to enhance MultiIndex columns. The default will be read from the config module. multicolumn_format : str, default ‘l’ The alignment for multicolumns, similar to column_format The default will be read from the config module. multirow : bool, default False Use multirow to enhance MultiIndex rows. Requires adding a usepackage{multirow} to your LaTeX preamble. Will print centered labels (instead of top-aligned) across the contained rows, separating groups via clines. The default will be read from the pandas config module. Returns ------- str or None If buf is None, returns the resulting LateX format as a string. Otherwise returns None. See Also -------- DataFrame.to_string : Render a DataFrame to a console-friendly tabular output. DataFrame.to_html : Render a DataFrame as an HTML table. Examples -------- >>> df = pd.DataFrame({'name': ['Raphael', 'Donatello'], ... 'mask': ['red', 'purple'], ... 'weapon': ['sai', 'bo staff']}, ... 
columns=['name', 'mask', 'weapon']) >>> df.to_latex(index=False) # doctest: +NORMALIZE_WHITESPACE '\\begin{tabular}{lll}\n\\toprule\n name & mask & weapon \\\\\n\\midrule\n Raphael & red & sai \\\\\n Donatello & purple & bo staff \\\\\n\\bottomrule\n\\end{tabular}\n' """ args = locals() kdf = self return validate_arguments_and_invoke_function( kdf.to_pandas(), self.to_latex, pd.DataFrame.to_latex, args) @property def index(self): """The index (row labels) Column of the DataFrame. Currently not supported when the DataFrame has no index. See Also -------- Index """ from databricks.koalas.indexes import Index, MultiIndex if len(self._metadata.index_map) == 0: return None elif len(self._metadata.index_map) == 1: return Index(self) else: return MultiIndex(self) def set_index(self, keys, drop=True, append=False, inplace=False): """Set the DataFrame index (row labels) using one or more existing columns. Set the DataFrame index (row labels) using one or more existing columns or arrays (of the correct length). The index can replace the existing index or expand on it. Parameters ---------- keys : label or array-like or list of labels/arrays This parameter can be either a single column key, a single array of the same length as the calling DataFrame, or a list containing an arbitrary combination of column keys and arrays. Here, "array" encompasses :class:`Series`, :class:`Index` and ``np.ndarray``. drop : bool, default True Delete columns to be used as the new index. append : bool, default False Whether to append columns to existing index. inplace : bool, default False Modify the DataFrame in place (do not create a new object). Returns ------- DataFrame Changed row labels. See Also -------- DataFrame.reset_index : Opposite of set_index. Examples -------- >>> df = ks.DataFrame({'month': [1, 4, 7, 10], ... 'year': [2012, 2014, 2013, 2014], ... 'sale': [55, 40, 84, 31]}, ... columns=['month', 'year', 'sale']) >>> df month year sale 0 1 2012 55 1 4 2014 40 2 7 2013 84 3 10 2014 31 Set the index to become the 'month' column: >>> df.set_index('month') # doctest: +NORMALIZE_WHITESPACE year sale month 1 2012 55 4 2014 40 7 2013 84 10 2014 31 Create a MultiIndex using columns 'year' and 'month': >>> df.set_index(['year', 'month']) # doctest: +NORMALIZE_WHITESPACE sale year month 2012 1 55 2014 4 40 2013 7 84 2014 10 31 """ if isinstance(keys, str): keys = [keys] else: keys = list(keys) for key in keys: if key not in self.columns: raise KeyError(key) if drop: data_columns = [column for column in self._metadata.data_columns if column not in keys] else: data_columns = self._metadata.data_columns if append: index_map = self._metadata.index_map + [(column, column) for column in keys] else: index_map = [(column, column) for column in keys] metadata = self._metadata.copy(data_columns=data_columns, index_map=index_map) # Sync Spark's columns as well. sdf = self._sdf.select(['`{}`'.format(name) for name in metadata.columns]) if inplace: self._metadata = metadata self._sdf = sdf else: kdf = self.copy() kdf._metadata = metadata kdf._sdf = sdf return kdf def reset_index(self, level=None, drop=False, inplace=False): """Reset the index, or a level of it. For DataFrame with multi-level index, return new DataFrame with labeling information in the columns under the index names, defaulting to 'level_0', 'level_1', etc. if any are None. For a standard index, the index name will be used (if set), otherwise a default 'index' or 'level_0' (if 'index' is already taken) will be used. 
Parameters ---------- level : int, str, tuple, or list, default None Only remove the given levels from the index. Removes all levels by default. drop : bool, default False Do not try to insert index into dataframe columns. This resets the index to the default integer index. inplace : bool, default False Modify the DataFrame in place (do not create a new object). Returns ------- DataFrame DataFrame with the new index. See Also -------- DataFrame.set_index : Opposite of reset_index. Examples -------- >>> df = ks.DataFrame([('bird', 389.0), ... ('bird', 24.0), ... ('mammal', 80.5), ... ('mammal', np.nan)], ... index=['falcon', 'parrot', 'lion', 'monkey'], ... columns=('class', 'max_speed')) >>> df class max_speed falcon bird 389.0 parrot bird 24.0 lion mammal 80.5 monkey mammal NaN When we reset the index, the old index is added as a column. Unlike pandas, Koalas does not automatically add a sequential index. The following 0, 1, 2, 3 are only there when we display the DataFrame. >>> df.reset_index() index class max_speed 0 falcon bird 389.0 1 parrot bird 24.0 2 lion mammal 80.5 3 monkey mammal NaN We can use the `drop` parameter to avoid the old index being added as a column: >>> df.reset_index(drop=True) class max_speed 0 bird 389.0 1 bird 24.0 2 mammal 80.5 3 mammal NaN """ # TODO: add example of MultiIndex back. See https://github.com/databricks/koalas/issues/301 if len(self._metadata.index_map) == 0: raise NotImplementedError('Can\'t reset index because there is no index.') multi_index = len(self._metadata.index_map) > 1 def rename(index): if multi_index: return 'level_{}'.format(index) else: if 'index' not in self._metadata.data_columns: return 'index' else: return 'level_{}'.format(index) if level is None: new_index_map = [(column, name if name is not None else rename(i)) for i, (column, name) in enumerate(self._metadata.index_map)] index_map = [] else: if isinstance(level, (int, str)): level = [level] level = list(level) if all(isinstance(l, int) for l in level): for lev in level: if lev >= len(self._metadata.index_map): raise IndexError('Too many levels: Index has only {} level, not {}' .format(len(self._metadata.index_map), lev + 1)) idx = level elif all(isinstance(lev, str) for lev in level): idx = [] for l in level: try: i = self._metadata.index_columns.index(l) idx.append(i) except ValueError: if multi_index: raise KeyError('Level unknown not found') else: raise KeyError('Level unknown must be same as name ({})' .format(self._metadata.index_columns[0])) else: raise ValueError('Level should be all int or all string.') idx.sort() new_index_map = [] index_map = self._metadata.index_map.copy() for i in idx: info = self._metadata.index_map[i] index_column, index_name = info new_index_map.append( (index_column, index_name if index_name is not None else rename(index_name))) index_map.remove(info) if drop: new_index_map = [] metadata = self._metadata.copy( data_columns=[column for column, _ in new_index_map] + self._metadata.data_columns, index_map=index_map) columns = [name for _, name in new_index_map] + self._metadata.data_columns if inplace: self._metadata = metadata self.columns = columns else: kdf = self.copy() kdf._metadata = metadata kdf.columns = columns return kdf def isnull(self): """ Detects missing values for items in the current Dataframe. Return a boolean same-sized Dataframe indicating if the values are NA. NA values, such as None or numpy.NaN, gets mapped to True values. Everything else gets mapped to False values. 
See Also -------- Dataframe.notnull Examples -------- >>> df = ks.DataFrame([(.2, .3), (.0, None), (.6, None), (.2, .1)]) >>> df.isnull() 0 1 0 False False 1 False True 2 False True 3 False False >>> df = ks.DataFrame([[None, 'bee', None], ['dog', None, 'fly']]) >>> df.isnull() 0 1 2 0 True False True 1 False True False """ kdf = self.copy() for name, ks in kdf.iteritems(): kdf[name] = ks.isnull() return kdf isna = isnull def notnull(self): """ Detects non-missing values for items in the current Dataframe. This function takes a dataframe and indicates whether it's values are valid (not missing, which is ``NaN`` in numeric datatypes, ``None`` or ``NaN`` in objects and ``NaT`` in datetimelike). See Also -------- Dataframe.isnull Examples -------- >>> df = ks.DataFrame([(.2, .3), (.0, None), (.6, None), (.2, .1)]) >>> df.notnull() 0 1 0 True True 1 True False 2 True False 3 True True >>> df = ks.DataFrame([['ant', 'bee', 'cat'], ['dog', None, 'fly']]) >>> df.notnull() 0 1 2 0 True True True 1 True False True """ kdf = self.copy() for name, ks in kdf.iteritems(): kdf[name] = ks.notnull() return kdf notna = notnull def to_koalas(self): """ Converts the existing DataFrame into a Koalas DataFrame. This method is monkey-patched into Spark's DataFrame and can be used to convert a Spark DataFrame into a Koalas DataFrame. If running on an existing Koalas DataFrame, the method returns itself. If a Koalas DataFrame is converted to a Spark DataFrame and then back to Koalas, it will lose the index information and the original index will be turned into a normal column. See Also -------- DataFrame.to_spark Examples -------- >>> df = ks.DataFrame({'col1': [1, 2], 'col2': [3, 4]}, columns=['col1', 'col2']) >>> df col1 col2 0 1 3 1 2 4 >>> spark_df = df.to_spark() >>> spark_df DataFrame[__index_level_0__: bigint, col1: bigint, col2: bigint] >>> kdf = spark_df.to_koalas() >>> kdf __index_level_0__ col1 col2 0 0 1 3 1 1 2 4 Calling to_koalas on a Koalas DataFrame simply returns itself. >>> df.to_koalas() col1 col2 0 1 3 1 2 4 """ if isinstance(self, DataFrame): return self else: return DataFrame(self) def to_spark(self): """ Return the current DataFrame as a Spark DataFrame. See Also -------- DataFrame.to_koalas """ return self._sdf def to_pandas(self): """ Return a Pandas DataFrame. .. note:: This method should only be used if the resulting Pandas DataFrame is expected to be small, as all the data is loaded into the driver's memory. Examples -------- >>> df = ks.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)], ... columns=['dogs', 'cats']) >>> df.to_pandas() dogs cats 0 0.2 0.3 1 0.0 0.6 2 0.6 0.0 3 0.2 0.1 """ sdf = self._sdf.select(['`{}`'.format(name) for name in self._metadata.columns]) pdf = sdf.toPandas() if len(pdf) == 0 and len(sdf.schema) > 0: # TODO: push to OSS pdf = pdf.astype({field.name: to_arrow_type(field.dataType).to_pandas_dtype() for field in sdf.schema}) index_columns = self._metadata.index_columns if len(index_columns) > 0: append = False for index_field in index_columns: drop = index_field not in self._metadata.data_columns pdf = pdf.set_index(index_field, drop=drop, append=append) append = True pdf = pdf[self._metadata.data_columns] index_names = self._metadata.index_names if len(index_names) > 0: if isinstance(pdf.index, pd.MultiIndex): pdf.index.names = index_names else: pdf.index.name = index_names[0] return pdf # Alias to maintain backward compatibility with Spark toPandas = to_pandas def assign(self, **kwargs): """ Assign new columns to a DataFrame. 
Returns a new object with all original columns in addition to new ones. Existing columns that are re-assigned will be overwritten. Parameters ---------- **kwargs : dict of {str: callable or Series} The column names are keywords. If the values are callable, they are computed on the DataFrame and assigned to the new columns. The callable must not change input DataFrame (though Koalas doesn't check it). If the values are not callable, (e.g. a Series or a literal), they are simply assigned. Returns ------- DataFrame A new DataFrame with the new columns in addition to all the existing columns. Examples -------- >>> df = ks.DataFrame({'temp_c': [17.0, 25.0]}, ... index=['Portland', 'Berkeley']) >>> df temp_c Portland 17.0 Berkeley 25.0 Where the value is a callable, evaluated on `df`: >>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32) temp_c temp_f Portland 17.0 62.6 Berkeley 25.0 77.0 Alternatively, the same behavior can be achieved by directly referencing an existing Series or sequence and you can also create multiple columns within the same assign. >>> assigned = df.assign(temp_f=df['temp_c'] * 9 / 5 + 32, ... temp_k=df['temp_c'] + 273.15) >>> assigned[['temp_c', 'temp_f', 'temp_k']] temp_c temp_f temp_k Portland 17.0 62.6 290.15 Berkeley 25.0 77.0 298.15 Notes ----- Assigning multiple columns within the same ``assign`` is possible but you cannot refer to newly created or modified columns. This feature is supported in pandas for Python 3.6 and later but not in Koalas. In Koalas, all items are computed first, and then assigned. """ from databricks.koalas.series import Series for k, v in kwargs.items(): if not (isinstance(v, (Series, spark.Column)) or callable(v) or pd.api.types.is_scalar(v)): raise TypeError("Column assignment doesn't support type " "{0}".format(type(v).__name__)) if callable(v): kwargs[k] = v(self) pairs = list(kwargs.items()) sdf = self._sdf for (name, c) in pairs: if isinstance(c, Series): sdf = sdf.withColumn(name, c._scol) elif isinstance(c, Column): sdf = sdf.withColumn(name, c) else: sdf = sdf.withColumn(name, F.lit(c)) data_columns = self._metadata.data_columns metadata = self._metadata.copy( data_columns=(data_columns + [name for name, _ in pairs if name not in data_columns])) return DataFrame(sdf, metadata) def to_records(self, index=True, convert_datetime64=None, column_dtypes=None, index_dtypes=None): """ Convert DataFrame to a NumPy record array. Index will be included as the first field of the record array if requested. .. note:: This method should only be used if the resulting NumPy ndarray is expected to be small, as all the data is loaded into the driver's memory. Parameters ---------- index : bool, default True Include index in resulting record array, stored in 'index' field or using the index label, if set. convert_datetime64 : bool, default None Whether to convert the index to datetime.datetime if it is a DatetimeIndex. column_dtypes : str, type, dict, default None If a string or type, the data type to store all columns. If a dictionary, a mapping of column names and indices (zero-indexed) to specific data types. index_dtypes : str, type, dict, default None If a string or type, the data type to store all index levels. If a dictionary, a mapping of index level names and indices (zero-indexed) to specific data types. This mapping is applied only if `index=True`. Returns ------- numpy.recarray NumPy ndarray with the DataFrame labels as fields and each row of the DataFrame as entries. 
See Also -------- DataFrame.from_records: Convert structured or record ndarray to DataFrame. numpy.recarray: An ndarray that allows field access using attributes, analogous to typed columns in a spreadsheet. Examples -------- >>> df = ks.DataFrame({'A': [1, 2], 'B': [0.5, 0.75]}, ... index=['a', 'b']) >>> df A B a 1 0.50 b 2 0.75 >>> df.to_records() # doctest: +SKIP rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)], dtype=[('index', 'O'), ('A', '<i8'), ('B', '<f8')]) The index can be excluded from the record array: >>> df.to_records(index=False) # doctest: +SKIP rec.array([(1, 0.5 ), (2, 0.75)], dtype=[('A', '<i8'), ('B', '<f8')]) Specification of dtype for columns is new in Pandas 0.24.0. Data types can be specified for the columns: >>> df.to_records(column_dtypes={"A": "int32"}) # doctest: +SKIP rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)], dtype=[('index', 'O'), ('A', '<i4'), ('B', '<f8')]) Specification of dtype for index is new in Pandas 0.24.0. Data types can also be specified for the index: >>> df.to_records(index_dtypes="<S2") # doctest: +SKIP rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)], dtype=[('index', 'S2'), ('A', '<i8'), ('B', '<f8')]) """ args = locals() kdf = self return validate_arguments_and_invoke_function( kdf.to_pandas(), self.to_records, pd.DataFrame.to_records, args) def copy(self) -> 'DataFrame': """ Make a copy of this object's indices and data. Returns ------- copy : DataFrame """ return DataFrame(self._sdf, self._metadata.copy()) def dropna(self, axis=0, how='any', thresh=None, subset=None, inplace=False): """ Remove missing values. Parameters ---------- axis : {0 or 'index'}, default 0 Determine if rows or columns which contain missing values are removed. * 0, or 'index' : Drop rows which contain missing values. how : {'any', 'all'}, default 'any' Determine if row or column is removed from DataFrame, when we have at least one NA or all NA. * 'any' : If any NA values are present, drop that row or column. * 'all' : If all values are NA, drop that row or column. thresh : int, optional Require that many non-NA values. subset : array-like, optional Labels along other axis to consider, e.g. if you are dropping rows these would be a list of columns to include. inplace : bool, default False If True, do operation inplace and return None. Returns ------- DataFrame DataFrame with NA entries dropped from it. See Also -------- DataFrame.drop : Drop specified labels from columns. DataFrame.isnull: Indicate missing values. DataFrame.notnull : Indicate existing (non-missing) values. Examples -------- >>> df = ks.DataFrame({"name": ['Alfred', 'Batman', 'Catwoman'], ... "toy": [None, 'Batmobile', 'Bullwhip'], ... "born": [None, "1940-04-25", None]}, ... columns=['name', 'toy', 'born']) >>> df name toy born 0 Alfred None None 1 Batman Batmobile 1940-04-25 2 Catwoman Bullwhip None Drop the rows where at least one element is missing. >>> df.dropna() name toy born 1 Batman Batmobile 1940-04-25 Drop the rows where all elements are missing. >>> df.dropna(how='all') name toy born 0 Alfred None None 1 Batman Batmobile 1940-04-25 2 Catwoman Bullwhip None Keep only the rows with at least 2 non-NA values. >>> df.dropna(thresh=2) name toy born 1 Batman Batmobile 1940-04-25 2 Catwoman Bullwhip None Define in which columns to look for missing values. >>> df.dropna(subset=['name', 'born']) name toy born 1 Batman Batmobile 1940-04-25 Keep the DataFrame with valid entries in the same variable. 
>>> df.dropna(inplace=True) >>> df name toy born 1 Batman Batmobile 1940-04-25 """ if axis == 0 or axis == 'index': if subset is not None: if isinstance(subset, str): columns = [subset] else: columns = list(subset) invalids = [column for column in columns if column not in self._metadata.data_columns] if len(invalids) > 0: raise KeyError(invalids) else: columns = list(self.columns) cnt = reduce(lambda x, y: x + y, [F.when(self[column].notna()._scol, 1).otherwise(0) for column in columns], F.lit(0)) if thresh is not None: pred = cnt >= F.lit(int(thresh)) elif how == 'any': pred = cnt == F.lit(len(columns)) elif how == 'all': pred = cnt > F.lit(0) else: if how is not None: raise ValueError('invalid how option: {h}'.format(h=how)) else: raise TypeError('must specify how or thresh') sdf = self._sdf.filter(pred) if inplace: self._sdf = sdf else: return DataFrame(sdf, self._metadata.copy()) else: raise NotImplementedError("dropna currently only works for axis=0 or axis='index'") def fillna(self, value=None, axis=None, inplace=False): """Fill NA/NaN values. Parameters ---------- value : scalar, dict, Series Value to use to fill holes. alternately a dict/Series of values specifying which value to use for each column. DataFrame is not supported. axis : {0 or `index`} 1 and `columns` are not supported. inplace : boolean, default False Fill in place (do not create a new object) Returns ------- DataFrame DataFrame with NA entries filled. Examples -------- >>> df = ks.DataFrame({ ... 'A': [None, 3, None, None], ... 'B': [2, 4, None, 3], ... 'C': [None, None, None, 1], ... 'D': [0, 1, 5, 4] ... }, ... columns=['A', 'B', 'C', 'D']) >>> df A B C D 0 NaN 2.0 NaN 0 1 3.0 4.0 NaN 1 2 NaN NaN NaN 5 3 NaN 3.0 1.0 4 Replace all NaN elements with 0s. >>> df.fillna(0) A B C D 0 0.0 2.0 0.0 0 1 3.0 4.0 0.0 1 2 0.0 0.0 0.0 5 3 0.0 3.0 1.0 4 Replace all NaN elements in column 'A', 'B', 'C', and 'D', with 0, 1, 2, and 3 respectively. >>> values = {'A': 0, 'B': 1, 'C': 2, 'D': 3} >>> df.fillna(value=values) A B C D 0 0.0 2.0 2.0 0 1 3.0 4.0 2.0 1 2 0.0 1.0 2.0 5 3 0.0 3.0 1.0 4 """ if axis is None: axis = 0 if not (axis == 0 or axis == "index"): raise NotImplementedError("fillna currently only works for axis=0 or axis='index'") if value is None: raise ValueError('Currently must specify value') if not isinstance(value, (float, int, str, bool, dict, pd.Series)): raise TypeError("Unsupported type %s" % type(value)) if isinstance(value, pd.Series): value = value.to_dict() if isinstance(value, dict): for v in value.values(): if not isinstance(v, (float, int, str, bool)): raise TypeError("Unsupported type %s" % type(v)) sdf = self._sdf.fillna(value) if inplace: self._sdf = sdf else: return DataFrame(sdf, self._metadata.copy()) def clip(self, lower: Union[float, int] = None, upper: Union[float, int] = None) \ -> 'DataFrame': """ Trim values at input threshold(s). Assigns values outside boundary to boundary values. Parameters ---------- lower : float or int, default None Minimum threshold value. All values below this threshold will be set to it. upper : float or int, default None Maximum threshold value. All values above this threshold will be set to it. Returns ------- DataFrame DataFrame with the values outside the clip boundaries replaced. 
Examples -------- >>> ks.DataFrame({'A': [0, 2, 4]}).clip(1, 3) A 0 1 1 2 2 3 Notes ----- One difference between this implementation and pandas is that running pd.DataFrame({'A': ['a', 'b']}).clip(0, 1) will crash with "TypeError: '<=' not supported between instances of 'str' and 'int'" while ks.DataFrame({'A': ['a', 'b']}).clip(0, 1) will output the original DataFrame, simply ignoring the incompatible types. """ if is_list_like(lower) or is_list_like(upper): raise ValueError("List-like value are not supported for 'lower' and 'upper' at the " + "moment") if lower is None and upper is None: return self sdf = self._sdf numeric_types = (DecimalType, DoubleType, FloatType, ByteType, IntegerType, LongType, ShortType) numeric_columns = [c for c in self.columns if isinstance(sdf.schema[c].dataType, numeric_types)] nonnumeric_columns = [c for c in self.columns if not isinstance(sdf.schema[c].dataType, numeric_types)] if lower is not None: sdf = sdf.select(*[F.when(F.col(c) < lower, lower).otherwise(F.col(c)).alias(c) for c in numeric_columns] + nonnumeric_columns) if upper is not None: sdf = sdf.select(*[F.when(F.col(c) > upper, upper).otherwise(F.col(c)).alias(c) for c in numeric_columns] + nonnumeric_columns) # Restore initial column order sdf = sdf.select(list(self.columns)) return ks.DataFrame(sdf) def head(self, n=5): """ Return the first `n` rows. This function returns the first `n` rows for the object based on position. It is useful for quickly testing if your object has the right type of data in it. Parameters ---------- n : int, default 5 Number of rows to select. Returns ------- obj_head : same type as caller The first `n` rows of the caller object. Examples -------- >>> df = ks.DataFrame({'animal':['alligator', 'bee', 'falcon', 'lion', ... 'monkey', 'parrot', 'shark', 'whale', 'zebra']}) >>> df animal 0 alligator 1 bee 2 falcon 3 lion 4 monkey 5 parrot 6 shark 7 whale 8 zebra Viewing the first 5 lines >>> df.head() animal 0 alligator 1 bee 2 falcon 3 lion 4 monkey Viewing the first `n` lines (three in this case) >>> df.head(3) animal 0 alligator 1 bee 2 falcon """ return DataFrame(self._sdf.limit(n), self._metadata.copy()) @property def columns(self): """The column labels of the DataFrame.""" return pd.Index(self._metadata.data_columns) @columns.setter def columns(self, names): old_names = self._metadata.data_columns if len(old_names) != len(names): raise ValueError( "Length mismatch: Expected axis has %d elements, new values have %d elements" % (len(old_names), len(names))) sdf = self._sdf.select(self._metadata.index_columns + [self[old_name]._scol.alias(new_name) for (old_name, new_name) in zip(old_names, names)]) self._sdf = sdf self._metadata = self._metadata.copy(data_columns=names) @property def dtypes(self): """Return the dtypes in the DataFrame. This returns a Series with the data type of each column. The result's index is the original DataFrame's columns. Columns with mixed types are stored with the object dtype. Returns ------- pd.Series The data type of each column. Examples -------- >>> df = ks.DataFrame({'a': list('abc'), ... 'b': list(range(1, 4)), ... 'c': np.arange(3, 6).astype('i1'), ... 'd': np.arange(4.0, 7.0, dtype='float64'), ... 'e': [True, False, True], ... 'f': pd.date_range('20130101', periods=3)}, ... 
columns=['a', 'b', 'c', 'd', 'e', 'f']) >>> df.dtypes a object b int64 c int8 d float64 e bool f datetime64[ns] dtype: object """ return pd.Series([self[col].dtype for col in self._metadata.data_columns], index=self._metadata.data_columns) def count(self): """ Count non-NA cells for each column. The values `None`, `NaN` are considered NA. Returns ------- pandas.Series See Also -------- Series.count: Number of non-NA elements in a Series. DataFrame.shape: Number of DataFrame rows and columns (including NA elements). DataFrame.isna: Boolean same-sized DataFrame showing places of NA elements. Examples -------- Constructing DataFrame from a dictionary: >>> df = ks.DataFrame({"Person": ... ["John", "Myla", "Lewis", "John", "Myla"], ... "Age": [24., np.nan, 21., 33, 26], ... "Single": [False, True, True, True, False]}, ... columns=["Person", "Age", "Single"]) >>> df Person Age Single 0 John 24.0 False 1 Myla NaN True 2 Lewis 21.0 True 3 John 33.0 True 4 Myla 26.0 False Notice the uncounted NA values: >>> df.count() Person 5 Age 4 Single 5 dtype: int64 """ return self._reduce_for_stat_function(_Frame._count_expr) def drop(self, labels=None, axis=1, columns: Union[str, List[str]] = None): """ Drop specified labels from columns. Remove columns by specifying label names and axis=1 or columns. When specifying both labels and columns, only labels will be dropped. Removing rows is yet to be implemented. Parameters ---------- labels : single label or list-like Column labels to drop. axis : {1 or 'columns'}, default 1 .. dropna currently only works for axis=1 'columns' axis=0 is yet to be implemented. columns : single label or list-like Alternative to specifying axis (``labels, axis=1`` is equivalent to ``columns=labels``). Returns ------- dropped : DataFrame See Also -------- Series.dropna Examples -------- >>> df = ks.DataFrame({'x': [1, 2], 'y': [3, 4], 'z': [5, 6], 'w': [7, 8]}, ... columns=['x', 'y', 'z', 'w']) >>> df x y z w 0 1 3 5 7 1 2 4 6 8 >>> df.drop('x', axis=1) y z w 0 3 5 7 1 4 6 8 >>> df.drop(['y', 'z'], axis=1) x w 0 1 7 1 2 8 >>> df.drop(columns=['y', 'z']) x w 0 1 7 1 2 8 Notes ----- Currently only axis = 1 is supported in this function, axis = 0 is yet to be implemented. """ if labels is not None: axis = self._validate_axis(axis) if axis == 1: return self.drop(columns=labels) raise NotImplementedError("Drop currently only works for axis=1") elif columns is not None: if isinstance(columns, str): columns = [columns] sdf = self._sdf.drop(*columns) metadata = self._metadata.copy( data_columns=[column for column in self.columns if column not in columns] ) return DataFrame(sdf, metadata) else: raise ValueError("Need to specify at least one of 'labels' or 'columns'") def get(self, key, default=None): """ Get item from object for given key (DataFrame column, Panel slice, etc.). Returns default value if not found. Parameters ---------- key : object Returns ------- value : same type as items contained in object Examples -------- >>> df = ks.DataFrame({'x':range(3), 'y':['a','b','b'], 'z':['a','b','b']}, ... columns=['x', 'y', 'z']) >>> df x y z 0 0 a a 1 1 b b 2 2 b b >>> df.get('x') 0 0 1 1 2 2 Name: x, dtype: int64 >>> df.get(['x', 'y']) x y 0 0 a 1 1 b 2 2 b """ try: return self._pd_getitem(key) except (KeyError, ValueError, IndexError): return default def sort_values(self, by, ascending=True, inplace=False, na_position='last'): """ Sort by the values along either axis. Parameters ---------- by : str or list of str ascending : bool or list of bool, default True Sort ascending vs. descending. 
Specify list for multiple sort orders. If this is a list of bools, must match the length of the by. inplace : bool, default False if True, perform operation in-place na_position : {'first', 'last'}, default 'last' `first` puts NaNs at the beginning, `last` puts NaNs at the end Returns ------- sorted_obj : DataFrame Examples -------- >>> df = ks.DataFrame({ ... 'col1': ['A', 'B', None, 'D', 'C'], ... 'col2': [2, 9, 8, 7, 4], ... 'col3': [0, 9, 4, 2, 3], ... }, ... columns=['col1', 'col2', 'col3']) >>> df col1 col2 col3 0 A 2 0 1 B 9 9 2 None 8 4 3 D 7 2 4 C 4 3 Sort by col1 >>> df.sort_values(by=['col1']) col1 col2 col3 0 A 2 0 1 B 9 9 4 C 4 3 3 D 7 2 2 None 8 4 Sort Descending >>> df.sort_values(by='col1', ascending=False) col1 col2 col3 3 D 7 2 4 C 4 3 1 B 9 9 0 A 2 0 2 None 8 4 Sort by multiple columns >>> df = ks.DataFrame({ ... 'col1': ['A', 'A', 'B', None, 'D', 'C'], ... 'col2': [2, 1, 9, 8, 7, 4], ... 'col3': [0, 1, 9, 4, 2, 3], ... }, ... columns=['col1', 'col2', 'col3']) >>> df.sort_values(by=['col1', 'col2']) col1 col2 col3 1 A 1 1 0 A 2 0 2 B 9 9 5 C 4 3 4 D 7 2 3 None 8 4 """ if isinstance(by, str): by = [by] if isinstance(ascending, bool): ascending = [ascending] * len(by) if len(ascending) != len(by): raise ValueError('Length of ascending ({}) != length of by ({})' .format(len(ascending), len(by))) if na_position not in ('first', 'last'): raise ValueError("invalid na_position: '{}'".format(na_position)) # Mapper: Get a spark column function for (ascending, na_position) combination # Note that 'asc_nulls_first' and friends were added as of Spark 2.4, see SPARK-23847. mapper = { (True, 'first'): lambda x: Column(getattr(x._jc, "asc_nulls_first")()), (True, 'last'): lambda x: Column(getattr(x._jc, "asc_nulls_last")()), (False, 'first'): lambda x: Column(getattr(x._jc, "desc_nulls_first")()), (False, 'last'): lambda x: Column(getattr(x._jc, "desc_nulls_last")()), } by = [mapper[(asc, na_position)](self[colname]._scol) for colname, asc in zip(by, ascending)] kdf = DataFrame(self._sdf.sort(*by), self._metadata.copy()) if inplace: self._sdf = kdf._sdf self._metadata = kdf._metadata else: return kdf # TODO: add keep = First def nlargest(self, n: int, columns: 'Any') -> 'DataFrame': """ Return the first `n` rows ordered by `columns` in descending order. Return the first `n` rows with the largest values in `columns`, in descending order. The columns that are not specified are returned as well, but not used for ordering. This method is equivalent to ``df.sort_values(columns, ascending=False).head(n)``, but more performant in Pandas. In Koalas, thanks to Spark's lazy execution and query optimizer, the two would have same performance. Parameters ---------- n : int Number of rows to return. columns : label or list of labels Column label(s) to order by. Returns ------- DataFrame The first `n` rows ordered by the given columns in descending order. See Also -------- DataFrame.nsmallest : Return the first `n` rows ordered by `columns` in ascending order. DataFrame.sort_values : Sort DataFrame by the values. DataFrame.head : Return the first `n` rows without re-ordering. Notes ----- This function cannot be used with all column types. For example, when specifying columns with `object` or `category` dtypes, ``TypeError`` is raised. Examples -------- >>> df = ks.DataFrame({'X': [1, 2, 3, 5, 6, 7, np.nan], ... 
'Y': [6, 7, 8, 9, 10, 11, 12]}) >>> df X Y 0 1.0 6 1 2.0 7 2 3.0 8 3 5.0 9 4 6.0 10 5 7.0 11 6 NaN 12 In the following example, we will use ``nlargest`` to select the three rows having the largest values in column "population". >>> df.nlargest(n=3, columns='X') X Y 5 7.0 11 4 6.0 10 3 5.0 9 >>> df.nlargest(n=3, columns=['Y', 'X']) X Y 6 NaN 12 5 7.0 11 4 6.0 10 """ return self.sort_values(by=columns, ascending=False).head(n=n) # TODO: add keep = First def nsmallest(self, n: int, columns: 'Any') -> 'DataFrame': """ Return the first `n` rows ordered by `columns` in ascending order. Return the first `n` rows with the smallest values in `columns`, in ascending order. The columns that are not specified are returned as well, but not used for ordering. This method is equivalent to ``df.sort_values(columns, ascending=True).head(n)``, but more performant. In Koalas, thanks to Spark's lazy execution and query optimizer, the two would have same performance. Parameters ---------- n : int Number of items to retrieve. columns : list or str Column name or names to order by. Returns ------- DataFrame See Also -------- DataFrame.nlargest : Return the first `n` rows ordered by `columns` in descending order. DataFrame.sort_values : Sort DataFrame by the values. DataFrame.head : Return the first `n` rows without re-ordering. Examples -------- >>> df = ks.DataFrame({'X': [1, 2, 3, 5, 6, 7, np.nan], ... 'Y': [6, 7, 8, 9, 10, 11, 12]}) >>> df X Y 0 1.0 6 1 2.0 7 2 3.0 8 3 5.0 9 4 6.0 10 5 7.0 11 6 NaN 12 In the following example, we will use ``nsmallest`` to select the three rows having the smallest values in column "a". >>> df.nsmallest(n=3, columns='X') # doctest: +NORMALIZE_WHITESPACE X Y 0 1.0 6 1 2.0 7 2 3.0 8 To order by the largest values in column "a" and then "c", we can specify multiple columns like in the next example. >>> df.nsmallest(n=3, columns=['Y', 'X']) # doctest: +NORMALIZE_WHITESPACE X Y 0 1.0 6 1 2.0 7 2 3.0 8 """ return self.sort_values(by=columns, ascending=True).head(n=n) def isin(self, values): """ Whether each element in the DataFrame is contained in values. Parameters ---------- values : iterable or dict The sequence of values to test. If values is a dict, the keys must be the column names, which must match. Series and DataFrame are not supported. Returns ------- DataFrame DataFrame of booleans showing whether each element in the DataFrame is contained in values. Examples -------- >>> df = ks.DataFrame({'num_legs': [2, 4], 'num_wings': [2, 0]}, ... index=['falcon', 'dog'], ... 
columns=['num_legs', 'num_wings']) >>> df num_legs num_wings falcon 2 2 dog 4 0 When ``values`` is a list check whether every value in the DataFrame is present in the list (which animals have 0 or 2 legs or wings) >>> df.isin([0, 2]) num_legs num_wings falcon True True dog False True When ``values`` is a dict, we can pass values to check for each column separately: >>> df.isin({'num_wings': [0, 3]}) num_legs num_wings falcon False False dog False True """ if isinstance(values, (pd.DataFrame, pd.Series)): raise NotImplementedError("DataFrame and Series are not supported") if isinstance(values, dict) and not set(values.keys()).issubset(self.columns): raise AttributeError( "'DataFrame' object has no attribute %s" % (set(values.keys()).difference(self.columns))) _select_columns = self._metadata.index_columns if isinstance(values, dict): for col in self.columns: if col in values: _select_columns.append(self[col]._scol.isin(values[col]).alias(col)) else: _select_columns.append(F.lit(False).alias(col)) elif is_list_like(values): _select_columns += [ self[col]._scol.isin(list(values)).alias(col) for col in self.columns] else: raise TypeError('Values should be iterable, Series, DataFrame or dict.') return DataFrame(self._sdf.select(_select_columns), self._metadata.copy()) def pipe(self, func, *args, **kwargs): r""" Apply func(self, \*args, \*\*kwargs). Parameters ---------- func : function function to apply to the DataFrame. ``args``, and ``kwargs`` are passed into ``func``. Alternatively a ``(callable, data_keyword)`` tuple where ``data_keyword`` is a string indicating the keyword of ``callable`` that expects the DataFrames. args : iterable, optional positional arguments passed into ``func``. kwargs : mapping, optional a dictionary of keyword arguments passed into ``func``. Returns ------- object : the return type of ``func``. Notes ----- Use ``.pipe`` when chaining together functions that expect Series, DataFrames or GroupBy objects. For example, given >>> df = ks.DataFrame({'category': ['A', 'A', 'B'], ... 'col1': [1, 2, 3], ... 'col2': [4, 5, 6]}, ... columns=['category', 'col1', 'col2']) >>> def keep_category_a(df): ... return df[df['category'] == 'A'] >>> def add_one(df, column): ... return df.assign(col3=df[column] + 1) >>> def multiply(df, column1, column2): ... return df.assign(col4=df[column1] * df[column2]) instead of writing >>> multiply(add_one(keep_category_a(df), column="col1"), column1="col2", column2="col3") category col1 col2 col3 col4 0 A 1 4 2 8 1 A 2 5 3 15 You can write >>> (df.pipe(keep_category_a) ... .pipe(add_one, column="col1") ... .pipe(multiply, column1="col2", column2="col3") ... ) category col1 col2 col3 col4 0 A 1 4 2 8 1 A 2 5 3 15 If you have a function that takes the data as (say) the second argument, pass a tuple indicating which keyword expects the data. For example, suppose ``f`` takes its data as ``df``: >>> def multiply_2(column1, df, column2): ... return df.assign(col4=df[column1] * df[column2]) Then you can write >>> (df.pipe(keep_category_a) ... .pipe(add_one, column="col1") ... .pipe((multiply_2, 'df'), column1="col2", column2="col3") ... ) category col1 col2 col3 col4 0 A 1 4 2 8 1 A 2 5 3 15 """ if isinstance(func, tuple): func, target = func if target in kwargs: raise ValueError('%s is both the pipe target and a keyword ' 'argument' % target) kwargs[target] = self return func(*args, **kwargs) else: return func(self, *args, **kwargs) @property def shape(self): """ Return a tuple representing the dimensionality of the DataFrame. 
Examples -------- >>> df = ks.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df.shape (2, 2) >>> df = ks.DataFrame({'col1': [1, 2], 'col2': [3, 4], ... 'col3': [5, 6]}) >>> df.shape (2, 3) """ return len(self), len(self.columns) def merge(self, right: 'DataFrame', how: str = 'inner', on: str = None, left_index: bool = False, right_index: bool = False, suffixes: Tuple[str, str] = ('_x', '_y')) -> 'DataFrame': """ Merge DataFrame objects with a database-style join. Parameters ---------- right: Object to merge with. how: Type of merge to be performed. {‘left’, ‘right’, ‘outer’, ‘inner’}, default ‘inner’ left: use only keys from left frame, similar to a SQL left outer join; preserve key order. right: use only keys from right frame, similar to a SQL right outer join; preserve key order. outer: use union of keys from both frames, similar to a SQL full outer join; sort keys lexicographically. inner: use intersection of keys from both frames, similar to a SQL inner join; preserve the order of the left keys. on: Column or index level names to join on. These must be found in both DataFrames. If on is None and not merging on indexes then this defaults to the intersection of the columns in both DataFrames. left_index: Use the index from the left DataFrame as the join key(s). If it is a MultiIndex, the number of keys in the other DataFrame (either the index or a number of columns) must match the number of levels. right_index: Use the index from the right DataFrame as the join key. Same caveats as left_index. suffixes: Suffix to apply to overlapping column names in the left and right side, respectively. Returns ------- DataFrame A DataFrame of the two merged objects. Examples -------- >>> left_kdf = ks.DataFrame({'A': [1, 2]}) >>> right_kdf = ks.DataFrame({'B': ['x', 'y']}, index=[1, 2]) >>> left_kdf.merge(right_kdf, left_index=True, right_index=True) A B 0 2 x >>> left_kdf.merge(right_kdf, left_index=True, right_index=True, how='left') A B 0 1 None 1 2 x >>> left_kdf.merge(right_kdf, left_index=True, right_index=True, how='right') A B 0 2.0 x 1 NaN y >>> left_kdf.merge(right_kdf, left_index=True, right_index=True, how='outer') A B 0 1.0 None 1 2.0 x 2 NaN y Notes ----- As described in #263, joining string columns currently returns None for missing values instead of NaN. """ if on is None and not left_index and not right_index: raise ValueError("At least 'on' or 'left_index' and 'right_index' have to be set") if on is not None and (left_index or right_index): raise ValueError("Only 'on' or 'left_index' and 'right_index' can be set") if how == 'full': warnings.warn("Warning: While Koalas will accept 'full', you should use 'outer' " + "instead to be compatible with the pandas merge API", UserWarning) if how == 'outer': # 'outer' in pandas equals 'full' in Spark how = 'full' if how not in ('inner', 'left', 'right', 'full'): raise ValueError("The 'how' parameter has to be amongst the following values: ", "['inner', 'left', 'right', 'outer']") if on is None: # FIXME Move index string to constant? 
on = '__index_level_0__' left_table = self._sdf.alias('left_table') right_table = right._sdf.alias('right_table') # Unpack suffixes tuple for convenience left_suffix = suffixes[0] right_suffix = suffixes[1] # Append suffixes to columns with the same name to avoid conflicts later duplicate_columns = list(self.columns & right.columns) if duplicate_columns: for duplicate_column_name in duplicate_columns: left_table = left_table.withColumnRenamed(duplicate_column_name, duplicate_column_name + left_suffix) right_table = right_table.withColumnRenamed(duplicate_column_name, duplicate_column_name + right_suffix) join_condition = (left_table[on] == right_table[on] if on not in duplicate_columns else left_table[on + left_suffix] == right_table[on + right_suffix]) joined_table = left_table.join(right_table, join_condition, how=how) if on in duplicate_columns: # Merge duplicate key columns joined_table = joined_table.withColumnRenamed(on + left_suffix, on) joined_table = joined_table.drop(on + right_suffix) # Remove auxiliary index # FIXME Move index string to constant? joined_table = joined_table.drop('__index_level_0__') kdf = DataFrame(joined_table) return kdf def sample(self, n: Optional[int] = None, frac: Optional[float] = None, replace: bool = False, random_state: Optional[int] = None) -> 'DataFrame': """ Return a random sample of items from an axis of object. Please call this function using named argument by specifing the ``frac`` argument. You can use `random_state` for reproducibility. However, note that different from pandas, specifying a seed in Koalas/Spark does not guarantee the sampled rows will be fixed. The result set depends on not only the seed, but also how the data is distributed across machines and to some extent network randomness when shuffle operations are involved. Even in the simplest case, the result set will depend on the system's CPU core count. Parameters ---------- n : int, optional Number of items to return. This is currently NOT supported. Use frac instead. frac : float, optional Fraction of axis items to return. replace : bool, default False Sample with or without replacement. random_state : int, optional Seed for the random number generator (if int). Returns ------- Series or DataFrame A new object of same type as caller containing the sampled items. Examples -------- >>> df = ks.DataFrame({'num_legs': [2, 4, 8, 0], ... 'num_wings': [2, 0, 0, 0], ... 'num_specimen_seen': [10, 2, 1, 8]}, ... index=['falcon', 'dog', 'spider', 'fish'], ... columns=['num_legs', 'num_wings', 'num_specimen_seen']) >>> df # doctest: +SKIP num_legs num_wings num_specimen_seen falcon 2 2 10 dog 4 0 2 spider 8 0 1 fish 0 0 8 A random 25% sample of the ``DataFrame``. Note that we use `random_state` to ensure the reproducibility of the examples. >>> df.sample(frac=0.25, random_state=1) # doctest: +SKIP num_legs num_wings num_specimen_seen falcon 2 2 10 fish 0 0 8 Extract 25% random elements from the ``Series`` ``df['num_legs']``, with replacement, so the same items could appear more than once. >>> df['num_legs'].sample(frac=0.4, replace=True, random_state=1) # doctest: +SKIP falcon 2 spider 8 spider 8 Name: num_legs, dtype: int64 Specifying the exact number of items to return is not supported at the moment. >>> df.sample(n=5) # doctest: +ELLIPSIS Traceback (most recent call last): ... NotImplementedError: Function sample currently does not support specifying ... """ # Note: we don't run any of the doctests because the result can change depending on the # system's core count. 
if n is not None: raise NotImplementedError("Function sample currently does not support specifying " "exact number of items to return. Use frac instead.") if frac is None: raise ValueError("frac must be specified.") sdf = self._sdf.sample(withReplacement=replace, fraction=frac, seed=random_state) return DataFrame(sdf, self._metadata.copy()) def astype(self, dtype) -> 'DataFrame': """ Cast a pandas object to a specified dtype ``dtype``. Parameters ---------- dtype : data type, or dict of column name -> data type Use a numpy.dtype or Python type to cast entire pandas object to the same type. Alternatively, use {col: dtype, ...}, where col is a column label and dtype is a numpy.dtype or Python type to cast one or more of the DataFrame's columns to column-specific types. Returns ------- casted : same type as caller See Also -------- to_datetime : Convert argument to datetime. Examples -------- >>> df = ks.DataFrame({'a': [1, 2, 3], 'b': [1, 2, 3]}, dtype='int64') >>> df a b 0 1 1 1 2 2 2 3 3 Convert to float type: >>> df.astype('float') a b 0 1.0 1.0 1 2.0 2.0 2 3.0 3.0 Convert to int64 type back: >>> df.astype('int64') a b 0 1 1 1 2 2 2 3 3 Convert column a to float type: >>> df.astype({'a': float}) a b 0 1.0 1 1 2.0 2 2 3.0 3 """ results = [] if is_dict_like(dtype): for col_name in dtype.keys(): if col_name not in self.columns: raise KeyError('Only a column name can be used for the ' 'key in a dtype mappings argument.') for col_name, col in self.iteritems(): if col_name in dtype: results.append(col.astype(dtype=dtype[col_name])) else: results.append(col) else: for col_name, col in self.iteritems(): results.append(col.astype(dtype=dtype)) sdf = self._sdf.select( self._metadata.index_columns + list(map(lambda ser: ser._scol, results))) return DataFrame(sdf, self._metadata.copy()) # TODO: percentiles, include, and exclude should be implemented. def describe(self) -> 'DataFrame': """ Generate descriptive statistics that summarize the central tendency, dispersion and shape of a dataset's distribution, excluding ``NaN`` values. Analyzes both numeric and object series, as well as ``DataFrame`` column sets of mixed data types. The output will vary depending on what is provided. Refer to the notes below for more detail. Returns ------- Series or DataFrame Summary statistics of the Series or Dataframe provided. See Also -------- DataFrame.count: Count number of non-NA/null observations. DataFrame.max: Maximum of the values in the object. DataFrame.min: Minimum of the values in the object. DataFrame.mean: Mean of the values. DataFrame.std: Standard deviation of the obersvations. Notes ----- For numeric data, the result's index will include ``count``, ``mean``, ``stddev``, ``min``, ``max``. Currently only numeric data is supported. Examples -------- Describing a numeric ``Series``. >>> s = ks.Series([1, 2, 3]) >>> s.describe() count 3.0 mean 2.0 stddev 1.0 min 1.0 max 3.0 Name: 0, dtype: float64 Describing a ``DataFrame``. Only numeric fields are returned. >>> df = ks.DataFrame({'numeric1': [1, 2, 3], ... 'numeric2': [4.0, 5.0, 6.0], ... 'object': ['a', 'b', 'c'] ... }, ... columns=['numeric1', 'numeric2', 'object']) >>> df.describe() numeric1 numeric2 count 3.0 3.0 mean 2.0 5.0 stddev 1.0 1.0 min 1.0 4.0 max 3.0 6.0 Describing a column from a ``DataFrame`` by accessing it as an attribute. 
>>> df.numeric1.describe() count 3.0 mean 2.0 stddev 1.0 min 1.0 max 3.0 Name: numeric1, dtype: float64 """ exprs = [] data_columns = [] for col in self.columns: kseries = self[col] spark_type = kseries.spark_type if isinstance(spark_type, DoubleType) or isinstance(spark_type, FloatType): exprs.append(F.nanvl(kseries._scol, F.lit(None)).alias(kseries.name)) data_columns.append(kseries.name) elif isinstance(spark_type, NumericType): exprs.append(kseries._scol) data_columns.append(kseries.name) if len(exprs) == 0: raise ValueError("Cannot describe a DataFrame without columns") sdf = self._sdf.select(*exprs).describe() return DataFrame(sdf, index=Metadata(data_columns=data_columns, index_map=[('summary', None)])).astype('float64') def _pd_getitem(self, key): from databricks.koalas.series import Series if key is None: raise KeyError("none key") if isinstance(key, str): try: return Series(self._sdf.__getitem__(key), anchor=self, index=self._metadata.index_map) except AnalysisException: raise KeyError(key) if np.isscalar(key) or isinstance(key, (tuple, str)): raise NotImplementedError(key) elif isinstance(key, slice): return self.loc[key] if isinstance(key, (pd.Series, np.ndarray, pd.Index)): raise NotImplementedError(key) if isinstance(key, list): return self.loc[:, key] if isinstance(key, DataFrame): # TODO Should not implement alignment, too dangerous? return Series(self._sdf.__getitem__(key), anchor=self, index=self._metadata.index_map) if isinstance(key, Series): # TODO Should not implement alignment, too dangerous? # It is assumed to be only a filter, otherwise .loc should be used. bcol = key._scol.cast("boolean") return DataFrame(self._sdf.filter(bcol), self._metadata.copy()) raise NotImplementedError(key) def __repr__(self): pdf = self.head(max_display_count + 1).to_pandas() pdf_length = len(pdf) repr_string = repr(pdf.iloc[:max_display_count]) if pdf_length > max_display_count: match = REPR_PATTERN.search(repr_string) if match is not None: nrows = match.group("rows") ncols = match.group("columns") footer = ("\n\n[Showing only the first {nrows} rows x {ncols} columns]" .format(nrows=nrows, ncols=ncols)) return REPR_PATTERN.sub(footer, repr_string) return repr_string def _repr_html_(self): pdf = self.head(max_display_count + 1).to_pandas() pdf_length = len(pdf) repr_html = pdf[:max_display_count]._repr_html_() if pdf_length > max_display_count: match = REPR_HTML_PATTERN.search(repr_html) if match is not None: nrows = match.group("rows") ncols = match.group("columns") by = chr(215) footer = ('\n<p>Showing only the first {rows} rows {by} {cols} columns</p>\n</div>' .format(rows=nrows, by=by, cols=ncols)) return REPR_HTML_PATTERN.sub(footer, repr_html) return repr_html def __getitem__(self, key): return self._pd_getitem(key) def __setitem__(self, key, value): from databricks.koalas.series import Series # For now, we don't support realignment against different dataframes. # This is too expensive in Spark. # Are we assigning against a column? 
if isinstance(value, Series): assert value._kdf is self, \ "Cannot combine column argument because it comes from a different dataframe" if isinstance(key, (tuple, list)): assert isinstance(value.schema, StructType) field_names = value.schema.fieldNames() kdf = self.assign(**{k: value[c] for k, c in zip(key, field_names)}) else: kdf = self.assign(**{key: value}) self._sdf = kdf._sdf self._metadata = kdf._metadata def __getattr__(self, key: str) -> Any: from databricks.koalas.series import Series if key.startswith("__") or key.startswith("_pandas_") or key.startswith("_spark_"): raise AttributeError(key) if hasattr(_MissingPandasLikeDataFrame, key): property_or_func = getattr(_MissingPandasLikeDataFrame, key) if isinstance(property_or_func, property): return property_or_func.fget(self) # type: ignore else: return partial(property_or_func, self) return Series(self._sdf.__getattr__(key), anchor=self, index=self._metadata.index_map) def __len__(self): return self._sdf.count() def __dir__(self): fields = [f for f in self._sdf.schema.fieldNames() if ' ' not in f] return super(DataFrame, self).__dir__() + fields @classmethod def _validate_axis(cls, axis=0): if axis not in (0, 1, 'index', 'columns', None): raise ValueError('No axis named {0}'.format(axis)) # convert to numeric axis return {None: 0, 'index': 0, 'columns': 1}.get(axis, axis) def _reduce_spark_multi(sdf, aggs): """ Performs a reduction on a dataframe, the functions being known sql aggregate functions. """ assert isinstance(sdf, spark.DataFrame) sdf0 = sdf.agg(*aggs) l = sdf0.head(2) assert len(l) == 1, (sdf, l) row = l[0] l2 = list(row) assert len(l2) == len(aggs), (row, l2) return l2
1
9,546
Could you add a type hint? `percentiles: Optional[List[float]] = None`
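A minimal Python sketch of the reviewer's suggested signature, assuming a pandas-style `percentiles` parameter for the `describe` method in the file above; the range check is an assumption mirroring pandas behaviour, not the library's actual implementation:

from typing import List, Optional

def describe(percentiles: Optional[List[float]] = None) -> None:
    # Assumed, pandas-style validation: every requested percentile must lie in [0, 1].
    if percentiles is not None and any(not 0 <= p <= 1 for p in percentiles):
        raise ValueError("percentiles should all be in the interval [0, 1]")

describe(percentiles=[0.25, 0.5, 0.75])  # accepted
describe()                               # default: no percentiles requested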
databricks-koalas
py
@@ -0,0 +1,16 @@ +module Ncr + module WorkOrdersHelper + def approver_options + # @todo should this list be limited by client/something else? + # @todo is there a better order? maybe by current_user's use? + User.order(:email_address).pluck(:email_address) + end + + def building_options + custom = Ncr::WorkOrder.where.not(building_number: nil).pluck('DISTINCT building_number') + all = custom + Ncr::BUILDING_NUMBERS + # @todo is there a better order? maybe by current_user's use? + all.uniq.sort + end + end +end
1
1
13,371
Maybe putting ones they've used before first would be good, but this is fine for now.
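A small Python sketch of the reviewer's idea: order the options by how often the current user has picked them before, then alphabetically. The function name and sample data are hypothetical illustrations, not part of the Ruby helper in the patch above:

from collections import Counter

def order_by_prior_usage(options, prior_selections):
    # Most frequently used options come first; unused ones fall back to alphabetical order.
    usage = Counter(prior_selections)
    return sorted(set(options), key=lambda o: (-usage[o], o))

# Hypothetical usage history for the current user.
print(order_by_prior_usage(["B1", "A2", "C3"], ["C3", "C3", "A2"]))
# ['C3', 'A2', 'B1']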
18F-C2
rb
@@ -105,6 +105,9 @@ func newVXLANManager( blackHoleProto = dpConfig.DeviceRouteProtocol } + noencTarget := routetable.Target{Type: routetable.TargetTypeNoEncap} + bhTarget := routetable.Target{Type: routetable.TargetTypeBlackhole} + brt := routetable.New( []string{routetable.InterfaceNone}, 4,
1
// Copyright (c) 2016-2021 Tigera, Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package intdataplane import ( "errors" "fmt" "net" "reflect" "strings" "sync" "syscall" "time" "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus" "github.com/vishvananda/netlink" "github.com/projectcalico/felix/dataplane/common" "github.com/projectcalico/felix/ethtool" "github.com/projectcalico/felix/ip" "github.com/projectcalico/felix/ipsets" "github.com/projectcalico/felix/logutils" "github.com/projectcalico/felix/proto" "github.com/projectcalico/felix/routetable" "github.com/projectcalico/felix/rules" ) // added so that we can shim netlink for tests type netlinkHandle interface { LinkByName(name string) (netlink.Link, error) LinkSetMTU(link netlink.Link, mtu int) error LinkSetUp(link netlink.Link) error AddrList(link netlink.Link, family int) ([]netlink.Addr, error) AddrAdd(link netlink.Link, addr *netlink.Addr) error AddrDel(link netlink.Link, addr *netlink.Addr) error LinkList() ([]netlink.Link, error) LinkAdd(netlink.Link) error LinkDel(netlink.Link) error } type vxlanManager struct { sync.Mutex // Our dependencies. hostname string routeTable routeTable blackholeRouteTable routeTable noEncapRouteTable routeTable // Hold pending updates. routesByDest map[string]*proto.RouteUpdate localIPAMBlocks map[string]*proto.RouteUpdate vtepsByNode map[string]*proto.VXLANTunnelEndpointUpdate // Holds this node's VTEP information. myVTEP *proto.VXLANTunnelEndpointUpdate // VXLAN configuration. vxlanDevice string vxlanID int vxlanPort int // Indicates if configuration has changed since the last apply. 
routesDirty bool ipsetsDataplane common.IPSetsDataplane ipSetMetadata ipsets.IPSetMetadata externalNodeCIDRs []string vtepsDirty bool nlHandle netlinkHandle dpConfig Config noEncapProtocol netlink.RouteProtocol // Used so that we can shim the no encap route table for the tests noEncapRTConstruct func(interfacePrefixes []string, ipVersion uint8, vxlan bool, netlinkTimeout time.Duration, deviceRouteSourceAddress net.IP, deviceRouteProtocol netlink.RouteProtocol, removeExternalRoutes bool) routeTable } const ( defaultVXLANProto netlink.RouteProtocol = 80 ) func newVXLANManager( ipsetsDataplane common.IPSetsDataplane, rt routeTable, deviceName string, dpConfig Config, opRecorder logutils.OpRecorder, ) *vxlanManager { nlHandle, _ := netlink.NewHandle() blackHoleProto := defaultVXLANProto if dpConfig.DeviceRouteProtocol != syscall.RTPROT_BOOT { blackHoleProto = dpConfig.DeviceRouteProtocol } brt := routetable.New( []string{routetable.InterfaceNone}, 4, false, dpConfig.NetlinkTimeout, dpConfig.DeviceRouteSourceAddress, blackHoleProto, false, 0, opRecorder, ) return newVXLANManagerWithShims( ipsetsDataplane, rt, brt, deviceName, dpConfig, nlHandle, func(interfaceRegexes []string, ipVersion uint8, vxlan bool, netlinkTimeout time.Duration, deviceRouteSourceAddress net.IP, deviceRouteProtocol netlink.RouteProtocol, removeExternalRoutes bool) routeTable { return routetable.New(interfaceRegexes, ipVersion, vxlan, netlinkTimeout, deviceRouteSourceAddress, deviceRouteProtocol, removeExternalRoutes, 0, opRecorder) }, ) } func newVXLANManagerWithShims( ipsetsDataplane common.IPSetsDataplane, rt, brt routeTable, deviceName string, dpConfig Config, nlHandle netlinkHandle, noEncapRTConstruct func(interfacePrefixes []string, ipVersion uint8, vxlan bool, netlinkTimeout time.Duration, deviceRouteSourceAddress net.IP, deviceRouteProtocol netlink.RouteProtocol, removeExternalRoutes bool) routeTable, ) *vxlanManager { noEncapProtocol := defaultVXLANProto if dpConfig.DeviceRouteProtocol != syscall.RTPROT_BOOT { noEncapProtocol = dpConfig.DeviceRouteProtocol } return &vxlanManager{ ipsetsDataplane: ipsetsDataplane, ipSetMetadata: ipsets.IPSetMetadata{ MaxSize: dpConfig.MaxIPSetSize, SetID: rules.IPSetIDAllVXLANSourceNets, Type: ipsets.IPSetTypeHashNet, }, hostname: dpConfig.Hostname, routeTable: rt, blackholeRouteTable: brt, routesByDest: map[string]*proto.RouteUpdate{}, localIPAMBlocks: map[string]*proto.RouteUpdate{}, vtepsByNode: map[string]*proto.VXLANTunnelEndpointUpdate{}, vxlanDevice: deviceName, vxlanID: dpConfig.RulesConfig.VXLANVNI, vxlanPort: dpConfig.RulesConfig.VXLANPort, externalNodeCIDRs: dpConfig.ExternalNodesCidrs, routesDirty: true, vtepsDirty: true, dpConfig: dpConfig, nlHandle: nlHandle, noEncapProtocol: noEncapProtocol, noEncapRTConstruct: noEncapRTConstruct, } } func (m *vxlanManager) OnUpdate(protoBufMsg interface{}) { switch msg := protoBufMsg.(type) { case *proto.RouteUpdate: // In case the route changes type to one we no longer care about... 
m.deleteRoute(msg.Dst) if msg.Type == proto.RouteType_REMOTE_WORKLOAD && msg.IpPoolType == proto.IPPoolType_VXLAN { logrus.WithField("msg", msg).Debug("VXLAN data plane received route update") m.routesByDest[msg.Dst] = msg m.routesDirty = true } // Process IPAM blocks that aren't associated to a single or /32 local workload if routeIsLocalVXLANBlock(msg) { logrus.WithField("msg", msg).Debug("VXLAN data plane received route update for IPAM block") m.localIPAMBlocks[msg.Dst] = msg m.routesDirty = true } else if _, ok := m.localIPAMBlocks[msg.Dst]; ok { logrus.WithField("msg", msg).Debug("VXLAN data plane IPAM block changed to something else") delete(m.localIPAMBlocks, msg.Dst) m.routesDirty = true } case *proto.RouteRemove: m.deleteRoute(msg.Dst) case *proto.VXLANTunnelEndpointUpdate: logrus.WithField("msg", msg).Debug("VXLAN data plane received VTEP update") if msg.Node == m.hostname { m.setLocalVTEP(msg) } else { m.vtepsByNode[msg.Node] = msg } m.routesDirty = true m.vtepsDirty = true case *proto.VXLANTunnelEndpointRemove: logrus.WithField("msg", msg).Debug("VXLAN data plane received VTEP remove") if msg.Node == m.hostname { m.setLocalVTEP(nil) } else { delete(m.vtepsByNode, msg.Node) } m.routesDirty = true m.vtepsDirty = true } } func routeIsLocalVXLANBlock(msg *proto.RouteUpdate) bool { // RouteType_LOCAL_WORKLOAD means "local IPAM block _or_ /32 of workload" if msg.Type != proto.RouteType_LOCAL_WORKLOAD { return false } // Only care about VXLAN blocks. if msg.IpPoolType != proto.IPPoolType_VXLAN { return false } // Ignore routes that we know are from local workload endpoints. if msg.LocalWorkload { return false } // Ignore /32 routes in any case for two reasons: // * If we have a /32 block then our blackhole route would stop the CNI plugin from programming its /32 for a // newly added workload. // * If this isn't a /32 block then it must be a borrowed /32 from another block. In that case, we know we're // racing with CNI, adding a new workload. We've received the borrowed IP but not the workload endpoint yet. if strings.HasSuffix(msg.Dst, "/32") { return false } return true } func (m *vxlanManager) deleteRoute(dst string) { _, exists := m.routesByDest[dst] if exists { logrus.Debug("deleting route dst ", dst) // In case the route changes type to one we no longer care about... 
delete(m.routesByDest, dst) m.routesDirty = true } if _, exists := m.localIPAMBlocks[dst]; exists { logrus.Debug("deleting local ipam dst ", dst) delete(m.localIPAMBlocks, dst) m.routesDirty = true } } func (m *vxlanManager) setLocalVTEP(vtep *proto.VXLANTunnelEndpointUpdate) { m.Lock() defer m.Unlock() m.myVTEP = vtep } func (m *vxlanManager) getLocalVTEP() *proto.VXLANTunnelEndpointUpdate { m.Lock() defer m.Unlock() return m.myVTEP } func (m *vxlanManager) getLocalVTEPParent() (netlink.Link, error) { return m.getParentInterface(m.getLocalVTEP()) } func (m *vxlanManager) getNoEncapRouteTable() routeTable { m.Lock() defer m.Unlock() return m.noEncapRouteTable } func (m *vxlanManager) setNoEncapRouteTable(rt routeTable) { m.Lock() defer m.Unlock() m.noEncapRouteTable = rt } func (m *vxlanManager) GetRouteTableSyncers() []routeTableSyncer { rts := []routeTableSyncer{m.routeTable, m.blackholeRouteTable} noEncapRouteTable := m.getNoEncapRouteTable() if noEncapRouteTable != nil { rts = append(rts, noEncapRouteTable) } return rts } func (m *vxlanManager) blackholeRoutes() []routetable.Target { var rtt []routetable.Target for dst := range m.localIPAMBlocks { cidr, err := ip.CIDRFromString(dst) if err != nil { logrus.WithError(err).Warning( "Error processing IPAM block CIDR: ", dst, ) continue } rtt = append(rtt, routetable.Target{ Type: routetable.TargetTypeBlackhole, CIDR: cidr, }) } logrus.Debug("calculated blackholes ", rtt) return rtt } func (m *vxlanManager) CompleteDeferredWork() error { if !m.routesDirty { logrus.Debug("No change since last application, nothing to do") return nil } if m.vtepsDirty { var allowedVXLANSources []string if m.vtepsDirty { logrus.Debug("VTEPs are dirty, collecting the allowed VXLAN source set") allowedVXLANSources = append(allowedVXLANSources, m.externalNodeCIDRs...) } // The route table accepts the desired state. Start by setting the desired L2 "routes" by iterating // known VTEPs. var l2routes []routetable.L2Target for _, u := range m.vtepsByNode { mac, err := net.ParseMAC(u.Mac) if err != nil { // Don't block programming of other VTEPs if somehow we receive one with a bad mac. logrus.WithError(err).Warn("Failed to parse VTEP mac address") continue } l2routes = append(l2routes, routetable.L2Target{ VTEPMAC: mac, GW: ip.FromString(u.Ipv4Addr), IP: ip.FromString(u.ParentDeviceIp), }) allowedVXLANSources = append(allowedVXLANSources, u.ParentDeviceIp) } logrus.WithField("l2routes", l2routes).Debug("VXLAN manager sending L2 updates") m.routeTable.SetL2Routes(m.vxlanDevice, l2routes) m.ipsetsDataplane.AddOrReplaceIPSet(m.ipSetMetadata, allowedVXLANSources) m.vtepsDirty = false } if m.routesDirty { // Iterate through all of our L3 routes and send them through to the route table. var vxlanRoutes []routetable.Target var noEncapRoutes []routetable.Target for _, r := range m.routesByDest { logCtx := logrus.WithField("route", r) cidr, err := ip.CIDRFromString(r.Dst) if err != nil { // Don't block programming of other routes if somehow we receive one with a bad dst. 
logCtx.WithError(err).Warn("Failed to parse VXLAN route destination") continue } if r.GetSameSubnet() { if r.DstNodeIp == "" { logCtx.Debug("Can't program non-encap route since host IP is not known.") continue } defaultRoute := routetable.Target{ Type: routetable.TargetTypeNoEncap, CIDR: cidr, GW: ip.FromString(r.DstNodeIp), } noEncapRoutes = append(noEncapRoutes, defaultRoute) logCtx.WithField("route", r).Debug("adding no encap route to list for addition") } else { // Extract the gateway addr for this route based on its remote VTEP. vtep, ok := m.vtepsByNode[r.DstNodeName] if !ok { // When the VTEP arrives, it'll set routesDirty=true so this loop will execute again. logCtx.Debug("Dataplane has route with no corresponding VTEP") continue } vxlanRoute := routetable.Target{ Type: routetable.TargetTypeVXLAN, CIDR: cidr, GW: ip.FromString(vtep.Ipv4Addr), } vxlanRoutes = append(vxlanRoutes, vxlanRoute) logCtx.WithField("route", vxlanRoute).Debug("adding vxlan route to list for addition") } } logrus.WithField("vxlanroutes", vxlanRoutes).Debug("VXLAN manager sending VXLAN L3 updates") m.routeTable.SetRoutes(m.vxlanDevice, vxlanRoutes) m.blackholeRouteTable.SetRoutes(routetable.InterfaceNone, m.blackholeRoutes()) noEncapRouteTable := m.getNoEncapRouteTable() // only set the noEncapRouteTable table if it's nil, as you will lose the routes that are being managed already // and the new table will probably delete routes that were put in there by the previous table if noEncapRouteTable != nil { if parentDevice, err := m.getLocalVTEPParent(); err == nil { ifName := parentDevice.Attrs().Name log.WithField("link", parentDevice).WithField("routes", noEncapRoutes).Debug("VXLAN manager sending unencapsulated L3 updates") noEncapRouteTable.SetRoutes(ifName, noEncapRoutes) } else { return err } } else { return errors.New("no encap route table not set, will defer adding routes") } logrus.Info("VXLAN Manager completed deferred work") m.routesDirty = false } return nil } // KeepVXLANDeviceInSync is a goroutine that configures the VXLAN tunnel device, then periodically // checks that it is still correctly configured. func (m *vxlanManager) KeepVXLANDeviceInSync(mtu int, xsumBroken bool, wait time.Duration) { logrus.WithFields(logrus.Fields{ "mtu": mtu, "xsumBroken": xsumBroken, "wait": wait, }).Info("VXLAN tunnel device thread started.") logNextSuccess := true for { localVTEP := m.getLocalVTEP() if localVTEP == nil { logrus.Debug("Missing local VTEP information, retrying...") time.Sleep(1 * time.Second) continue } if parent, err := m.getLocalVTEPParent(); err != nil { logrus.WithError(err).Warn("Failed configure VXLAN tunnel device, retrying...") time.Sleep(1 * time.Second) continue } else { if m.getNoEncapRouteTable() == nil { noEncapRouteTable := m.noEncapRTConstruct([]string{"^" + parent.Attrs().Name + "$"}, 4, false, m.dpConfig.NetlinkTimeout, m.dpConfig.DeviceRouteSourceAddress, m.noEncapProtocol, false) m.setNoEncapRouteTable(noEncapRouteTable) } } err := m.configureVXLANDevice(mtu, localVTEP, xsumBroken) if err != nil { logrus.WithError(err).Warn("Failed configure VXLAN tunnel device, retrying...") logNextSuccess = true time.Sleep(1 * time.Second) continue } if logNextSuccess { logrus.Info("VXLAN tunnel device configured") logNextSuccess = false } time.Sleep(wait) } } // getParentInterface returns the parent interface for the given local VTEP based on IP address. 
This link returned is nil // if, and only if, an error occurred func (m *vxlanManager) getParentInterface(localVTEP *proto.VXLANTunnelEndpointUpdate) (netlink.Link, error) { links, err := m.nlHandle.LinkList() if err != nil { return nil, err } for _, link := range links { addrs, err := m.nlHandle.AddrList(link, netlink.FAMILY_V4) if err != nil { return nil, err } for _, addr := range addrs { if addr.IPNet.IP.String() == localVTEP.ParentDeviceIp { logrus.Debugf("Found parent interface: %s", link) return link, nil } } } return nil, fmt.Errorf("Unable to find parent interface with address %s", localVTEP.ParentDeviceIp) } // configureVXLANDevice ensures the VXLAN tunnel device is up and configured correctly. func (m *vxlanManager) configureVXLANDevice(mtu int, localVTEP *proto.VXLANTunnelEndpointUpdate, xsumBroken bool) error { logCxt := logrus.WithFields(logrus.Fields{"device": m.vxlanDevice}) logCxt.Debug("Configuring VXLAN tunnel device") parent, err := m.getParentInterface(localVTEP) if err != nil { return err } mac, err := net.ParseMAC(localVTEP.Mac) if err != nil { return err } vxlan := &netlink.Vxlan{ LinkAttrs: netlink.LinkAttrs{ Name: m.vxlanDevice, HardwareAddr: mac, }, VxlanId: m.vxlanID, Port: m.vxlanPort, VtepDevIndex: parent.Attrs().Index, SrcAddr: ip.FromString(localVTEP.ParentDeviceIp).AsNetIP(), } // Try to get the device. link, err := m.nlHandle.LinkByName(m.vxlanDevice) if err != nil { logrus.WithError(err).Info("Failed to get VXLAN tunnel device, assuming it isn't present") if err := m.nlHandle.LinkAdd(vxlan); err == syscall.EEXIST { // Device already exists - likely a race. logrus.Debug("VXLAN device already exists, likely created by someone else.") } else if err != nil { // Error other than "device exists" - return it. return err } // The device now exists - requery it to check that the link exists and is a vxlan device. link, err = m.nlHandle.LinkByName(m.vxlanDevice) if err != nil { return fmt.Errorf("can't locate created vxlan device %v", m.vxlanDevice) } } // At this point, we have successfully queried the existing device, or made sure it exists if it didn't // already. Check for mismatched configuration. If they don't match, recreate the device. if incompat := vxlanLinksIncompat(vxlan, link); incompat != "" { // Existing device doesn't match desired configuration - delete it and recreate. logrus.Warningf("%q exists with incompatible configuration: %v; recreating device", vxlan.Name, incompat) if err = m.nlHandle.LinkDel(link); err != nil { return fmt.Errorf("failed to delete interface: %v", err) } if err = m.nlHandle.LinkAdd(vxlan); err != nil { if err == syscall.EEXIST { log.Warnf("Failed to create VXLAN device. Another device with this VNI may already exist") } return fmt.Errorf("failed to create vxlan interface: %v", err) } link, err = m.nlHandle.LinkByName(vxlan.Name) if err != nil { return err } } // Make sure the MTU is set correctly. attrs := link.Attrs() oldMTU := attrs.MTU if oldMTU != mtu { logCxt.WithFields(logrus.Fields{"old": oldMTU, "new": mtu}).Info("VXLAN device MTU needs to be updated") if err := m.nlHandle.LinkSetMTU(link, mtu); err != nil { log.WithError(err).Warn("Failed to set vxlan tunnel device MTU") } else { logCxt.Info("Updated vxlan tunnel MTU") } } // Make sure the IP address is configured. if err := m.ensureV4AddressOnLink(localVTEP.Ipv4Addr, link); err != nil { return fmt.Errorf("failed to ensure address of interface: %s", err) } // If required, disable checksum offload. 
if xsumBroken { if err := ethtool.EthtoolTXOff(m.vxlanDevice); err != nil { return fmt.Errorf("failed to disable checksum offload: %s", err) } } // And the device is up. if err := m.nlHandle.LinkSetUp(link); err != nil { return fmt.Errorf("failed to set interface up: %s", err) } return nil } // ensureV4AddressOnLink ensures that the provided IPv4 address is configured on the provided Link. If there are other addresses, // this function will remove them, ensuring that the desired IPv4 address is the _only_ address on the Link. func (m *vxlanManager) ensureV4AddressOnLink(ipStr string, link netlink.Link) error { _, net, err := net.ParseCIDR(ipStr + "/32") if err != nil { return err } addr := netlink.Addr{IPNet: net} existingAddrs, err := m.nlHandle.AddrList(link, netlink.FAMILY_V4) if err != nil { return err } // Remove any addresses which we don't want. addrPresent := false for _, existing := range existingAddrs { if reflect.DeepEqual(existing.IPNet, addr.IPNet) { addrPresent = true continue } logrus.WithFields(logrus.Fields{"address": existing, "link": link.Attrs().Name}).Warn("Removing unwanted IP from VXLAN device") if err := m.nlHandle.AddrDel(link, &existing); err != nil { return fmt.Errorf("failed to remove IP address %s", existing) } } // Actually add the desired address to the interface if needed. if !addrPresent { logrus.WithFields(logrus.Fields{"address": addr}).Info("Assigning address to VXLAN device") if err := m.nlHandle.AddrAdd(link, &addr); err != nil { return fmt.Errorf("failed to add IP address") } } return nil } // vlanLinksIncompat takes two vxlan devices and compares them to make sure they match. If they do not match, // this function will return a mesasge indicating which configuration is mismatched. func vxlanLinksIncompat(l1, l2 netlink.Link) string { if l1.Type() != l2.Type() { return fmt.Sprintf("link type: %v vs %v", l1.Type(), l2.Type()) } v1 := l1.(*netlink.Vxlan) v2 := l2.(*netlink.Vxlan) if v1.VxlanId != v2.VxlanId { return fmt.Sprintf("vni: %v vs %v", v1.VxlanId, v2.VxlanId) } if v1.VtepDevIndex > 0 && v2.VtepDevIndex > 0 && v1.VtepDevIndex != v2.VtepDevIndex { return fmt.Sprintf("vtep (external) interface: %v vs %v", v1.VtepDevIndex, v2.VtepDevIndex) } if len(v1.SrcAddr) > 0 && len(v2.SrcAddr) > 0 && !v1.SrcAddr.Equal(v2.SrcAddr) { return fmt.Sprintf("vtep (external) IP: %v vs %v", v1.SrcAddr, v2.SrcAddr) } if len(v1.Group) > 0 && len(v2.Group) > 0 && !v1.Group.Equal(v2.Group) { return fmt.Sprintf("group address: %v vs %v", v1.Group, v2.Group) } if v1.L2miss != v2.L2miss { return fmt.Sprintf("l2miss: %v vs %v", v1.L2miss, v2.L2miss) } if v1.Port > 0 && v2.Port > 0 && v1.Port != v2.Port { return fmt.Sprintf("port: %v vs %v", v1.Port, v2.Port) } if v1.GBP != v2.GBP { return fmt.Sprintf("gbp: %v vs %v", v1.GBP, v2.GBP) } return "" }
1
19,551
I don't think we need these. Simpler just to put `routetable.TargetType...` inline below.
projectcalico-felix
c
@@ -43,7 +43,8 @@ class Dispatcher when 'parallel' ParallelDispatcher.new when 'linear' - if cart.ncr? + # @todo: dynamic dispatch for selection + if cart.proposal.client_data_legacy.client == "ncr" NcrDispatcher.new else LinearDispatcher.new
1
class Dispatcher def email_approver(approval) approval.create_api_token! send_notification_email(approval) end def email_observers(cart) cart.approvals.observing.each do |observer| CommunicartMailer.cart_observer_email(observer.user_email_address, cart).deliver end end def email_sent_confirmation(cart) CommunicartMailer.proposal_created_confirmation(cart).deliver end def deliver_new_cart_emails(cart) self.email_observers(cart) self.email_sent_confirmation(cart) end def requires_approval_notice?(approval) true end def on_cart_rejected(cart) rejection = cart.rejections.first # @todo rewrite this email so a "rejection approval" isn't needed CommunicartMailer.approval_reply_received_email(rejection).deliver self.email_observers(cart) end def on_approval_approved(approval) if self.requires_approval_notice?(approval) CommunicartMailer.approval_reply_received_email(approval).deliver end self.email_observers(approval.cart) end def self.initialize_dispatcher(cart) case cart.flow when 'parallel' ParallelDispatcher.new when 'linear' if cart.ncr? NcrDispatcher.new else LinearDispatcher.new end end end def self.deliver_new_cart_emails(cart) dispatcher = self.initialize_dispatcher(cart) dispatcher.deliver_new_cart_emails(cart) end def self.on_cart_rejected(cart) dispatcher = self.initialize_dispatcher(cart) dispatcher.on_cart_rejected(cart) end def self.on_approval_approved(approval) dispatcher = self.initialize_dispatcher(approval.cart) dispatcher.on_approval_approved(approval) end private def send_notification_email(approval) email = approval.user_email_address CommunicartMailer.cart_notification_email(email, approval).deliver end end
1
12,672
Why this instead of the old way?
18F-C2
rb
@@ -549,10 +549,14 @@ void Mob::SetInvisible(uint8 state) invisible = state; SendAppearancePacket(AT_Invis, invisible); // Invis and hide breaks charms - auto formerpet = GetPet(); - if (formerpet && formerpet->GetPetType() == petCharmed && (invisible || hidden || improved_hidden)) - formerpet->BuffFadeByEffect(SE_Charm); + if (formerpet && formerpet->GetPetType() == petCharmed && (invisible || hidden || improved_hidden || invisible_animals || invisible_undead)) { + if (RuleB(Pets, LivelikeBreakCharmOnInvis) || (!RuleB(Pets, LivelikeBreakCharmOnInvis) && IsInvisible(formerpet))) { + formerpet->BuffFadeByEffect(SE_Charm); + } + + LogRules("Pets:LivelikeBreakCharmOnInvis for [{}] | Invis [{}] - Hidden [{}] - Shroud of Stealth [{}] - IVA [{}] - IVU [{}]", GetCleanName(), invisible, hidden, improved_hidden, invisible_animals, invisible_undead); + } } //check to see if `this` is invisible to `other`
1
/* EQEMu: Everquest Server Emulator Copyright (C) 2001-2016 EQEMu Development Team (http://eqemu.org) This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY except by those people which sell it, which are required to give you total support for your newly bought product; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include "../common/spdat.h" #include "../common/string_util.h" #include "../common/misc_functions.h" #include "data_bucket.h" #include "quest_parser_collection.h" #include "string_ids.h" #include "worldserver.h" #include "mob_movement_manager.h" #include "water_map.h" #include <limits.h> #include <math.h> #include <sstream> #include <algorithm> #ifdef BOTS #include "bot.h" #endif extern EntityList entity_list; extern Zone* zone; extern WorldServer worldserver; Mob::Mob( const char *in_name, const char *in_lastname, int32 in_cur_hp, int32 in_max_hp, uint8 in_gender, uint16 in_race, uint8 in_class, bodyType in_bodytype, uint8 in_deity, uint8 in_level, uint32 in_npctype_id, float in_size, float in_runspeed, const glm::vec4 &position, uint8 in_light, uint8 in_texture, uint8 in_helmtexture, uint16 in_ac, uint16 in_atk, uint16 in_str, uint16 in_sta, uint16 in_dex, uint16 in_agi, uint16 in_int, uint16 in_wis, uint16 in_cha, uint8 in_haircolor, uint8 in_beardcolor, uint8 in_eyecolor1, // the eyecolors always seem to be the same, maybe left and right eye? 
uint8 in_eyecolor2, uint8 in_hairstyle, uint8 in_luclinface, uint8 in_beard, uint32 in_drakkin_heritage, uint32 in_drakkin_tattoo, uint32 in_drakkin_details, EQ::TintProfile in_armor_tint, uint8 in_aa_title, uint8 in_see_invis, // see through invis/ivu uint8 in_see_invis_undead, uint8 in_see_hide, uint8 in_see_improved_hide, int32 in_hp_regen, int32 in_mana_regen, uint8 in_qglobal, uint8 in_maxlevel, uint32 in_scalerate, uint8 in_armtexture, uint8 in_bracertexture, uint8 in_handtexture, uint8 in_legtexture, uint8 in_feettexture, uint16 in_usemodel, bool in_always_aggro ) : attack_timer(2000), attack_dw_timer(2000), ranged_timer(2000), tic_timer(6000), mana_timer(2000), spellend_timer(0), rewind_timer(30000), bindwound_timer(10000), stunned_timer(0), spun_timer(0), bardsong_timer(6000), gravity_timer(1000), viral_timer(0), m_FearWalkTarget(-999999.0f, -999999.0f, -999999.0f), flee_timer(FLEE_CHECK_TIMER), m_Position(position), tmHidden(-1), mitigation_ac(0), m_specialattacks(eSpecialAttacks::None), attack_anim_timer(500), position_update_melee_push_timer(500), hate_list_cleanup_timer(6000), mob_close_scan_timer(6000), mob_check_moving_timer(1000) { mMovementManager = &MobMovementManager::Get(); mMovementManager->AddMob(this); targeted = 0; currently_fleeing = false; AI_Init(); SetMoving(false); moved = false; turning = false; m_RewindLocation = glm::vec3(); m_RelativePosition = glm::vec4(); name[0] = 0; orig_name[0] = 0; clean_name[0] = 0; lastname[0] = 0; if (in_name) { strn0cpy(name, in_name, 64); strn0cpy(orig_name, in_name, 64); } if (in_lastname) { strn0cpy(lastname, in_lastname, 64); } current_hp = in_cur_hp; max_hp = in_max_hp; base_hp = in_max_hp; gender = in_gender; race = in_race; base_gender = in_gender; base_race = in_race; use_model = in_usemodel; class_ = in_class; bodytype = in_bodytype; orig_bodytype = in_bodytype; deity = in_deity; level = in_level; orig_level = in_level; npctype_id = in_npctype_id; size = in_size; base_size = size; runspeed = in_runspeed; // neotokyo: sanity check if (runspeed < 0 || runspeed > 20) { runspeed = 1.25f; } // clients -- todo movement this doesn't take into account gm speed we need to fix that. 
base_runspeed = (int)((float)runspeed * 40.0f); if (runspeed == 0.7f) { base_runspeed = 28; walkspeed = 0.3f; base_walkspeed = 12; fearspeed = 0.625f; base_fearspeed = 25; // npcs } else { base_walkspeed = base_runspeed * 100 / 265; walkspeed = ((float) base_walkspeed) * 0.025f; base_fearspeed = base_runspeed * 100 / 127; fearspeed = ((float) base_fearspeed) * 0.025f; } last_hp_percent = 0; last_hp = 0; last_max_hp = 0; current_speed = base_runspeed; m_PlayerState = 0; // sanity check if (runspeed < 0 || runspeed > 20) { runspeed = 1.25f; } m_Light.Type[EQ::lightsource::LightInnate] = in_light; m_Light.Level[EQ::lightsource::LightInnate] = EQ::lightsource::TypeToLevel(m_Light.Type[EQ::lightsource::LightInnate]); m_Light.Type[EQ::lightsource::LightActive] = m_Light.Type[EQ::lightsource::LightInnate]; m_Light.Level[EQ::lightsource::LightActive] = m_Light.Level[EQ::lightsource::LightInnate]; texture = in_texture; helmtexture = in_helmtexture; armtexture = in_armtexture; bracertexture = in_bracertexture; handtexture = in_handtexture; legtexture = in_legtexture; feettexture = in_feettexture; multitexture = (armtexture || bracertexture || handtexture || legtexture || feettexture); haircolor = in_haircolor; beardcolor = in_beardcolor; eyecolor1 = in_eyecolor1; eyecolor2 = in_eyecolor2; hairstyle = in_hairstyle; luclinface = in_luclinface; beard = in_beard; drakkin_heritage = in_drakkin_heritage; drakkin_tattoo = in_drakkin_tattoo; drakkin_details = in_drakkin_details; attack_speed = 0; attack_delay = 0; slow_mitigation = 0; findable = false; trackable = true; has_shieldequiped = false; has_twohandbluntequiped = false; has_twohanderequipped = false; can_facestab = false; has_numhits = false; has_MGB = false; has_ProjectIllusion = false; SpellPowerDistanceMod = 0; last_los_check = false; if (in_aa_title > 0) { aa_title = in_aa_title; } else { aa_title = 0xFF; } AC = in_ac; ATK = in_atk; STR = in_str; STA = in_sta; DEX = in_dex; AGI = in_agi; INT = in_int; WIS = in_wis; CHA = in_cha; MR = CR = FR = DR = PR = Corrup = PhR = 0; ExtraHaste = 0; bEnraged = false; shield_target = nullptr; current_mana = 0; max_mana = 0; hp_regen = in_hp_regen; mana_regen = in_mana_regen; ooc_regen = RuleI(NPC, OOCRegen); //default Out of Combat Regen maxlevel = in_maxlevel; scalerate = in_scalerate; invisible = 0; invisible_undead = false; invisible_animals = false; sneaking = false; hidden = false; improved_hidden = false; invulnerable = false; IsFullHP = (current_hp == max_hp); qglobal = 0; spawned = false; rare_spawn = false; always_aggro = in_always_aggro; InitializeBuffSlots(); // clear the proc arrays for (int j = 0; j < MAX_PROCS; j++) { PermaProcs[j].spellID = SPELL_UNKNOWN; PermaProcs[j].chance = 0; PermaProcs[j].base_spellID = SPELL_UNKNOWN; PermaProcs[j].level_override = -1; SpellProcs[j].spellID = SPELL_UNKNOWN; SpellProcs[j].chance = 0; SpellProcs[j].base_spellID = SPELL_UNKNOWN; SpellProcs[j].level_override = -1; DefensiveProcs[j].spellID = SPELL_UNKNOWN; DefensiveProcs[j].chance = 0; DefensiveProcs[j].base_spellID = SPELL_UNKNOWN; DefensiveProcs[j].level_override = -1; RangedProcs[j].spellID = SPELL_UNKNOWN; RangedProcs[j].chance = 0; RangedProcs[j].base_spellID = SPELL_UNKNOWN; RangedProcs[j].level_override = -1; } for (int i = EQ::textures::textureBegin; i < EQ::textures::materialCount; i++) { armor_tint.Slot[i].Color = in_armor_tint.Slot[i].Color; } std::fill(std::begin(m_spellHitsLeft), std::end(m_spellHitsLeft), 0); m_Delta = glm::vec4(); animation = 0; isgrouped = false; israidgrouped = false; 
is_horse = false; entity_id_being_looted = 0; _appearance = eaStanding; pRunAnimSpeed = 0; spellend_timer.Disable(); bardsong_timer.Disable(); bardsong = 0; bardsong_target_id = 0; casting_spell_id = 0; casting_spell_timer = 0; casting_spell_timer_duration = 0; casting_spell_inventory_slot = 0; casting_spell_aa_id = 0; target = 0; ActiveProjectileATK = false; for (int i = 0; i < MAX_SPELL_PROJECTILE; i++) { ProjectileAtk[i].increment = 0; ProjectileAtk[i].hit_increment = 0; ProjectileAtk[i].target_id = 0; ProjectileAtk[i].wpn_dmg = 0; ProjectileAtk[i].origin_x = 0.0f; ProjectileAtk[i].origin_y = 0.0f; ProjectileAtk[i].origin_z = 0.0f; ProjectileAtk[i].tlast_x = 0.0f; ProjectileAtk[i].tlast_y = 0.0f; ProjectileAtk[i].ranged_id = 0; ProjectileAtk[i].ammo_id = 0; ProjectileAtk[i].ammo_slot = 0; ProjectileAtk[i].skill = 0; ProjectileAtk[i].speed_mod = 0.0f; } memset(&itembonuses, 0, sizeof(StatBonuses)); memset(&spellbonuses, 0, sizeof(StatBonuses)); memset(&aabonuses, 0, sizeof(StatBonuses)); spellbonuses.AggroRange = -1; spellbonuses.AssistRange = -1; SetPetID(0); SetOwnerID(0); typeofpet = petNone; // default to not a pet petpower = 0; held = false; gheld = false; nocast = false; focused = false; pet_stop = false; pet_regroup = false; _IsTempPet = false; pet_owner_client = false; pet_targetlock_id = 0; attacked_count = 0; mezzed = false; stunned = false; silenced = false; amnesiad = false; inWater = false; int m; for (m = 0; m < MAX_SHIELDERS; m++) { shielder[m].shielder_id = 0; shielder[m].shielder_bonus = 0; } destructibleobject = false; wandertype = 0; pausetype = 0; cur_wp = 0; m_CurrentWayPoint = glm::vec4(); cur_wp_pause = 0; patrol = 0; follow_id = 0; follow_dist = 100; // Default Distance for Follow follow_run = true; // We can run if distance great enough no_target_hotkey = false; flee_mode = false; currently_fleeing = false; flee_timer.Start(); permarooted = (runspeed > 0) ? 
false : true;
	pause_timer_complete = false;
	ForcedMovement = 0;
	roamer = false;
	rooted = false;
	charmed = false;
	has_virus = false;
	for (int i = 0; i < MAX_SPELL_TRIGGER * 2; i++) {
		viral_spells[i] = 0;
	}
	pStandingPetOrder = SPO_Follow;
	pseudo_rooted = false;

	see_invis = GetSeeInvisible(in_see_invis);
	see_invis_undead = GetSeeInvisible(in_see_invis_undead);
	see_hide = GetSeeInvisible(in_see_hide);
	see_improved_hide = GetSeeInvisible(in_see_improved_hide);
	qglobal = in_qglobal != 0;

	// Bind wound
	bindwound_timer.Disable();
	bindwound_target = 0;

	trade = new Trade(this);
	// hp event
	nexthpevent = -1;
	nextinchpevent = -1;

	hasTempPet = false;
	count_TempPet = 0;

	m_is_running = false;

	nimbus_effect1 = 0;
	nimbus_effect2 = 0;
	nimbus_effect3 = 0;

	m_targetable = true;

	m_TargetRing = glm::vec3();

	flymode = GravityBehavior::Water;
	DistractedFromGrid = false;
	hate_list.SetHateOwner(this);

	m_AllowBeneficial = false;
	m_DisableMelee = false;

	for (int i = 0; i < EQ::skills::HIGHEST_SKILL + 2; i++) {
		SkillDmgTaken_Mod[i] = 0;
	}

	for (int i = 0; i < HIGHEST_RESIST + 2; i++) {
		Vulnerability_Mod[i] = 0;
	}

	emoteid = 0;
	endur_upkeep = false;
	degenerating_effects = false;
	PrimaryAggro = false;
	AssistAggro = false;
	npc_assist_cap = 0;

#ifdef BOTS
	m_manual_follow = false;
#endif

	mob_close_scan_timer.Trigger();
	SetCanOpenDoors(true);
}

Mob::~Mob()
{
	mMovementManager->RemoveMob(this);

	AI_Stop();
	if (GetPet()) {
		if (GetPet()->Charmed()) {
			GetPet()->BuffFadeByEffect(SE_Charm);
		} else {
			SetPet(0);
		}
	}

	EQApplicationPacket app;
	CreateDespawnPacket(&app, !IsCorpse());
	Corpse *corpse = entity_list.GetCorpseByID(GetID());
	if (!corpse || !corpse->IsPlayerCorpse()) {
		entity_list.QueueClients(this, &app, true);
	}

	entity_list.RemoveFromTargets(this, true);

	if (trade) {
		Mob *with = trade->With();
		if (with && with->IsClient()) {
			with->CastToClient()->FinishTrade(with);
			with->trade->Reset();
		}
		delete trade;
	}

	if (HasTempPetsActive()) {
		entity_list.DestroyTempPets(this);
	}

	entity_list.UnMarkNPC(GetID());
	UninitializeBuffSlots();

	entity_list.RemoveMobFromCloseLists(this);
	entity_list.RemoveAuraFromMobs(this);

	close_mobs.clear();

#ifdef BOTS
	LeaveHealRotationTargetPool();
#endif
}

uint32 Mob::GetAppearanceValue(EmuAppearance iAppearance)
{
	switch (iAppearance) {
		// 0 standing, 1 sitting, 2 ducking, 3 lying down, 4 looting
		case eaStanding: {
			return ANIM_STAND;
		}
		case eaSitting: {
			return ANIM_SIT;
		}
		case eaCrouching: {
			return ANIM_CROUCH;
		}
		case eaDead: {
			return ANIM_DEATH;
		}
		case eaLooting: {
			return ANIM_LOOT;
		}
		// to shut up the compiler:
		case _eaMaxAppearance:
			break;
	}
	return (ANIM_STAND);
}

void Mob::SetInvisible(uint8 state)
{
	invisible = state;
	SendAppearancePacket(AT_Invis, invisible);
	// Invis and hide break charms
	auto formerpet = GetPet();
	if (formerpet && formerpet->GetPetType() == petCharmed && (invisible || hidden || improved_hidden))
		formerpet->BuffFadeByEffect(SE_Charm);
}

// check to see if `this` is invisible to `other`
bool Mob::IsInvisible(Mob* other) const
{
	if (!other)
		return (false);

	uint8 SeeInvisBonus = 0; // note: computed here but currently unused below
	if (IsClient())
		SeeInvisBonus = aabonuses.SeeInvis;

	// check regular invisibility
	if (invisible && invisible > (other->SeeInvisible()))
		return true;

	// check invis vs. undead
	if (other->GetBodyType() == BT_Undead || other->GetBodyType() == BT_SummonedUndead) {
		if (invisible_undead && !other->SeeInvisibleUndead())
			return true;
	}

	// check invis vs. animals...
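	// (mirrors the undead check above, but keyed off BT_Animal; note it reuses the
	// plain SeeInvisible() counter rather than a dedicated see-invis-animals value)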
	if (other->GetBodyType() == BT_Animal) {
		if (invisible_animals && !other->SeeInvisible())
			return true;
	}

	if (hidden) {
		if (!other->see_hide && !other->see_improved_hide) {
			return true;
		}
	}

	if (improved_hidden) {
		if (!other->see_improved_hide) {
			return true;
		}
	}

	// handle sneaking
	if (sneaking) {
		if (BehindMob(other, GetX(), GetY()))
			return true;
	}

	return (false);
}

int Mob::_GetWalkSpeed() const
{
	if (IsRooted() || IsStunned() || IsMezzed())
		return 0;
	else if (IsPseudoRooted())
		return 0;

	int aa_mod = 0;
	int speed_mod = base_walkspeed;
	int base_run = base_runspeed;
	bool has_horse = false;
	int runspeedcap = RuleI(Character, BaseRunSpeedCap);
	runspeedcap += itembonuses.IncreaseRunSpeedCap + spellbonuses.IncreaseRunSpeedCap + aabonuses.IncreaseRunSpeedCap;

	aa_mod += aabonuses.BaseMovementSpeed;

	if (IsClient() && CastToClient()->GetHorseId()) {
		Mob *horse = entity_list.GetMob(CastToClient()->GetHorseId());
		if (horse) {
			speed_mod = horse->GetBaseRunspeed();
			return speed_mod;
		}
	}

	int spell_mod = spellbonuses.movementspeed + itembonuses.movementspeed;
	int movemod = 0;

	if (spell_mod < 0)
		movemod += spell_mod;
	else if (spell_mod > aa_mod)
		movemod = spell_mod;
	else
		movemod = aa_mod;

	// hard cap
	if (runspeedcap > 225)
		runspeedcap = 225;

	if (movemod < -85) // cap it at moving very very slow
		movemod = -85;

	if (!has_horse && movemod != 0)
		speed_mod += (base_run * movemod / 100);

	if (speed_mod < 1)
		return (0);

	// runspeed cap.
#ifdef BOTS
	if (IsClient() || IsBot())
#else
	if (IsClient())
#endif
	{
		if (speed_mod > runspeedcap)
			speed_mod = runspeedcap;
	}
	return speed_mod;
}

int Mob::_GetRunSpeed() const
{
	if (IsRooted() || IsStunned() || IsMezzed() || IsPseudoRooted())
		return 0;

	int aa_mod = 0;
	int speed_mod = base_runspeed;
	int base_walk = base_walkspeed;
	bool has_horse = false;
	if (IsClient()) {
		if (CastToClient()->GetGMSpeed()) {
			speed_mod = 325;
		}
		else if (CastToClient()->GetHorseId()) {
			Mob* horse = entity_list.GetMob(CastToClient()->GetHorseId());
			if (horse) {
				speed_mod = horse->GetBaseRunspeed();
				base_walk = horse->GetBaseWalkspeed();
				has_horse = true;
			}
		}
	}

	int runspeedcap = RuleI(Character, BaseRunSpeedCap);
	runspeedcap += itembonuses.IncreaseRunSpeedCap + spellbonuses.IncreaseRunSpeedCap + aabonuses.IncreaseRunSpeedCap;

	aa_mod += aabonuses.BaseMovementSpeed + aabonuses.movementspeed;
	int spell_mod = spellbonuses.movementspeed + itembonuses.movementspeed;
	int movemod = 0;

	if (spell_mod < 0) {
		movemod += spell_mod;
	}
	else if (spell_mod > aa_mod) {
		movemod = spell_mod;
	}
	else {
		movemod = aa_mod;
	}

	if (movemod < -85) // cap it at moving very very slow
		movemod = -85;

	if (!has_horse && movemod != 0) {
#ifdef BOTS
		if (IsClient() || IsBot())
#else
		if (IsClient())
#endif
		{
			speed_mod += (speed_mod * movemod / 100);
		} else {
			if (movemod < 0) {
				speed_mod += (50 * movemod / 100);
				// basically stopped
				if (speed_mod < 1) {
					return (0);
				}
				// moving slowly
				if (speed_mod < 8)
					return (8);
			} else {
				speed_mod += GetBaseWalkspeed();
				if (movemod > 50)
					speed_mod += 4;
				if (movemod > 40)
					speed_mod += 3;
			}
		}
	}

	if (speed_mod < 1) {
		return (0);
	}

	// runspeed cap.
#ifdef BOTS
	if (IsClient() || IsBot())
#else
	if (IsClient())
#endif
	{
		if (speed_mod > runspeedcap)
			speed_mod = runspeedcap;
	}
	return speed_mod;
}

int Mob::_GetFearSpeed() const
{
	if (IsRooted() || IsStunned() || IsMezzed())
		return 0;

	//float speed_mod = fearspeed;
	int speed_mod = GetBaseFearSpeed();

	// use a max of 1.75f in calcs (70 base units at the 0.025f scale).
int base_run = std::min(GetBaseRunspeed(), 70); int spell_mod = spellbonuses.movementspeed + itembonuses.movementspeed; int movemod = 0; if(spell_mod < 0) { movemod += spell_mod; } if(movemod < -85) //cap it at moving very very slow movemod = -85; if (IsClient()) { if (CastToClient()->GetRunMode()) speed_mod = GetBaseRunspeed(); else speed_mod = GetBaseWalkspeed(); if (movemod < 0) return GetBaseWalkspeed(); speed_mod += (base_run * movemod / 100); return speed_mod; } else { int hp_ratio = GetIntHPRatio(); // very large snares 50% or higher if (movemod < -49) { if (hp_ratio < 25) { return (0); } if (hp_ratio < 50) return (8); else return (12); } if (hp_ratio < 5) { speed_mod = base_walkspeed / 3; } else if (hp_ratio < 15) { speed_mod = base_walkspeed / 2; } else if (hp_ratio < 25) { speed_mod = base_walkspeed + 1; // add the +1 so they do the run animation } else if (hp_ratio < 50) { speed_mod *= 82; speed_mod /= 100; } if (movemod > 0) { speed_mod += GetBaseWalkspeed(); if (movemod > 50) speed_mod += 4; if (movemod > 40) speed_mod += 3; return speed_mod; } else if (movemod < 0) { speed_mod += (base_run * movemod / 100); } } if (speed_mod < 1) return (0); if (speed_mod < 9) return (8); if (speed_mod < 13) return (12); return speed_mod; } int32 Mob::CalcMaxMana() { switch (GetCasterClass()) { case 'I': max_mana = (((GetINT()/2)+1) * GetLevel()) + spellbonuses.Mana + itembonuses.Mana; break; case 'W': max_mana = (((GetWIS()/2)+1) * GetLevel()) + spellbonuses.Mana + itembonuses.Mana; break; case 'N': default: max_mana = 0; break; } if (max_mana < 0) { max_mana = 0; } return max_mana; } int32 Mob::CalcMaxHP() { max_hp = (base_hp + itembonuses.HP + spellbonuses.HP); max_hp += max_hp * ((aabonuses.MaxHPChange + spellbonuses.MaxHPChange + itembonuses.MaxHPChange) / 10000.0f); return max_hp; } int32 Mob::GetItemHPBonuses() { int32 item_hp = 0; item_hp = itembonuses.HP; item_hp += item_hp * itembonuses.MaxHPChange / 10000; return item_hp; } int32 Mob::GetSpellHPBonuses() { int32 spell_hp = 0; spell_hp = spellbonuses.HP; spell_hp += spell_hp * spellbonuses.MaxHPChange / 10000; return spell_hp; } char Mob::GetCasterClass() const { switch(class_) { case CLERIC: case PALADIN: case RANGER: case DRUID: case SHAMAN: case BEASTLORD: case CLERICGM: case PALADINGM: case RANGERGM: case DRUIDGM: case SHAMANGM: case BEASTLORDGM: return 'W'; break; case SHADOWKNIGHT: case BARD: case NECROMANCER: case WIZARD: case MAGICIAN: case ENCHANTER: case SHADOWKNIGHTGM: case BARDGM: case NECROMANCERGM: case WIZARDGM: case MAGICIANGM: case ENCHANTERGM: return 'I'; break; default: return 'N'; break; } } uint8 Mob::GetArchetype() const { switch(class_) { case PALADIN: case RANGER: case SHADOWKNIGHT: case BARD: case BEASTLORD: case PALADINGM: case RANGERGM: case SHADOWKNIGHTGM: case BARDGM: case BEASTLORDGM: return ARCHETYPE_HYBRID; break; case CLERIC: case DRUID: case SHAMAN: case NECROMANCER: case WIZARD: case MAGICIAN: case ENCHANTER: case CLERICGM: case DRUIDGM: case SHAMANGM: case NECROMANCERGM: case WIZARDGM: case MAGICIANGM: case ENCHANTERGM: return ARCHETYPE_CASTER; break; case WARRIOR: case MONK: case ROGUE: case BERSERKER: case WARRIORGM: case MONKGM: case ROGUEGM: case BERSERKERGM: return ARCHETYPE_MELEE; break; default: return ARCHETYPE_HYBRID; break; } } void Mob::CreateSpawnPacket(EQApplicationPacket *app, Mob *ForWho) { app->SetOpcode(OP_NewSpawn); app->size = sizeof(NewSpawn_Struct); app->pBuffer = new uchar[app->size]; memset(app->pBuffer, 0, app->size); NewSpawn_Struct *ns = (NewSpawn_Struct *) app->pBuffer; 
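	// Populate the struct, then backfill a default last name for the GM/utility
	// classes below when the rule is enabled and none was set. Hypothetical caller
	// sketch (names assumed for illustration, not from this file):
	//   EQApplicationPacket app;
	//   some_mob->CreateSpawnPacket(&app, for_who);
	//   some_client->QueuePacket(&app);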
FillSpawnStruct(ns, ForWho); if (RuleB(NPC, UseClassAsLastName) && strlen(ns->spawn.lastName) == 0) { switch (ns->spawn.class_) { case TRIBUTE_MASTER: strcpy(ns->spawn.lastName, "Tribute Master"); break; case ADVENTURERECRUITER: strcpy(ns->spawn.lastName, "Adventure Recruiter"); break; case BANKER: strcpy(ns->spawn.lastName, "Banker"); break; case ADVENTUREMERCHANT: strcpy(ns->spawn.lastName, "Adventure Merchant"); break; case WARRIORGM: strcpy(ns->spawn.lastName, "GM Warrior"); break; case PALADINGM: strcpy(ns->spawn.lastName, "GM Paladin"); break; case RANGERGM: strcpy(ns->spawn.lastName, "GM Ranger"); break; case SHADOWKNIGHTGM: strcpy(ns->spawn.lastName, "GM Shadowknight"); break; case DRUIDGM: strcpy(ns->spawn.lastName, "GM Druid"); break; case BARDGM: strcpy(ns->spawn.lastName, "GM Bard"); break; case ROGUEGM: strcpy(ns->spawn.lastName, "GM Rogue"); break; case SHAMANGM: strcpy(ns->spawn.lastName, "GM Shaman"); break; case NECROMANCERGM: strcpy(ns->spawn.lastName, "GM Necromancer"); break; case WIZARDGM: strcpy(ns->spawn.lastName, "GM Wizard"); break; case MAGICIANGM: strcpy(ns->spawn.lastName, "GM Magician"); break; case ENCHANTERGM: strcpy(ns->spawn.lastName, "GM Enchanter"); break; case BEASTLORDGM: strcpy(ns->spawn.lastName, "GM Beastlord"); break; case BERSERKERGM: strcpy(ns->spawn.lastName, "GM Berserker"); break; case MERCERNARY_MASTER: strcpy(ns->spawn.lastName, "Mercenary Recruiter"); break; default: break; } } } void Mob::CreateSpawnPacket(EQApplicationPacket* app, NewSpawn_Struct* ns) { app->SetOpcode(OP_NewSpawn); app->size = sizeof(NewSpawn_Struct); app->pBuffer = new uchar[sizeof(NewSpawn_Struct)]; // Copy ns directly into packet memcpy(app->pBuffer, ns, sizeof(NewSpawn_Struct)); // Custom packet data NewSpawn_Struct* ns2 = (NewSpawn_Struct*)app->pBuffer; strcpy(ns2->spawn.name, ns->spawn.name); // Set default Last Names for certain Classes if not defined if (RuleB(NPC, UseClassAsLastName) && strlen(ns->spawn.lastName) == 0) { switch (ns->spawn.class_) { case TRIBUTE_MASTER: strcpy(ns2->spawn.lastName, "Tribute Master"); break; case ADVENTURERECRUITER: strcpy(ns2->spawn.lastName, "Adventure Recruiter"); break; case BANKER: strcpy(ns2->spawn.lastName, "Banker"); break; case ADVENTUREMERCHANT: strcpy(ns2->spawn.lastName, "Adventure Merchant"); break; case WARRIORGM: strcpy(ns2->spawn.lastName, "GM Warrior"); break; case PALADINGM: strcpy(ns2->spawn.lastName, "GM Paladin"); break; case RANGERGM: strcpy(ns2->spawn.lastName, "GM Ranger"); break; case SHADOWKNIGHTGM: strcpy(ns2->spawn.lastName, "GM Shadowknight"); break; case DRUIDGM: strcpy(ns2->spawn.lastName, "GM Druid"); break; case BARDGM: strcpy(ns2->spawn.lastName, "GM Bard"); break; case ROGUEGM: strcpy(ns2->spawn.lastName, "GM Rogue"); break; case SHAMANGM: strcpy(ns2->spawn.lastName, "GM Shaman"); break; case NECROMANCERGM: strcpy(ns2->spawn.lastName, "GM Necromancer"); break; case WIZARDGM: strcpy(ns2->spawn.lastName, "GM Wizard"); break; case MAGICIANGM: strcpy(ns2->spawn.lastName, "GM Magician"); break; case ENCHANTERGM: strcpy(ns2->spawn.lastName, "GM Enchanter"); break; case BEASTLORDGM: strcpy(ns2->spawn.lastName, "GM Beastlord"); break; case BERSERKERGM: strcpy(ns2->spawn.lastName, "GM Berserker"); break; case MERCERNARY_MASTER: strcpy(ns2->spawn.lastName, "Mercenary liaison"); break; default: strcpy(ns2->spawn.lastName, ns->spawn.lastName); break; } } else { strcpy(ns2->spawn.lastName, ns->spawn.lastName); } memset(&app->pBuffer[sizeof(Spawn_Struct)-7], 0xFF, 7); } void Mob::FillSpawnStruct(NewSpawn_Struct* 
ns, Mob* ForWho) { int i; strcpy(ns->spawn.name, name); if(IsClient()) { strn0cpy(ns->spawn.lastName, lastname, sizeof(ns->spawn.lastName)); } ns->spawn.heading = FloatToEQ12(m_Position.w); ns->spawn.x = FloatToEQ19(m_Position.x);//((int32)x_pos)<<3; ns->spawn.y = FloatToEQ19(m_Position.y);//((int32)y_pos)<<3; ns->spawn.z = FloatToEQ19(m_Position.z);//((int32)z_pos)<<3; ns->spawn.spawnId = GetID(); ns->spawn.curHp = static_cast<uint8>(GetHPRatio()); ns->spawn.max_hp = 100; //this field needs a better name ns->spawn.race = (use_model) ? use_model : race; ns->spawn.runspeed = runspeed; ns->spawn.walkspeed = walkspeed; ns->spawn.class_ = class_; ns->spawn.gender = gender; ns->spawn.level = level; ns->spawn.PlayerState = m_PlayerState; ns->spawn.deity = deity; ns->spawn.animation = 0; ns->spawn.findable = findable?1:0; UpdateActiveLight(); ns->spawn.light = m_Light.Type[EQ::lightsource::LightActive]; if (IsNPC() && race == ERUDITE) ns->spawn.showhelm = 1; else ns->spawn.showhelm = (helmtexture && helmtexture != 0xFF) ? 1 : 0; ns->spawn.invis = (invisible || hidden) ? 1 : 0; // TODO: load this before spawning players ns->spawn.NPC = IsClient() ? 0 : 1; ns->spawn.IsMercenary = IsMerc() ? 1 : 0; ns->spawn.targetable_with_hotkey = no_target_hotkey ? 0 : 1; // opposite logic! ns->spawn.petOwnerId = ownerid; ns->spawn.haircolor = haircolor; ns->spawn.beardcolor = beardcolor; ns->spawn.eyecolor1 = eyecolor1; ns->spawn.eyecolor2 = eyecolor2; ns->spawn.hairstyle = hairstyle; ns->spawn.face = luclinface; ns->spawn.beard = beard; ns->spawn.StandState = GetAppearanceValue(_appearance); ns->spawn.drakkin_heritage = drakkin_heritage; ns->spawn.drakkin_tattoo = drakkin_tattoo; ns->spawn.drakkin_details = drakkin_details; ns->spawn.equip_chest2 = GetHerosForgeModel(1) != 0 || multitexture? 0xff : texture; // ns->spawn.invis2 = 0xff;//this used to be labeled beard.. if its not FF it will turn mob invis if (helmtexture && helmtexture != 0xFF && GetHerosForgeModel(0) == 0) { ns->spawn.helm=helmtexture; } else { ns->spawn.helm = 0; } ns->spawn.guildrank = 0xFF; ns->spawn.size = size; ns->spawn.bodytype = bodytype; // The 'flymode' settings have the following effect: // 0 - Mobs in water sink like a stone to the bottom // 1 - Same as #flymode 1 // 2 - Same as #flymode 2 // 3 - Mobs in water do not sink. A value of 3 in this field appears to be the default setting for all mobs // (in water or not) according to 6.2 era packet collects. if(IsClient()) ns->spawn.flymode = FindType(SE_Levitate) ? 2 : 0; else ns->spawn.flymode = flymode; ns->spawn.lastName[0] = '\0'; strn0cpy(ns->spawn.lastName, lastname, sizeof(ns->spawn.lastName)); //for (i = 0; i < _MaterialCount; i++) for (i = 0; i < 9; i++) { // Only Player Races Wear Armor if (Mob::IsPlayerRace(race) || i > 6) { ns->spawn.equipment.Slot[i].Material = GetEquipmentMaterial(i); ns->spawn.equipment.Slot[i].EliteModel = IsEliteMaterialItem(i); ns->spawn.equipment.Slot[i].HerosForgeModel = GetHerosForgeModel(i); ns->spawn.equipment_tint.Slot[i].Color = GetEquipmentColor(i); } } if (texture > 0) { for (i = 0; i < 9; i++) { if (i == EQ::textures::weaponPrimary || i == EQ::textures::weaponSecondary || texture == 255) { continue; } ns->spawn.equipment.Slot[i].Material = texture; } } memset(ns->spawn.set_to_0xFF, 0xFF, sizeof(ns->spawn.set_to_0xFF)); if(IsNPC() && IsDestructibleObject()) { ns->spawn.DestructibleObject = true; // Changing the first string made it vanish, so it has some significance. 
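		// Note: if lastname is a fixed-size char array (as the strn0cpy calls in the
		// constructor suggest), the null check below is always true after array-to-
		// pointer decay; it only guards a pointer-typed lastname.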
		if (lastname)
			sprintf(ns->spawn.DestructibleModel, "%s", lastname);

		// Changing the second string made no visible difference
		sprintf(ns->spawn.DestructibleName2, "%s", ns->spawn.name);
		// Putting a string in the final one that was previously empty had no visible effect.
		ns->spawn.DestructibleString[0] = '\0';

		// Sets damage appearance level of the object.
		ns->spawn.DestructibleAppearance = luclinface; // Was 0x00000000
		//ns->spawn.DestructibleAppearance = static_cast<EmuAppearance>(_appearance);
		// #appearance 44 1 makes it jump but no visible damage
		// #appearance 44 2 makes it look completely broken but still visible
		// #appearance 44 3 makes it jump but no visible difference to 3
		// #appearance 44 4 makes it disappear altogether
		// #appearance 44 5 makes the client crash.

		ns->spawn.DestructibleUnk1 = 0x00000224; // Was 0x000001f5;

		// These next 4 are mostly always sequential
		// Originally they were 633, 634, 635, 636
		// Changing them all to 633 - no visible effect.
		// Changing them all to 636 - no visible effect.
		// Reversing the order of these four numbers and then using #appearance again had no visible change.
		// Setting these four ids to zero had no visible effect when the catapult spawned, nor when #appearance was used.
		ns->spawn.DestructibleID1 = 1968;
		ns->spawn.DestructibleID2 = 1969;
		ns->spawn.DestructibleID3 = 1970;
		ns->spawn.DestructibleID4 = 1971;

		// Next one was originally 0x1ce45008, changing it to 0x00000000 made no visible difference
		ns->spawn.DestructibleUnk2 = 0x13f79d00;
		// Next one was originally 0x1a68fe30, changing it to 0x00000000 made no visible difference
		ns->spawn.DestructibleUnk3 = 0x00000000;
		// Next one was already 0x00000000
		ns->spawn.DestructibleUnk4 = 0x13f79d58;
		// Next one was originally 0x005a69ec, changing it to 0x00000000 made no visible difference.
		ns->spawn.DestructibleUnk5 = 0x13c55b00;
		// Next one was originally 0x1a68fe30, changing it to 0x00000000 made no visible difference.
		ns->spawn.DestructibleUnk6 = 0x00128860;
		// Next one was originally 0x0059de6d, changing it to 0x00000000 made no visible difference.
		ns->spawn.DestructibleUnk7 = 0x005a8f66;
		// Next one was originally 0x00000201, changing it to 0x00000000 made no visible difference.
		// For the Minohten tents, 0x00000000 had them up in the air, while 0x201 put them on the ground.
		// Changing it to 0x00000001 makes the tent sink into the ground.
		ns->spawn.DestructibleUnk8 = 0x01;       // Needs to be 1 for tents?
		ns->spawn.DestructibleUnk9 = 0x00000002; // Needs to be 2 for tents?

		ns->spawn.flymode = 0;
	}

	if (RuleB(Character, AllowCrossClassTrainers) && ForWho) {
		if (ns->spawn.class_ >= WARRIORGM && ns->spawn.class_ <= BERSERKERGM) {
			int trainer_class = WARRIORGM + (ForWho->GetClass() - 1);
			ns->spawn.class_ = trainer_class;
		}
	}
}

void Mob::CreateDespawnPacket(EQApplicationPacket* app, bool Decay)
{
	app->SetOpcode(OP_DeleteSpawn);
	app->size = sizeof(DeleteSpawn_Struct);
	app->pBuffer = new uchar[app->size];
	memset(app->pBuffer, 0, app->size);
	DeleteSpawn_Struct* ds = (DeleteSpawn_Struct*)app->pBuffer;
	ds->spawn_id = GetID();
	// The next field only applies to corpses. If 0, they vanish instantly, otherwise they 'decay'
	ds->Decay = Decay ?
1 : 0; } void Mob::CreateHPPacket(EQApplicationPacket* app) { this->IsFullHP=(current_hp>=max_hp); app->SetOpcode(OP_MobHealth); app->size = sizeof(SpawnHPUpdate_Struct2); app->pBuffer = new uchar[app->size]; memset(app->pBuffer, 0, sizeof(SpawnHPUpdate_Struct2)); SpawnHPUpdate_Struct2* ds = (SpawnHPUpdate_Struct2*)app->pBuffer; ds->spawn_id = GetID(); // they don't need to know the real hp ds->hp = (int)GetHPRatio(); // hp event if (IsNPC() && (GetNextHPEvent() > 0)) { if (ds->hp < GetNextHPEvent()) { char buf[10]; snprintf(buf, 9, "%i", GetNextHPEvent()); buf[9] = '\0'; SetNextHPEvent(-1); parse->EventNPC(EVENT_HP, CastToNPC(), nullptr, buf, 0); } } if (IsNPC() && (GetNextIncHPEvent() > 0)) { if (ds->hp > GetNextIncHPEvent()) { char buf[10]; snprintf(buf, 9, "%i", GetNextIncHPEvent()); buf[9] = '\0'; SetNextIncHPEvent(-1); parse->EventNPC(EVENT_HP, CastToNPC(), nullptr, buf, 1); } } } void Mob::SendHPUpdate(bool skip_self /*= false*/, bool force_update_all /*= false*/) { /** * If our HP is different from last HP update call - let's update selves */ if (IsClient()) { // delay to allow the client to catch up on buff states if (max_hp != last_max_hp) { last_max_hp = max_hp; CastToClient()->hp_self_update_throttle_timer.Trigger(); return; } if (current_hp != last_hp || force_update_all) { /** * This is to prevent excessive packet sending under trains/fast combat */ if (this->CastToClient()->hp_self_update_throttle_timer.Check() || force_update_all) { Log(Logs::General, Logs::HPUpdate, "Mob::SendHPUpdate :: Update HP of self (%s) HP: %i/%i last: %i/%i skip_self: %s", this->GetCleanName(), current_hp, max_hp, last_hp, last_max_hp, (skip_self ? "true" : "false") ); if (!skip_self || this->CastToClient()->ClientVersion() >= EQ::versions::ClientVersion::SoD) { auto client_packet = new EQApplicationPacket(OP_HPUpdate, sizeof(SpawnHPUpdate_Struct)); auto *hp_packet_client = (SpawnHPUpdate_Struct *) client_packet->pBuffer; hp_packet_client->cur_hp = static_cast<uint32>(CastToClient()->GetHP() - itembonuses.HP); hp_packet_client->spawn_id = GetID(); hp_packet_client->max_hp = CastToClient()->GetMaxHP() - itembonuses.HP; CastToClient()->QueuePacket(client_packet); safe_delete(client_packet); ResetHPUpdateTimer(); } /** * Used to check if HP has changed to update self next round */ last_hp = current_hp; } } } auto current_hp_percent = GetIntHPRatio(); Log(Logs::General, Logs::HPUpdate, "Mob::SendHPUpdate :: SendHPUpdate %s HP is %i last %i", this->GetCleanName(), current_hp_percent, last_hp_percent); if (current_hp_percent == last_hp_percent && !force_update_all) { Log(Logs::General, Logs::HPUpdate, "Mob::SendHPUpdate :: Same HP - skipping update"); ResetHPUpdateTimer(); return; } else { if (IsClient() && RuleB(Character, MarqueeHPUpdates)) { this->CastToClient()->SendHPUpdateMarquee(); } Log(Logs::General, Logs::HPUpdate, "Mob::SendHPUpdate :: HP Changed - Send update"); last_hp_percent = current_hp_percent; } EQApplicationPacket hp_packet; Group *group = nullptr; CreateHPPacket(&hp_packet); /** * Update those who have us targeted */ entity_list.QueueClientsByTarget(this, &hp_packet, false, 0, false, true, EQ::versions::maskAllClients); /** * Update those who have us on x-target */ entity_list.QueueClientsByXTarget(this, &hp_packet, false); /** * Update groups using Group LAA health name tag counter */ entity_list.QueueToGroupsForNPCHealthAA(this, &hp_packet); /** * Group */ if (IsGrouped()) { group = entity_list.GetGroupByMob(this); if (group) { group->SendHPPacketsFrom(this); } } /** * Raid */ if 
(IsClient()) { Raid *raid = entity_list.GetRaidByClient(CastToClient()); if (raid) { raid->SendHPManaEndPacketsFrom(this); } } /** * Pet */ if (GetOwner() && GetOwner()->IsClient()) { GetOwner()->CastToClient()->QueuePacket(&hp_packet, false); group = entity_list.GetGroupByClient(GetOwner()->CastToClient()); if (group) { group->SendHPPacketsFrom(this); } Raid *raid = entity_list.GetRaidByClient(GetOwner()->CastToClient()); if (raid) { raid->SendHPManaEndPacketsFrom(this); } } #ifdef BOTS if (GetOwner() && GetOwner()->IsBot() && GetOwner()->CastToBot()->GetBotOwner() && GetOwner()->CastToBot()->GetBotOwner()->IsClient()) { auto bot_owner = GetOwner()->CastToBot()->GetBotOwner()->CastToClient(); if (bot_owner) { bot_owner->QueuePacket(&hp_packet, false); group = entity_list.GetGroupByClient(bot_owner); if (group) { group->SendHPPacketsFrom(this); } Raid *raid = entity_list.GetRaidByClient(bot_owner); if (raid) { raid->SendHPManaEndPacketsFrom(this); } } } #endif if (GetPet() && GetPet()->IsClient()) { GetPet()->CastToClient()->QueuePacket(&hp_packet, false); } /** * Destructible objects */ if (IsNPC() && IsDestructibleObject()) { if (GetHPRatio() > 74) { if (GetAppearance() != eaStanding) { SendAppearancePacket(AT_DamageState, eaStanding); _appearance = eaStanding; } } else if (GetHPRatio() > 49) { if (GetAppearance() != eaSitting) { SendAppearancePacket(AT_DamageState, eaSitting); _appearance = eaSitting; } } else if (GetHPRatio() > 24) { if (GetAppearance() != eaCrouching) { SendAppearancePacket(AT_DamageState, eaCrouching); _appearance = eaCrouching; } } else if (GetHPRatio() > 0) { if (GetAppearance() != eaDead) { SendAppearancePacket(AT_DamageState, eaDead); _appearance = eaDead; } } else if (GetAppearance() != eaLooting) { SendAppearancePacket(AT_DamageState, eaLooting); _appearance = eaLooting; } } } void Mob::StopMoving() { StopNavigation(); if (moved) { moved = false; } } void Mob::StopMoving(float new_heading) { StopNavigation(); RotateTo(new_heading); if (moved) { moved = false; } } void Mob::SentPositionPacket(float dx, float dy, float dz, float dh, int anim, bool send_to_self) { EQApplicationPacket outapp(OP_ClientUpdate, sizeof(PlayerPositionUpdateServer_Struct)); PlayerPositionUpdateServer_Struct *spu = (PlayerPositionUpdateServer_Struct*)outapp.pBuffer; memset(spu, 0x00, sizeof(PlayerPositionUpdateServer_Struct)); spu->spawn_id = GetID(); spu->x_pos = FloatToEQ19(GetX()); spu->y_pos = FloatToEQ19(GetY()); spu->z_pos = FloatToEQ19(GetZ()); spu->heading = FloatToEQ12(GetHeading()); spu->delta_x = FloatToEQ13(dx); spu->delta_y = FloatToEQ13(dy); spu->delta_z = FloatToEQ13(dz); spu->delta_heading = FloatToEQ10(dh); spu->animation = anim; entity_list.QueueClients(this, &outapp, send_to_self == false, false); } // this is for SendPosition() void Mob::MakeSpawnUpdateNoDelta(PlayerPositionUpdateServer_Struct *spu) { memset(spu, 0xff, sizeof(PlayerPositionUpdateServer_Struct)); spu->spawn_id = GetID(); spu->x_pos = FloatToEQ19(m_Position.x); spu->y_pos = FloatToEQ19(m_Position.y); spu->z_pos = FloatToEQ19(m_Position.z); spu->delta_x = FloatToEQ13(0); spu->delta_y = FloatToEQ13(0); spu->delta_z = FloatToEQ13(0); spu->heading = FloatToEQ12(m_Position.w); spu->animation = 0; spu->delta_heading = FloatToEQ10(0); } // this is for SendPosUpdate() void Mob::MakeSpawnUpdate(PlayerPositionUpdateServer_Struct* spu) { spu->spawn_id = GetID(); spu->x_pos = FloatToEQ19(m_Position.x); spu->y_pos = FloatToEQ19(m_Position.y); spu->z_pos = FloatToEQ19(m_Position.z); spu->delta_x = 
FloatToEQ13(m_Delta.x);
	spu->delta_y = FloatToEQ13(m_Delta.y);
	spu->delta_z = FloatToEQ13(m_Delta.z);
	spu->heading = FloatToEQ12(m_Position.w);
#ifdef BOTS
	if (this->IsClient() || this->IsBot())
#else
	if (this->IsClient())
#endif
		spu->animation = animation;
	else
		spu->animation = pRunAnimSpeed; //animation;
	spu->delta_heading = FloatToEQ10(m_Delta.w);
}

void Mob::ShowStats(Client* client)
{
	if (IsClient()) {
		CastToClient()->SendStatsWindow(client, RuleB(Character, UseNewStatsWindow));
	}
	else if (IsCorpse()) {
		if (IsPlayerCorpse()) {
			client->Message(Chat::White, " CharID: %i PlayerCorpse: %i", CastToCorpse()->GetCharID(), CastToCorpse()->GetCorpseDBID());
		}
		else {
			client->Message(Chat::White, " NPCCorpse: %i", GetID());
		}
	}
	else {
		client->Message(Chat::White, " Level: %i AC: %i Class: %i Size: %1.1f Haste: %i", GetLevel(), ACSum(), GetClass(), GetSize(), GetHaste());
		client->Message(Chat::White, " HP: %i Max HP: %i", GetHP(), GetMaxHP());
		client->Message(Chat::White, " Mana: %i Max Mana: %i", GetMana(), GetMaxMana());
		client->Message(Chat::White, " Total ATK: %i Worn/Spell ATK (Cap %i): %i", GetATK(), RuleI(Character, ItemATKCap), GetATKBonus());
		client->Message(Chat::White, " STR: %i STA: %i DEX: %i AGI: %i INT: %i WIS: %i CHA: %i", GetSTR(), GetSTA(), GetDEX(), GetAGI(), GetINT(), GetWIS(), GetCHA());
		client->Message(Chat::White, " MR: %i PR: %i FR: %i CR: %i DR: %i Corruption: %i PhR: %i", GetMR(), GetPR(), GetFR(), GetCR(), GetDR(), GetCorrup(), GetPhR());
		client->Message(Chat::White, " Race: %i BaseRace: %i Texture: %i HelmTexture: %i Gender: %i BaseGender: %i", GetRace(), GetBaseRace(), GetTexture(), GetHelmTexture(), GetGender(), GetBaseGender());
		if (client->Admin() >= 100)
			client->Message(Chat::White, " EntityID: %i PetID: %i OwnerID: %i AIControlled: %i Targeted: %i", GetID(), GetPetID(), GetOwnerID(), IsAIControlled(), targeted);

		if (IsNPC()) {
			NPC *n = CastToNPC();
			uint32 spawngroupid = 0;
			if (n->respawn2 != 0)
				spawngroupid = n->respawn2->SpawnGroupID();
			client->Message(Chat::White, " NPCID: %u SpawnGroupID: %u Grid: %i LootTable: %u FactionID: %i SpellsID: %u ", GetNPCTypeID(), spawngroupid, n->GetGrid(), n->GetLoottableID(), n->GetNPCFactionID(), n->GetNPCSpellsID());
			client->Message(Chat::White, " Accuracy: %i MerchantID: %i EmoteID: %i Runspeed: %.3f Walkspeed: %.3f", n->GetAccuracyRating(), n->MerchantType, n->GetEmoteID(), static_cast<float>(0.025f * n->GetRunspeed()), static_cast<float>(0.025f * n->GetWalkspeed()));
			n->QueryLoot(client);
		}
		if (IsAIControlled()) {
			client->Message(Chat::White, " AggroRange: %1.0f AssistRange: %1.0f", GetAggroRange(), GetAssistRange());
		}

		client->Message(Chat::White, " compute_tohit: %i TotalToHit: %i", compute_tohit(EQ::skills::SkillHandtoHand), GetTotalToHit(EQ::skills::SkillHandtoHand, 0));
		client->Message(Chat::White, " compute_defense: %i TotalDefense: %i", compute_defense(), GetTotalDefense());
		client->Message(Chat::White, " offense: %i mitigation ac: %i", offense(EQ::skills::SkillHandtoHand), GetMitigationAC());
	}
}

void Mob::DoAnim(const int animnum, int type, bool ackreq, eqFilterType filter)
{
	if (!attack_anim_timer.Check()) {
		return;
	}

	auto outapp = new EQApplicationPacket(OP_Animation, sizeof(Animation_Struct));
	auto *anim = (Animation_Struct *) outapp->pBuffer;
	anim->spawnid = GetID();

	if (type == 0) {
		anim->action = animnum;
		anim->speed = 10;
	}
	else {
		anim->action = animnum;
		anim->speed = type;
	}

	entity_list.QueueCloseClients(
		this,                /* Sender */
		outapp,              /* Packet */
		false,               /* Ignore Sender */
		RuleI(Range, Anims),
		0,                   /* Skip this mob */
ackreq, /* Packet ACK */ filter /* eqFilterType filter */ ); safe_delete(outapp); } void Mob::ShowBuffs(Client* client) { if(SPDAT_RECORDS <= 0) return; client->Message(Chat::White, "Buffs on: %s", this->GetName()); uint32 i; uint32 buff_count = GetMaxTotalSlots(); for (i=0; i < buff_count; i++) { if (buffs[i].spellid != SPELL_UNKNOWN) { if (spells[buffs[i].spellid].buffdurationformula == DF_Permanent) client->Message(Chat::White, " %i: %s: Permanent", i, spells[buffs[i].spellid].name); else client->Message(Chat::White, " %i: %s: %i tics left", i, spells[buffs[i].spellid].name, buffs[i].ticsremaining); } } if (IsClient()){ client->Message(Chat::White, "itembonuses:"); client->Message(Chat::White, "Atk:%i Ac:%i HP(%i):%i Mana:%i", itembonuses.ATK, itembonuses.AC, itembonuses.HPRegen, itembonuses.HP, itembonuses.Mana); client->Message(Chat::White, "Str:%i Sta:%i Dex:%i Agi:%i Int:%i Wis:%i Cha:%i", itembonuses.STR,itembonuses.STA,itembonuses.DEX,itembonuses.AGI,itembonuses.INT,itembonuses.WIS,itembonuses.CHA); client->Message(Chat::White, "SvMagic:%i SvFire:%i SvCold:%i SvPoison:%i SvDisease:%i", itembonuses.MR,itembonuses.FR,itembonuses.CR,itembonuses.PR,itembonuses.DR); client->Message(Chat::White, "DmgShield:%i Haste:%i", itembonuses.DamageShield, itembonuses.haste ); client->Message(Chat::White, "spellbonuses:"); client->Message(Chat::White, "Atk:%i Ac:%i HP(%i):%i Mana:%i", spellbonuses.ATK, spellbonuses.AC, spellbonuses.HPRegen, spellbonuses.HP, spellbonuses.Mana); client->Message(Chat::White, "Str:%i Sta:%i Dex:%i Agi:%i Int:%i Wis:%i Cha:%i", spellbonuses.STR,spellbonuses.STA,spellbonuses.DEX,spellbonuses.AGI,spellbonuses.INT,spellbonuses.WIS,spellbonuses.CHA); client->Message(Chat::White, "SvMagic:%i SvFire:%i SvCold:%i SvPoison:%i SvDisease:%i", spellbonuses.MR,spellbonuses.FR,spellbonuses.CR,spellbonuses.PR,spellbonuses.DR); client->Message(Chat::White, "DmgShield:%i Haste:%i", spellbonuses.DamageShield, spellbonuses.haste ); } } void Mob::ShowBuffList(Client* client) { if(SPDAT_RECORDS <= 0) return; client->Message(Chat::White, "Buffs on: %s", this->GetCleanName()); uint32 i; uint32 buff_count = GetMaxTotalSlots(); for (i = 0; i < buff_count; i++) { if (buffs[i].spellid != SPELL_UNKNOWN) { if (spells[buffs[i].spellid].buffdurationformula == DF_Permanent) client->Message(Chat::White, " %i: %s: Permanent", i, spells[buffs[i].spellid].name); else client->Message(Chat::White, " %i: %s: %i tics left", i, spells[buffs[i].spellid].name, buffs[i].ticsremaining); } } } void Mob::GMMove(float x, float y, float z, float heading, bool SendUpdate) { m_Position.x = x; m_Position.y = y; m_Position.z = z; SetHeading(heading); mMovementManager->SendCommandToClients(this, 0.0, 0.0, 0.0, 0.0, 0, ClientRangeAny); if (IsNPC()) { CastToNPC()->SaveGuardSpot(glm::vec4(x, y, z, heading)); } } void Mob::SendIllusionPacket( uint16 in_race, uint8 in_gender, uint8 in_texture, uint8 in_helmtexture, uint8 in_haircolor, uint8 in_beardcolor, uint8 in_eyecolor1, uint8 in_eyecolor2, uint8 in_hairstyle, uint8 in_luclinface, uint8 in_beard, uint8 in_aa_title, uint32 in_drakkin_heritage, uint32 in_drakkin_tattoo, uint32 in_drakkin_details, float in_size ) { uint8 new_texture = in_texture; uint8 new_helmtexture = in_helmtexture; uint8 new_haircolor; uint8 new_beardcolor; uint8 new_eyecolor1; uint8 new_eyecolor2; uint8 new_hairstyle; uint8 new_luclinface; uint8 new_beard; uint8 new_aa_title; uint32 new_drakkin_heritage; uint32 new_drakkin_tattoo; uint32 new_drakkin_details; race = in_race; if (race == 0) { race = 
(use_model) ? use_model : GetBaseRace(); } if (in_gender != 0xFF) { gender = in_gender; } else { gender = (in_race) ? GetDefaultGender(race, gender) : GetBaseGender(); } if (in_texture == 0xFF && !IsPlayerRace(in_race)) { new_texture = GetTexture(); } if (in_helmtexture == 0xFF && !IsPlayerRace(in_race)) { new_helmtexture = GetHelmTexture(); } new_haircolor = (in_haircolor == 0xFF) ? GetHairColor() : in_haircolor; new_beardcolor = (in_beardcolor == 0xFF) ? GetBeardColor() : in_beardcolor; new_eyecolor1 = (in_eyecolor1 == 0xFF) ? GetEyeColor1() : in_eyecolor1; new_eyecolor2 = (in_eyecolor2 == 0xFF) ? GetEyeColor2() : in_eyecolor2; new_hairstyle = (in_hairstyle == 0xFF) ? GetHairStyle() : in_hairstyle; new_luclinface = (in_luclinface == 0xFF) ? GetLuclinFace() : in_luclinface; new_beard = (in_beard == 0xFF) ? GetBeard() : in_beard; new_drakkin_heritage = (in_drakkin_heritage == 0xFFFFFFFF) ? GetDrakkinHeritage() : in_drakkin_heritage; new_drakkin_tattoo = (in_drakkin_tattoo == 0xFFFFFFFF) ? GetDrakkinTattoo() : in_drakkin_tattoo; new_drakkin_details = (in_drakkin_details == 0xFFFFFFFF) ? GetDrakkinDetails() : in_drakkin_details; new_aa_title = in_aa_title; // Reset features to Base from the Player Profile if (IsClient() && in_race == 0) { race = CastToClient()->GetBaseRace(); gender = CastToClient()->GetBaseGender(); new_texture = texture = 0xFF; new_helmtexture = helmtexture = 0xFF; new_haircolor = haircolor = CastToClient()->GetBaseHairColor(); new_beardcolor = beardcolor = CastToClient()->GetBaseBeardColor(); new_eyecolor1 = eyecolor1 = CastToClient()->GetBaseEyeColor(); new_eyecolor2 = eyecolor2 = CastToClient()->GetBaseEyeColor(); new_hairstyle = hairstyle = CastToClient()->GetBaseHairStyle(); new_luclinface = luclinface = CastToClient()->GetBaseFace(); new_beard = beard = CastToClient()->GetBaseBeard(); new_aa_title = aa_title = 0xFF; new_drakkin_heritage = drakkin_heritage = CastToClient()->GetBaseHeritage(); new_drakkin_tattoo = drakkin_tattoo = CastToClient()->GetBaseTattoo(); new_drakkin_details = drakkin_details = CastToClient()->GetBaseDetails(); switch (race) { case OGRE: size = 9; break; case TROLL: size = 8; break; case VAHSHIR: case BARBARIAN: size = 7; break; case HALF_ELF: case WOOD_ELF: case DARK_ELF: case FROGLOK: size = 5; break; case DWARF: size = 4; break; case HALFLING: case GNOME: size = 3; break; default: size = 6; break; } } // update internal values for mob size = (in_size <= 0.0f) ? 
GetSize() : in_size; texture = new_texture; helmtexture = new_helmtexture; haircolor = new_haircolor; beardcolor = new_beardcolor; eyecolor1 = new_eyecolor1; eyecolor2 = new_eyecolor2; hairstyle = new_hairstyle; luclinface = new_luclinface; beard = new_beard; drakkin_heritage = new_drakkin_heritage; drakkin_tattoo = new_drakkin_tattoo; drakkin_details = new_drakkin_details; auto outapp = new EQApplicationPacket(OP_Illusion, sizeof(Illusion_Struct)); Illusion_Struct *is = (Illusion_Struct *) outapp->pBuffer; is->spawnid = GetID(); strcpy(is->charname, GetCleanName()); is->race = race; is->gender = gender; is->texture = new_texture; is->helmtexture = new_helmtexture; is->haircolor = new_haircolor; is->beardcolor = new_beardcolor; is->beard = new_beard; is->eyecolor1 = new_eyecolor1; is->eyecolor2 = new_eyecolor2; is->hairstyle = new_hairstyle; is->face = new_luclinface; is->drakkin_heritage = new_drakkin_heritage; is->drakkin_tattoo = new_drakkin_tattoo; is->drakkin_details = new_drakkin_details; is->size = size; entity_list.QueueClients(this, outapp); safe_delete(outapp); /* Refresh armor and tints after send illusion packet */ SendArmorAppearance(); LogSpells( "Illusion: Race [{}] Gender [{}] Texture [{}] HelmTexture [{}] HairColor [{}] BeardColor [{}] EyeColor1 [{}] EyeColor2 [{}] HairStyle [{}] Face [{}] DrakkinHeritage [{}] DrakkinTattoo [{}] DrakkinDetails [{}] Size [{}]", race, gender, new_texture, new_helmtexture, new_haircolor, new_beardcolor, new_eyecolor1, new_eyecolor2, new_hairstyle, new_luclinface, new_drakkin_heritage, new_drakkin_tattoo, new_drakkin_details, size ); } bool Mob::RandomizeFeatures(bool send_illusion, bool set_variables) { if (IsPlayerRace(GetRace())) { uint8 Gender = GetGender(); uint8 Texture = 0xFF; uint8 HelmTexture = 0xFF; uint8 HairColor = 0xFF; uint8 BeardColor = 0xFF; uint8 EyeColor1 = 0xFF; uint8 EyeColor2 = 0xFF; uint8 HairStyle = 0xFF; uint8 LuclinFace = 0xFF; uint8 Beard = 0xFF; uint32 DrakkinHeritage = 0xFFFFFFFF; uint32 DrakkinTattoo = 0xFFFFFFFF; uint32 DrakkinDetails = 0xFFFFFFFF; // Set some common feature settings EyeColor1 = zone->random.Int(0, 9); EyeColor2 = zone->random.Int(0, 9); LuclinFace = zone->random.Int(0, 7); // Adjust all settings based on the min and max for each feature of each race and gender switch (GetRace()) { case HUMAN: HairColor = zone->random.Int(0, 19); if (Gender == MALE) { BeardColor = HairColor; HairStyle = zone->random.Int(0, 3); Beard = zone->random.Int(0, 5); } if (Gender == FEMALE) { HairStyle = zone->random.Int(0, 2); } break; case BARBARIAN: HairColor = zone->random.Int(0, 19); LuclinFace = zone->random.Int(0, 87); if (Gender == MALE) { BeardColor = HairColor; HairStyle = zone->random.Int(0, 3); Beard = zone->random.Int(0, 5); } if (Gender == FEMALE) { HairStyle = zone->random.Int(0, 2); } break; case ERUDITE: if (Gender == MALE) { BeardColor = zone->random.Int(0, 19); Beard = zone->random.Int(0, 5); LuclinFace = zone->random.Int(0, 57); } if (Gender == FEMALE) { LuclinFace = zone->random.Int(0, 87); } break; case WOOD_ELF: HairColor = zone->random.Int(0, 19); if (Gender == MALE) { HairStyle = zone->random.Int(0, 3); } if (Gender == FEMALE) { HairStyle = zone->random.Int(0, 2); } break; case HIGH_ELF: HairColor = zone->random.Int(0, 14); if (Gender == MALE) { HairStyle = zone->random.Int(0, 3); LuclinFace = zone->random.Int(0, 37); BeardColor = HairColor; } if (Gender == FEMALE) { HairStyle = zone->random.Int(0, 2); } break; case DARK_ELF: HairColor = zone->random.Int(13, 18); if (Gender == MALE) { HairStyle = 
zone->random.Int(0, 3);
				LuclinFace = zone->random.Int(0, 37);
				BeardColor = HairColor;
			}
			if (Gender == FEMALE) {
				HairStyle = zone->random.Int(0, 2);
			}
			break;
		case HALF_ELF:
			HairColor = zone->random.Int(0, 19);
			if (Gender == MALE) {
				HairStyle = zone->random.Int(0, 3);
				LuclinFace = zone->random.Int(0, 37);
				BeardColor = HairColor;
			}
			if (Gender == FEMALE) {
				HairStyle = zone->random.Int(0, 2);
			}
			break;
		case DWARF:
			HairColor = zone->random.Int(0, 19);
			BeardColor = HairColor;
			if (Gender == MALE) {
				HairStyle = zone->random.Int(0, 3);
				Beard = zone->random.Int(0, 5);
			}
			if (Gender == FEMALE) {
				HairStyle = zone->random.Int(0, 2);
				LuclinFace = zone->random.Int(0, 17);
			}
			break;
		case TROLL:
			EyeColor1 = zone->random.Int(0, 10);
			EyeColor2 = zone->random.Int(0, 10);
			if (Gender == FEMALE) {
				HairStyle = zone->random.Int(0, 3);
				HairColor = zone->random.Int(0, 23);
			}
			break;
		case OGRE:
			if (Gender == FEMALE) {
				HairStyle = zone->random.Int(0, 3);
				HairColor = zone->random.Int(0, 23);
			}
			break;
		case HALFLING:
			HairColor = zone->random.Int(0, 19);
			if (Gender == MALE) {
				BeardColor = HairColor;
				HairStyle = zone->random.Int(0, 3);
				Beard = zone->random.Int(0, 5);
			}
			if (Gender == FEMALE) {
				HairStyle = zone->random.Int(0, 2);
			}
			break;
		case IKSAR:
		case VAHSHIR:
			break;
		case FROGLOK:
			LuclinFace = zone->random.Int(0, 9);
			break;
		case DRAKKIN:
			HairColor = zone->random.Int(0, 3);
			BeardColor = HairColor;
			EyeColor1 = zone->random.Int(0, 11);
			EyeColor2 = zone->random.Int(0, 11);
			LuclinFace = zone->random.Int(0, 6);
			DrakkinHeritage = zone->random.Int(0, 6);
			DrakkinTattoo = zone->random.Int(0, 7);
			DrakkinDetails = zone->random.Int(0, 7);
			if (Gender == MALE) {
				Beard = zone->random.Int(0, 12);
				HairStyle = zone->random.Int(0, 8);
			}
			if (Gender == FEMALE) {
				Beard = zone->random.Int(0, 3);
				HairStyle = zone->random.Int(0, 7);
			}
			break;
		default:
			break;
		}

		if (set_variables) {
			haircolor = HairColor;
			beardcolor = BeardColor;
			eyecolor1 = EyeColor1;
			eyecolor2 = EyeColor2;
			hairstyle = HairStyle;
			luclinface = LuclinFace;
			beard = Beard;
			drakkin_heritage = DrakkinHeritage;
			drakkin_tattoo = DrakkinTattoo;
			drakkin_details = DrakkinDetails;
		}

		if (send_illusion) {
			SendIllusionPacket(GetRace(), Gender, Texture, HelmTexture, HairColor, BeardColor,
				EyeColor1, EyeColor2, HairStyle, LuclinFace, Beard, 0xFF,
				DrakkinHeritage, DrakkinTattoo, DrakkinDetails);
		}

		return true;
	}
	return false;
}

bool Mob::IsPlayerRace(uint16 in_race)
{
	if ((in_race >= HUMAN && in_race <= GNOME) || in_race == IKSAR || in_race == VAHSHIR || in_race == FROGLOK || in_race == DRAKKIN) {
		return true;
	}

	return false;
}

uint16 Mob::GetFactionRace()
{
	uint16 current_race = GetRace();
	if (IsPlayerRace(current_race) || current_race == TREE || current_race == MINOR_ILL_OBJ) {
		return current_race;
	}
	else {
		return (GetBaseRace());
	}
}

uint8 Mob::GetDefaultGender(uint16 in_race, uint8 in_gender)
{
	if (Mob::IsPlayerRace(in_race) || in_race == 15 || in_race == 50 || in_race == 57 ||
		in_race == 70 || in_race == 98 || in_race == 118 || in_race == 562) {
		if (in_gender >= 2) {
			// Male default for PC Races
			return 0;
		}
		else
			return in_gender;
	}
	else if (in_race == 44 || in_race == 52 || in_race == 55 || in_race == 65 ||
		in_race == 67 || in_race == 88 || in_race == 117 || in_race == 127 ||
		in_race == 77 || in_race == 78 || in_race == 81 || in_race == 90 ||
		in_race == 92 || in_race == 93 ||
in_race == 94 || in_race == 106 || in_race == 112 || in_race == 471) {
		// Male only races
		return 0;
	}
	else if (in_race == 25 || in_race == 56) {
		// Female only races
		return 1;
	}
	else {
		// Neutral default for NPC Races
		return 2;
	}
}

void Mob::SendAppearancePacket(uint32 type, uint32 value, bool WholeZone, bool iIgnoreSelf, Client *specific_target)
{
	if (!GetID())
		return;
	auto outapp = new EQApplicationPacket(OP_SpawnAppearance, sizeof(SpawnAppearance_Struct));
	SpawnAppearance_Struct* appearance = (SpawnAppearance_Struct*)outapp->pBuffer;
	appearance->spawn_id = this->GetID();
	appearance->type = type;
	appearance->parameter = value;
	if (WholeZone)
		entity_list.QueueClients(this, outapp, iIgnoreSelf);
	else if (specific_target != nullptr)
		specific_target->QueuePacket(outapp, false, Client::CLIENT_CONNECTED);
	else if (this->IsClient())
		this->CastToClient()->QueuePacket(outapp, false, Client::CLIENT_CONNECTED);
	safe_delete(outapp);
}

void Mob::SendLevelAppearance()
{
	auto outapp = new EQApplicationPacket(OP_LevelAppearance, sizeof(LevelAppearance_Struct));
	LevelAppearance_Struct* la = (LevelAppearance_Struct*)outapp->pBuffer;
	la->parm1 = 0x4D;
	la->parm2 = la->parm1 + 1;
	la->parm3 = la->parm2 + 1;
	la->parm4 = la->parm3 + 1;
	la->parm5 = la->parm4 + 1;
	la->spawn_id = GetID();
	la->value1a = 1;
	la->value2a = 2;
	la->value3a = 1;
	la->value3b = 1;
	la->value4a = 1;
	la->value4b = 1;
	la->value5a = 2;
	entity_list.QueueCloseClients(this, outapp);
	safe_delete(outapp);
}

void Mob::SendStunAppearance()
{
	auto outapp = new EQApplicationPacket(OP_LevelAppearance, sizeof(LevelAppearance_Struct));
	LevelAppearance_Struct* la = (LevelAppearance_Struct*)outapp->pBuffer;
	la->parm1 = 58;
	la->parm2 = 60;
	la->spawn_id = GetID();
	la->value1a = 2;
	la->value1b = 0;
	la->value2a = 2;
	la->value2b = 0;
	entity_list.QueueCloseClients(this, outapp);
	safe_delete(outapp);
}

void Mob::SendAppearanceEffect(uint32 parm1, uint32 parm2, uint32 parm3, uint32 parm4, uint32 parm5, Client *specific_target)
{
	auto outapp = new EQApplicationPacket(OP_LevelAppearance, sizeof(LevelAppearance_Struct));
	LevelAppearance_Struct* la = (LevelAppearance_Struct*)outapp->pBuffer;
	la->spawn_id = GetID();
	la->parm1 = parm1;
	la->parm2 = parm2;
	la->parm3 = parm3;
	la->parm4 = parm4;
	la->parm5 = parm5;
	// Note that setting the b values to 0 will disable the related effect from the corresponding parameter.
	// Setting the a value appears to have no effect at all.
	la->value1a = 1;
	la->value1b = 1;
	la->value2a = 1;
	la->value2b = 1;
	la->value3a = 1;
	la->value3b = 1;
	la->value4a = 1;
	la->value4b = 1;
	la->value5a = 1;
	la->value5b = 1;
	if (specific_target == nullptr) {
		entity_list.QueueClients(this, outapp);
	}
	else if (specific_target->IsClient()) {
		specific_target->CastToClient()->QueuePacket(outapp, false);
	}
	safe_delete(outapp);
}

void Mob::SendTargetable(bool on, Client *specific_target)
{
	auto outapp = new EQApplicationPacket(OP_Untargetable, sizeof(Untargetable_Struct));
	Untargetable_Struct *ut = (Untargetable_Struct*)outapp->pBuffer;
	ut->id = GetID();
	ut->targetable_flag = on == true ?
1 : 0; if(specific_target == nullptr) { entity_list.QueueClients(this, outapp); } else if (specific_target->IsClient()) { specific_target->CastToClient()->QueuePacket(outapp); } safe_delete(outapp); } void Mob::CameraEffect(uint32 duration, uint32 intensity, Client *c, bool global) { if(global == true) { auto pack = new ServerPacket(ServerOP_CameraShake, sizeof(ServerCameraShake_Struct)); ServerCameraShake_Struct* scss = (ServerCameraShake_Struct*) pack->pBuffer; scss->duration = duration; scss->intensity = intensity; worldserver.SendPacket(pack); safe_delete(pack); return; } auto outapp = new EQApplicationPacket(OP_CameraEffect, sizeof(Camera_Struct)); Camera_Struct* cs = (Camera_Struct*) outapp->pBuffer; cs->duration = duration; // Duration in milliseconds cs->intensity = ((intensity * 6710886) + 1023410176); // Intensity ranges from 1023410176 to 1090519040, so simplify it from 0 to 10. if(c) c->QueuePacket(outapp, false, Client::CLIENT_CONNECTED); else entity_list.QueueClients(this, outapp); safe_delete(outapp); } void Mob::SendSpellEffect(uint32 effectid, uint32 duration, uint32 finish_delay, bool zone_wide, uint32 unk020, bool perm_effect, Client *c) { auto outapp = new EQApplicationPacket(OP_SpellEffect, sizeof(SpellEffect_Struct)); SpellEffect_Struct* se = (SpellEffect_Struct*) outapp->pBuffer; se->EffectID = effectid; // ID of the Particle Effect se->EntityID = GetID(); se->EntityID2 = GetID(); // EntityID again se->Duration = duration; // In Milliseconds se->FinishDelay = finish_delay; // Seen 0 se->Unknown020 = unk020; // Seen 3000 se->Unknown024 = 1; // Seen 1 for SoD se->Unknown025 = 1; // Seen 1 for Live se->Unknown026 = 0; // Seen 1157 if(c) c->QueuePacket(outapp, false, Client::CLIENT_CONNECTED); else if(zone_wide) entity_list.QueueClients(this, outapp); else entity_list.QueueCloseClients(this, outapp); safe_delete(outapp); if (perm_effect) { if(!IsNimbusEffectActive(effectid)) { SetNimbusEffect(effectid); } } } void Mob::TempName(const char *newname) { char temp_name[64]; char old_name[64]; strn0cpy(old_name, GetName(), 64); if(newname) strn0cpy(temp_name, newname, 64); // Reset the name to the original if left null. if(!newname) { strn0cpy(temp_name, GetOrigName(), 64); SetName(temp_name); //CleanMobName(GetName(), temp_name); strn0cpy(temp_name, GetCleanName(), 64); } // Remove Numbers before making name unique EntityList::RemoveNumbers(temp_name); // Make the new name unique and set it entity_list.MakeNameUnique(temp_name); // Send the new name to all clients auto outapp = new EQApplicationPacket(OP_MobRename, sizeof(MobRename_Struct)); MobRename_Struct* mr = (MobRename_Struct*) outapp->pBuffer; strn0cpy(mr->old_name, old_name, 64); strn0cpy(mr->old_name_again, old_name, 64); strn0cpy(mr->new_name, temp_name, 64); mr->unknown192 = 0; mr->unknown196 = 1; entity_list.QueueClients(this, outapp); safe_delete(outapp); SetName(temp_name); } void Mob::SetTargetable(bool on) { if(m_targetable != on) { m_targetable = on; SendTargetable(on); } } const int32& Mob::SetMana(int32 amount) { CalcMaxMana(); int32 mmana = GetMaxMana(); current_mana = amount < 0 ? 0 : (amount > mmana ? 
mmana : amount); /* if(IsClient()) LogFile->write(EQEMuLog::Debug, "Setting mana for %s to %d (%4.1f%%)", GetName(), amount, GetManaRatio()); */ return current_mana; } void Mob::SetAppearance(EmuAppearance app, bool iIgnoreSelf) { if (_appearance == app) return; _appearance = app; SendAppearancePacket(AT_Anim, GetAppearanceValue(app), true, iIgnoreSelf); if (this->IsClient() && this->IsAIControlled()) SendAppearancePacket(AT_Anim, ANIM_FREEZE, false, false); } bool Mob::UpdateActiveLight() { uint8 old_light_level = m_Light.Level[EQ::lightsource::LightActive]; m_Light.Type[EQ::lightsource::LightActive] = 0; m_Light.Level[EQ::lightsource::LightActive] = 0; if (EQ::lightsource::IsLevelGreater((m_Light.Type[EQ::lightsource::LightInnate] & 0x0F), m_Light.Type[EQ::lightsource::LightActive])) { m_Light.Type[EQ::lightsource::LightActive] = m_Light.Type[EQ::lightsource::LightInnate]; } if (m_Light.Level[EQ::lightsource::LightEquipment] > m_Light.Level[EQ::lightsource::LightActive]) { m_Light.Type[EQ::lightsource::LightActive] = m_Light.Type[EQ::lightsource::LightEquipment]; } // limiter in property handler if (m_Light.Level[EQ::lightsource::LightSpell] > m_Light.Level[EQ::lightsource::LightActive]) { m_Light.Type[EQ::lightsource::LightActive] = m_Light.Type[EQ::lightsource::LightSpell]; } // limiter in property handler m_Light.Level[EQ::lightsource::LightActive] = EQ::lightsource::TypeToLevel(m_Light.Type[EQ::lightsource::LightActive]); return (m_Light.Level[EQ::lightsource::LightActive] != old_light_level); } void Mob::ChangeSize(float in_size = 0, bool bNoRestriction) { // Size Code if (!bNoRestriction) { if (this->IsClient() || this->petid != 0) if (in_size < 3.0) in_size = 3.0; if (this->IsClient() || this->petid != 0) if (in_size > 15.0) in_size = 15.0; } if (in_size < 1.0) in_size = 1.0; if (in_size > 255.0) in_size = 255.0; //End of Size Code size = in_size; SendAppearancePacket(AT_Size, (uint32) in_size); } Mob* Mob::GetOwnerOrSelf() { if (!GetOwnerID()) return this; Mob* owner = entity_list.GetMob(this->GetOwnerID()); if (!owner) { SetOwnerID(0); return(this); } if (owner->GetPetID() == this->GetID()) { return owner; } if(IsNPC() && CastToNPC()->GetSwarmInfo()){ return (CastToNPC()->GetSwarmInfo()->GetOwner()); } SetOwnerID(0); return this; } Mob* Mob::GetOwner() { Mob* owner = entity_list.GetMob(this->GetOwnerID()); if (owner && owner->GetPetID() == this->GetID()) { return owner; } if(IsNPC() && CastToNPC()->GetSwarmInfo()){ return (CastToNPC()->GetSwarmInfo()->GetOwner()); } SetOwnerID(0); return 0; } Mob* Mob::GetUltimateOwner() { Mob* Owner = GetOwner(); if(!Owner) return this; while(Owner && Owner->HasOwner()) Owner = Owner->GetOwner(); return Owner ? 
Owner : this; } void Mob::SetOwnerID(uint16 NewOwnerID) { if (NewOwnerID == GetID() && NewOwnerID != 0) // ok, no charming yourself now =p return; ownerid = NewOwnerID; // if we're setting the owner ID to 0 and they're not either charmed or not-a-pet then // they're a normal pet and should be despawned if (ownerid == 0 && IsNPC() && GetPetType() != petCharmed && GetPetType() != petNone) Depop(); } // used in checking for behind (backstab) and checking in front (melee LoS) float Mob::MobAngle(Mob *other, float ourx, float oury) const { if (!other || other == this) return 0.0f; float angle, lengthb, vectorx, vectory, dotp; float mobx = -(other->GetX()); // mob xloc (inverse because eq) float moby = other->GetY(); // mob yloc float heading = other->GetHeading(); // mob heading heading = (heading * 360.0f) / 512.0f; // convert to degrees if (heading < 270) heading += 90; else heading -= 270; heading = heading * 3.1415f / 180.0f; // convert to radians vectorx = mobx + (10.0f * std::cos(heading)); // create a vector based on heading vectory = moby + (10.0f * std::sin(heading)); // of mob length 10 // length of mob to player vector lengthb = (float) std::sqrt(((-ourx - mobx) * (-ourx - mobx)) + ((oury - moby) * (oury - moby))); // calculate dot product to get angle // Handle acos domain errors due to floating point rounding errors dotp = ((vectorx - mobx) * (-ourx - mobx) + (vectory - moby) * (oury - moby)) / (10 * lengthb); // I haven't seen any errors that cause problems that weren't slightly // larger/smaller than 1/-1, so only handle these cases for now if (dotp > 1) return 0.0f; else if (dotp < -1) return 180.0f; angle = std::acos(dotp); angle = angle * 180.0f / 3.1415f; return angle; } void Mob::SetZone(uint32 zone_id, uint32 instance_id) { if(IsClient()) { CastToClient()->GetPP().zone_id = zone_id; CastToClient()->GetPP().zoneInstance = instance_id; } Save(); } void Mob::Kill() { Death(this, 0, SPELL_UNKNOWN, EQ::skills::SkillHandtoHand); } bool Mob::CanThisClassDualWield(void) const { if(!IsClient()) { return(GetSkill(EQ::skills::SkillDualWield) > 0); } else if (CastToClient()->HasSkill(EQ::skills::SkillDualWield)) { const EQ::ItemInstance* pinst = CastToClient()->GetInv().GetItem(EQ::invslot::slotPrimary); const EQ::ItemInstance* sinst = CastToClient()->GetInv().GetItem(EQ::invslot::slotSecondary); // 2HS, 2HB, or 2HP if(pinst && pinst->IsWeapon()) { const EQ::ItemData* item = pinst->GetItem(); if (item->IsType2HWeapon()) return false; } // OffHand Weapon if(sinst && !sinst->IsWeapon()) return false; // Dual-Wielding Empty Fists if(!pinst && !sinst) if(class_ != MONK && class_ != MONKGM && class_ != BEASTLORD && class_ != BEASTLORDGM) return false; return true; } return false; } bool Mob::CanThisClassDoubleAttack(void) const { if(!IsClient()) { return(GetSkill(EQ::skills::SkillDoubleAttack) > 0); } else { if(aabonuses.GiveDoubleAttack || itembonuses.GiveDoubleAttack || spellbonuses.GiveDoubleAttack) { return true; } return(CastToClient()->HasSkill(EQ::skills::SkillDoubleAttack)); } } bool Mob::CanThisClassTripleAttack() const { if (!IsClient()) return false; // When they added the real triple attack skill, mobs lost the ability to triple else return CastToClient()->HasSkill(EQ::skills::SkillTripleAttack); } bool Mob::IsWarriorClass(void) const { switch(GetClass()) { case WARRIOR: case WARRIORGM: case ROGUE: case ROGUEGM: case MONK: case MONKGM: case PALADIN: case PALADINGM: case SHADOWKNIGHT: case SHADOWKNIGHTGM: case RANGER: case RANGERGM: case BEASTLORD: case BEASTLORDGM: case 
BERSERKER:
	case BERSERKERGM:
	case BARD:
	case BARDGM: {
		return true;
	}
	default: {
		return false;
	}
	}
}

bool Mob::CanThisClassParry(void) const
{
	if(!IsClient()) {
		return(GetSkill(EQ::skills::SkillParry) > 0);
	}
	else {
		return(CastToClient()->HasSkill(EQ::skills::SkillParry));
	}
}

bool Mob::CanThisClassDodge(void) const
{
	if(!IsClient()) {
		return(GetSkill(EQ::skills::SkillDodge) > 0);
	}
	else {
		return(CastToClient()->HasSkill(EQ::skills::SkillDodge));
	}
}

bool Mob::CanThisClassRiposte(void) const
{
	if(!IsClient()) {
		return(GetSkill(EQ::skills::SkillRiposte) > 0);
	}
	else {
		return(CastToClient()->HasSkill(EQ::skills::SkillRiposte));
	}
}

bool Mob::CanThisClassBlock(void) const
{
	if(!IsClient()) {
		return(GetSkill(EQ::skills::SkillBlock) > 0);
	}
	else {
		return(CastToClient()->HasSkill(EQ::skills::SkillBlock));
	}
}
/*
float Mob::GetReciprocalHeading(Mob* target)
{
	float Result = 0;

	if(target) {
		// Convert to radians
		float h = (target->GetHeading() / 256.0f) * 6.283184f;

		// Calculate the reciprocal heading in radians
		Result = h + 3.141592f;

		// Convert back to eq heading from radians
		Result = (Result / 6.283184f) * 256.0f;
	}

	return Result;
}
*/
bool Mob::PlotPositionAroundTarget(Mob* target, float &x_dest, float &y_dest, float &z_dest, bool lookForAftArc)
{
	bool Result = false;

	if(target) {
		float look_heading = 0;

		if(lookForAftArc)
			look_heading = GetReciprocalHeading(target->GetPosition());
		else
			look_heading = target->GetHeading();

		// Convert the Sony heading (0-512 = full circle) to radians
		look_heading = (look_heading / 512.0f) * 6.283184f;

		float tempX = 0;
		float tempY = 0;
		float tempZ = 0;
		float tempSize = 0;
		const float rangeCreepMod = 0.25;
		const uint8 maxIterationsAllowed = 4;
		uint8 counter = 0;
		float rangeReduction = 0;

		tempSize = target->GetSize();
		rangeReduction = (tempSize * rangeCreepMod);

		while(tempSize > 0 && counter != maxIterationsAllowed) {
			tempX = GetX() + (tempSize * static_cast<float>(sin(double(look_heading))));
			tempY = GetY() + (tempSize * static_cast<float>(cos(double(look_heading))));
			tempZ = target->GetZ();

			if(!CheckLosFN(tempX, tempY, tempZ, tempSize)) {
				tempSize -= rangeReduction;
			}
			else {
				Result = true;
				break;
			}

			counter++;
		}

		if(!Result) {
			// Try to find an attack arc from another direction; the adjustment below
			// rotates the heading by pi/2 (a quarter turn, not a full reversal).
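			// Heading-unit refresher (grounded in the conversion above): EQ headings
			// run 0-512 for a full circle, so radians = (eq_heading / 512.0f) * 2 * pi
			// and degrees = (eq_heading / 512.0f) * 360.0f. For example, an EQ heading
			// of 128 is a quarter turn (90 degrees, or pi/2 radians).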
look_heading += (3.141592 / 2);

			tempSize = target->GetSize();
			counter = 0;

			while(tempSize > 0 && counter != maxIterationsAllowed) {
				tempX = GetX() + (tempSize * static_cast<float>(sin(double(look_heading))));
				tempY = GetY() + (tempSize * static_cast<float>(cos(double(look_heading))));
				tempZ = target->GetZ();

				if(!CheckLosFN(tempX, tempY, tempZ, tempSize)) {
					tempSize -= rangeReduction;
				}
				else {
					Result = true;
					break;
				}

				counter++;
			}
		}

		if(Result) {
			x_dest = tempX;
			y_dest = tempY;
			z_dest = tempZ;
		}
	}

	return Result;
}

bool Mob::PlotPositionOnArcInFrontOfTarget(Mob* target, float& x_dest, float& y_dest, float& z_dest, float distance, float min_deg, float max_deg)
{
	return false;
}

bool Mob::PlotPositionOnArcBehindTarget(Mob* target, float& x_dest, float& y_dest, float& z_dest, float distance)
{
	return false;
}

bool Mob::PlotPositionBehindMeFacingTarget(Mob* target, float& x_dest, float& y_dest, float& z_dest, float min_dist, float max_dist)
{
	return false;
}

bool Mob::HateSummon()
{
	// check if mob has ability to summon
	// 97% is the official % that summoning starts on live, not 94
	if (IsCharmed())
		return false;

	int summon_level = GetSpecialAbility(SPECATK_SUMMON);
	if(summon_level == 1 || summon_level == 2) {
		if(!GetTarget()) {
			return false;
		}
	} else {
		//unsupported summon level or OFF
		return false;
	}

	// validate hp
	int hp_ratio = GetSpecialAbilityParam(SPECATK_SUMMON, 1);
	hp_ratio = hp_ratio > 0 ? hp_ratio : 97;
	if(GetHPRatio() > static_cast<float>(hp_ratio)) {
		return false;
	}

	// now validate the timer
	int summon_timer_duration = GetSpecialAbilityParam(SPECATK_SUMMON, 0);
	summon_timer_duration = summon_timer_duration > 0 ? summon_timer_duration : 6000;
	Timer *timer = GetSpecialAbilityTimer(SPECATK_SUMMON);
	if (!timer) {
		StartSpecialAbilityTimer(SPECATK_SUMMON, summon_timer_duration);
	} else {
		if(!timer->Check())
			return false;
		timer->Start(summon_timer_duration);
	}

	// get summon target
	SetTarget(GetHateTop());

	if(target) {
		if(summon_level == 1) {
			entity_list.MessageClose(this, true, 500, Chat::Say, "%s says 'You will not evade me, %s!' 
", GetCleanName(), target->GetCleanName() ); if (target->IsClient()) target->CastToClient()->MovePC(zone->GetZoneID(), zone->GetInstanceID(), m_Position.x, m_Position.y, m_Position.z, target->GetHeading(), 0, SummonPC); else target->GMMove(m_Position.x, m_Position.y, m_Position.z, target->GetHeading()); return true; } else if(summon_level == 2) { entity_list.MessageClose(this, true, 500, Chat::Say, "%s says 'You will not evade me, %s!'", GetCleanName(), target->GetCleanName()); GMMove(target->GetX(), target->GetY(), target->GetZ()); } } return false; } void Mob::FaceTarget(Mob* mob_to_face /*= 0*/) { if (IsBoat()) { return; } Mob* faced_mob = mob_to_face; if(!faced_mob) { if(!GetTarget()) { return; } else { faced_mob = GetTarget(); } } float current_heading = GetHeading(); float new_heading = CalculateHeadingToTarget(faced_mob->GetX(), faced_mob->GetY()); if(current_heading != new_heading) { if (IsEngaged() || IsRunning()) { RotateToRunning(new_heading); } else { RotateToWalking(new_heading); } } if(IsNPC() && !IsEngaged()) { CastToNPC()->GetRefaceTimer()->Start(15000); CastToNPC()->GetRefaceTimer()->Enable(); } } bool Mob::RemoveFromHateList(Mob* mob) { SetRunAnimSpeed(0); bool bFound = false; if(IsEngaged()) { bFound = hate_list.RemoveEntFromHateList(mob); if(hate_list.IsHateListEmpty()) { AI_Event_NoLongerEngaged(); zone->DelAggroMob(); if (IsNPC() && !RuleB(Aggro, AllowTickPulling)) ResetAssistCap(); } } if(GetTarget() == mob) { SetTarget(hate_list.GetEntWithMostHateOnList(this)); } return bFound; } void Mob::WipeHateList() { if(IsEngaged()) { hate_list.WipeHateList(); AI_Event_NoLongerEngaged(); } else { hate_list.WipeHateList(); } } uint32 Mob::RandomTimer(int min, int max) { int r = 14000; if (min != 0 && max != 0 && min < max) { r = zone->random.Int(min, max); } return r; } uint32 Mob::IsEliteMaterialItem(uint8 material_slot) const { const EQ::ItemData *item = nullptr; item = database.GetItem(GetEquippedItemFromTextureSlot(material_slot)); if(item != 0) { return item->EliteMaterial; } return 0; } // works just like a printf void Mob::Say(const char *format, ...) 
{ char buf[1000]; va_list ap; va_start(ap, format); vsnprintf(buf, 1000, format, ap); va_end(ap); Mob *talker = this; if (spellbonuses.VoiceGraft != 0) { if (spellbonuses.VoiceGraft == GetPetID()) { talker = entity_list.GetMob(spellbonuses.VoiceGraft); } else { spellbonuses.VoiceGraft = 0; } } if (!talker) { talker = this; } entity_list.MessageCloseString( talker, false, 200, 10, GENERIC_SAY, GetCleanName(), buf ); } // // this is like the above, but the first parameter is a string id // void Mob::SayString(uint32 string_id, const char *message3, const char *message4, const char *message5, const char *message6, const char *message7, const char *message8, const char *message9) { char string_id_str[10]; snprintf(string_id_str, 10, "%d", string_id); entity_list.MessageCloseString( this, false, 200, 10, GENERIC_STRINGID_SAY, GetCleanName(), string_id_str, message3, message4, message5, message6, message7, message8, message9 ); } void Mob::SayString(uint32 type, uint32 string_id, const char *message3, const char *message4, const char *message5, const char *message6, const char *message7, const char *message8, const char *message9) { char string_id_str[10]; snprintf(string_id_str, 10, "%d", string_id); entity_list.MessageCloseString( this, false, 200, type, GENERIC_STRINGID_SAY, GetCleanName(), string_id_str, message3, message4, message5, message6, message7, message8, message9 ); } void Mob::SayString(Client *to, uint32 string_id, const char *message3, const char *message4, const char *message5, const char *message6, const char *message7, const char *message8, const char *message9) { if (!to) return; auto string_id_str = std::to_string(string_id); to->MessageString(Chat::NPCQuestSay, GENERIC_STRINGID_SAY, GetCleanName(), string_id_str.c_str(), message3, message4, message5, message6, message7, message8, message9); } void Mob::SayString(Client *to, uint32 type, uint32 string_id, const char *message3, const char *message4, const char *message5, const char *message6, const char *message7, const char *message8, const char *message9) { if (!to) return; auto string_id_str = std::to_string(string_id); to->MessageString(type, GENERIC_STRINGID_SAY, GetCleanName(), string_id_str.c_str(), message3, message4, message5, message6, message7, message8, message9); } void Mob::Shout(const char *format, ...) { char buf[1000]; va_list ap; va_start(ap, format); vsnprintf(buf, 1000, format, ap); va_end(ap); entity_list.MessageString(this, false, Chat::Shout, GENERIC_SHOUT, GetCleanName(), buf); } void Mob::Emote(const char *format, ...) 
{
	char buf[1000];
	va_list ap;
	va_start(ap, format);
	vsnprintf(buf, 1000, format, ap);
	va_end(ap);

	entity_list.MessageCloseString(
		this, false, 200, 10,
		GENERIC_EMOTE, GetCleanName(), buf
	);
}

void Mob::QuestJournalledSay(Client *QuestInitiator, const char *str, Journal::Options &opts)
{
	// just in case
	if (opts.target_spawn_id == 0 && QuestInitiator)
		opts.target_spawn_id = QuestInitiator->GetID();

	entity_list.QuestJournalledSayClose(this, 200, GetCleanName(), str, opts);
}

const char *Mob::GetCleanName()
{
	if (!strlen(clean_name)) {
		CleanMobName(GetName(), clean_name);
	}

	return clean_name;
}

// hp event
void Mob::SetNextHPEvent( int hpevent )
{
	nexthpevent = hpevent;
}

void Mob::SetNextIncHPEvent( int inchpevent )
{
	nextinchpevent = inchpevent;
}

// type: 1 = MR, 2 = FR, 3 = CR, 4 = PR, 5 = DR; any other value falls through to the 25 default
int16 Mob::GetResist(uint8 type) const
{
	if (IsNPC()) {
		if (type == 1)
			return MR + spellbonuses.MR + itembonuses.MR;
		else if (type == 2)
			return FR + spellbonuses.FR + itembonuses.FR;
		else if (type == 3)
			return CR + spellbonuses.CR + itembonuses.CR;
		else if (type == 4)
			return PR + spellbonuses.PR + itembonuses.PR;
		else if (type == 5)
			return DR + spellbonuses.DR + itembonuses.DR;
	}
	else if (IsClient()) {
		if (type == 1)
			return CastToClient()->GetMR();
		else if (type == 2)
			return CastToClient()->GetFR();
		else if (type == 3)
			return CastToClient()->GetCR();
		else if (type == 4)
			return CastToClient()->GetPR();
		else if (type == 5)
			return CastToClient()->GetDR();
	}
	return 25;
}

uint32 Mob::GetLevelHP(uint8 tlevel)
{
	int multiplier = 0;
	if (tlevel < 10) {
		multiplier = tlevel*20;
	}
	else if (tlevel < 20) {
		multiplier = tlevel*25;
	}
	else if (tlevel < 40) {
		multiplier = tlevel*tlevel*12*((tlevel*2+60)/100)/10;
	}
	else if (tlevel < 45) {
		multiplier = tlevel*tlevel*15*((tlevel*2+60)/100)/10;
	}
	else if (tlevel < 50) {
		multiplier = tlevel*tlevel*175*((tlevel*2+60)/100)/100;
	}
	else {
		multiplier = tlevel*tlevel*2*((tlevel*2+60)/100)*(1+((tlevel-50)*20/10));
	}
	return multiplier;
}

int32 Mob::GetActSpellCasttime(uint16 spell_id, int32 casttime)
{
	int32 cast_reducer = GetFocusEffect(focusSpellHaste, spell_id);

	if (level > 50 && casttime >= 3000 && !spells[spell_id].goodEffect &&
	    (GetClass() == RANGER || GetClass() == SHADOWKNIGHT || GetClass() == PALADIN || GetClass() == BEASTLORD)) {
		int level_mod = std::min(15, GetLevel() - 50);
		cast_reducer += level_mod * 3;
	}

	// Cap the focus reduction at half of the unmodified cast time.
	int32 reduced_casttime = casttime * (100 - cast_reducer) / 100;
	return std::max(reduced_casttime, casttime / 2);
}

void Mob::ExecWeaponProc(const EQ::ItemInstance *inst, uint16 spell_id, Mob *on, int level_override)
{
	// Changed proc targets to look up based on the spell's goodEffect flag.
	// This should work for the majority of weapons.
	if(spell_id == SPELL_UNKNOWN || on->GetSpecialAbility(NO_HARM_FROM_CLIENT)) {
		//This is so 65535 doesn't get passed to the client message and to logs because it is not relevant information for debugging.
		return;
	}

	if (on->GetSpecialAbility(IMMUNE_DAMAGE_CLIENT) && IsClient())
		return;

	if (on->GetSpecialAbility(IMMUNE_DAMAGE_NPC) && IsNPC())
		return;

	if (IsNoCast())
		return;

	if(!IsValidSpell(spell_id)) { // Check for a valid spell otherwise it will crash through the function
		if(IsClient()){
			Message(0, "Invalid spell proc %u", spell_id);
			LogSpells("Player [{}], Weapon Procced invalid spell [{}]", this->GetName(), spell_id);
		}
		return;
	}

	if(inst && IsClient()) {
		//const cast is dirty but it would require redoing a ton of interfaces at this point
		//It should be safe as we don't have any truly const EQ::ItemInstance floating around anywhere.
//So we'll live with it for now int i = parse->EventItem(EVENT_WEAPON_PROC, CastToClient(), const_cast<EQ::ItemInstance*>(inst), on, "", spell_id); if(i != 0) { return; } } bool twinproc = false; int32 twinproc_chance = 0; if(IsClient()) twinproc_chance = CastToClient()->GetFocusEffect(focusTwincast, spell_id); if(twinproc_chance && zone->random.Roll(twinproc_chance)) twinproc = true; if (IsBeneficialSpell(spell_id) && (!IsNPC() || (IsNPC() && CastToNPC()->GetInnateProcSpellID() != spell_id))) { // NPC innate procs don't take this path ever SpellFinished(spell_id, this, EQ::spells::CastingSlot::Item, 0, -1, spells[spell_id].ResistDiff, true, level_override); if(twinproc) SpellOnTarget(spell_id, this, false, false, 0, true, level_override); } else if(!(on->IsClient() && on->CastToClient()->dead)) { //dont proc on dead clients SpellFinished(spell_id, on, EQ::spells::CastingSlot::Item, 0, -1, spells[spell_id].ResistDiff, true, level_override); if(twinproc) SpellOnTarget(spell_id, on, false, false, 0, true, level_override); } return; } uint32 Mob::GetZoneID() const { return(zone->GetZoneID()); } int Mob::GetHaste() { // See notes in Client::CalcHaste // Need to check if the effect of inhibit melee differs for NPCs if (spellbonuses.haste < 0) { if (-spellbonuses.haste <= spellbonuses.inhibitmelee) return 100 - spellbonuses.inhibitmelee; else return 100 + spellbonuses.haste; } if (spellbonuses.haste == 0 && spellbonuses.inhibitmelee) return 100 - spellbonuses.inhibitmelee; int h = 0; int cap = 0; int level = GetLevel(); if (spellbonuses.haste) h += spellbonuses.haste - spellbonuses.inhibitmelee; if (spellbonuses.hastetype2 && level > 49) h += spellbonuses.hastetype2 > 10 ? 10 : spellbonuses.hastetype2; // 26+ no cap, 1-25 10 if (level > 25) // 26+ h += itembonuses.haste; else // 1-25 h += itembonuses.haste > 10 ? 10 : itembonuses.haste; // mobs are different! Mob *owner = nullptr; if (IsPet()) owner = GetOwner(); else if (IsNPC() && CastToNPC()->GetSwarmOwner()) owner = entity_list.GetMobID(CastToNPC()->GetSwarmOwner()); if (owner) { cap = 10 + level; cap += std::max(0, owner->GetLevel() - 39) + std::max(0, owner->GetLevel() - 60); } else { cap = 150; } if(h > cap) h = cap; // 51+ 25 (despite there being higher spells...), 1-50 10 if (level > 50) // 51+ h += spellbonuses.hastetype3 > 25 ? 25 : spellbonuses.hastetype3; else // 1-50 h += spellbonuses.hastetype3 > 10 ? 10 : spellbonuses.hastetype3; h += ExtraHaste; //GM granted haste. return 100 + h; } void Mob::SetTarget(Mob *mob) { if (target == mob) { return; } target = mob; entity_list.UpdateHoTT(this); if (IsNPC()) { parse->EventNPC(EVENT_TARGET_CHANGE, CastToNPC(), mob, "", 0); } else if (IsClient()) { parse->EventPlayer(EVENT_TARGET_CHANGE, CastToClient(), "", 0); if (this->CastToClient()->admin > 200) { this->DisplayInfo(mob); } #ifdef BOTS CastToClient()->SetBotPrecombat(false); // Any change in target will nullify this flag (target == mob checked above) #endif } if (IsPet() && GetOwner() && GetOwner()->IsClient()) { GetOwner()->CastToClient()->UpdateXTargetType(MyPetTarget, mob); } if (this->IsClient() && this->GetTarget() && this->CastToClient()->hp_other_update_throttle_timer.Check()) { this->GetTarget()->SendHPUpdate(false, true); } } // For when we want a Ground Z at a location we are not at yet // Like MoveTo. 
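// A hypothetical usage sketch (the coordinates and offset below are invented for
// illustration): movement code can pre-check the floor height at a prospective
// destination before committing to the move:
//   glm::vec3 goal(100.0f, -250.0f, 10.0f);
//   float floor_z = FindDestGroundZ(goal, 5.0f);
//   if (floor_z != BEST_Z_INVALID) { /* the destination has a usable floor */ }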
float Mob::FindDestGroundZ(glm::vec3 dest, float z_offset) { float best_z = BEST_Z_INVALID; if (zone->zonemap != nullptr) { dest.z += z_offset; best_z = zone->zonemap->FindBestZ(dest, nullptr); } return best_z; } float Mob::FindGroundZ(float new_x, float new_y, float z_offset) { float ret = BEST_Z_INVALID; if (zone->zonemap != nullptr) { glm::vec3 me; me.x = new_x; me.y = new_y; me.z = m_Position.z + z_offset; glm::vec3 hit; float best_z = zone->zonemap->FindBestZ(me, &hit); if (best_z != BEST_Z_INVALID) { ret = best_z; } } return ret; } // Copy of above function that isn't protected to be exported to Perl::Mob float Mob::GetGroundZ(float new_x, float new_y, float z_offset) { float ret = BEST_Z_INVALID; if (zone->zonemap != 0) { glm::vec3 me; me.x = new_x; me.y = new_y; me.z = m_Position.z+z_offset; glm::vec3 hit; float best_z = zone->zonemap->FindBestZ(me, &hit); if (best_z != BEST_Z_INVALID) { ret = best_z; } } return ret; } //helper function for npc AI; needs to be mob:: cause we need to be able to count buffs on other clients and npcs int Mob::CountDispellableBuffs() { int val = 0; int buff_count = GetMaxTotalSlots(); for(int x = 0; x < buff_count; x++) { if(!IsValidSpell(buffs[x].spellid)) continue; if(buffs[x].counters) continue; if(spells[buffs[x].spellid].goodEffect == 0) continue; if(buffs[x].spellid != SPELL_UNKNOWN && spells[buffs[x].spellid].buffdurationformula != DF_Permanent) val++; } return val; } // Returns the % that a mob is snared (as a positive value). -1 means not snared int Mob::GetSnaredAmount() { int worst_snare = -1; int buff_count = GetMaxTotalSlots(); for (int i = 0; i < buff_count; i++) { if (!IsValidSpell(buffs[i].spellid)) continue; for(int j = 0; j < EFFECT_COUNT; j++) { if (spells[buffs[i].spellid].effectid[j] == SE_MovementSpeed) { int val = CalcSpellEffectValue_formula(spells[buffs[i].spellid].formula[j], spells[buffs[i].spellid].base[j], spells[buffs[i].spellid].max[j], buffs[i].casterlevel, buffs[i].spellid); //int effect = CalcSpellEffectValue(buffs[i].spellid, spells[buffs[i].spellid].effectid[j], buffs[i].casterlevel); if (val < 0 && std::abs(val) > worst_snare) worst_snare = std::abs(val); } } } return worst_snare; } void Mob::TriggerDefensiveProcs(Mob *on, uint16 hand, bool FromSkillProc, int damage) { if (!on) return; if (!FromSkillProc) on->TryDefensiveProc(this, hand); //Defensive Skill Procs if (damage < 0 && damage >= -4) { uint16 skillinuse = 0; switch (damage) { case (-1): skillinuse = EQ::skills::SkillBlock; break; case (-2): skillinuse = EQ::skills::SkillParry; break; case (-3): skillinuse = EQ::skills::SkillRiposte; break; case (-4): skillinuse = EQ::skills::SkillDodge; break; } if (on->HasSkillProcs()) on->TrySkillProc(this, skillinuse, 0, false, hand, true); if (on->HasSkillProcSuccess()) on->TrySkillProc(this, skillinuse, 0, true, hand, true); } } void Mob::SetDelta(const glm::vec4& delta) { m_Delta = delta; } void Mob::SetEntityVariable(const char *id, const char *m_var) { std::string n_m_var = m_var; m_EntityVariables[id] = n_m_var; } const char *Mob::GetEntityVariable(const char *id) { auto iter = m_EntityVariables.find(id); if (iter != m_EntityVariables.end()) { return iter->second.c_str(); } return nullptr; } bool Mob::EntityVariableExists(const char *id) { auto iter = m_EntityVariables.find(id); if(iter != m_EntityVariables.end()) { return true; } return false; } void Mob::SetFlyMode(GravityBehavior flymode) { this->flymode = flymode; } void Mob::Teleport(const glm::vec3 &pos) { mMovementManager->Teleport(this, pos.x, pos.y, 
pos.z, m_Position.w);
}

void Mob::Teleport(const glm::vec4 &pos)
{
	mMovementManager->Teleport(this, pos.x, pos.y, pos.z, pos.w);
}

bool Mob::IsNimbusEffectActive(uint32 nimbus_effect)
{
	if(nimbus_effect1 == nimbus_effect || nimbus_effect2 == nimbus_effect || nimbus_effect3 == nimbus_effect) {
		return true;
	}
	return false;
}

void Mob::SetNimbusEffect(uint32 nimbus_effect)
{
	if(nimbus_effect1 == 0) {
		nimbus_effect1 = nimbus_effect;
	}
	else if(nimbus_effect2 == 0) {
		nimbus_effect2 = nimbus_effect;
	}
	else {
		nimbus_effect3 = nimbus_effect;
	}
}

void Mob::TryTriggerOnCast(uint32 spell_id, bool aa_trigger)
{
	if(!IsValidSpell(spell_id))
		return;

	if (aabonuses.SpellTriggers[0] || spellbonuses.SpellTriggers[0] || itembonuses.SpellTriggers[0]){
		for(int i = 0; i < MAX_SPELL_TRIGGER; i++){
			if(aabonuses.SpellTriggers[i] && IsClient())
				TriggerOnCast(aabonuses.SpellTriggers[i], spell_id, 1);
			if(spellbonuses.SpellTriggers[i])
				TriggerOnCast(spellbonuses.SpellTriggers[i], spell_id, 0);
			if(itembonuses.SpellTriggers[i])
				TriggerOnCast(itembonuses.SpellTriggers[i], spell_id, 0);
		}
	}
}

void Mob::TriggerOnCast(uint32 focus_spell, uint32 spell_id, bool aa_trigger)
{
	if (!IsValidSpell(focus_spell) || !IsValidSpell(spell_id))
		return;

	uint32 trigger_spell_id = 0;

	if (aa_trigger && IsClient()) {
		// focus_spell = aaid
		auto rank = zone->GetAlternateAdvancementRank(focus_spell);
		if (rank)
			trigger_spell_id = CastToClient()->CalcAAFocus(focusTriggerOnCast, *rank, spell_id);

		if (IsValidSpell(trigger_spell_id) && GetTarget())
			SpellFinished(trigger_spell_id, GetTarget(), EQ::spells::CastingSlot::Item, 0, -1, spells[trigger_spell_id].ResistDiff);
	}
	else {
		trigger_spell_id = CalcFocusEffect(focusTriggerOnCast, focus_spell, spell_id);

		if (IsValidSpell(trigger_spell_id) && GetTarget()) {
			SpellFinished(trigger_spell_id, GetTarget(), EQ::spells::CastingSlot::Item, 0, -1, spells[trigger_spell_id].ResistDiff);
			CheckNumHitsRemaining(NumHit::MatchingSpells, -1, focus_spell);
		}
	}
}

bool Mob::TrySpellTrigger(Mob *target, uint32 spell_id, int effect)
{
	if(!target || !IsValidSpell(spell_id))
		return false;

	int spell_trig = 0;
	// Count all the percentage chances to trigger for all effects
	for(int i = 0; i < EFFECT_COUNT; i++) {
		if (spells[spell_id].effectid[i] == SE_SpellTrigger)
			spell_trig += spells[spell_id].base[i];
	}

	// If all the % add to 100, then only one of the effects can fire but one has to fire.
	if (spell_trig == 100) {
		int trig_chance = 100;
		for(int i = 0; i < EFFECT_COUNT; i++) {
			if (spells[spell_id].effectid[i] == SE_SpellTrigger) {
				if(zone->random.Int(0, trig_chance) <= spells[spell_id].base[i]) {
					// If we trigger an effect then it's over.
					if (IsValidSpell(spells[spell_id].base2[i])){
						SpellFinished(spells[spell_id].base2[i], target, EQ::spells::CastingSlot::Item, 0, -1, spells[spells[spell_id].base2[i]].ResistDiff);
						return true;
					}
				}
				else {
					// Increase the chance to fire for the next effect; if all effects fail, the final effect will fire.
					trig_chance -= spells[spell_id].base[i];
				}
			}
		}
	}
	// if the chances don't add to 100, then each effect gets a chance to fire, chance for no trigger as well.
	else {
		if(zone->random.Int(0, 100) <= spells[spell_id].base[effect]) {
			if (IsValidSpell(spells[spell_id].base2[effect])){
				SpellFinished(spells[spell_id].base2[effect], target, EQ::spells::CastingSlot::Item, 0, -1, spells[spells[spell_id].base2[effect]].ResistDiff);
				return true; //Only trigger one of these per spell effect.
			}
		}
	}

	return false;
}

void Mob::TryTriggerOnValueAmount(bool IsHP, bool IsMana, bool IsEndur, bool IsPet)
{
	/*
	At present time there is no obvious difference between ReqTarget and ReqCaster.
	ReqTarget is typically used in spells cast on a target where the trigger occurs on that target.
	ReqCaster is typically used in self-only spells where the trigger occurs on self.
	Regardless, both trigger on the owner of the buff.
	*/

	/*
	Base2 Range: 1004	= Below < 80% HP
	Base2 Range: 500-520	= Below (base2 - 500)*5 HP
	Base2 Range: 521	= Below (?) Mana UNKNOWN - Will assume it's 20% unless proven otherwise
	Base2 Range: 522	= Below (40%) Endurance
	Base2 Range: 523	= Below (40%) Mana
	Base2 Range: 220-?	= Number of pets on hatelist to trigger (base2 - 220) (Set at 30 pets max for now)
	38311 = < 10% mana;
	*/

	if (!spellbonuses.TriggerOnValueAmount)
		return;

	if (spellbonuses.TriggerOnValueAmount){

		int buff_count = GetMaxTotalSlots();

		for(int e = 0; e < buff_count; e++){

			uint32 spell_id = buffs[e].spellid;

			if (IsValidSpell(spell_id)){

				for(int i = 0; i < EFFECT_COUNT; i++){

					if ((spells[spell_id].effectid[i] == SE_TriggerOnReqTarget) || (spells[spell_id].effectid[i] == SE_TriggerOnReqCaster)) {

						int base2 = spells[spell_id].base2[i];
						bool use_spell = false;

						if (IsHP){
							if ((base2 >= 500 && base2 <= 520) && GetHPRatio() < (base2 - 500)*5)
								use_spell = true;

							else if (base2 == 1004 && GetHPRatio() < 80)
								use_spell = true;
						}

						else if (IsMana){
							if ((base2 == 521 && GetManaRatio() < 20) || (base2 == 523 && GetManaRatio() < 40))
								use_spell = true;

							else if (base2 == 38311 && GetManaRatio() < 10)
								use_spell = true;
						}

						else if (IsEndur){
							if (base2 == 522 && GetEndurancePercent() < 40){
								use_spell = true;
							}
						}

						else if (IsPet){
							int count = hate_list.GetSummonedPetCountOnHateList(this);
							if ((base2 >= 220 && base2 <= 250) && count >= (base2 - 220)){
								use_spell = true;
							}
						}

						if (use_spell){
							SpellFinished(spells[spell_id].base[i], this, EQ::spells::CastingSlot::Item, 0, -1, spells[spell_id].ResistDiff);

							if(!TryFadeEffect(e))
								BuffFadeBySlot(e);
						}
					}
				}
			}
		}
	}
}

//Twincast Focus effects should stack across different types (Spell, AA - when implemented, etc.)
void Mob::TryTwincast(Mob *caster, Mob *target, uint32 spell_id)
{
	if(!IsValidSpell(spell_id))
		return;

	if(IsClient()) {
		int32 focus = CastToClient()->GetFocusEffect(focusTwincast, spell_id);

		if (focus > 0) {
			if(zone->random.Roll(focus)) {
				Message(Chat::Spells,"You twincast %s!", spells[spell_id].name);
				SpellFinished(spell_id, target, EQ::spells::CastingSlot::Item, 0, -1, spells[spell_id].ResistDiff);
			}
		}
	}
	//Retains function for non clients
	else if (spellbonuses.FocusEffects[focusTwincast] || itembonuses.FocusEffects[focusTwincast]) {
		int buff_count = GetMaxTotalSlots();
		for(int i = 0; i < buff_count; i++) {
			if(IsEffectInSpell(buffs[i].spellid, SE_FcTwincast)) {
				int32 focus = CalcFocusEffect(focusTwincast, buffs[i].spellid, spell_id);
				if(focus > 0) {
					if(zone->random.Roll(focus)) {
						SpellFinished(spell_id, target, EQ::spells::CastingSlot::Item, 0, -1, spells[spell_id].ResistDiff);
					}
				}
			}
		}
	}
}

int32 Mob::GetVulnerability(Mob* caster, uint32 spell_id, uint32 ticsremaining)
{
	if (!IsValidSpell(spell_id))
		return 0;

	if (!caster)
		return 0;

	int32 value = 0;

	//Apply innate vulnerabilities
	if (Vulnerability_Mod[GetSpellResistType(spell_id)] != 0)
		value = Vulnerability_Mod[GetSpellResistType(spell_id)];

	else if (Vulnerability_Mod[HIGHEST_RESIST+1] != 0)
		value = Vulnerability_Mod[HIGHEST_RESIST+1];

	//Apply spell derived vulnerabilities
	if (spellbonuses.FocusEffects[focusSpellVulnerability]){

		int32 tmp_focus = 0;
		int
tmp_buffslot = -1;

		int buff_count = GetMaxTotalSlots();
		for(int i = 0; i < buff_count; i++) {

			if (IsValidSpell(buffs[i].spellid) && IsEffectInSpell(buffs[i].spellid, SE_FcSpellVulnerability)){

				int32 focus = caster->CalcFocusEffect(focusSpellVulnerability, buffs[i].spellid, spell_id, true);

				if (!focus)
					continue;

				if (tmp_focus && focus > tmp_focus){
					tmp_focus = focus;
					tmp_buffslot = i;
				}

				else if (!tmp_focus){
					tmp_focus = focus;
					tmp_buffslot = i;
				}
			}
		}

		// Only apply a vulnerability focus when the scan above found a matching buff;
		// with no match, tmp_buffslot is -1 and buffs[tmp_buffslot] must not be read.
		if (tmp_buffslot >= 0) {
			tmp_focus = caster->CalcFocusEffect(focusSpellVulnerability, buffs[tmp_buffslot].spellid, spell_id);

			if (tmp_focus < -99)
				tmp_focus = -99;

			value += tmp_focus;

			CheckNumHitsRemaining(NumHit::MatchingSpells, tmp_buffslot);
		}
	}
	return value;
}

int16 Mob::GetSkillDmgTaken(const EQ::skills::SkillType skill_used, ExtraAttackOptions *opts)
{
	int skilldmg_mod = 0;

	// All skill dmg mod + Skill specific
	skilldmg_mod += itembonuses.SkillDmgTaken[EQ::skills::HIGHEST_SKILL + 1] + spellbonuses.SkillDmgTaken[EQ::skills::HIGHEST_SKILL + 1] +
					itembonuses.SkillDmgTaken[skill_used] + spellbonuses.SkillDmgTaken[skill_used];

	skilldmg_mod += SkillDmgTaken_Mod[skill_used] + SkillDmgTaken_Mod[EQ::skills::HIGHEST_SKILL + 1];

	if (opts)
		skilldmg_mod += opts->skilldmgtaken_bonus_flat;

	if(skilldmg_mod < -100)
		skilldmg_mod = -100;

	return skilldmg_mod;
}

int16 Mob::GetHealRate(uint16 spell_id, Mob* caster)
{
	int16 heal_rate = 0;

	heal_rate += itembonuses.HealRate + spellbonuses.HealRate + aabonuses.HealRate;
	heal_rate += GetFocusIncoming(focusFcHealPctIncoming, SE_FcHealPctIncoming, caster, spell_id);

	if(heal_rate < -99)
		heal_rate = -99;

	return heal_rate;
}

bool Mob::TryFadeEffect(int slot)
{
	if (!buffs[slot].spellid)
		return false;

	if(IsValidSpell(buffs[slot].spellid)) {
		for(int i = 0; i < EFFECT_COUNT; i++) {

			if (!spells[buffs[slot].spellid].effectid[i])
				continue;

			if (spells[buffs[slot].spellid].effectid[i] == SE_CastOnFadeEffectAlways ||
				spells[buffs[slot].spellid].effectid[i] == SE_CastOnRuneFadeEffect) {

				uint16 spell_id = spells[buffs[slot].spellid].base[i];
				BuffFadeBySlot(slot);

				if(spell_id) {

					if(spell_id == SPELL_UNKNOWN)
						return false;

					if(IsValidSpell(spell_id)) {
						if (IsBeneficialSpell(spell_id)) {
							SpellFinished(spell_id, this, EQ::spells::CastingSlot::Item, 0, -1, spells[spell_id].ResistDiff);
						}
						else if(!(IsClient() && CastToClient()->dead)) {
							SpellFinished(spell_id, this, EQ::spells::CastingSlot::Item, 0, -1, spells[spell_id].ResistDiff);
						}
						return true;
					}
				}
			}
		}
	}
	return false;
}

void Mob::TrySympatheticProc(Mob *target, uint32 spell_id)
{
	if(target == nullptr || !IsValidSpell(spell_id) || !IsClient())
		return;

	uint16 focus_spell = CastToClient()->GetSympatheticFocusEffect(focusSympatheticProc, spell_id);

	if(!IsValidSpell(focus_spell))
		return;

	uint16 focus_trigger = GetSympatheticSpellProcID(focus_spell);

	if(!IsValidSpell(focus_trigger))
		return;

	// For beneficial spells, if the triggered spell is also beneficial then proc it on the target;
	// if the triggered spell is detrimental, then it will trigger on the caster (ie cursed items)
	if(IsBeneficialSpell(spell_id)) {
		if(IsBeneficialSpell(focus_trigger))
			SpellFinished(focus_trigger, target);
		else
			SpellFinished(focus_trigger, this, EQ::spells::CastingSlot::Item, 0, -1, spells[focus_trigger].ResistDiff);
	}
	// For detrimental spells, if the triggered spell is beneficial, then it will land on the caster;
	// if the triggered spell is also detrimental, then it will land on the target
	else {
		if(IsBeneficialSpell(focus_trigger))
			SpellFinished(focus_trigger, this);
		else
			SpellFinished(focus_trigger, target,
EQ::spells::CastingSlot::Item, 0, -1, spells[focus_trigger].ResistDiff); } CheckNumHitsRemaining(NumHit::MatchingSpells, -1, focus_spell); } int32 Mob::GetItemStat(uint32 itemid, const char *identifier) { const EQ::ItemInstance* inst = database.CreateItem(itemid); if (!inst) return 0; const EQ::ItemData* item = inst->GetItem(); if (!item) return 0; if (!identifier) return 0; int32 stat = 0; std::string id = identifier; for(uint32 i = 0; i < id.length(); ++i) { id[i] = tolower(id[i]); } if (id == "itemclass") stat = int32(item->ItemClass); if (id == "id") stat = int32(item->ID); if (id == "idfile") stat = atoi(&item->IDFile[2]); if (id == "weight") stat = int32(item->Weight); if (id == "norent") stat = int32(item->NoRent); if (id == "nodrop") stat = int32(item->NoDrop); if (id == "size") stat = int32(item->Size); if (id == "slots") stat = int32(item->Slots); if (id == "price") stat = int32(item->Price); if (id == "icon") stat = int32(item->Icon); if (id == "loregroup") stat = int32(item->LoreGroup); if (id == "loreflag") stat = int32(item->LoreFlag); if (id == "pendingloreflag") stat = int32(item->PendingLoreFlag); if (id == "artifactflag") stat = int32(item->ArtifactFlag); if (id == "summonedflag") stat = int32(item->SummonedFlag); if (id == "fvnodrop") stat = int32(item->FVNoDrop); if (id == "favor") stat = int32(item->Favor); if (id == "guildfavor") stat = int32(item->GuildFavor); if (id == "pointtype") stat = int32(item->PointType); if (id == "bagtype") stat = int32(item->BagType); if (id == "bagslots") stat = int32(item->BagSlots); if (id == "bagsize") stat = int32(item->BagSize); if (id == "bagwr") stat = int32(item->BagWR); if (id == "benefitflag") stat = int32(item->BenefitFlag); if (id == "tradeskills") stat = int32(item->Tradeskills); if (id == "cr") stat = int32(item->CR); if (id == "dr") stat = int32(item->DR); if (id == "pr") stat = int32(item->PR); if (id == "mr") stat = int32(item->MR); if (id == "fr") stat = int32(item->FR); if (id == "astr") stat = int32(item->AStr); if (id == "asta") stat = int32(item->ASta); if (id == "aagi") stat = int32(item->AAgi); if (id == "adex") stat = int32(item->ADex); if (id == "acha") stat = int32(item->ACha); if (id == "aint") stat = int32(item->AInt); if (id == "awis") stat = int32(item->AWis); if (id == "hp") stat = int32(item->HP); if (id == "mana") stat = int32(item->Mana); if (id == "ac") stat = int32(item->AC); if (id == "deity") stat = int32(item->Deity); if (id == "skillmodvalue") stat = int32(item->SkillModValue); if (id == "skillmodtype") stat = int32(item->SkillModType); if (id == "banedmgrace") stat = int32(item->BaneDmgRace); if (id == "banedmgamt") stat = int32(item->BaneDmgAmt); if (id == "banedmgbody") stat = int32(item->BaneDmgBody); if (id == "magic") stat = int32(item->Magic); if (id == "casttime_") stat = int32(item->CastTime_); if (id == "reqlevel") stat = int32(item->ReqLevel); if (id == "bardtype") stat = int32(item->BardType); if (id == "bardvalue") stat = int32(item->BardValue); if (id == "light") stat = int32(item->Light); if (id == "delay") stat = int32(item->Delay); if (id == "reclevel") stat = int32(item->RecLevel); if (id == "recskill") stat = int32(item->RecSkill); if (id == "elemdmgtype") stat = int32(item->ElemDmgType); if (id == "elemdmgamt") stat = int32(item->ElemDmgAmt); if (id == "range") stat = int32(item->Range); if (id == "damage") stat = int32(item->Damage); if (id == "color") stat = int32(item->Color); if (id == "classes") stat = int32(item->Classes); if (id == "races") stat = int32(item->Races); if 
(id == "maxcharges") stat = int32(item->MaxCharges); if (id == "itemtype") stat = int32(item->ItemType); if (id == "material") stat = int32(item->Material); if (id == "casttime") stat = int32(item->CastTime); if (id == "elitematerial") stat = int32(item->EliteMaterial); if (id == "herosforgemodel") stat = int32(item->HerosForgeModel); if (id == "procrate") stat = int32(item->ProcRate); if (id == "combateffects") stat = int32(item->CombatEffects); if (id == "shielding") stat = int32(item->Shielding); if (id == "stunresist") stat = int32(item->StunResist); if (id == "strikethrough") stat = int32(item->StrikeThrough); if (id == "extradmgskill") stat = int32(item->ExtraDmgSkill); if (id == "extradmgamt") stat = int32(item->ExtraDmgAmt); if (id == "spellshield") stat = int32(item->SpellShield); if (id == "avoidance") stat = int32(item->Avoidance); if (id == "accuracy") stat = int32(item->Accuracy); if (id == "charmfileid") stat = int32(item->CharmFileID); if (id == "factionmod1") stat = int32(item->FactionMod1); if (id == "factionmod2") stat = int32(item->FactionMod2); if (id == "factionmod3") stat = int32(item->FactionMod3); if (id == "factionmod4") stat = int32(item->FactionMod4); if (id == "factionamt1") stat = int32(item->FactionAmt1); if (id == "factionamt2") stat = int32(item->FactionAmt2); if (id == "factionamt3") stat = int32(item->FactionAmt3); if (id == "factionamt4") stat = int32(item->FactionAmt4); if (id == "augtype") stat = int32(item->AugType); if (id == "ldontheme") stat = int32(item->LDoNTheme); if (id == "ldonprice") stat = int32(item->LDoNPrice); if (id == "ldonsold") stat = int32(item->LDoNSold); if (id == "banedmgraceamt") stat = int32(item->BaneDmgRaceAmt); if (id == "augrestrict") stat = int32(item->AugRestrict); if (id == "endur") stat = int32(item->Endur); if (id == "dotshielding") stat = int32(item->DotShielding); if (id == "attack") stat = int32(item->Attack); if (id == "regen") stat = int32(item->Regen); if (id == "manaregen") stat = int32(item->ManaRegen); if (id == "enduranceregen") stat = int32(item->EnduranceRegen); if (id == "haste") stat = int32(item->Haste); if (id == "damageshield") stat = int32(item->DamageShield); if (id == "recastdelay") stat = int32(item->RecastDelay); if (id == "recasttype") stat = int32(item->RecastType); if (id == "augdistiller") stat = int32(item->AugDistiller); if (id == "attuneable") stat = int32(item->Attuneable); if (id == "nopet") stat = int32(item->NoPet); if (id == "potionbelt") stat = int32(item->PotionBelt); if (id == "stackable") stat = int32(item->Stackable); if (id == "notransfer") stat = int32(item->NoTransfer); if (id == "questitemflag") stat = int32(item->QuestItemFlag); if (id == "stacksize") stat = int32(item->StackSize); if (id == "potionbeltslots") stat = int32(item->PotionBeltSlots); if (id == "book") stat = int32(item->Book); if (id == "booktype") stat = int32(item->BookType); if (id == "svcorruption") stat = int32(item->SVCorruption); if (id == "purity") stat = int32(item->Purity); if (id == "backstabdmg") stat = int32(item->BackstabDmg); if (id == "dsmitigation") stat = int32(item->DSMitigation); if (id == "heroicstr") stat = int32(item->HeroicStr); if (id == "heroicint") stat = int32(item->HeroicInt); if (id == "heroicwis") stat = int32(item->HeroicWis); if (id == "heroicagi") stat = int32(item->HeroicAgi); if (id == "heroicdex") stat = int32(item->HeroicDex); if (id == "heroicsta") stat = int32(item->HeroicSta); if (id == "heroiccha") stat = int32(item->HeroicCha); if (id == "heroicmr") stat = 
int32(item->HeroicMR); if (id == "heroicfr") stat = int32(item->HeroicFR); if (id == "heroiccr") stat = int32(item->HeroicCR); if (id == "heroicdr") stat = int32(item->HeroicDR); if (id == "heroicpr") stat = int32(item->HeroicPR); if (id == "heroicsvcorrup") stat = int32(item->HeroicSVCorrup); if (id == "healamt") stat = int32(item->HealAmt); if (id == "spelldmg") stat = int32(item->SpellDmg); if (id == "ldonsellbackrate") stat = int32(item->LDoNSellBackRate); if (id == "scriptfileid") stat = int32(item->ScriptFileID); if (id == "expendablearrow") stat = int32(item->ExpendableArrow); if (id == "clairvoyance") stat = int32(item->Clairvoyance); // Begin Effects if (id == "clickeffect") stat = int32(item->Click.Effect); if (id == "clicktype") stat = int32(item->Click.Type); if (id == "clicklevel") stat = int32(item->Click.Level); if (id == "clicklevel2") stat = int32(item->Click.Level2); if (id == "proceffect") stat = int32(item->Proc.Effect); if (id == "proctype") stat = int32(item->Proc.Type); if (id == "proclevel") stat = int32(item->Proc.Level); if (id == "proclevel2") stat = int32(item->Proc.Level2); if (id == "worneffect") stat = int32(item->Worn.Effect); if (id == "worntype") stat = int32(item->Worn.Type); if (id == "wornlevel") stat = int32(item->Worn.Level); if (id == "wornlevel2") stat = int32(item->Worn.Level2); if (id == "focuseffect") stat = int32(item->Focus.Effect); if (id == "focustype") stat = int32(item->Focus.Type); if (id == "focuslevel") stat = int32(item->Focus.Level); if (id == "focuslevel2") stat = int32(item->Focus.Level2); if (id == "scrolleffect") stat = int32(item->Scroll.Effect); if (id == "scrolltype") stat = int32(item->Scroll.Type); if (id == "scrolllevel") stat = int32(item->Scroll.Level); if (id == "scrolllevel2") stat = int32(item->Scroll.Level2); safe_delete(inst); return stat; } std::string Mob::GetGlobal(const char *varname) { int qgCharid = 0; int qgNpcid = 0; if (this->IsNPC()) qgNpcid = this->GetNPCTypeID(); if (this->IsClient()) qgCharid = this->CastToClient()->CharacterID(); QGlobalCache *qglobals = nullptr; std::list<QGlobal> globalMap; if (this->IsClient()) qglobals = this->CastToClient()->GetQGlobals(); if (this->IsNPC()) qglobals = this->CastToNPC()->GetQGlobals(); if(qglobals) QGlobalCache::Combine(globalMap, qglobals->GetBucket(), qgNpcid, qgCharid, zone->GetZoneID()); auto iter = globalMap.begin(); while(iter != globalMap.end()) { if ((*iter).name.compare(varname) == 0) return (*iter).value; ++iter; } return "Undefined"; } void Mob::SetGlobal(const char *varname, const char *newvalue, int options, const char *duration, Mob *other) { int qgZoneid = zone->GetZoneID(); int qgCharid = 0; int qgNpcid = 0; if (this->IsNPC()) { qgNpcid = this->GetNPCTypeID(); } else if (other && other->IsNPC()) { qgNpcid = other->GetNPCTypeID(); } if (this->IsClient()) { qgCharid = this->CastToClient()->CharacterID(); } else if (other && other->IsClient()) { qgCharid = other->CastToClient()->CharacterID(); } else { qgCharid = -qgNpcid; // make char id negative npc id as a fudge } if (options < 0 || options > 7) { //cerr << "Invalid options for global var " << varname << " using defaults" << endl; options = 0; // default = 0 (only this npcid,player and zone) } else { if (options & 1) qgNpcid=0; if (options & 2) qgCharid=0; if (options & 4) qgZoneid=0; } InsertQuestGlobal(qgCharid, qgNpcid, qgZoneid, varname, newvalue, QGVarDuration(duration)); } void Mob::TarGlobal(const char *varname, const char *value, const char *duration, int qgNpcid, int qgCharid, int qgZoneid) { 
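	// Unlike SetGlobal() above, which derives its scoping from this mob plus an
	// options bitmask, TarGlobal() takes caller-supplied npc/char/zone ids, letting
	// scripts target the quest global explicitly.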
InsertQuestGlobal(qgCharid, qgNpcid, qgZoneid, varname, value, QGVarDuration(duration)); } void Mob::DelGlobal(const char *varname) { int qgZoneid=zone->GetZoneID(); int qgCharid=0; int qgNpcid=0; if (this->IsNPC()) qgNpcid = this->GetNPCTypeID(); if (this->IsClient()) qgCharid = this->CastToClient()->CharacterID(); else qgCharid = -qgNpcid; // make char id negative npc id as a fudge std::string query = StringFormat("DELETE FROM quest_globals " "WHERE name='%s' && (npcid=0 || npcid=%i) " "&& (charid=0 || charid=%i) " "&& (zoneid=%i || zoneid=0)", varname, qgNpcid, qgCharid, qgZoneid); database.QueryDatabase(query); if(zone) { auto pack = new ServerPacket(ServerOP_QGlobalDelete, sizeof(ServerQGlobalDelete_Struct)); ServerQGlobalDelete_Struct *qgu = (ServerQGlobalDelete_Struct*)pack->pBuffer; qgu->npc_id = qgNpcid; qgu->char_id = qgCharid; qgu->zone_id = qgZoneid; strcpy(qgu->name, varname); entity_list.DeleteQGlobal(std::string((char*)qgu->name), qgu->npc_id, qgu->char_id, qgu->zone_id); zone->DeleteQGlobal(std::string((char*)qgu->name), qgu->npc_id, qgu->char_id, qgu->zone_id); worldserver.SendPacket(pack); safe_delete(pack); } } // Inserts global variable into quest_globals table void Mob::InsertQuestGlobal(int charid, int npcid, int zoneid, const char *varname, const char *varvalue, int duration) { // Make duration string either "unix_timestamp(now()) + xxx" or "NULL" std::stringstream duration_ss; if (duration == INT_MAX) duration_ss << "NULL"; else duration_ss << "unix_timestamp(now()) + " << duration; //NOTE: this should be escaping the contents of arglist //npcwise a malicious script can arbitrarily alter the DB uint32 last_id = 0; std::string query = StringFormat("REPLACE INTO quest_globals " "(charid, npcid, zoneid, name, value, expdate)" "VALUES (%i, %i, %i, '%s', '%s', %s)", charid, npcid, zoneid, varname, varvalue, duration_ss.str().c_str()); database.QueryDatabase(query); if(zone) { //first delete our global auto pack = new ServerPacket(ServerOP_QGlobalDelete, sizeof(ServerQGlobalDelete_Struct)); ServerQGlobalDelete_Struct *qgd = (ServerQGlobalDelete_Struct*)pack->pBuffer; qgd->npc_id = npcid; qgd->char_id = charid; qgd->zone_id = zoneid; qgd->from_zone_id = zone->GetZoneID(); qgd->from_instance_id = zone->GetInstanceID(); strcpy(qgd->name, varname); entity_list.DeleteQGlobal(std::string((char*)qgd->name), qgd->npc_id, qgd->char_id, qgd->zone_id); zone->DeleteQGlobal(std::string((char*)qgd->name), qgd->npc_id, qgd->char_id, qgd->zone_id); worldserver.SendPacket(pack); safe_delete(pack); //then create a new one with the new id pack = new ServerPacket(ServerOP_QGlobalUpdate, sizeof(ServerQGlobalUpdate_Struct)); ServerQGlobalUpdate_Struct *qgu = (ServerQGlobalUpdate_Struct*)pack->pBuffer; qgu->npc_id = npcid; qgu->char_id = charid; qgu->zone_id = zoneid; if(duration == INT_MAX) qgu->expdate = 0xFFFFFFFF; else qgu->expdate = Timer::GetTimeSeconds() + duration; strcpy((char*)qgu->name, varname); strcpy((char*)qgu->value, varvalue); qgu->id = last_id; qgu->from_zone_id = zone->GetZoneID(); qgu->from_instance_id = zone->GetInstanceID(); QGlobal temp; temp.npc_id = npcid; temp.char_id = charid; temp.zone_id = zoneid; temp.expdate = qgu->expdate; temp.name.assign(qgu->name); temp.value.assign(qgu->value); entity_list.UpdateQGlobal(qgu->id, temp); zone->UpdateQGlobal(qgu->id, temp); worldserver.SendPacket(pack); safe_delete(pack); } } // Converts duration string to duration value (in seconds) // Return of INT_MAX indicates infinite duration int Mob::QGVarDuration(const char *fmt) { int 
duration = 0; // format: Y#### or D## or H## or M## or S## or T###### or C####### int len = static_cast<int>(strlen(fmt)); // Default to no duration if (len < 1) return 0; // Set val to value after type character // e.g., for "M3924", set to 3924 int val = atoi(&fmt[0] + 1); switch (fmt[0]) { // Forever case 'F': case 'f': duration = INT_MAX; break; // Years case 'Y': case 'y': duration = val * 31556926; break; case 'D': case 'd': duration = val * 86400; break; // Hours case 'H': case 'h': duration = val * 3600; break; // Minutes case 'M': case 'm': duration = val * 60; break; // Seconds case 'S': case 's': duration = val; break; // Invalid default: duration = 0; break; } return duration; } void Mob::DoKnockback(Mob *caster, uint32 pushback, uint32 pushup) { if(IsClient()) { auto outapp_push = new EQApplicationPacket(OP_ClientUpdate, sizeof(PlayerPositionUpdateServer_Struct)); PlayerPositionUpdateServer_Struct* spu = (PlayerPositionUpdateServer_Struct*)outapp_push->pBuffer; double look_heading = caster->CalculateHeadingToTarget(GetX(), GetY()); look_heading /= 256; look_heading *= 360; if(look_heading > 360) look_heading -= 360; //x and y are crossed mkay double new_x = pushback * sin(double(look_heading * 3.141592 / 180.0)); double new_y = pushback * cos(double(look_heading * 3.141592 / 180.0)); spu->spawn_id = GetID(); spu->x_pos = FloatToEQ19(GetX()); spu->y_pos = FloatToEQ19(GetY()); spu->z_pos = FloatToEQ19(GetZ()); spu->delta_x = FloatToEQ13(static_cast<float>(new_x)); spu->delta_y = FloatToEQ13(static_cast<float>(new_y)); spu->delta_z = FloatToEQ13(static_cast<float>(pushup)); spu->heading = FloatToEQ12(GetHeading()); // for ref: these were not passed on to other 5 clients while on Titanium standard (change to RoF2 standard: 11/16/2019) //eq->padding0002 = 0; //eq->padding0006 = 0x7; //eq->padding0014 = 0x7F; //eq->padding0018 = 0x5dF27; spu->animation = 0; spu->delta_heading = FloatToEQ10(0); outapp_push->priority = 6; entity_list.QueueClients(this, outapp_push, true); CastToClient()->FastQueuePacket(&outapp_push); } } void Mob::TrySpellOnKill(uint8 level, uint16 spell_id) { if (spell_id != SPELL_UNKNOWN) { if(IsEffectInSpell(spell_id, SE_ProcOnSpellKillShot)) { for (int i = 0; i < EFFECT_COUNT; i++) { if (spells[spell_id].effectid[i] == SE_ProcOnSpellKillShot) { if (IsValidSpell(spells[spell_id].base2[i]) && spells[spell_id].max[i] <= level) { if(zone->random.Roll(spells[spell_id].base[i])) SpellFinished(spells[spell_id].base2[i], this, EQ::spells::CastingSlot::Item, 0, -1, spells[spells[spell_id].base2[i]].ResistDiff); } } } } } if (!aabonuses.SpellOnKill[0] && !itembonuses.SpellOnKill[0] && !spellbonuses.SpellOnKill[0]) return; // Allow to check AA, items and buffs in all cases. 
Base2 = Spell to fire | Base1 = % chance | Base3 = min level

	for(int i = 0; i < MAX_SPELL_TRIGGER*3; i+=3) {

		if(aabonuses.SpellOnKill[i] && IsValidSpell(aabonuses.SpellOnKill[i]) && (level >= aabonuses.SpellOnKill[i + 2])) {
			if(zone->random.Roll(static_cast<int>(aabonuses.SpellOnKill[i + 1])))
				SpellFinished(aabonuses.SpellOnKill[i], this, EQ::spells::CastingSlot::Item, 0, -1, spells[aabonuses.SpellOnKill[i]].ResistDiff);
		}

		if(itembonuses.SpellOnKill[i] && IsValidSpell(itembonuses.SpellOnKill[i]) && (level >= itembonuses.SpellOnKill[i + 2])){
			if(zone->random.Roll(static_cast<int>(itembonuses.SpellOnKill[i + 1])))
				SpellFinished(itembonuses.SpellOnKill[i], this, EQ::spells::CastingSlot::Item, 0, -1, spells[itembonuses.SpellOnKill[i]].ResistDiff);
		}

		if(spellbonuses.SpellOnKill[i] && IsValidSpell(spellbonuses.SpellOnKill[i]) && (level >= spellbonuses.SpellOnKill[i + 2])) {
			if(zone->random.Roll(static_cast<int>(spellbonuses.SpellOnKill[i + 1])))
				SpellFinished(spellbonuses.SpellOnKill[i], this, EQ::spells::CastingSlot::Item, 0, -1, spells[spellbonuses.SpellOnKill[i]].ResistDiff);
		}
	}
}

bool Mob::TrySpellOnDeath()
{
	if (IsNPC() && !spellbonuses.SpellOnDeath[0] && !itembonuses.SpellOnDeath[0])
		return false;

	if (IsClient() && !aabonuses.SpellOnDeath[0] && !spellbonuses.SpellOnDeath[0] && !itembonuses.SpellOnDeath[0])
		return false;

	for(int i = 0; i < MAX_SPELL_TRIGGER*2; i+=2) {

		if(IsClient() && aabonuses.SpellOnDeath[i] && IsValidSpell(aabonuses.SpellOnDeath[i])) {
			if(zone->random.Roll(static_cast<int>(aabonuses.SpellOnDeath[i + 1]))) {
				SpellFinished(aabonuses.SpellOnDeath[i], this, EQ::spells::CastingSlot::Item, 0, -1, spells[aabonuses.SpellOnDeath[i]].ResistDiff);
			}
		}

		if(itembonuses.SpellOnDeath[i] && IsValidSpell(itembonuses.SpellOnDeath[i])) {
			if(zone->random.Roll(static_cast<int>(itembonuses.SpellOnDeath[i + 1]))) {
				SpellFinished(itembonuses.SpellOnDeath[i], this, EQ::spells::CastingSlot::Item, 0, -1, spells[itembonuses.SpellOnDeath[i]].ResistDiff);
			}
		}

		if(spellbonuses.SpellOnDeath[i] && IsValidSpell(spellbonuses.SpellOnDeath[i])) {
			if(zone->random.Roll(static_cast<int>(spellbonuses.SpellOnDeath[i + 1]))) {
				SpellFinished(spellbonuses.SpellOnDeath[i], this, EQ::spells::CastingSlot::Item, 0, -1, spells[spellbonuses.SpellOnDeath[i]].ResistDiff);
			}
		}
	}

	BuffFadeAll();
	return false;
	//You should not be able to use this effect and survive (ALWAYS return false);
	//attempting to place a heal in these effects will still result
	//in death because the heal will not register before the script kills you.
} int16 Mob::GetCritDmgMod(uint16 skill) { int critDmg_mod = 0; // All skill dmg mod + Skill specific critDmg_mod += itembonuses.CritDmgMod[EQ::skills::HIGHEST_SKILL + 1] + spellbonuses.CritDmgMod[EQ::skills::HIGHEST_SKILL + 1] + aabonuses.CritDmgMod[EQ::skills::HIGHEST_SKILL + 1] + itembonuses.CritDmgMod[skill] + spellbonuses.CritDmgMod[skill] + aabonuses.CritDmgMod[skill]; return critDmg_mod; } void Mob::SetGrouped(bool v) { if(v) { israidgrouped = false; } isgrouped = v; if(IsClient()) { parse->EventPlayer(EVENT_GROUP_CHANGE, CastToClient(), "", 0); if(!v) CastToClient()->RemoveGroupXTargets(); } } void Mob::SetRaidGrouped(bool v) { if(v) { isgrouped = false; } israidgrouped = v; if(IsClient()) { parse->EventPlayer(EVENT_GROUP_CHANGE, CastToClient(), "", 0); } } int Mob::GetCriticalChanceBonus(uint16 skill) { int critical_chance = 0; // All skills + Skill specific critical_chance += itembonuses.CriticalHitChance[EQ::skills::HIGHEST_SKILL + 1] + spellbonuses.CriticalHitChance[EQ::skills::HIGHEST_SKILL + 1] + aabonuses.CriticalHitChance[EQ::skills::HIGHEST_SKILL + 1] + itembonuses.CriticalHitChance[skill] + spellbonuses.CriticalHitChance[skill] + aabonuses.CriticalHitChance[skill]; if(critical_chance < -100) critical_chance = -100; return critical_chance; } int16 Mob::GetMeleeDamageMod_SE(uint16 skill) { int dmg_mod = 0; // All skill dmg mod + Skill specific dmg_mod += itembonuses.DamageModifier[EQ::skills::HIGHEST_SKILL + 1] + spellbonuses.DamageModifier[EQ::skills::HIGHEST_SKILL + 1] + aabonuses.DamageModifier[EQ::skills::HIGHEST_SKILL + 1] + itembonuses.DamageModifier[skill] + spellbonuses.DamageModifier[skill] + aabonuses.DamageModifier[skill]; dmg_mod += itembonuses.DamageModifier2[EQ::skills::HIGHEST_SKILL + 1] + spellbonuses.DamageModifier2[EQ::skills::HIGHEST_SKILL + 1] + aabonuses.DamageModifier2[EQ::skills::HIGHEST_SKILL + 1] + itembonuses.DamageModifier2[skill] + spellbonuses.DamageModifier2[skill] + aabonuses.DamageModifier2[skill]; if(dmg_mod < -100) dmg_mod = -100; return dmg_mod; } int16 Mob::GetMeleeMinDamageMod_SE(uint16 skill) { int dmg_mod = 0; dmg_mod = itembonuses.MinDamageModifier[skill] + spellbonuses.MinDamageModifier[skill] + itembonuses.MinDamageModifier[EQ::skills::HIGHEST_SKILL + 1] + spellbonuses.MinDamageModifier[EQ::skills::HIGHEST_SKILL + 1]; if(dmg_mod < -100) dmg_mod = -100; return dmg_mod; } int16 Mob::GetCrippBlowChance() { int16 crip_chance = 0; crip_chance += itembonuses.CrippBlowChance + spellbonuses.CrippBlowChance + aabonuses.CrippBlowChance; if(crip_chance < 0) crip_chance = 0; return crip_chance; } int16 Mob::GetSkillReuseTime(uint16 skill) { int skill_reduction = this->itembonuses.SkillReuseTime[skill] + this->spellbonuses.SkillReuseTime[skill] + this->aabonuses.SkillReuseTime[skill]; return skill_reduction; } int16 Mob::GetSkillDmgAmt(uint16 skill) { int skill_dmg = 0; // All skill dmg(only spells do this) + Skill specific skill_dmg += spellbonuses.SkillDamageAmount[EQ::skills::HIGHEST_SKILL + 1] + itembonuses.SkillDamageAmount[EQ::skills::HIGHEST_SKILL + 1] + aabonuses.SkillDamageAmount[EQ::skills::HIGHEST_SKILL + 1] + itembonuses.SkillDamageAmount[skill] + spellbonuses.SkillDamageAmount[skill] + aabonuses.SkillDamageAmount[skill]; skill_dmg += spellbonuses.SkillDamageAmount2[EQ::skills::HIGHEST_SKILL + 1] + itembonuses.SkillDamageAmount2[EQ::skills::HIGHEST_SKILL + 1] + itembonuses.SkillDamageAmount2[skill] + spellbonuses.SkillDamageAmount2[skill]; return skill_dmg; } void Mob::MeleeLifeTap(int32 damage) { int32 lifetap_amt = 0; lifetap_amt = 
spellbonuses.MeleeLifetap + itembonuses.MeleeLifetap + aabonuses.MeleeLifetap +
				spellbonuses.Vampirism + itembonuses.Vampirism + aabonuses.Vampirism;

	if(lifetap_amt && damage > 0){

		lifetap_amt = damage * lifetap_amt / 100;
		LogCombat("Melee lifetap healing for [{}] damage", damage);

		if (lifetap_amt > 0)
			HealDamage(lifetap_amt); //Heal self for modified damage amount.
		else
			Damage(this, -lifetap_amt, 0, EQ::skills::SkillEvocation, false); //Dmg self for modified damage amount.
	}
}

bool Mob::TryReflectSpell(uint32 spell_id)
{
	if (!spells[spell_id].reflectable)
		return false;

	int chance = itembonuses.reflect_chance + spellbonuses.reflect_chance + aabonuses.reflect_chance;

	if(chance && zone->random.Roll(chance))
		return true;

	return false;
}

void Mob::DoGravityEffect()
{
	Mob *caster = nullptr;
	int away = -1;
	float caster_x, caster_y, amount, value, cur_x, my_x, cur_y, my_y, x_vector, y_vector, hypot;

	// Set values so we can run through all gravity effects and then apply the cumulative move at the end
	// instead of many small moves if the mob/client had more than 1 gravity effect on them
	cur_x = my_x = GetX();
	cur_y = my_y = GetY();

	int buff_count = GetMaxTotalSlots();
	for (int slot = 0; slot < buff_count; slot++)
	{
		if (buffs[slot].spellid != SPELL_UNKNOWN && IsEffectInSpell(buffs[slot].spellid, SE_GravityEffect))
		{
			for (int i = 0; i < EFFECT_COUNT; i++)
			{
				if(spells[buffs[slot].spellid].effectid[i] == SE_GravityEffect) {

					int casterId = buffs[slot].casterid;
					if(casterId)
						caster = entity_list.GetMob(casterId);

					if(!caster || casterId == this->GetID())
						continue;

					caster_x = caster->GetX();
					caster_y = caster->GetY();

					value = static_cast<float>(spells[buffs[slot].spellid].base[i]);
					if(value == 0)
						continue;

					if(value > 0)
						away = 1;

					amount = std::abs(value) / (100.0f); // to bring the values in line, arbitrarily picked

					x_vector = cur_x - caster_x;
					y_vector = cur_y - caster_y;
					hypot = sqrt(x_vector*x_vector + y_vector*y_vector);

					if(hypot <= 5) // don't want to be inside the mob, even though we can, it looks bad
						continue;

					x_vector /= hypot;
					y_vector /= hypot;

					cur_x = cur_x + (x_vector * amount * away);
					cur_y = cur_y + (y_vector * amount * away);
				}
			}
		}
	}

	if ((std::abs(my_x - cur_x) > 0.01) || (std::abs(my_y - cur_y) > 0.01)) {
		float new_ground = GetGroundZ(cur_x, cur_y);
		// If we can't get LoS on our new spot then keep checking up to 5 units up.
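		// Fallback order below: raise the checked height in 0.1-unit steps (up to 5
		// units); failing that, retry with only the x displacement (sliding along a
		// wall), then with only the y displacement; if nothing has line of sight,
		// the move is abandoned.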
if(!CheckLosFN(cur_x, cur_y, new_ground, GetSize())) { for(float z_adjust = 0.1f; z_adjust < 5; z_adjust += 0.1f) { if(CheckLosFN(cur_x, cur_y, new_ground+z_adjust, GetSize())) { new_ground += z_adjust; break; } } // If we still fail, then lets only use the x portion(ie sliding around a wall) if(!CheckLosFN(cur_x, my_y, new_ground, GetSize())) { // If that doesnt work, try the y if(!CheckLosFN(my_x, cur_y, new_ground, GetSize())) { // If everything fails, then lets do nothing return; } else { cur_x = my_x; } } else { cur_y = my_y; } } if(IsClient()) this->CastToClient()->MovePC(zone->GetZoneID(), zone->GetInstanceID(), cur_x, cur_y, new_ground, GetHeading()); else this->GMMove(cur_x, cur_y, new_ground, GetHeading()); } } void Mob::SpreadVirus(uint16 spell_id, uint16 casterID) { int num_targs = spells[spell_id].viral_targets; Mob* caster = entity_list.GetMob(casterID); Mob* target = nullptr; // Only spread in zones without perm buffs if(!zone->BuffTimersSuspended()) { for(int i = 0; i < num_targs; i++) { target = entity_list.GetTargetForVirus(this, spells[spell_id].viral_range); if(target) { // Only spreads to the uninfected if(!target->FindBuff(spell_id)) { if(caster) caster->SpellOnTarget(spell_id, target); } } } } } void Mob::AddNimbusEffect(int effectid) { SetNimbusEffect(effectid); auto outapp = new EQApplicationPacket(OP_AddNimbusEffect, sizeof(RemoveNimbusEffect_Struct)); auto ane = (RemoveNimbusEffect_Struct *)outapp->pBuffer; ane->spawnid = GetID(); ane->nimbus_effect = effectid; entity_list.QueueClients(this, outapp); safe_delete(outapp); } void Mob::RemoveNimbusEffect(int effectid) { if (effectid == nimbus_effect1) nimbus_effect1 = 0; else if (effectid == nimbus_effect2) nimbus_effect2 = 0; else if (effectid == nimbus_effect3) nimbus_effect3 = 0; auto outapp = new EQApplicationPacket(OP_RemoveNimbusEffect, sizeof(RemoveNimbusEffect_Struct)); RemoveNimbusEffect_Struct* rne = (RemoveNimbusEffect_Struct*)outapp->pBuffer; rne->spawnid = GetID(); rne->nimbus_effect = effectid; entity_list.QueueClients(this, outapp); safe_delete(outapp); } bool Mob::IsBoat() const { return ( race == RACE_SHIP_72 || race == RACE_LAUNCH_73 || race == RACE_GHOST_SHIP_114 || race == RACE_SHIP_404 || race == RACE_MERCHANT_SHIP_550 || race == RACE_PIRATE_SHIP_551 || race == RACE_GHOST_SHIP_552 || race == RACE_BOAT_533 ); } bool Mob::IsControllableBoat() const { return ( race == RACE_BOAT_141 || race == RACE_ROWBOAT_502 ); } void Mob::SetBodyType(bodyType new_body, bool overwrite_orig) { bool needs_spawn_packet = false; if(bodytype == 11 || bodytype >= 65 || new_body == 11 || new_body >= 65) { needs_spawn_packet = true; } if(overwrite_orig) { orig_bodytype = new_body; } bodytype = new_body; if(needs_spawn_packet) { auto app = new EQApplicationPacket; CreateDespawnPacket(app, true); entity_list.QueueClients(this, app); CreateSpawnPacket(app, this); entity_list.QueueClients(this, app); safe_delete(app); } } void Mob::ModSkillDmgTaken(EQ::skills::SkillType skill_num, int value) { if (skill_num == ALL_SKILLS) SkillDmgTaken_Mod[EQ::skills::HIGHEST_SKILL + 1] = value; else if (skill_num >= 0 && skill_num <= EQ::skills::HIGHEST_SKILL) SkillDmgTaken_Mod[skill_num] = value; } int16 Mob::GetModSkillDmgTaken(const EQ::skills::SkillType skill_num) { if (skill_num == ALL_SKILLS) return SkillDmgTaken_Mod[EQ::skills::HIGHEST_SKILL + 1]; else if (skill_num >= 0 && skill_num <= EQ::skills::HIGHEST_SKILL) return SkillDmgTaken_Mod[skill_num]; return 0; } void Mob::ModVulnerability(uint8 resist, int16 value) { if (resist < 
HIGHEST_RESIST+1) Vulnerability_Mod[resist] = value; else if (resist == 255) Vulnerability_Mod[HIGHEST_RESIST+1] = value; } int16 Mob::GetModVulnerability(const uint8 resist) { if (resist < HIGHEST_RESIST+1) return Vulnerability_Mod[resist]; else if (resist == 255) return Vulnerability_Mod[HIGHEST_RESIST+1]; return 0; } void Mob::CastOnCurer(uint32 spell_id) { for(int i = 0; i < EFFECT_COUNT; i++) { if (spells[spell_id].effectid[i] == SE_CastOnCurer) { if(IsValidSpell(spells[spell_id].base[i])) { SpellFinished(spells[spell_id].base[i], this); } } } } void Mob::CastOnCure(uint32 spell_id) { for(int i = 0; i < EFFECT_COUNT; i++) { if (spells[spell_id].effectid[i] == SE_CastOnCure) { if(IsValidSpell(spells[spell_id].base[i])) { SpellFinished(spells[spell_id].base[i], this); } } } } void Mob::CastOnNumHitFade(uint32 spell_id) { if(!IsValidSpell(spell_id)) return; for(int i = 0; i < EFFECT_COUNT; i++) { if (spells[spell_id].effectid[i] == SE_CastonNumHitFade) { if(IsValidSpell(spells[spell_id].base[i])) { SpellFinished(spells[spell_id].base[i], this); } } } } void Mob::SlowMitigation(Mob* caster) { if (GetSlowMitigation() && caster && caster->IsClient()) { if ((GetSlowMitigation() > 0) && (GetSlowMitigation() < 26)) caster->MessageString(Chat::SpellFailure, SLOW_MOSTLY_SUCCESSFUL); else if ((GetSlowMitigation() >= 26) && (GetSlowMitigation() < 74)) caster->MessageString(Chat::SpellFailure, SLOW_PARTIALLY_SUCCESSFUL); else if ((GetSlowMitigation() >= 74) && (GetSlowMitigation() < 101)) caster->MessageString(Chat::SpellFailure, SLOW_SLIGHTLY_SUCCESSFUL); else if (GetSlowMitigation() > 100) caster->MessageString(Chat::SpellFailure, SPELL_OPPOSITE_EFFECT); } } uint16 Mob::GetSkillByItemType(int ItemType) { switch (ItemType) { case EQ::item::ItemType1HSlash: return EQ::skills::Skill1HSlashing; case EQ::item::ItemType2HSlash: return EQ::skills::Skill2HSlashing; case EQ::item::ItemType1HPiercing: return EQ::skills::Skill1HPiercing; case EQ::item::ItemType1HBlunt: return EQ::skills::Skill1HBlunt; case EQ::item::ItemType2HBlunt: return EQ::skills::Skill2HBlunt; case EQ::item::ItemType2HPiercing: if (IsClient() && CastToClient()->ClientVersion() < EQ::versions::ClientVersion::RoF2) return EQ::skills::Skill1HPiercing; else return EQ::skills::Skill2HPiercing; case EQ::item::ItemTypeBow: return EQ::skills::SkillArchery; case EQ::item::ItemTypeLargeThrowing: case EQ::item::ItemTypeSmallThrowing: return EQ::skills::SkillThrowing; case EQ::item::ItemTypeMartial: return EQ::skills::SkillHandtoHand; default: return EQ::skills::SkillHandtoHand; } } uint8 Mob::GetItemTypeBySkill(EQ::skills::SkillType skill) { switch (skill) { case EQ::skills::SkillThrowing: return EQ::item::ItemTypeSmallThrowing; case EQ::skills::SkillArchery: return EQ::item::ItemTypeArrow; case EQ::skills::Skill1HSlashing: return EQ::item::ItemType1HSlash; case EQ::skills::Skill2HSlashing: return EQ::item::ItemType2HSlash; case EQ::skills::Skill1HPiercing: return EQ::item::ItemType1HPiercing; case EQ::skills::Skill2HPiercing: // watch for undesired client behavior return EQ::item::ItemType2HPiercing; case EQ::skills::Skill1HBlunt: return EQ::item::ItemType1HBlunt; case EQ::skills::Skill2HBlunt: return EQ::item::ItemType2HBlunt; case EQ::skills::SkillHandtoHand: return EQ::item::ItemTypeMartial; default: return EQ::item::ItemTypeMartial; } } bool Mob::PassLimitToSkill(uint16 spell_id, uint16 skill) { if (!IsValidSpell(spell_id)) return false; for (int i = 0; i < EFFECT_COUNT; i++) { if (spells[spell_id].effectid[i] == SE_LimitToSkill){ if 
(spells[spell_id].base[i] == skill){ return true; } } } return false; } uint16 Mob::GetWeaponSpeedbyHand(uint16 hand) { uint16 weapon_speed = 0; switch (hand) { case 13: weapon_speed = attack_timer.GetDuration(); break; case 14: weapon_speed = attack_dw_timer.GetDuration(); break; case 11: weapon_speed = ranged_timer.GetDuration(); break; } if (weapon_speed < RuleI(Combat, MinHastedDelay)) weapon_speed = RuleI(Combat, MinHastedDelay); return weapon_speed; } int8 Mob::GetDecayEffectValue(uint16 spell_id, uint16 spelleffect) { if (!IsValidSpell(spell_id)) return false; int spell_level = spells[spell_id].classes[(GetClass()%17) - 1]; int effect_value = 0; int lvlModifier = 100; int buff_count = GetMaxTotalSlots(); for (int slot = 0; slot < buff_count; slot++){ if (IsValidSpell(buffs[slot].spellid)){ for (int i = 0; i < EFFECT_COUNT; i++){ if(spells[buffs[slot].spellid].effectid[i] == spelleffect) { int critchance = spells[buffs[slot].spellid].base[i]; int decay = spells[buffs[slot].spellid].base2[i]; int lvldiff = spell_level - spells[buffs[slot].spellid].max[i]; if(lvldiff > 0 && decay > 0) { lvlModifier -= decay*lvldiff; if (lvlModifier > 0){ critchance = (critchance*lvlModifier)/100; effect_value += critchance; } } else effect_value += critchance; } } } } return effect_value; } // Faction Mods for Alliance type spells void Mob::AddFactionBonus(uint32 pFactionID,int32 bonus) { std::map <uint32, int32> :: const_iterator faction_bonus; typedef std::pair <uint32, int32> NewFactionBonus; faction_bonus = faction_bonuses.find(pFactionID); if(faction_bonus == faction_bonuses.end()) { faction_bonuses.insert(NewFactionBonus(pFactionID,bonus)); } else { if(faction_bonus->second<bonus) { faction_bonuses.erase(pFactionID); faction_bonuses.insert(NewFactionBonus(pFactionID,bonus)); } } } // Faction Mods from items void Mob::AddItemFactionBonus(uint32 pFactionID,int32 bonus) { std::map <uint32, int32> :: const_iterator faction_bonus; typedef std::pair <uint32, int32> NewFactionBonus; faction_bonus = item_faction_bonuses.find(pFactionID); if(faction_bonus == item_faction_bonuses.end()) { item_faction_bonuses.insert(NewFactionBonus(pFactionID,bonus)); } else { if((bonus > 0 && faction_bonus->second < bonus) || (bonus < 0 && faction_bonus->second > bonus)) { item_faction_bonuses.erase(pFactionID); item_faction_bonuses.insert(NewFactionBonus(pFactionID,bonus)); } } } int32 Mob::GetFactionBonus(uint32 pFactionID) { std::map <uint32, int32> :: const_iterator faction_bonus; faction_bonus = faction_bonuses.find(pFactionID); if(faction_bonus != faction_bonuses.end()) { return (*faction_bonus).second; } return 0; } int32 Mob::GetItemFactionBonus(uint32 pFactionID) { std::map <uint32, int32> :: const_iterator faction_bonus; faction_bonus = item_faction_bonuses.find(pFactionID); if(faction_bonus != item_faction_bonuses.end()) { return (*faction_bonus).second; } return 0; } void Mob::ClearItemFactionBonuses() { item_faction_bonuses.clear(); } FACTION_VALUE Mob::GetSpecialFactionCon(Mob* iOther) { if (!iOther) return FACTION_INDIFFERENT; iOther = iOther->GetOwnerOrSelf(); Mob* self = this->GetOwnerOrSelf(); bool selfAIcontrolled = self->IsAIControlled(); bool iOtherAIControlled = iOther->IsAIControlled(); int selfPrimaryFaction = self->GetPrimaryFaction(); int iOtherPrimaryFaction = iOther->GetPrimaryFaction(); if (selfPrimaryFaction >= 0 && selfAIcontrolled) return FACTION_INDIFFERENT; if (iOther->GetPrimaryFaction() >= 0) return FACTION_INDIFFERENT; /* special values: -2 = indiff to player, ally to AI on special 
values, indiff to AI -3 = dub to player, ally to AI on special values, indiff to AI -4 = atk to player, ally to AI on special values, indiff to AI -5 = indiff to player, indiff to AI -6 = dub to player, indiff to AI -7 = atk to player, indiff to AI -8 = indiff to players, ally to AI on same value, indiff to AI -9 = dub to players, ally to AI on same value, indiff to AI -10 = atk to players, ally to AI on same value, indiff to AI -11 = indiff to players, ally to AI on same value, atk to AI -12 = dub to players, ally to AI on same value, atk to AI -13 = atk to players, ally to AI on same value, atk to AI */ switch (iOtherPrimaryFaction) { case -2: // -2 = indiff to player, ally to AI on special values, indiff to AI if (selfAIcontrolled && iOtherAIControlled) return FACTION_ALLY; else return FACTION_INDIFFERENT; case -3: // -3 = dub to player, ally to AI on special values, indiff to AI if (selfAIcontrolled && iOtherAIControlled) return FACTION_ALLY; else return FACTION_DUBIOUS; case -4: // -4 = atk to player, ally to AI on special values, indiff to AI if (selfAIcontrolled && iOtherAIControlled) return FACTION_ALLY; else return FACTION_SCOWLS; case -5: // -5 = indiff to player, indiff to AI return FACTION_INDIFFERENT; case -6: // -6 = dub to player, indiff to AI if (selfAIcontrolled && iOtherAIControlled) return FACTION_INDIFFERENT; else return FACTION_DUBIOUS; case -7: // -7 = atk to player, indiff to AI if (selfAIcontrolled && iOtherAIControlled) return FACTION_INDIFFERENT; else return FACTION_SCOWLS; case -8: // -8 = indiff to players, ally to AI on same value, indiff to AI if (selfAIcontrolled && iOtherAIControlled) { if (selfPrimaryFaction == iOtherPrimaryFaction) return FACTION_ALLY; else return FACTION_INDIFFERENT; } else return FACTION_INDIFFERENT; case -9: // -9 = dub to players, ally to AI on same value, indiff to AI if (selfAIcontrolled && iOtherAIControlled) { if (selfPrimaryFaction == iOtherPrimaryFaction) return FACTION_ALLY; else return FACTION_INDIFFERENT; } else return FACTION_DUBIOUS; case -10: // -10 = atk to players, ally to AI on same value, indiff to AI if (selfAIcontrolled && iOtherAIControlled) { if (selfPrimaryFaction == iOtherPrimaryFaction) return FACTION_ALLY; else return FACTION_INDIFFERENT; } else return FACTION_SCOWLS; case -11: // -11 = indiff to players, ally to AI on same value, atk to AI if (selfAIcontrolled && iOtherAIControlled) { if (selfPrimaryFaction == iOtherPrimaryFaction) return FACTION_ALLY; else return FACTION_SCOWLS; } else return FACTION_INDIFFERENT; case -12: // -12 = dub to players, ally to AI on same value, atk to AI if (selfAIcontrolled && iOtherAIControlled) { if (selfPrimaryFaction == iOtherPrimaryFaction) return FACTION_ALLY; else return FACTION_SCOWLS; } else return FACTION_DUBIOUS; case -13: // -13 = atk to players, ally to AI on same value, atk to AI if (selfAIcontrolled && iOtherAIControlled) { if (selfPrimaryFaction == iOtherPrimaryFaction) return FACTION_ALLY; else return FACTION_SCOWLS; } else return FACTION_SCOWLS; default: return FACTION_INDIFFERENT; } } bool Mob::HasSpellEffect(int effectid) { int i; int buff_count = GetMaxTotalSlots(); for(i = 0; i < buff_count; i++) { if(buffs[i].spellid == SPELL_UNKNOWN) { continue; } if(IsEffectInSpell(buffs[i].spellid, effectid)) { return(1); } } return(0); } int Mob::GetSpecialAbility(int ability) { if (ability >= MAX_SPECIAL_ATTACK || ability < 0) { return 0; } return SpecialAbilities[ability].level; } bool Mob::HasSpecialAbilities() { for (int i = 0; i < MAX_SPECIAL_ATTACK; ++i) { if 
(GetSpecialAbility(i)) { return true; } } return false; } int Mob::GetSpecialAbilityParam(int ability, int param) { if(param >= MAX_SPECIAL_ATTACK_PARAMS || param < 0 || ability >= MAX_SPECIAL_ATTACK || ability < 0) { return 0; } return SpecialAbilities[ability].params[param]; } void Mob::SetSpecialAbility(int ability, int level) { if(ability >= MAX_SPECIAL_ATTACK || ability < 0) { return; } SpecialAbilities[ability].level = level; } void Mob::SetSpecialAbilityParam(int ability, int param, int value) { if(param >= MAX_SPECIAL_ATTACK_PARAMS || param < 0 || ability >= MAX_SPECIAL_ATTACK || ability < 0) { return; } SpecialAbilities[ability].params[param] = value; } void Mob::StartSpecialAbilityTimer(int ability, uint32 time) { if (ability >= MAX_SPECIAL_ATTACK || ability < 0) { return; } if(SpecialAbilities[ability].timer) { SpecialAbilities[ability].timer->Start(time); } else { SpecialAbilities[ability].timer = new Timer(time); SpecialAbilities[ability].timer->Start(); } } void Mob::StopSpecialAbilityTimer(int ability) { if (ability >= MAX_SPECIAL_ATTACK || ability < 0) { return; } safe_delete(SpecialAbilities[ability].timer); } Timer *Mob::GetSpecialAbilityTimer(int ability) { if (ability >= MAX_SPECIAL_ATTACK || ability < 0) { return nullptr; } return SpecialAbilities[ability].timer; } void Mob::ClearSpecialAbilities() { for(int a = 0; a < MAX_SPECIAL_ATTACK; ++a) { SpecialAbilities[a].level = 0; safe_delete(SpecialAbilities[a].timer); for(int p = 0; p < MAX_SPECIAL_ATTACK_PARAMS; ++p) { SpecialAbilities[a].params[p] = 0; } } } void Mob::ProcessSpecialAbilities(const std::string &str) { ClearSpecialAbilities(); std::vector<std::string> sp = SplitString(str, '^'); for(auto iter = sp.begin(); iter != sp.end(); ++iter) { std::vector<std::string> sub_sp = SplitString((*iter), ','); if(sub_sp.size() >= 2) { int ability = std::stoi(sub_sp[0]); int value = std::stoi(sub_sp[1]); SetSpecialAbility(ability, value); switch(ability) { case SPECATK_QUAD: if(value > 0) { SetSpecialAbility(SPECATK_TRIPLE, 1); } break; case DESTRUCTIBLE_OBJECT: if(value == 0) { SetDestructibleObject(false); } else { SetDestructibleObject(true); } break; default: break; } for(size_t i = 2, p = 0; i < sub_sp.size(); ++i, ++p) { if(p >= MAX_SPECIAL_ATTACK_PARAMS) { break; } SetSpecialAbilityParam(ability, p, std::stoi(sub_sp[i])); } } } } // derived from client to keep these functions more consistent // if anything seems weird, blame SoE bool Mob::IsFacingMob(Mob *other) { if (!other) return false; float angle = HeadingAngleToMob(other); float heading = GetHeading(); if (angle > 472.0 && heading < 40.0) angle = heading; if (angle < 40.0 && heading > 472.0) angle = heading; if (std::abs(angle - heading) <= 80.0) return true; return false; } // All numbers derived from the client float Mob::HeadingAngleToMob(float other_x, float other_y) { float this_x = GetX(); float this_y = GetY(); return CalculateHeadingAngleBetweenPositions(this_x, this_y, other_x, other_y); } bool Mob::GetSeeInvisible(uint8 see_invis) { if(see_invis > 0) { if(see_invis == 1) return true; else { if (zone->random.Int(0, 99) < see_invis) return true; } } return false; } int32 Mob::GetSpellStat(uint32 spell_id, const char *identifier, uint8 slot) { if (!IsValidSpell(spell_id)) return 0; if (!identifier) return 0; int32 stat = 0; if (slot > 0) slot = slot - 1; std::string id = identifier; for(uint32 i = 0; i < id.length(); ++i) { id[i] = tolower(id[i]); } if (slot < 16){ if (id == "classes") {return spells[spell_id].classes[slot]; } else if (id == "dieties") 
{return spells[spell_id].deities[slot];} } if (slot < 12){ if (id == "base") {return spells[spell_id].base[slot];} else if (id == "base2") {return spells[spell_id].base2[slot];} else if (id == "max") {return spells[spell_id].max[slot];} else if (id == "formula") {return spells[spell_id].formula[slot];} else if (id == "effectid") {return spells[spell_id].effectid[slot];} } if (slot < 4){ if (id == "components") { return spells[spell_id].components[slot];} else if (id == "component_counts") { return spells[spell_id].component_counts[slot];} else if (id == "NoexpendReagent") {return spells[spell_id].NoexpendReagent[slot];} } if (id == "range") {return static_cast<int32>(spells[spell_id].range); } else if (id == "aoerange") {return static_cast<int32>(spells[spell_id].aoerange);} else if (id == "pushback") {return static_cast<int32>(spells[spell_id].pushback);} else if (id == "pushup") {return static_cast<int32>(spells[spell_id].pushup);} else if (id == "cast_time") {return spells[spell_id].cast_time;} else if (id == "recovery_time") {return spells[spell_id].recovery_time;} else if (id == "recast_time") {return spells[spell_id].recast_time;} else if (id == "buffdurationformula") {return spells[spell_id].buffdurationformula;} else if (id == "buffduration") {return spells[spell_id].buffduration;} else if (id == "AEDuration") {return spells[spell_id].AEDuration;} else if (id == "mana") {return spells[spell_id].mana;} //else if (id == "LightType") {stat = spells[spell_id].LightType;} - Not implemented else if (id == "goodEffect") {return spells[spell_id].goodEffect;} else if (id == "Activated") {return spells[spell_id].Activated;} else if (id == "resisttype") {return spells[spell_id].resisttype;} else if (id == "targettype") {return spells[spell_id].targettype;} else if (id == "basedeiff") {return spells[spell_id].basediff;} else if (id == "skill") {return spells[spell_id].skill;} else if (id == "zonetype") {return spells[spell_id].zonetype;} else if (id == "EnvironmentType") {return spells[spell_id].EnvironmentType;} else if (id == "TimeOfDay") {return spells[spell_id].TimeOfDay;} else if (id == "CastingAnim") {return spells[spell_id].CastingAnim;} else if (id == "SpellAffectIndex") {return spells[spell_id].SpellAffectIndex; } else if (id == "disallow_sit") {return spells[spell_id].disallow_sit; } //else if (id == "spellanim") {stat = spells[spell_id].spellanim; } - Not implemented else if (id == "uninterruptable") {return spells[spell_id].uninterruptable; } else if (id == "ResistDiff") {return spells[spell_id].ResistDiff; } else if (id == "dot_stacking_exemp") {return spells[spell_id].dot_stacking_exempt; } else if (id == "RecourseLink") {return spells[spell_id].RecourseLink; } else if (id == "no_partial_resist") {return spells[spell_id].no_partial_resist; } else if (id == "short_buff_box") {return spells[spell_id].short_buff_box; } else if (id == "descnum") {return spells[spell_id].descnum; } else if (id == "effectdescnum") {return spells[spell_id].effectdescnum; } else if (id == "npc_no_los") {return spells[spell_id].npc_no_los; } else if (id == "reflectable") {return spells[spell_id].reflectable; } else if (id == "bonushate") {return spells[spell_id].bonushate; } else if (id == "EndurCost") {return spells[spell_id].EndurCost; } else if (id == "EndurTimerIndex") {return spells[spell_id].EndurTimerIndex; } else if (id == "IsDisciplineBuf") {return spells[spell_id].IsDisciplineBuff; } else if (id == "HateAdded") {return spells[spell_id].HateAdded; } else if (id == "EndurUpkeep") {return 
spells[spell_id].EndurUpkeep; } else if (id == "numhitstype") {return spells[spell_id].numhitstype; } else if (id == "numhits") {return spells[spell_id].numhits; } else if (id == "pvpresistbase") {return spells[spell_id].pvpresistbase; } else if (id == "pvpresistcalc") {return spells[spell_id].pvpresistcalc; } else if (id == "pvpresistcap") {return spells[spell_id].pvpresistcap; } else if (id == "spell_category") {return spells[spell_id].spell_category; } else if (id == "can_mgb") {return spells[spell_id].can_mgb; } else if (id == "dispel_flag") {return spells[spell_id].dispel_flag; } else if (id == "MinResist") {return spells[spell_id].MinResist; } else if (id == "MaxResist") {return spells[spell_id].MaxResist; } else if (id == "viral_targets") {return spells[spell_id].viral_targets; } else if (id == "viral_timer") {return spells[spell_id].viral_timer; } else if (id == "NimbusEffect") {return spells[spell_id].NimbusEffect; } else if (id == "directional_start") {return static_cast<int32>(spells[spell_id].directional_start); } else if (id == "directional_end") {return static_cast<int32>(spells[spell_id].directional_end); } else if (id == "not_focusable") {return spells[spell_id].not_focusable; } else if (id == "suspendable") {return spells[spell_id].suspendable; } else if (id == "viral_range") {return spells[spell_id].viral_range; } else if (id == "spellgroup") {return spells[spell_id].spellgroup; } else if (id == "rank") {return spells[spell_id].rank; } else if (id == "no_resist") {return spells[spell_id].no_resist; } else if (id == "CastRestriction") {return spells[spell_id].CastRestriction; } else if (id == "AllowRest") {return spells[spell_id].AllowRest; } else if (id == "InCombat") {return spells[spell_id].InCombat; } else if (id == "OutofCombat") {return spells[spell_id].OutofCombat; } else if (id == "aemaxtargets") {return spells[spell_id].aemaxtargets; } else if (id == "no_heal_damage_item_mod") {return spells[spell_id].no_heal_damage_item_mod; } else if (id == "persistdeath") {return spells[spell_id].persistdeath; } else if (id == "min_dist") {return static_cast<int32>(spells[spell_id].min_dist); } else if (id == "min_dist_mod") {return static_cast<int32>(spells[spell_id].min_dist_mod); } else if (id == "max_dist") {return static_cast<int32>(spells[spell_id].max_dist); } else if (id == "min_range") {return static_cast<int32>(spells[spell_id].min_range); } else if (id == "DamageShieldType") {return spells[spell_id].DamageShieldType; } return stat; } bool Mob::CanClassEquipItem(uint32 item_id) { const EQ::ItemData* itm = nullptr; itm = database.GetItem(item_id); if (!itm) return false; if(itm->Classes == 65535 ) return true; if (GetClass() > 16) return false; int bitmask = 1; bitmask = bitmask << (GetClass() - 1); if(!(itm->Classes & bitmask)) return false; else return true; } void Mob::SendAddPlayerState(PlayerState new_state) { auto app = new EQApplicationPacket(OP_PlayerStateAdd, sizeof(PlayerState_Struct)); auto ps = (PlayerState_Struct *)app->pBuffer; ps->spawn_id = GetID(); ps->state = static_cast<uint32>(new_state); AddPlayerState(ps->state); entity_list.QueueClients(nullptr, app); safe_delete(app); } void Mob::SendRemovePlayerState(PlayerState old_state) { auto app = new EQApplicationPacket(OP_PlayerStateRemove, sizeof(PlayerState_Struct)); auto ps = (PlayerState_Struct *)app->pBuffer; ps->spawn_id = GetID(); ps->state = static_cast<uint32>(old_state); RemovePlayerState(ps->state); entity_list.QueueClients(nullptr, app); safe_delete(app); } int32 Mob::GetMeleeMitigation() { 
int32 mitigation = 0; mitigation += spellbonuses.MeleeMitigationEffect; mitigation += itembonuses.MeleeMitigationEffect; mitigation += aabonuses.MeleeMitigationEffect; return mitigation; } /* this is the mob being attacked. * Pass in the weapon's EQ::ItemInstance */ int Mob::ResistElementalWeaponDmg(const EQ::ItemInstance *item) { if (!item) return 0; int magic = 0, fire = 0, cold = 0, poison = 0, disease = 0, chromatic = 0, prismatic = 0, physical = 0, corruption = 0; int resist = 0; int roll = 0; /* this is how the client does the resist rolls for these. * Given the difficulty of parsing out these resists, I'll trust the client */ if (item->GetItemElementalDamage(magic, fire, cold, poison, disease, chromatic, prismatic, physical, corruption, true)) { if (magic) { resist = GetMR(); if (resist >= 201) { magic = 0; } else { roll = zone->random.Int(0, 200) - resist; if (roll < 1) magic = 0; else if (roll < 100) magic = magic * roll / 100; } } if (fire) { resist = GetFR(); if (resist >= 201) { fire = 0; } else { roll = zone->random.Int(0, 200) - resist; if (roll < 1) fire = 0; else if (roll < 100) fire = fire * roll / 100; } } if (cold) { resist = GetCR(); if (resist >= 201) { cold = 0; } else { roll = zone->random.Int(0, 200) - resist; if (roll < 1) cold = 0; else if (roll < 100) cold = cold * roll / 100; } } if (poison) { resist = GetPR(); if (resist >= 201) { poison = 0; } else { roll = zone->random.Int(0, 200) - resist; if (roll < 1) poison = 0; else if (roll < 100) poison = poison * roll / 100; } } if (disease) { resist = GetDR(); if (resist >= 201) { disease = 0; } else { roll = zone->random.Int(0, 200) - resist; if (roll < 1) disease = 0; else if (roll < 100) disease = disease * roll / 100; } } if (corruption) { resist = GetCorrup(); if (resist >= 201) { corruption = 0; } else { roll = zone->random.Int(0, 200) - resist; if (roll < 1) corruption = 0; else if (roll < 100) corruption = corruption * roll / 100; } } if (chromatic) { resist = GetFR(); int temp = GetCR(); if (temp < resist) resist = temp; temp = GetMR(); if (temp < resist) resist = temp; temp = GetDR(); if (temp < resist) resist = temp; temp = GetPR(); if (temp < resist) resist = temp; if (resist >= 201) { chromatic = 0; } else { roll = zone->random.Int(0, 200) - resist; if (roll < 1) chromatic = 0; else if (roll < 100) chromatic = chromatic * roll / 100; } } if (prismatic) { resist = (GetFR() + GetCR() + GetMR() + GetDR() + GetPR()) / 5; if (resist >= 201) { prismatic = 0; } else { roll = zone->random.Int(0, 200) - resist; if (roll < 1) prismatic = 0; else if (roll < 100) prismatic = prismatic * roll / 100; } } if (physical) { resist = GetPhR(); if (resist >= 201) { physical = 0; } else { roll = zone->random.Int(0, 200) - resist; if (roll < 1) physical = 0; else if (roll < 100) physical = physical * roll / 100; } } } return magic + fire + cold + poison + disease + chromatic + prismatic + physical + corruption; } /* this is the mob being attacked. 
* Pass in the weapon's EQ::ItemInstance */ int Mob::CheckBaneDamage(const EQ::ItemInstance *item) { if (!item) return 0; int damage = item->GetItemBaneDamageBody(GetBodyType(), true); damage += item->GetItemBaneDamageRace(GetRace(), true); return damage; } void Mob::CancelSneakHide() { if (hidden || improved_hidden) { hidden = false; improved_hidden = false; auto outapp = new EQApplicationPacket(OP_SpawnAppearance, sizeof(SpawnAppearance_Struct)); SpawnAppearance_Struct* sa_out = (SpawnAppearance_Struct*)outapp->pBuffer; sa_out->spawn_id = GetID(); sa_out->type = 0x03; sa_out->parameter = 0; entity_list.QueueClients(this, outapp, true); safe_delete(outapp); } } void Mob::CommonBreakInvisible() { BreakInvisibleSpells(); CancelSneakHide(); } float Mob::GetDefaultRaceSize() const { return GetRaceGenderDefaultHeight(race, gender); } #ifdef BOTS bool Mob::JoinHealRotationTargetPool(std::shared_ptr<HealRotation>* heal_rotation) { if (IsHealRotationTarget()) return false; if (!heal_rotation->use_count()) return false; if (!(*heal_rotation)) return false; if (!IsHealRotationTargetMobType(this)) return false; if (!(*heal_rotation)->AddTargetToPool(this)) return false; m_target_of_heal_rotation = *heal_rotation; return IsHealRotationTarget(); } bool Mob::LeaveHealRotationTargetPool() { if (!IsHealRotationTarget()) { m_target_of_heal_rotation.reset(); return true; } m_target_of_heal_rotation->RemoveTargetFromPool(this); m_target_of_heal_rotation.reset(); return !IsHealRotationTarget(); } uint32 Mob::HealRotationHealCount() { if (!IsHealRotationTarget()) return 0; return m_target_of_heal_rotation->HealCount(this); } uint32 Mob::HealRotationExtendedHealCount() { if (!IsHealRotationTarget()) return 0; return m_target_of_heal_rotation->ExtendedHealCount(this); } float Mob::HealRotationHealFrequency() { if (!IsHealRotationTarget()) return 0.0f; return m_target_of_heal_rotation->HealFrequency(this); } float Mob::HealRotationExtendedHealFrequency() { if (!IsHealRotationTarget()) return 0.0f; return m_target_of_heal_rotation->ExtendedHealFrequency(this); } #endif bool Mob::CanOpenDoors() const { return m_can_open_doors; } void Mob::SetCanOpenDoors(bool can_open) { m_can_open_doors = can_open; } void Mob::DeleteBucket(std::string bucket_name) { std::string full_bucket_name = fmt::format("{}-{}", GetBucketKey(), bucket_name); DataBucket::DeleteData(full_bucket_name); } std::string Mob::GetBucket(std::string bucket_name) { std::string full_bucket_name = fmt::format("{}-{}", GetBucketKey(), bucket_name); std::string bucket_value = DataBucket::GetData(full_bucket_name); if (!bucket_value.empty()) { return bucket_value; } return std::string(); } std::string Mob::GetBucketExpires(std::string bucket_name) { std::string full_bucket_name = fmt::format("{}-{}", GetBucketKey(), bucket_name); std::string bucket_expiration = DataBucket::GetDataExpires(full_bucket_name); if (!bucket_expiration.empty()) { return bucket_expiration; } return std::string(); } std::string Mob::GetBucketKey() { if (IsClient()) { return fmt::format("character-{}", CastToClient()->CharacterID()); } else if (IsNPC()) { return fmt::format("npc-{}", GetNPCTypeID()); } return std::string(); } std::string Mob::GetBucketRemaining(std::string bucket_name) { std::string full_bucket_name = fmt::format("{}-{}", GetBucketKey(), bucket_name); std::string bucket_remaining = DataBucket::GetDataRemaining(full_bucket_name); if (!bucket_remaining.empty() && atoi(bucket_remaining.c_str()) > 0) { return bucket_remaining; } else if (atoi(bucket_remaining.c_str()) == 
0) { return "0"; } return std::string(); } void Mob::SetBucket(std::string bucket_name, std::string bucket_value, std::string expiration) { std::string full_bucket_name = fmt::format("{}-{}", GetBucketKey(), bucket_name); DataBucket::SetData(full_bucket_name, bucket_value, expiration); }
1
10,390
Just combining the inner if/else blocks like this is what I originally meant (I didn't mean replacing all the invisibility checks in the outer if; I realize that would have been a functionality change regarding the rule). This should now be equivalent to `if (RuleB(Pets, LivelikeBreakCharmOnInvis) || IsInvisible(formerpet))`: the second check for the rule being false after the `||` is implied, since anything after the `||` is only evaluated when the rule check is false.
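For reference, a minimal compilable sketch of the equivalence this comment describes. The `Mob`, `FadeCharm`, and rule-macro stubs, and the exact before-shape, are assumptions for illustration; only the combined condition is quoted from the comment above.

// Stubbed stand-ins so the sketch compiles on its own; none of these
// definitions come from the actual diff.
#define RuleB(cat, name) (g_rule_##cat##_##name) // stands in for the server's rule macro
static bool g_rule_Pets_LivelikeBreakCharmOnInvis = false;

struct Mob {};
static bool IsInvisible(Mob *formerpet) { return true; } // placeholder check
static void FadeCharm(Mob *formerpet) { /* break the charm buff */ }

// Before (assumed shape): the rule branch and the invisibility branch
// each fade the charm separately.
static void BreakCharmOld(Mob *formerpet) {
    if (RuleB(Pets, LivelikeBreakCharmOnInvis)) {
        FadeCharm(formerpet);
    } else if (IsInvisible(formerpet)) {
        FadeCharm(formerpet);
    }
}

// After: one condition. Because || short-circuits, IsInvisible() only
// runs when the rule check is false, so the behavior is identical.
static void BreakCharmNew(Mob *formerpet) {
    if (RuleB(Pets, LivelikeBreakCharmOnInvis) || IsInvisible(formerpet)) {
        FadeCharm(formerpet);
    }
}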
EQEmu-Server
cpp
@@ -964,6 +964,9 @@ func loadPluginConfig(subrepoConfig *core.Configuration, packageState *core.Buil
 	contextPackage := &core.Package{SubrepoName: packageState.CurrentSubrepo}
 	configValueDefinitions := subrepoConfig.PluginConfig
 	for key, definition := range configValueDefinitions {
+		if definition.ConfigKey == "" {
+			definition.ConfigKey = strings.ReplaceAll(key, "_", "")
+		}
 		fullConfigKey := fmt.Sprintf("%v.%v", pluginName, definition.ConfigKey)
 		value, ok := extraVals[strings.ToLower(definition.ConfigKey)]
 		if !ok {
1
package asp import ( "context" "fmt" "reflect" "sort" "strconv" "strings" "sync" "github.com/thought-machine/please/src/core" ) // A pyObject is the base type for all interpreter objects. // Strictly the "py" prefix is a misnomer but it's short and easy to follow... type pyObject interface { // All pyObjects are stringable. fmt.Stringer // Returns the name of this object's type. Type() string // Returns true if this object evaluates to something truthy. IsTruthy() bool // Returns a property of this object with the given name. Property(name string) pyObject // Invokes the given operator on this object and returns the result. Operator(operator Operator, operand pyObject) pyObject // Used for index-assignment statements IndexAssign(index, value pyObject) } // A freezable represents an object that can be frozen into a readonly state. // Not all pyObjects implement this. type freezable interface { Freeze() pyObject } type pyBool bool // True and False are the singletons representing those values. var ( True pyObject = pyBool(true) False pyObject = pyBool(false) ) // newPyBool creates a new bool. It's a minor optimisation to treat them as singletons // although also means one can write "is True" and have it work (not that you should, really). func newPyBool(b bool) pyObject { if b { return True } return False } func (b pyBool) Type() string { return "bool" } func (b pyBool) IsTruthy() bool { return b == True } func (b pyBool) Property(name string) pyObject { panic("bool object has no property " + name) } func (b pyBool) Operator(operator Operator, operand pyObject) pyObject { panic(fmt.Sprintf("operator %s not implemented on type bool", operator)) } func (b pyBool) IndexAssign(index, value pyObject) { panic("bool type is not indexable") } func (b pyBool) String() string { if b == True { return "True" } return "False" } func (b pyBool) MarshalJSON() ([]byte, error) { if b { return []byte("true"), nil } return []byte("false"), nil } type pyNone struct{} // None is the singleton representing None; there can be only one etc. var None pyObject = pyNone{} func (n pyNone) Type() string { return "none" } func (n pyNone) IsTruthy() bool { return false } func (n pyNone) Property(name string) pyObject { panic("none object has no property " + name) } func (n pyNone) Operator(operator Operator, operand pyObject) pyObject { panic(fmt.Sprintf("operator %s not implemented on type none", operator)) } func (n pyNone) IndexAssign(index, value pyObject) { panic("none type is not indexable") } func (n pyNone) String() string { return "None" } func (n pyNone) MarshalJSON() ([]byte, error) { return []byte("null"), nil } // A pySentinel is an internal implementation detail used in some cases. It should never be // exposed to users. type pySentinel struct{} // continueIteration is used to implement the "continue" statement. 
var continueIteration = pySentinel{} func (s pySentinel) Type() string { return "sentinel" } func (s pySentinel) IsTruthy() bool { return false } func (s pySentinel) Property(name string) pyObject { panic("sentinel object has no property " + name) } func (s pySentinel) Operator(operator Operator, operand pyObject) pyObject { panic(fmt.Sprintf("operator %s not implemented on type sentinel", operator)) } func (s pySentinel) IndexAssign(index, value pyObject) { panic("sentinel type is not indexable") } func (s pySentinel) String() string { panic("non stringable type sentinel") } func (s pySentinel) MarshalJSON() ([]byte, error) { panic("non serialisable type sentinel") } type pyInt int // pyIndex converts an object that's being used as an index to an int. func pyIndex(obj, index pyObject, slice bool) pyInt { i, ok := index.(pyInt) if !ok { panic(obj.Type() + " indices must be integers, not " + index.Type()) } else if l := objLen(obj); i < 0 { i = l + i // Go doesn't support negative indices } else if i > l { if slice { return l } panic(obj.Type() + " index out of range") } return i } func (i pyInt) Type() string { return "int" } func (i pyInt) IsTruthy() bool { return i != 0 } func (i pyInt) Property(name string) pyObject { panic("int object has no property " + name) } func (i pyInt) Operator(operator Operator, operand pyObject) pyObject { switch o := operand.(type) { case pyInt: switch operator { case Add: return i + o case Subtract: return i - o case Multiply: return i * o case Divide: return i / o case LessThan: return newPyBool(i < o) case GreaterThan: return newPyBool(i > o) case LessThanOrEqual: return newPyBool(i <= o) case GreaterThanOrEqual: return newPyBool(i >= o) case Modulo: return i % o case In: panic("bad operator: 'in' int") } panic("unknown operator") case pyString: if operator == Multiply { return pyString(strings.Repeat(string(o), int(i))) } case pyList: if operator == Multiply { return o.Repeat(i) } } panic("Cannot operate on int and " + operand.Type()) } func (i pyInt) IndexAssign(index, value pyObject) { panic("int type is not indexable") } func (i pyInt) String() string { return strconv.Itoa(int(i)) } type pyString string func (s pyString) Type() string { return "str" } func (s pyString) IsTruthy() bool { return s != "" } func (s pyString) Property(name string) pyObject { if prop, present := stringMethods[name]; present { return prop.Member(s) } panic("str object has no property " + name) } func (s pyString) Operator(operator Operator, operand pyObject) pyObject { s2, ok := operand.(pyString) if !ok && operator != Modulo && operator != Index && operator != Multiply { panic("Cannot operate on str and " + operand.Type()) } switch operator { case Add: return s + s2 case Multiply: i, ok := operand.(pyInt) if !ok { panic("Can only multiply string with int, not with " + operand.Type()) } return pyString(strings.Repeat(string(s), int(i))) case LessThan: return newPyBool(s < s2) case GreaterThan: return newPyBool(s > s2) case LessThanOrEqual: return newPyBool(s <= s2) case GreaterThanOrEqual: return newPyBool(s >= s2) case Modulo: if ok { // Special case: "%s" % "x" return pyString(fmt.Sprintf(string(s), s2)) } else if i, ok := operand.(pyInt); ok { // Another one: "%d" % 4 return pyString(fmt.Sprintf(string(s), i)) } l, ok := operand.(pyList) if !ok { panic("Argument to string interpolation must be a string or list; was " + operand.Type()) } // Classic issue: can't use a []pyObject as a []interface{} :( l2 := make([]interface{}, len(l)) for i, v := range l { l2[i] = v } return 
pyString(fmt.Sprintf(string(s), l2...)) case In: return newPyBool(strings.Contains(string(s), string(s2))) case NotIn: return newPyBool(!strings.Contains(string(s), string(s2))) case Index: return pyString(s[pyIndex(s, operand, false)]) } panic("Unknown operator for string") } func (s pyString) IndexAssign(index, value pyObject) { panic("str type cannot be partially assigned to") } func (s pyString) String() string { return string(s) } type pyList []pyObject var emptyList pyObject = make(pyList, 0) // want this to explicitly have zero capacity func (l pyList) Type() string { return "list" } func (l pyList) IsTruthy() bool { return len(l) > 0 } func (l pyList) Property(name string) pyObject { panic("list object has no property " + name) } func (l pyList) Operator(operator Operator, operand pyObject) pyObject { switch operator { case Add: l2, ok := operand.(pyList) if !ok { if l2, ok := operand.(pyFrozenList); ok { return append(l, l2.pyList...) } panic("Cannot add list and " + operand.Type()) } return append(l, l2...) case In, NotIn: for _, item := range l { if item == operand { return newPyBool(operator == In) } } return newPyBool(operator == NotIn) case Index: return l[pyIndex(l, operand, false)] case LessThan: // Needed for sorting. l2, ok := operand.(pyList) if !ok { panic("Cannot compare list and " + operand.Type()) } for i, li := range l { if i >= len(l2) || l2[i].Operator(LessThan, li).IsTruthy() { return False } else if li.Operator(LessThan, l2[i]).IsTruthy() { return True } } if len(l) < len(l2) { return True } return False case Multiply: i, ok := operand.(pyInt) if !ok { panic("Can only multiply list with int, not with " + operand.Type()) } return l.Repeat(i) } panic("Unsupported operator on list: " + operator.String()) } func (l pyList) IndexAssign(index, value pyObject) { i, ok := index.(pyInt) if !ok { panic("List indices must be integers, not " + index.Type()) } l[i] = value } func (l pyList) String() string { return fmt.Sprintf("%s", []pyObject(l)) } // Freeze freezes this list for further updates. // Note that this is a "soft" freeze; callers holding the original unfrozen // reference can still modify it. func (l pyList) Freeze() pyObject { return pyFrozenList{pyList: l} } // Repeat returns a copy of this list, repeated n times func (l pyList) Repeat(n pyInt) pyList { var ret pyList for i := 0; i < int(n); i++ { ret = append(ret, l...) } return ret } // A pyFrozenList implements an immutable list. type pyFrozenList struct{ pyList } func (l pyFrozenList) IndexAssign(index, value pyObject) { panic("list is immutable") } type pyDict map[string]pyObject // Dicts can only be keyed by strings func (d pyDict) Type() string { return "dict" } func (d pyDict) IsTruthy() bool { return len(d) > 0 } func (d pyDict) Property(name string) pyObject { // We allow looking up dict members by . as well as by indexing in order to facilitate the config map. 
if obj, present := d[name]; present { return obj } else if prop, present := dictMethods[name]; present { return prop.Member(d) } panic("dict object has no property " + name) } func (d pyDict) Operator(operator Operator, operand pyObject) pyObject { if operator == In || operator == NotIn { if s, ok := operand.(pyString); ok { _, present := d[string(s)] return newPyBool(present == (operator == In)) } return newPyBool(operator == NotIn) } else if operator == Index { s, ok := operand.(pyString) if !ok { panic("Dict keys must be strings, not " + operand.Type()) } else if v, present := d[string(s)]; present { return v } panic("unknown dict key: " + s.String()) } else if operator == Union { d2, ok := operand.(pyDict) if !ok { panic("Operator to | must be another dict, not " + operand.Type()) } ret := make(pyDict, len(d)+len(d2)) for k, v := range d { ret[k] = v } for k, v := range d2 { ret[k] = v } return ret } panic("Unsupported operator on dict") } func (d pyDict) IndexAssign(index, value pyObject) { key, ok := index.(pyString) if !ok { panic("Dict keys must be strings, not " + index.Type()) } d[string(key)] = value } func (d pyDict) String() string { var b strings.Builder b.WriteByte('{') started := false for _, k := range d.Keys() { if started { b.WriteString(", ") } started = true b.WriteByte('"') b.WriteString(k) b.WriteString(`": `) b.WriteString(d[k].String()) } b.WriteByte('}') return b.String() } // Copy creates a shallow duplicate of this dictionary. func (d pyDict) Copy() pyDict { m := make(pyDict, len(d)) for k, v := range d { m[k] = v } return m } // Freeze freezes this dict for further updates. // Note that this is a "soft" freeze; callers holding the original unfrozen // reference can still modify it. func (d pyDict) Freeze() pyObject { return pyFrozenDict{pyDict: d} } // Keys returns the keys of this dict, in order. func (d pyDict) Keys() []string { ret := make([]string, 0, len(d)) for k := range d { ret = append(ret, k) } sort.Strings(ret) return ret } // A pyFrozenDict implements an immutable python dict. type pyFrozenDict struct{ pyDict } func (d pyFrozenDict) Property(name string) pyObject { if name == "setdefault" { panic("dict is immutable") } return d.pyDict.Property(name) } func (d pyFrozenDict) IndexAssign(index, value pyObject) { panic("dict is immutable") } type pyFunc struct { name string docstring string scope *scope args []string argIndices map[string]int defaults []*Expression constants []pyObject types [][]string code []*Statement argPool *sync.Pool // If the function is implemented natively, this is the pointer to its real code. nativeCode func(*scope, []pyObject) pyObject // If the function has been bound as a member function, this is the implicit self argument. self pyObject // True if this function accepts non-keyword varargs (like the log functions, or zip()). varargs bool // True if this function accepts arbitrary keyword arguments (e.g. package(), str.format()). kwargs bool // True if this function may only be called using keyword arguments. // This is the case for all builtin build rules, although for now it cannot be specified // on any user-defined ones. 
kwargsonly bool // return type of the function returnType string } func newPyFunc(parentScope *scope, def *FuncDef) pyObject { f := &pyFunc{ name: def.Name, scope: parentScope, args: make([]string, len(def.Arguments)), argIndices: make(map[string]int, len(def.Arguments)), constants: make([]pyObject, len(def.Arguments)), types: make([][]string, len(def.Arguments)), code: def.Statements, kwargsonly: def.KeywordsOnly, returnType: def.Return, } if def.Docstring != "" { f.docstring = stringLiteral(def.Docstring) } for i, arg := range def.Arguments { f.args[i] = arg.Name f.argIndices[arg.Name] = i f.types[i] = arg.Type if arg.Value != nil { if constant := parentScope.Constant(arg.Value); constant != nil { f.constants[i] = constant } else { if f.defaults == nil { // Minor optimisation: defaults is lazily allocated f.defaults = make([]*Expression, len(def.Arguments)) } f.defaults[i] = arg.Value } } for _, alias := range arg.Aliases { f.argIndices[alias] = i } } return f } func (f *pyFunc) Type() string { return "function" } func (f *pyFunc) IsTruthy() bool { return true } func (f *pyFunc) Property(name string) pyObject { panic("function object has no property " + name) } func (f *pyFunc) Operator(operator Operator, operand pyObject) pyObject { panic("cannot use operators on a function") } func (f *pyFunc) IndexAssign(index, value pyObject) { panic("function type is not indexable") } func (f *pyFunc) String() string { return fmt.Sprintf("<function %s>", f.name) } func (f *pyFunc) Call(ctx context.Context, s *scope, c *Call) pyObject { if f.nativeCode != nil { if f.kwargs { return f.callNative(s.NewScope(), c) } return f.callNative(s, c) } s2 := f.scope.NewPackagedScope(s.pkg, len(f.args)+1) s2.ctx = ctx s2.config = s.config s2.Set("CONFIG", s.config) // This needs to be copied across too :( s2.Callback = s.Callback // Handle implicit 'self' parameter for bound functions. args := c.Arguments if f.self != nil { args = append([]CallArgument{{ Value: Expression{Optimised: &OptimisedExpression{Constant: f.self}}, }}, args...) } for i, a := range args { if a.Name != "" { // Named argument name := a.Name idx, present := f.argIndices[name] if !present && !f.kwargs { s.Error("Unknown argument to %s: %s", f.name, name) } if present { name = f.args[idx] } s2.Set(name, f.validateType(s, idx, &a.Value)) } else { if i >= len(f.args) { s.Error("Too many arguments to %s", f.name) } else if f.kwargsonly { s.Error("Function %s can only be called with keyword arguments", f.name) } s2.Set(f.args[i], f.validateType(s, i, &a.Value)) } } // Now make sure any arguments with defaults are set, and check any others have been passed. for i, a := range f.args { if s2.LocalLookup(a) == nil { s2.Set(a, f.defaultArg(s, i, a)) } } ret := s2.interpretStatements(f.code) if ret == nil { return None // Implicit 'return None' in any function that didn't do that itself. } if f.returnType != "" && ret.Type() != f.returnType { return s.Error("Invalid return type %s from function %s, expecting %s", ret.Type(), f.name, f.returnType) } return ret } // callNative implements the "calling convention" for functions implemented with native code. // For performance reasons these are done differently - rather then receiving a pointer to a scope // they receive their arguments as a slice, in which unpassed arguments are nil. 
func (f *pyFunc) callNative(s *scope, c *Call) pyObject { var args []pyObject if f.argPool != nil { args = f.argPool.Get().([]pyObject) defer func() { for i := range args { args[i] = nil } f.argPool.Put(args) //nolint:staticcheck }() } else { args = make([]pyObject, len(f.args)) } offset := 0 if f.self != nil { args[0] = f.self offset = 1 } for i, a := range c.Arguments { if a.Name != "" { // Named argument if idx, present := f.argIndices[a.Name]; present { args[idx] = f.validateType(s, idx, &a.Value) } else if f.kwargs { s.Set(a.Name, s.interpretExpression(&a.Value)) } else { s.Error("Unknown argument to %s: %s", f.name, a.Name) } } else if i >= len(args) { s.Assert(f.varargs, "Too many arguments to %s", f.name) args = append(args, s.interpretExpression(&a.Value)) } else { if f.kwargsonly { s.Error("Function %s can only be called with keyword arguments", f.name) } if i+offset >= len(args) { args = append(args, f.validateType(s, i+offset, &a.Value)) } else { args[i+offset] = f.validateType(s, i+offset, &a.Value) } } } // Now make sure any arguments with defaults are set, and check any others have been passed. for i, a := range f.args { if args[i] == nil { args[i] = f.defaultArg(s, i, a) } } return f.nativeCode(s, args) } // defaultArg returns the default value for an argument, whether it's constant or not. func (f *pyFunc) defaultArg(s *scope, i int, arg string) pyObject { if f.constants[i] != nil { return f.constants[i] } // Deliberately does not use Assert since it doesn't get inlined here (weirdly it does // in _many_ other places) and this function is pretty hot. if f.defaults == nil || f.defaults[i] == nil { s.Error("Missing required argument to %s: %s", f.name, arg) } return s.interpretExpression(f.defaults[i]) } // Member duplicates this function as a member function of the given object. func (f *pyFunc) Member(obj pyObject) pyObject { return &pyFunc{ name: f.name, scope: f.scope, args: f.args, argIndices: f.argIndices, defaults: f.defaults, constants: f.constants, types: f.types, code: f.code, nativeCode: f.nativeCode, varargs: f.varargs, kwargs: f.kwargs, self: obj, } } // validateType validates that this argument matches the given type func (f *pyFunc) validateType(s *scope, i int, expr *Expression) pyObject { val := s.interpretExpression(expr) if i >= len(f.types) && (f.varargs || f.kwargs) { return val // function is varargs so we have no type signature for this } else if f.types[i] == nil { return val // not varargs but we just don't have a type signature, so take it as it is } else if val == None { if f.constants[i] == nil && (f.defaults == nil || f.defaults[i] == nil) { return val } return f.defaultArg(s, i, f.args[i]) } actual := val.Type() for _, t := range f.types[i] { if t == actual { return val } } // Using integers in place of booleans seems common in Bazel BUILD files :( if s.state.Config.Bazel.Compatibility && f.types[i][0] == "bool" && actual == "int" { return val } defer func() { panic(AddStackFrame(expr.Pos, recover())) }() return s.Error("Invalid type for argument %s to %s; expected %s, was %s", f.args[i], f.name, strings.Join(f.types[i], " or "), actual) } // A pyConfig is a wrapper object around Please's global config. // Initially it was implemented as just a dict but that requires us to spend a lot of time // copying & duplicating it - this structure instead requires very little to be copied // on each update. 
type pyConfig struct { base pyDict overlay pyDict } func (c *pyConfig) String() string { return "<global config object>" } func (c *pyConfig) Type() string { return "config" } func (c *pyConfig) IsTruthy() bool { return true // sure, why not } func (c *pyConfig) Property(name string) pyObject { if obj := c.Get(name, nil); obj != nil { return obj } else if f, present := configMethods[name]; present { return f.Member(c) } panic("Config has no such property " + name) } func (c *pyConfig) Operator(operator Operator, operand pyObject) pyObject { s, ok := operand.(pyString) if !ok { panic("config keys must be strings") } if operator == In || operator == NotIn { v := c.Get(string(s), nil) if (v != nil) == (operator == In) { return True } return False } else if operator == Index { return c.MustGet(string(s)) } panic("Cannot operate on config object") } func (c *pyConfig) IndexAssign(index, value pyObject) { key := string(index.(pyString)) if c.overlay == nil { c.overlay = pyDict{key: value} } else { c.overlay[key] = value } } // Copy creates a copy of this config object. It does not copy the overlay config, so be careful // where it is used. func (c *pyConfig) Copy() *pyConfig { return &pyConfig{base: c.base} } // Get implements the get() method, similarly to a dict but looks up in both internal maps. func (c *pyConfig) Get(key string, fallback pyObject) pyObject { if c.overlay != nil { if obj, present := c.overlay[key]; present { return obj } } if obj, present := c.base[key]; present { return obj } return fallback } // MustGet implements getting items from the config. If the requested item is not present, it panics. func (c *pyConfig) MustGet(key string) pyObject { v := c.Get(key, nil) if v == nil { panic("unknown config key " + key) } return v } // Freeze returns a copy of this config that is frozen for further updates. func (c *pyConfig) Freeze() pyObject { return &pyFrozenConfig{pyConfig: *c} } // Merge merges the contents of the given config object into this one. func (c *pyConfig) Merge(other *pyFrozenConfig) { if c.overlay == nil { // N.B. We cannot directly copy since this might get mutated again later on. c.overlay = make(pyDict, len(other.overlay)) } for k, v := range other.overlay { c.overlay[k] = v } } // newConfig creates a new pyConfig object from the configuration. // This is typically only created once at global scope, other scopes copy it with .Copy() func newConfig(state *core.BuildState) *pyConfig { config := state.Config c := make(pyDict, 100) v := reflect.ValueOf(config).Elem() for i := 0; i < v.NumField(); i++ { if field := v.Field(i); field.Kind() == reflect.Struct { for j := 0; j < field.NumField(); j++ { if tag := field.Type().Field(j).Tag.Get("var"); tag != "" { subfield := field.Field(j) switch subfield.Kind() { case reflect.String: c[tag] = pyString(subfield.String()) case reflect.Bool: c[tag] = newPyBool(subfield.Bool()) case reflect.Slice: l := make(pyList, subfield.Len()) for i := 0; i < subfield.Len(); i++ { l[i] = pyString(subfield.Index(i).String()) } c[tag] = l case reflect.Struct: c[tag] = pyString(subfield.Interface().(fmt.Stringer).String()) default: log.Fatalf("Unknown config field type for %s", tag) } } } } } // Arbitrary build config stuff for k, v := range config.BuildConfig { c[strings.ReplaceAll(strings.ToUpper(k), "-", "_")] = pyString(v) } // Settings specific to package() which aren't in the config, but it's easier to // just put them in now. 
c["DEFAULT_VISIBILITY"] = None c["DEFAULT_TESTONLY"] = False c["DEFAULT_LICENCES"] = None // Bazel supports a 'features' flag to toggle things on and off. // We don't but at least let them call package() without blowing up. if config.Bazel.Compatibility { c["FEATURES"] = pyList{} } arch := state.Arch c["OS"] = pyString(arch.OS) c["ARCH"] = pyString(arch.Arch) c["HOSTOS"] = pyString(arch.HostOS()) c["HOSTARCH"] = pyString(arch.HostArch()) c["GOOS"] = pyString(arch.OS) c["GOARCH"] = pyString(arch.GoArch()) c["TARGET_OS"] = pyString(state.TargetArch.OS) c["TARGET_ARCH"] = pyString(state.TargetArch.Arch) c["BUILD_CONFIG"] = pyString(state.Config.Build.Config) if debug := state.Debug; debug != nil { c["DEBUG"] = pyDict{ "DEBUGGER": pyString(debug.Debugger), "PORT": pyInt(debug.Port), } } loadPluginConfig(state.Config, state, c) return &pyConfig{base: c} } func loadPluginConfig(subrepoConfig *core.Configuration, packageState *core.BuildState, c pyDict) { pluginName := subrepoConfig.PluginDefinition.Name if pluginName == "" { return } extraVals := map[string][]string{} if config := packageState.Config.Plugin[pluginName]; config != nil { extraVals = config.ExtraValues } pluginNamespace := pyDict{} contextPackage := &core.Package{SubrepoName: packageState.CurrentSubrepo} configValueDefinitions := subrepoConfig.PluginConfig for key, definition := range configValueDefinitions { fullConfigKey := fmt.Sprintf("%v.%v", pluginName, definition.ConfigKey) value, ok := extraVals[strings.ToLower(definition.ConfigKey)] if !ok { value = definition.DefaultValue } if len(value) == 0 && !definition.Optional { log.Fatalf("plugin config %s is not optional %v", fullConfigKey, extraVals) } if !definition.Repeatable && len(value) > 1 { log.Fatalf("plugin config %v is not repeatable", fullConfigKey) } // Parse any config values in the current subrepo so @self resolves correctly. If we leave them, @self will // resolve based on the subincluding package which will likely be the host repo. for i, v := range value { if core.LooksLikeABuildLabel(v) { value[i] = core.ParseBuildLabelContext(v, contextPackage).String() } } if definition.Repeatable { l := make(pyList, 0, len(value)) for _, v := range value { l = append(l, toPyObject(fullConfigKey, v, definition.Type)) } pluginNamespace[strings.ToUpper(key)] = l } else { val := "" if len(value) == 1 { val = value[0] } pluginNamespace[strings.ToUpper(key)] = toPyObject(fullConfigKey, val, definition.Type) } } c[strings.ToUpper(pluginName)] = pluginNamespace } func toPyObject(key, val, toType string) pyObject { if toType == "" || toType == "str" { return pyString(val) } if toType == "bool" { val = strings.ToLower(val) if val == "true" || val == "yes" || val == "on" { return pyBool(true) } if val == "false" || val == "no" || val == "off" || val == "" { return pyBool(false) } log.Fatalf("%s: Invalid boolean value %v", key, val) } if toType == "int" { if val == "" { return pyInt(0) } i, err := strconv.Atoi(val) if err != nil { log.Fatalf("%s: Invalid int value %v", key, val) } return pyInt(i) } log.Fatalf("%s: invalid config type %v", key, toType) return pyNone{} } // A pyFrozenConfig is a config object that disallows further updates. type pyFrozenConfig struct{ pyConfig } // IndexAssign always fails, assignments to a pyFrozenConfig aren't allowed. func (c *pyFrozenConfig) IndexAssign(_, _ pyObject) { panic("Config object is not assignable in this scope") } // Property disallows setdefault() since it's immutable. 
func (c *pyFrozenConfig) Property(name string) pyObject {
	if name == "setdefault" {
		panic("Config object is not assignable in this scope")
	}
	return c.pyConfig.Property(name)
}
1
10,345
We don't want to write this back to the definition, do we? Probably better to just create a local variable for it.
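A sketch of the local-variable version being suggested, assuming the definitions map holds shared pointers (which is what would make writing back problematic); the trimmed `pluginConfigDefinition` struct and the `resolveConfigKey` helper are illustrative, not from the repo.

package main

import (
	"fmt"
	"strings"
)

// pluginConfigDefinition mirrors only the fields the diff touches; the
// real definition type in the repo has more fields.
type pluginConfigDefinition struct {
	ConfigKey    string
	DefaultValue []string
}

// resolveConfigKey computes the fallback key into a local value instead
// of assigning it back onto the shared definition, as suggested above.
func resolveConfigKey(key string, definition *pluginConfigDefinition) string {
	configKey := definition.ConfigKey
	if configKey == "" {
		configKey = strings.ReplaceAll(key, "_", "")
	}
	return configKey
}

func main() {
	defs := map[string]*pluginConfigDefinition{
		"some_key": {}, // no explicit ConfigKey -> falls back to "somekey"
	}
	for key, definition := range defs {
		configKey := resolveConfigKey(key, definition)
		fullConfigKey := fmt.Sprintf("%v.%v", "myplugin", configKey)
		fmt.Println(fullConfigKey)
		fmt.Println("definition left untouched:", definition.ConfigKey == "")
	}
}

If the map values are pointers, assigning to definition.ConfigKey inside the loop mutates state visible to every later use of that definition; the local keeps the fallback purely per-iteration.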
thought-machine-please
go
@@ -964,6 +964,7 @@ public class PlaybackService extends MediaBrowserServiceCompat {
 
         @Override
         public void onPlaybackEnded(MediaType mediaType, boolean stopPlaying) {
+            PlaybackPreferences.clearCurrentlyPlayingTemporaryPlaybackSpeed();
             PlaybackService.this.onPlaybackEnded(mediaType, stopPlaying);
         }
     };
1
package de.danoeh.antennapod.core.service.playback; import android.app.NotificationManager; import android.app.PendingIntent; import android.app.Service; import android.app.UiModeManager; import android.bluetooth.BluetoothA2dp; import android.content.BroadcastReceiver; import android.content.ComponentName; import android.content.Context; import android.content.Intent; import android.content.IntentFilter; import android.content.SharedPreferences; import android.content.res.Configuration; import android.graphics.Bitmap; import android.media.AudioManager; import android.media.MediaPlayer; import android.net.Uri; import android.os.Binder; import android.os.Build; import android.os.Bundle; import android.os.IBinder; import android.os.Vibrator; import androidx.preference.PreferenceManager; import androidx.annotation.NonNull; import androidx.annotation.StringRes; import androidx.core.app.NotificationCompat; import androidx.core.app.NotificationManagerCompat; import android.support.v4.media.MediaBrowserCompat; import androidx.media.MediaBrowserServiceCompat; import android.support.v4.media.MediaDescriptionCompat; import android.support.v4.media.MediaMetadataCompat; import android.support.v4.media.session.MediaSessionCompat; import android.support.v4.media.session.PlaybackStateCompat; import android.text.TextUtils; import android.util.Log; import android.util.Pair; import android.view.KeyEvent; import android.view.SurfaceHolder; import android.webkit.URLUtil; import android.widget.Toast; import com.bumptech.glide.Glide; import com.bumptech.glide.request.RequestOptions; import com.bumptech.glide.request.target.Target; import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.concurrent.TimeUnit; import de.danoeh.antennapod.core.ClientConfig; import de.danoeh.antennapod.core.R; import de.danoeh.antennapod.core.event.FeedItemEvent; import de.danoeh.antennapod.core.event.MessageEvent; import de.danoeh.antennapod.core.event.PlaybackPositionEvent; import de.danoeh.antennapod.core.event.ServiceEvent; import de.danoeh.antennapod.core.event.settings.SkipIntroEndingChangedEvent; import de.danoeh.antennapod.core.event.settings.SpeedPresetChangedEvent; import de.danoeh.antennapod.core.event.settings.VolumeAdaptionChangedEvent; import de.danoeh.antennapod.core.feed.Chapter; import de.danoeh.antennapod.core.feed.Feed; import de.danoeh.antennapod.core.feed.FeedItem; import de.danoeh.antennapod.core.feed.FeedMedia; import de.danoeh.antennapod.core.feed.FeedPreferences; import de.danoeh.antennapod.core.feed.MediaType; import de.danoeh.antennapod.core.glide.ApGlideSettings; import de.danoeh.antennapod.core.preferences.PlaybackPreferences; import de.danoeh.antennapod.core.preferences.SleepTimerPreferences; import de.danoeh.antennapod.core.preferences.UserPreferences; import de.danoeh.antennapod.core.receiver.MediaButtonReceiver; import de.danoeh.antennapod.core.service.PlayerWidgetJobService; import de.danoeh.antennapod.core.storage.DBReader; import de.danoeh.antennapod.core.storage.DBTasks; import de.danoeh.antennapod.core.storage.DBWriter; import de.danoeh.antennapod.core.storage.FeedSearcher; import de.danoeh.antennapod.core.feed.util.ImageResourceUtils; import de.danoeh.antennapod.core.util.IntentUtils; import de.danoeh.antennapod.core.util.NetworkUtils; import de.danoeh.antennapod.core.util.gui.NotificationUtils; import de.danoeh.antennapod.core.util.playback.ExternalMedia; import de.danoeh.antennapod.core.util.playback.Playable; import 
de.danoeh.antennapod.core.util.playback.PlaybackServiceStarter; import io.reactivex.Observable; import io.reactivex.Single; import io.reactivex.android.schedulers.AndroidSchedulers; import io.reactivex.disposables.Disposable; import io.reactivex.schedulers.Schedulers; import org.greenrobot.eventbus.EventBus; import org.greenrobot.eventbus.Subscribe; import org.greenrobot.eventbus.ThreadMode; import static de.danoeh.antennapod.core.feed.FeedPreferences.SPEED_USE_GLOBAL; /** * Controls the MediaPlayer that plays a FeedMedia-file */ public class PlaybackService extends MediaBrowserServiceCompat { /** * Logging tag */ private static final String TAG = "PlaybackService"; /** * Parcelable of type Playable. */ public static final String EXTRA_PLAYABLE = "PlaybackService.PlayableExtra"; /** * True if cast session should disconnect. */ public static final String EXTRA_CAST_DISCONNECT = "extra.de.danoeh.antennapod.core.service.castDisconnect"; /** * True if media should be streamed. */ public static final String EXTRA_SHOULD_STREAM = "extra.de.danoeh.antennapod.core.service.shouldStream"; public static final String EXTRA_ALLOW_STREAM_THIS_TIME = "extra.de.danoeh.antennapod.core.service.allowStream"; public static final String EXTRA_ALLOW_STREAM_ALWAYS = "extra.de.danoeh.antennapod.core.service.allowStreamAlways"; /** * True if playback should be started immediately after media has been * prepared. */ public static final String EXTRA_START_WHEN_PREPARED = "extra.de.danoeh.antennapod.core.service.startWhenPrepared"; public static final String EXTRA_PREPARE_IMMEDIATELY = "extra.de.danoeh.antennapod.core.service.prepareImmediately"; public static final String ACTION_PLAYER_STATUS_CHANGED = "action.de.danoeh.antennapod.core.service.playerStatusChanged"; private static final String AVRCP_ACTION_PLAYER_STATUS_CHANGED = "com.android.music.playstatechanged"; private static final String AVRCP_ACTION_META_CHANGED = "com.android.music.metachanged"; public static final String ACTION_PLAYER_NOTIFICATION = "action.de.danoeh.antennapod.core.service.playerNotification"; public static final String EXTRA_NOTIFICATION_CODE = "extra.de.danoeh.antennapod.core.service.notificationCode"; public static final String EXTRA_NOTIFICATION_TYPE = "extra.de.danoeh.antennapod.core.service.notificationType"; /** * If the PlaybackService receives this action, it will stop playback and * try to shutdown. */ public static final String ACTION_SHUTDOWN_PLAYBACK_SERVICE = "action.de.danoeh.antennapod.core.service.actionShutdownPlaybackService"; /** * If the PlaybackService receives this action, it will end playback of the * current episode and load the next episode if there is one available. */ public static final String ACTION_SKIP_CURRENT_EPISODE = "action.de.danoeh.antennapod.core.service.skipCurrentEpisode"; /** * If the PlaybackService receives this action, it will pause playback. */ public static final String ACTION_PAUSE_PLAY_CURRENT_EPISODE = "action.de.danoeh.antennapod.core.service.pausePlayCurrentEpisode"; /** * Custom action used by Android Wear */ private static final String CUSTOM_ACTION_FAST_FORWARD = "action.de.danoeh.antennapod.core.service.fastForward"; private static final String CUSTOM_ACTION_REWIND = "action.de.danoeh.antennapod.core.service.rewind"; /** * Used in NOTIFICATION_TYPE_RELOAD. 
*/ public static final int EXTRA_CODE_AUDIO = 1; public static final int EXTRA_CODE_VIDEO = 2; public static final int EXTRA_CODE_CAST = 3; public static final int NOTIFICATION_TYPE_ERROR = 0; public static final int NOTIFICATION_TYPE_BUFFER_UPDATE = 2; /** * Receivers of this intent should update their information about the currently playing media */ public static final int NOTIFICATION_TYPE_RELOAD = 3; /** * The state of the sleep timer changed. */ public static final int NOTIFICATION_TYPE_SLEEPTIMER_UPDATE = 4; public static final int NOTIFICATION_TYPE_BUFFER_START = 5; public static final int NOTIFICATION_TYPE_BUFFER_END = 6; /** * No more episodes are going to be played. */ public static final int NOTIFICATION_TYPE_PLAYBACK_END = 7; /** * Playback speed has changed */ public static final int NOTIFICATION_TYPE_PLAYBACK_SPEED_CHANGE = 8; /** * Ability to set the playback speed has changed */ public static final int NOTIFICATION_TYPE_SET_SPEED_ABILITY_CHANGED = 9; /** * Returned by getCurrentPosition() or getDuration() if the PlaybackService * is in an invalid state. */ public static final int INVALID_TIME = -1; /** * Is true if service is running. */ public static boolean isRunning = false; /** * Is true if the service was running, but paused due to headphone disconnect */ private static boolean transientPause = false; /** * Is true if a Cast Device is connected to the service. */ private static volatile boolean isCasting = false; private PlaybackServiceMediaPlayer mediaPlayer; private PlaybackServiceTaskManager taskManager; private PlaybackServiceFlavorHelper flavorHelper; private PlaybackServiceStateManager stateManager; private Disposable positionEventTimer; private PlaybackServiceNotificationBuilder notificationBuilder; private String autoSkippedFeedMediaId = null; /** * Used for Lollipop notifications, Android Wear, and Android Auto. */ private MediaSessionCompat mediaSession; private static volatile MediaType currentMediaType = MediaType.UNKNOWN; private final IBinder mBinder = new LocalBinder(); public class LocalBinder extends Binder { public PlaybackService getService() { return PlaybackService.this; } } @Override public boolean onUnbind(Intent intent) { Log.d(TAG, "Received onUnbind event"); return super.onUnbind(intent); } /** * Returns an intent which starts an audio or video player, depending on the * type of media that is being played. If the playback service is not * running, the type of the last played media will be looked up. */ public static Intent getPlayerActivityIntent(Context context) { if (isRunning) { return ClientConfig.playbackServiceCallbacks.getPlayerActivityIntent(context, currentMediaType, isCasting); } else { if (PlaybackPreferences.getCurrentEpisodeIsVideo()) { return ClientConfig.playbackServiceCallbacks.getPlayerActivityIntent(context, MediaType.VIDEO, isCasting); } else { return ClientConfig.playbackServiceCallbacks.getPlayerActivityIntent(context, MediaType.AUDIO, isCasting); } } } /** * Same as getPlayerActivityIntent(context), but here the type of activity * depends on the FeedMedia that is provided as an argument. 
*/ public static Intent getPlayerActivityIntent(Context context, Playable media) { MediaType mt = media.getMediaType(); return ClientConfig.playbackServiceCallbacks.getPlayerActivityIntent(context, mt, isCasting); } @Override public void onCreate() { super.onCreate(); Log.d(TAG, "Service created."); isRunning = true; stateManager = new PlaybackServiceStateManager(this); notificationBuilder = new PlaybackServiceNotificationBuilder(this); registerReceiver(autoStateUpdated, new IntentFilter("com.google.android.gms.car.media.STATUS")); registerReceiver(headsetDisconnected, new IntentFilter(Intent.ACTION_HEADSET_PLUG)); registerReceiver(shutdownReceiver, new IntentFilter(ACTION_SHUTDOWN_PLAYBACK_SERVICE)); registerReceiver(bluetoothStateUpdated, new IntentFilter(BluetoothA2dp.ACTION_CONNECTION_STATE_CHANGED)); registerReceiver(audioBecomingNoisy, new IntentFilter(AudioManager.ACTION_AUDIO_BECOMING_NOISY)); registerReceiver(skipCurrentEpisodeReceiver, new IntentFilter(ACTION_SKIP_CURRENT_EPISODE)); registerReceiver(pausePlayCurrentEpisodeReceiver, new IntentFilter(ACTION_PAUSE_PLAY_CURRENT_EPISODE)); EventBus.getDefault().register(this); taskManager = new PlaybackServiceTaskManager(this, taskManagerCallback); flavorHelper = new PlaybackServiceFlavorHelper(PlaybackService.this, flavorHelperCallback); PreferenceManager.getDefaultSharedPreferences(this) .registerOnSharedPreferenceChangeListener(prefListener); ComponentName eventReceiver = new ComponentName(getApplicationContext(), MediaButtonReceiver.class); Intent mediaButtonIntent = new Intent(Intent.ACTION_MEDIA_BUTTON); mediaButtonIntent.setComponent(eventReceiver); PendingIntent buttonReceiverIntent = PendingIntent.getBroadcast(this, 0, mediaButtonIntent, PendingIntent.FLAG_UPDATE_CURRENT); mediaSession = new MediaSessionCompat(getApplicationContext(), TAG, eventReceiver, buttonReceiverIntent); setSessionToken(mediaSession.getSessionToken()); try { mediaSession.setCallback(sessionCallback); mediaSession.setFlags(MediaSessionCompat.FLAG_HANDLES_MEDIA_BUTTONS | MediaSessionCompat.FLAG_HANDLES_TRANSPORT_CONTROLS); } catch (NullPointerException npe) { // on some devices (Huawei) setting active can cause a NullPointerException // even with correct use of the api. 
// See http://stackoverflow.com/questions/31556679/android-huawei-mediassessioncompat // and https://plus.google.com/+IanLake/posts/YgdTkKFxz7d Log.e(TAG, "NullPointerException while setting up MediaSession"); npe.printStackTrace(); } Single.<List<MediaSessionCompat.QueueItem>>create(emitter -> { List<MediaSessionCompat.QueueItem> queueItems = new ArrayList<>(); for (FeedItem feedItem : taskManager.getQueue()) { if (feedItem.getMedia() != null) { MediaDescriptionCompat mediaDescription = feedItem.getMedia().getMediaItem().getDescription(); queueItems.add(new MediaSessionCompat.QueueItem(mediaDescription, feedItem.getId())); } } emitter.onSuccess(queueItems); }) .subscribeOn(Schedulers.io()) .observeOn(AndroidSchedulers.mainThread()) .subscribe(queueItems -> mediaSession.setQueue(queueItems), Throwable::printStackTrace); flavorHelper.initializeMediaPlayer(PlaybackService.this); mediaSession.setActive(true); EventBus.getDefault().post(new ServiceEvent(ServiceEvent.Action.SERVICE_STARTED)); } @Override public void onDestroy() { super.onDestroy(); Log.d(TAG, "Service is about to be destroyed"); if (notificationBuilder.getPlayerStatus() == PlayerStatus.PLAYING) { notificationBuilder.setPlayerStatus(PlayerStatus.STOPPED); NotificationManagerCompat notificationManager = NotificationManagerCompat.from(this); notificationManager.notify(R.id.notification_playing, notificationBuilder.build()); } stateManager.stopForeground(!UserPreferences.isPersistNotify()); isRunning = false; currentMediaType = MediaType.UNKNOWN; cancelPositionObserver(); PreferenceManager.getDefaultSharedPreferences(this).unregisterOnSharedPreferenceChangeListener(prefListener); if (mediaSession != null) { mediaSession.release(); } unregisterReceiver(autoStateUpdated); unregisterReceiver(headsetDisconnected); unregisterReceiver(shutdownReceiver); unregisterReceiver(bluetoothStateUpdated); unregisterReceiver(audioBecomingNoisy); unregisterReceiver(skipCurrentEpisodeReceiver); unregisterReceiver(pausePlayCurrentEpisodeReceiver); flavorHelper.removeCastConsumer(); flavorHelper.unregisterWifiBroadcastReceiver(); mediaPlayer.shutdown(); taskManager.shutdown(); } @Override public BrowserRoot onGetRoot(@NonNull String clientPackageName, int clientUid, Bundle rootHints) { Log.d(TAG, "OnGetRoot: clientPackageName=" + clientPackageName + "; clientUid=" + clientUid + " ; rootHints=" + rootHints); return new BrowserRoot( getResources().getString(R.string.app_name), // Name visible in Android Auto null); // Bundle of optional extras } private MediaBrowserCompat.MediaItem createBrowsableMediaItemForRoot() { MediaDescriptionCompat description = new MediaDescriptionCompat.Builder() .setMediaId(getResources().getString(R.string.queue_label)) .setTitle(getResources().getString(R.string.queue_label)) .build(); return new MediaBrowserCompat.MediaItem(description, MediaBrowserCompat.MediaItem.FLAG_BROWSABLE); } private MediaBrowserCompat.MediaItem createBrowsableMediaItemForFeed(Feed feed) { MediaDescriptionCompat.Builder builder = new MediaDescriptionCompat.Builder() .setMediaId("FeedId:" + feed.getId()) .setTitle(feed.getTitle()) .setDescription(feed.getDescription()) .setSubtitle(feed.getCustomTitle()); if (feed.getImageLocation() != null) { builder.setIconUri(Uri.parse(feed.getImageLocation())); } if (feed.getLink() != null) { builder.setMediaUri(Uri.parse(feed.getLink())); } MediaDescriptionCompat description = builder.build(); return new MediaBrowserCompat.MediaItem(description, MediaBrowserCompat.MediaItem.FLAG_BROWSABLE); } @Override public 
void onLoadChildren(@NonNull String parentId, @NonNull Result<List<MediaBrowserCompat.MediaItem>> result) { Log.d(TAG, "OnLoadChildren: parentMediaId=" + parentId); List<MediaBrowserCompat.MediaItem> mediaItems = new ArrayList<>(); if (parentId.equals(getResources().getString(R.string.app_name))) { // Root List try { if (!(taskManager.getQueue().isEmpty())) { mediaItems.add(createBrowsableMediaItemForRoot()); } } catch (InterruptedException e) { e.printStackTrace(); } List<Feed> feeds = DBReader.getFeedList(); for (Feed feed : feeds) { mediaItems.add(createBrowsableMediaItemForFeed(feed)); } } else if (parentId.equals(getResources().getString(R.string.queue_label))) { // Child List try { for (FeedItem feedItem : taskManager.getQueue()) { FeedMedia media = feedItem.getMedia(); if (media != null) { mediaItems.add(media.getMediaItem()); } } } catch (InterruptedException e) { e.printStackTrace(); } } else if (parentId.startsWith("FeedId:")) { long feedId = Long.parseLong(parentId.split(":")[1]); List<FeedItem> feedItems = DBReader.getFeedItemList(DBReader.getFeed(feedId)); for (FeedItem feedItem : feedItems) { if (feedItem.getMedia() != null && feedItem.getMedia().getMediaItem() != null) { mediaItems.add(feedItem.getMedia().getMediaItem()); } } } result.sendResult(mediaItems); } @Override public IBinder onBind(Intent intent) { Log.d(TAG, "Received onBind event"); if (intent.getAction() != null && TextUtils.equals(intent.getAction(), MediaBrowserServiceCompat.SERVICE_INTERFACE)) { return super.onBind(intent); } else { return mBinder; } } @Override public int onStartCommand(Intent intent, int flags, int startId) { super.onStartCommand(intent, flags, startId); Log.d(TAG, "OnStartCommand called"); stateManager.startForeground(R.id.notification_playing, notificationBuilder.build()); NotificationManagerCompat notificationManager = NotificationManagerCompat.from(this); notificationManager.cancel(R.id.notification_streaming_confirmation); final int keycode = intent.getIntExtra(MediaButtonReceiver.EXTRA_KEYCODE, -1); final boolean hardwareButton = intent.getBooleanExtra(MediaButtonReceiver.EXTRA_HARDWAREBUTTON, false); final boolean castDisconnect = intent.getBooleanExtra(EXTRA_CAST_DISCONNECT, false); Playable playable = intent.getParcelableExtra(EXTRA_PLAYABLE); if (keycode == -1 && playable == null && !castDisconnect) { Log.e(TAG, "PlaybackService was started with no arguments"); stateManager.stopService(); return Service.START_NOT_STICKY; } if ((flags & Service.START_FLAG_REDELIVERY) != 0) { Log.d(TAG, "onStartCommand is a redelivered intent, calling stopForeground now."); stateManager.stopForeground(true); } else { if (keycode != -1) { boolean notificationButton; if (hardwareButton) { Log.d(TAG, "Received hardware button event"); notificationButton = false; } else { Log.d(TAG, "Received media button event"); notificationButton = true; } boolean handled = handleKeycode(keycode, notificationButton); if (!handled && !stateManager.hasReceivedValidStartCommand()) { stateManager.stopService(); return Service.START_NOT_STICKY; } } else if (!flavorHelper.castDisconnect(castDisconnect) && playable != null) { stateManager.validStartCommandWasReceived(); boolean stream = intent.getBooleanExtra(EXTRA_SHOULD_STREAM, true); boolean allowStreamThisTime = intent.getBooleanExtra(EXTRA_ALLOW_STREAM_THIS_TIME, false); boolean allowStreamAlways = intent.getBooleanExtra(EXTRA_ALLOW_STREAM_ALWAYS, false); boolean startWhenPrepared = intent.getBooleanExtra(EXTRA_START_WHEN_PREPARED, false); boolean prepareImmediately = 
intent.getBooleanExtra(EXTRA_PREPARE_IMMEDIATELY, false); sendNotificationBroadcast(NOTIFICATION_TYPE_RELOAD, 0); //If the user asks to play External Media, the casting session, if on, should end. flavorHelper.castDisconnect(playable instanceof ExternalMedia); if (allowStreamAlways) { UserPreferences.setAllowMobileStreaming(true); } boolean localFeed = URLUtil.isContentUrl(playable.getStreamUrl()); if (stream && !NetworkUtils.isStreamingAllowed() && !allowStreamThisTime && !localFeed) { displayStreamingNotAllowedNotification(intent); PlaybackPreferences.writeNoMediaPlaying(); stateManager.stopService(); return Service.START_NOT_STICKY; } Observable.fromCallable( () -> { if (playable instanceof FeedMedia) { return DBReader.getFeedMedia(((FeedMedia) playable).getId()); } else { return playable; } }) .subscribeOn(Schedulers.io()) .observeOn(AndroidSchedulers.mainThread()) .subscribe( playableLoaded -> { mediaPlayer.playMediaObject(playableLoaded, stream, startWhenPrepared, prepareImmediately); addPlayableToQueue(playableLoaded); }, error -> { Log.d(TAG, "Playable was not found. Stopping service."); error.printStackTrace(); stateManager.stopService(); }); return Service.START_NOT_STICKY; } else { Log.d(TAG, "Did not handle intent to PlaybackService: " + intent); Log.d(TAG, "Extras: " + intent.getExtras()); } } return Service.START_NOT_STICKY; } private void skipIntro(Playable playable) { if (! (playable instanceof FeedMedia)) { return; } FeedMedia feedMedia = (FeedMedia) playable; FeedPreferences preferences = feedMedia.getItem().getFeed().getPreferences(); int skipIntro = preferences.getFeedSkipIntro(); Context context = getApplicationContext(); if (skipIntro > 0 && playable.getPosition() < skipIntro * 1000) { int duration = getDuration(); if (skipIntro * 1000 < duration || duration <= 0) { Log.d(TAG, "skipIntro " + playable.getEpisodeTitle()); mediaPlayer.seekTo(skipIntro * 1000); String skipIntroMesg = context.getString(R.string.pref_feed_skip_intro_toast, skipIntro); Toast toast = Toast.makeText(context, skipIntroMesg, Toast.LENGTH_LONG); toast.show(); } } } private void displayStreamingNotAllowedNotification(Intent originalIntent) { Intent intentAllowThisTime = new Intent(originalIntent); intentAllowThisTime.setAction(EXTRA_ALLOW_STREAM_THIS_TIME); intentAllowThisTime.putExtra(EXTRA_ALLOW_STREAM_THIS_TIME, true); PendingIntent pendingIntentAllowThisTime; if (android.os.Build.VERSION.SDK_INT >= android.os.Build.VERSION_CODES.O) { pendingIntentAllowThisTime = PendingIntent.getForegroundService(this, R.id.pending_intent_allow_stream_this_time, intentAllowThisTime, PendingIntent.FLAG_UPDATE_CURRENT); } else { pendingIntentAllowThisTime = PendingIntent.getService(this, R.id.pending_intent_allow_stream_this_time, intentAllowThisTime, PendingIntent.FLAG_UPDATE_CURRENT); } Intent intentAlwaysAllow = new Intent(intentAllowThisTime); intentAlwaysAllow.setAction(EXTRA_ALLOW_STREAM_ALWAYS); intentAlwaysAllow.putExtra(EXTRA_ALLOW_STREAM_ALWAYS, true); PendingIntent pendingIntentAlwaysAllow; if (android.os.Build.VERSION.SDK_INT >= android.os.Build.VERSION_CODES.O) { pendingIntentAlwaysAllow = PendingIntent.getForegroundService(this, R.id.pending_intent_allow_stream_always, intentAlwaysAllow, PendingIntent.FLAG_UPDATE_CURRENT); } else { pendingIntentAlwaysAllow = PendingIntent.getService(this, R.id.pending_intent_allow_stream_always, intentAlwaysAllow, PendingIntent.FLAG_UPDATE_CURRENT); } NotificationCompat.Builder builder = new NotificationCompat.Builder(this, NotificationUtils.CHANNEL_ID_USER_ACTION) 
.setSmallIcon(R.drawable.ic_stream_white) .setContentTitle(getString(R.string.confirm_mobile_streaming_notification_title)) .setContentText(getString(R.string.confirm_mobile_streaming_notification_message)) .setStyle(new NotificationCompat.BigTextStyle() .bigText(getString(R.string.confirm_mobile_streaming_notification_message))) .setPriority(NotificationCompat.PRIORITY_DEFAULT) .setContentIntent(pendingIntentAllowThisTime) .addAction(R.drawable.ic_stream_white, getString(R.string.confirm_mobile_streaming_button_once), pendingIntentAllowThisTime) .addAction(R.drawable.ic_stream_white, getString(R.string.confirm_mobile_streaming_button_always), pendingIntentAlwaysAllow) .setAutoCancel(true); NotificationManagerCompat notificationManager = NotificationManagerCompat.from(this); notificationManager.notify(R.id.notification_streaming_confirmation, builder.build()); } /** * Handles media button events * return: keycode was handled */ private boolean handleKeycode(int keycode, boolean notificationButton) { Log.d(TAG, "Handling keycode: " + keycode); final PlaybackServiceMediaPlayer.PSMPInfo info = mediaPlayer.getPSMPInfo(); final PlayerStatus status = info.playerStatus; switch (keycode) { case KeyEvent.KEYCODE_HEADSETHOOK: case KeyEvent.KEYCODE_MEDIA_PLAY_PAUSE: if (status == PlayerStatus.PLAYING) { mediaPlayer.pause(!UserPreferences.isPersistNotify(), false); } else if (status == PlayerStatus.PAUSED || status == PlayerStatus.PREPARED) { mediaPlayer.resume(); } else if (status == PlayerStatus.PREPARING) { mediaPlayer.setStartWhenPrepared(!mediaPlayer.isStartWhenPrepared()); } else if (status == PlayerStatus.INITIALIZED) { mediaPlayer.setStartWhenPrepared(true); mediaPlayer.prepare(); } else if (mediaPlayer.getPlayable() == null) { startPlayingFromPreferences(); } else { return false; } taskManager.restartSleepTimer(); return true; case KeyEvent.KEYCODE_MEDIA_PLAY: if (status == PlayerStatus.PAUSED || status == PlayerStatus.PREPARED) { mediaPlayer.resume(); } else if (status == PlayerStatus.INITIALIZED) { mediaPlayer.setStartWhenPrepared(true); mediaPlayer.prepare(); } else if (mediaPlayer.getPlayable() == null) { startPlayingFromPreferences(); } else { return false; } taskManager.restartSleepTimer(); return true; case KeyEvent.KEYCODE_MEDIA_PAUSE: if (status == PlayerStatus.PLAYING) { mediaPlayer.pause(!UserPreferences.isPersistNotify(), false); return true; } return false; case KeyEvent.KEYCODE_MEDIA_NEXT: if (getStatus() != PlayerStatus.PLAYING && getStatus() != PlayerStatus.PAUSED) { return false; } else if (notificationButton || UserPreferences.shouldHardwareButtonSkip()) { // assume the skip command comes from a notification or the lockscreen // a >| skip button should actually skip mediaPlayer.skip(); } else { // assume skip command comes from a (bluetooth) media button // user actually wants to fast-forward seekDelta(UserPreferences.getFastForwardSecs() * 1000); } return true; case KeyEvent.KEYCODE_MEDIA_FAST_FORWARD: if (getStatus() == PlayerStatus.PLAYING || getStatus() == PlayerStatus.PAUSED) { mediaPlayer.seekDelta(UserPreferences.getFastForwardSecs() * 1000); return true; } return false; case KeyEvent.KEYCODE_MEDIA_PREVIOUS: if (getStatus() != PlayerStatus.PLAYING && getStatus() != PlayerStatus.PAUSED) { return false; } else if (UserPreferences.shouldHardwarePreviousButtonRestart()) { // user wants to restart current episode mediaPlayer.seekTo(0); } else { // user wants to rewind current episode mediaPlayer.seekDelta(-UserPreferences.getRewindSecs() * 1000); } return true; case 
KeyEvent.KEYCODE_MEDIA_REWIND: if (getStatus() == PlayerStatus.PLAYING || getStatus() == PlayerStatus.PAUSED) { mediaPlayer.seekDelta(-UserPreferences.getRewindSecs() * 1000); } else { return false; } return true; case KeyEvent.KEYCODE_MEDIA_STOP: if (status == PlayerStatus.PLAYING) { mediaPlayer.pause(true, true); } stateManager.stopForeground(true); // gets rid of persistent notification return true; default: Log.d(TAG, "Unhandled key code: " + keycode); if (info.playable != null && info.playerStatus == PlayerStatus.PLAYING) { // only notify the user about an unknown key event if it is actually doing something String message = String.format(getResources().getString(R.string.unknown_media_key), keycode); Toast.makeText(this, message, Toast.LENGTH_SHORT).show(); } } return false; } private void startPlayingFromPreferences() { Observable.fromCallable(() -> Playable.PlayableUtils.createInstanceFromPreferences(getApplicationContext())) .subscribeOn(Schedulers.io()) .observeOn(AndroidSchedulers.mainThread()) .subscribe( playable -> { boolean localFeed = URLUtil.isContentUrl(playable.getStreamUrl()); if (PlaybackPreferences.getCurrentEpisodeIsStream() && !NetworkUtils.isStreamingAllowed() && !localFeed) { displayStreamingNotAllowedNotification( new PlaybackServiceStarter(this, playable) .prepareImmediately(true) .startWhenPrepared(true) .shouldStream(true) .getIntent()); PlaybackPreferences.writeNoMediaPlaying(); stateManager.stopService(); return; } mediaPlayer.playMediaObject(playable, PlaybackPreferences.getCurrentEpisodeIsStream(), true, true); stateManager.validStartCommandWasReceived(); PlaybackService.this.updateMediaSessionMetadata(playable); addPlayableToQueue(playable); }, error -> { Log.d(TAG, "Playable was not loaded from preferences. Stopping service."); error.printStackTrace(); stateManager.stopService(); }); } /** * Called by a mediaplayer Activity as soon as it has prepared its * mediaplayer. 
*/ public void setVideoSurface(SurfaceHolder sh) { Log.d(TAG, "Setting display"); mediaPlayer.setVideoSurface(sh); } public void notifyVideoSurfaceAbandoned() { mediaPlayer.pause(true, false); mediaPlayer.resetVideoSurface(); setupNotification(getPlayable()); stateManager.stopForeground(!UserPreferences.isPersistNotify()); } private final PlaybackServiceTaskManager.PSTMCallback taskManagerCallback = new PlaybackServiceTaskManager.PSTMCallback() { @Override public void positionSaverTick() { saveCurrentPosition(true, null, PlaybackServiceMediaPlayer.INVALID_TIME); } @Override public void onSleepTimerAlmostExpired(long timeLeft) { final float[] multiplicators = {0.1f, 0.2f, 0.3f, 0.3f, 0.3f, 0.4f, 0.4f, 0.4f, 0.6f, 0.8f}; float multiplicator = multiplicators[Math.max(0, (int) timeLeft / 1000)]; Log.d(TAG, "onSleepTimerAlmostExpired: " + multiplicator); float leftVolume = multiplicator * UserPreferences.getLeftVolume(); float rightVolume = multiplicator * UserPreferences.getRightVolume(); mediaPlayer.setVolume(leftVolume, rightVolume); } @Override public void onSleepTimerExpired() { mediaPlayer.pause(true, true); float leftVolume = UserPreferences.getLeftVolume(); float rightVolume = UserPreferences.getRightVolume(); mediaPlayer.setVolume(leftVolume, rightVolume); sendNotificationBroadcast(NOTIFICATION_TYPE_SLEEPTIMER_UPDATE, 0); } @Override public void onSleepTimerReset() { float leftVolume = UserPreferences.getLeftVolume(); float rightVolume = UserPreferences.getRightVolume(); mediaPlayer.setVolume(leftVolume, rightVolume); } @Override public void onWidgetUpdaterTick() { PlayerWidgetJobService.updateWidget(getBaseContext()); } @Override public void onChapterLoaded(Playable media) { sendNotificationBroadcast(NOTIFICATION_TYPE_RELOAD, 0); } }; private final PlaybackServiceMediaPlayer.PSMPCallback mediaPlayerCallback = new PlaybackServiceMediaPlayer.PSMPCallback() { @Override public void statusChanged(PlaybackServiceMediaPlayer.PSMPInfo newInfo) { if (mediaPlayer != null) { currentMediaType = mediaPlayer.getCurrentMediaType(); } else { currentMediaType = MediaType.UNKNOWN; } updateMediaSession(newInfo.playerStatus); switch (newInfo.playerStatus) { case INITIALIZED: PlaybackPreferences.writeMediaPlaying(mediaPlayer.getPSMPInfo().playable, mediaPlayer.getPSMPInfo().playerStatus, mediaPlayer.isStreaming()); setupNotification(newInfo); break; case PREPARED: taskManager.startChapterLoader(newInfo.playable); break; case PAUSED: if ((UserPreferences.isPersistNotify() || isCasting) && android.os.Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN) { // do not remove notification on pause based on user pref and whether android version supports expanded notifications // Change [Play] button to [Pause] setupNotification(newInfo); } else if (!UserPreferences.isPersistNotify() && !isCasting) { // remove notification on pause stateManager.stopForeground(true); } cancelPositionObserver(); PlaybackPreferences.writePlayerStatus(mediaPlayer.getPlayerStatus()); break; case STOPPED: //writePlaybackPreferencesNoMediaPlaying(); //stopService(); break; case PLAYING: PlaybackPreferences.writePlayerStatus(mediaPlayer.getPlayerStatus()); setupNotification(newInfo); setupPositionObserver(); stateManager.validStartCommandWasReceived(); // set sleep timer if auto-enabled if (newInfo.oldPlayerStatus != null && newInfo.oldPlayerStatus != PlayerStatus.SEEKING && SleepTimerPreferences.autoEnable() && !sleepTimerActive()) { setSleepTimer(SleepTimerPreferences.timerMillis()); EventBus.getDefault().post(new 
MessageEvent(getString(R.string.sleep_timer_enabled_label), PlaybackService.this::disableSleepTimer)); } break; case ERROR: PlaybackPreferences.writeNoMediaPlaying(); stateManager.stopService(); break; } IntentUtils.sendLocalBroadcast(getApplicationContext(), ACTION_PLAYER_STATUS_CHANGED); PlayerWidgetJobService.updateWidget(getBaseContext()); bluetoothNotifyChange(newInfo, AVRCP_ACTION_PLAYER_STATUS_CHANGED); bluetoothNotifyChange(newInfo, AVRCP_ACTION_META_CHANGED); } @Override public void shouldStop() { setupNotification(getPlayable()); // Stops foreground if not playing } @Override public void playbackSpeedChanged(float s) { sendNotificationBroadcast(NOTIFICATION_TYPE_PLAYBACK_SPEED_CHANGE, 0); } public void setSpeedAbilityChanged() { sendNotificationBroadcast(NOTIFICATION_TYPE_SET_SPEED_ABILITY_CHANGED, 0); } @Override public void onBufferingUpdate(int percent) { sendNotificationBroadcast(NOTIFICATION_TYPE_BUFFER_UPDATE, percent); } @Override public void onMediaChanged(boolean reloadUI) { Log.d(TAG, "reloadUI callback reached"); if (reloadUI) { sendNotificationBroadcast(NOTIFICATION_TYPE_RELOAD, 0); } PlaybackService.this.updateMediaSessionMetadata(getPlayable()); } @Override public boolean onMediaPlayerInfo(int code, @StringRes int resourceId) { switch (code) { case MediaPlayer.MEDIA_INFO_BUFFERING_START: sendNotificationBroadcast(NOTIFICATION_TYPE_BUFFER_START, 0); return true; case MediaPlayer.MEDIA_INFO_BUFFERING_END: sendNotificationBroadcast(NOTIFICATION_TYPE_BUFFER_END, 0); Playable playable = getPlayable(); if (getPlayable() instanceof FeedMedia && playable.getDuration() <= 0 && mediaPlayer.getDuration() > 0) { // Playable is being streamed and does not have a duration specified in the feed playable.setDuration(mediaPlayer.getDuration()); DBWriter.setFeedMedia((FeedMedia) playable); updateMediaSessionMetadata(playable); setupNotification(playable); } return true; default: return flavorHelper.onMediaPlayerInfo(PlaybackService.this, code, resourceId); } } @Override public boolean onMediaPlayerError(Object inObj, int what, int extra) { final String TAG = "PlaybackSvc.onErrorLtsn"; Log.w(TAG, "An error has occurred: " + what + " " + extra); if (mediaPlayer.getPlayerStatus() == PlayerStatus.PLAYING) { mediaPlayer.pause(true, false); } sendNotificationBroadcast(NOTIFICATION_TYPE_ERROR, what); PlaybackPreferences.writeNoMediaPlaying(); stateManager.stopService(); return true; } @Override public void onPostPlayback(@NonNull Playable media, boolean ended, boolean skipped, boolean playingNext) { PlaybackService.this.onPostPlayback(media, ended, skipped, playingNext); } @Override public void onPlaybackStart(@NonNull Playable playable, int position) { taskManager.startWidgetUpdater(); if (position != PlaybackServiceMediaPlayer.INVALID_TIME) { playable.setPosition(position); } else { skipIntro(playable); } playable.onPlaybackStart(); taskManager.startPositionSaver(); } @Override public void onPlaybackPause(Playable playable, int position) { taskManager.cancelPositionSaver(); cancelPositionObserver(); saveCurrentPosition(position == PlaybackServiceMediaPlayer.INVALID_TIME || playable == null, playable, position); taskManager.cancelWidgetUpdater(); if (playable != null) { playable.onPlaybackPause(getApplicationContext()); } } @Override public Playable getNextInQueue(Playable currentMedia) { return PlaybackService.this.getNextInQueue(currentMedia); } @Override public void onPlaybackEnded(MediaType mediaType, boolean stopPlaying) { PlaybackService.this.onPlaybackEnded(mediaType, stopPlaying); } 
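// The overrides above are thin adapters into the service: onPlaybackEnded in particular only forwards
// to PlaybackService.onPlaybackEnded(), which cancels the position saver, clears the playing-media
// preference and drops the foreground notification when playback stops.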
}; private Playable getNextInQueue(final Playable currentMedia) { if (!(currentMedia instanceof FeedMedia)) { Log.d(TAG, "getNextInQueue(), but playable not an instance of FeedMedia, so not proceeding"); return null; } Log.d(TAG, "getNextInQueue()"); FeedMedia media = (FeedMedia) currentMedia; try { media.loadMetadata(); } catch (Playable.PlayableException e) { Log.e(TAG, "Unable to load metadata to get next in queue", e); return null; } FeedItem item = media.getItem(); if (item == null) { Log.w(TAG, "getNextInQueue() with FeedMedia object whose FeedItem is null"); return null; } FeedItem nextItem; try { final List<FeedItem> queue = taskManager.getQueue(); nextItem = DBTasks.getQueueSuccessorOfItem(item.getId(), queue); } catch (InterruptedException e) { Log.e(TAG, "Error handling the queue in order to retrieve the next item", e); return null; } if (nextItem == null || nextItem.getMedia() == null) { return null; } if (!nextItem.getMedia().localFileAvailable() && !NetworkUtils.isStreamingAllowed() && UserPreferences.isFollowQueue() && !nextItem.getFeed().isLocalFeed()) { displayStreamingNotAllowedNotification( new PlaybackServiceStarter(this, nextItem.getMedia()) .prepareImmediately(true) .startWhenPrepared(true) .shouldStream(true) .getIntent()); PlaybackPreferences.writeNoMediaPlaying(); stateManager.stopService(); return null; } return nextItem.getMedia(); } /** * Set of instructions to be performed when playback ends. */ private void onPlaybackEnded(MediaType mediaType, boolean stopPlaying) { Log.d(TAG, "Playback ended"); if (stopPlaying) { taskManager.cancelPositionSaver(); cancelPositionObserver(); PlaybackPreferences.writeNoMediaPlaying(); if (!isCasting) { stateManager.stopForeground(true); } } if (mediaType == null) { sendNotificationBroadcast(NOTIFICATION_TYPE_PLAYBACK_END, 0); } else { sendNotificationBroadcast(NOTIFICATION_TYPE_RELOAD, isCasting ? EXTRA_CODE_CAST : (mediaType == MediaType.VIDEO) ? EXTRA_CODE_VIDEO : EXTRA_CODE_AUDIO); } } /** * This method processes the media object after its playback ended, either because it completed * or because a different media object was selected for playback. * <p> * Even though these tasks aren't supposed to be resource intensive, a good practice is to * usually call this method on a background thread. * * @param playable the media object that was playing. It is assumed that its position * property was updated before this method was called. * @param ended if true, it signals that {@param playable} was played until its end. * In such case, the position property of the media becomes irrelevant for * most of the tasks (although it's still a good practice to keep it * accurate). * @param skipped if the user pressed a skip >| button. * @param playingNext if true, it means another media object is being loaded in place of this * one. * Instances when we'd set it to false would be when we're not following the * queue or when the queue has ended. 
*/ private void onPostPlayback(final Playable playable, boolean ended, boolean skipped, boolean playingNext) { PlaybackPreferences.clearCurrentlyPlayingTemporaryPlaybackSpeed(); if (playable == null) { Log.e(TAG, "Cannot do post-playback processing: media was null"); return; } Log.d(TAG, "onPostPlayback(): media=" + playable.getEpisodeTitle()); if (!(playable instanceof FeedMedia)) { Log.d(TAG, "Not doing post-playback processing: media not of type FeedMedia"); if (ended) { playable.onPlaybackCompleted(getApplicationContext()); } else { playable.onPlaybackPause(getApplicationContext()); } return; } FeedMedia media = (FeedMedia) playable; FeedItem item = media.getItem(); boolean smartMarkAsPlayed = media.hasAlmostEnded(); if (!ended && smartMarkAsPlayed) { Log.d(TAG, "smart mark as played"); } boolean autoSkipped = false; if (autoSkippedFeedMediaId != null && autoSkippedFeedMediaId.equals(item.getIdentifyingValue())) { autoSkippedFeedMediaId = null; autoSkipped = true; } if (ended || smartMarkAsPlayed) { media.onPlaybackCompleted(getApplicationContext()); } else { media.onPlaybackPause(getApplicationContext()); } if (item != null) { if (ended || smartMarkAsPlayed || autoSkipped || (skipped && !UserPreferences.shouldSkipKeepEpisode())) { // only mark the item as played if we're not keeping it anyways DBWriter.markItemPlayed(item, FeedItem.PLAYED, ended); // don't know if it actually matters to not autodownload when smart mark as played is triggered DBWriter.removeQueueItem(PlaybackService.this, ended, item); // Delete episode if enabled if (item.getFeed().getPreferences().getCurrentAutoDelete() && (!item.isTagged(FeedItem.TAG_FAVORITE) || !UserPreferences.shouldFavoriteKeepEpisode())) { DBWriter.deleteFeedMediaOfItem(PlaybackService.this, media.getId()); Log.d(TAG, "Episode Deleted"); } } } if (ended || skipped || playingNext) { DBWriter.addItemToPlaybackHistory(media); } } public void setSleepTimer(long waitingTime) { Log.d(TAG, "Setting sleep timer to " + waitingTime + " milliseconds"); taskManager.setSleepTimer(waitingTime); sendNotificationBroadcast(NOTIFICATION_TYPE_SLEEPTIMER_UPDATE, 0); } public void disableSleepTimer() { taskManager.disableSleepTimer(); sendNotificationBroadcast(NOTIFICATION_TYPE_SLEEPTIMER_UPDATE, 0); } private void sendNotificationBroadcast(int type, int code) { Intent intent = new Intent(ACTION_PLAYER_NOTIFICATION); intent.putExtra(EXTRA_NOTIFICATION_TYPE, type); intent.putExtra(EXTRA_NOTIFICATION_CODE, code); intent.setPackage(getPackageName()); sendBroadcast(intent); } private void skipEndingIfNecessary() { Playable playable = mediaPlayer.getPlayable(); if (! 
(playable instanceof FeedMedia)) { return; } int duration = getDuration(); int remainingTime = duration - getCurrentPosition(); FeedMedia feedMedia = (FeedMedia) playable; FeedPreferences preferences = feedMedia.getItem().getFeed().getPreferences(); int skipEnd = preferences.getFeedSkipEnding(); if (skipEnd > 0 && skipEnd * 1000 < getDuration() && (remainingTime - (skipEnd * 1000) > 0) && ((remainingTime - skipEnd * 1000) < (getCurrentPlaybackSpeed() * 1000))) { Log.d(TAG, "skipEndingIfNecessary: Skipping the remaining " + remainingTime + " " + skipEnd * 1000 + " speed " + getCurrentPlaybackSpeed()); Context context = getApplicationContext(); String skipMesg = context.getString(R.string.pref_feed_skip_ending_toast, skipEnd); Toast toast = Toast.makeText(context, skipMesg, Toast.LENGTH_LONG); toast.show(); this.autoSkippedFeedMediaId = feedMedia.getItem().getIdentifyingValue(); mediaPlayer.skip(); } } /** * Updates the Media Session for the corresponding status. * * @param playerStatus the current {@link PlayerStatus} */ private void updateMediaSession(final PlayerStatus playerStatus) { PlaybackStateCompat.Builder sessionState = new PlaybackStateCompat.Builder(); int state; if (playerStatus != null) { switch (playerStatus) { case PLAYING: state = PlaybackStateCompat.STATE_PLAYING; break; case PREPARED: case PAUSED: state = PlaybackStateCompat.STATE_PAUSED; break; case STOPPED: state = PlaybackStateCompat.STATE_STOPPED; break; case SEEKING: state = PlaybackStateCompat.STATE_FAST_FORWARDING; break; case PREPARING: case INITIALIZING: state = PlaybackStateCompat.STATE_CONNECTING; break; case ERROR: state = PlaybackStateCompat.STATE_ERROR; break; case INITIALIZED: // Deliberate fall-through case INDETERMINATE: default: state = PlaybackStateCompat.STATE_NONE; break; } } else { state = PlaybackStateCompat.STATE_NONE; } sessionState.setState(state, getCurrentPosition(), getCurrentPlaybackSpeed()); long capabilities = PlaybackStateCompat.ACTION_PLAY_PAUSE | PlaybackStateCompat.ACTION_REWIND | PlaybackStateCompat.ACTION_FAST_FORWARD | PlaybackStateCompat.ACTION_SKIP_TO_NEXT | PlaybackStateCompat.ACTION_SEEK_TO; if (useSkipToPreviousForRewindInLockscreen()) { // Workaround to fool Android so that Lockscreen will expose a skip-to-previous button, // which will be used for rewind. // The workaround is used for pre-Lollipop (Android v5) devices. // For Android v5+, lockscreen widgets are really notifications (compact), // with an independent codepath // // @see #sessionCallback in the backing callback, skipToPrevious implementation // is actually the same as rewind. So no new inconsistency is created. 
// @see #setupNotification() for the method to create Androidv5+ lockscreen UI // with notification (compact) capabilities = capabilities | PlaybackStateCompat.ACTION_SKIP_TO_PREVIOUS; } UiModeManager uiModeManager = (UiModeManager) getApplicationContext().getSystemService(Context.UI_MODE_SERVICE); if (uiModeManager.getCurrentModeType() == Configuration.UI_MODE_TYPE_CAR) { sessionState.addCustomAction( new PlaybackStateCompat.CustomAction.Builder( CUSTOM_ACTION_REWIND, getString(R.string.rewind_label), R.drawable.ic_notification_fast_rewind) .build()); sessionState.addCustomAction( new PlaybackStateCompat.CustomAction.Builder( CUSTOM_ACTION_FAST_FORWARD, getString(R.string.fast_forward_label), R.drawable.ic_notification_fast_forward) .build()); } sessionState.setActions(capabilities); flavorHelper.sessionStateAddActionForWear(sessionState, CUSTOM_ACTION_REWIND, getString(R.string.rewind_label), android.R.drawable.ic_media_rew); flavorHelper.sessionStateAddActionForWear(sessionState, CUSTOM_ACTION_FAST_FORWARD, getString(R.string.fast_forward_label), android.R.drawable.ic_media_ff); flavorHelper.mediaSessionSetExtraForWear(mediaSession); mediaSession.setPlaybackState(sessionState.build()); } private static boolean useSkipToPreviousForRewindInLockscreen() { // showRewindOnCompactNotification() corresponds to the "Set Lockscreen Buttons" // Settings in UI. // Hence, from user perspective, he/she is setting the buttons for Lockscreen return (UserPreferences.showRewindOnCompactNotification() && (Build.VERSION.SDK_INT < Build.VERSION_CODES.LOLLIPOP)); } /** * Used by updateMediaSessionMetadata to load notification data in another thread. */ private Thread mediaSessionSetupThread; private void updateMediaSessionMetadata(final Playable p) { if (p == null || mediaSession == null) { return; } if (mediaSessionSetupThread != null) { mediaSessionSetupThread.interrupt(); } Runnable mediaSessionSetupTask = () -> { MediaMetadataCompat.Builder builder = new MediaMetadataCompat.Builder(); builder.putString(MediaMetadataCompat.METADATA_KEY_ARTIST, p.getFeedTitle()); builder.putString(MediaMetadataCompat.METADATA_KEY_TITLE, p.getEpisodeTitle()); builder.putString(MediaMetadataCompat.METADATA_KEY_ALBUM, p.getFeedTitle()); builder.putLong(MediaMetadataCompat.METADATA_KEY_DURATION, p.getDuration()); builder.putString(MediaMetadataCompat.METADATA_KEY_DISPLAY_TITLE, p.getEpisodeTitle()); builder.putString(MediaMetadataCompat.METADATA_KEY_DISPLAY_SUBTITLE, p.getFeedTitle()); String imageLocation = ImageResourceUtils.getImageLocation(p); if (!TextUtils.isEmpty(imageLocation)) { if (UserPreferences.setLockscreenBackground()) { builder.putString(MediaMetadataCompat.METADATA_KEY_ART_URI, imageLocation); try { Bitmap art = Glide.with(this) .asBitmap() .load(imageLocation) .apply(RequestOptions.diskCacheStrategyOf(ApGlideSettings.AP_DISK_CACHE_STRATEGY)) .submit(Target.SIZE_ORIGINAL, Target.SIZE_ORIGINAL) .get(); builder.putBitmap(MediaMetadataCompat.METADATA_KEY_ART, art); } catch (Throwable tr) { Log.e(TAG, Log.getStackTraceString(tr)); } } else if (isCasting) { // In the absence of metadata art, the controller dialog takes care of creating it. 
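// While casting, only the artwork URI is attached here; the cast controller dialog loads the image
// itself instead of a locally decoded bitmap.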
builder.putString(MediaMetadataCompat.METADATA_KEY_DISPLAY_ICON_URI, imageLocation); } } if (!Thread.currentThread().isInterrupted() && stateManager.hasReceivedValidStartCommand()) { mediaSession.setSessionActivity(PendingIntent.getActivity(this, R.id.pending_intent_player_activity, PlaybackService.getPlayerActivityIntent(this), PendingIntent.FLAG_UPDATE_CURRENT)); try { mediaSession.setMetadata(builder.build()); } catch (OutOfMemoryError e) { Log.e(TAG, "Setting media session metadata", e); builder.putBitmap(MediaMetadataCompat.METADATA_KEY_ART, null); mediaSession.setMetadata(builder.build()); } } }; mediaSessionSetupThread = new Thread(mediaSessionSetupTask); mediaSessionSetupThread.start(); } /** * Used by setupNotification to load notification data in another thread. */ private Thread notificationSetupThread; /** * Prepares notification and starts the service in the foreground. */ private void setupNotification(final PlaybackServiceMediaPlayer.PSMPInfo info) { setupNotification(info.playable); } private synchronized void setupNotification(final Playable playable) { Log.d(TAG, "setupNotification"); if (notificationSetupThread != null) { notificationSetupThread.interrupt(); } if (playable == null || mediaPlayer == null) { Log.d(TAG, "setupNotification: playable=" + playable); Log.d(TAG, "setupNotification: mediaPlayer=" + mediaPlayer); if (!stateManager.hasReceivedValidStartCommand()) { stateManager.stopService(); } return; } PlayerStatus playerStatus = mediaPlayer.getPlayerStatus(); notificationBuilder.setPlayable(playable); notificationBuilder.setMediaSessionToken(mediaSession.getSessionToken()); notificationBuilder.setPlayerStatus(playerStatus); notificationBuilder.setCasting(isCasting); notificationBuilder.updatePosition(getCurrentPosition(), getCurrentPlaybackSpeed()); NotificationManagerCompat notificationManager = NotificationManagerCompat.from(this); notificationManager.notify(R.id.notification_playing, notificationBuilder.build()); startForegroundIfPlaying(playerStatus); if (!notificationBuilder.isIconCached()) { notificationSetupThread = new Thread(() -> { Log.d(TAG, "Loading notification icon"); notificationBuilder.loadIcon(); if (!Thread.currentThread().isInterrupted()) { notificationManager.notify(R.id.notification_playing, notificationBuilder.build()); } }); notificationSetupThread.start(); } } private void startForegroundIfPlaying(@NonNull PlayerStatus status) { Log.d(TAG, "startForegroundIfPlaying: " + status); if (stateManager.hasReceivedValidStartCommand()) { if (isCasting || status == PlayerStatus.PLAYING || status == PlayerStatus.PREPARING || status == PlayerStatus.SEEKING) { stateManager.startForeground(R.id.notification_playing, notificationBuilder.build()); Log.d(TAG, "foreground"); } else { stateManager.stopForeground(false); NotificationManagerCompat notificationManager = NotificationManagerCompat.from(this); notificationManager.notify(R.id.notification_playing, notificationBuilder.build()); } } } /** * Persists the current position and last played time of the media file. * * @param fromMediaPlayer if true, the information is gathered from the current Media Player * and {@param playable} and {@param position} become irrelevant. * @param playable the playable for which the current position should be saved, unless * {@param fromMediaPlayer} is true. * @param position the position that should be saved, unless {@param fromMediaPlayer} is true. 
*/ private synchronized void saveCurrentPosition(boolean fromMediaPlayer, Playable playable, int position) { int duration; if (fromMediaPlayer) { position = getCurrentPosition(); duration = getDuration(); playable = mediaPlayer.getPlayable(); } else { duration = playable.getDuration(); } if (position != INVALID_TIME && duration != INVALID_TIME && playable != null) { Log.d(TAG, "Saving current position to " + position); playable.saveCurrentPosition( PreferenceManager.getDefaultSharedPreferences(getApplicationContext()), position, System.currentTimeMillis()); } } public boolean sleepTimerActive() { return taskManager.isSleepTimerActive(); } public long getSleepTimerTimeLeft() { return taskManager.getSleepTimerTimeLeft(); } private void bluetoothNotifyChange(PlaybackServiceMediaPlayer.PSMPInfo info, String whatChanged) { boolean isPlaying = false; if (info.playerStatus == PlayerStatus.PLAYING) { isPlaying = true; } if (info.playable != null) { Intent i = new Intent(whatChanged); i.putExtra("id", 1L); i.putExtra("artist", ""); i.putExtra("album", info.playable.getFeedTitle()); i.putExtra("track", info.playable.getEpisodeTitle()); i.putExtra("playing", isPlaying); final List<FeedItem> queue = taskManager.getQueueIfLoaded(); if (queue != null) { i.putExtra("ListSize", queue.size()); } i.putExtra("duration", (long) info.playable.getDuration()); i.putExtra("position", (long) info.playable.getPosition()); sendBroadcast(i); } } private final BroadcastReceiver autoStateUpdated = new BroadcastReceiver() { @Override public void onReceive(Context context, Intent intent) { String status = intent.getStringExtra("media_connection_status"); boolean isConnectedToCar = "media_connected".equals(status); Log.d(TAG, "Received Auto Connection update: " + status); if (!isConnectedToCar) { Log.d(TAG, "Car was unplugged during playback."); pauseIfPauseOnDisconnect(); } else { PlayerStatus playerStatus = mediaPlayer.getPlayerStatus(); if (playerStatus == PlayerStatus.PAUSED || playerStatus == PlayerStatus.PREPARED) { mediaPlayer.resume(); } else if (playerStatus == PlayerStatus.PREPARING) { mediaPlayer.setStartWhenPrepared(!mediaPlayer.isStartWhenPrepared()); } else if (playerStatus == PlayerStatus.INITIALIZED) { mediaPlayer.setStartWhenPrepared(true); mediaPlayer.prepare(); } } } }; /** * Pauses playback when the headset is disconnected and the preference is * set */ private final BroadcastReceiver headsetDisconnected = new BroadcastReceiver() { private static final String TAG = "headsetDisconnected"; private static final int UNPLUGGED = 0; private static final int PLUGGED = 1; @Override public void onReceive(Context context, Intent intent) { if (isInitialStickyBroadcast()) { // Don't pause playback after we just started, just because the receiver // delivers the current headset state (instead of a change) return; } if (TextUtils.equals(intent.getAction(), Intent.ACTION_HEADSET_PLUG)) { int state = intent.getIntExtra("state", -1); if (state != -1) { Log.d(TAG, "Headset plug event. 
State is " + state); if (state == UNPLUGGED) { Log.d(TAG, "Headset was unplugged during playback."); pauseIfPauseOnDisconnect(); } else if (state == PLUGGED) { Log.d(TAG, "Headset was plugged in during playback."); unpauseIfPauseOnDisconnect(false); } } else { Log.e(TAG, "Received invalid ACTION_HEADSET_PLUG intent"); } } } }; private final BroadcastReceiver bluetoothStateUpdated = new BroadcastReceiver() { @Override public void onReceive(Context context, Intent intent) { if (TextUtils.equals(intent.getAction(), BluetoothA2dp.ACTION_CONNECTION_STATE_CHANGED)) { int state = intent.getIntExtra(BluetoothA2dp.EXTRA_STATE, -1); if (state == BluetoothA2dp.STATE_CONNECTED) { Log.d(TAG, "Received bluetooth connection intent"); unpauseIfPauseOnDisconnect(true); } } } }; private final BroadcastReceiver audioBecomingNoisy = new BroadcastReceiver() { @Override public void onReceive(Context context, Intent intent) { // sound is about to change, eg. bluetooth -> speaker Log.d(TAG, "Pausing playback because audio is becoming noisy"); pauseIfPauseOnDisconnect(); } // android.media.AUDIO_BECOMING_NOISY }; /** * Pauses playback if PREF_PAUSE_ON_HEADSET_DISCONNECT was set to true. */ private void pauseIfPauseOnDisconnect() { if (UserPreferences.isPauseOnHeadsetDisconnect() && !isCasting()) { if (mediaPlayer.getPlayerStatus() == PlayerStatus.PLAYING) { transientPause = true; } mediaPlayer.pause(!UserPreferences.isPersistNotify(), true); } } /** * @param bluetooth true if the event for unpausing came from bluetooth */ private void unpauseIfPauseOnDisconnect(boolean bluetooth) { if (transientPause) { transientPause = false; if (!bluetooth && UserPreferences.isUnpauseOnHeadsetReconnect()) { mediaPlayer.resume(); } else if (bluetooth && UserPreferences.isUnpauseOnBluetoothReconnect()) { // let the user know we've started playback again... 
Vibrator v = (Vibrator) getApplicationContext().getSystemService(Context.VIBRATOR_SERVICE); if (v != null) { v.vibrate(500); } mediaPlayer.resume(); } } } private final BroadcastReceiver shutdownReceiver = new BroadcastReceiver() { @Override public void onReceive(Context context, Intent intent) { if (TextUtils.equals(intent.getAction(), ACTION_SHUTDOWN_PLAYBACK_SERVICE)) { stateManager.stopService(); } } }; private final BroadcastReceiver skipCurrentEpisodeReceiver = new BroadcastReceiver() { @Override public void onReceive(Context context, Intent intent) { if (TextUtils.equals(intent.getAction(), ACTION_SKIP_CURRENT_EPISODE)) { Log.d(TAG, "Received SKIP_CURRENT_EPISODE intent"); mediaPlayer.skip(); } } }; private final BroadcastReceiver pausePlayCurrentEpisodeReceiver = new BroadcastReceiver() { @Override public void onReceive(Context context, Intent intent) { if (TextUtils.equals(intent.getAction(), ACTION_PAUSE_PLAY_CURRENT_EPISODE)) { Log.d(TAG, "Received PAUSE_PLAY_CURRENT_EPISODE intent"); mediaPlayer.pause(false, false); } } }; @Subscribe(threadMode = ThreadMode.MAIN) @SuppressWarnings("unused") public void volumeAdaptionChanged(VolumeAdaptionChangedEvent event) { PlaybackVolumeUpdater playbackVolumeUpdater = new PlaybackVolumeUpdater(); playbackVolumeUpdater.updateVolumeIfNecessary(mediaPlayer, event.getFeedId(), event.getVolumeAdaptionSetting()); } @Subscribe(threadMode = ThreadMode.MAIN) @SuppressWarnings("unused") public void speedPresetChanged(SpeedPresetChangedEvent event) { if (getPlayable() instanceof FeedMedia) { if (((FeedMedia) getPlayable()).getItem().getFeed().getId() == event.getFeedId()) { if (event.getSpeed() == SPEED_USE_GLOBAL) { setSpeed(UserPreferences.getPlaybackSpeed(getPlayable().getMediaType())); } else { setSpeed(event.getSpeed()); } } } } @Subscribe(threadMode = ThreadMode.MAIN) @SuppressWarnings("unused") public void skipIntroEndingPresetChanged(SkipIntroEndingChangedEvent event) { if (getPlayable() instanceof FeedMedia) { if (((FeedMedia) getPlayable()).getItem().getFeed().getId() == event.getFeedId()) { if (event.getSkipEnding() != 0) { FeedPreferences feedPreferences = ((FeedMedia) getPlayable()).getItem().getFeed().getPreferences(); feedPreferences.setFeedSkipIntro(event.getSkipIntro()); feedPreferences.setFeedSkipEnding(event.getSkipEnding()); } } } } public static MediaType getCurrentMediaType() { return currentMediaType; } public static boolean isCasting() { return isCasting; } public void resume() { mediaPlayer.resume(); taskManager.restartSleepTimer(); } public void prepare() { mediaPlayer.prepare(); taskManager.restartSleepTimer(); } public void pause(boolean abandonAudioFocus, boolean reinit) { mediaPlayer.pause(abandonAudioFocus, reinit); } public void reinit() { mediaPlayer.reinit(); } public PlaybackServiceMediaPlayer.PSMPInfo getPSMPInfo() { return mediaPlayer.getPSMPInfo(); } public PlayerStatus getStatus() { return mediaPlayer.getPlayerStatus(); } public Playable getPlayable() { return mediaPlayer.getPlayable(); } public boolean canSetSpeed() { return mediaPlayer.canSetSpeed(); } public void setSpeed(float speed) { mediaPlayer.setPlaybackParams(speed, UserPreferences.isSkipSilence()); } public void skipSilence(boolean skipSilence) { mediaPlayer.setPlaybackParams(getCurrentPlaybackSpeed(), skipSilence); } public void setVolume(float leftVolume, float rightVolume) { mediaPlayer.setVolume(leftVolume, rightVolume); } public float getCurrentPlaybackSpeed() { if(mediaPlayer == null) { return 1.0f; } return mediaPlayer.getPlaybackSpeed(); } 
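// The accessors below are thin delegates that expose playback state and controls to bound clients;
// most guard against a null mediaPlayer and return a safe default such as INVALID_TIME.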
public boolean canDownmix() { return mediaPlayer.canDownmix(); } public void setDownmix(boolean enable) { mediaPlayer.setDownmix(enable); } public boolean isStartWhenPrepared() { return mediaPlayer.isStartWhenPrepared(); } public void setStartWhenPrepared(boolean s) { mediaPlayer.setStartWhenPrepared(s); } public void seekTo(final int t) { mediaPlayer.seekTo(t); } private void seekDelta(final int d) { mediaPlayer.seekDelta(d); } /** * Seek to the start of the specified chapter. */ public void seekToChapter(Chapter c) { seekTo((int) c.getStart()); } /** * call getDuration() on mediaplayer or return INVALID_TIME if player is in * an invalid state. */ public int getDuration() { if (mediaPlayer == null) { return INVALID_TIME; } return mediaPlayer.getDuration(); } /** * call getCurrentPosition() on mediaplayer or return INVALID_TIME if player * is in an invalid state. */ public int getCurrentPosition() { if (mediaPlayer == null) { return INVALID_TIME; } return mediaPlayer.getPosition(); } public List<String> getAudioTracks() { if (mediaPlayer == null) { return Collections.emptyList(); } return mediaPlayer.getAudioTracks(); } public int getSelectedAudioTrack() { if (mediaPlayer == null) { return -1; } return mediaPlayer.getSelectedAudioTrack(); } public void setAudioTrack(int track) { if (mediaPlayer != null) { mediaPlayer.setAudioTrack(track); } } public boolean isStreaming() { return mediaPlayer.isStreaming(); } public Pair<Integer, Integer> getVideoSize() { return mediaPlayer.getVideoSize(); } private void setupPositionObserver() { if (positionEventTimer != null) { positionEventTimer.dispose(); } Log.d(TAG, "Setting up position observer"); positionEventTimer = Observable.interval(1, TimeUnit.SECONDS) .observeOn(AndroidSchedulers.mainThread()) .subscribe(number -> { EventBus.getDefault().post(new PlaybackPositionEvent(getCurrentPosition(), getDuration())); if (Build.VERSION.SDK_INT < 29) { notificationBuilder.updatePosition(getCurrentPosition(), getCurrentPlaybackSpeed()); NotificationManager notificationManager = (NotificationManager) getSystemService(NOTIFICATION_SERVICE); notificationManager.notify(R.id.notification_playing, notificationBuilder.build()); } skipEndingIfNecessary(); }); } private void cancelPositionObserver() { if (positionEventTimer != null) { positionEventTimer.dispose(); } } private void addPlayableToQueue(Playable playable) { if (playable instanceof FeedMedia) { long itemId = ((FeedMedia) playable).getItem().getId(); DBWriter.addQueueItem(this, false, false, itemId); } } private final MediaSessionCompat.Callback sessionCallback = new MediaSessionCompat.Callback() { private static final String TAG = "MediaSessionCompat"; @Override public void onPlay() { Log.d(TAG, "onPlay()"); PlayerStatus status = getStatus(); if (status == PlayerStatus.PAUSED || status == PlayerStatus.PREPARED) { resume(); } else if (status == PlayerStatus.INITIALIZED) { setStartWhenPrepared(true); prepare(); } } @Override public void onPlayFromMediaId(String mediaId, Bundle extras) { Log.d(TAG, "onPlayFromMediaId: mediaId: " + mediaId + " extras: " + extras.toString()); FeedMedia p = DBReader.getFeedMedia(Long.parseLong(mediaId)); if (p != null) { mediaPlayer.playMediaObject(p, !p.localFileAvailable(), true, true); addPlayableToQueue(p); } } @Override public void onPlayFromSearch(String query, Bundle extras) { Log.d(TAG, "onPlayFromSearch query=" + query + " extras=" + extras.toString()); List<FeedItem> results = FeedSearcher.searchFeedItems(getBaseContext(), query, 0); if (results.size() > 0 && 
results.get(0).getMedia() != null) { FeedMedia media = results.get(0).getMedia(); mediaPlayer.playMediaObject(media, !media.localFileAvailable(), true, true); addPlayableToQueue(media); return; } onPlay(); } @Override public void onPause() { Log.d(TAG, "onPause()"); if (getStatus() == PlayerStatus.PLAYING) { pause(!UserPreferences.isPersistNotify(), true); } } @Override public void onStop() { Log.d(TAG, "onStop()"); mediaPlayer.stopPlayback(true); } @Override public void onSkipToPrevious() { Log.d(TAG, "onSkipToPrevious()"); seekDelta(-UserPreferences.getRewindSecs() * 1000); } @Override public void onRewind() { Log.d(TAG, "onRewind()"); seekDelta(-UserPreferences.getRewindSecs() * 1000); } @Override public void onFastForward() { Log.d(TAG, "onFastForward()"); seekDelta(UserPreferences.getFastForwardSecs() * 1000); } @Override public void onSkipToNext() { Log.d(TAG, "onSkipToNext()"); if (UserPreferences.shouldHardwareButtonSkip()) { mediaPlayer.skip(); } else { seekDelta(UserPreferences.getFastForwardSecs() * 1000); } } @Override public void onSeekTo(long pos) { Log.d(TAG, "onSeekTo()"); seekTo((int) pos); } @Override public boolean onMediaButtonEvent(final Intent mediaButton) { Log.d(TAG, "onMediaButtonEvent(" + mediaButton + ")"); if (mediaButton != null) { KeyEvent keyEvent = mediaButton.getParcelableExtra(Intent.EXTRA_KEY_EVENT); if (keyEvent != null && keyEvent.getAction() == KeyEvent.ACTION_DOWN && keyEvent.getRepeatCount() == 0) { return handleKeycode(keyEvent.getKeyCode(), false); } } return false; } @Override public void onCustomAction(String action, Bundle extra) { Log.d(TAG, "onCustomAction(" + action + ")"); if (CUSTOM_ACTION_FAST_FORWARD.equals(action)) { onFastForward(); } else if (CUSTOM_ACTION_REWIND.equals(action)) { onRewind(); } } }; private final SharedPreferences.OnSharedPreferenceChangeListener prefListener = (sharedPreferences, key) -> { if (UserPreferences.PREF_LOCKSCREEN_BACKGROUND.equals(key)) { updateMediaSessionMetadata(getPlayable()); } else { flavorHelper.onSharedPreference(key); } }; interface FlavorHelperCallback { PlaybackServiceMediaPlayer.PSMPCallback getMediaPlayerCallback(); void setMediaPlayer(PlaybackServiceMediaPlayer mediaPlayer); PlaybackServiceMediaPlayer getMediaPlayer(); void setIsCasting(boolean isCasting); void sendNotificationBroadcast(int type, int code); void saveCurrentPosition(boolean fromMediaPlayer, Playable playable, int position); void setupNotification(boolean connected, PlaybackServiceMediaPlayer.PSMPInfo info); MediaSessionCompat getMediaSession(); Intent registerReceiver(BroadcastReceiver receiver, IntentFilter filter); void unregisterReceiver(BroadcastReceiver receiver); } private final FlavorHelperCallback flavorHelperCallback = new FlavorHelperCallback() { @Override public PlaybackServiceMediaPlayer.PSMPCallback getMediaPlayerCallback() { return PlaybackService.this.mediaPlayerCallback; } @Override public void setMediaPlayer(PlaybackServiceMediaPlayer mediaPlayer) { PlaybackService.this.mediaPlayer = mediaPlayer; } @Override public PlaybackServiceMediaPlayer getMediaPlayer() { return PlaybackService.this.mediaPlayer; } @Override public void setIsCasting(boolean isCasting) { PlaybackService.isCasting = isCasting; stateManager.validStartCommandWasReceived(); } @Override public void sendNotificationBroadcast(int type, int code) { PlaybackService.this.sendNotificationBroadcast(type, code); } @Override public void saveCurrentPosition(boolean fromMediaPlayer, Playable playable, int position) { 
PlaybackService.this.saveCurrentPosition(fromMediaPlayer, playable, position); } @Override public void setupNotification(boolean connected, PlaybackServiceMediaPlayer.PSMPInfo info) { if (connected) { PlaybackService.this.setupNotification(info); } else { PlayerStatus status = info.playerStatus; if ((status == PlayerStatus.PLAYING || status == PlayerStatus.SEEKING || status == PlayerStatus.PREPARING || UserPreferences.isPersistNotify()) && android.os.Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN) { PlaybackService.this.setupNotification(info); } else if (!UserPreferences.isPersistNotify()) { stateManager.stopForeground(true); } } } @Override public MediaSessionCompat getMediaSession() { return PlaybackService.this.mediaSession; } @Override public Intent registerReceiver(BroadcastReceiver receiver, IntentFilter filter) { return PlaybackService.this.registerReceiver(receiver, filter); } @Override public void unregisterReceiver(BroadcastReceiver receiver) { PlaybackService.this.unregisterReceiver(receiver); } }; }
1
17,812
Good catch! Could you please move this to `PlaybackService.this.onPlaybackEnded`? I think it's cleaner if everything is in a single place.
AntennaPod-AntennaPod
java
@@ -421,6 +421,7 @@ void LXQtTaskButton::activateWithDraggable() // in progress to allow drop it into an app raiseApplication(); KWindowSystem::forceActiveWindow(mWindow); + xcb_flush(QX11Info::connection()); } /************************************************
1
/* BEGIN_COMMON_COPYRIGHT_HEADER * (c)LGPL2+ * * LXQt - a lightweight, Qt based, desktop toolset * https://lxqt.org * * Copyright: 2011 Razor team * 2014 LXQt team * Authors: * Alexander Sokoloff <[email protected]> * Kuzma Shapran <[email protected]> * * This program or library is free software; you can redistribute it * and/or modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * You should have received a copy of the GNU Lesser General * Public License along with this library; if not, write to the * Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, * Boston, MA 02110-1301 USA * * END_COMMON_COPYRIGHT_HEADER */ #include "lxqttaskbutton.h" #include "lxqttaskgroup.h" #include "lxqttaskbar.h" #include <LXQt/Settings> #include <QDebug> #include <XdgIcon> #include <QTimer> #include <QMenu> #include <QAction> #include <QContextMenuEvent> #include <QPainter> #include <QDrag> #include <QMouseEvent> #include <QMimeData> #include <QApplication> #include <QDragEnterEvent> #include <QStylePainter> #include <QStyleOptionToolButton> #include <QDesktopWidget> #include "lxqttaskbutton.h" #include "lxqttaskgroup.h" #include "lxqttaskbar.h" #include <KWindowSystem/KWindowSystem> // Necessary for closeApplication() #include <KWindowSystem/NETWM> #include <QX11Info> bool LXQtTaskButton::sDraggging = false; /************************************************ ************************************************/ void LeftAlignedTextStyle::drawItemText(QPainter * painter, const QRect & rect, int flags , const QPalette & pal, bool enabled, const QString & text , QPalette::ColorRole textRole) const { QString txt = text; // get the button text because the text that's given to this function may be middle-elided if (const QToolButton *tb = dynamic_cast<const QToolButton*>(painter->device())) txt = tb->text(); txt = QFontMetrics(painter->font()).elidedText(txt, Qt::ElideRight, rect.width()); QProxyStyle::drawItemText(painter, rect, (flags & ~Qt::AlignHCenter) | Qt::AlignLeft, pal, enabled, txt, textRole); } /************************************************ ************************************************/ LXQtTaskButton::LXQtTaskButton(const WId window, LXQtTaskBar * taskbar, QWidget *parent) : QToolButton(parent), mWindow(window), mUrgencyHint(false), mOrigin(Qt::TopLeftCorner), mParentTaskBar(taskbar), mPlugin(mParentTaskBar->plugin()), mIconSize(mPlugin->panel()->iconSize()), mWheelDelta(0), mDNDTimer(new QTimer(this)), mWheelTimer(new QTimer(this)) { Q_ASSERT(taskbar); setCheckable(true); setSizePolicy(QSizePolicy::Expanding, QSizePolicy::Expanding); setMinimumWidth(1); setMinimumHeight(1); setToolButtonStyle(Qt::ToolButtonTextBesideIcon); setAcceptDrops(true); updateText(); updateIcon(); mDNDTimer->setSingleShot(true); mDNDTimer->setInterval(700); connect(mDNDTimer, SIGNAL(timeout()), this, SLOT(activateWithDraggable())); mWheelTimer->setSingleShot(true); mWheelTimer->setInterval(250); connect(mWheelTimer, &QTimer::timeout, [this] { mWheelDelta = 0; // forget previous wheel deltas }); connect(LXQt::Settings::globalSettings(), SIGNAL(iconThemeChanged()), this, SLOT(updateIcon())); connect(mParentTaskBar, 
&LXQtTaskBar::iconByClassChanged, this, &LXQtTaskButton::updateIcon); } /************************************************ ************************************************/ LXQtTaskButton::~LXQtTaskButton() { } /************************************************ ************************************************/ void LXQtTaskButton::updateText() { KWindowInfo info(mWindow, NET::WMVisibleName | NET::WMName); QString title = info.visibleName().isEmpty() ? info.name() : info.visibleName(); setText(title.replace(QStringLiteral("&"), QStringLiteral("&&"))); setToolTip(title); } /************************************************ ************************************************/ void LXQtTaskButton::updateIcon() { QIcon ico; if (mParentTaskBar->isIconByClass()) { ico = XdgIcon::fromTheme(QString::fromUtf8(KWindowInfo{mWindow, 0, NET::WM2WindowClass}.windowClassClass()).toLower()); } if (ico.isNull()) { #if QT_VERSION >= 0x050600 int devicePixels = mIconSize * devicePixelRatioF(); #else int devicePixels = mIconSize * devicePixelRatio(); #endif ico = KWindowSystem::icon(mWindow, devicePixels, devicePixels); } setIcon(ico.isNull() ? XdgIcon::defaultApplicationIcon() : ico); } /************************************************ ************************************************/ void LXQtTaskButton::refreshIconGeometry(QRect const & geom) { xcb_connection_t* x11conn = QX11Info::connection(); if (!x11conn) { return; } NETWinInfo info(x11conn, windowId(), (WId) QX11Info::appRootWindow(), NET::WMIconGeometry, 0); NETRect const curr = info.iconGeometry(); if (curr.pos.x != geom.x() || curr.pos.y != geom.y() || curr.size.width != geom.width() || curr.size.height != geom.height()) { NETRect nrect; nrect.pos.x = geom.x(); nrect.pos.y = geom.y(); nrect.size.height = geom.height(); nrect.size.width = geom.width(); info.setIconGeometry(nrect); } } /************************************************ ************************************************/ void LXQtTaskButton::changeEvent(QEvent *event) { if (event->type() == QEvent::StyleChange) { // When the icon size changes, the panel doesn't emit any specific // signal, but it triggers a stylesheet update, which we can detect int newIconSize = mPlugin->panel()->iconSize(); if (newIconSize != mIconSize) { mIconSize = newIconSize; updateIcon(); } } QToolButton::changeEvent(event); } /************************************************ ************************************************/ void LXQtTaskButton::dragEnterEvent(QDragEnterEvent *event) { // It must be here otherwise dragLeaveEvent and dragMoveEvent won't be called // on the other hand drop and dragmove events of parent widget won't be called event->acceptProposedAction(); if (event->mimeData()->hasFormat(mimeDataFormat())) { emit dragging(event->source(), event->pos()); setAttribute(Qt::WA_UnderMouse, false); } else { mDNDTimer->start(); } QToolButton::dragEnterEvent(event); } void LXQtTaskButton::dragMoveEvent(QDragMoveEvent * event) { if (event->mimeData()->hasFormat(mimeDataFormat())) { emit dragging(event->source(), event->pos()); setAttribute(Qt::WA_UnderMouse, false); } } void LXQtTaskButton::dragLeaveEvent(QDragLeaveEvent *event) { mDNDTimer->stop(); QToolButton::dragLeaveEvent(event); } void LXQtTaskButton::dropEvent(QDropEvent *event) { mDNDTimer->stop(); if (event->mimeData()->hasFormat(mimeDataFormat())) { emit dropped(event->source(), event->pos()); setAttribute(Qt::WA_UnderMouse, false); } QToolButton::dropEvent(event); } /************************************************ 
************************************************/ void LXQtTaskButton::mousePressEvent(QMouseEvent* event) { const Qt::MouseButton b = event->button(); if (Qt::LeftButton == b) mDragStartPosition = event->pos(); else if (Qt::MidButton == b && parentTaskBar()->closeOnMiddleClick()) closeApplication(); QToolButton::mousePressEvent(event); } /************************************************ ************************************************/ void LXQtTaskButton::mouseReleaseEvent(QMouseEvent* event) { if (!sDraggging && event->button() == Qt::LeftButton) { if (isChecked()) minimizeApplication(); else raiseApplication(); } QToolButton::mouseReleaseEvent(event); } /************************************************ ************************************************/ void LXQtTaskButton::wheelEvent(QWheelEvent* event) { // ignore wheel event if it is not "raise", "minimize" or "move" window if (mParentTaskBar->wheelEventsAction() < 2 || mParentTaskBar->wheelEventsAction() > 5) return QToolButton::wheelEvent(event); QPoint angleDelta = event->angleDelta(); Qt::Orientation orient = (qAbs(angleDelta.x()) > qAbs(angleDelta.y()) ? Qt::Horizontal : Qt::Vertical); int delta = (orient == Qt::Horizontal ? angleDelta.x() : angleDelta.y()); if (!mWheelTimer->isActive()) mWheelDelta += abs(delta); else { // NOTE: We should consider a short delay after the last wheel event // in order to distinguish between separate wheel rotations; otherwise, // a wheel delta threshold will not make much sense because the delta // might have been increased due to a previous and separate wheel rotation. mWheelTimer->start(); } if (mWheelDelta < mParentTaskBar->wheelDeltaThreshold()) return QToolButton::wheelEvent(event); else { mWheelDelta = 0; mWheelTimer->start(); // start to distinguish between separate wheel rotations } int D = delta < 0 ? 1 : -1; if (mParentTaskBar->wheelEventsAction() == 4) { moveApplicationToPrevNextDesktop(D < 0); } else if (mParentTaskBar->wheelEventsAction() == 5) { moveApplicationToPrevNextDesktop(D > 0); } else { if (mParentTaskBar->wheelEventsAction() == 3) D *= -1; if (D < 0) raiseApplication(); else if (D > 0) minimizeApplication(); } QToolButton::wheelEvent(event); } /************************************************ ************************************************/ QMimeData * LXQtTaskButton::mimeData() { QMimeData *mimedata = new QMimeData; QByteArray ba; QDataStream stream(&ba,QIODevice::WriteOnly); stream << (qlonglong)(mWindow); mimedata->setData(mimeDataFormat(), ba); return mimedata; } /************************************************ ************************************************/ void LXQtTaskButton::mouseMoveEvent(QMouseEvent* event) { QAbstractButton::mouseMoveEvent(event); if (!(event->buttons() & Qt::LeftButton)) return; if ((event->pos() - mDragStartPosition).manhattanLength() < QApplication::startDragDistance()) return; QDrag *drag = new QDrag(this); drag->setMimeData(mimeData()); QIcon ico = icon(); QPixmap img = ico.pixmap(ico.actualSize({32, 32})); drag->setPixmap(img); switch (parentTaskBar()->panel()->position()) { case ILXQtPanel::PositionLeft: case ILXQtPanel::PositionTop: drag->setHotSpot({0, 0}); break; case ILXQtPanel::PositionRight: case ILXQtPanel::PositionBottom: drag->setHotSpot(img.rect().bottomRight()); break; } sDraggging = true; drag->exec(); // if button is dropped out of panel (e.g. 
on desktop) // it is not deleted automatically by Qt drag->deleteLater(); // release mouse appropriately, by positioning the event outside // the button rectangle (otherwise, the button will be toggled) QMouseEvent releasingEvent(QEvent::MouseButtonRelease, QPoint(-1,-1), Qt::LeftButton, Qt::NoButton, Qt::NoModifier); mouseReleaseEvent(&releasingEvent); sDraggging = false; } /************************************************ ************************************************/ bool LXQtTaskButton::isApplicationHidden() const { KWindowInfo info(mWindow, NET::WMState); return (info.state() & NET::Hidden); } /************************************************ ************************************************/ bool LXQtTaskButton::isApplicationActive() const { return KWindowSystem::activeWindow() == mWindow; } /************************************************ ************************************************/ void LXQtTaskButton::activateWithDraggable() { // raise app in any time when there is a drag // in progress to allow drop it into an app raiseApplication(); KWindowSystem::forceActiveWindow(mWindow); } /************************************************ ************************************************/ void LXQtTaskButton::raiseApplication() { KWindowInfo info(mWindow, NET::WMDesktop | NET::WMState | NET::XAWMState); if (parentTaskBar()->raiseOnCurrentDesktop() && info.isMinimized()) { KWindowSystem::setOnDesktop(mWindow, KWindowSystem::currentDesktop()); } else { int winDesktop = info.desktop(); if (KWindowSystem::currentDesktop() != winDesktop) KWindowSystem::setCurrentDesktop(winDesktop); } KWindowSystem::activateWindow(mWindow); setUrgencyHint(false); } /************************************************ ************************************************/ void LXQtTaskButton::minimizeApplication() { KWindowSystem::minimizeWindow(mWindow); } /************************************************ ************************************************/ void LXQtTaskButton::maximizeApplication() { QAction* act = qobject_cast<QAction*>(sender()); if (!act) return; int state = act->data().toInt(); switch (state) { case NET::MaxHoriz: KWindowSystem::setState(mWindow, NET::MaxHoriz); break; case NET::MaxVert: KWindowSystem::setState(mWindow, NET::MaxVert); break; default: KWindowSystem::setState(mWindow, NET::Max); break; } if (!isApplicationActive()) raiseApplication(); } /************************************************ ************************************************/ void LXQtTaskButton::deMaximizeApplication() { KWindowSystem::clearState(mWindow, NET::Max); if (!isApplicationActive()) raiseApplication(); } /************************************************ ************************************************/ void LXQtTaskButton::shadeApplication() { KWindowSystem::setState(mWindow, NET::Shaded); } /************************************************ ************************************************/ void LXQtTaskButton::unShadeApplication() { KWindowSystem::clearState(mWindow, NET::Shaded); } /************************************************ ************************************************/ void LXQtTaskButton::closeApplication() { // FIXME: Why there is no such thing in KWindowSystem?? 
NETRootInfo(QX11Info::connection(), NET::CloseWindow).closeWindowRequest(mWindow); } /************************************************ ************************************************/ void LXQtTaskButton::setApplicationLayer() { QAction* act = qobject_cast<QAction*>(sender()); if (!act) return; int layer = act->data().toInt(); switch(layer) { case NET::KeepAbove: KWindowSystem::clearState(mWindow, NET::KeepBelow); KWindowSystem::setState(mWindow, NET::KeepAbove); break; case NET::KeepBelow: KWindowSystem::clearState(mWindow, NET::KeepAbove); KWindowSystem::setState(mWindow, NET::KeepBelow); break; default: KWindowSystem::clearState(mWindow, NET::KeepBelow); KWindowSystem::clearState(mWindow, NET::KeepAbove); break; } } /************************************************ ************************************************/ void LXQtTaskButton::moveApplicationToDesktop() { QAction* act = qobject_cast<QAction*>(sender()); if (!act) return; bool ok; int desk = act->data().toInt(&ok); if (!ok) return; KWindowSystem::setOnDesktop(mWindow, desk); } /************************************************ ************************************************/ void LXQtTaskButton::moveApplicationToPrevNextDesktop(bool next) { int deskNum = KWindowSystem::numberOfDesktops(); if (deskNum <= 1) return; int targetDesk = KWindowInfo(mWindow, NET::WMDesktop).desktop() + (next ? 1 : -1); // wrap around if (targetDesk > deskNum) targetDesk = 1; else if (targetDesk < 1) targetDesk = deskNum; KWindowSystem::setOnDesktop(mWindow, targetDesk); } /************************************************ ************************************************/ void LXQtTaskButton::moveApplication() { KWindowInfo info(mWindow, NET::WMDesktop); if (!info.isOnCurrentDesktop()) KWindowSystem::setCurrentDesktop(info.desktop()); if (isMinimized()) KWindowSystem::unminimizeWindow(mWindow); KWindowSystem::forceActiveWindow(mWindow); const QRect& g = KWindowInfo(mWindow, NET::WMGeometry).geometry(); int X = g.center().x(); int Y = g.center().y(); QCursor::setPos(X, Y); NETRootInfo(QX11Info::connection(), NET::WMMoveResize).moveResizeRequest(mWindow, X, Y, NET::Move); } /************************************************ ************************************************/ void LXQtTaskButton::resizeApplication() { KWindowInfo info(mWindow, NET::WMDesktop); if (!info.isOnCurrentDesktop()) KWindowSystem::setCurrentDesktop(info.desktop()); if (isMinimized()) KWindowSystem::unminimizeWindow(mWindow); KWindowSystem::forceActiveWindow(mWindow); const QRect& g = KWindowInfo(mWindow, NET::WMGeometry).geometry(); int X = g.bottomRight().x(); int Y = g.bottomRight().y(); QCursor::setPos(X, Y); NETRootInfo(QX11Info::connection(), NET::WMMoveResize).moveResizeRequest(mWindow, X, Y, NET::BottomRight); } /************************************************ ************************************************/ void LXQtTaskButton::contextMenuEvent(QContextMenuEvent* event) { if (event->modifiers().testFlag(Qt::ControlModifier)) { event->ignore(); return; } KWindowInfo info(mWindow, 0, NET::WM2AllowedActions); unsigned long state = KWindowInfo(mWindow, NET::WMState).state(); QMenu * menu = new QMenu(tr("Application")); menu->setAttribute(Qt::WA_DeleteOnClose); QAction* a; /* KDE menu ******* + To &Desktop > + &All Desktops + --- + &1 Desktop 1 + &2 Desktop 2 + &To Current Desktop &Move Re&size + Mi&nimize + Ma&ximize + &Shade Ad&vanced > Keep &Above Others Keep &Below Others Fill screen &Layer > Always on &top &Normal Always on &bottom --- + &Close */ /********** Desktop menu 
**********/ int deskNum = KWindowSystem::numberOfDesktops(); if (deskNum > 1) { int winDesk = KWindowInfo(mWindow, NET::WMDesktop).desktop(); QMenu* deskMenu = menu->addMenu(tr("To &Desktop")); a = deskMenu->addAction(tr("&All Desktops")); a->setData(NET::OnAllDesktops); a->setEnabled(winDesk != NET::OnAllDesktops); connect(a, SIGNAL(triggered(bool)), this, SLOT(moveApplicationToDesktop())); deskMenu->addSeparator(); for (int i = 0; i < deskNum; ++i) { a = deskMenu->addAction(tr("Desktop &%1").arg(i + 1)); a->setData(i + 1); a->setEnabled(i + 1 != winDesk); connect(a, SIGNAL(triggered(bool)), this, SLOT(moveApplicationToDesktop())); } int curDesk = KWindowSystem::currentDesktop(); a = menu->addAction(tr("&To Current Desktop")); a->setData(curDesk); a->setEnabled(curDesk != winDesk); connect(a, SIGNAL(triggered(bool)), this, SLOT(moveApplicationToDesktop())); } /********** Move/Resize **********/ menu->addSeparator(); a = menu->addAction(tr("&Move")); a->setEnabled(info.actionSupported(NET::ActionMove) && !(state & NET::Max) && !(state & NET::FullScreen)); connect(a, &QAction::triggered, this, &LXQtTaskButton::moveApplication); a = menu->addAction(tr("Resi&ze")); a->setEnabled(info.actionSupported(NET::ActionResize) && !(state & NET::Max) && !(state & NET::FullScreen)); connect(a, &QAction::triggered, this, &LXQtTaskButton::resizeApplication); /********** State menu **********/ menu->addSeparator(); a = menu->addAction(tr("Ma&ximize")); a->setEnabled(info.actionSupported(NET::ActionMax) && (!(state & NET::Max) || (state & NET::Hidden))); a->setData(NET::Max); connect(a, SIGNAL(triggered(bool)), this, SLOT(maximizeApplication())); if (event->modifiers() & Qt::ShiftModifier) { a = menu->addAction(tr("Maximize vertically")); a->setEnabled(info.actionSupported(NET::ActionMaxVert) && !((state & NET::MaxVert) || (state & NET::Hidden))); a->setData(NET::MaxVert); connect(a, SIGNAL(triggered(bool)), this, SLOT(maximizeApplication())); a = menu->addAction(tr("Maximize horizontally")); a->setEnabled(info.actionSupported(NET::ActionMaxHoriz) && !((state & NET::MaxHoriz) || (state & NET::Hidden))); a->setData(NET::MaxHoriz); connect(a, SIGNAL(triggered(bool)), this, SLOT(maximizeApplication())); } a = menu->addAction(tr("&Restore")); a->setEnabled((state & NET::Hidden) || (state & NET::Max) || (state & NET::MaxHoriz) || (state & NET::MaxVert)); connect(a, SIGNAL(triggered(bool)), this, SLOT(deMaximizeApplication())); a = menu->addAction(tr("Mi&nimize")); a->setEnabled(info.actionSupported(NET::ActionMinimize) && !(state & NET::Hidden)); connect(a, SIGNAL(triggered(bool)), this, SLOT(minimizeApplication())); if (state & NET::Shaded) { a = menu->addAction(tr("Roll down")); a->setEnabled(info.actionSupported(NET::ActionShade) && !(state & NET::Hidden)); connect(a, SIGNAL(triggered(bool)), this, SLOT(unShadeApplication())); } else { a = menu->addAction(tr("Roll up")); a->setEnabled(info.actionSupported(NET::ActionShade) && !(state & NET::Hidden)); connect(a, SIGNAL(triggered(bool)), this, SLOT(shadeApplication())); } /********** Layer menu **********/ menu->addSeparator(); QMenu* layerMenu = menu->addMenu(tr("&Layer")); a = layerMenu->addAction(tr("Always on &top")); // FIXME: There is no info.actionSupported(NET::ActionKeepAbove) a->setEnabled(!(state & NET::KeepAbove)); a->setData(NET::KeepAbove); connect(a, SIGNAL(triggered(bool)), this, SLOT(setApplicationLayer())); a = layerMenu->addAction(tr("&Normal")); a->setEnabled((state & NET::KeepAbove) || (state & NET::KeepBelow)); // FIXME: There is no 
NET::KeepNormal, so passing 0 a->setData(0); connect(a, SIGNAL(triggered(bool)), this, SLOT(setApplicationLayer())); a = layerMenu->addAction(tr("Always on &bottom")); // FIXME: There is no info.actionSupported(NET::ActionKeepBelow) a->setEnabled(!(state & NET::KeepBelow)); a->setData(NET::KeepBelow); connect(a, SIGNAL(triggered(bool)), this, SLOT(setApplicationLayer())); /********** Kill menu **********/ menu->addSeparator(); a = menu->addAction(XdgIcon::fromTheme(QStringLiteral("process-stop")), tr("&Close")); connect(a, SIGNAL(triggered(bool)), this, SLOT(closeApplication())); menu->setGeometry(mParentTaskBar->panel()->calculatePopupWindowPos(mapToGlobal(event->pos()), menu->sizeHint())); mPlugin->willShowWindow(menu); menu->show(); } /************************************************ ************************************************/ void LXQtTaskButton::setUrgencyHint(bool set) { if (mUrgencyHint == set) return; if (!set) KWindowSystem::demandAttention(mWindow, false); mUrgencyHint = set; setProperty("urgent", set); style()->unpolish(this); style()->polish(this); update(); } /************************************************ ************************************************/ bool LXQtTaskButton::isOnDesktop(int desktop) const { return KWindowInfo(mWindow, NET::WMDesktop).isOnDesktop(desktop); } bool LXQtTaskButton::isOnCurrentScreen() const { return QApplication::desktop()->screenGeometry(parentTaskBar()).intersects(KWindowInfo(mWindow, NET::WMFrameExtents).frameGeometry()); } bool LXQtTaskButton::isMinimized() const { return KWindowInfo(mWindow,NET::WMState | NET::XAWMState).isMinimized(); } Qt::Corner LXQtTaskButton::origin() const { return mOrigin; } void LXQtTaskButton::setOrigin(Qt::Corner newOrigin) { if (mOrigin != newOrigin) { mOrigin = newOrigin; update(); } } void LXQtTaskButton::setAutoRotation(bool value, ILXQtPanel::Position position) { if (value) { switch (position) { case ILXQtPanel::PositionTop: case ILXQtPanel::PositionBottom: setOrigin(Qt::TopLeftCorner); break; case ILXQtPanel::PositionLeft: setOrigin(Qt::BottomLeftCorner); break; case ILXQtPanel::PositionRight: setOrigin(Qt::TopRightCorner); break; } } else setOrigin(Qt::TopLeftCorner); } void LXQtTaskButton::paintEvent(QPaintEvent *event) { if (mOrigin == Qt::TopLeftCorner) { QToolButton::paintEvent(event); return; } QSize sz = size(); bool transpose = false; QTransform transform; switch (mOrigin) { case Qt::TopLeftCorner: break; case Qt::TopRightCorner: transform.rotate(90.0); transform.translate(0.0, -sz.width()); transpose = true; break; case Qt::BottomRightCorner: transform.rotate(180.0); transform.translate(-sz.width(), -sz.height()); break; case Qt::BottomLeftCorner: transform.rotate(270.0); transform.translate(-sz.height(), 0.0); transpose = true; break; } QStylePainter painter(this); painter.setTransform(transform); QStyleOptionToolButton opt; initStyleOption(&opt); if (transpose) opt.rect = opt.rect.transposed(); painter.drawComplexControl(QStyle::CC_ToolButton, opt); } bool LXQtTaskButton::hasDragAndDropHover() const { return mDNDTimer->isActive(); }
1
6,593
We're trying to avoid X.org-specific code, so I'm not a fan of this. Can't this be achieved in a display-server-agnostic way?
lxqt-lxqt-panel
cpp
@@ -0,0 +1,8 @@ +using System; + +namespace Collections.Core +{ + public class Class1 + { + } +}
1
1
13,557
Can this class be removed?
MvvmCross-MvvmCross
.cs
@@ -13,8 +13,10 @@ import ( "net" "strconv" + "github.com/iotexproject/iotex-core/action/protocol" + "github.com/golang/protobuf/proto" - grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" + "github.com/grpc-ecosystem/go-grpc-prometheus" "github.com/pkg/errors" "go.uber.org/zap" "google.golang.org/grpc"
1
// Copyright (c) 2019 IoTeX // This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no // warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent // permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache // License 2.0 that can be found in the LICENSE file. package api import ( "context" "encoding/hex" "math/big" "net" "strconv" "github.com/golang/protobuf/proto" grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" "github.com/pkg/errors" "go.uber.org/zap" "google.golang.org/grpc" "google.golang.org/grpc/reflection" "github.com/iotexproject/iotex-core/action" "github.com/iotexproject/iotex-core/actpool" "github.com/iotexproject/iotex-core/address" "github.com/iotexproject/iotex-core/blockchain" "github.com/iotexproject/iotex-core/blockchain/block" "github.com/iotexproject/iotex-core/config" "github.com/iotexproject/iotex-core/dispatcher" "github.com/iotexproject/iotex-core/gasstation" "github.com/iotexproject/iotex-core/indexservice" "github.com/iotexproject/iotex-core/pkg/hash" "github.com/iotexproject/iotex-core/pkg/keypair" "github.com/iotexproject/iotex-core/pkg/log" "github.com/iotexproject/iotex-core/protogen/iotexapi" "github.com/iotexproject/iotex-core/protogen/iotextypes" ) var ( // ErrInternalServer indicates the internal server error ErrInternalServer = errors.New("internal server error") // ErrReceipt indicates the error of receipt ErrReceipt = errors.New("invalid receipt") // ErrAction indicates the error of action ErrAction = errors.New("invalid action") ) // BroadcastOutbound sends a broadcast message to the whole network type BroadcastOutbound func(ctx context.Context, chainID uint32, msg proto.Message) error // Config represents the config to setup api type Config struct { broadcastHandler BroadcastOutbound } // Option is the option to override the api config type Option func(cfg *Config) error // WithBroadcastOutbound is the option to broadcast msg outbound func WithBroadcastOutbound(broadcastHandler BroadcastOutbound) Option { return func(cfg *Config) error { cfg.broadcastHandler = broadcastHandler return nil } } // Server provides api for user to query blockchain data type Server struct { bc blockchain.Blockchain dp dispatcher.Dispatcher ap actpool.ActPool gs *gasstation.GasStation broadcastHandler BroadcastOutbound cfg config.API idx *indexservice.Server grpcserver *grpc.Server } // NewServer creates a new server func NewServer( cfg config.API, chain blockchain.Blockchain, dispatcher dispatcher.Dispatcher, actPool actpool.ActPool, idx *indexservice.Server, opts ...Option, ) (*Server, error) { apiCfg := Config{} for _, opt := range opts { if err := opt(&apiCfg); err != nil { return nil, err } } if cfg == (config.API{}) { log.L().Warn("API server is not configured.") cfg = config.Default.API } svr := &Server{ bc: chain, dp: dispatcher, ap: actPool, broadcastHandler: apiCfg.broadcastHandler, cfg: cfg, idx: idx, gs: gasstation.NewGasStation(chain, cfg), } svr.grpcserver = grpc.NewServer( grpc.StreamInterceptor(grpc_prometheus.StreamServerInterceptor), grpc.UnaryInterceptor(grpc_prometheus.UnaryServerInterceptor), ) iotexapi.RegisterAPIServiceServer(svr.grpcserver, svr) grpc_prometheus.Register(svr.grpcserver) reflection.Register(svr.grpcserver) return svr, nil } // GetAccount returns the metadata of an account func (api *Server) GetAccount(ctx context.Context, in *iotexapi.GetAccountRequest) 
(*iotexapi.GetAccountResponse, error) { state, err := api.bc.StateByAddr(in.Address) if err != nil { return nil, err } pendingNonce, err := api.ap.GetPendingNonce(in.Address) if err != nil { return nil, err } accountMeta := &iotextypes.AccountMeta{ Address: in.Address, Balance: state.Balance.String(), Nonce: state.Nonce, PendingNonce: pendingNonce, } return &iotexapi.GetAccountResponse{AccountMeta: accountMeta}, nil } // GetActions returns actions func (api *Server) GetActions(ctx context.Context, in *iotexapi.GetActionsRequest) (*iotexapi.GetActionsResponse, error) { switch { case in.GetByIndex() != nil: request := in.GetByIndex() return api.getActions(request.Start, request.Count) case in.GetByHash() != nil: request := in.GetByHash() return api.getAction(request.ActionHash, request.CheckPending) case in.GetByAddr() != nil: request := in.GetByAddr() return api.getActionsByAddress(request.Address, request.Start, request.Count) case in.GetUnconfirmedByAddr() != nil: request := in.GetUnconfirmedByAddr() return api.getUnconfirmedActionsByAddress(request.Address, request.Start, request.Count) case in.GetByBlk() != nil: request := in.GetByBlk() return api.getActionsByBlock(request.BlkHash, request.Start, request.Count) default: return nil, nil } } // GetBlockMetas returns block metadata func (api *Server) GetBlockMetas(ctx context.Context, in *iotexapi.GetBlockMetasRequest) (*iotexapi.GetBlockMetasResponse, error) { switch { case in.GetByIndex() != nil: request := in.GetByIndex() return api.getBlockMetas(request.Start, request.Count) case in.GetByHash() != nil: request := in.GetByHash() return api.getBlockMeta(request.BlkHash) default: return nil, nil } } // GetChainMeta returns blockchain metadata func (api *Server) GetChainMeta(ctx context.Context, in *iotexapi.GetChainMetaRequest) (*iotexapi.GetChainMetaResponse, error) { tipHeight := api.bc.TipHeight() totalActions, err := api.bc.GetTotalActions() if err != nil { return nil, err } blockLimit := int64(api.cfg.TpsWindow) if blockLimit <= 0 { return nil, errors.Wrapf(ErrInternalServer, "block limit is %d", blockLimit) } // avoid genesis block if int64(tipHeight) < blockLimit { blockLimit = int64(tipHeight) } r, err := api.getBlockMetas(tipHeight, uint64(blockLimit)) if err != nil { return nil, err } blks := r.BlkMetas if len(blks) == 0 { return nil, errors.New("get 0 blocks! not able to calculate aps") } epoch, err := api.getEpochData(tipHeight) if err != nil { return nil, err } timeDuration := blks[0].Timestamp - blks[len(blks)-1].Timestamp // if time duration is less than 1 second, we set it to be 1 second if timeDuration == 0 { timeDuration = 1 } tps := int64(totalActions) / timeDuration chainMeta := &iotextypes.ChainMeta{ Height: tipHeight, Epoch: epoch, Supply: blockchain.Gen.TotalSupply.String(), NumActions: int64(totalActions), Tps: tps, } return &iotexapi.GetChainMetaResponse{ChainMeta: chainMeta}, nil } // SendAction is the API to send an action to blockchain. 
func (api *Server) SendAction(ctx context.Context, in *iotexapi.SendActionRequest) (res *iotexapi.SendActionResponse, err error) { log.L().Debug("receive send action request") // broadcast to the network if err = api.broadcastHandler(context.Background(), api.bc.ChainID(), in.Action); err != nil { log.L().Warn("Failed to broadcast SendAction request.", zap.Error(err)) } // send to actpool via dispatcher api.dp.HandleBroadcast(context.Background(), api.bc.ChainID(), in.Action) return &iotexapi.SendActionResponse{}, nil } // GetReceiptByAction gets receipt with corresponding action hash func (api *Server) GetReceiptByAction(ctx context.Context, in *iotexapi.GetReceiptByActionRequest) (*iotexapi.GetReceiptByActionResponse, error) { actHash, err := toHash256(in.ActionHash) if err != nil { return nil, err } receipt, err := api.bc.GetReceiptByActionHash(actHash) if err != nil { return nil, err } return &iotexapi.GetReceiptByActionResponse{Receipt: receipt.ConvertToReceiptPb()}, nil } // ReadContract reads the state in a contract address specified by the slot func (api *Server) ReadContract(ctx context.Context, in *iotexapi.ReadContractRequest) (*iotexapi.ReadContractResponse, error) { log.L().Debug("receive read smart contract request") selp := &action.SealedEnvelope{} if err := selp.LoadProto(in.Action); err != nil { return nil, err } sc, ok := selp.Action().(*action.Execution) if !ok { return nil, errors.New("not execution") } callerPKHash := keypair.HashPubKey(selp.SrcPubkey()) callerAddr, err := address.FromBytes(callerPKHash[:]) if err != nil { return nil, err } res, err := api.bc.ExecuteContractRead(callerAddr, sc) if err != nil { return nil, err } return &iotexapi.ReadContractResponse{Data: hex.EncodeToString(res.ReturnValue)}, nil } // SuggestGasPrice suggests gas price func (api *Server) SuggestGasPrice(ctx context.Context, in *iotexapi.SuggestGasPriceRequest) (*iotexapi.SuggestGasPriceResponse, error) { suggestPrice, err := api.gs.SuggestGasPrice() if err != nil { return nil, err } return &iotexapi.SuggestGasPriceResponse{GasPrice: suggestPrice}, nil } // EstimateGasForAction estimates gas for action func (api *Server) EstimateGasForAction(ctx context.Context, in *iotexapi.EstimateGasForActionRequest) (*iotexapi.EstimateGasForActionResponse, error) { estimateGas, err := api.gs.EstimateGasForAction(in.Action) if err != nil { return nil, err } return &iotexapi.EstimateGasForActionResponse{Gas: estimateGas}, nil } // Start starts the API server func (api *Server) Start() error { portStr := ":" + strconv.Itoa(api.cfg.Port) lis, err := net.Listen("tcp", portStr) if err != nil { log.L().Error("API server failed to listen.", zap.Error(err)) return errors.Wrap(err, "API server failed to listen") } log.L().Info("API server is listening.", zap.String("addr", lis.Addr().String())) go func() { if err := api.grpcserver.Serve(lis); err != nil { log.L().Fatal("Node failed to serve.", zap.Error(err)) } }() return nil } // Stop stops the API server func (api *Server) Stop() error { api.grpcserver.Stop() log.L().Info("API server stops.") return nil } // GetActions returns actions within the range func (api *Server) getActions(start uint64, count uint64) (*iotexapi.GetActionsResponse, error) { var res []*iotextypes.Action var actionCount uint64 tipHeight := api.bc.TipHeight() for height := int64(tipHeight); height >= 0; height-- { blk, err := api.bc.GetBlockByHeight(uint64(height)) if err != nil { return nil, err } selps := blk.Actions for i := len(selps) - 1; i >= 0; i-- { actionCount++ if actionCount 
<= start { continue } if uint64(len(res)) >= count { return &iotexapi.GetActionsResponse{Actions: res}, nil } res = append(res, selps[i].Proto()) } } return &iotexapi.GetActionsResponse{Actions: res}, nil } // getAction returns action by action hash func (api *Server) getAction(actionHash string, checkPending bool) (*iotexapi.GetActionsResponse, error) { actHash, err := toHash256(actionHash) if err != nil { return nil, err } actPb, err := getAction(api.bc, api.ap, actHash, checkPending) if err != nil { return nil, err } return &iotexapi.GetActionsResponse{Actions: []*iotextypes.Action{actPb}}, nil } // getActionsByAddress returns all actions associated with an address func (api *Server) getActionsByAddress(address string, start uint64, count uint64) (*iotexapi.GetActionsResponse, error) { var res []*iotextypes.Action var actions []hash.Hash256 if api.cfg.UseRDS { actionHistory, err := api.idx.Indexer().GetIndexHistory(config.IndexAction, address) if err != nil { return nil, err } actions = append(actions, actionHistory...) } else { actionsFromAddress, err := api.bc.GetActionsFromAddress(address) if err != nil { return nil, err } actionsToAddress, err := api.bc.GetActionsToAddress(address) if err != nil { return nil, err } actionsFromAddress = append(actionsFromAddress, actionsToAddress...) actions = append(actions, actionsFromAddress...) } var actionCount uint64 for i := len(actions) - 1; i >= 0; i-- { actionCount++ if actionCount <= start { continue } if uint64(len(res)) >= count { break } actPb, err := getAction(api.bc, api.ap, actions[i], false) if err != nil { return nil, err } res = append(res, actPb) } return &iotexapi.GetActionsResponse{Actions: res}, nil } // getUnconfirmedActionsByAddress returns all unconfirmed actions in actpool associated with an address func (api *Server) getUnconfirmedActionsByAddress(address string, start uint64, count uint64) (*iotexapi.GetActionsResponse, error) { var res []*iotextypes.Action var actionCount uint64 selps := api.ap.GetUnconfirmedActs(address) for i := len(selps) - 1; i >= 0; i-- { actionCount++ if actionCount <= start { continue } if uint64(len(res)) >= count { break } res = append(res, selps[i].Proto()) } return &iotexapi.GetActionsResponse{Actions: res}, nil } // getActionsByBlock returns all actions in a block func (api *Server) getActionsByBlock(blkHash string, start uint64, count uint64) (*iotexapi.GetActionsResponse, error) { var res []*iotextypes.Action hash, err := toHash256(blkHash) if err != nil { return nil, err } blk, err := api.bc.GetBlockByHash(hash) if err != nil { return nil, err } selps := blk.Actions var actionCount uint64 for i := len(selps) - 1; i >= 0; i-- { actionCount++ if actionCount <= start { continue } if uint64(len(res)) >= count { break } res = append(res, selps[i].Proto()) } return &iotexapi.GetActionsResponse{Actions: res}, nil } // getBlockMetas gets block within the height range func (api *Server) getBlockMetas(start uint64, number uint64) (*iotexapi.GetBlockMetasResponse, error) { var res []*iotextypes.BlockMeta startHeight := api.bc.TipHeight() var blkCount uint64 for height := int(startHeight); height >= 0; height-- { blkCount++ if blkCount <= start { continue } if uint64(len(res)) >= number { break } blk, err := api.bc.GetBlockByHeight(uint64(height)) if err != nil { return nil, err } blockHeaderPb := blk.ConvertToBlockHeaderPb() hash := blk.HashBlock() txRoot := blk.TxRoot() receiptRoot := blk.ReceiptRoot() deltaStateDigest := blk.DeltaStateDigest() transferAmount := getTranferAmountInBlock(blk) 
blockMeta := &iotextypes.BlockMeta{ Hash: hex.EncodeToString(hash[:]), Height: blk.Height(), Timestamp: blockHeaderPb.GetTimestamp().GetSeconds(), NumActions: int64(len(blk.Actions)), ProducerAddress: blk.ProducerAddress(), TransferAmount: transferAmount.String(), TxRoot: hex.EncodeToString(txRoot[:]), ReceiptRoot: hex.EncodeToString(receiptRoot[:]), DeltaStateDigest: hex.EncodeToString(deltaStateDigest[:]), } res = append(res, blockMeta) } return &iotexapi.GetBlockMetasResponse{BlkMetas: res}, nil } // getBlockMeta returns block by block hash func (api *Server) getBlockMeta(blkHash string) (*iotexapi.GetBlockMetasResponse, error) { hash, err := toHash256(blkHash) if err != nil { return nil, err } blk, err := api.bc.GetBlockByHash(hash) if err != nil { return nil, err } blkHeaderPb := blk.ConvertToBlockHeaderPb() txRoot := blk.TxRoot() receiptRoot := blk.ReceiptRoot() deltaStateDigest := blk.DeltaStateDigest() transferAmount := getTranferAmountInBlock(blk) blockMeta := &iotextypes.BlockMeta{ Hash: blkHash, Height: blk.Height(), Timestamp: blkHeaderPb.GetTimestamp().GetSeconds(), NumActions: int64(len(blk.Actions)), ProducerAddress: blk.ProducerAddress(), TransferAmount: transferAmount.String(), TxRoot: hex.EncodeToString(txRoot[:]), ReceiptRoot: hex.EncodeToString(receiptRoot[:]), DeltaStateDigest: hex.EncodeToString(deltaStateDigest[:]), } return &iotexapi.GetBlockMetasResponse{BlkMetas: []*iotextypes.BlockMeta{blockMeta}}, nil } // getEpochData is the API to get epoch data func (api *Server) getEpochData(height uint64) (*iotextypes.EpochData, error) { if height == 0 { return nil, errors.New("epoch data is not available to block 0") } // TODO: fill with real epoch data return &iotextypes.EpochData{ Num: 0, Height: 0, BeaconChainHeight: 0, }, nil } func toHash256(hashString string) (hash.Hash256, error) { bytes, err := hex.DecodeString(hashString) if err != nil { return hash.ZeroHash256, err } var hash hash.Hash256 copy(hash[:], bytes) return hash, nil } func getAction(bc blockchain.Blockchain, ap actpool.ActPool, actHash hash.Hash256, checkPending bool) (*iotextypes.Action, error) { var selp action.SealedEnvelope var err error if selp, err = bc.GetActionByActionHash(actHash); err != nil { if checkPending { // Try to fetch pending action from actpool selp, err = ap.GetActionByHash(actHash) } } if err != nil { return nil, err } return selp.Proto(), nil } func getTranferAmountInBlock(blk *block.Block) *big.Int { totalAmount := big.NewInt(0) for _, selp := range blk.Actions { transfer, ok := selp.Action().(*action.Transfer) if !ok { continue } totalAmount.Add(totalAmount, transfer.Amount()) } return totalAmount }
1
15,285
File is not `goimports`-ed (from `goimports`)
iotexproject-iotex-core
go
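A note on the `goimports` complaint in the record above: `goimports` sorts imports alphabetically and separates the standard library from module imports with a blank line, which is what the flagged hunk appears to violate. Below is a minimal, compilable sketch of the expected grouping; the `wrapCtxErr` function is a hypothetical filler added only so the imports are used and the file compiles, not part of the original api.go.

package api

import (
	// Standard-library imports first, sorted alphabetically.
	"context"
	"fmt"

	// Third-party modules in a separate, sorted group. Aliases such as
	// grpc_prometheus in the original file are kept where they are needed.
	"github.com/pkg/errors"
)

// wrapCtxErr exists only so this example compiles with its imports used.
func wrapCtxErr(ctx context.Context) error {
	return errors.Wrap(ctx.Err(), fmt.Sprintf("request on %T ended early", ctx))
}

Running `goimports -w` on a file produces this grouping automatically, which is why the linter flags hand-edited import blocks like the one in the patch.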
@@ -72,9 +72,7 @@ type Config struct { // Logger provides a logger for the dispatcher. The default logger is a // no-op. - // TODO(shah): Export this when we're ready to deploy a branch in - // demo-yarpc-go. - logger *zap.Logger + Logger *zap.Logger } // Inbounds contains a list of inbound transports. Each inbound transport
1
// Copyright (c) 2017 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. package yarpc import ( "fmt" "sync" "go.uber.org/yarpc/api/middleware" "go.uber.org/yarpc/api/transport" "go.uber.org/yarpc/internal" "go.uber.org/yarpc/internal/clientconfig" "go.uber.org/yarpc/internal/inboundmiddleware" "go.uber.org/yarpc/internal/observerware" "go.uber.org/yarpc/internal/outboundmiddleware" "go.uber.org/yarpc/internal/request" intsync "go.uber.org/yarpc/internal/sync" "github.com/opentracing/opentracing-go" "go.uber.org/multierr" "go.uber.org/zap" ) // Config specifies the parameters of a new Dispatcher constructed via // NewDispatcher. type Config struct { // Name of the service. This is the name used by other services when // making requests to this service. Name string // Inbounds define how this service receives incoming requests from other // services. // // This may be nil if this service does not receive any requests. Inbounds Inbounds // Outbounds defines how this service makes requests to other services. // // This may be nil if this service does not send any requests. Outbounds Outbounds // Inbound and Outbound Middleware that will be applied to all incoming // and outgoing requests respectively. // // These may be nil if there is no middleware to apply. InboundMiddleware InboundMiddleware OutboundMiddleware OutboundMiddleware // Tracer is deprecated. The dispatcher does nothing with this propery. Tracer opentracing.Tracer // RouterMiddleware is middleware to control how requests are routed. RouterMiddleware middleware.Router // Logger provides a logger for the dispatcher. The default logger is a // no-op. // TODO(shah): Export this when we're ready to deploy a branch in // demo-yarpc-go. logger *zap.Logger } // Inbounds contains a list of inbound transports. Each inbound transport // specifies a source through which incoming requests are received. type Inbounds []transport.Inbound // Outbounds provides access to outbounds for a remote service. Outbounds // define how requests are sent from this service to the remote service. type Outbounds map[string]transport.Outbounds // OutboundMiddleware contains the different types of outbound middlewares. type OutboundMiddleware struct { Unary middleware.UnaryOutbound Oneway middleware.OnewayOutbound } // InboundMiddleware contains the different types of inbound middlewares. 
type InboundMiddleware struct { Unary middleware.UnaryInbound Oneway middleware.OnewayInbound } // RouterMiddleware wraps the Router middleware type RouterMiddleware middleware.Router // NewDispatcher builds a new Dispatcher using the specified Config. At // minimum, a service name must be specified. // // Invalid configurations or errors in constructing the Dispatcher will cause // panics. func NewDispatcher(cfg Config) *Dispatcher { if cfg.Name == "" { panic("yarpc.NewDispatcher expects a service name") } if err := internal.ValidateServiceName(cfg.Name); err != nil { panic("yarpc.NewDispatcher expects a valid service name: %s" + err.Error()) } logger := zap.NewNop() if cfg.logger != nil { logger = cfg.logger.Named("yarpc").With( zap.Namespace("yarpc"), // isolate yarpc's keys zap.String("dispatcher", cfg.Name), ) cfg = addObservingMiddleware(cfg, logger) } return &Dispatcher{ name: cfg.Name, table: middleware.ApplyRouteTable(NewMapRouter(cfg.Name), cfg.RouterMiddleware), inbounds: cfg.Inbounds, outbounds: convertOutbounds(cfg.Outbounds, cfg.OutboundMiddleware), transports: collectTransports(cfg.Inbounds, cfg.Outbounds), inboundMiddleware: cfg.InboundMiddleware, log: logger, } } func addObservingMiddleware(cfg Config, logger *zap.Logger) Config { unary := observerware.NewUnary(logger, observerware.NewNopContextExtractor()) cfg.InboundMiddleware.Unary = inboundmiddleware.UnaryChain(unary, cfg.InboundMiddleware.Unary) cfg.OutboundMiddleware.Unary = outboundmiddleware.UnaryChain(unary, cfg.OutboundMiddleware.Unary) return cfg } // convertOutbounds applys outbound middleware and creates validator outbounds func convertOutbounds(outbounds Outbounds, mw OutboundMiddleware) Outbounds { outboundSpecs := make(Outbounds, len(outbounds)) for outboundKey, outs := range outbounds { if outs.Unary == nil && outs.Oneway == nil { panic(fmt.Sprintf("no outbound set for outbound key %q in dispatcher", outboundKey)) } var ( unaryOutbound transport.UnaryOutbound onewayOutbound transport.OnewayOutbound ) serviceName := outboundKey // apply outbound middleware and create ValidatorOutbounds if outs.Unary != nil { unaryOutbound = middleware.ApplyUnaryOutbound(outs.Unary, mw.Unary) unaryOutbound = request.UnaryValidatorOutbound{UnaryOutbound: unaryOutbound} } if outs.Oneway != nil { onewayOutbound = middleware.ApplyOnewayOutbound(outs.Oneway, mw.Oneway) onewayOutbound = request.OnewayValidatorOutbound{OnewayOutbound: onewayOutbound} } if outs.ServiceName != "" { serviceName = outs.ServiceName } outboundSpecs[outboundKey] = transport.Outbounds{ ServiceName: serviceName, Unary: unaryOutbound, Oneway: onewayOutbound, } } return outboundSpecs } // collectTransports iterates over all inbounds and outbounds and collects all // of their unique underlying transports. Multiple inbounds and outbounds may // share a transport, and we only want the dispatcher to manage their lifecycle // once. func collectTransports(inbounds Inbounds, outbounds Outbounds) []transport.Transport { // Collect all unique transports from inbounds and outbounds. 
transports := make(map[transport.Transport]struct{}) for _, inbound := range inbounds { for _, transport := range inbound.Transports() { transports[transport] = struct{}{} } } for _, outbound := range outbounds { if unary := outbound.Unary; unary != nil { for _, transport := range unary.Transports() { transports[transport] = struct{}{} } } if oneway := outbound.Oneway; oneway != nil { for _, transport := range oneway.Transports() { transports[transport] = struct{}{} } } } keys := make([]transport.Transport, 0, len(transports)) for key := range transports { keys = append(keys, key) } return keys } // Dispatcher encapsulates a YARPC application. It acts as the entry point to // send and receive YARPC requests in a transport and encoding agnostic way. type Dispatcher struct { table transport.RouteTable name string inbounds Inbounds outbounds Outbounds transports []transport.Transport inboundMiddleware InboundMiddleware // TODO (shah): add a *pally.Registry too. log *zap.Logger } // Inbounds returns a copy of the list of inbounds for this RPC object. // // The Inbounds will be returned in the same order that was used in the // configuration. func (d *Dispatcher) Inbounds() Inbounds { inbounds := make(Inbounds, len(d.inbounds)) copy(inbounds, d.inbounds) return inbounds } // ClientConfig provides the configuration needed to talk to the given // service through an outboundKey. This configuration may be directly // passed into encoding-specific RPC clients. // // keyvalueClient := json.New(dispatcher.ClientConfig("keyvalue")) // // This function panics if the outboundKey is not known. func (d *Dispatcher) ClientConfig(outboundKey string) transport.ClientConfig { if rs, ok := d.outbounds[outboundKey]; ok { return clientconfig.MultiOutbound(d.name, rs.ServiceName, rs) } panic(noOutboundForOutboundKey{OutboundKey: outboundKey}) } // Register registers zero or more procedures with this dispatcher. Incoming // requests to these procedures will be routed to the handlers specified in // the given Procedures. func (d *Dispatcher) Register(rs []transport.Procedure) { procedures := make([]transport.Procedure, 0, len(rs)) for _, r := range rs { switch r.HandlerSpec.Type() { case transport.Unary: h := middleware.ApplyUnaryInbound(r.HandlerSpec.Unary(), d.inboundMiddleware.Unary) r.HandlerSpec = transport.NewUnaryHandlerSpec(h) case transport.Oneway: h := middleware.ApplyOnewayInbound(r.HandlerSpec.Oneway(), d.inboundMiddleware.Oneway) r.HandlerSpec = transport.NewOnewayHandlerSpec(h) default: panic(fmt.Sprintf("unknown handler type %q for service %q, procedure %q", r.HandlerSpec.Type(), r.Service, r.Name)) } procedures = append(procedures, r) d.log.Info("Registration succeeded.", zap.Object("procedure", r)) } d.table.Register(procedures) } // Start starts the Dispatcher, allowing it to accept and processing new // incoming requests. // // This starts all inbounds and outbounds configured on this Dispatcher. // // This function returns immediately after everything has been started. // Servers should add a `select {}` to block to process all incoming requests. // // if err := dispatcher.Start(); err != nil { // log.Fatal(err) // } // defer dispatcher.Stop() // // select {} func (d *Dispatcher) Start() error { // NOTE: These MUST be started in the order transports, outbounds, and // then inbounds. // // If the outbounds are started before the transports, we might get a // network request before the transports are ready. 
// // If the inbounds are started before the outbounds, an inbound request // might result in an outbound call before the outbound is ready. var ( mu sync.Mutex allStarted []transport.Lifecycle ) d.log.Info("Starting up.") start := func(s transport.Lifecycle) func() error { return func() error { if s == nil { return nil } if err := s.Start(); err != nil { return err } mu.Lock() allStarted = append(allStarted, s) mu.Unlock() return nil } } abort := func(errs []error) error { // Failed to start so stop everything that was started. wait := intsync.ErrorWaiter{} for _, s := range allStarted { wait.Submit(s.Stop) } if newErrors := wait.Wait(); len(newErrors) > 0 { errs = append(errs, newErrors...) } return multierr.Combine(errs...) } // Set router for all inbounds for _, i := range d.inbounds { i.SetRouter(d.table) } d.log.Debug("Set router for inbounds.") // Start Transports wait := intsync.ErrorWaiter{} d.log.Debug("Starting transports.") for _, t := range d.transports { wait.Submit(start(t)) } if errs := wait.Wait(); len(errs) != 0 { return abort(errs) } d.log.Debug("Started transports.") // Start Outbounds wait = intsync.ErrorWaiter{} d.log.Debug("Starting outbounds.") for _, o := range d.outbounds { wait.Submit(start(o.Unary)) wait.Submit(start(o.Oneway)) } if errs := wait.Wait(); len(errs) != 0 { return abort(errs) } d.log.Debug("Started outbounds.") // Start Inbounds wait = intsync.ErrorWaiter{} d.log.Debug("Starting inbounds.") for _, i := range d.inbounds { wait.Submit(start(i)) } if errs := wait.Wait(); len(errs) != 0 { return abort(errs) } d.log.Debug("Started inbounds.") d.log.Debug("Registering debug pages.") addDispatcherToDebugPages(d) d.log.Debug("Registered debug pages.") d.log.Info("Started up.") return nil } // Stop stops the Dispatcher. // // This stops all outbounds and inbounds owned by this Dispatcher. // // This function returns after everything has been stopped. func (d *Dispatcher) Stop() error { // NOTE: These MUST be stopped in the order inbounds, outbounds, and then // transports. // // If the outbounds are stopped before the inbounds, we might receive a // request which needs to use a stopped outbound from a still-going // inbound. // // If the transports are stopped before the outbounds, the peers contained // in the outbound might be deleted from the transport's perspective and // cause issues. var allErrs []error d.log.Info("Starting shutdown.") // Stop Inbounds d.log.Debug("Stopping inbounds.") wait := intsync.ErrorWaiter{} for _, i := range d.inbounds { wait.Submit(i.Stop) } if errs := wait.Wait(); len(errs) > 0 { allErrs = append(allErrs, errs...) } d.log.Debug("Stopped inbounds.") // Stop Outbounds d.log.Debug("Stopping outbounds.") wait = intsync.ErrorWaiter{} for _, o := range d.outbounds { if o.Unary != nil { wait.Submit(o.Unary.Stop) } if o.Oneway != nil { wait.Submit(o.Oneway.Stop) } } if errs := wait.Wait(); len(errs) > 0 { allErrs = append(allErrs, errs...) } d.log.Debug("Stopped outbounds.") // Stop Transports d.log.Debug("Stopping transports.") wait = intsync.ErrorWaiter{} for _, t := range d.transports { wait.Submit(t.Stop) } if errs := wait.Wait(); len(errs) > 0 { allErrs = append(allErrs, errs...) } d.log.Debug("Stopped transports.") if err := multierr.Combine(allErrs...); err != nil { return err } d.log.Debug("Unregistering debug pages.") removeDispatcherFromDebugPages(d) d.log.Debug("Unregistered debug pages.") d.log.Info("Completed shutdown.") return nil } // Router returns the procedure router. 
func (d *Dispatcher) Router() transport.Router { return d.table } // Name returns the name of the dispatcher. func (d *Dispatcher) Name() string { return d.name }
1
13,235
To minimize the possibility of regret, perhaps we should name this `ZapLogger`.
yarpc-yarpc-go
go
@@ -40,8 +40,9 @@ IntercomRails.config do |config| user_hash: Proc.new { |current_user| OpenSSL::HMAC.hexdigest("sha256", ENV['INTERCOM_API_SECRET'], current_user.email) }, - - :plan => Proc.new { |current_user| current_user.plan_name }, + plan: :plan_name, + has_logged_in_to_forum: :has_logged_in_to_forum?, + has_active_subscription: :has_active_subscription? } # == User -> Company association
1
IntercomRails.config do |config| # == Intercom app_id # config.app_id = ENV["INTERCOM_APP_ID"] # == Intercom secret key # This is required to enable secure mode, you can find it on your Intercom # "security" configuration page. # config.api_secret = ENV['INTERCOM_API_SECRET'] # == Intercom API Key # This is required for some Intercom rake tasks like importing your users; # you can generate one at https://www.intercom.io/apps/api_keys. # config.api_key = ENV['INTERCOM_API_KEY'] # == Enabled Environments # Which environments is auto inclusion of the Javascript enabled for # config.enabled_environments = ["staging", "production"] # == Current user method/variable # The method/variable that contains the logged in user in your controllers. # If it is `current_user` or `@user`, then you can ignore this # # config.user.current = Proc.new { current_user } # == User model class # The class which defines your user model # # config.user.model = Proc.new { User } # == User Custom Data # A hash of additional data you wish to send about your users. # You can provide either a method name which will be sent to the current # user object, or a Proc which will be passed the current user. # config.user.custom_data = { user_hash: Proc.new { |current_user| OpenSSL::HMAC.hexdigest("sha256", ENV['INTERCOM_API_SECRET'], current_user.email) }, :plan => Proc.new { |current_user| current_user.plan_name }, } # == User -> Company association # A Proc that given a user returns an array of companies # that the user belongs to. # # config.user.company_association = Proc.new { |user| user.companies.to_a } # config.user.company_association = Proc.new { |user| [user.company] } # == Current company method/variable # The method/variable that contains the current company for the current user, # in your controllers. 'Companies' are generic groupings of users, so this # could be a company, app or group. # # config.company.current = Proc.new { current_company } # == Company Custom Data # A hash of additional data you wish to send about a company. # This works the same as User custom data above. # # config.company.custom_data = { # :number_of_messages => Proc.new { |app| app.messages.count }, # :is_interesting => :is_interesting? # } # == Company Plan name # This is the name of the plan a company is currently paying (or not paying) for. # e.g. Messaging, Free, Pro, etc. # # config.company.plan = Proc.new { |current_company| current_company.plan.name } # == Company Monthly Spend # This is the amount the company spends each month on your app. If your company # has a plan, it will set the 'total value' of that plan appropriately. # # config.company.monthly_spend = Proc.new { |current_company| current_company.plan.price } # config.company.monthly_spend = Proc.new { |current_company| (current_company.plan.price - current_company.subscription.discount) } # == Inbox Style # This enables the Intercom inbox which allows your users to read their # past conversations with your app, as well as start new ones. It is # disabled by default. # * :default shows a small tab with a question mark icon on it # * :custom attaches the inbox open event to an anchor with an # id of #Intercom. # # config.inbox.style = :default # config.inbox.style = :custom end
1
8,092
This looks a little weird, but it's a feature of the gem we're using. You can give it a symbol representing the method you want called on current_user.
thoughtbot-upcase
rb
@@ -20,13 +20,11 @@ # ---------------------------------------------------------------------- """ -Template file used by the OPF Experiment Generator to generate the actual -description.py file by replacing $XXXXXXXX tokens with desired values. - -This description.py file was generated by: -'~/nupic/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/ExpGenerator.py' +Anomaly hotgym example """ +from nupic.algorithms.anomaly import Anomaly + from nupic.frameworks.opf.expdescriptionapi import ExperimentDescriptionAPI from nupic.frameworks.opf.expdescriptionhelpers import (
1
# ---------------------------------------------------------------------- # Numenta Platform for Intelligent Computing (NuPIC) # Copyright (C) 2013, Numenta, Inc. Unless you have an agreement # with Numenta, Inc., for a separate license for this software code, the # following terms and conditions apply: # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License version 3 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # See the GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see http://www.gnu.org/licenses. # # http://numenta.org/licenses/ # ---------------------------------------------------------------------- """ Template file used by the OPF Experiment Generator to generate the actual description.py file by replacing $XXXXXXXX tokens with desired values. This description.py file was generated by: '~/nupic/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/ExpGenerator.py' """ from nupic.frameworks.opf.expdescriptionapi import ExperimentDescriptionAPI from nupic.frameworks.opf.expdescriptionhelpers import ( updateConfigFromSubConfig, applyValueGettersToContainer, DeferredDictLookup) from nupic.frameworks.opf.clamodelcallbacks import * from nupic.frameworks.opf.metrics import MetricSpec from nupic.frameworks.opf.opfutils import (InferenceType, InferenceElement) from nupic.support import aggregationDivide from nupic.frameworks.opf.opftaskdriver import ( IterationPhaseSpecLearnOnly, IterationPhaseSpecInferOnly, IterationPhaseSpecLearnAndInfer) # Model Configuration Dictionary: # # Define the model parameters and adjust for any modifications if imported # from a sub-experiment. # # These fields might be modified by a sub-experiment; this dict is passed # between the sub-experiment and base experiment # # # NOTE: Use of DEFERRED VALUE-GETTERs: dictionary fields and list elements # within the config dictionary may be assigned futures derived from the # ValueGetterBase class, such as DeferredDictLookup. # This facility is particularly handy for enabling substitution of values in # the config dictionary from other values in the config dictionary, which is # needed by permutation.py-based experiments. These values will be resolved # during the call to applyValueGettersToContainer(), # which we call after the base experiment's config dictionary is updated from # the sub-experiment. See ValueGetterBase and # DeferredDictLookup for more details about value-getters. # # For each custom encoder parameter to be exposed to the sub-experiment/ # permutation overrides, define a variable in this section, using key names # beginning with a single underscore character to avoid collisions with # pre-defined keys (e.g., _dsEncoderFieldName2_N). # # Example: # config = dict( # _dsEncoderFieldName2_N = 70, # _dsEncoderFieldName2_W = 5, # dsEncoderSchema = [ # base=dict( # fieldname='Name2', type='ScalarEncoder', # name='Name2', minval=0, maxval=270, clipInput=True, # n=DeferredDictLookup('_dsEncoderFieldName2_N'), # w=DeferredDictLookup('_dsEncoderFieldName2_W')), # ], # ) # updateConfigFromSubConfig(config) # applyValueGettersToContainer(config) config = { # Type of model that the rest of these parameters apply to. 
'model': "CLA", # Version that specifies the format of the config. 'version': 1, # Intermediate variables used to compute fields in modelParams and also # referenced from the control section. 'aggregationInfo': { 'days': 0, 'fields': [('consumption', 'sum')], 'hours': 1, 'microseconds': 0, 'milliseconds': 0, 'minutes': 0, 'months': 0, 'seconds': 0, 'weeks': 0, 'years': 0}, 'predictAheadTime': None, # Model parameter dictionary. 'modelParams': { # The type of inference that this model will perform 'inferenceType': 'TemporalAnomaly', 'sensorParams': { # Sensor diagnostic output verbosity control; # if > 0: sensor region will print out on screen what it's sensing # at each step 0: silent; >=1: some info; >=2: more info; # >=3: even more info (see compute() in py/regions/RecordSensor.py) 'verbosity' : 0, # Example: # dsEncoderSchema = [ # DeferredDictLookup('__field_name_encoder'), # ], # # (value generated from DS_ENCODER_SCHEMA) 'encoders': { 'consumption': { 'clipInput': True, 'fieldname': u'consumption', 'n': 100, 'name': u'consumption', 'type': 'AdaptiveScalarEncoder', 'w': 21}, 'timestamp_dayOfWeek': { 'dayOfWeek': (21, 1), 'fieldname': u'timestamp', 'name': u'timestamp_dayOfWeek', 'type': 'DateEncoder'}, 'timestamp_timeOfDay': { 'fieldname': u'timestamp', 'name': u'timestamp_timeOfDay', 'timeOfDay': (21, 1), 'type': 'DateEncoder'}, 'timestamp_weekend': { 'fieldname': u'timestamp', 'name': u'timestamp_weekend', 'type': 'DateEncoder', 'weekend': 21}}, # A dictionary specifying the period for automatically-generated # resets from a RecordSensor; # # None = disable automatically-generated resets (also disabled if # all of the specified values evaluate to 0). # Valid keys is the desired combination of the following: # days, hours, minutes, seconds, milliseconds, microseconds, weeks # # Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12), # # (value generated from SENSOR_AUTO_RESET) 'sensorAutoReset' : None, }, 'spEnable': True, 'spParams': { # SP diagnostic output verbosity control; # 0: silent; >=1: some info; >=2: more info; 'spVerbosity' : 0, 'globalInhibition': 1, # Number of cell columns in the cortical region (same number for # SP and TP) # (see also tpNCellsPerCol) 'columnCount': 2048, 'inputWidth': 0, # SP inhibition control (absolute value); # Maximum number of active columns in the SP region's output (when # there are more, the weaker ones are suppressed) 'numActivePerInhArea': 40, 'seed': 1956, # coincInputPoolPct # What percent of the columns's receptive field is available # for potential synapses. At initialization time, we will # choose coincInputPoolPct * (2*coincInputRadius+1)^2 'coincInputPoolPct': 0.5, # The default connected threshold. Any synapse whose # permanence value is above the connected threshold is # a "connected synapse", meaning it can contribute to the # cell's firing. Typical value is 0.10. Cells whose activity # level before inhibition falls below minDutyCycleBeforeInh # will have their own internal synPermConnectedCell # threshold set below this default value. # (This concept applies to both SP and TP and so 'cells' # is correct here as opposed to 'columns') 'synPermConnected': 0.1, 'synPermActiveInc': 0.1, 'synPermInactiveDec': 0.01, 'randomSP': 1 }, # Controls whether TP is enabled or disabled; # TP is necessary for making temporal predictions, such as predicting # the next inputs. Without TP, the model is only capable of # reconstructing missing sensor inputs (via SP). 
'tpEnable' : True, 'tpParams': { # TP diagnostic output verbosity control; # 0: silent; [1..6]: increasing levels of verbosity # (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py) 'verbosity': 0, # Number of cell columns in the cortical region (same number for # SP and TP) # (see also tpNCellsPerCol) 'columnCount': 2048, # The number of cells (i.e., states), allocated per column. 'cellsPerColumn': 32, 'inputWidth': 2048, 'seed': 1960, # Temporal Pooler implementation selector (see _getTPClass in # CLARegion.py). 'temporalImp': 'cpp', # New Synapse formation count # NOTE: If None, use spNumActivePerInhArea # # TODO: need better explanation 'newSynapseCount': 20, # Maximum number of synapses per segment # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # # TODO: for Ron: once the appropriate value is placed in TP # constructor, see if we should eliminate this parameter from # description.py. 'maxSynapsesPerSegment': 32, # Maximum number of segments per cell # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # # TODO: for Ron: once the appropriate value is placed in TP # constructor, see if we should eliminate this parameter from # description.py. 'maxSegmentsPerCell': 128, # Initial Permanence # TODO: need better explanation 'initialPerm': 0.21, # Permanence Increment 'permanenceInc': 0.1, # Permanence Decrement # If set to None, will automatically default to tpPermanenceInc # value. 'permanenceDec' : 0.1, 'globalDecay': 0.0, 'maxAge': 0, # Minimum number of active synapses for a segment to be considered # during search for the best-matching segments. # None=use default # Replaces: tpMinThreshold 'minThreshold': 12, # Segment activation threshold. # A segment is active if it has >= tpSegmentActivationThreshold # connected synapses that are active due to infActiveState # None=use default # Replaces: tpActivationThreshold 'activationThreshold': 12, 'outputType': 'normal', # "Pay Attention Mode" length. This tells the TP how many new # elements to append to the end of a learned sequence at a time. # Smaller values are better for datasets with short sequences, # higher values are better for datasets with long sequences. 'pamLength': 1, }, 'clParams': { # Classifier implementation selection. 'implementation': 'cpp', 'regionName' : 'CLAClassifierRegion', # Classifier diagnostic output verbosity control; # 0: silent; [1..6]: increasing levels of verbosity 'clVerbosity' : 0, # This controls how fast the classifier learns/forgets. Higher values # make it adapt faster and forget older patterns faster. 'alpha': 0.0001, # This is set after the call to updateConfigFromSubConfig and is # computed from the aggregationInfo and predictAheadTime. 'steps': '1,5', }, 'anomalyParams': { 'mode': 'likelihood', # pure(=default) / weighted / likelihood 'slidingWindowSize': 5, # >=0 / None }, 'trainSPNetOnlyIfRequested': False, }, 'predictionSteps': [1, 5], 'predictedField': 'consumption', 'numRecords': 4000, } # end of config dictionary # Adjust base config dictionary for any modifications if imported from a # sub-experiment updateConfigFromSubConfig(config) # Compute predictionSteps based on the predictAheadTime and the aggregation # period, which may be permuted over. if config['predictAheadTime'] is not None: predictionSteps = int(round(aggregationDivide( config['predictAheadTime'], config['aggregationInfo']))) assert (predictionSteps >= 1) config['modelParams']['clParams']['steps'] = str(predictionSteps) # Adjust config by applying ValueGetterBase-derived # futures. 
NOTE: this MUST be called after updateConfigFromSubConfig() in order # to support value-getter-based substitutions from the sub-experiment (if any) applyValueGettersToContainer(config) control = { # The environment that the current model is being run in "environment": 'nupic', # Input stream specification per py/nupic/cluster/database/StreamDef.json. # 'dataset' : { u'info': u'test_hotgym', u'streams': [ { u'columns': [u'*'], u'info': u'hotGym.csv', u'last_record': config['numRecords'], u'source': u'file://extra/hotgym/hotgym.csv'}], 'aggregation': config['aggregationInfo'], u'version': 1}, # Iteration count: maximum number of iterations. Each iteration corresponds # to one record from the (possibly aggregated) dataset. The task is # terminated when either number of iterations reaches iterationCount or # all records in the (possibly aggregated) database have been processed, # whichever occurs first. # # iterationCount of -1 = iterate over the entire dataset 'iterationCount' : -1, # A dictionary containing all the supplementary parameters for inference "inferenceArgs":{'predictedField': config['predictedField'], 'predictionSteps': config['predictionSteps']}, # Metrics: A list of MetricSpecs that instantiate the metrics that are # computed for this experiment 'metrics':[], # Logged Metrics: A sequence of regular expressions that specify which of # the metrics from the Inference Specifications section MUST be logged for # every prediction. The regex's correspond to the automatically generated # metric labels. This is similar to the way the optimization metric is # specified in permutations.py. 'loggedMetrics': ['.*aae.*'], } # Add multi-step prediction metrics for steps in config['predictionSteps']: control['metrics'].append( MetricSpec(field=config['predictedField'], metric='multiStep', inferenceElement='multiStepBestPredictions', params={'errorMetric': 'aae', 'window': 1000, 'steps': steps})) control['metrics'].append( MetricSpec(field=config['predictedField'], metric='trivial', inferenceElement='prediction', params={'errorMetric': 'aae', 'window': 1000, 'steps': steps})) control['metrics'].append( MetricSpec(field=config['predictedField'], metric='multiStep', inferenceElement='multiStepBestPredictions', params={'errorMetric': 'altMAPE', 'window': 1000, 'steps': steps})) control['metrics'].append( MetricSpec(field=config['predictedField'], metric='trivial', inferenceElement='prediction', params={'errorMetric': 'altMAPE', 'window': 1000, 'steps': steps})) ################################################################################ ################################################################################ descriptionInterface = ExperimentDescriptionAPI(modelConfig=config, control=control)
1
17,808
`Anomaly` is imported here but not used. Please run the changed files through pylint with the nupic pylint config and fix any pylint findings related to your changes.
numenta-nupic
py
@@ -42,8 +42,18 @@ testUtils.checkSetup = function (content, options, target) { target = options; options = {}; } + // Normalize target, allow it to be the inserted node or '#target' + target = target || (content instanceof Node ? content : '#target'); testUtils.fixtureSetup(content); - var node = axe.utils.querySelectorAll(axe._tree[0], target || '#target')[0]; + + var node; + if (typeof target === 'string') { + node = axe.utils.querySelectorAll(axe._tree[0], target)[0]; + } else if (target instanceof Node) { + node = axe.utils.getNodeFromTree(axe._tree[0], target); + } else { + node = target; + } return [node.actualNode, options, node]; };
1
var testUtils = {}; testUtils.shadowSupport = (function(document) { 'use strict'; var v0 = document.body && typeof document.body.createShadowRoot === 'function', v1 = document.body && typeof document.body.attachShadow === 'function'; return { v0: (v0 === true), v1: (v1 === true), undefined: ( document.body && typeof document.body.attachShadow === 'undefined' && typeof document.body.createShadowRoot === 'undefined' ) }; })(document); testUtils.fixtureSetup = function (content) { 'use strict'; var fixture = document.querySelector('#fixture'); if (typeof content === 'string') { fixture.innerHTML = content; } else if (content instanceof Node) { fixture.appendChild(content); } axe._tree = axe.utils.getFlattenedTree(fixture); return fixture; }; /** * Create check arguments * * @param Node|String Stuff to go into the fixture (html or node) * @param Object Options argument for the check (optional, default: {}) * @param String Target for the check, CSS selector (default: '#target') */ testUtils.checkSetup = function (content, options, target) { 'use strict'; // Normalize the params if (typeof options !== 'object') { target = options; options = {}; } testUtils.fixtureSetup(content); var node = axe.utils.querySelectorAll(axe._tree[0], target || '#target')[0]; return [node.actualNode, options, node]; }; axe.testUtils = testUtils;
1
11,342
We needed this testutils file after all.
dequelabs-axe-core
js
@@ -46,7 +46,7 @@ func addTestingTsfBlocks(bc Blockchain) error { big.NewInt(10), ) pubk, _ := keypair.DecodePublicKey(Gen.CreatorPubKey) - sig, _ := hex.DecodeString("9c1fb14affb398f850d0642f22f12433526bed742fbfb39115f9df2549b2751347bafe9ddbe50e6f02906bdc83b7c905944adc19583726dfaea83245318132ff01") + sig, _ := hex.DecodeString("c05c3ff8c9820038c03881c97f544593619ca4d1617c7c6d695aaf828339da9616877649d70948094a05a86b171320d1aae9afa4432606be3b263f808e11816e00") bd := &action.EnvelopeBuilder{} elp := bd.SetAction(tsf0). SetDestinationAddress(ta.Addrinfo["producer"].Bech32()).
1
// Copyright (c) 2018 IoTeX // This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no // warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent // permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache // License 2.0 that can be found in the LICENSE file. package blockchain import ( "context" "encoding/hex" "fmt" "math/big" "sync" "testing" "github.com/stretchr/testify/require" "github.com/iotexproject/iotex-core/action" "github.com/iotexproject/iotex-core/action/protocol" "github.com/iotexproject/iotex-core/action/protocol/account" "github.com/iotexproject/iotex-core/action/protocol/vote" "github.com/iotexproject/iotex-core/blockchain/block" "github.com/iotexproject/iotex-core/blockchain/genesis" "github.com/iotexproject/iotex-core/config" "github.com/iotexproject/iotex-core/pkg/hash" "github.com/iotexproject/iotex-core/pkg/keypair" "github.com/iotexproject/iotex-core/state/factory" ta "github.com/iotexproject/iotex-core/test/testaddress" "github.com/iotexproject/iotex-core/testutil" ) const ( testDBPath = "db.test" testTriePath = "trie.test" ) func addTestingTsfBlocks(bc Blockchain) error { // Add block 0 tsf0, _ := action.NewTransfer( 1, big.NewInt(3000000000), Gen.CreatorAddr(), ta.Addrinfo["producer"].Bech32(), []byte{}, uint64(100000), big.NewInt(10), ) pubk, _ := keypair.DecodePublicKey(Gen.CreatorPubKey) sig, _ := hex.DecodeString("9c1fb14affb398f850d0642f22f12433526bed742fbfb39115f9df2549b2751347bafe9ddbe50e6f02906bdc83b7c905944adc19583726dfaea83245318132ff01") bd := &action.EnvelopeBuilder{} elp := bd.SetAction(tsf0). SetDestinationAddress(ta.Addrinfo["producer"].Bech32()). SetNonce(1). SetGasLimit(100000). 
SetGasPrice(big.NewInt(10)).Build() selp := action.AssembleSealedEnvelope(elp, Gen.CreatorAddr(), pubk, sig) actionMap := make(map[string][]action.SealedEnvelope) actionMap[selp.SrcAddr()] = []action.SealedEnvelope{selp} blk, err := bc.MintNewBlock( actionMap, ta.Keyinfo["producer"].PubKey, ta.Keyinfo["producer"].PriKey, ta.Addrinfo["producer"].Bech32(), 0, ) if err != nil { return err } if err := bc.ValidateBlock(blk, true); err != nil { return err } if err := bc.CommitBlock(blk); err != nil { return err } addr0 := ta.Addrinfo["producer"].Bech32() priKey0 := ta.Keyinfo["producer"].PriKey addr1 := ta.Addrinfo["alfa"].Bech32() priKey1 := ta.Keyinfo["alfa"].PriKey addr2 := ta.Addrinfo["bravo"].Bech32() addr3 := ta.Addrinfo["charlie"].Bech32() priKey3 := ta.Keyinfo["charlie"].PriKey addr4 := ta.Addrinfo["delta"].Bech32() priKey4 := ta.Keyinfo["delta"].PriKey addr5 := ta.Addrinfo["echo"].Bech32() priKey5 := ta.Keyinfo["echo"].PriKey addr6 := ta.Addrinfo["foxtrot"].Bech32() // Add block 1 // test --> A, B, C, D, E, F tsf1, err := testutil.SignedTransfer(addr0, addr1, priKey0, 1, big.NewInt(20), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPrice)) if err != nil { return err } tsf2, err := testutil.SignedTransfer(addr0, addr2, priKey0, 2, big.NewInt(30), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPrice)) if err != nil { return err } tsf3, err := testutil.SignedTransfer(addr0, addr3, priKey0, 3, big.NewInt(50), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPrice)) if err != nil { return err } tsf4, err := testutil.SignedTransfer(addr0, addr4, priKey0, 4, big.NewInt(70), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPrice)) if err != nil { return err } tsf5, err := testutil.SignedTransfer(addr0, addr5, priKey0, 5, big.NewInt(110), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPrice)) if err != nil { return err } tsf6, err := testutil.SignedTransfer(addr0, addr6, priKey0, 6, big.NewInt(50<<20), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPrice)) if err != nil { return err } accMap := make(map[string][]action.SealedEnvelope) accMap[tsf1.SrcAddr()] = []action.SealedEnvelope{tsf1, tsf2, tsf3, tsf4, tsf5, tsf6} blk, err = bc.MintNewBlock( accMap, ta.Keyinfo["producer"].PubKey, ta.Keyinfo["producer"].PriKey, ta.Addrinfo["producer"].Bech32(), 0, ) if err != nil { return err } if err := bc.ValidateBlock(blk, true); err != nil { return err } if err := bc.CommitBlock(blk); err != nil { return err } // Add block 2 // Charlie --> A, B, D, E, test tsf1, err = testutil.SignedTransfer(addr3, addr1, priKey3, 1, big.NewInt(1), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPrice)) if err != nil { return err } tsf2, err = testutil.SignedTransfer(addr3, addr2, priKey3, 2, big.NewInt(1), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPrice)) if err != nil { return err } tsf3, err = testutil.SignedTransfer(addr3, addr4, priKey3, 3, big.NewInt(1), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPrice)) if err != nil { return err } tsf4, err = testutil.SignedTransfer(addr3, addr5, priKey3, 4, big.NewInt(1), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPrice)) if err != nil { return err } tsf5, err = testutil.SignedTransfer(addr3, addr0, priKey3, 5, big.NewInt(1), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPrice)) if err != nil { return err } accMap = make(map[string][]action.SealedEnvelope) accMap[tsf1.SrcAddr()] = []action.SealedEnvelope{tsf1, tsf2, tsf3, tsf4, 
tsf5} blk, err = bc.MintNewBlock( accMap, ta.Keyinfo["producer"].PubKey, ta.Keyinfo["producer"].PriKey, ta.Addrinfo["producer"].Bech32(), 0, ) if err != nil { return err } if err := bc.ValidateBlock(blk, true); err != nil { return err } if err := bc.CommitBlock(blk); err != nil { return err } // Add block 3 // Delta --> B, E, F, test tsf1, err = testutil.SignedTransfer(addr4, addr2, priKey4, 1, big.NewInt(1), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPrice)) if err != nil { return err } tsf2, err = testutil.SignedTransfer(addr4, addr5, priKey4, 2, big.NewInt(1), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPrice)) if err != nil { return err } tsf3, err = testutil.SignedTransfer(addr4, addr6, priKey4, 3, big.NewInt(1), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPrice)) if err != nil { return err } tsf4, err = testutil.SignedTransfer(addr4, addr0, priKey4, 4, big.NewInt(1), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPrice)) if err != nil { return err } accMap = make(map[string][]action.SealedEnvelope) accMap[tsf1.SrcAddr()] = []action.SealedEnvelope{tsf1, tsf2, tsf3, tsf4} blk, err = bc.MintNewBlock( accMap, ta.Keyinfo["producer"].PubKey, ta.Keyinfo["producer"].PriKey, ta.Addrinfo["producer"].Bech32(), 0, ) if err != nil { return err } if err := bc.ValidateBlock(blk, true); err != nil { return err } if err := bc.CommitBlock(blk); err != nil { return err } // Add block 4 // Delta --> A, B, C, D, F, test tsf1, err = testutil.SignedTransfer(addr5, addr1, priKey5, 1, big.NewInt(2), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPrice)) if err != nil { return err } tsf2, err = testutil.SignedTransfer(addr5, addr2, priKey5, 2, big.NewInt(2), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPrice)) if err != nil { return err } tsf3, err = testutil.SignedTransfer(addr5, addr3, priKey5, 3, big.NewInt(2), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPrice)) if err != nil { return err } tsf4, err = testutil.SignedTransfer(addr5, addr4, priKey5, 4, big.NewInt(2), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPrice)) if err != nil { return err } tsf5, err = testutil.SignedTransfer(addr5, addr6, priKey5, 5, big.NewInt(2), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPrice)) if err != nil { return err } tsf6, err = testutil.SignedTransfer(addr5, addr0, priKey5, 6, big.NewInt(2), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPrice)) if err != nil { return err } vote1, err := testutil.SignedVote(addr3, addr3, priKey3, 6, testutil.TestGasLimit, big.NewInt(testutil.TestGasPrice)) if err != nil { return err } vote2, err := testutil.SignedVote(addr1, addr1, priKey1, 1, testutil.TestGasLimit, big.NewInt(testutil.TestGasPrice)) if err != nil { return err } accMap = make(map[string][]action.SealedEnvelope) accMap[tsf1.SrcAddr()] = []action.SealedEnvelope{tsf1, tsf2, tsf3, tsf4, tsf5, tsf6} accMap[vote1.SrcAddr()] = []action.SealedEnvelope{vote1} accMap[vote2.SrcAddr()] = []action.SealedEnvelope{vote2} blk, err = bc.MintNewBlock( accMap, ta.Keyinfo["producer"].PubKey, ta.Keyinfo["producer"].PriKey, ta.Addrinfo["producer"].Bech32(), 0, ) if err != nil { return err } if err := bc.ValidateBlock(blk, true); err != nil { return err } if blk.TxRoot() != blk.CalculateTxRoot() { return err } return bc.CommitBlock(blk) } func TestCreateBlockchain(t *testing.T) { require := require.New(t) ctx := context.Background() cfg := config.Default // disable account-based testing 
cfg.Chain.TrieDBPath = "" // create chain bc := NewBlockchain(cfg, InMemStateFactoryOption(), InMemDaoOption()) bc.Validator().AddActionEnvelopeValidators(protocol.NewGenericValidator(bc)) bc.Validator().AddActionValidators(account.NewProtocol(), vote.NewProtocol(bc)) bc.GetFactory().AddActionHandlers(account.NewProtocol(), vote.NewProtocol(bc)) require.NoError(bc.Start(ctx)) require.NotNil(bc) height := bc.TipHeight() require.Equal(0, int(height)) fmt.Printf("Create blockchain pass, height = %d\n", height) defer func() { err := bc.Stop(ctx) require.NoError(err) }() // verify Genesis block genesis, _ := bc.GetBlockByHeight(0) require.NotNil(genesis) // serialize data, err := genesis.Serialize() require.Nil(err) transfers, votes, _ := action.ClassifyActions(genesis.Actions) require.Equal(0, len(transfers)) require.Equal(21, len(votes)) fmt.Printf("Block size match pass\n") fmt.Printf("Marshaling Block pass\n") // deserialize deserialize := block.Block{} err = deserialize.Deserialize(data) require.Nil(err) fmt.Printf("Unmarshaling Block pass\n") blkhash := genesis.HashBlock() require.Equal(blkhash, deserialize.HashBlock()) fmt.Printf("Serialize/Deserialize Block hash = %x match\n", blkhash) blkhash = genesis.CalculateTxRoot() require.Equal(blkhash, deserialize.CalculateTxRoot()) fmt.Printf("Serialize/Deserialize Block merkle = %x match\n", blkhash) // add 4 sample blocks require.Nil(addTestingTsfBlocks(bc)) height = bc.TipHeight() require.Equal(5, int(height)) } func TestBlockchain_MintNewBlock(t *testing.T) { ctx := context.Background() cfg := config.Default bc := NewBlockchain(cfg, InMemStateFactoryOption(), InMemDaoOption()) bc.Validator().AddActionEnvelopeValidators(protocol.NewGenericValidator(bc)) bc.Validator().AddActionValidators(account.NewProtocol(), vote.NewProtocol(bc)) bc.GetFactory().AddActionHandlers(account.NewProtocol(), vote.NewProtocol(bc)) require.NoError(t, bc.Start(ctx)) defer require.NoError(t, bc.Stop(ctx)) pk, _ := keypair.DecodePublicKey(Gen.CreatorPubKey) // The signature should only matches the transfer amount 3000000000 sig, err := hex.DecodeString("9c1fb14affb398f850d0642f22f12433526bed742fbfb39115f9df2549b2751347bafe9ddbe50e6f02906bdc83b7c905944adc19583726dfaea83245318132ff01") require.NoError(t, err) cases := make(map[int64]bool) cases[0] = true cases[1] = false for k, v := range cases { tsf, err := action.NewTransfer( 1, big.NewInt(3000000000+k), Gen.CreatorAddr(), ta.Addrinfo["producer"].Bech32(), []byte{}, uint64(100000), big.NewInt(10), ) require.NoError(t, err) bd := &action.EnvelopeBuilder{} elp := bd.SetAction(tsf). SetDestinationAddress(ta.Addrinfo["producer"].Bech32()). SetNonce(1). SetGasLimit(100000). 
SetGasPrice(big.NewInt(10)).Build() selp := action.AssembleSealedEnvelope(elp, Gen.CreatorAddr(), pk, sig) actionMap := make(map[string][]action.SealedEnvelope) actionMap[selp.SrcAddr()] = []action.SealedEnvelope{selp} _, err = bc.MintNewBlock( actionMap, ta.Keyinfo["producer"].PubKey, ta.Keyinfo["producer"].PriKey, ta.Addrinfo["producer"].Bech32(), 0, ) if v { require.NoError(t, err) } else { require.Error(t, err) } } } type MockSubscriber struct { counter int mu sync.RWMutex } func (ms *MockSubscriber) HandleBlock(blk *block.Block) error { ms.mu.Lock() tsfs, _, _ := action.ClassifyActions(blk.Actions) ms.counter += len(tsfs) ms.mu.Unlock() return nil } func (ms *MockSubscriber) Counter() int { ms.mu.RLock() defer ms.mu.RUnlock() return ms.counter } func TestLoadBlockchainfromDB(t *testing.T) { require := require.New(t) ctx := context.Background() testutil.CleanupPath(t, testTriePath) defer testutil.CleanupPath(t, testTriePath) testutil.CleanupPath(t, testDBPath) defer testutil.CleanupPath(t, testDBPath) cfg := config.Default cfg.Chain.TrieDBPath = testTriePath cfg.Chain.ChainDBPath = testDBPath cfg.Chain.EnableIndex = true sf, err := factory.NewFactory(cfg, factory.DefaultTrieOption()) require.Nil(err) sf.AddActionHandlers(account.NewProtocol()) // Create a blockchain from scratch bc := NewBlockchain(cfg, PrecreatedStateFactoryOption(sf), BoltDBDaoOption()) bc.Validator().AddActionEnvelopeValidators(protocol.NewGenericValidator(bc)) bc.Validator().AddActionValidators(account.NewProtocol(), vote.NewProtocol(bc)) sf.AddActionHandlers(vote.NewProtocol(bc)) require.NoError(bc.Start(ctx)) require.NoError(addCreatorToFactory(sf)) ms := &MockSubscriber{counter: 0} err = bc.AddSubscriber(ms) require.Nil(err) require.Equal(0, ms.Counter()) height := bc.TipHeight() fmt.Printf("Open blockchain pass, height = %d\n", height) require.Nil(addTestingTsfBlocks(bc)) err = bc.Stop(ctx) require.NoError(err) require.Equal(27, ms.Counter()) // Load a blockchain from DB sf, err = factory.NewFactory(cfg, factory.DefaultTrieOption()) require.Nil(err) sf.AddActionHandlers(account.NewProtocol()) bc = NewBlockchain(cfg, PrecreatedStateFactoryOption(sf), BoltDBDaoOption()) bc.Validator().AddActionEnvelopeValidators(protocol.NewGenericValidator(bc)) bc.Validator().AddActionValidators(account.NewProtocol(), vote.NewProtocol(bc)) require.NoError(bc.Start(ctx)) defer func() { require.NoError(bc.Stop(ctx)) }() // check hash<-->height mapping blkhash, err := bc.GetHashByHeight(0) require.Nil(err) height, err = bc.GetHeightByHash(blkhash) require.Nil(err) require.Equal(uint64(0), height) blk, err := bc.GetBlockByHash(blkhash) require.Nil(err) require.Equal(blkhash, blk.HashBlock()) fmt.Printf("Genesis hash = %x\n", blkhash) hash1, err := bc.GetHashByHeight(1) require.Nil(err) height, err = bc.GetHeightByHash(hash1) require.Nil(err) require.Equal(uint64(1), height) blk, err = bc.GetBlockByHash(hash1) require.Nil(err) require.Equal(hash1, blk.HashBlock()) fmt.Printf("block 1 hash = %x\n", hash1) hash2, err := bc.GetHashByHeight(2) require.Nil(err) height, err = bc.GetHeightByHash(hash2) require.Nil(err) require.Equal(uint64(2), height) blk, err = bc.GetBlockByHash(hash2) require.Nil(err) require.Equal(hash2, blk.HashBlock()) fmt.Printf("block 2 hash = %x\n", hash2) hash3, err := bc.GetHashByHeight(3) require.Nil(err) height, err = bc.GetHeightByHash(hash3) require.Nil(err) require.Equal(uint64(3), height) blk, err = bc.GetBlockByHash(hash3) require.Nil(err) require.Equal(hash3, blk.HashBlock()) fmt.Printf("block 3 hash = 
%x\n", hash3) hash4, err := bc.GetHashByHeight(4) require.Nil(err) height, err = bc.GetHeightByHash(hash4) require.Nil(err) require.Equal(uint64(4), height) blk, err = bc.GetBlockByHash(hash4) require.Nil(err) require.Equal(hash4, blk.HashBlock()) fmt.Printf("block 4 hash = %x\n", hash4) hash5, err := bc.GetHashByHeight(5) require.Nil(err) height, err = bc.GetHeightByHash(hash5) require.Nil(err) require.Equal(uint64(5), height) blk, err = bc.GetBlockByHash(hash5) require.Nil(err) require.Equal(hash5, blk.HashBlock()) fmt.Printf("block 5 hash = %x\n", hash5) empblk, err := bc.GetBlockByHash(hash.ZeroHash32B) require.Nil(empblk) require.NotNil(err.Error()) blk, err = bc.GetBlockByHeight(60000) require.Nil(blk) require.NotNil(err) // add wrong blocks h := bc.TipHeight() blkhash = bc.TipHash() blk, err = bc.GetBlockByHeight(h) require.Nil(err) require.Equal(blkhash, blk.HashBlock()) fmt.Printf("Current tip = %d hash = %x\n", h, blkhash) // add block with wrong height cbTsf := action.NewCoinBaseTransfer(1, big.NewInt(50), ta.Addrinfo["bravo"].Bech32()) require.NotNil(cbTsf) bd := action.EnvelopeBuilder{} elp := bd.SetNonce(1). SetDestinationAddress(ta.Addrinfo["bravo"].Bech32()). SetGasLimit(genesis.ActionGasLimit). SetAction(cbTsf).Build() selp, err := action.Sign(elp, ta.Addrinfo["bravo"].Bech32(), ta.Keyinfo["bravo"].PriKey) require.NoError(err) nblk, err := block.NewTestingBuilder(). SetChainID(0). SetHeight(h+2). SetPrevBlockHash(blkhash). SetTimeStamp(testutil.TimestampNow()). AddActions(selp).SignAndBuild(ta.Keyinfo["bravo"].PubKey, ta.Keyinfo["bravo"].PriKey) require.NoError(err) err = bc.ValidateBlock(&nblk, true) require.NotNil(err) fmt.Printf("Cannot validate block %d: %v\n", blk.Height(), err) // add block with zero prev hash cbTsf2 := action.NewCoinBaseTransfer(1, big.NewInt(50), ta.Addrinfo["bravo"].Bech32()) require.NotNil(cbTsf2) bd = action.EnvelopeBuilder{} elp = bd.SetNonce(1). SetDestinationAddress(ta.Addrinfo["bravo"].Bech32()). SetGasLimit(genesis.ActionGasLimit). SetAction(cbTsf2).Build() selp2, err := action.Sign(elp, ta.Addrinfo["bravo"].Bech32(), ta.Keyinfo["bravo"].PriKey) require.NoError(err) nblk, err = block.NewTestingBuilder(). SetChainID(0). SetHeight(h+1). SetPrevBlockHash(hash.ZeroHash32B). SetTimeStamp(testutil.TimestampNow()). 
AddActions(selp2).SignAndBuild(ta.Keyinfo["bravo"].PubKey, ta.Keyinfo["bravo"].PriKey) require.NoError(err) err = bc.ValidateBlock(&nblk, true) require.NotNil(err) fmt.Printf("Cannot validate block %d: %v\n", blk.Height(), err) // add existing block again will have no effect blk, err = bc.GetBlockByHeight(3) require.NotNil(blk) require.Nil(err) require.NoError(bc.(*blockchain).commitBlock(blk)) fmt.Printf("Cannot add block 3 again: %v\n", err) // check all Tx from block 4 blk, err = bc.GetBlockByHeight(5) require.Nil(err) require.Equal(hash5, blk.HashBlock()) tsfs, votes, _ := action.ClassifyActions(blk.Actions) for _, transfer := range tsfs { transferHash := transfer.Hash() blkhash, err := bc.GetBlockHashByTransferHash(transferHash) require.Nil(err) require.Equal(blkhash, hash5) transfer1, err := bc.GetTransferByTransferHash(transferHash) require.Nil(err) require.Equal(transfer1.Hash(), transferHash) } for _, vote := range votes { voteHash := vote.Hash() blkhash, err := bc.GetBlockHashByVoteHash(voteHash) require.Nil(err) require.Equal(blkhash, hash5) vote1, err := bc.GetVoteByVoteHash(voteHash) require.Nil(err) require.Equal(vote1.Hash(), voteHash) } fromTransfers, err := bc.GetTransfersFromAddress(ta.Addrinfo["charlie"].Bech32()) require.Nil(err) require.Equal(len(fromTransfers), 5) toTransfers, err := bc.GetTransfersToAddress(ta.Addrinfo["charlie"].Bech32()) require.Nil(err) require.Equal(len(toTransfers), 2) fromVotes, err := bc.GetVotesFromAddress(ta.Addrinfo["charlie"].Bech32()) require.Nil(err) require.Equal(len(fromVotes), 1) fromVotes, err = bc.GetVotesFromAddress(ta.Addrinfo["alfa"].Bech32()) require.Nil(err) require.Equal(len(fromVotes), 1) toVotes, err := bc.GetVotesToAddress(ta.Addrinfo["charlie"].Bech32()) require.Nil(err) require.Equal(len(toVotes), 1) toVotes, err = bc.GetVotesToAddress(ta.Addrinfo["alfa"].Bech32()) require.Nil(err) require.Equal(len(toVotes), 1) totalTransfers, err := bc.GetTotalTransfers() require.Nil(err) require.Equal(totalTransfers, uint64(27)) totalVotes, err := bc.GetTotalVotes() require.Nil(err) require.Equal(totalVotes, uint64(23)) _, err = bc.GetTransferByTransferHash(hash.ZeroHash32B) require.NotNil(err) _, err = bc.GetVoteByVoteHash(hash.ZeroHash32B) require.NotNil(err) _, err = bc.StateByAddr("") require.NotNil(err) } func TestLoadBlockchainfromDBWithoutExplorer(t *testing.T) { require := require.New(t) testutil.CleanupPath(t, testTriePath) defer testutil.CleanupPath(t, testTriePath) testutil.CleanupPath(t, testDBPath) defer testutil.CleanupPath(t, testDBPath) ctx := context.Background() cfg := config.Default cfg.DB.UseBadgerDB = false // test with boltDB cfg.Chain.TrieDBPath = testTriePath cfg.Chain.ChainDBPath = testDBPath sf, err := factory.NewFactory(cfg, factory.DefaultTrieOption()) require.Nil(err) sf.AddActionHandlers(account.NewProtocol()) // Create a blockchain from scratch bc := NewBlockchain(cfg, PrecreatedStateFactoryOption(sf), BoltDBDaoOption()) bc.Validator().AddActionEnvelopeValidators(protocol.NewGenericValidator(bc)) bc.Validator().AddActionValidators(account.NewProtocol(), vote.NewProtocol(bc)) sf.AddActionHandlers(vote.NewProtocol(bc)) require.NoError(bc.Start(ctx)) require.NoError(addCreatorToFactory(sf)) ms := &MockSubscriber{counter: 0} err = bc.AddSubscriber(ms) require.Nil(err) require.Equal(0, ms.counter) err = bc.RemoveSubscriber(ms) require.Nil(err) height := bc.TipHeight() fmt.Printf("Open blockchain pass, height = %d\n", height) require.Nil(addTestingTsfBlocks(bc)) err = bc.Stop(ctx) require.NoError(err) 
require.Equal(0, ms.counter) // Load a blockchain from DB sf, err = factory.NewFactory(cfg, factory.DefaultTrieOption()) require.Nil(err) sf.AddActionHandlers(account.NewProtocol()) bc = NewBlockchain(cfg, PrecreatedStateFactoryOption(sf), BoltDBDaoOption()) bc.Validator().AddActionEnvelopeValidators(protocol.NewGenericValidator(bc)) bc.Validator().AddActionValidators(account.NewProtocol(), vote.NewProtocol(bc)) require.NoError(bc.Start(ctx)) defer func() { err := bc.Stop(ctx) require.NoError(err) }() require.NotNil(bc) // check hash<-->height mapping blkhash, err := bc.GetHashByHeight(0) require.Nil(err) height, err = bc.GetHeightByHash(blkhash) require.Nil(err) require.Equal(uint64(0), height) blk, err := bc.GetBlockByHash(blkhash) require.Nil(err) require.Equal(blkhash, blk.HashBlock()) fmt.Printf("Genesis hash = %x\n", blkhash) hash1, err := bc.GetHashByHeight(1) require.Nil(err) height, err = bc.GetHeightByHash(hash1) require.Nil(err) require.Equal(uint64(1), height) blk, err = bc.GetBlockByHash(hash1) require.Nil(err) require.Equal(hash1, blk.HashBlock()) fmt.Printf("block 1 hash = %x\n", hash1) hash2, err := bc.GetHashByHeight(2) require.Nil(err) height, err = bc.GetHeightByHash(hash2) require.Nil(err) require.Equal(uint64(2), height) blk, err = bc.GetBlockByHash(hash2) require.Nil(err) require.Equal(hash2, blk.HashBlock()) fmt.Printf("block 2 hash = %x\n", hash2) hash3, err := bc.GetHashByHeight(3) require.Nil(err) height, err = bc.GetHeightByHash(hash3) require.Nil(err) require.Equal(uint64(3), height) blk, err = bc.GetBlockByHash(hash3) require.Nil(err) require.Equal(hash3, blk.HashBlock()) fmt.Printf("block 3 hash = %x\n", hash3) hash4, err := bc.GetHashByHeight(4) require.Nil(err) height, err = bc.GetHeightByHash(hash4) require.Nil(err) require.Equal(uint64(4), height) blk, err = bc.GetBlockByHash(hash4) require.Nil(err) require.Equal(hash4, blk.HashBlock()) fmt.Printf("block 4 hash = %x\n", hash4) empblk, err := bc.GetBlockByHash(hash.ZeroHash32B) require.Nil(empblk) require.NotNil(err.Error()) blk, err = bc.GetBlockByHeight(60000) require.Nil(blk) require.NotNil(err) // add wrong blocks h := bc.TipHeight() blkhash = bc.TipHash() blk, err = bc.GetBlockByHeight(h) require.Nil(err) require.Equal(blkhash, blk.HashBlock()) fmt.Printf("Current tip = %d hash = %x\n", h, blkhash) // add block with wrong height cbTsf := action.NewCoinBaseTransfer(1, big.NewInt(50), ta.Addrinfo["bravo"].Bech32()) require.NotNil(cbTsf) bd := &action.EnvelopeBuilder{} elp := bd.SetNonce(1). SetDestinationAddress(ta.Addrinfo["bravo"].Bech32()). SetGasLimit(genesis.ActionGasLimit). SetAction(cbTsf).Build() selp, err := action.Sign(elp, ta.Addrinfo["bravo"].Bech32(), ta.Keyinfo["bravo"].PriKey) require.NoError(err) nblk, err := block.NewTestingBuilder(). SetChainID(0). SetHeight(h+2). SetPrevBlockHash(blkhash). SetTimeStamp(testutil.TimestampNow()). AddActions(selp).SignAndBuild(ta.Keyinfo["bravo"].PubKey, ta.Keyinfo["bravo"].PriKey) require.NoError(err) err = bc.ValidateBlock(&nblk, true) require.NotNil(err) fmt.Printf("Cannot validate block %d: %v\n", blk.Height(), err) // add block with zero prev hash cbTsf2 := action.NewCoinBaseTransfer(1, big.NewInt(50), ta.Addrinfo["bravo"].Bech32()) require.NotNil(cbTsf2) bd = &action.EnvelopeBuilder{} elp = bd.SetNonce(1). SetDestinationAddress(ta.Addrinfo["bravo"].Bech32()). SetGasLimit(genesis.ActionGasLimit). 
SetAction(cbTsf2).Build() selp2, err := action.Sign(elp, ta.Addrinfo["bravo"].Bech32(), ta.Keyinfo["bravo"].PriKey) require.NoError(err) nblk, err = block.NewTestingBuilder(). SetChainID(0). SetHeight(h+1). SetPrevBlockHash(hash.ZeroHash32B). SetTimeStamp(testutil.TimestampNow()). AddActions(selp2).SignAndBuild(ta.Keyinfo["bravo"].PubKey, ta.Keyinfo["bravo"].PriKey) require.NoError(err) err = bc.ValidateBlock(&nblk, true) require.NotNil(err) fmt.Printf("Cannot validate block %d: %v\n", blk.Height(), err) // add existing block again will have no effect blk, err = bc.GetBlockByHeight(3) require.NotNil(blk) require.Nil(err) require.NoError(bc.(*blockchain).commitBlock(blk)) fmt.Printf("Cannot add block 3 again: %v\n", err) // check all Tx from block 4 blk, err = bc.GetBlockByHeight(4) require.Nil(err) require.Equal(hash4, blk.HashBlock()) tsfs, votes, _ := action.ClassifyActions(blk.Actions) for _, transfer := range tsfs { transferHash := transfer.Hash() _, err := bc.GetBlockHashByTransferHash(transferHash) require.NotNil(err) _, err = bc.GetTransferByTransferHash(transferHash) require.NotNil(err) } for _, vote := range votes { voteHash := vote.Hash() _, err := bc.GetBlockHashByVoteHash(voteHash) require.NotNil(err) _, err = bc.GetVoteByVoteHash(voteHash) require.NotNil(err) } _, err = bc.GetTransfersFromAddress(ta.Addrinfo["charlie"].Bech32()) require.NotNil(err) _, err = bc.GetTransfersToAddress(ta.Addrinfo["charlie"].Bech32()) require.NotNil(err) _, err = bc.GetVotesFromAddress(ta.Addrinfo["charlie"].Bech32()) require.NotNil(err) _, err = bc.GetVotesFromAddress(ta.Addrinfo["alfa"].Bech32()) require.NotNil(err) _, err = bc.GetVotesToAddress(ta.Addrinfo["charlie"].Bech32()) require.NotNil(err) _, err = bc.GetVotesToAddress(ta.Addrinfo["alfa"].Bech32()) require.NotNil(err) _, err = bc.GetTotalTransfers() require.NotNil(err) _, err = bc.GetTotalVotes() require.NotNil(err) _, err = bc.GetTransferByTransferHash(hash.ZeroHash32B) require.NotNil(err) _, err = bc.GetVoteByVoteHash(hash.ZeroHash32B) require.NotNil(err) _, err = bc.StateByAddr("") require.NotNil(err) } func TestBlockchain_Validator(t *testing.T) { cfg := config.Default // disable account-based testing cfg.Chain.TrieDBPath = "" ctx := context.Background() bc := NewBlockchain(cfg, InMemDaoOption(), InMemStateFactoryOption()) require.NoError(t, bc.Start(ctx)) defer func() { err := bc.Stop(ctx) require.Nil(t, err) }() require.NotNil(t, bc) val := bc.Validator() require.NotNil(t, bc) bc.SetValidator(val) require.NotNil(t, bc.Validator()) } func TestBlockchainInitialCandidate(t *testing.T) { require := require.New(t) testutil.CleanupPath(t, testTriePath) defer testutil.CleanupPath(t, testTriePath) testutil.CleanupPath(t, testDBPath) defer testutil.CleanupPath(t, testDBPath) cfg := config.Default cfg.Chain.TrieDBPath = testTriePath cfg.Chain.ChainDBPath = testDBPath cfg.Chain.NumCandidates = 2 sf, err := factory.NewFactory(cfg, factory.DefaultTrieOption()) require.Nil(err) sf.AddActionHandlers(account.NewProtocol(), vote.NewProtocol(nil)) bc := NewBlockchain(cfg, PrecreatedStateFactoryOption(sf), BoltDBDaoOption()) require.NoError(bc.Start(context.Background())) defer func() { require.NoError(bc.Stop(context.Background())) }() // TODO: change the value when Candidates size is changed height, err := sf.Height() require.NoError(err) require.Equal(uint64(0), height) candidate, err := sf.CandidatesByHeight(height) require.NoError(err) require.True(len(candidate) == 2) } func TestCoinbaseTransfer(t *testing.T) { require := require.New(t) 
testutil.CleanupPath(t, testTriePath) defer testutil.CleanupPath(t, testTriePath) testutil.CleanupPath(t, testDBPath) defer testutil.CleanupPath(t, testDBPath) cfg := config.Default cfg.Chain.TrieDBPath = testTriePath cfg.Chain.ChainDBPath = testDBPath sf, err := factory.NewFactory(cfg, factory.DefaultTrieOption()) sf.AddActionHandlers(account.NewProtocol()) require.Nil(err) bc := NewBlockchain(cfg, PrecreatedStateFactoryOption(sf), BoltDBDaoOption()) bc.Validator().AddActionEnvelopeValidators(protocol.NewGenericValidator(bc)) bc.Validator().AddActionValidators(account.NewProtocol(), vote.NewProtocol(bc)) require.NoError(bc.Start(context.Background())) defer func() { require.NoError(bc.Stop(context.Background())) }() require.NoError(addCreatorToFactory(sf)) height := bc.TipHeight() require.Equal(0, int(height)) actionMap := make(map[string][]action.SealedEnvelope) blk, err := bc.MintNewBlock( actionMap, ta.Keyinfo["alfa"].PubKey, ta.Keyinfo["alfa"].PriKey, ta.Addrinfo["alfa"].Bech32(), 0, ) require.Nil(err) s, err := bc.StateByAddr(ta.Addrinfo["alfa"].Bech32()) require.Nil(err) require.Equal(big.NewInt(0), s.Balance) require.Nil(bc.ValidateBlock(blk, true)) require.Nil(bc.CommitBlock(blk)) height = bc.TipHeight() require.True(height == 1) require.True(len(blk.Actions) == 1) s, err = bc.StateByAddr(ta.Addrinfo["alfa"].Bech32()) require.Nil(err) require.Equal(Gen.BlockReward, s.Balance) } func TestBlockchain_StateByAddr(t *testing.T) { require := require.New(t) cfg := config.Default // disable account-based testing // create chain bc := NewBlockchain(cfg, InMemDaoOption(), InMemStateFactoryOption()) require.NoError(bc.Start(context.Background())) require.NotNil(bc) s, err := bc.StateByAddr(Gen.CreatorAddr()) require.NoError(err) require.Equal(uint64(0), s.Nonce) bal := big.NewInt(7700000000) require.Equal(bal.Mul(bal, big.NewInt(1e18)).String(), s.Balance.String()) require.Equal(hash.ZeroHash32B, s.Root) require.Equal([]byte(nil), s.CodeHash) require.Equal(false, s.IsCandidate) require.Equal(big.NewInt(0), s.VotingWeight) require.Equal("", s.Votee) } func TestBlocks(t *testing.T) { // This test is used for committing block verify benchmark purpose t.Skip() require := require.New(t) cfg := config.Default testutil.CleanupPath(t, testTriePath) defer testutil.CleanupPath(t, testTriePath) testutil.CleanupPath(t, testDBPath) defer testutil.CleanupPath(t, testDBPath) cfg.Chain.TrieDBPath = testTriePath cfg.Chain.ChainDBPath = testDBPath sf, _ := factory.NewFactory(cfg, factory.InMemTrieOption()) // Create a blockchain from scratch bc := NewBlockchain(cfg, PrecreatedStateFactoryOption(sf), BoltDBDaoOption()) require.NoError(bc.Start(context.Background())) defer func() { require.NoError(bc.Stop(context.Background())) }() require.NoError(addCreatorToFactory(sf)) a := ta.Addrinfo["alfa"].Bech32() priKeyA := ta.Keyinfo["alfa"].PriKey c := ta.Addrinfo["bravo"].Bech32() ws, err := sf.NewWorkingSet() require.NoError(err) _, err = account.LoadOrCreateAccount(ws, a, big.NewInt(100000)) require.NoError(err) _, err = account.LoadOrCreateAccount(ws, c, big.NewInt(100000)) require.NoError(err) gasLimit := testutil.TestGasLimit ctx := protocol.WithRunActionsCtx(context.Background(), protocol.RunActionsCtx{ ProducerAddr: ta.Addrinfo["producer"].Bech32(), GasLimit: &gasLimit, EnableGasCharge: testutil.EnableGasCharge, }) _, _, err = ws.RunActions(ctx, 0, nil) require.NoError(err) require.NoError(sf.Commit(ws)) for i := 0; i < 10; i++ { acts := []action.SealedEnvelope{} actionMap := 
make(map[string][]action.SealedEnvelope) actionMap[a] = []action.SealedEnvelope{} for i := 0; i < 1000; i++ { tsf, err := testutil.SignedTransfer(a, c, priKeyA, 1, big.NewInt(2), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPrice)) require.NoError(err) acts = append(acts, tsf) actionMap[a] = append(actionMap[a], tsf) } blk, _ := bc.MintNewBlock( actionMap, ta.Keyinfo["producer"].PubKey, ta.Keyinfo["producer"].PriKey, ta.Addrinfo["producer"].Bech32(), 0, ) require.Nil(bc.ValidateBlock(blk, true)) require.Nil(bc.CommitBlock(blk)) } } func TestActions(t *testing.T) { // This test is used for block verify benchmark purpose t.Skip() require := require.New(t) cfg := config.Default testutil.CleanupPath(t, testTriePath) defer testutil.CleanupPath(t, testTriePath) testutil.CleanupPath(t, testDBPath) defer testutil.CleanupPath(t, testDBPath) cfg.Chain.TrieDBPath = testTriePath cfg.Chain.ChainDBPath = testDBPath sf, _ := factory.NewFactory(cfg, factory.InMemTrieOption()) // Create a blockchain from scratch bc := NewBlockchain(cfg, PrecreatedStateFactoryOption(sf), BoltDBDaoOption()) require.NoError(bc.Start(context.Background())) defer func() { require.NoError(bc.Stop(context.Background())) }() require.NoError(addCreatorToFactory(sf)) a := ta.Addrinfo["alfa"].Bech32() priKeyA := ta.Keyinfo["alfa"].PriKey c := ta.Addrinfo["bravo"].Bech32() ws, err := sf.NewWorkingSet() require.NoError(err) _, err = account.LoadOrCreateAccount(ws, a, big.NewInt(100000)) require.NoError(err) _, err = account.LoadOrCreateAccount(ws, c, big.NewInt(100000)) require.NoError(err) gasLimit := testutil.TestGasLimit ctx := protocol.WithRunActionsCtx(context.Background(), protocol.RunActionsCtx{ ProducerAddr: ta.Addrinfo["producer"].Bech32(), GasLimit: &gasLimit, EnableGasCharge: testutil.EnableGasCharge, }) _, _, err = ws.RunActions(ctx, 0, nil) require.NoError(err) require.NoError(sf.Commit(ws)) val := &validator{sf: sf, validatorAddr: ""} bc.Validator().AddActionEnvelopeValidators(protocol.NewGenericValidator(bc)) bc.Validator().AddActionValidators(account.NewProtocol(), vote.NewProtocol(bc)) actionMap := make(map[string][]action.SealedEnvelope) for i := 0; i < 5000; i++ { tsf, err := testutil.SignedTransfer(a, c, priKeyA, 1, big.NewInt(2), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPrice)) require.NoError(err) actionMap[a] = append(actionMap[a], tsf) vote, err := testutil.SignedVote(a, a, priKeyA, 1, testutil.TestGasLimit, big.NewInt(testutil.TestGasPrice)) require.NoError(err) actionMap[a] = append(actionMap[a], vote) } blk, _ := bc.MintNewBlock( actionMap, ta.Keyinfo["producer"].PubKey, ta.Keyinfo["producer"].PriKey, ta.Addrinfo["producer"].Bech32(), 0, ) require.Nil(val.Validate(blk, 0, blk.PrevHash(), true)) } func TestStartExistingBlockchain(t *testing.T) { require := require.New(t) ctx := context.Background() testutil.CleanupPath(t, testTriePath) testutil.CleanupPath(t, testDBPath) // Disable block reward to make bookkeeping easier Gen.BlockReward = big.NewInt(0) cfg := config.Default cfg.Chain.TrieDBPath = testTriePath cfg.Chain.ChainDBPath = testDBPath sf, err := factory.NewFactory(cfg, factory.DefaultTrieOption()) require.NoError(err) // Create a blockchain from scratch bc := NewBlockchain(cfg, PrecreatedStateFactoryOption(sf), BoltDBDaoOption()) require.NotNil(bc) bc.Validator().AddActionEnvelopeValidators(protocol.NewGenericValidator(bc)) bc.Validator().AddActionValidators(account.NewProtocol(), vote.NewProtocol(bc)) sf.AddActionHandlers(vote.NewProtocol(bc)) 
require.NoError(bc.Start(ctx)) defer func() { require.NoError(bc.Stop(ctx)) }() sf.AddActionHandlers(account.NewProtocol()) defer func() { require.NoError(sf.Stop(ctx)) require.NoError(bc.Stop(ctx)) testutil.CleanupPath(t, testTriePath) testutil.CleanupPath(t, testDBPath) }() require.NoError(addTestingTsfBlocks(bc)) require.True(5 == bc.TipHeight()) // delete state db and recover to tip testutil.CleanupPath(t, testTriePath) sf, err = factory.NewFactory(cfg, factory.DefaultTrieOption()) require.NoError(err) require.NoError(sf.Start(context.Background())) sf.AddActionHandlers(account.NewProtocol()) sf.AddActionHandlers(vote.NewProtocol(bc)) chain, ok := bc.(*blockchain) require.True(ok) chain.sf = sf require.NoError(chain.startExistingBlockchain(0)) height, _ := chain.sf.Height() require.Equal(bc.TipHeight(), height) // recover to height 3 testutil.CleanupPath(t, testTriePath) sf, err = factory.NewFactory(cfg, factory.DefaultTrieOption()) require.NoError(err) require.NoError(sf.Start(context.Background())) sf.AddActionHandlers(account.NewProtocol()) sf.AddActionHandlers(vote.NewProtocol(bc)) chain.sf = sf require.NoError(chain.startExistingBlockchain(3)) height, _ = chain.sf.Height() require.Equal(bc.TipHeight(), height) require.True(3 == height) } func addCreatorToFactory(sf factory.Factory) error { ws, err := sf.NewWorkingSet() if err != nil { return err } if _, err = account.LoadOrCreateAccount(ws, ta.Addrinfo["producer"].Bech32(), Gen.TotalSupply); err != nil { return err } gasLimit := testutil.TestGasLimit ctx := protocol.WithRunActionsCtx(context.Background(), protocol.RunActionsCtx{ ProducerAddr: ta.Addrinfo["producer"].Bech32(), GasLimit: &gasLimit, EnableGasCharge: testutil.EnableGasCharge, }) if _, _, err = ws.RunActions(ctx, 0, nil); err != nil { return err } return sf.Commit(ws) }
1
14,872
line is 161 characters (from `lll`)
iotexproject-iotex-core
go
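A minimal sketch of how the 161-character call flagged by `lll` could be wrapped to stay under the line-length limit; the argument list is taken verbatim from the test above:

tsf, err := testutil.SignedTransfer(
	a, c, priKeyA, 1,
	big.NewInt(2), []byte{},
	testutil.TestGasLimit,
	big.NewInt(testutil.TestGasPrice),
)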
@@ -19,6 +19,8 @@ import ( "fmt" "time" + "github.com/chaos-mesh/chaos-mesh/pkg/controllerutils" + dnspb "github.com/chaos-mesh/k8s_dns_chaos/pb" "github.com/go-logr/logr" "golang.org/x/sync/errgroup"
1
// Copyright 2020 Chaos Mesh Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package dnschaos import ( "context" "errors" "fmt" "time" dnspb "github.com/chaos-mesh/k8s_dns_chaos/pb" "github.com/go-logr/logr" "golang.org/x/sync/errgroup" "google.golang.org/grpc" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/tools/cache" ctrl "sigs.k8s.io/controller-runtime" kubeclient "sigs.k8s.io/controller-runtime/pkg/client" "github.com/chaos-mesh/chaos-mesh/api/v1alpha1" "github.com/chaos-mesh/chaos-mesh/controllers/config" "github.com/chaos-mesh/chaos-mesh/controllers/recover" "github.com/chaos-mesh/chaos-mesh/pkg/chaosdaemon/client" "github.com/chaos-mesh/chaos-mesh/pkg/chaosdaemon/pb" "github.com/chaos-mesh/chaos-mesh/pkg/events" "github.com/chaos-mesh/chaos-mesh/pkg/finalizer" "github.com/chaos-mesh/chaos-mesh/pkg/router" ctx "github.com/chaos-mesh/chaos-mesh/pkg/router/context" end "github.com/chaos-mesh/chaos-mesh/pkg/router/endpoint" "github.com/chaos-mesh/chaos-mesh/pkg/selector" ) // endpoint is dns-chaos reconciler type endpoint struct { ctx.Context } type recoverer struct { kubeclient.Client Log logr.Logger } // Apply applies dns-chaos func (r *endpoint) Apply(ctx context.Context, req ctrl.Request, chaos v1alpha1.InnerObject) error { dnschaos, ok := chaos.(*v1alpha1.DNSChaos) if !ok { err := errors.New("chaos is not DNSChaos") r.Log.Error(err, "chaos is not DNSChaos", "chaos", chaos) return err } pods, err := selector.SelectAndFilterPods(ctx, r.Client, r.Reader, &dnschaos.Spec, config.ControllerCfg.ClusterScoped, config.ControllerCfg.TargetNamespace, config.ControllerCfg.AllowedNamespaces, config.ControllerCfg.IgnoredNamespaces) if err != nil { r.Log.Error(err, "failed to select and generate pods") return err } // get dns server's ip used for chaos service, err := selector.GetService(ctx, r.Client, "", config.ControllerCfg.Namespace, config.ControllerCfg.DNSServiceName) if err != nil { r.Log.Error(err, "fail to get service") return err } r.Log.Info("Set DNS chaos to DNS service", "ip", service.Spec.ClusterIP) err = r.setDNSServerRules(service.Spec.ClusterIP, config.ControllerCfg.DNSServicePort, dnschaos.Name, pods, dnschaos.Spec.Action, dnschaos.Spec.Scope) if err != nil { r.Log.Error(err, "fail to set DNS server rules") return err } if err = r.applyAllPods(ctx, pods, dnschaos, service.Spec.ClusterIP); err != nil { r.Log.Error(err, "failed to apply chaos on all pods") return err } dnschaos.Status.Experiment.PodRecords = make([]v1alpha1.PodStatus, 0, len(pods)) for _, pod := range pods { ps := v1alpha1.PodStatus{ Namespace: pod.Namespace, Name: pod.Name, HostIP: pod.Status.HostIP, PodIP: pod.Status.PodIP, } dnschaos.Status.Experiment.PodRecords = append(dnschaos.Status.Experiment.PodRecords, ps) } r.Event(dnschaos, v1.EventTypeNormal, events.ChaosInjected, "") return nil } // Recover means the reconciler recovers the chaos action func (r *endpoint) Recover(ctx context.Context, req ctrl.Request, chaos v1alpha1.InnerObject) error { dnschaos, ok := chaos.(*v1alpha1.DNSChaos) if !ok { err := errors.New("chaos is not DNSChaos") 
r.Log.Error(err, "chaos is not DNSChaos", "chaos", chaos) return err } // get dns server's ip used for chaos service, err := selector.GetService(ctx, r.Client, "", config.ControllerCfg.Namespace, config.ControllerCfg.DNSServiceName) if err != nil { r.Log.Error(err, "fail to get service") return err } r.Log.Info("Cancel DNS chaos to DNS service", "ip", service.Spec.ClusterIP) r.cancelDNSServerRules(service.Spec.ClusterIP, config.ControllerCfg.DNSServicePort, dnschaos.Name) rd := recover.Delegate{Client: r.Client, Log: r.Log, RecoverIntf: &recoverer{r.Client, r.Log}} finalizers, err := rd.CleanFinalizersAndRecover(ctx, chaos, dnschaos.Finalizers, dnschaos.Annotations) if err != nil { return err } dnschaos.Finalizers = finalizers r.Event(dnschaos, v1.EventTypeNormal, events.ChaosRecovered, "") return nil } func (r *recoverer) RecoverPod(ctx context.Context, pod *v1.Pod, somechaos v1alpha1.InnerObject) error { r.Log.Info("Try to recover pod", "namespace", pod.Namespace, "name", pod.Name) daemonClient, err := client.NewChaosDaemonClient(ctx, r.Client, pod, config.ControllerCfg.ChaosDaemonPort) if err != nil { r.Log.Error(err, "get chaos daemon client") return err } defer daemonClient.Close() if len(pod.Status.ContainerStatuses) == 0 { return fmt.Errorf("%s %s can't get the state of container", pod.Namespace, pod.Name) } target := pod.Status.ContainerStatuses[0].ContainerID _, err = daemonClient.SetDNSServer(ctx, &pb.SetDNSServerRequest{ ContainerId: target, Enable: false, EnterNS: true, }) if err != nil { r.Log.Error(err, "recover pod for DNS chaos") return err } return nil } // Object would return the instance of chaos func (r *endpoint) Object() v1alpha1.InnerObject { return &v1alpha1.DNSChaos{} } func (r *endpoint) applyAllPods(ctx context.Context, pods []v1.Pod, chaos *v1alpha1.DNSChaos, dnsServerIP string) error { g := errgroup.Group{} for index := range pods { pod := &pods[index] key, err := cache.MetaNamespaceKeyFunc(pod) if err != nil { r.Log.Error(err, "get meta namespace key") return err } chaos.Finalizers = finalizer.InsertFinalizer(chaos.Finalizers, key) g.Go(func() error { return r.applyPod(ctx, pod, dnsServerIP) }) } err := g.Wait() if err != nil { r.Log.Error(err, "g.Wait") return err } return nil } func (r *endpoint) applyPod(ctx context.Context, pod *v1.Pod, dnsServerIP string) error { r.Log.Info("Try to apply dns chaos", "namespace", pod.Namespace, "name", pod.Name) daemonClient, err := client.NewChaosDaemonClient(ctx, r.Client, pod, config.ControllerCfg.ChaosDaemonPort) if err != nil { r.Log.Error(err, "get chaos daemon client") return err } defer daemonClient.Close() if len(pod.Status.ContainerStatuses) == 0 { return fmt.Errorf("%s %s can't get the state of container", pod.Namespace, pod.Name) } target := pod.Status.ContainerStatuses[0].ContainerID _, err = daemonClient.SetDNSServer(ctx, &pb.SetDNSServerRequest{ ContainerId: target, DnsServer: dnsServerIP, Enable: true, EnterNS: true, }) if err != nil { r.Log.Error(err, "set dns server") return err } return nil } func (r *endpoint) setDNSServerRules(dnsServerIP string, port int, name string, pods []v1.Pod, action v1alpha1.DNSChaosAction, scope v1alpha1.DNSChaosScope) error { r.Log.Info("setDNSServerRules", "name", name) pbPods := make([]*dnspb.Pod, len(pods)) for i, pod := range pods { pbPods[i] = &dnspb.Pod{ Name: pod.Name, Namespace: pod.Namespace, } } conn, err := grpc.Dial(fmt.Sprintf("%s:%d", dnsServerIP, port), grpc.WithInsecure()) if err != nil { return err } defer conn.Close() c := dnspb.NewDNSClient(conn) request := 
&dnspb.SetDNSChaosRequest{ Name: name, Action: string(action), Pods: pbPods, Scope: string(scope), } ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() response, err := c.SetDNSChaos(ctx, request) if err != nil { return err } if !response.Result { return fmt.Errorf("set dns chaos to dns server error %s", response.Msg) } return nil } func (r *endpoint) cancelDNSServerRules(dnsServerIP string, port int, name string) error { conn, err := grpc.Dial(fmt.Sprintf("%s:%d", dnsServerIP, port), grpc.WithInsecure()) if err != nil { return err } defer conn.Close() c := dnspb.NewDNSClient(conn) request := &dnspb.CancelDNSChaosRequest{ Name: name, } ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() response, err := c.CancelDNSChaos(ctx, request) if err != nil { return err } if !response.Result { return fmt.Errorf("set dns chaos to dns server error %s", response.Msg) } return nil } func init() { router.Register("dnschaos", &v1alpha1.DNSChaos{}, func(obj runtime.Object) bool { return true }, func(ctx ctx.Context) end.Endpoint { return &endpoint{ Context: ctx, } }) }
1
19,806
Please re-format / group the imports on this line. You might need a little manual work.
chaos-mesh-chaos-mesh
go
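The grouping the reviewer is asking for would presumably split the imports into stdlib, third-party, and chaos-mesh-local blocks, with the new `controllerutils` package in the local block rather than among the third-party imports — a sketch (abridged to the imports the patch touches):

import (
	"context"
	"errors"
	"fmt"
	"time"

	dnspb "github.com/chaos-mesh/k8s_dns_chaos/pb"
	"github.com/go-logr/logr"
	"golang.org/x/sync/errgroup"

	"github.com/chaos-mesh/chaos-mesh/api/v1alpha1"
	"github.com/chaos-mesh/chaos-mesh/pkg/controllerutils"
)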
@@ -35,7 +35,11 @@ type DaisyWorker interface { // designed to be run once and discarded. In other words, don't run RunAndReadSerialValue // twice on the same instance. func NewDaisyWorker(wf *daisy.Workflow, env EnvironmentSettings, logger logging.Logger) DaisyWorker { - return &defaultDaisyWorker{wf, env, logger} + worker := &defaultDaisyWorker{wf, env, logger} + worker.env.ApplyToWorkflow(worker.wf) + worker.env.ApplyWorkerCustomizations(worker.wf) + daisy_utils.UpdateAllInstanceNoExternalIP(worker.wf, worker.env.NoExternalIP) + return worker } type defaultDaisyWorker struct {
1
// Copyright 2020 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package daisycommon import ( "context" daisy_utils "github.com/GoogleCloudPlatform/compute-image-tools/cli_tools/common/utils/daisy" "github.com/GoogleCloudPlatform/compute-image-tools/cli_tools/common/utils/logging" "github.com/GoogleCloudPlatform/compute-image-tools/daisy" ) // To rebuild the mock for DaisyWorker, run `go generate ./...` //go:generate go run github.com/golang/mock/mockgen -package mocks -source $GOFILE -destination ../mocks/mock_daisy_worker.go // DaisyWorker is a facade over daisy.Workflow to facilitate mocking. type DaisyWorker interface { RunAndReadSerialValue(key string, vars map[string]string) (string, error) Cancel(reason string) bool } // NewDaisyWorker returns an implementation of DaisyWorker. The returned value is // designed to be run once and discarded. In other words, don't run RunAndReadSerialValue // twice on the same instance. func NewDaisyWorker(wf *daisy.Workflow, env EnvironmentSettings, logger logging.Logger) DaisyWorker { return &defaultDaisyWorker{wf, env, logger} } type defaultDaisyWorker struct { wf *daisy.Workflow env EnvironmentSettings logger logging.Logger } // runAndReadSerialValue runs the daisy workflow with the supplied vars, and returns the serial // output value associated with the supplied key. func (w *defaultDaisyWorker) RunAndReadSerialValue(key string, vars map[string]string) (string, error) { w.env.ApplyToWorkflow(w.wf) w.env.ApplyWorkerCustomizations(w.wf) for k, v := range vars { w.wf.AddVar(k, v) } err := w.wf.RunWithModifiers(context.Background(), w.preValidateFunction, w.postValidateFunction) if w.wf.Logger != nil { for _, trace := range w.wf.Logger.ReadSerialPortLogs() { w.logger.Trace(trace) } } if err != nil { return "", err } return w.wf.GetSerialConsoleOutputValue(key), nil } func (w *defaultDaisyWorker) Cancel(reason string) bool { if w.wf != nil { w.wf.CancelWithReason(reason) return true } //indicate cancel was not performed return false } func (w *defaultDaisyWorker) preValidateFunction(wf *daisy.Workflow) { // no-op } func (w *defaultDaisyWorker) postValidateFunction(wf *daisy.Workflow) { daisy_utils.UpdateAllInstanceNoExternalIP(wf, w.env.NoExternalIP) }
1
13,798
minor, you can just use `wf` instead of `worker.wf`
GoogleCloudPlatform-compute-image-tools
go
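Applying the reviewer's suggestion to the patch above — using the `wf` parameter directly instead of reaching back through `worker` — the constructor might read:

// NewDaisyWorker returns an implementation of DaisyWorker, applying the
// environment settings to the workflow before handing it back.
func NewDaisyWorker(wf *daisy.Workflow, env EnvironmentSettings, logger logging.Logger) DaisyWorker {
	env.ApplyToWorkflow(wf)
	env.ApplyWorkerCustomizations(wf)
	daisy_utils.UpdateAllInstanceNoExternalIP(wf, env.NoExternalIP)
	return &defaultDaisyWorker{wf, env, logger}
}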
@@ -161,11 +161,12 @@ public final class HttpRequestUtilsTest { @Test public void testGetJsonBodyForSingleMapObject() throws IOException, ServletException { final HttpServletRequest httpRequest = Mockito.mock(HttpServletRequest.class); - final String originalString = " {\n" + " \"action\": \"update\",\n" + " \"table\": \"ramp\",\n" - + " \"conditions\" : {\n" + " \"rampId\" : \"dali\"\n" + " },\n" - + " \"values\": {\n" - + " \"rampStage\": 2,\n" + " \"lastUpdatedTime\": 1566259437000\n" + " }\n" - + " }\n"; + final String originalString = + " {\n" + " \"action\": \"update\",\n" + " \"table\": \"ramp\",\n" + + " \"conditions\" : {\n" + " \"rampId\" : \"dali\"\n" + " },\n" + + " \"values\": {\n" + + " \"rampStage\": 2,\n" + " \"lastUpdatedTime\": 1566259437000\n" + " }\n" + + " }\n"; final InputStream inputStream = new ByteArrayInputStream(originalString.getBytes(UTF_8)); final InputStreamReader inputStreamReader = new InputStreamReader(inputStream, UTF_8); final BufferedReader bufferedReader = new BufferedReader(inputStreamReader);
1
/* * Copyright 2015 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.server; import static java.nio.charset.StandardCharsets.UTF_8; import azkaban.executor.ExecutableFlow; import azkaban.executor.ExecutionOptions; import azkaban.executor.ExecutorManagerException; import azkaban.sla.SlaAction; import azkaban.sla.SlaOption; import azkaban.sla.SlaType; import azkaban.user.Permission.Type; import azkaban.user.User; import azkaban.user.UserManager; import azkaban.user.UserManagerException; import azkaban.utils.TestUtils; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSet; import java.io.BufferedReader; import java.io.ByteArrayInputStream; import java.io.IOException; import java.io.InputStream; import java.io.InputStreamReader; import java.time.Duration; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; import javax.servlet.ServletException; import javax.servlet.http.HttpServletRequest; import org.junit.Assert; import org.junit.Test; import org.mockito.Mockito; /** * Test class for HttpRequestUtils */ public final class HttpRequestUtilsTest { /* Helper method to get a test flow and add required properties */ public static ExecutableFlow createExecutableFlow() throws IOException { final ExecutableFlow flow = TestUtils.createTestExecutableFlow("exectest1", "exec1"); flow.getExecutionOptions().getFlowParameters() .put(ExecutionOptions.FLOW_PRIORITY, "1"); flow.getExecutionOptions().getFlowParameters() .put(ExecutionOptions.USE_EXECUTOR, "2"); return flow; } /* Test that flow properties are removed for non-admin user */ @Test public void TestFilterNonAdminOnlyFlowParams() throws IOException, ExecutorManagerException, UserManagerException { final ExecutableFlow flow = createExecutableFlow(); final UserManager manager = TestUtils.createTestXmlUserManager(); final User user = manager.getUser("testUser", "testUser"); HttpRequestUtils.filterAdminOnlyFlowParams(manager, flow.getExecutionOptions(), user); Assert.assertFalse(flow.getExecutionOptions().getFlowParameters() .containsKey(ExecutionOptions.FLOW_PRIORITY)); Assert.assertFalse(flow.getExecutionOptions().getFlowParameters() .containsKey(ExecutionOptions.USE_EXECUTOR)); } /* Test that flow properties are retained for admin user */ @Test public void TestFilterAdminOnlyFlowParams() throws IOException, ExecutorManagerException, UserManagerException { final ExecutableFlow flow = createExecutableFlow(); final UserManager manager = TestUtils.createTestXmlUserManager(); final User user = manager.getUser("testAdmin", "testAdmin"); HttpRequestUtils.filterAdminOnlyFlowParams(manager, flow.getExecutionOptions(), user); Assert.assertTrue(flow.getExecutionOptions().getFlowParameters() .containsKey(ExecutionOptions.FLOW_PRIORITY)); Assert.assertTrue(flow.getExecutionOptions().getFlowParameters() .containsKey(ExecutionOptions.USE_EXECUTOR)); } /* Test exception, if param is a valid 
integer */ @Test public void testvalidIntegerParam() throws ExecutorManagerException { final Map<String, String> params = new HashMap<>(); params.put("param1", "123"); HttpRequestUtils.validateIntegerParam(params, "param1"); } /* Test exception, if param is not a valid integer */ @Test(expected = ExecutorManagerException.class) public void testInvalidIntegerParam() throws ExecutorManagerException { final Map<String, String> params = new HashMap<>(); params.put("param1", "1dff2"); HttpRequestUtils.validateIntegerParam(params, "param1"); } /* Verify permission for admin user */ @Test public void testHasAdminPermission() throws UserManagerException { final UserManager manager = TestUtils.createTestXmlUserManager(); final User adminUser = manager.getUser("testAdmin", "testAdmin"); Assert.assertTrue(HttpRequestUtils.hasPermission(manager, adminUser, Type.ADMIN)); } /* verify permission for non-admin user */ @Test public void testHasOrdinaryPermission() throws UserManagerException { final UserManager manager = TestUtils.createTestXmlUserManager(); final User testUser = manager.getUser("testUser", "testUser"); Assert.assertFalse(HttpRequestUtils.hasPermission(manager, testUser, Type.ADMIN)); } @Test public void testGetJsonBodyForListOfMapObject() throws IOException, ServletException { final HttpServletRequest httpRequest = Mockito.mock(HttpServletRequest.class); final String originalString = "[\n" + " {\n" + " \"action\": \"update\",\n" + " \"table\": \"ramp\",\n" + " \"conditions\" : {\n" + " \"rampId\" : \"dali\"\n" + " },\n" + " \"values\": {\n" + " \"rampStage\": 2,\n" + " \"lastUpdatedTime\": 1566259437000\n" + " }\n" + " }\n" + "]"; final InputStream inputStream = new ByteArrayInputStream(originalString.getBytes(UTF_8)); final InputStreamReader inputStreamReader = new InputStreamReader(inputStream, UTF_8); final BufferedReader bufferedReader = new BufferedReader(inputStreamReader); Mockito.when(httpRequest.getReader()).thenReturn(bufferedReader); final Object object = HttpRequestUtils.getJsonBody(httpRequest); Assert.assertTrue(object instanceof List); final List<Map<String, Object>> list = (List<Map<String, Object>>) object; Assert.assertEquals(list.size(), 1); Assert.assertEquals(list.get(0).get("action").toString(), "update"); Assert.assertEquals(list.get(0).get("table").toString(), "ramp"); Assert.assertEquals( ((Map<String, Object>) list.get(0).get("conditions")).get("rampId").toString(), "dali"); Assert.assertEquals(((Map<String, Object>) list.get(0).get("values")).get("rampStage"), 2); Assert.assertEquals(((Map<String, Object>) list.get(0).get("values")).get("lastUpdatedTime"), 1566259437000L); } @Test public void testGetJsonBodyForSingleMapObject() throws IOException, ServletException { final HttpServletRequest httpRequest = Mockito.mock(HttpServletRequest.class); final String originalString = " {\n" + " \"action\": \"update\",\n" + " \"table\": \"ramp\",\n" + " \"conditions\" : {\n" + " \"rampId\" : \"dali\"\n" + " },\n" + " \"values\": {\n" + " \"rampStage\": 2,\n" + " \"lastUpdatedTime\": 1566259437000\n" + " }\n" + " }\n"; final InputStream inputStream = new ByteArrayInputStream(originalString.getBytes(UTF_8)); final InputStreamReader inputStreamReader = new InputStreamReader(inputStream, UTF_8); final BufferedReader bufferedReader = new BufferedReader(inputStreamReader); Mockito.when(httpRequest.getReader()).thenReturn(bufferedReader); final Object object = HttpRequestUtils.getJsonBody(httpRequest); Assert.assertTrue(object instanceof Map); final Map<String, Object> singleObj = 
(Map<String, Object>) object; Assert.assertEquals(singleObj.get("action").toString(), "update"); Assert.assertEquals(singleObj.get("table").toString(), "ramp"); Assert .assertEquals(((Map<String, Object>) singleObj.get("conditions")).get("rampId").toString(), "dali"); Assert.assertEquals(((Map<String, Object>) singleObj.get("values")).get("rampStage"), 2); Assert.assertEquals(((Map<String, Object>) singleObj.get("values")).get("lastUpdatedTime"), 1566259437000L); } @Test public void testParseFlowOptionsSla() throws Exception { HttpServletRequest req = mockRequestWithSla(ImmutableMap.of( // job_name, status, duration, is_email, is_kill "slaSettings[1]", ",FINISH,2:30,true,false", "slaSettings[2]", "test_job,SUCCESS,12:00,false,true", "slaSettings[3]", ",SUCCESS,12:00,true,true")); ExecutionOptions options = HttpRequestUtils.parseFlowOptions(req, "test-flow"); List<SlaOption> slaOptions = options.getSlaOptions(); final List<SlaOption> expected = Arrays.asList( new SlaOption(SlaType.FLOW_FINISH, "test-flow", "", Duration.ofMinutes(150), ImmutableSet.of(SlaAction.ALERT), ImmutableList.of()), new SlaOption(SlaType.JOB_SUCCEED, "test-flow", "test_job", Duration.ofMinutes(720), ImmutableSet.of(SlaAction.KILL), ImmutableList.of()), new SlaOption(SlaType.FLOW_SUCCEED, "test-flow", "", Duration.ofMinutes(720), ImmutableSet.of(SlaAction.ALERT, SlaAction.KILL), ImmutableList.of()) ); Assert.assertEquals(expected, slaOptions); } @Test public void testParseFlowOptionsSlaWithEmail() throws Exception { HttpServletRequest req = mockRequestWithSla(ImmutableMap.of( "slaSettings[1]", ",FINISH,2:30,true,false", "slaEmails", "[email protected],[email protected]")); ExecutionOptions options = HttpRequestUtils.parseFlowOptions(req, "test-flow"); List<SlaOption> slaOptions = options.getSlaOptions(); final List<SlaOption> expected = Arrays.asList(new SlaOption(SlaType.FLOW_FINISH, "test-flow", "", Duration.ofMinutes(150), ImmutableSet.of(SlaAction.ALERT), ImmutableList.of("[email protected]", "[email protected]"))); Assert.assertEquals(expected, slaOptions); } private static HttpServletRequest mockRequestWithSla(Map<String, String> params) { HttpServletRequest req = Mockito.mock(HttpServletRequest.class); Mockito.when(req.getParameterNames()).thenReturn(Collections.enumeration(params.keySet())); Mockito.when(req.getParameter(Mockito.anyString())) .thenAnswer(i -> params.get(i.getArgument(0, String.class))); return req; } }
1
20,040
I would suggest putting this in the resources directory, in the same package as this test class. Then you can utilize the method azkaban.utils.TestUtils#readResource to read it as a string.
azkaban-azkaban
java
@@ -21,6 +21,7 @@ import ( "text/template" errors "github.com/openebs/maya/pkg/errors/v1alpha1" + templates "github.com/openebs/maya/pkg/upgrade/templates" appsv1 "k8s.io/api/apps/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types"
1
/* Copyright 2019 The OpenEBS Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package v1alpha1 import ( "fmt" "text/template" errors "github.com/openebs/maya/pkg/errors/v1alpha1" appsv1 "k8s.io/api/apps/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" ) type cspDeployPatchDetails struct { UpgradeVersion, ImageTag, PoolImage, PoolMgmtImage, MExporterImage string } func getCSPDeployPatchDetails( d *appsv1.Deployment, ) (*cspDeployPatchDetails, error) { patchDetails := &cspDeployPatchDetails{} cstorPoolImage, err := getBaseImage(d, "cstor-pool") if err != nil { return nil, err } cstorPoolMgmtImage, err := getBaseImage(d, "cstor-pool-mgmt") if err != nil { return nil, err } MExporterImage, err := getBaseImage(d, "maya-exporter") if err != nil { return nil, err } patchDetails.PoolImage = cstorPoolImage patchDetails.PoolMgmtImage = cstorPoolMgmtImage patchDetails.MExporterImage = MExporterImage if imageTag != "" { patchDetails.ImageTag = imageTag } else { patchDetails.ImageTag = upgradeVersion } return patchDetails, nil } func cspUpgrade(cspName, openebsNamespace string) error { if cspName == "" { return errors.Errorf("missing csp name") } if openebsNamespace == "" { return errors.Errorf("missing openebs namespace") } cspObj, err := cspClient.Get(cspName, metav1.GetOptions{}) if err != nil { return errors.Wrapf(err, "failed to get csp %s", cspName) } cspVersion := cspObj.Labels["openebs.io/version"] if (cspVersion != currentVersion) && (cspVersion != upgradeVersion) { return errors.Errorf( "cstor pool version %s is neither %s nor %s\n", cspVersion, currentVersion, upgradeVersion, ) } cspLabel := "openebs.io/cstor-pool=" + cspName cspDeployObj, err := getDeployment(cspLabel, openebsNamespace) if err != nil { return errors.Wrapf(err, "failed to get deployment for csp %s", cspName) } if cspDeployObj.Name == "" { return errors.Errorf("missing deployment name for csp %s", cspName) } cspDeployVersion, err := getOpenEBSVersion(cspDeployObj) if err != nil { return err } if (cspDeployVersion != currentVersion) && (cspDeployVersion != upgradeVersion) { return errors.Errorf( "cstor pool version %s is neither %s nor %s\n", cspVersion, currentVersion, upgradeVersion, ) } if cspVersion == currentVersion { tmpl, err := template.New("cspPatch").Parse(openebsVersionPatchTemplate) if err != nil { return errors.Wrapf(err, "failed to create template for csp patch") } err = tmpl.Execute(&buffer, upgradeVersion) if err != nil { return errors.Wrapf(err, "failed to populate template for csp patch") } cspPatch := buffer.String() buffer.Reset() _, err = cspClient.Patch( cspName, types.MergePatchType, []byte(cspPatch), ) if err != nil { return errors.Wrapf(err, "failed to patch csp %s", cspName) } fmt.Printf("patched csp %s\n", cspName) } else { fmt.Printf("csp %s already in %s version\n", cspName, upgradeVersion) } if cspDeployVersion == currentVersion { patchDetails, err := getCSPDeployPatchDetails(cspDeployObj) if err != nil { return err } patchDetails.UpgradeVersion = upgradeVersion tmpl, err := 
template.New("cspDeployPatch").Parse(cspDeployPatchTemplate) if err != nil { return errors.Wrapf(err, "failed to create template for csp deployment patch") } err = tmpl.Execute(&buffer, patchDetails) if err != nil { return errors.Wrapf(err, "failed to populate template for csp deployment patch") } cspDeployPatch := buffer.String() buffer.Reset() err = patchDelpoyment( cspDeployObj.Name, openebsNamespace, types.StrategicMergePatchType, []byte(cspDeployPatch), ) if err != nil { return errors.Wrapf(err, "failed to patch deployment %s", cspDeployObj.Name) } fmt.Printf("patched csp deployment %s\n", cspName) } else { fmt.Printf("csp deployment %s already in %s version\n", cspDeployObj.Name, upgradeVersion, ) } fmt.Println("Upgrade Successful for csp", cspName) return nil }
1
17,156
could not import github.com/openebs/maya/pkg/upgrade/templates (invalid package name: "") (from `typecheck`)
openebs-maya
go
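The `typecheck` failure ("invalid package name: \"\"") usually means the imported directory contains no buildable Go file, so the aliased import added by the patch cannot resolve. A minimal sketch of what `pkg/upgrade/templates` would need for the import to compile — the file and constant names here are assumptions for illustration, not the project's actual contents:

// pkg/upgrade/templates/templates.go (hypothetical file)
package templates

// CSPDeployPatchTemplate would hold the deployment patch template that the
// upgrade code executes; the name and body are illustrative only.
const CSPDeployPatchTemplate = `{"spec": {"template": {}}}`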
@@ -182,7 +182,7 @@ namespace OpenTelemetry.Exporter.Prometheus.Tests // Once a pull model is implemented, we'll not have this issue and we need to add tests // at that time. - // If in future, there is a official .NET Prometheus Client library, and OT Exporter + // If in future, there is a official .NET Prometheus Client library, and OTel Exporter // choses to take a dependency on it, then none of these concerns arise. } }
1
// <copyright file="PrometheusExporterTests.cs" company="OpenTelemetry Authors"> // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // </copyright> using System; using System.Collections.Generic; using System.Net; using System.Net.Http; using System.Threading.Tasks; #if !NETFRAMEWORK using Microsoft.AspNetCore.Hosting; using Microsoft.AspNetCore.TestHost; using Microsoft.Extensions.DependencyInjection; #endif using OpenTelemetry.Metrics.Export; using OpenTelemetry.Trace; using Xunit; using Xunit.Abstractions; namespace OpenTelemetry.Exporter.Prometheus.Tests { public class PrometheusExporterTests { private const int MetricPushIntervalMsec = 100; private const int WaitDuration = MetricPushIntervalMsec + 100; private readonly ITestOutputHelper output; public PrometheusExporterTests(ITestOutputHelper output) { this.output = output; } [Fact] public async Task E2ETestMetricsHttpServerAsync() { var promOptions = new PrometheusExporterOptions() { Url = "http://localhost:9184/metrics/" }; var promExporter = new PrometheusExporter(promOptions); var simpleProcessor = new UngroupedBatcher(); var metricsHttpServer = new PrometheusExporterMetricsHttpServer(promExporter); try { metricsHttpServer.Start(); CollectMetrics(simpleProcessor, promExporter); } finally { await Task.Delay(WaitDuration); var client = new HttpClient(); var response = await client.GetAsync("http://localhost:9184/metrics/"); Assert.Equal(HttpStatusCode.OK, response.StatusCode); var responseText = response.Content.ReadAsStringAsync().Result; this.output.WriteLine($"Response from metrics API is \n {responseText}"); this.ValidateResponse(responseText); metricsHttpServer.Stop(); } } #if !NETFRAMEWORK [Fact] public async Task E2ETestMiddleware() { var promOptions = new PrometheusExporterOptions { Url = "/metrics" }; var promExporter = new PrometheusExporter(promOptions); var simpleProcessor = new UngroupedBatcher(); var builder = new WebHostBuilder() .Configure(app => { app.UsePrometheus(); }) .ConfigureServices(services => { services.AddSingleton(promOptions); services.AddSingleton(promExporter); // Temporary till we figure out metrics configuration }); var server = new TestServer(builder); var client = server.CreateClient(); try { CollectMetrics(simpleProcessor, promExporter); } finally { var response = await client.GetAsync("/foo"); Assert.Equal(HttpStatusCode.NotFound, response.StatusCode); await Task.Delay(WaitDuration); response = await client.GetAsync("/metrics"); Assert.Equal(HttpStatusCode.OK, response.StatusCode); var responseText = await response.Content.ReadAsStringAsync(); this.output.WriteLine($"Response from metrics API is \n {responseText}"); this.ValidateResponse(responseText); } } #endif private static void CollectMetrics(UngroupedBatcher simpleProcessor, MetricExporter exporter) { var meter = Sdk.CreateMeterProvider(mb => { mb.SetMetricProcessor(simpleProcessor); mb.SetMetricExporter(exporter); mb.SetMetricPushInterval(TimeSpan.FromMilliseconds(MetricPushIntervalMsec)); 
}).GetMeter("library1"); var testCounter = meter.CreateInt64Counter("testCounter"); var testMeasure = meter.CreateInt64Measure("testMeasure"); var labels1 = new List<KeyValuePair<string, string>> { new KeyValuePair<string, string>("dim1", "value1"), new KeyValuePair<string, string>("dim2", "value1"), }; var labels2 = new List<KeyValuePair<string, string>> { new KeyValuePair<string, string>("dim1", "value2"), new KeyValuePair<string, string>("dim2", "value2"), }; var defaultContext = default(SpanContext); for (int i = 0; i < 10; i++) { testCounter.Add(defaultContext, 100, meter.GetLabelSet(labels1)); testCounter.Add(defaultContext, 10, meter.GetLabelSet(labels1)); testCounter.Add(defaultContext, 200, meter.GetLabelSet(labels2)); testCounter.Add(defaultContext, 10, meter.GetLabelSet(labels2)); testMeasure.Record(defaultContext, 10, meter.GetLabelSet(labels1)); testMeasure.Record(defaultContext, 100, meter.GetLabelSet(labels1)); testMeasure.Record(defaultContext, 5, meter.GetLabelSet(labels1)); testMeasure.Record(defaultContext, 500, meter.GetLabelSet(labels1)); } } private void ValidateResponse(string responseText) { // Validate counters. Assert.Contains("TYPE testCounter counter", responseText); Assert.Contains("testCounter{dim1=\"value1\",dim2=\"value1\"}", responseText); Assert.Contains("testCounter{dim1=\"value2\",dim2=\"value2\"}", responseText); // Validate measure. Assert.Contains("# TYPE testMeasure summary", responseText); // sum is 6150 = 10 * (10+100+5+500) Assert.Contains("testMeasure_sum{dim1=\"value1\"} 6150", responseText); // count is 10 * 4 Assert.Contains("testMeasure_count{dim1=\"value1\"} 40", responseText); // Min is 5 Assert.Contains("testMeasure{dim1=\"value1\",quantile=\"0\"} 5", responseText); // Max is 500 Assert.Contains("testMeasure{dim1=\"value1\",quantile=\"1\"} 500", responseText); // TODO: Validate that # TYPE occurs only once per metric. // Also validate that for every metric+dimension, there is only one row in the response. // Though the above are Prometheus Server requirements, we haven't enforced it in code. // This is because we have implemented Prometheus using a Push Controller, where // we accumulate metrics from each Push into exporter, and is used to construct // out for /metrics call. Because of this, its possible that multiple Push has occurred // before Prometheus server makes /metrics call. (i.e Prometheus scrape interval is much more // than Push interval scenario) // Once a pull model is implemented, we'll not have this issue and we need to add tests // at that time. // If in future, there is a official .NET Prometheus Client library, and OT Exporter // choses to take a dependency on it, then none of these concerns arise. } } }
1
16,052
a official -> an official
open-telemetry-opentelemetry-dotnet
cs
@@ -37,7 +37,7 @@ class ProxyListenerKinesis(ProxyListener): def forward_request(self, method, path, data, headers): data, encoding_type = self.decode_content(data or '{}', True) action = headers.get('X-Amz-Target', '').split('.')[-1] - if action == 'RegisterStreamConsumer': + if action == 'RegisterStreamConsumer' and config.KINESIS_PROVIDER == 'kinesalite': stream_arn = data.get('StreamARN', '').strip('" ') cons_arn = data.get('ConsumerARN', '').strip('" ') cons_name = data.get('ConsumerName', '').strip('" ')
1
import re import json import time import base64 import random import logging import cbor2 from requests.models import Response from localstack import config from localstack.constants import APPLICATION_JSON, APPLICATION_CBOR from localstack.utils.aws import aws_stack from localstack.utils.common import to_str, json_safe, clone, epoch_timestamp, now_utc from localstack.utils.analytics import event_publisher from localstack.services.awslambda import lambda_api from localstack.services.generic_proxy import ProxyListener, RegionBackend from localstack.utils.aws.aws_responses import convert_to_binary_event_payload LOG = logging.getLogger(__name__) # action headers (should be left here - imported/required by other files) ACTION_PREFIX = 'Kinesis_20131202' ACTION_PUT_RECORD = '%s.PutRecord' % ACTION_PREFIX ACTION_PUT_RECORDS = '%s.PutRecords' % ACTION_PREFIX ACTION_LIST_STREAMS = '%s.ListStreams' % ACTION_PREFIX class KinesisBackend(RegionBackend): def __init__(self): # list of stream consumer details self.stream_consumers = [] # maps stream name to list of enhanced monitoring metrics self.enhanced_metrics = {} class ProxyListenerKinesis(ProxyListener): def forward_request(self, method, path, data, headers): data, encoding_type = self.decode_content(data or '{}', True) action = headers.get('X-Amz-Target', '').split('.')[-1] if action == 'RegisterStreamConsumer': stream_arn = data.get('StreamARN', '').strip('" ') cons_arn = data.get('ConsumerARN', '').strip('" ') cons_name = data.get('ConsumerName', '').strip('" ') prev_consumer = find_consumer(cons_arn, cons_name, stream_arn) if prev_consumer: msg = 'Consumer %s already exists' % prev_consumer.get('ConsumerARN') return simple_error_response(msg, 400, 'ResourceAlreadyExists', encoding_type) consumer = clone(data) consumer['ConsumerStatus'] = 'ACTIVE' consumer['ConsumerARN'] = '%s/consumer/%s' % (stream_arn, cons_name) consumer['ConsumerCreationTimestamp'] = now_utc() consumer = json_safe(consumer) stream_consumers = KinesisBackend.get().stream_consumers stream_consumers.append(consumer) result = {'Consumer': consumer} return encoded_response(result, encoding_type) elif action == 'DeregisterStreamConsumer': def consumer_matches(c): stream_arn = data.get('StreamARN', '').strip('" ') cons_name = data.get('ConsumerName', '').strip('" ') cons_arn = data.get('ConsumerARN', '').strip('" ') return (c.get('ConsumerARN') == cons_arn or (c.get('StreamARN') == stream_arn and c.get('ConsumerName') == cons_name)) region = KinesisBackend.get() region.stream_consumers = [c for c in region.stream_consumers if not consumer_matches(c)] return {} elif action == 'ListStreamConsumers': stream_consumers = KinesisBackend.get().stream_consumers stream_arn = data.get('StreamARN', '').strip('" ') result = { 'Consumers': [c for c in stream_consumers if c.get('StreamARN') == stream_arn] } return encoded_response(result, encoding_type) elif action == 'DescribeStreamConsumer': consumer_arn = data.get('ConsumerARN', '').strip('" ') consumer_name = data.get('ConsumerName', '').strip('" ') stream_arn = data.get('StreamARN', '').strip('" ') consumer_to_locate = find_consumer(consumer_arn, consumer_name, stream_arn) if(not consumer_to_locate): error_msg = 'Consumer %s not found.' 
% (consumer_arn or consumer_name) return simple_error_response(error_msg, 400, 'ResourceNotFoundException', encoding_type) create_timestamp = consumer_to_locate.get('ConsumerCreationTimestamp') time_formated = int(create_timestamp) if encoding_type is not APPLICATION_JSON else create_timestamp result = { 'ConsumerDescription': { 'ConsumerARN': consumer_to_locate.get('ConsumerARN'), 'ConsumerCreationTimestamp': time_formated, 'ConsumerName': consumer_to_locate.get('ConsumerName'), 'ConsumerStatus': 'ACTIVE', 'StreamARN': data.get('StreamARN') } } return encoded_response(result, encoding_type) elif action == 'SubscribeToShard': result = subscribe_to_shard(data, headers) return result elif action == 'EnableEnhancedMonitoring': stream_name = data.get('StreamName', '').strip('" ') metrics = data.get('ShardLevelMetrics', []) enhanced_metrics = KinesisBackend.get().enhanced_metrics stream_metrics = enhanced_metrics[stream_name] = enhanced_metrics.get(stream_name) or [] stream_metrics += [m for m in metrics if m not in stream_metrics] return {} elif action == 'DisableEnhancedMonitoring': stream_name = data.get('StreamName', '').strip('" ') metrics = data.get('ShardLevelMetrics', []) enhanced_metrics = KinesisBackend.get().enhanced_metrics stream_metrics = enhanced_metrics.get(stream_name) or [] enhanced_metrics[stream_name] = [m for m in stream_metrics if m not in metrics] return result if random.random() < config.KINESIS_ERROR_PROBABILITY: if action in ['PutRecord', 'PutRecords']: return kinesis_error_response(data, action) return True def return_response(self, method, path, data, headers, response): action = headers.get('X-Amz-Target', '').split('.')[-1] data, encoding_type = self.decode_content(data or '{}', True) response._content = self.replace_in_encoded(response.content or '') records = [] if action in ('CreateStream', 'DeleteStream'): event_type = (event_publisher.EVENT_KINESIS_CREATE_STREAM if action == 'CreateStream' else event_publisher.EVENT_KINESIS_DELETE_STREAM) payload = {'n': event_publisher.get_hash(data.get('StreamName'))} if action == 'CreateStream': payload['s'] = data.get('ShardCount') event_publisher.fire_event(event_type, payload=payload) elif action == 'PutRecord': response_body = self.decode_content(response.content) # Note: avoid adding 'encryptionType':'NONE' in the event_record, as this breaks .NET Lambdas event_record = { 'approximateArrivalTimestamp': epoch_timestamp(), 'data': data['Data'], 'partitionKey': data['PartitionKey'], 'sequenceNumber': response_body.get('SequenceNumber') } event_records = [event_record] stream_name = data['StreamName'] lambda_api.process_kinesis_records(event_records, stream_name) elif action == 'PutRecords': event_records = [] response_body = self.decode_content(response.content) if 'Records' in response_body: response_records = response_body['Records'] records = data['Records'] for index in range(0, len(records)): record = records[index] # Note: avoid adding 'encryptionType':'NONE' in the event_record, as this breaks .NET Lambdas event_record = { 'approximateArrivalTimestamp': epoch_timestamp(), 'data': record['Data'], 'partitionKey': record['PartitionKey'], 'sequenceNumber': response_records[index].get('SequenceNumber') } event_records.append(event_record) stream_name = data['StreamName'] lambda_api.process_kinesis_records(event_records, stream_name) elif action == 'UpdateShardCount': # Currently kinesalite, which backs the Kinesis implementation for localstack, does # not support UpdateShardCount: # 
https://github.com/mhart/kinesalite/issues/61 # # [Terraform](https://www.terraform.io) makes the call to UpdateShardCount when it # applies Kinesis resources. A Terraform run fails when this is not present. # # The code that follows just returns a successful response, bypassing the 400 # response that kinesalite returns. # response = Response() response.status_code = 200 content = { 'CurrentShardCount': 1, 'StreamName': data['StreamName'], 'TargetShardCount': data['TargetShardCount'] } response.encoding = 'UTF-8' response._content = json.dumps(content) return response elif action == 'GetRecords': sdk_v2 = self.sdk_is_v2(headers.get('User-Agent', '').split(' ')[0]) results, encoding_type = self.decode_content(response.content, True) records = results.get('Records', []) if not records: return response for record in records: if sdk_v2: record['ApproximateArrivalTimestamp'] = int(record['ApproximateArrivalTimestamp']) if not isinstance(record['Data'], str): # Remove double quotes from data written as bytes # https://github.com/localstack/localstack/issues/3588 tmp = bytearray(record['Data']['data']) if len(tmp) >= 2 and tmp[0] == tmp[-1] == b'"'[0]: tmp = tmp[1:-1] if encoding_type == APPLICATION_JSON: record['Data'] = to_str(base64.b64encode(tmp)) else: record['Data'] = to_str(tmp) else: tmp = base64.b64decode(record['Data']) if len(tmp) >= 2 and tmp[0] == tmp[-1] == b'"'[0]: tmp = tmp[1:-1] record['Data'] = to_str(base64.b64encode(tmp)) response._content = cbor2.dumps(results) if encoding_type == APPLICATION_CBOR else json.dumps(results) return response def sdk_is_v2(self, user_agent): if re.search(r'\/2.\d+.\d+', user_agent): return True return False def replace_in_encoded(self, data): if not data: return '' def _replace(_data): return re.sub(r'arn:aws:kinesis:[^:]+:', 'arn:aws:kinesis:%s:' % aws_stack.get_region(), _data) decoded, type_encoding = self.decode_content(data, True) if type_encoding == APPLICATION_JSON: return _replace(to_str(data)) if type_encoding == APPLICATION_CBOR: replaced = _replace(json.dumps(decoded)) return cbor2.dumps(json.loads(replaced)) def decode_content(self, data, describe=False): content_type = '' try: decoded = json.loads(to_str(data)) content_type = APPLICATION_JSON except UnicodeDecodeError: decoded = cbor2.loads(data) content_type = APPLICATION_CBOR if describe: return decoded, content_type return decoded def encode_data(data, encoding_type): if encoding_type == APPLICATION_CBOR: return cbor2.dumps(data) return json.dumps(data) def encoded_response(data, encoding_type=APPLICATION_JSON, status_code=200): response = Response() response.status_code = status_code response.headers.update({'content-type': encoding_type}) response._content = encode_data(data, encoding_type) return response def subscribe_to_shard(data, headers): kinesis = aws_stack.connect_to_service('kinesis') stream_name = find_stream_for_consumer(data['ConsumerARN']) iter_type = data['StartingPosition']['Type'] kwargs = {} starting_sequence_number = data['StartingPosition'].get('SequenceNumber') or '0' if iter_type in ['AT_SEQUENCE_NUMBER', 'AFTER_SEQUENCE_NUMBER']: kwargs['StartingSequenceNumber'] = starting_sequence_number elif iter_type in ['AT_TIMESTAMP']: # or value is just an example timestamp from aws docs timestamp = data['StartingPosition'].get('Timestamp') or 1459799926.480 kwargs['Timestamp'] = timestamp iterator = kinesis.get_shard_iterator(StreamName=stream_name, ShardId=data['ShardId'], ShardIteratorType=iter_type, **kwargs)['ShardIterator'] data_needs_encoding = False if 'java' 
in headers.get('User-Agent', '').split(' ')[0]: data_needs_encoding = True def send_events(): yield convert_to_binary_event_payload('', event_type='initial-response') iter = iterator last_sequence_number = starting_sequence_number # TODO: find better way to run loop up to max 5 minutes (until connection terminates)! for i in range(5 * 60): result = None try: result = kinesis.get_records(ShardIterator=iter) except Exception as e: if 'ResourceNotFoundException' in str(e): LOG.debug('Kinesis stream "%s" has been deleted, closing shard subscriber' % stream_name) return raise iter = result.get('NextShardIterator') records = result.get('Records', []) for record in records: record['ApproximateArrivalTimestamp'] = record['ApproximateArrivalTimestamp'].timestamp() if data_needs_encoding: record['Data'] = base64.b64encode(record['Data']) record['Data'] = to_str(record['Data']) last_sequence_number = record['SequenceNumber'] if not records: time.sleep(1) continue response = { 'ChildShards': [], 'ContinuationSequenceNumber': last_sequence_number, 'MillisBehindLatest': 0, 'Records': json_safe(records), } result = json.dumps(response) yield convert_to_binary_event_payload(result, event_type='SubscribeToShardEvent') headers = {} return send_events(), headers def find_consumer(consumer_arn='', consumer_name='', stream_arn=''): stream_consumers = KinesisBackend.get().stream_consumers for consumer in stream_consumers: if consumer_arn and consumer_arn == consumer.get('ConsumerARN'): return consumer elif consumer_name == consumer.get('ConsumerName') and stream_arn == consumer.get('StreamARN'): return consumer def find_stream_for_consumer(consumer_arn): kinesis = aws_stack.connect_to_service('kinesis') for stream_name in kinesis.list_streams()['StreamNames']: stream_arn = aws_stack.kinesis_stream_arn(stream_name) for cons in kinesis.list_stream_consumers(StreamARN=stream_arn)['Consumers']: if cons['ConsumerARN'] == consumer_arn: return stream_name raise Exception('Unable to find stream for stream consumer %s' % consumer_arn) def simple_error_response(msg, code, type_error, encoding_type=APPLICATION_JSON): body = {'message': msg, '__type': type_error} return encoded_response(body, encoding_type, code) def kinesis_error_response(data, action): error_response = Response() if action == 'PutRecord': error_response.status_code = 400 content = { 'ErrorCode': 'ProvisionedThroughputExceededException', 'ErrorMessage': 'Rate exceeded for shard X in stream Y under account Z.' } else: error_response.status_code = 200 content = {'FailedRecordCount': 1, 'Records': []} for record in data.get('Records', []): content['Records'].append({ 'ErrorCode': 'ProvisionedThroughputExceededException', 'ErrorMessage': 'Rate exceeded for shard X in stream Y under account Z.' }) error_response._content = json.dumps(content) return error_response # instantiate listener UPDATE_KINESIS = ProxyListenerKinesis()
1
12,570
I only want the proxy request handling for this action to run for kinesalite.
localstack-localstack
py
@@ -272,7 +272,7 @@ var DatePicker = React.createClass({displayName: 'DatePicker', React.DOM.div(null, DateInput({ date: this.props.selected, - dateFormat: this.props.dateFormat, + dateFormat: this.props.dateFormat, focus: this.state.focus, onBlur: this.handleBlur, onFocus: this.handleFocus,
1
!function(e){if("object"==typeof exports&&"undefined"!=typeof module)module.exports=e();else if("function"==typeof define&&define.amd)define([],e);else{var f;"undefined"!=typeof window?f=window:"undefined"!=typeof global?f=global:"undefined"!=typeof self&&(f=self),f.DatePicker=e()}}(function(){var define,module,exports;return (function e(t,n,r){function s(o,u){if(!n[o]){if(!t[o]){var a=typeof require=="function"&&require;if(!u&&a)return a(o,!0);if(i)return i(o,!0);var f=new Error("Cannot find module '"+o+"'");throw f.code="MODULE_NOT_FOUND",f}var l=n[o]={exports:{}};t[o][0].call(l.exports,function(e){var n=t[o][1][e];return s(n?n:e)},l,l.exports,e,t,n,r)}return n[o].exports}var i=typeof require=="function"&&require;for(var o=0;o<r.length;o++)s(r[o]);return s})({1:[function(require,module,exports){ /** @jsx React.DOM */ var Day = require('./day'); var DateUtil = require('./util/date'); var Calendar = React.createClass({displayName: 'Calendar', getInitialState: function() { return { date: new DateUtil(this.props.selected).clone() }; }, componentWillReceiveProps: function(nextProps) { // When the selected date changed if (nextProps.selected !== this.props.selected) { this.setState({ date: new DateUtil(nextProps.selected).clone() }); } }, increaseMonth: function() { this.setState({ date: this.state.date.addMonth() }); }, decreaseMonth: function() { this.setState({ date: this.state.date.subtractMonth() }); }, weeks: function() { return this.state.date.mapWeeksInMonth(this.renderWeek); }, handleDayClick: function(day) { this.props.onSelect(day); }, renderWeek: function(weekStart, key) { if(! weekStart.weekInMonth(this.state.date)) { return; } return ( React.DOM.div({key: key, className: "week"}, this.days(weekStart) ) ); }, renderDay: function(day, key) { return ( Day({ key: key, day: day, date: this.state.date, onClick: this.handleDayClick.bind(this, day), selected: new DateUtil(this.props.selected)}) ); }, days: function(weekStart) { return weekStart.mapDaysInWeek(this.renderDay); }, render: function() { return ( React.DOM.div({className: "datepicker-calendar", onMouseDown: this.props.onMouseDown}, React.DOM.div({className: "datepicker-calendar-triangle"}), React.DOM.div({className: "datepicker-calendar-header"}, React.DOM.a({className: "datepicker-calendar-header-navigation-left", onClick: this.decreaseMonth} ), React.DOM.span({className: "datepicker-calendar-header-month"}, this.state.date.format("MMMM YYYY") ), React.DOM.a({className: "datepicker-calendar-header-navigation-right", onClick: this.increaseMonth} ), React.DOM.div(null, React.DOM.div({className: "datepicker-calendar-header-day"}, "Mo"), React.DOM.div({className: "datepicker-calendar-header-day"}, "Tu"), React.DOM.div({className: "datepicker-calendar-header-day"}, "We"), React.DOM.div({className: "datepicker-calendar-header-day"}, "Th"), React.DOM.div({className: "datepicker-calendar-header-day"}, "Fr"), React.DOM.div({className: "datepicker-calendar-header-day"}, "Sa"), React.DOM.div({className: "datepicker-calendar-header-day"}, "Su") ) ), React.DOM.div({className: "datepicker-calendar-month"}, this.weeks() ) ) ); } }); module.exports = Calendar; },{"./day":4,"./util/date":6}],2:[function(require,module,exports){ /** @jsx React.DOM */ var DateUtil = require('./util/date'); var DateInput = React.createClass({displayName: 'DateInput', getDefaultProps: function() { return { dateFormat: 'YYYY-MM-DD' }; }, getInitialState: function() { return { value: this.props.date.format(this.props.dateFormat) }; }, componentDidMount: function() 
{ this.toggleFocus(this.props.focus); }, componentWillReceiveProps: function(newProps) { this.toggleFocus(newProps.focus); this.setState({ value: newProps.date.format(this.props.dateFormat) }); }, toggleFocus: function(focus) { if (focus) { this.refs.input.getDOMNode().focus(); } else { this.refs.input.getDOMNode().blur(); } }, handleChange: function(event) { var date = moment(event.target.value, this.props.dateFormat, true); this.setState({ value: event.target.value }); if (this.isValueAValidDate()) { this.props.setSelected(new DateUtil(date)); } }, isValueAValidDate: function() { var date = moment(event.target.value, this.props.dateFormat, true); return date.isValid(); }, handleKeyDown: function(event) { switch(event.key) { case "Enter": event.preventDefault(); this.props.handleEnter(); break; } }, handleClick: function(event) { this.props.handleClick(event); }, render: function() { return React.DOM.input({ ref: "input", type: "text", value: this.state.value, onBlur: this.props.onBlur, onClick: this.handleClick, onKeyDown: this.handleKeyDown, onFocus: this.props.onFocus, onChange: this.handleChange, className: "datepicker-input"}); } }); module.exports = DateInput; },{"./util/date":6}],3:[function(require,module,exports){ /** @jsx React.DOM */ var Popover = require('./popover'); var DateUtil = require('./util/date'); var Calendar = require('./calendar'); var DateInput = require('./date_input'); var DatePicker = React.createClass({displayName: 'DatePicker', getInitialState: function() { return { focus: false }; }, handleFocus: function() { this.setState({ focus: true }); }, hideCalendar: function() { this.setState({ focus: false }); }, handleBlur: function() { this.setState({ focus: !! this._shouldBeFocussed }); if (!! this._shouldBeFocussed) { // Firefox doesn't support immediately focussing inside of blur setTimeout(function() { this.setState({ focus: true }); }.bind(this), 0); } // Reset the value of this._shouldBeFocussed to it's default this._shouldBeFocussed = false; }, handleCalendarMouseDown: function() { this._shouldBeFocussed = true; }, handleSelect: function(date) { this.setSelected(date); setTimeout(function(){ this.hideCalendar(); }.bind(this), 200); }, setSelected: function(date) { this.props.onChange(date.moment()); }, onInputClick: function() { this.setState({ focus: true }); }, calendar: function() { if (this.state.focus) { return ( Popover(null, Calendar({ selected: this.props.selected, onSelect: this.handleSelect, onMouseDown: this.handleCalendarMouseDown}) ) ); } }, render: function() { return ( React.DOM.div(null, DateInput({ date: this.props.selected, dateFormat: this.props.dateFormat, focus: this.state.focus, onBlur: this.handleBlur, onFocus: this.handleFocus, handleClick: this.onInputClick, handleEnter: this.hideCalendar, setSelected: this.setSelected}), this.calendar() ) ); } }); module.exports = DatePicker; },{"./calendar":1,"./date_input":2,"./popover":5,"./util/date":6}],4:[function(require,module,exports){ /** @jsx React.DOM */ var Day = React.createClass({displayName: 'Day', render: function() { classes = React.addons.classSet({ 'datepicker-calendar-day': true, 'selected': this.props.day.sameDay(this.props.selected), 'this-month': this.props.day.sameMonth(this.props.date), 'today': this.props.day.sameDay(moment()) }); return ( React.DOM.div({className: classes, onClick: this.props.onClick}, this.props.day.day() ) ); } }); module.exports = Day; },{}],5:[function(require,module,exports){ /** @jsx React.DOM */ var Popover = React.createClass({ displayName: 
'Popover', componentWillMount: function() { popoverContainer = document.createElement('span'); popoverContainer.className = 'datepicker-calendar-container'; this._popoverElement = popoverContainer; document.querySelector('body').appendChild(this._popoverElement); }, componentDidMount: function() { this._renderPopover(); }, componentDidUpdate: function() { this._renderPopover(); }, _popoverComponent: function() { var className = this.props.className; return ( React.DOM.div({className: className}, React.DOM.div({className: "datepicker-calendar-popover-content"}, this.props.children ) ) ); }, _tetherOptions: function() { return { element: this._popoverElement, target: this.getDOMNode().parentElement, attachment: 'top left', targetAttachment: 'bottom left', targetOffset: '10px 0', optimizations: { moveElement: false // always moves to <body> anyway! }, constraints: [ { to: 'scrollParent', attachment: 'together', pin: true } ] }; }, _renderPopover: function() { React.renderComponent(this._popoverComponent(), this._popoverElement); if (this._tether != null) { this._tether.setOptions(this._tetherOptions()); } else { this._tether = new Tether(this._tetherOptions()); } }, componentWillUnmount: function() { this._tether.destroy(); React.unmountComponentAtNode(this._popoverElement); if (this._popoverElement.parentNode) { this._popoverElement.parentNode.removeChild(this._popoverElement); } }, render: function() { return React.DOM.span(null); } }); module.exports = Popover; },{}],6:[function(require,module,exports){ function DateUtil(date) { this._date = date; } DateUtil.prototype.sameDay = function(other) { return this._date.isSame(other._date, 'day'); }; DateUtil.prototype.sameMonth = function(other) { return this._date.isSame(other._date, 'month'); }; DateUtil.prototype.day = function() { return this._date.date(); }; DateUtil.prototype.mapDaysInWeek = function(callback) { var week = []; var firstDay = this._date.clone().startOf('isoWeek'); for(var i = 0; i < 7; i++) { var day = new DateUtil(firstDay.clone().add('days', i)); week[i] = callback(day, i); } return week; }; DateUtil.prototype.mapWeeksInMonth = function(callback) { var month = []; var firstDay = this._date.clone().startOf('month').startOf('isoWeek'); for(var i = 0; i < 6; i++) { var weekStart = new DateUtil(firstDay.clone().add('weeks', i)); month[i] = callback(weekStart, i); } return month; }; DateUtil.prototype.weekInMonth = function(other) { var firstDayInWeek = this._date.clone(); var lastDayInWeek = this._date.clone().isoWeekday(7); return firstDayInWeek.isSame(other._date, 'month') || lastDayInWeek.isSame(other._date, 'month'); }; DateUtil.prototype.format = function() { return this._date.format.apply(this._date, arguments); }; DateUtil.prototype.addMonth = function() { return new DateUtil(this._date.clone().add('month', 1)); }; DateUtil.prototype.subtractMonth = function() { return new DateUtil(this._date.clone().subtract('month', 1)); }; DateUtil.prototype.clone = function() { return new DateUtil(this._date.clone()); }; DateUtil.prototype.moment = function() { return this._date; }; module.exports = DateUtil; },{}]},{},[3])(3) });
1
4,792
White space boya?
Hacker0x01-react-datepicker
js
@@ -79,6 +79,9 @@ type StackEvent cloudformation.StackEvent // StackDescription represents an existing AWS CloudFormation stack. type StackDescription cloudformation.Stack +// StackSummary represents a summary of an existing AWS CloudFormation stack. +type StackSummary cloudformation.StackSummary + // SDK returns the underlying struct from the AWS SDK. func (d *StackDescription) SDK() *cloudformation.Stack { raw := cloudformation.Stack(*d)
1
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 package cloudformation import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/cloudformation" ) // Stack represents a AWS CloudFormation stack. type Stack struct { Name string *stackConfig } type stackConfig struct { Template string Parameters []*cloudformation.Parameter Tags []*cloudformation.Tag RoleARN *string } // StackOption allows you to initialize a Stack with additional properties. type StackOption func(s *Stack) // NewStack creates a stack with the given name and template body. func NewStack(name, template string, opts ...StackOption) *Stack { s := &Stack{ Name: name, stackConfig: &stackConfig{ Template: template, }, } for _, opt := range opts { opt(s) } return s } // WithParameters passes parameters to a stack. func WithParameters(params map[string]string) StackOption { return func(s *Stack) { var flatParams []*cloudformation.Parameter for k, v := range params { flatParams = append(flatParams, &cloudformation.Parameter{ ParameterKey: aws.String(k), ParameterValue: aws.String(v), }) } s.Parameters = flatParams } } // WithTags applies the tags to a stack. func WithTags(tags map[string]string) StackOption { return func(s *Stack) { var flatTags []*cloudformation.Tag for k, v := range tags { flatTags = append(flatTags, &cloudformation.Tag{ Key: aws.String(k), Value: aws.String(v), }) } s.Tags = flatTags } } // WithRoleARN specifies the role that CloudFormation will assume when creating the stack. func WithRoleARN(roleARN string) StackOption { return func(s *Stack) { s.RoleARN = aws.String(roleARN) } } // StackEvent represents a stack event for a resource. type StackEvent cloudformation.StackEvent // StackDescription represents an existing AWS CloudFormation stack. type StackDescription cloudformation.Stack // SDK returns the underlying struct from the AWS SDK. func (d *StackDescription) SDK() *cloudformation.Stack { raw := cloudformation.Stack(*d) return &raw }
1
15,659
Maybe ditch these since `StackSummary` is not used anymore.
aws-copilot-cli
go
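On the StackSummary note above: a thin alias over an SDK type only pays off once methods hang off it, the way StackDescription.SDK() does, so an alias nothing calls is dead weight. A minimal runnable Go sketch of that wrapper pattern, using simplified stand-in types rather than the real AWS SDK structs:

package main

import "fmt"

// sdkStack is a simplified stand-in for cloudformation.Stack from the AWS SDK.
type sdkStack struct{ StackName string }

// Description wraps the SDK type so helper methods can hang off it, the way
// StackDescription does in the real file.
type Description sdkStack

// SDK converts back to the underlying SDK struct, mirroring StackDescription.SDK().
func (d *Description) SDK() *sdkStack {
    raw := sdkStack(*d)
    return &raw
}

func main() {
    d := Description{StackName: "demo"}
    fmt.Println(d.SDK().StackName)
}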
@@ -21,14 +21,15 @@ class BaseSemanticHead(BaseModule, metaclass=ABCMeta): num_classes, init_cfg=None, loss_seg=dict( - type='CrossEntropyLoss', ignore_index=-1, + type='CrossEntropyLoss', + ignore_index=255, loss_weight=1.0)): super(BaseSemanticHead, self).__init__(init_cfg) self.loss_seg = build_loss(loss_seg) self.num_classes = num_classes @force_fp32(apply_to=('seg_preds', )) - def loss(self, seg_preds, gt_semantic_seg, label_bias=1): + def loss(self, seg_preds, gt_semantic_seg): """Get the loss of semantic head. Args:
1
# Copyright (c) OpenMMLab. All rights reserved. from abc import ABCMeta, abstractmethod import torch.nn.functional as F from mmcv.runner import BaseModule, force_fp32 from ..builder import build_loss from ..utils import interpolate_as class BaseSemanticHead(BaseModule, metaclass=ABCMeta): """Base module of Semantic Head. Args: num_classes (int): the number of classes. init_cfg (dict): the initialization config. loss_seg (dict): the loss of the semantic head. """ def __init__(self, num_classes, init_cfg=None, loss_seg=dict( type='CrossEntropyLoss', ignore_index=-1, loss_weight=1.0)): super(BaseSemanticHead, self).__init__(init_cfg) self.loss_seg = build_loss(loss_seg) self.num_classes = num_classes @force_fp32(apply_to=('seg_preds', )) def loss(self, seg_preds, gt_semantic_seg, label_bias=1): """Get the loss of semantic head. Args: seg_preds (Tensor): The input logits with the shape (N, C, H, W). gt_semantic_seg: The ground truth of semantic segmentation with the shape (N, H, W). label_bias: The starting number of the semantic label. Default: 1. Returns: dict: the loss of semantic head. """ if seg_preds.shape[-2:] != gt_semantic_seg.shape[-2:]: seg_preds = interpolate_as(seg_preds, gt_semantic_seg) seg_preds = seg_preds.permute((0, 2, 3, 1)) # make the semantic label start from 0 gt_semantic_seg = gt_semantic_seg - label_bias loss_seg = self.loss_seg( seg_preds.reshape(-1, self.num_classes), # => [NxHxW, C] gt_semantic_seg.reshape(-1).long()) return dict(loss_seg=loss_seg) @abstractmethod def forward(self, x): """Placeholder of forward function. Returns: dict[str, Tensor]: A dictionary, including features and predicted scores. Required keys: 'seg_preds' and 'feats'. """ pass def forward_train(self, x, gt_semantic_seg, label_bias=1): output = self.forward(x) seg_preds = output['seg_preds'] return self.loss(seg_preds, gt_semantic_seg, label_bias) def simple_test(self, x, img_metas, rescale=False): output = self.forward(x) seg_preds = output['seg_preds'] seg_preds = F.interpolate( seg_preds, size=img_metas[0]['pad_shape'][:2], mode='bilinear', align_corners=False) if rescale: h, w, _ = img_metas[0]['img_shape'] seg_preds = seg_preds[:, :, :h, :w] h, w, _ = img_metas[0]['ori_shape'] seg_preds = F.interpolate( seg_preds, size=(h, w), mode='bilinear', align_corners=False) return seg_preds
1
25,858
Suggest indicating the value range and meaning in the docstring.
open-mmlab-mmdetection
py
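Following the reviewer's ask to spell out the value range and meaning: the sketch below transposes the idea to Go (the example language used throughout these notes), with a hypothetical config struct whose field comment documents the range, mirroring the patch's move from ignore_index=-1 to 255:

package main

import "fmt"

// SemanticLossConfig is a hypothetical Go transcription of the loss_seg dict.
type SemanticLossConfig struct {
    // IgnoreIndex marks pixels excluded from the loss. Valid range: 0-255 for
    // uint8 label maps; 255 is the value the patch adopts in place of -1.
    IgnoreIndex int
    LossWeight  float64
}

func main() {
    cfg := SemanticLossConfig{IgnoreIndex: 255, LossWeight: 1.0}
    fmt.Printf("ignoring label %d\n", cfg.IgnoreIndex)
}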
@@ -20,7 +20,7 @@ const ( // calls. listenAddress = ":9500" // metricsPath is the endpoint of exporter. - metricsPath = "/metrics" + metricsPath = "/metrics/" // controllerAddress is the address where jiva controller listens. controllerAddress = "http://localhost:9501" // casType is the type of container attached storage (CAS) from which
1
package command import ( "errors" goflag "flag" "log" "net/url" "github.com/golang/glog" "github.com/openebs/maya/cmd/maya-exporter/app/collector" "github.com/openebs/maya/pkg/util" "github.com/prometheus/client_golang/prometheus" "github.com/spf13/cobra" ) // Constants defined here are the default value of the flags. Which can be // changed while running the binary. const ( // listenAddress is the address where exporter listens for the rest api // calls. listenAddress = ":9500" // metricsPath is the endpoint of exporter. metricsPath = "/metrics" // controllerAddress is the address where jiva controller listens. controllerAddress = "http://localhost:9501" // casType is the type of container attached storage (CAS) from which // the metrics need to be exported. Default is Jiva" casType = "jiva" ) // VolumeExporterOptions is used to create flags for the monitoring command type VolumeExporterOptions struct { ListenAddress string MetricsPath string ControllerAddress string CASType string } // AddListenAddressFlag is used to create flag to pass the listen address of exporter. func AddListenAddressFlag(cmd *cobra.Command, value *string) { cmd.Flags().StringVarP(value, "listen.addr", "a", *value, "Address on which to expose metrics and web interface.)") } // AddMetricsPathFlag is used to create flag to pass the listen path where volume // metrics are exposed. func AddMetricsPathFlag(cmd *cobra.Command, value *string) { cmd.Flags().StringVarP(value, "listen.path", "m", *value, "Path under which to expose metrics.") } // AddControllerAddressFlag is used to create flag to pass the Jiva volume // controllers IP. func AddControllerAddressFlag(cmd *cobra.Command, value *string) { cmd.Flags().StringVarP(value, "controller.addr", "c", *value, "IP address from where metrics to be exported") } // AddCASTypeFlag is used to create flag to pass the storage engine name func AddCASTypeFlag(cmd *cobra.Command, value *string) { cmd.Flags().StringVarP(value, "cas.type", "e", *value, "Type of container attached storage engine") } // NewCmdVolumeExporter is used to create command monitoring and it initialize // monitoring flags also. func NewCmdVolumeExporter() (*cobra.Command, error) { // create an instance of VolumeExporterOptions to initialize with default // values for the flags. options := VolumeExporterOptions{} options.ControllerAddress = controllerAddress options.ListenAddress = listenAddress options.MetricsPath = metricsPath options.CASType = casType cmd := &cobra.Command{ Short: "Collect metrics from OpenEBS volumes", Long: `maya-exporter can be used to monitor openebs volumes and pools. It can be deployed alongside the openebs volume or pool containers as sidecars.`, Example: `maya-exporter -a=http://localhost:8001 -c=:9500 -m=/metrics`, Run: func(cmd *cobra.Command, args []string) { util.CheckErr(Run(cmd, &options), util.Fatal) }, } cmd.Flags().AddGoFlagSet(goflag.CommandLine) goflag.CommandLine.Parse([]string{}) AddControllerAddressFlag(cmd, &options.ControllerAddress) AddListenAddressFlag(cmd, &options.ListenAddress) AddMetricsPathFlag(cmd, &options.MetricsPath) AddCASTypeFlag(cmd, &options.CASType) return cmd, nil } // Run used to process commands,args and call openebs exporter and it returns // nil on successful execution. 
func Run(cmd *cobra.Command, options *VolumeExporterOptions) error { glog.Infof("Starting maya-exporter ...") option := Initialize(options) if len(option) == 0 { glog.Fatal("maya-exporter only supports jiva and cstor as storage engine") return nil } if option == "cstor" { glog.Infof("initialising maya-exporter for the cstor") options.RegisterCstorStatsExporter() } if option == "jiva" { log.Println("Initialising maya-exporter for the jiva") if err := options.RegisterJivaStatsExporter(); err != nil { glog.Fatal(err) return nil } } options.StartMayaExporter() return nil } // RegisterJivaStatsExporter parses the jiva controller URL and // initialises an instance of JivaStatsExporter.This returns err // if the URL is not correct. func (o *VolumeExporterOptions) RegisterJivaStatsExporter() error { controllerURL, err := url.ParseRequestURI(o.ControllerAddress) if err != nil { glog.Error(err) return errors.New("Error in parsing the URI") } exporter := collector.NewJivaStatsExporter(controllerURL, o.CASType) prometheus.MustRegister(exporter) return nil } // RegisterCstorStatsExporter initiates the connection with the cstor and register // the exporter with Prometheus for collecting the metrics.This doesn't returns // error because that case is handled in InitiateConnection(). func (o *VolumeExporterOptions) RegisterCstorStatsExporter() { var c collector.Cstor c.InitiateConnection() if c.Conn == nil { glog.Error("Connection is not established with the cstor.") } exporter := collector.NewCstorStatsExporter(c.Conn, o.CASType) prometheus.MustRegister(exporter) glog.Info("Registered the exporter") return }
1
10,573
Not sure if this works with Prometheus by default; otherwise we will have to add this to the Prometheus config as well.
openebs-maya
go
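The trailing slash in the patch ("/metrics" to "/metrics/") matters for Go's standard mux: a pattern without a trailing slash matches only that exact path, while one with a trailing slash matches the whole subtree, and when only the subtree pattern is registered a bare "/metrics" request gets a 301 redirect to "/metrics/". Whether Prometheus copes then depends on its redirect handling, which is the reviewer's worry. A minimal runnable sketch of the mux semantics (not the maya-exporter code itself):

package main

import (
    "fmt"
    "log"
    "net/http"
)

func main() {
    mux := http.NewServeMux()
    // Pattern without a trailing slash: matches the exact path "/metrics" only.
    mux.HandleFunc("/metrics", func(w http.ResponseWriter, r *http.Request) {
        fmt.Fprintln(w, "exact match")
    })
    // Pattern with a trailing slash: matches "/metrics/" and everything below it.
    mux.HandleFunc("/metrics/", func(w http.ResponseWriter, r *http.Request) {
        fmt.Fprintln(w, "subtree match")
    })
    log.Fatal(http.ListenAndServe(":9500", mux))
}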
@@ -24,6 +24,10 @@ import ( "github.com/projectcalico/felix/stringutils" ) +func (r *DefaultRuleRenderer) CleanupEndPoint(ifaceName string) { + r.epmm.RemoveEndPointMark(ifaceName) +} + func (r *DefaultRuleRenderer) WorkloadDispatchChains( endpoints map[proto.WorkloadEndpointID]*proto.WorkloadEndpoint, ) []*Chain {
1
// Copyright (c) 2017 Tigera, Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package rules import ( "sort" log "github.com/sirupsen/logrus" . "github.com/projectcalico/felix/iptables" "github.com/projectcalico/felix/proto" "github.com/projectcalico/felix/stringutils" ) func (r *DefaultRuleRenderer) WorkloadDispatchChains( endpoints map[proto.WorkloadEndpointID]*proto.WorkloadEndpoint, ) []*Chain { // Extract endpoint names. log.WithField("numEndpoints", len(endpoints)).Debug("Rendering workload dispatch chains") names := make([]string, 0, len(endpoints)) for _, endpoint := range endpoints { names = append(names, endpoint.Name) } return r.dispatchChains( names, WorkloadFromEndpointPfx, WorkloadToEndpointPfx, ChainFromWorkloadDispatch, ChainToWorkloadDispatch, true, ) } func (r *DefaultRuleRenderer) HostDispatchChains( endpoints map[string]proto.HostEndpointID, applyOnForward bool, ) []*Chain { return r.hostDispatchChains(endpoints, false, applyOnForward) } func (r *DefaultRuleRenderer) FromHostDispatchChains( endpoints map[string]proto.HostEndpointID, ) []*Chain { return r.hostDispatchChains(endpoints, true, false) } func (r *DefaultRuleRenderer) hostDispatchChains( endpoints map[string]proto.HostEndpointID, fromOnly bool, applyOnForward bool, ) []*Chain { // Extract endpoint names. log.WithField("numEndpoints", len(endpoints)).Debug("Rendering host dispatch chains") names := make([]string, 0, len(endpoints)) for ifaceName := range endpoints { names = append(names, ifaceName) } if fromOnly { return r.dispatchChains( names, HostFromEndpointPfx, "", ChainDispatchFromHostEndpoint, "", false, ) } if !applyOnForward { return r.dispatchChains( names, HostFromEndpointPfx, HostToEndpointPfx, ChainDispatchFromHostEndpoint, ChainDispatchToHostEndpoint, false, ) } return append( r.dispatchChains( names, HostFromEndpointPfx, HostToEndpointPfx, ChainDispatchFromHostEndpoint, ChainDispatchToHostEndpoint, false, ), r.dispatchChains( names, HostFromEndpointForwardPfx, HostToEndpointForwardPfx, ChainDispatchFromHostEndPointForward, ChainDispatchToHostEndpointForward, false, )..., ) } func (r *DefaultRuleRenderer) dispatchChains( names []string, fromEndpointPfx, toEndpointPfx, dispatchFromEndpointChainName, dispatchToEndpointChainName string, dropAtEndOfChain bool, ) []*Chain { // Sort interface names so that rules in the dispatch chain are ordered deterministically. // Otherwise we would reprogram the dispatch chain when there is no real change. sort.Strings(names) log.WithField("ifaceNames", names).Debug("Rendering dispatch chains") // Since there can be >100 endpoints, putting them in a single list adds some latency to // endpoints that are later in the chain. To reduce that impact, we build a shallow tree of // chains based on the prefixes of the chains. // Start by figuring out the common prefix of the endpoint names. Commonly, this will // be the interface prefix, e.g. "cali", but we may get lucky if multiple interfaces share // a longer prefix. 
commonPrefix := stringutils.CommonPrefix(names) log.WithField("commonPrefix", commonPrefix).Debug("Calculated common prefix") // Then, divide the names into bins based on their next character. prefixes := []string{} prefixToNames := map[string][]string{} lastName := "" for _, name := range names { if name == "" { log.Panic("Unable to render dispatch chain. Empty interface name.") } if name == lastName { log.WithField("ifaceName", name).Error( "Multiple endpoints with same interface name detected. " + "Incorrect policy may be applied.") continue } prefix := commonPrefix if len(name) > len(commonPrefix) { prefix = name[:len(commonPrefix)+1] } if _, present := prefixToNames[prefix]; !present { // Record the prefixes in sorted order (if we iterate over the map, we get a // random order, which we don't want). prefixes = append(prefixes, prefix) } prefixToNames[prefix] = append(prefixToNames[prefix], name) lastName = name } rootFromEndpointRules := make([]Rule, 0) rootToEndpointRules := make([]Rule, 0) // Now, iterate over the prefixes. If there are multiple names in a prefix, we render a // child chain for that prefix. Otherwise, we render the rule directly to avoid the cost // of an extra goto. var chains []*Chain for _, prefix := range prefixes { ifaceNames := prefixToNames[prefix] logCxt := log.WithFields(log.Fields{ "prefix": prefix, "namesWithPrefix": ifaceNames, }) logCxt.Debug("Considering prefix") if len(ifaceNames) > 1 { // More than one name, render a prefix match in the root chain... nextChar := prefix[len(commonPrefix):] ifaceMatch := prefix + "+" childFromChainName := dispatchFromEndpointChainName + "-" + nextChar childToChainName := dispatchToEndpointChainName + "-" + nextChar logCxt := logCxt.WithFields(log.Fields{ "childFromChainName": childFromChainName, "childToChainName": childToChainName, "ifaceMatch": ifaceMatch, }) logCxt.Debug("Multiple interfaces with prefix, rendering child chain") rootFromEndpointRules = append(rootFromEndpointRules, Rule{ Match: Match().InInterface(ifaceMatch), // Note: we use a goto here, which means that packets will not // return to this chain. This prevents packets from traversing the // rest of the root chain once we've found their prefix. Action: GotoAction{ Target: childFromChainName, }, }) rootToEndpointRules = append(rootToEndpointRules, Rule{ Match: Match().OutInterface(ifaceMatch), Action: GotoAction{ Target: childToChainName, }, }) // ...and child chains. childFromEndpointRules := make([]Rule, 0) childToEndpointRules := make([]Rule, 0) for _, name := range ifaceNames { logCxt.WithField("ifaceName", name).Debug("Adding rule to child chains") childFromEndpointRules = append(childFromEndpointRules, Rule{ Match: Match().InInterface(name), Action: GotoAction{ Target: EndpointChainName(fromEndpointPfx, name), }, }) childToEndpointRules = append(childToEndpointRules, Rule{ Match: Match().OutInterface(name), Action: GotoAction{ Target: EndpointChainName(toEndpointPfx, name), }, }) } if dropAtEndOfChain { // Since we use a goto in the root chain (as described above), we // need to duplicate the drop rules at the end of the child chain // since packets that reach the end of the child chain would // return up past the root chain, appearing to be accepted. 
logCxt.Debug("Adding drop rules at end of child chains.") childFromEndpointRules = append(childFromEndpointRules, Rule{ Match: Match(), Action: DropAction{}, Comment: "Unknown interface", }) childToEndpointRules = append(childToEndpointRules, Rule{ Match: Match(), Action: DropAction{}, Comment: "Unknown interface", }) } childFromEndpointChain := &Chain{ Name: childFromChainName, Rules: childFromEndpointRules, } childToEndpointChain := &Chain{ Name: childToChainName, Rules: childToEndpointRules, } if toEndpointPfx != "" { chains = append(chains, childFromEndpointChain, childToEndpointChain) } else { // Only emit from endpoint chains. chains = append(chains, childFromEndpointChain) } } else { // Only one name with this prefix, render rules directly into the root // chains. ifaceName := ifaceNames[0] logCxt.WithField("ifaceName", ifaceName).Debug("Adding rule to root chains") rootFromEndpointRules = append(rootFromEndpointRules, Rule{ Match: Match().InInterface(ifaceName), Action: GotoAction{ Target: EndpointChainName(fromEndpointPfx, ifaceName), }, }) rootToEndpointRules = append(rootToEndpointRules, Rule{ Match: Match().OutInterface(ifaceName), Action: GotoAction{ Target: EndpointChainName(toEndpointPfx, ifaceName), }, }) } } if dropAtEndOfChain { log.Debug("Adding drop rules at end of root chains.") rootFromEndpointRules = append(rootFromEndpointRules, Rule{ Match: Match(), Action: DropAction{}, Comment: "Unknown interface", }) rootToEndpointRules = append(rootToEndpointRules, Rule{ Match: Match(), Action: DropAction{}, Comment: "Unknown interface", }) } fromEndpointDispatchChain := &Chain{ Name: dispatchFromEndpointChainName, Rules: rootFromEndpointRules, } toEndpointDispatchChain := &Chain{ Name: dispatchToEndpointChainName, Rules: rootToEndpointRules, } if toEndpointPfx != "" { chains = append(chains, fromEndpointDispatchChain, toEndpointDispatchChain) } else { // Only emit from endpoint chains. chains = append(chains, fromEndpointDispatchChain) } return chains }
1
16,061
The rule renderer isn't meant to be stateful, so it's probably best to move this out of here.
projectcalico-felix
c
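To make the reviewer's point concrete: a stateless renderer should not own the endpoint-mark table that CleanupEndPoint mutates. A hedged Go sketch, using hypothetical names rather than Felix's real types, of keeping the mutable mark bookkeeping in a separate manager whose lifecycle the renderer never owns:

package main

import "fmt"

// EndpointMarkManager owns the mutable interface-to-mark table; this is a
// hypothetical stand-in for Felix's epmm, not the real type.
type EndpointMarkManager struct {
    marks map[string]uint32
    next  uint32
}

func NewEndpointMarkManager() *EndpointMarkManager {
    return &EndpointMarkManager{marks: map[string]uint32{}, next: 1}
}

func (m *EndpointMarkManager) MarkFor(iface string) uint32 {
    if mk, ok := m.marks[iface]; ok {
        return mk
    }
    mk := m.next
    m.next++
    m.marks[iface] = mk
    return mk
}

func (m *EndpointMarkManager) Remove(iface string) { delete(m.marks, iface) }

// renderRule asks the manager for marks but never cleans up or stores them,
// so the renderer itself stays free of mutable state.
func renderRule(iface string, marks *EndpointMarkManager) string {
    return fmt.Sprintf("-i %s -j MARK --set-mark %d", iface, marks.MarkFor(iface))
}

func main() {
    marks := NewEndpointMarkManager()
    fmt.Println(renderRule("cali123", marks))
    marks.Remove("cali123") // cleanup lives with the state's owner, not the renderer
}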
@@ -57,6 +57,7 @@ func setupContainerdConfig(ctx context.Context, cfg *config.Node) error { DisableCgroup: disableCgroup, IsRunningInUserNS: isRunningInUserNS, PrivateRegistryConfig: privRegistries.Registry(), + ExtraRuntimes: findNvidiaContainerRuntimes(nil), } selEnabled, selConfigured, err := selinuxStatus()
1
// +build linux package containerd import ( "context" "io/ioutil" "os" "time" "github.com/opencontainers/runc/libcontainer/userns" "github.com/pkg/errors" "github.com/rancher/k3s/pkg/agent/templates" util2 "github.com/rancher/k3s/pkg/agent/util" "github.com/rancher/k3s/pkg/cgroups" "github.com/rancher/k3s/pkg/daemons/config" "github.com/rancher/k3s/pkg/version" "github.com/rancher/wharfie/pkg/registries" "github.com/sirupsen/logrus" "golang.org/x/sys/unix" "google.golang.org/grpc" runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" "k8s.io/kubernetes/pkg/kubelet/util" ) func getContainerdArgs(cfg *config.Node) []string { args := []string{ "containerd", "-c", cfg.Containerd.Config, "-a", cfg.Containerd.Address, "--state", cfg.Containerd.State, "--root", cfg.Containerd.Root, } return args } // setupContainerdConfig generates the containerd.toml, using a template combined with various // runtime configurations and registry mirror settings provided by the administrator. func setupContainerdConfig(ctx context.Context, cfg *config.Node) error { privRegistries, err := registries.GetPrivateRegistries(cfg.AgentConfig.PrivateRegistry) if err != nil { return err } isRunningInUserNS := userns.RunningInUserNS() _, _, hasCFS, hasPIDs := cgroups.CheckCgroups() // "/sys/fs/cgroup" is namespaced cgroupfsWritable := unix.Access("/sys/fs/cgroup", unix.W_OK) == nil disableCgroup := isRunningInUserNS && (!hasCFS || !hasPIDs || !cgroupfsWritable) if disableCgroup { logrus.Warn("cgroup v2 controllers are not delegated for rootless. Disabling cgroup.") } var containerdTemplate string containerdConfig := templates.ContainerdConfig{ NodeConfig: cfg, DisableCgroup: disableCgroup, IsRunningInUserNS: isRunningInUserNS, PrivateRegistryConfig: privRegistries.Registry(), } selEnabled, selConfigured, err := selinuxStatus() if err != nil { return errors.Wrap(err, "failed to detect selinux") } switch { case !cfg.SELinux && selEnabled: logrus.Warn("SELinux is enabled on this host, but " + version.Program + " has not been started with --selinux - containerd SELinux support is disabled") case cfg.SELinux && !selConfigured: logrus.Warnf("SELinux is enabled for "+version.Program+" but process is not running in context '%s', "+version.Program+"-selinux policy may need to be applied", SELinuxContextType) } containerdTemplateBytes, err := ioutil.ReadFile(cfg.Containerd.Template) if err == nil { logrus.Infof("Using containerd template at %s", cfg.Containerd.Template) containerdTemplate = string(containerdTemplateBytes) } else if os.IsNotExist(err) { containerdTemplate = templates.ContainerdConfigTemplate } else { return err } parsedTemplate, err := templates.ParseTemplateFromConfig(containerdTemplate, containerdConfig) if err != nil { return err } return util2.WriteFile(cfg.Containerd.Config, parsedTemplate) } // criConnection connects to a CRI socket at the given path. func CriConnection(ctx context.Context, address string) (*grpc.ClientConn, error) { addr, dialer, err := util.GetAddressAndDialer("unix://" + address) if err != nil { return nil, err } conn, err := grpc.Dial(addr, grpc.WithInsecure(), grpc.WithTimeout(3*time.Second), grpc.WithContextDialer(dialer), grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(maxMsgSize))) if err != nil { return nil, err } c := runtimeapi.NewRuntimeServiceClient(conn) _, err = c.Version(ctx, &runtimeapi.VersionRequest{ Version: "0.1.0", }) if err != nil { conn.Close() return nil, err } return conn, nil }
1
10,032
Passing in a `nil` here just so that the tests can pass in an alternative implementation seems weird, but I don't know what the convention is for doing something like this - @briandowns?
k3s-io-k3s
go
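One common Go convention for the nil-argument question above: accept the dependency as a function (or interface) parameter and fall back to the real implementation when it is nil, so production call sites can pass nil while tests inject a fake. A minimal runnable sketch under that assumption; findRuntimes and statFunc are illustrative names, not the k3s API:

package main

import (
    "fmt"
    "os"
)

// statFunc abstracts os.Stat so tests can fake the filesystem.
type statFunc func(string) (os.FileInfo, error)

// findRuntimes uses the real os.Stat when stat is nil, which is what a
// production call site like findRuntimes(nil) relies on.
func findRuntimes(stat statFunc) []string {
    if stat == nil {
        stat = os.Stat
    }
    var found []string
    for _, p := range []string{"/usr/bin/nvidia-container-runtime"} {
        if _, err := stat(p); err == nil {
            found = append(found, p)
        }
    }
    return found
}

func main() {
    fmt.Println(findRuntimes(nil)) // production: real filesystem; tests pass a fake
}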
@@ -3,12 +3,12 @@ 'use strict'; var clone = require('clone'); -var dot = require('@deque/dot'); +var doT = require('@deque/dot'); var templates = require('./templates'); var buildManual = require('./build-manual'); var entities = new (require('html-entities').AllHtmlEntities)(); var packageJSON = require('../package.json'); -var dotRegex = /\{\{.+?\}\}/g; +var doTRegex = /\{\{.+?\}\}/g; var axeVersion = packageJSON.version.substring( 0,
1
/*eslint-env node */ /*eslint max-len: off */ 'use strict'; var clone = require('clone'); var dot = require('@deque/dot'); var templates = require('./templates'); var buildManual = require('./build-manual'); var entities = new (require('html-entities').AllHtmlEntities)(); var packageJSON = require('../package.json'); var dotRegex = /\{\{.+?\}\}/g; var axeVersion = packageJSON.version.substring( 0, packageJSON.version.lastIndexOf('.') ); var descriptionTableHeader = '| Rule ID | Description | Impact | Tags | Issue Type | ACT Rules |\n| :------- | :------- | :------- | :------- | :------- | :------- |\n'; dot.templateSettings.strip = false; function getLocale(grunt, options) { var localeFile; if (options.locale) { localeFile = './locales/' + options.locale + '.json'; } if (localeFile) { return grunt.file.readJSON(localeFile); } } function makeHeaderLink(title) { return title .replace(/ /g, '-') .replace(/[\.&]/g, '') .toLowerCase(); } function buildRules(grunt, options, commons, callback) { var axeImpact = Object.freeze(['minor', 'moderate', 'serious', 'critical']); // TODO: require('../axe') does not work if grunt configure is moved after uglify, npm test breaks with undefined. Complicated grunt concurrency issue. var locale = getLocale(grunt, options); options.getFiles = false; buildManual(grunt, options, commons, function(result) { var metadata = { rules: {}, checks: {} }; var descriptions = { wcag20: { title: 'WCAG 2.0 Level A & AA Rules', rules: [] }, wcag21: { title: 'WCAG 2.1 Level A & AA Rules', rules: [] }, bestPractice: { title: 'Best Practices Rules', intro: 'Rules that do not necessarily conform to WCAG success criterion but are industry accepted practices that improve the user experience.', rules: [] }, wcag2aaa: { title: 'WCAG 2.0 and 2.1 level AAA rules', intro: 'Rules that check for conformance to WCAG AAA success criteria that can be fully automated.', rules: [] }, experimental: { title: 'Experimental Rules', intro: 'Rules we are still testing and developing. They are not enabled by default in axe-core, but are enabled for the axe browser extensions.', rules: [] }, deprecated: { title: 'Deprecated Rules', intro: 'Deprecated rules are not enabled by default and will be removed in the next major release.', rules: [] } }; var TOC = Object.keys(descriptions) .map(key => { return `- [${descriptions[key].title}](#${makeHeaderLink( descriptions[key].title )})`; }) .join('\n'); var tags = options.tags ? 
options.tags.split(/\s*,\s*/) : []; var rules = result.rules; var checks = result.checks; // Translate checks before parsing them so that translations // get applied to the metadata object if (locale && locale.checks) { checks.forEach(function(check) { if (locale.checks[check.id] && check.metadata) { check.metadata.messages = locale.checks[check.id]; } }); } parseChecks(checks); function parseMetaData(source, propType) { var data = source.metadata; var key = source.id || source.type; if (key && locale && locale[propType] && propType !== 'checks') { data = locale[propType][key] || data; } var result = clone(data) || {}; if (result.messages) { Object.keys(result.messages).forEach(function(key) { // only convert to templated function for strings // objects handled later in publish-metadata.js if ( typeof result.messages[key] !== 'object' && dotRegex.test(result.messages[key]) ) { result.messages[key] = dot .template(result.messages[key]) .toString(); } }); } //TODO this is actually failureSummaries, property name should better reflect that if (result.failureMessage && dotRegex.test(result.failureMessage)) { result.failureMessage = dot.template(result.failureMessage).toString(); } return result; } function createFailureSummaryObject(summaries) { var result = {}; summaries.forEach(function(summary) { if (summary.type) { result[summary.type] = parseMetaData(summary, 'failureSummaries'); } }); return result; } function getIncompleteMsg(summaries) { var result = {}; summaries.forEach(function(summary) { if ( summary.incompleteFallbackMessage && dotRegex.test(summary.incompleteFallbackMessage) ) { result = dot.template(summary.incompleteFallbackMessage).toString(); } }); return result; } function replaceFunctions(string) { return string .replace( /"(evaluate|after|gather|matches|source|commons)":\s*("[^"]+?.js")/g, function(m, p1, p2) { return m.replace(p2, getSource(p2.replace(/^"|"$/g, ''), p1)); } ) .replace(/"(function anonymous\([\s\S]+?\) {)([\s\S]+?)(})"/g, function( m ) { return JSON.parse(m); }) .replace(/"(\(function \(\) {)([\s\S]+?)(}\)\(\))"/g, function(m) { return JSON.parse(m); }); } function getSource(file, type) { return grunt.template.process(templates[type], { data: { source: grunt.file.read(file) } }); } function findCheck(checks, id) { return checks.filter(function(check) { if (check.id === id) { return true; } })[0]; } function blacklist(k, v) { if (options.blacklist.indexOf(k) !== -1) { return undefined; } return v; } function parseChecks(collection) { return collection.map(function(check) { var c = {}; var id = typeof check === 'string' ? check : check.id; var definition = clone(findCheck(checks, id)); if (!definition) { grunt.log.error('check ' + id + ' not found'); } c.options = check.options || definition.options; c.id = id; if (definition.metadata && !metadata.checks[id]) { metadata.checks[id] = parseMetaData(definition, 'checks'); } return c.options === undefined ? id : c; }); } function traverseChecks(checkCollection, predicate, startValue) { return checkCollection.reduce(function(out, check) { var id = typeof check === 'string' ? 
check : check.id; var definition = clone(findCheck(checks, id)); if (!definition) { grunt.log.error('check ' + id + ' not found'); } return predicate(definition, out); }, startValue); } function parseImpactForRule(rule) { function capitalize(s) { return s.charAt(0).toUpperCase() + s.slice(1); } if (rule.impact) { return capitalize(rule.impact); } function getUniqueArr(arr) { return arr.filter(function(value, index, self) { return self.indexOf(value) === index; }); } function getImpactScores(definition, out) { if (definition && definition.metadata && definition.metadata.impact) { var impactScore = axeImpact.indexOf(definition.metadata.impact); out.push(impactScore); } return out; } function getScore(checkCollection, onlyHighestScore) { var scores = traverseChecks(checkCollection, getImpactScores, []); if (scores && scores.length) { return onlyHighestScore ? [Math.max.apply(null, scores)] : getUniqueArr(scores); } else { return []; } } var highestImpactForRuleTypeAny = getScore(rule.any, true); var allUniqueImpactsForRuleTypeAll = getScore(rule.all, false); var allUniqueImpactsForRuleTypeNone = getScore(rule.none, false); var cumulativeImpacts = highestImpactForRuleTypeAny .concat(allUniqueImpactsForRuleTypeAll) .concat(allUniqueImpactsForRuleTypeNone); var cumulativeScores = getUniqueArr(cumulativeImpacts).sort(); //order lowest to highest return cumulativeScores.reduce(function(out, cV) { return out.length ? out + ', ' + capitalize(axeImpact[cV]) : capitalize(axeImpact[cV]); }, ''); } function parseFailureForRule(rule) { function hasFailure(definition, out) { if ( !rule.reviewOnFail && definition && definition.metadata && definition.metadata.impact ) { out = out || !!definition.metadata.messages.fail; } return out; } return ( traverseChecks(rule.any, hasFailure, false) || traverseChecks(rule.all, hasFailure, false) || traverseChecks(rule.none, hasFailure, false) ); } function parseIncompleteForRule(rule) { function hasIncomplete(definition, out) { if (definition && definition.metadata && definition.metadata.impact) { out = out || !!definition.metadata.messages.incomplete || rule.reviewOnFail; } return out; } return ( traverseChecks(rule.any, hasIncomplete, false) || traverseChecks(rule.all, hasIncomplete, false) || traverseChecks(rule.none, hasIncomplete, false) ); } function createActLinksForRule(rule) { var actIds = rule.actIds || []; var actLinks = []; actIds.forEach(id => actLinks.push(`[${id}](https://act-rules.github.io/rules/${id})`) ); return actLinks.join(', '); } rules.map(function(rule) { var impact = parseImpactForRule(rule); var canFail = parseFailureForRule(rule); var canIncomplete = parseIncompleteForRule(rule); rule.any = parseChecks(rule.any); rule.all = parseChecks(rule.all); rule.none = parseChecks(rule.none); if (rule.metadata && !metadata.rules[rule.id]) { metadata.rules[rule.id] = parseMetaData(rule, 'rules'); // Translate rules } var rules; if (rule.tags.includes('deprecated')) { rules = descriptions.deprecated.rules; } else if (rule.tags.includes('experimental')) { rules = descriptions.experimental.rules; } else if (rule.tags.find(tag => tag.includes('aaa'))) { rules = descriptions.wcag2aaa.rules; } else if (rule.tags.includes('best-practice')) { rules = descriptions.bestPractice.rules; } else if (rule.tags.find(tag => tag.startsWith('wcag2a'))) { rules = descriptions.wcag20.rules; } else { rules = descriptions.wcag21.rules; } var issueType = []; if (canFail) { issueType.push('failure'); } if (canIncomplete) { issueType.push('needs&nbsp;review'); } var actLinks = 
createActLinksForRule(rule); rules.push([ `[${rule.id}](https://dequeuniversity.com/rules/axe/${axeVersion}/${rule.id}?application=RuleDescription)`, entities.encode(rule.metadata.description), impact, rule.tags.join(', '), issueType.join(', '), actLinks ]); if (tags.length) { rule.enabled = !!rule.tags.filter(function(t) { return tags.indexOf(t) !== -1; }).length; } return rule; }); var ruleTables = Object.keys(descriptions) .map(key => { var description = descriptions[key]; return ` ## ${description.title} ${description.intro ? description.intro : ''} ${ description.rules.length ? descriptionTableHeader : '_There are no matching rules_' }${description.rules .map(function(row) { return '| ' + row.join(' | ') + ' |'; }) .join('\n')}`; }) .join('\n\n'); var descriptions = ` # Rule Descriptions ## Table of Contents ${TOC} ${ruleTables}`; // Translate failureSummaries metadata.failureSummaries = createFailureSummaryObject(result.misc); metadata.incompleteFallbackMessage = getIncompleteMsg(result.misc); callback({ auto: replaceFunctions( JSON.stringify( { lang: options.locale || 'en', data: metadata, rules: rules, checks: checks }, blacklist ) ), manual: replaceFunctions( JSON.stringify( { data: metadata, rules: rules, checks: checks }, blacklist ) ), descriptions }); }); } module.exports = buildRules;
1
17,212
IMO `dot` should be preferred. Remember `aXe`?
dequelabs-axe-core
js
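For reference, the doTRegex in the patch only detects whether a message contains {{...}} placeholders and therefore needs compiling. A small Go sketch of the same detect-then-compile flow; Go's text/template happens to share the {{ }} delimiters with doT, but this is an illustration, not the axe-core build code:

package main

import (
    "fmt"
    "os"
    "regexp"
    "text/template"
)

// templRegex has the same shape as the build script's doTRegex: it matches
// any {{...}} placeholder in a message string.
var templRegex = regexp.MustCompile(`\{\{.+?\}\}`)

func main() {
    msg := "Element has {{.Count}} violations"
    if templRegex.MatchString(msg) {
        // Only messages that actually contain placeholders get compiled.
        t := template.Must(template.New("msg").Parse(msg))
        if err := t.Execute(os.Stdout, map[string]int{"Count": 3}); err != nil {
            fmt.Fprintln(os.Stderr, err)
        }
        fmt.Println()
    }
}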
@@ -59,6 +59,7 @@ namespace AppDomain.Instance return -10; } + Console.ReadKey(); return 0; } }
1
using System; using System.Data.Common; using System.Data.SqlClient; using System.Net.Http; using System.Net.Http.Headers; using System.Reflection; using System.Runtime.InteropServices; using System.Threading; namespace AppDomain.Instance { public class AppDomainInstanceProgram : MarshalByRefObject { public static void Main(string[] args) { new AppDomainInstanceProgram().Run(args); } public int Run(string[] args) { Console.WriteLine("Starting AppDomain Instance Test"); string appDomainName = "crash-dummy"; string programName = string.Empty; int index = 1; if (args?.Length > 0) { appDomainName = args[0]; index = int.Parse(args[1]); programName = args[2]; } NestedProgram instance; if (programName.Equals("SqlServer", StringComparison.OrdinalIgnoreCase)) { instance = new SqlServerNestedProgram(); } else if (programName.Equals("Elasticsearch", StringComparison.OrdinalIgnoreCase)) { instance = new ElasticsearchNestedProgram(); } else { Console.WriteLine($"programName {programName} not recognized. Exiting with error code -10."); return -10; } try { instance.AppDomainName = appDomainName; instance.AppDomainIndex = index; instance.Run(); } catch (Exception ex) { Console.WriteLine($"We have encountered an exception in this instance: {appDomainName} : {ex.Message}"); Console.Error.WriteLine(ex); return -10; } return 0; } } }
1
16,162
Not a huge deal, but this will block tests. Also, don't they stay open by default now?
DataDog-dd-trace-dotnet
.cs
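One way to keep the pause without blocking automated runs, sketched in Go rather than the original C# (the flag name is hypothetical): gate the interactive wait behind an opt-in flag so CI and tests never hit it:

package main

import (
    "bufio"
    "flag"
    "fmt"
    "os"
)

// -interactive is a hypothetical opt-in flag; automated runs never set it.
var interactive = flag.Bool("interactive", false, "pause for Enter before exiting")

func main() {
    flag.Parse()
    fmt.Println("work done")
    if *interactive {
        fmt.Print("press Enter to exit: ")
        _, _ = bufio.NewReader(os.Stdin).ReadString('\n')
    }
}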
@@ -70,7 +70,7 @@ func makeLevels(min logrus.Level) []logrus.Level { } func makeTelemetryState(cfg TelemetryConfig, hookFactory hookFactory) (*telemetryState, error) { - history := createLogBuffer(cfg.LogHistoryDepth) + history := createLogBuffer(2) if cfg.SessionGUID == "" { cfg.SessionGUID = uuid.NewV4().String() }
1
// Copyright (C) 2019 Algorand, Inc. // This file is part of go-algorand // // go-algorand is free software: you can redistribute it and/or modify // it under the terms of the GNU Affero General Public License as // published by the Free Software Foundation, either version 3 of the // License, or (at your option) any later version. // // go-algorand is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU Affero General Public License for more details. // // You should have received a copy of the GNU Affero General Public License // along with go-algorand. If not, see <https://www.gnu.org/licenses/>. package logging import ( "fmt" "io" "os" "path/filepath" "strings" "time" "github.com/satori/go.uuid" "github.com/sirupsen/logrus" "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/logging/telemetryspec" ) const telemetryPrefix = "/" const telemetrySeparator = "/" // EnableTelemetry configures and enables telemetry based on the config provided func EnableTelemetry(cfg TelemetryConfig, l *logger) (err error) { telemetry, err := makeTelemetryState(cfg, createElasticHook) if err != nil { return } enableTelemetryState(telemetry, l) return } func enableTelemetryState(telemetry *telemetryState, l *logger) { l.loggerState.telemetry = telemetry // Hook our normal logging to send desired types to telemetry l.AddHook(telemetry.hook) // Wrap current logger Output writer to capture history l.setOutput(telemetry.wrapOutput(l.getOutput())) } func makeLevels(min logrus.Level) []logrus.Level { levels := []logrus.Level{} for _, l := range []logrus.Level{ logrus.PanicLevel, logrus.FatalLevel, logrus.ErrorLevel, logrus.WarnLevel, logrus.InfoLevel, logrus.DebugLevel, } { if l <= min { levels = append(levels, l) } } return levels } func makeTelemetryState(cfg TelemetryConfig, hookFactory hookFactory) (*telemetryState, error) { history := createLogBuffer(cfg.LogHistoryDepth) if cfg.SessionGUID == "" { cfg.SessionGUID = uuid.NewV4().String() } hook, err := createTelemetryHook(cfg, history, hookFactory) if err != nil { return nil, err } telemetry := &telemetryState{ history, createAsyncHookLevels(hook, 32, 100, makeLevels(cfg.MinLogLevel)), } return telemetry, nil } // ReadTelemetryConfigOrDefault reads telemetry config from file or defaults if no config file found. func ReadTelemetryConfigOrDefault(dataDir *string, genesisID string) (cfg TelemetryConfig, err error) { err = nil if dataDir != nil && *dataDir != "" { configPath := filepath.Join(*dataDir, TelemetryConfigFilename) cfg, err = LoadTelemetryConfig(configPath) } if err != nil && os.IsNotExist(err) { var configPath string configPath, err = config.GetConfigFilePath(TelemetryConfigFilename) if err != nil { cfg = createTelemetryConfig() return } cfg, err = LoadTelemetryConfig(configPath) } if err != nil { cfg = createTelemetryConfig() if os.IsNotExist(err) { err = nil } else { return } } ch := config.GetCurrentVersion().Channel // Should not happen, but default to "dev" if channel is unspecified. if ch == "" { ch = "dev" } cfg.ChainID = fmt.Sprintf("%s-%s", ch, genesisID) return cfg, err } // EnsureTelemetryConfig creates a new TelemetryConfig structure with a generated GUID and the appropriate Telemetry endpoint // Err will be non-nil if the file doesn't exist, or if error loading. // Cfg will always be valid. 
func EnsureTelemetryConfig(dataDir *string, genesisID string) (TelemetryConfig, error) { cfg, _, err := EnsureTelemetryConfigCreated(dataDir, genesisID) return cfg, err } // EnsureTelemetryConfigCreated is the same as EnsureTelemetryConfig but it also returns a bool indicating // whether EnsureTelemetryConfig had to create the config. func EnsureTelemetryConfigCreated(dataDir *string, genesisID string) (TelemetryConfig, bool, error) { configPath := "" var cfg TelemetryConfig var err error if dataDir != nil && *dataDir != "" { configPath = filepath.Join(*dataDir, TelemetryConfigFilename) cfg, err = LoadTelemetryConfig(configPath) if err != nil && os.IsNotExist(err) { // if it just didn't exist, try again at the other path configPath = "" } } if configPath == "" { configPath, err = config.GetConfigFilePath(TelemetryConfigFilename) if err != nil { cfg := createTelemetryConfig() initializeConfig(cfg) return cfg, true, err } cfg, err = LoadTelemetryConfig(configPath) } created := false if err != nil { err = nil created = true cfg = createTelemetryConfig() cfg.FilePath = configPath // Initialize our desired cfg.FilePath // There was no config file, create it. err = cfg.Save(configPath) } ch := config.GetCurrentVersion().Channel // Should not happen, but default to "dev" if channel is unspecified. if ch == "" { ch = "dev" } cfg.ChainID = fmt.Sprintf("%s-%s", ch, genesisID) initializeConfig(cfg) return cfg, created, err } // wrapOutput wraps the log writer so we can keep a history of // the tail of the file to send with critical telemetry events when logged. func (t *telemetryState) wrapOutput(out io.Writer) io.Writer { return t.history.wrapOutput(out) } func (t *telemetryState) logMetrics(l logger, category telemetryspec.Category, metrics telemetryspec.MetricDetails, details interface{}) { if metrics == nil { return } l = l.WithFields(logrus.Fields{ "metrics": metrics, }).(logger) t.logTelemetry(l, buildMessage(string(category), string(metrics.Identifier())), details) } func (t *telemetryState) logEvent(l logger, category telemetryspec.Category, identifier telemetryspec.Event, details interface{}) { t.logTelemetry(l, buildMessage(string(category), string(identifier)), details) } func (t *telemetryState) logStartOperation(l logger, category telemetryspec.Category, identifier telemetryspec.Operation) TelemetryOperation { op := makeTelemetryOperation(t, category, identifier) t.logTelemetry(l, buildMessage(string(category), string(identifier), "Start"), nil) return op } func buildMessage(args ...string) string { message := telemetryPrefix + strings.Join(args, telemetrySeparator) return message } // logTelemetry explicitly only sends telemetry events to the cloud. func (t *telemetryState) logTelemetry(l logger, message string, details interface{}) { if details != nil { l = l.WithFields(logrus.Fields{ "details": details, }).(logger) } entry := l.entry.WithFields(Fields{ "session": l.GetTelemetrySession(), "instanceName": l.GetInstanceName(), }) // Populate entry like logrus.entry.log() does entry.Time = time.Now() entry.Level = logrus.InfoLevel entry.Message = message t.hook.Fire(entry) } func (t *telemetryState) Close() { t.hook.Close() } func (t *telemetryState) Flush() { t.hook.Flush() }
1
36,913
Could you make it a local constant for now?
algorand-go-algorand
go
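The shape the reviewer is asking for, as a minimal Go sketch; logHistoryDepth is an illustrative name, not taken from the repo:

package main

import "fmt"

func makeTelemetryState() {
    // logHistoryDepth is a local constant standing in for the previous
    // cfg.LogHistoryDepth and for the bare literal 2 in the patch.
    const logHistoryDepth = 2
    history := make([]string, 0, logHistoryDepth)
    fmt.Println(cap(history))
}

func main() { makeTelemetryState() }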
@@ -950,6 +950,10 @@ class UserResource(object): if has_invalid_value: raise_invalid(self.request, **error_details) + if field in ('id', 'collection_id'): + if isinstance(value, int): + value = str(value) + filters.append(Filter(field, value, operator)) return filters
1
import re import functools import colander import venusian import six from pyramid import exceptions as pyramid_exceptions from pyramid.decorator import reify from pyramid.security import Everyone from pyramid.httpexceptions import (HTTPNotModified, HTTPPreconditionFailed, HTTPNotFound, HTTPServiceUnavailable) from kinto.core import logger from kinto.core import Service from kinto.core.errors import http_error, raise_invalid, send_alert, ERRORS from kinto.core.events import ACTIONS from kinto.core.storage import exceptions as storage_exceptions, Filter, Sort from kinto.core.utils import ( COMPARISON, classname, native_value, decode64, encode64, json, encode_header, decode_header, dict_subset ) from .model import Model, ShareableModel from .schema import ResourceSchema from .viewset import ViewSet, ShareableViewSet def register(depth=1, **kwargs): """Ressource class decorator. Register the decorated class in the cornice registry. Pass all its keyword arguments to the register_resource function. """ def wrapped(resource): register_resource(resource, depth=depth + 1, **kwargs) return resource return wrapped def register_resource(resource_cls, settings=None, viewset=None, depth=1, **kwargs): """Register a resource in the cornice registry. :param resource_cls: The resource class to register. It should be a class or have a "name" attribute. :param viewset: A ViewSet object, which will be used to find out which arguments should be appended to the views, and where the views are. :param depth: A depth offset. It will be used to determine what is the level of depth in the call tree. (set to 1 by default.) Any additional keyword parameters will be used to override the viewset attributes. """ if viewset is None: viewset = resource_cls.default_viewset(**kwargs) else: viewset.update(**kwargs) resource_name = viewset.get_name(resource_cls) def register_service(endpoint_type, settings): """Registers a service in cornice, for the given type. """ path_pattern = getattr(viewset, '%s_path' % endpoint_type) path_values = {'resource_name': resource_name} path = path_pattern.format(**path_values) name = viewset.get_service_name(endpoint_type, resource_cls) service = Service(name, path, depth=depth, **viewset.get_service_arguments()) # Attach viewset and resource to the service for later reference. service.viewset = viewset service.resource = resource_cls service.type = endpoint_type # Attach collection and record paths. service.collection_path = viewset.collection_path.format(**path_values) service.record_path = (viewset.record_path.format(**path_values) if viewset.record_path is not None else None) methods = getattr(viewset, '%s_methods' % endpoint_type) for method in methods: if not viewset.is_endpoint_enabled( endpoint_type, resource_name, method.lower(), settings): continue argument_getter = getattr(viewset, '%s_arguments' % endpoint_type) view_args = argument_getter(resource_cls, method) view = viewset.get_view(endpoint_type, method.lower()) service.add_view(method, view, klass=resource_cls, **view_args) return service def callback(context, name, ob): # get the callbacks registred by the inner services # and call them from here when the @resource classes are being # scanned by venusian. config = context.config.with_package(info.module) # Storage is mandatory for resources. if not hasattr(config.registry, 'storage'): msg = 'Mandatory storage backend is missing from configuration.' raise pyramid_exceptions.ConfigurationError(msg) # A service for the list. 
service = register_service('collection', config.registry.settings) config.add_cornice_service(service) # An optional one for record endpoint. if getattr(viewset, 'record_path') is not None: service = register_service('record', config.registry.settings) config.add_cornice_service(service) info = venusian.attach(resource_cls, callback, category='pyramid', depth=depth) return callback class UserResource(object): """Base resource class providing every endpoint.""" default_viewset = ViewSet """Default :class:`kinto.core.resource.viewset.ViewSet` class to use when the resource is registered.""" default_model = Model """Default :class:`kinto.core.resource.model.Model` class to use for interacting the :mod:`kinto.core.storage` and :mod:`kinto.core.permission` backends.""" mapping = ResourceSchema() """Schema to validate records.""" def __init__(self, request, context=None): # Models are isolated by user. parent_id = self.get_parent_id(request) # Authentication to storage is transmitted as is (cf. cloud_storage). auth = request.headers.get('Authorization') # ID generator by resource name in settings. default_id_generator = request.registry.id_generators[''] resource_name = context.resource_name if context else '' id_generator = request.registry.id_generators.get(resource_name, default_id_generator) self.model = self.default_model( storage=request.registry.storage, id_generator=id_generator, collection_id=classname(self), parent_id=parent_id, auth=auth) self.request = request self.context = context self.record_id = self.request.matchdict.get('id') self.force_patch_update = False # Log resource context. logger.bind(collection_id=self.model.collection_id, collection_timestamp=self.timestamp) @reify def timestamp(self): """Return the current collection timestamp. :rtype: int """ try: return self.model.timestamp() except storage_exceptions.BackendError as e: is_readonly = self.request.registry.settings['readonly'] if not is_readonly: raise e # If the instance is configured to be readonly, and if the # collection is empty, the backend will try to bump the timestamp. # It fails if the configured db user has not write privileges. logger.exception(e) error_msg = ("Collection timestamp cannot be written. " "Records endpoint must be hit at least once from a " "writable instance.") raise http_error(HTTPServiceUnavailable(), errno=ERRORS.BACKEND, message=error_msg) def get_parent_id(self, request): """Return the parent_id of the resource with regards to the current request. :param request: The request used to create the resource. :rtype: str """ return request.prefixed_userid def _get_known_fields(self): """Return all the `field` defined in the ressource mapping.""" known_fields = [c.name for c in self.mapping.children] + \ [self.model.id_field, self.model.modified_field, self.model.deleted_field] return known_fields def is_known_field(self, field): """Return ``True`` if `field` is defined in the resource schema. If the resource schema allows unknown fields, this will always return ``True``. :param str field: Field name :rtype: bool """ if self.mapping.get_option('preserve_unknown'): return True known_fields = self._get_known_fields() # Test first level only: ``target.data.id`` -> ``target`` field = field.split('.', 1)[0] return field in known_fields # # End-points # def collection_get(self): """Model ``GET`` endpoint: retrieve multiple records. :raises: :exc:`~pyramid:pyramid.httpexceptions.HTTPNotModified` if ``If-None-Match`` header is provided and collection not modified in the interim. 
:raises: :exc:`~pyramid:pyramid.httpexceptions.HTTPPreconditionFailed` if ``If-Match`` header is provided and collection modified in the iterim. :raises: :exc:`~pyramid:pyramid.httpexceptions.HTTPBadRequest` if filters or sorting are invalid. """ self._add_timestamp_header(self.request.response) self._add_cache_header(self.request.response) self._raise_304_if_not_modified() self._raise_412_if_modified() headers = self.request.response.headers filters = self._extract_filters() limit = self._extract_limit() sorting = self._extract_sorting(limit) partial_fields = self._extract_partial_fields() filter_fields = [f.field for f in filters] include_deleted = self.model.modified_field in filter_fields pagination_rules, offset = self._extract_pagination_rules_from_token( limit, sorting) records, total_records = self.model.get_records( filters=filters, sorting=sorting, limit=limit, pagination_rules=pagination_rules, include_deleted=include_deleted) offset = offset + len(records) next_page = None if limit and len(records) == limit and offset < total_records: lastrecord = records[-1] next_page = self._next_page_url(sorting, limit, lastrecord, offset) headers['Next-Page'] = encode_header(next_page) if partial_fields: records = [ dict_subset(record, partial_fields) for record in records ] # Bind metric about response size. logger.bind(nb_records=len(records), limit=limit) headers['Total-Records'] = encode_header('%s' % total_records) return self.postprocess(records) def collection_post(self): """Model ``POST`` endpoint: create a record. If the new record id conflicts against an existing one, the posted record is ignored, and the existing record is returned, with a ``200`` status. :raises: :exc:`~pyramid:pyramid.httpexceptions.HTTPPreconditionFailed` if ``If-Match`` header is provided and collection modified in the iterim. .. seealso:: Add custom behaviour by overriding :meth:`kinto.core.resource.UserResource.process_record` """ new_record = self.request.validated.get('data', {}) try: # Since ``id`` does not belong to schema, it is not in validated # data. Must look up in body. id_field = self.model.id_field new_record[id_field] = _id = self.request.json['data'][id_field] self._raise_400_if_invalid_id(_id) existing = self._get_record_or_404(_id) except (HTTPNotFound, KeyError, ValueError): existing = None self._raise_412_if_modified(record=existing) if existing: record = existing action = ACTIONS.READ else: new_record = self.process_record(new_record) record = self.model.create_record(new_record) self.request.response.status_code = 201 action = ACTIONS.CREATE return self.postprocess(record, action=action) def collection_delete(self): """Model ``DELETE`` endpoint: delete multiple records. :raises: :exc:`~pyramid:pyramid.httpexceptions.HTTPPreconditionFailed` if ``If-Match`` header is provided and collection modified in the iterim. :raises: :exc:`~pyramid:pyramid.httpexceptions.HTTPBadRequest` if filters are invalid. """ self._raise_412_if_modified() filters = self._extract_filters() records, _ = self.model.get_records(filters=filters) deleted = self.model.delete_records(filters=filters) action = len(deleted) > 0 and ACTIONS.DELETE or ACTIONS.READ return self.postprocess(deleted, action=action, old=records) def get(self): """Record ``GET`` endpoint: retrieve a record. :raises: :exc:`~pyramid:pyramid.httpexceptions.HTTPNotFound` if the record is not found. :raises: :exc:`~pyramid:pyramid.httpexceptions.HTTPNotModified` if ``If-None-Match`` header is provided and record not modified in the interim. 
:raises: :exc:`~pyramid:pyramid.httpexceptions.HTTPPreconditionFailed` if ``If-Match`` header is provided and record modified in the iterim. """ self._raise_400_if_invalid_id(self.record_id) record = self._get_record_or_404(self.record_id) timestamp = record[self.model.modified_field] self._add_timestamp_header(self.request.response, timestamp=timestamp) self._add_cache_header(self.request.response) self._raise_304_if_not_modified(record) self._raise_412_if_modified(record) partial_fields = self._extract_partial_fields() if partial_fields: record = dict_subset(record, partial_fields) return self.postprocess(record) def put(self): """Record ``PUT`` endpoint: create or replace the provided record and return it. :raises: :exc:`~pyramid:pyramid.httpexceptions.HTTPPreconditionFailed` if ``If-Match`` header is provided and record modified in the iterim. .. note:: If ``If-None-Match: *`` request header is provided, the ``PUT`` will succeed only if no record exists with this id. .. seealso:: Add custom behaviour by overriding :meth:`kinto.core.resource.UserResource.process_record`. """ self._raise_400_if_invalid_id(self.record_id) id_field = self.model.id_field existing = None tombstones = None try: existing = self._get_record_or_404(self.record_id) except HTTPNotFound: # Look if this record used to exist (for preconditions check). filter_by_id = Filter(id_field, self.record_id, COMPARISON.EQ) tombstones, _ = self.model.get_records(filters=[filter_by_id], include_deleted=True) if len(tombstones) > 0: existing = tombstones[0] finally: if existing: self._raise_412_if_modified(existing) # If `data` is not provided, use existing record (or empty if creation) post_record = self.request.validated.get('data', existing) or {} record_id = post_record.setdefault(id_field, self.record_id) self._raise_400_if_id_mismatch(record_id, self.record_id) new_record = self.process_record(post_record, old=existing) if existing and not tombstones: record = self.model.update_record(new_record) else: record = self.model.create_record(new_record) self.request.response.status_code = 201 timestamp = record[self.model.modified_field] self._add_timestamp_header(self.request.response, timestamp=timestamp) action = existing and ACTIONS.UPDATE or ACTIONS.CREATE return self.postprocess(record, action=action, old=existing) def patch(self): """Record ``PATCH`` endpoint: modify a record and return its new version. If a request header ``Response-Behavior`` is set to ``light``, only the fields whose value was changed are returned. If set to ``diff``, only the fields whose value became different than the one provided are returned. :raises: :exc:`~pyramid:pyramid.httpexceptions.HTTPNotFound` if the record is not found. :raises: :exc:`~pyramid:pyramid.httpexceptions.HTTPPreconditionFailed` if ``If-Match`` header is provided and record modified in the iterim. .. seealso:: Add custom behaviour by overriding :meth:`kinto.core.resource.UserResource.apply_changes` or :meth:`kinto.core.resource.UserResource.process_record`. """ self._raise_400_if_invalid_id(self.record_id) existing = self._get_record_or_404(self.record_id) self._raise_412_if_modified(existing) try: # `data` attribute may not be present if only perms are patched. changes = self.request.json.get('data', {}) except ValueError: # If no `data` nor `permissions` is provided in patch, reject! # XXX: This should happen in schema instead (c.f. 
ShareableViewSet) error_details = { 'name': 'data', 'description': 'Provide at least one of data or permissions', } raise_invalid(self.request, **error_details) updated = self.apply_changes(existing, changes=changes) record_id = updated.setdefault(self.model.id_field, self.record_id) self._raise_400_if_id_mismatch(record_id, self.record_id) new_record = self.process_record(updated, old=existing) changed_fields = [k for k in changes.keys() if existing.get(k) != new_record.get(k)] # Save in storage if necessary. if changed_fields or self.force_patch_update: new_record = self.model.update_record(new_record) else: # Behave as if storage would have added `id` and `last_modified`. for extra_field in [self.model.modified_field, self.model.id_field]: new_record[extra_field] = existing[extra_field] # Adjust response according to ``Response-Behavior`` header body_behavior = self.request.headers.get('Response-Behavior', 'full') if body_behavior.lower() == 'light': # Only fields that were changed. data = {k: new_record[k] for k in changed_fields} elif body_behavior.lower() == 'diff': # Only fields that are different from those provided. data = {k: new_record[k] for k in changed_fields if changes.get(k) != new_record.get(k)} else: data = new_record timestamp = new_record.get(self.model.modified_field, existing[self.model.modified_field]) self._add_timestamp_header(self.request.response, timestamp=timestamp) return self.postprocess(data, action=ACTIONS.UPDATE, old=existing) def delete(self): """Record ``DELETE`` endpoint: delete a record and return it. :raises: :exc:`~pyramid:pyramid.httpexceptions.HTTPNotFound` if the record is not found. :raises: :exc:`~pyramid:pyramid.httpexceptions.HTTPPreconditionFailed` if ``If-Match`` header is provided and record modified in the iterim. """ self._raise_400_if_invalid_id(self.record_id) record = self._get_record_or_404(self.record_id) self._raise_412_if_modified(record) # Retreive the last_modified information from a querystring if present. last_modified = self.request.GET.get('last_modified') if last_modified: last_modified = native_value(last_modified.strip('"')) if not isinstance(last_modified, six.integer_types): error_details = { 'name': 'last_modified', 'location': 'querystring', 'description': 'Invalid value for %s' % last_modified } raise_invalid(self.request, **error_details) # If less or equal than current record. Ignore it. if last_modified <= record[self.model.modified_field]: last_modified = None deleted = self.model.delete_record(record, last_modified=last_modified) return self.postprocess(deleted, action=ACTIONS.DELETE, old=record) # # Data processing # def process_record(self, new, old=None): """Hook for processing records before they reach storage, to introduce specific logics on fields for example. .. code-block:: python def process_record(self, new, old=None): new = super(MyResource, self).process_record(new, old) version = old['version'] if old else 0 new['version'] = version + 1 return new Or add extra validation based on request: .. code-block:: python from kinto.core.errors import raise_invalid def process_record(self, new, old=None): new = super(MyResource, self).process_record(new, old) if new['browser'] not in request.headers['User-Agent']: raise_invalid(self.request, name='browser', error='Wrong') return new :param dict new: the validated record to be created or updated. :param dict old: the old record to be updated, ``None`` for creation endpoints. :returns: the processed record. 
:rtype: dict """ modified_field = self.model.modified_field new_last_modified = new.get(modified_field) # Drop the new last_modified if it is not an integer. is_integer = isinstance(new_last_modified, int) if not is_integer: new.pop(modified_field, None) return new # Drop the new last_modified if lesser or equal to the old one. is_less_or_equal = (old is not None and new_last_modified <= old[modified_field]) if is_less_or_equal: new.pop(modified_field, None) return new def apply_changes(self, record, changes): """Merge `changes` into `record` fields. .. note:: This is used in the context of PATCH only. Override this to control field changes at record level, for example: .. code-block:: python def apply_changes(self, record, changes): # Ignore value change if inferior if record['position'] > changes.get('position', -1): changes.pop('position', None) return super(MyResource, self).apply_changes(record, changes) :raises: :exc:`~pyramid:pyramid.httpexceptions.HTTPBadRequest` if result does not comply with resource schema. :returns: the new record with `changes` applied. :rtype: dict """ for field, value in changes.items(): has_changed = record.get(field, value) != value if self.mapping.is_readonly(field) and has_changed: error_details = { 'name': field, 'description': 'Cannot modify {0}'.format(field) } raise_invalid(self.request, **error_details) updated = record.copy() updated.update(**changes) try: return self.mapping.deserialize(updated) except colander.Invalid as e: # Transform the errors we got from colander into Cornice errors. # We could not rely on Service schema because the record should be # validated only once the changes are applied for field, error in e.asdict().items(): raise_invalid(self.request, name=field, description=error) def postprocess(self, result, action=ACTIONS.READ, old=None): body = { 'data': result } parent_id = self.get_parent_id(self.request) self.request.notify_resource_event(parent_id=parent_id, timestamp=self.timestamp, data=result, action=action, old=old) return body # # Internals # def _get_record_or_404(self, record_id): """Retrieve record from storage and raise ``404 Not found`` if missing. :raises: :exc:`~pyramid:pyramid.httpexceptions.HTTPNotFound` if the record is not found. """ if self.context and self.context.current_record: # Set during authorization. Save a storage hit. return self.context.current_record try: return self.model.get_record(record_id) except storage_exceptions.RecordNotFoundError: response = http_error(HTTPNotFound(), errno=ERRORS.INVALID_RESOURCE_ID) raise response def _add_timestamp_header(self, response, timestamp=None): """Add current timestamp in response headers, when request comes in. """ if timestamp is None: timestamp = self.timestamp # Pyramid takes care of converting. response.last_modified = timestamp / 1000.0 # Return timestamp as ETag. response.headers['ETag'] = encode_header('"%s"' % timestamp) def _add_cache_header(self, response): """Add Cache-Control and Expire headers, based a on a setting for the current resource. Cache headers will be set with anonymous requests only. .. note:: The ``Cache-Control: no-cache`` response header does not prevent caching in client. It will indicate the client to revalidate the response content on each access. The client will send a conditional request to the server and check that a ``304 Not modified`` is returned before serving content from cache. 
""" resource_name = self.context.resource_name if self.context else '' setting_key = '%s_cache_expires_seconds' % resource_name collection_expires = self.request.registry.settings.get(setting_key) is_anonymous = self.request.prefixed_userid is None if collection_expires and is_anonymous: response.cache_expires(seconds=int(collection_expires)) else: # Since `Expires` response header provides an HTTP data with a # resolution in seconds, do not use Pyramid `cache_expires()` in # order to omit it. response.cache_control.no_cache = True response.cache_control.no_store = True def _raise_400_if_invalid_id(self, record_id): """Raise 400 if specified record id does not match the format excepted by storage backends. :raises: :class:`pyramid.httpexceptions.HTTPBadRequest` """ is_string = isinstance(record_id, six.string_types) if not is_string or not self.model.id_generator.match(record_id): error_details = { 'location': 'path', 'description': "Invalid record id" } raise_invalid(self.request, **error_details) def _raise_304_if_not_modified(self, record=None): """Raise 304 if current timestamp is inferior to the one specified in headers. :raises: :exc:`~pyramid:pyramid.httpexceptions.HTTPNotModified` """ if_none_match = self.request.headers.get('If-None-Match') if not if_none_match: return if_none_match = decode_header(if_none_match) try: if not (if_none_match[0] == if_none_match[-1] == '"'): raise ValueError() modified_since = int(if_none_match[1:-1]) except (IndexError, ValueError): if if_none_match == '*': return error_details = { 'location': 'headers', 'description': "Invalid value for If-None-Match" } raise_invalid(self.request, **error_details) if record: current_timestamp = record[self.model.modified_field] else: current_timestamp = self.model.timestamp() if current_timestamp <= modified_since: response = HTTPNotModified() self._add_timestamp_header(response, timestamp=current_timestamp) raise response def _raise_412_if_modified(self, record=None): """Raise 412 if current timestamp is superior to the one specified in headers. :raises: :exc:`~pyramid:pyramid.httpexceptions.HTTPPreconditionFailed` """ if_match = self.request.headers.get('If-Match') if_none_match = self.request.headers.get('If-None-Match') if not if_match and not if_none_match: return if_match = decode_header(if_match) if if_match else None if record and if_none_match and decode_header(if_none_match) == '*': if record.get(self.model.deleted_field, False): # Tombstones should not prevent creation. return modified_since = -1 # Always raise. elif if_match: try: if not (if_match[0] == if_match[-1] == '"'): raise ValueError() modified_since = int(if_match[1:-1]) except (IndexError, ValueError): message = ("Invalid value for If-Match. The value should " "be integer between double quotes.") error_details = { 'location': 'headers', 'description': message } raise_invalid(self.request, **error_details) else: # In case _raise_304_if_not_modified() did not raise. 
return if record: current_timestamp = record[self.model.modified_field] else: current_timestamp = self.model.timestamp() if current_timestamp > modified_since: error_msg = 'Resource was modified meanwhile' details = {'existing': record} if record else {} response = http_error(HTTPPreconditionFailed(), errno=ERRORS.MODIFIED_MEANWHILE, message=error_msg, details=details) self._add_timestamp_header(response, timestamp=current_timestamp) raise response def _raise_400_if_id_mismatch(self, new_id, record_id): """Raise 400 if the `new_id`, within the request body, does not match the `record_id`, obtained from request path. :raises: :class:`pyramid.httpexceptions.HTTPBadRequest` """ if new_id != record_id: error_msg = 'Record id does not match existing record' error_details = { 'name': self.model.id_field, 'description': error_msg } raise_invalid(self.request, **error_details) def _extract_partial_fields(self): """Extract the fields to do the projection from QueryString parameters. """ fields = self.request.GET.get('_fields', None) if fields: fields = fields.split(',') root_fields = [f.split('.')[0] for f in fields] known_fields = self._get_known_fields() invalid_fields = set(root_fields) - set(known_fields) preserve_unknown = self.mapping.get_option('preserve_unknown') if not preserve_unknown and invalid_fields: error_msg = "Fields %s do not exist" % ','.join(invalid_fields) error_details = { 'name': "Invalid _fields parameter", 'description': error_msg } raise_invalid(self.request, **error_details) # Since id and last_modified are part of the synchronisation # API, force their presence in payloads. fields = fields + [self.model.id_field, self.model.modified_field] return fields def _extract_limit(self): """Extract limit value from QueryString parameters.""" paginate_by = self.request.registry.settings['paginate_by'] limit = self.request.GET.get('_limit', paginate_by) if limit: try: limit = int(limit) except ValueError: error_details = { 'location': 'querystring', 'description': "_limit should be an integer" } raise_invalid(self.request, **error_details) # If limit is higher than paginate_by setting, ignore it. if limit and paginate_by: limit = min(limit, paginate_by) return limit def _extract_filters(self, queryparams=None): """Extracts filters from QueryString parameters.""" if not queryparams: queryparams = self.request.GET filters = [] for param, paramvalue in queryparams.items(): param = param.strip() error_details = { 'name': param, 'location': 'querystring', 'description': 'Invalid value for %s' % param } # Ignore specific fields if param.startswith('_') and param not in ('_since', '_to', '_before'): continue # Handle the _since specific filter. 
if param in ('_since', '_to', '_before'): value = native_value(paramvalue.strip('"')) if not isinstance(value, six.integer_types): raise_invalid(self.request, **error_details) if param == '_since': operator = COMPARISON.GT else: if param == '_to': message = ('_to is now deprecated, ' 'you should use _before instead') url = ('https://kinto.readthedocs.io/en/2.4.0/api/' 'resource.html#list-of-available-url-' 'parameters') send_alert(self.request, message, url) operator = COMPARISON.LT filters.append( Filter(self.model.modified_field, value, operator) ) continue allKeywords = '|'.join([i.name.lower() for i in COMPARISON]) m = re.match(r'^('+allKeywords+')_([\w\.]+)$', param) if m: keyword, field = m.groups() operator = getattr(COMPARISON, keyword.upper()) else: operator, field = COMPARISON.EQ, param if not self.is_known_field(field): error_msg = "Unknown filter field '{0}'".format(param) error_details['description'] = error_msg raise_invalid(self.request, **error_details) value = native_value(paramvalue) if operator in (COMPARISON.IN, COMPARISON.EXCLUDE): value = set([native_value(v) for v in paramvalue.split(',')]) all_integers = all([isinstance(v, six.integer_types) for v in value]) all_strings = all([isinstance(v, six.text_type) for v in value]) has_invalid_value = ( (field == self.model.id_field and not all_strings) or (field == self.model.modified_field and not all_integers) ) if has_invalid_value: raise_invalid(self.request, **error_details) filters.append(Filter(field, value, operator)) return filters def _extract_sorting(self, limit): """Extracts filters from QueryString parameters.""" specified = self.request.GET.get('_sort', '').split(',') sorting = [] modified_field_used = self.model.modified_field in specified for field in specified: field = field.strip() m = re.match(r'^([\-+]?)([\w\.]+)$', field) if m: order, field = m.groups() if not self.is_known_field(field): error_details = { 'location': 'querystring', 'description': "Unknown sort field '{0}'".format(field) } raise_invalid(self.request, **error_details) direction = -1 if order == '-' else 1 sorting.append(Sort(field, direction)) if not modified_field_used: # Add a sort by the ``modified_field`` in descending order # useful for pagination sorting.append(Sort(self.model.modified_field, -1)) return sorting def _build_pagination_rules(self, sorting, last_record, rules=None): """Return the list of rules for a given sorting attribute and last_record. 
""" if rules is None: rules = [] rule = [] next_sorting = sorting[:-1] for field, _ in next_sorting: rule.append(Filter(field, last_record.get(field), COMPARISON.EQ)) field, direction = sorting[-1] if direction == -1: rule.append(Filter(field, last_record.get(field), COMPARISON.LT)) else: rule.append(Filter(field, last_record.get(field), COMPARISON.GT)) rules.append(rule) if len(next_sorting) == 0: return rules return self._build_pagination_rules(next_sorting, last_record, rules) def _extract_pagination_rules_from_token(self, limit, sorting): """Get pagination params.""" queryparams = self.request.GET token = queryparams.get('_token', None) filters = [] offset = 0 if token: try: tokeninfo = json.loads(decode64(token)) if not isinstance(tokeninfo, dict): raise ValueError() last_record = tokeninfo['last_record'] offset = tokeninfo['offset'] except (ValueError, KeyError, TypeError): error_msg = '_token has invalid content' error_details = { 'location': 'querystring', 'description': error_msg } raise_invalid(self.request, **error_details) filters = self._build_pagination_rules(sorting, last_record) return filters, offset def _next_page_url(self, sorting, limit, last_record, offset): """Build the Next-Page header from where we stopped.""" token = self._build_pagination_token(sorting, last_record, offset) params = self.request.GET.copy() params['_limit'] = limit params['_token'] = token service = self.request.current_service next_page_url = self.request.route_url(service.name, _query=params, **self.request.matchdict) return next_page_url def _build_pagination_token(self, sorting, last_record, offset): """Build a pagination token. It is a base64 JSON object with the sorting fields values of the last_record. """ token = { 'last_record': {}, 'offset': offset } for field, _ in sorting: token['last_record'][field] = last_record[field] return encode64(json.dumps(token)) class ShareableResource(UserResource): """Shareable resources allow to set permissions on records, in order to share their access or protect their modification. """ default_model = ShareableModel default_viewset = ShareableViewSet permissions = ('read', 'write') """List of allowed permissions names.""" def __init__(self, *args, **kwargs): super(ShareableResource, self).__init__(*args, **kwargs) # In base resource, PATCH only hit storage if no data has changed. # Here, we force update because we add the current principal to # the ``write`` ACE. self.force_patch_update = True # Required by the ShareableModel class. self.model.permission = self.request.registry.permission if self.request.prefixed_userid is None: # The principal of an anonymous is system.Everyone self.model.current_principal = Everyone else: self.model.current_principal = self.request.prefixed_userid self.model.effective_principals = self.request.effective_principals if self.context: self.model.get_permission_object_id = functools.partial( self.context.get_permission_object_id, self.request) def get_parent_id(self, request): """Unlike :class:`kinto.core.resource.UserResource`, records are not isolated by user. See https://github.com/mozilla-services/cliquet/issues/549 :returns: A constant empty value. """ return '' def _extract_filters(self, queryparams=None): """Override default filters extraction from QueryString to allow partial collection of records. XXX: find more elegant approach to add custom filters. 
""" filters = super(ShareableResource, self)._extract_filters(queryparams) ids = self.context.shared_ids if ids is not None: filter_by_id = Filter(self.model.id_field, ids, COMPARISON.IN) filters.insert(0, filter_by_id) return filters def _raise_412_if_modified(self, record=None): """Do not provide the permissions among the record fields. Ref: https://github.com/Kinto/kinto/issues/224 """ if record: record = record.copy() record.pop(self.model.permissions_field, None) return super(ShareableResource, self)._raise_412_if_modified(record) def process_record(self, new, old=None): """Read permissions from request body, and in the case of ``PUT`` every existing ACE is removed (using empty list). """ new = super(ShareableResource, self).process_record(new, old) permissions = self.request.validated.get('permissions', {}) annotated = new.copy() if permissions: is_put = (self.request.method.lower() == 'put') if is_put: # Remove every existing ACEs using empty lists. for perm in self.permissions: permissions.setdefault(perm, []) annotated[self.model.permissions_field] = permissions return annotated def postprocess(self, result, action=ACTIONS.READ, old=None): """Add ``permissions`` attribute in response body. In the HTTP API, it was decided that ``permissions`` would reside outside the ``data`` attribute. """ body = {} if not isinstance(result, list): # record endpoint. perms = result.pop(self.model.permissions_field, None) if perms is not None: body['permissions'] = {k: list(p) for k, p in perms.items()} if old: # Remove permissions from event payload. old.pop(self.model.permissions_field, None) data = super(ShareableResource, self).postprocess(result, action, old) body.update(data) return body
1
9919
What is this `collection_id` field here?
Kinto-kinto
py
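For context on the pagination logic in the Kinto `oldf` above: `_build_pagination_token` and `_extract_pagination_rules_from_token` round-trip a base64-encoded JSON payload holding the last record's sorted-on fields plus a numeric offset. A minimal standalone sketch of that token format, assuming kinto's `encode64`/`decode64` helpers are plain UTF-8 base64 wrappers; the function names below are hypothetical:

import base64
import json

def encode_token(sorting_fields, last_record, offset):
    # Mirrors _build_pagination_token: keep only the sorted-on fields
    # of the last record, plus the offset reached so far.
    payload = {
        'last_record': {f: last_record[f] for f in sorting_fields},
        'offset': offset,
    }
    return base64.b64encode(json.dumps(payload).encode('utf-8')).decode('ascii')

def decode_token(token):
    # Mirrors the happy path of _extract_pagination_rules_from_token.
    tokeninfo = json.loads(base64.b64decode(token).decode('utf-8'))
    if not isinstance(tokeninfo, dict):
        raise ValueError('_token has invalid content')
    return tokeninfo['last_record'], tokeninfo['offset']

token = encode_token(['last_modified'], {'id': 'abc', 'last_modified': 1430}, 10)
assert decode_token(token) == ({'last_modified': 1430}, 10)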
@@ -698,7 +698,10 @@ class Automaton(six.with_metaclass(Automaton_metaclass)): # Services def debug(self, lvl, msg): if self.debug_level >= lvl: - log_interactive.debug(msg) + if conf.interactive: + log_interactive.debug(msg) + else: + print(msg) def send(self, pkt): if self.state.state in self.interception_points:
1
# This file is part of Scapy # See http://www.secdev.org/projects/scapy for more information # Copyright (C) Philippe Biondi <[email protected]> # Copyright (C) Gabriel Potter <[email protected]> # This program is published under a GPLv2 license """ Automata with states, transitions and actions. """ from __future__ import absolute_import import types import itertools import time import os import sys import traceback from select import select from collections import deque import threading from scapy.config import conf from scapy.utils import do_graph from scapy.error import log_interactive, warning from scapy.plist import PacketList from scapy.data import MTU from scapy.supersocket import SuperSocket from scapy.consts import WINDOWS import scapy.modules.six as six if WINDOWS: from scapy.error import Scapy_Exception recv_error = Scapy_Exception else: recv_error = () """ In Windows, select.select is not available for custom objects. Here's the implementation of scapy to re-create this functionality # noqa: E501 # Passive way: using no-ressources locks +---------+ +---------------+ +-------------------------+ # noqa: E501 | Start +------------->Select_objects +----->+Linux: call select.select| # noqa: E501 +---------+ |(select.select)| +-------------------------+ # noqa: E501 +-------+-------+ | +----v----+ +--------+ | Windows | |Time Out+----------------------------------+ # noqa: E501 +----+----+ +----+---+ | # noqa: E501 | ^ | # noqa: E501 Event | | | # noqa: E501 + | | | # noqa: E501 | +-------v-------+ | | # noqa: E501 | +------+Selectable Sel.+-----+-----------------+-----------+ | # noqa: E501 | | +-------+-------+ | | | v +-----v-----+ # noqa: E501 +-------v----------+ | | | | | Passive lock<-----+release_all<------+ # noqa: E501 |Data added to list| +----v-----+ +-----v-----+ +----v-----+ v v + +-----------+ | # noqa: E501 +--------+---------+ |Selectable| |Selectable | |Selectable| ............ | | # noqa: E501 | +----+-----+ +-----------+ +----------+ | | # noqa: E501 | v | | # noqa: E501 v +----+------+ +------------------+ +-------------v-------------------+ | # noqa: E501 +-----+------+ |wait_return+-->+ check_recv: | | | | # noqa: E501 |call_release| +----+------+ |If data is in list| | END state: selectable returned | +---+--------+ # noqa: E501 +-----+-------- v +-------+----------+ | | | exit door | # noqa: E501 | else | +---------------------------------+ +---+--------+ # noqa: E501 | + | | # noqa: E501 | +----v-------+ | | # noqa: E501 +--------->free -->Passive lock| | | # noqa: E501 +----+-------+ | | # noqa: E501 | | | # noqa: E501 | v | # noqa: E501 +------------------Selectable-Selector-is-advertised-that-the-selectable-is-readable---------+ """ class SelectableObject(object): """DEV: to implement one of those, you need to add 2 things to your object: - add "check_recv" function - call "self.call_release" once you are ready to be read You can set the __selectable_force_select__ to True in the class, if you want to # noqa: E501 force the handler to use fileno(). 
This may only be usable on sockets created using # noqa: E501 the builtin socket API.""" __selectable_force_select__ = False def __init__(self): self.hooks = [] def check_recv(self): """DEV: will be called only once (at beginning) to check if the object is ready.""" # noqa: E501 raise OSError("This method must be overwritten.") def _wait_non_ressources(self, callback): """This get started as a thread, and waits for the data lock to be freed then advertise itself to the SelectableSelector using the callback""" # noqa: E501 self.trigger = threading.Lock() self.was_ended = False self.trigger.acquire() self.trigger.acquire() if not self.was_ended: callback(self) def wait_return(self, callback): """Entry point of SelectableObject: register the callback""" if self.check_recv(): return callback(self) _t = threading.Thread(target=self._wait_non_ressources, args=(callback,)) # noqa: E501 _t.setDaemon(True) _t.start() def register_hook(self, hook): """DEV: When call_release() will be called, the hook will also""" self.hooks.append(hook) def call_release(self, arborted=False): """DEV: Must be call when the object becomes ready to read. Relesases the lock of _wait_non_ressources""" self.was_ended = arborted try: self.trigger.release() except (threading.ThreadError, AttributeError): pass # Trigger hooks for hook in self.hooks: hook() class SelectableSelector(object): """ Select SelectableObject objects. inputs: objects to process remain: timeout. If 0, return []. customTypes: types of the objects that have the check_recv function. """ def _release_all(self): """Releases all locks to kill all threads""" for i in self.inputs: i.call_release(True) self.available_lock.release() def _timeout_thread(self, remain): """Timeout before releasing every thing, if nothing was returned""" time.sleep(remain) if not self._ended: self._ended = True self._release_all() def _exit_door(self, _input): """This function is passed to each SelectableObject as a callback The SelectableObjects have to call it once there are ready""" self.results.append(_input) if self._ended: return self._ended = True self._release_all() def __init__(self, inputs, remain): self.results = [] self.inputs = list(inputs) self.remain = remain self.available_lock = threading.Lock() self.available_lock.acquire() self._ended = False def process(self): """Entry point of SelectableSelector""" if WINDOWS: select_inputs = [] for i in self.inputs: if not isinstance(i, SelectableObject): warning("Unknown ignored object type: %s", type(i)) elif i.__selectable_force_select__: # Then use select.select select_inputs.append(i) elif not self.remain and i.check_recv(): self.results.append(i) elif self.remain: i.wait_return(self._exit_door) if select_inputs: # Use default select function self.results.extend(select(select_inputs, [], [], self.remain)[0]) # noqa: E501 if not self.remain: return self.results threading.Thread(target=self._timeout_thread, args=(self.remain,)).start() # noqa: E501 if not self._ended: self.available_lock.acquire() return self.results else: r, _, _ = select(self.inputs, [], [], self.remain) return r def select_objects(inputs, remain): """ Select SelectableObject objects. Same than: ``select.select([inputs], [], [], remain)`` But also works on Windows, only on SelectableObject. :param inputs: objects to process :param remain: timeout. If 0, return []. 
""" handler = SelectableSelector(inputs, remain) return handler.process() class ObjectPipe(SelectableObject): read_allowed_exceptions = () def __init__(self): self.closed = False self.rd, self.wr = os.pipe() self.queue = deque() SelectableObject.__init__(self) def fileno(self): return self.rd def check_recv(self): return len(self.queue) > 0 def send(self, obj): self.queue.append(obj) os.write(self.wr, b"X") self.call_release() def write(self, obj): self.send(obj) def recv(self, n=0): if self.closed: if self.check_recv(): return self.queue.popleft() return None os.read(self.rd, 1) return self.queue.popleft() def read(self, n=0): return self.recv(n) def close(self): if not self.closed: self.closed = True os.close(self.rd) os.close(self.wr) self.queue.clear() @staticmethod def select(sockets, remain=conf.recv_poll_rate): # Only handle ObjectPipes results = [] for s in sockets: if s.closed: results.append(s) if results: return results, None return select_objects(sockets, remain), None class Message: def __init__(self, **args): self.__dict__.update(args) def __repr__(self): return "<Message %s>" % " ".join("%s=%r" % (k, v) for (k, v) in six.iteritems(self.__dict__) # noqa: E501 if not k.startswith("_")) class _instance_state: def __init__(self, instance): self.__self__ = instance.__self__ self.__func__ = instance.__func__ self.__self__.__class__ = instance.__self__.__class__ def __getattr__(self, attr): return getattr(self.__func__, attr) def __call__(self, *args, **kargs): return self.__func__(self.__self__, *args, **kargs) def breaks(self): return self.__self__.add_breakpoints(self.__func__) def intercepts(self): return self.__self__.add_interception_points(self.__func__) def unbreaks(self): return self.__self__.remove_breakpoints(self.__func__) def unintercepts(self): return self.__self__.remove_interception_points(self.__func__) ############## # Automata # ############## class ATMT: STATE = "State" ACTION = "Action" CONDITION = "Condition" RECV = "Receive condition" TIMEOUT = "Timeout condition" IOEVENT = "I/O event" class NewStateRequested(Exception): def __init__(self, state_func, automaton, *args, **kargs): self.func = state_func self.state = state_func.atmt_state self.initial = state_func.atmt_initial self.error = state_func.atmt_error self.final = state_func.atmt_final Exception.__init__(self, "Request state [%s]" % self.state) self.automaton = automaton self.args = args self.kargs = kargs self.action_parameters() # init action parameters def action_parameters(self, *args, **kargs): self.action_args = args self.action_kargs = kargs return self def run(self): return self.func(self.automaton, *self.args, **self.kargs) def __repr__(self): return "NewStateRequested(%s)" % self.state @staticmethod def state(initial=0, final=0, error=0): def deco(f, initial=initial, final=final): f.atmt_type = ATMT.STATE f.atmt_state = f.__name__ f.atmt_initial = initial f.atmt_final = final f.atmt_error = error def state_wrapper(self, *args, **kargs): return ATMT.NewStateRequested(f, self, *args, **kargs) state_wrapper.__name__ = "%s_wrapper" % f.__name__ state_wrapper.atmt_type = ATMT.STATE state_wrapper.atmt_state = f.__name__ state_wrapper.atmt_initial = initial state_wrapper.atmt_final = final state_wrapper.atmt_error = error state_wrapper.atmt_origfunc = f return state_wrapper return deco @staticmethod def action(cond, prio=0): def deco(f, cond=cond): if not hasattr(f, "atmt_type"): f.atmt_cond = {} f.atmt_type = ATMT.ACTION f.atmt_cond[cond.atmt_condname] = prio return f return deco @staticmethod def 
condition(state, prio=0): def deco(f, state=state): f.atmt_type = ATMT.CONDITION f.atmt_state = state.atmt_state f.atmt_condname = f.__name__ f.atmt_prio = prio return f return deco @staticmethod def receive_condition(state, prio=0): def deco(f, state=state): f.atmt_type = ATMT.RECV f.atmt_state = state.atmt_state f.atmt_condname = f.__name__ f.atmt_prio = prio return f return deco @staticmethod def ioevent(state, name, prio=0, as_supersocket=None): def deco(f, state=state): f.atmt_type = ATMT.IOEVENT f.atmt_state = state.atmt_state f.atmt_condname = f.__name__ f.atmt_ioname = name f.atmt_prio = prio f.atmt_as_supersocket = as_supersocket return f return deco @staticmethod def timeout(state, timeout): def deco(f, state=state, timeout=timeout): f.atmt_type = ATMT.TIMEOUT f.atmt_state = state.atmt_state f.atmt_timeout = timeout f.atmt_condname = f.__name__ return f return deco class _ATMT_Command: RUN = "RUN" NEXT = "NEXT" FREEZE = "FREEZE" STOP = "STOP" END = "END" EXCEPTION = "EXCEPTION" SINGLESTEP = "SINGLESTEP" BREAKPOINT = "BREAKPOINT" INTERCEPT = "INTERCEPT" ACCEPT = "ACCEPT" REPLACE = "REPLACE" REJECT = "REJECT" class _ATMT_supersocket(SuperSocket, SelectableObject): def __init__(self, name, ioevent, automaton, proto, *args, **kargs): SelectableObject.__init__(self) self.name = name self.ioevent = ioevent self.proto = proto # write, read self.spa, self.spb = ObjectPipe(), ObjectPipe() # Register recv hook self.spb.register_hook(self.call_release) kargs["external_fd"] = {ioevent: (self.spa, self.spb)} self.atmt = automaton(*args, **kargs) self.atmt.runbg() def fileno(self): return self.spb.fileno() def send(self, s): if not isinstance(s, bytes): s = bytes(s) return self.spa.send(s) def check_recv(self): return self.spb.check_recv() def recv(self, n=MTU): r = self.spb.recv(n) if self.proto is not None: r = self.proto(r) return r def close(self): if not self.closed: self.atmt.stop() self.spa.close() self.spb.close() self.closed = True @staticmethod def select(sockets, remain=conf.recv_poll_rate): return select_objects(sockets, remain), None class _ATMT_to_supersocket: def __init__(self, name, ioevent, automaton): self.name = name self.ioevent = ioevent self.automaton = automaton def __call__(self, proto, *args, **kargs): return _ATMT_supersocket( self.name, self.ioevent, self.automaton, proto, *args, **kargs ) class Automaton_metaclass(type): def __new__(cls, name, bases, dct): cls = super(Automaton_metaclass, cls).__new__(cls, name, bases, dct) cls.states = {} cls.state = None cls.recv_conditions = {} cls.conditions = {} cls.ioevents = {} cls.timeout = {} cls.actions = {} cls.initial_states = [] cls.ionames = [] cls.iosupersockets = [] members = {} classes = [cls] while classes: c = classes.pop(0) # order is important to avoid breaking method overloading # noqa: E501 classes += list(c.__bases__) for k, v in six.iteritems(c.__dict__): if k not in members: members[k] = v decorated = [v for v in six.itervalues(members) if isinstance(v, types.FunctionType) and hasattr(v, "atmt_type")] # noqa: E501 for m in decorated: if m.atmt_type == ATMT.STATE: s = m.atmt_state cls.states[s] = m cls.recv_conditions[s] = [] cls.ioevents[s] = [] cls.conditions[s] = [] cls.timeout[s] = [] if m.atmt_initial: cls.initial_states.append(m) elif m.atmt_type in [ATMT.CONDITION, ATMT.RECV, ATMT.TIMEOUT, ATMT.IOEVENT]: # noqa: E501 cls.actions[m.atmt_condname] = [] for m in decorated: if m.atmt_type == ATMT.CONDITION: cls.conditions[m.atmt_state].append(m) elif m.atmt_type == ATMT.RECV: 
cls.recv_conditions[m.atmt_state].append(m) elif m.atmt_type == ATMT.IOEVENT: cls.ioevents[m.atmt_state].append(m) cls.ionames.append(m.atmt_ioname) if m.atmt_as_supersocket is not None: cls.iosupersockets.append(m) elif m.atmt_type == ATMT.TIMEOUT: cls.timeout[m.atmt_state].append((m.atmt_timeout, m)) elif m.atmt_type == ATMT.ACTION: for c in m.atmt_cond: cls.actions[c].append(m) for v in six.itervalues(cls.timeout): v.sort(key=lambda x: x[0]) v.append((None, None)) for v in itertools.chain(six.itervalues(cls.conditions), six.itervalues(cls.recv_conditions), six.itervalues(cls.ioevents)): v.sort(key=lambda x: x.atmt_prio) for condname, actlst in six.iteritems(cls.actions): actlst.sort(key=lambda x: x.atmt_cond[condname]) for ioev in cls.iosupersockets: setattr(cls, ioev.atmt_as_supersocket, _ATMT_to_supersocket(ioev.atmt_as_supersocket, ioev.atmt_ioname, cls)) # noqa: E501 return cls def build_graph(self): s = 'digraph "%s" {\n' % self.__class__.__name__ se = "" # Keep initial nodes at the beginning for better rendering for st in six.itervalues(self.states): if st.atmt_initial: se = ('\t"%s" [ style=filled, fillcolor=blue, shape=box, root=true];\n' % st.atmt_state) + se # noqa: E501 elif st.atmt_final: se += '\t"%s" [ style=filled, fillcolor=green, shape=octagon ];\n' % st.atmt_state # noqa: E501 elif st.atmt_error: se += '\t"%s" [ style=filled, fillcolor=red, shape=octagon ];\n' % st.atmt_state # noqa: E501 s += se for st in six.itervalues(self.states): for n in st.atmt_origfunc.__code__.co_names + st.atmt_origfunc.__code__.co_consts: # noqa: E501 if n in self.states: s += '\t"%s" -> "%s" [ color=green ];\n' % (st.atmt_state, n) # noqa: E501 for c, k, v in ([("purple", k, v) for k, v in self.conditions.items()] + # noqa: E501 [("red", k, v) for k, v in self.recv_conditions.items()] + # noqa: E501 [("orange", k, v) for k, v in self.ioevents.items()]): for f in v: for n in f.__code__.co_names + f.__code__.co_consts: if n in self.states: line = f.atmt_condname for x in self.actions[f.atmt_condname]: line += "\\l>[%s]" % x.__name__ s += '\t"%s" -> "%s" [label="%s", color=%s];\n' % (k, n, line, c) # noqa: E501 for k, v in six.iteritems(self.timeout): for t, f in v: if f is None: continue for n in f.__code__.co_names + f.__code__.co_consts: if n in self.states: line = "%s/%.1fs" % (f.atmt_condname, t) for x in self.actions[f.atmt_condname]: line += "\\l>[%s]" % x.__name__ s += '\t"%s" -> "%s" [label="%s",color=blue];\n' % (k, n, line) # noqa: E501 s += "}\n" return s def graph(self, **kargs): s = self.build_graph() return do_graph(s, **kargs) class Automaton(six.with_metaclass(Automaton_metaclass)): def parse_args(self, debug=0, store=1, **kargs): self.debug_level = debug self.socket_kargs = kargs self.store_packets = store def master_filter(self, pkt): return True def my_send(self, pkt): self.send_sock.send(pkt) # Utility classes and exceptions class _IO_fdwrapper(SelectableObject): def __init__(self, rd, wr): if rd is not None and not isinstance(rd, (int, ObjectPipe)): rd = rd.fileno() if wr is not None and not isinstance(wr, (int, ObjectPipe)): wr = wr.fileno() self.rd = rd self.wr = wr SelectableObject.__init__(self) def fileno(self): if isinstance(self.rd, ObjectPipe): return self.rd.fileno() return self.rd def check_recv(self): return self.rd.check_recv() def read(self, n=65535): if isinstance(self.rd, ObjectPipe): return self.rd.recv(n) return os.read(self.rd, n) def write(self, msg): self.call_release() if isinstance(self.wr, ObjectPipe): self.wr.send(msg) return return 
os.write(self.wr, msg) def recv(self, n=65535): return self.read(n) def send(self, msg): return self.write(msg) class _IO_mixer(SelectableObject): def __init__(self, rd, wr): self.rd = rd self.wr = wr SelectableObject.__init__(self) def fileno(self): if isinstance(self.rd, int): return self.rd return self.rd.fileno() def check_recv(self): return self.rd.check_recv() def recv(self, n=None): return self.rd.recv(n) def read(self, n=None): return self.recv(n) def send(self, msg): self.wr.send(msg) return self.call_release() def write(self, msg): return self.send(msg) class AutomatonException(Exception): def __init__(self, msg, state=None, result=None): Exception.__init__(self, msg) self.state = state self.result = result class AutomatonError(AutomatonException): pass class ErrorState(AutomatonException): pass class Stuck(AutomatonException): pass class AutomatonStopped(AutomatonException): pass class Breakpoint(AutomatonStopped): pass class Singlestep(AutomatonStopped): pass class InterceptionPoint(AutomatonStopped): def __init__(self, msg, state=None, result=None, packet=None): Automaton.AutomatonStopped.__init__(self, msg, state=state, result=result) # noqa: E501 self.packet = packet class CommandMessage(AutomatonException): pass # Services def debug(self, lvl, msg): if self.debug_level >= lvl: log_interactive.debug(msg) def send(self, pkt): if self.state.state in self.interception_points: self.debug(3, "INTERCEPT: packet intercepted: %s" % pkt.summary()) self.intercepted_packet = pkt cmd = Message(type=_ATMT_Command.INTERCEPT, state=self.state, pkt=pkt) # noqa: E501 self.cmdout.send(cmd) cmd = self.cmdin.recv() self.intercepted_packet = None if cmd.type == _ATMT_Command.REJECT: self.debug(3, "INTERCEPT: packet rejected") return elif cmd.type == _ATMT_Command.REPLACE: pkt = cmd.pkt self.debug(3, "INTERCEPT: packet replaced by: %s" % pkt.summary()) # noqa: E501 elif cmd.type == _ATMT_Command.ACCEPT: self.debug(3, "INTERCEPT: packet accepted") else: raise self.AutomatonError("INTERCEPT: unknown verdict: %r" % cmd.type) # noqa: E501 self.my_send(pkt) self.debug(3, "SENT : %s" % pkt.summary()) if self.store_packets: self.packets.append(pkt.copy()) # Internals def __init__(self, *args, **kargs): external_fd = kargs.pop("external_fd", {}) self.send_sock_class = kargs.pop("ll", conf.L3socket) self.recv_sock_class = kargs.pop("recvsock", conf.L2listen) self.started = threading.Lock() self.threadid = None self.breakpointed = None self.breakpoints = set() self.interception_points = set() self.intercepted_packet = None self.debug_level = 0 self.init_args = args self.init_kargs = kargs self.io = type.__new__(type, "IOnamespace", (), {}) self.oi = type.__new__(type, "IOnamespace", (), {}) self.cmdin = ObjectPipe() self.cmdout = ObjectPipe() self.ioin = {} self.ioout = {} for n in self.ionames: extfd = external_fd.get(n) if not isinstance(extfd, tuple): extfd = (extfd, extfd) ioin, ioout = extfd if ioin is None: ioin = ObjectPipe() elif not isinstance(ioin, SelectableObject): ioin = self._IO_fdwrapper(ioin, None) if ioout is None: ioout = ObjectPipe() elif not isinstance(ioout, SelectableObject): ioout = self._IO_fdwrapper(None, ioout) self.ioin[n] = ioin self.ioout[n] = ioout ioin.ioname = n ioout.ioname = n setattr(self.io, n, self._IO_mixer(ioout, ioin)) setattr(self.oi, n, self._IO_mixer(ioin, ioout)) for stname in self.states: setattr(self, stname, _instance_state(getattr(self, stname))) self.start() def __iter__(self): return self def __del__(self): self.stop() def _run_condition(self, cond, *args, 
**kargs): try: self.debug(5, "Trying %s [%s]" % (cond.atmt_type, cond.atmt_condname)) # noqa: E501 cond(self, *args, **kargs) except ATMT.NewStateRequested as state_req: self.debug(2, "%s [%s] taken to state [%s]" % (cond.atmt_type, cond.atmt_condname, state_req.state)) # noqa: E501 if cond.atmt_type == ATMT.RECV: if self.store_packets: self.packets.append(args[0]) for action in self.actions[cond.atmt_condname]: self.debug(2, " + Running action [%s]" % action.__name__) action(self, *state_req.action_args, **state_req.action_kargs) raise except Exception as e: self.debug(2, "%s [%s] raised exception [%s]" % (cond.atmt_type, cond.atmt_condname, e)) # noqa: E501 raise else: self.debug(2, "%s [%s] not taken" % (cond.atmt_type, cond.atmt_condname)) # noqa: E501 def _do_start(self, *args, **kargs): ready = threading.Event() _t = threading.Thread(target=self._do_control, args=(ready,) + (args), kwargs=kargs) # noqa: E501 _t.setDaemon(True) _t.start() ready.wait() def _do_control(self, ready, *args, **kargs): with self.started: self.threadid = threading.currentThread().ident # Update default parameters a = args + self.init_args[len(args):] k = self.init_kargs.copy() k.update(kargs) self.parse_args(*a, **k) # Start the automaton self.state = self.initial_states[0](self) self.send_sock = self.send_sock_class(**self.socket_kargs) self.listen_sock = self.recv_sock_class(**self.socket_kargs) self.packets = PacketList(name="session[%s]" % self.__class__.__name__) # noqa: E501 singlestep = True iterator = self._do_iter() self.debug(3, "Starting control thread [tid=%i]" % self.threadid) # Sync threads ready.set() try: while True: c = self.cmdin.recv() self.debug(5, "Received command %s" % c.type) if c.type == _ATMT_Command.RUN: singlestep = False elif c.type == _ATMT_Command.NEXT: singlestep = True elif c.type == _ATMT_Command.FREEZE: continue elif c.type == _ATMT_Command.STOP: break while True: state = next(iterator) if isinstance(state, self.CommandMessage): break elif isinstance(state, self.Breakpoint): c = Message(type=_ATMT_Command.BREAKPOINT, state=state) # noqa: E501 self.cmdout.send(c) break if singlestep: c = Message(type=_ATMT_Command.SINGLESTEP, state=state) # noqa: E501 self.cmdout.send(c) break except (StopIteration, RuntimeError): c = Message(type=_ATMT_Command.END, result=self.final_state_output) self.cmdout.send(c) except Exception as e: exc_info = sys.exc_info() self.debug(3, "Transferring exception from tid=%i:\n%s" % (self.threadid, traceback.format_exception(*exc_info))) # noqa: E501 m = Message(type=_ATMT_Command.EXCEPTION, exception=e, exc_info=exc_info) # noqa: E501 self.cmdout.send(m) self.debug(3, "Stopping control thread (tid=%i)" % self.threadid) self.threadid = None # Close sockets if self.listen_sock: self.listen_sock.close() if self.send_sock: self.send_sock.close() def _do_iter(self): while True: try: self.debug(1, "## state=[%s]" % self.state.state) # Entering a new state. 
First, call new state function if self.state.state in self.breakpoints and self.state.state != self.breakpointed: # noqa: E501 self.breakpointed = self.state.state yield self.Breakpoint("breakpoint triggered on state %s" % self.state.state, # noqa: E501 state=self.state.state) self.breakpointed = None state_output = self.state.run() if self.state.error: raise self.ErrorState("Reached %s: [%r]" % (self.state.state, state_output), # noqa: E501 result=state_output, state=self.state.state) # noqa: E501 if self.state.final: self.final_state_output = state_output return if state_output is None: state_output = () elif not isinstance(state_output, list): state_output = state_output, # Then check immediate conditions for cond in self.conditions[self.state.state]: self._run_condition(cond, *state_output) # If still there and no conditions left, we are stuck! if (len(self.recv_conditions[self.state.state]) == 0 and len(self.ioevents[self.state.state]) == 0 and len(self.timeout[self.state.state]) == 1): raise self.Stuck("stuck in [%s]" % self.state.state, state=self.state.state, result=state_output) # noqa: E501 # Finally listen and pay attention to timeouts expirations = iter(self.timeout[self.state.state]) next_timeout, timeout_func = next(expirations) t0 = time.time() fds = [self.cmdin] if len(self.recv_conditions[self.state.state]) > 0: fds.append(self.listen_sock) for ioev in self.ioevents[self.state.state]: fds.append(self.ioin[ioev.atmt_ioname]) while True: t = time.time() - t0 if next_timeout is not None: if next_timeout <= t: self._run_condition(timeout_func, *state_output) next_timeout, timeout_func = next(expirations) if next_timeout is None: remain = None else: remain = next_timeout - t self.debug(5, "Select on %r" % fds) r = select_objects(fds, remain) self.debug(5, "Selected %r" % r) for fd in r: self.debug(5, "Looking at %r" % fd) if fd == self.cmdin: yield self.CommandMessage("Received command message") # noqa: E501 elif fd == self.listen_sock: try: pkt = self.listen_sock.recv(MTU) except recv_error: pass else: if pkt is not None: if self.master_filter(pkt): self.debug(3, "RECVD: %s" % pkt.summary()) # noqa: E501 for rcvcond in self.recv_conditions[self.state.state]: # noqa: E501 self._run_condition(rcvcond, pkt, *state_output) # noqa: E501 else: self.debug(4, "FILTR: %s" % pkt.summary()) # noqa: E501 else: self.debug(3, "IOEVENT on %s" % fd.ioname) for ioevt in self.ioevents[self.state.state]: if ioevt.atmt_ioname == fd.ioname: self._run_condition(ioevt, fd, *state_output) # noqa: E501 except ATMT.NewStateRequested as state_req: self.debug(2, "switching from [%s] to [%s]" % (self.state.state, state_req.state)) # noqa: E501 self.state = state_req yield state_req # Public API def add_interception_points(self, *ipts): for ipt in ipts: if hasattr(ipt, "atmt_state"): ipt = ipt.atmt_state self.interception_points.add(ipt) def remove_interception_points(self, *ipts): for ipt in ipts: if hasattr(ipt, "atmt_state"): ipt = ipt.atmt_state self.interception_points.discard(ipt) def add_breakpoints(self, *bps): for bp in bps: if hasattr(bp, "atmt_state"): bp = bp.atmt_state self.breakpoints.add(bp) def remove_breakpoints(self, *bps): for bp in bps: if hasattr(bp, "atmt_state"): bp = bp.atmt_state self.breakpoints.discard(bp) def start(self, *args, **kargs): if not self.started.locked(): self._do_start(*args, **kargs) def run(self, resume=None, wait=True): if resume is None: resume = Message(type=_ATMT_Command.RUN) self.cmdin.send(resume) if wait: try: c = self.cmdout.recv() except 
KeyboardInterrupt: self.cmdin.send(Message(type=_ATMT_Command.FREEZE)) return if c.type == _ATMT_Command.END: return c.result elif c.type == _ATMT_Command.INTERCEPT: raise self.InterceptionPoint("packet intercepted", state=c.state.state, packet=c.pkt) # noqa: E501 elif c.type == _ATMT_Command.SINGLESTEP: raise self.Singlestep("singlestep state=[%s]" % c.state.state, state=c.state.state) # noqa: E501 elif c.type == _ATMT_Command.BREAKPOINT: raise self.Breakpoint("breakpoint triggered on state [%s]" % c.state.state, state=c.state.state) # noqa: E501 elif c.type == _ATMT_Command.EXCEPTION: six.reraise(c.exc_info[0], c.exc_info[1], c.exc_info[2]) def runbg(self, resume=None, wait=False): self.run(resume, wait) def next(self): return self.run(resume=Message(type=_ATMT_Command.NEXT)) __next__ = next def stop(self): self.cmdin.send(Message(type=_ATMT_Command.STOP)) with self.started: # Flush command pipes while True: r = select_objects([self.cmdin, self.cmdout], 0) if not r: break for fd in r: fd.recv() def restart(self, *args, **kargs): self.stop() self.start(*args, **kargs) def accept_packet(self, pkt=None, wait=False): rsm = Message() if pkt is None: rsm.type = _ATMT_Command.ACCEPT else: rsm.type = _ATMT_Command.REPLACE rsm.pkt = pkt return self.run(resume=rsm, wait=wait) def reject_packet(self, wait=False): rsm = Message(type=_ATMT_Command.REJECT) return self.run(resume=rsm, wait=wait)
1
17443
Is there a reason why we don't use logging for this?
secdev-scapy
py
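The review message above asks why the patch falls back to print() rather than logging when conf.interactive is false. A minimal sketch of the logging-based alternative the reviewer seems to have in mind — the logger name and handler setup are assumptions, not scapy's actual configuration:

import logging
import sys

# Assumed setup: give the automaton logger a stdout handler, so debug
# output is visible in non-interactive runs without a print() fallback.
log_automaton = logging.getLogger('scapy.automaton')
if not log_automaton.handlers:
    handler = logging.StreamHandler(sys.stdout)
    handler.setFormatter(logging.Formatter('%(message)s'))
    log_automaton.addHandler(handler)
log_automaton.setLevel(logging.DEBUG)

def debug(self, lvl, msg):
    # One code path: the handler, not an if/else on conf.interactive,
    # decides where the message ends up.
    if self.debug_level >= lvl:
        log_automaton.debug(msg)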
@@ -0,0 +1,18 @@ +<?php + +/** + * Copyright © Bold Brand Commerce Sp. z o.o. All rights reserved. + * See LICENSE.txt for license details. + */ + +declare(strict_types=1); + +namespace Ergonode\Core\Infrastructure\Exception; + +class DenoralizationException extends NormalizerException +{ + public function __construct(string $message, \Throwable $previous = null) + { + parent::__construct($message, $previous); + } +}
1
1
9393
Exceptions should be placed in the application layer -> infrastructure is aware of the application layer, not the other way around
ergonode-backend
php
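The review message above states the dependency rule behind the requested move: infrastructure may depend on the application layer, never the reverse. A language-neutral illustration of that layering, sketched in Python with illustrative module and class names (the Ergonode code itself is PHP):

# application/exceptions.py -- the inner layer owns the exception type.
class DenormalizationException(Exception):
    """Raised when a payload cannot be denormalized into a domain object."""

# infrastructure/serializer.py -- the outer layer imports inward only,
# e.g. `from application.exceptions import DenormalizationException`.
def denormalize(payload, required):
    missing = [key for key in required if key not in payload]
    if missing:
        raise DenormalizationException('Missing fields: %s' % ', '.join(missing))
    return payload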
@@ -34,6 +34,7 @@ def test_simple_json_output(): ("column", 0), ("line", 1), ("message", "Line too long (1/2)"), + ("message-id", "C0301"), ("module", "0123"), ("obj", ""), ("path", "0123"),
1
# Copyright (c) 2015-2016 Claudiu Popa <[email protected]>

# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/COPYING

"""Test for the JSON reporter."""

import json

import six

from pylint.lint import PyLinter
from pylint import checkers
from pylint.reporters.json import JSONReporter


def test_simple_json_output():
    output = six.StringIO()

    reporter = JSONReporter()
    linter = PyLinter(reporter=reporter)
    checkers.initialize(linter)

    linter.config.persistent = 0
    linter.reporter.set_output(output)
    linter.open()
    linter.set_current_module('0123')
    linter.add_message('line-too-long', line=1, args=(1, 2))

    # we call this method because we didn't actually run the checkers
    reporter.display_messages(None)

    expected_result = [[
        ("column", 0),
        ("line", 1),
        ("message", "Line too long (1/2)"),
        ("module", "0123"),
        ("obj", ""),
        ("path", "0123"),
        ("symbol", "line-too-long"),
        ("type", "convention"),
    ]]
    report_result = json.loads(output.getvalue())
    report_result = [sorted(report_result[0].items(),
                            key=lambda item: item[0])]
    assert report_result == expected_result
1
9445
IIRC the issue mentioned reporting the _symbolic message_, so in this case it would be `line-too-long`.
PyCQA-pylint
py
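Per the review message above, the linked issue asked for the symbolic name rather than the numeric message id, so the pair added by the patch would carry `line-too-long` instead of `C0301`. A sketch of the test expectation under that reading (an assumption about the intended fix, not pylint's final behaviour):

expected_result = [[
    ("column", 0),
    ("line", 1),
    ("message", "Line too long (1/2)"),
    ("message-id", "line-too-long"),  # symbolic name, per the review comment
    ("module", "0123"),
    ("obj", ""),
    ("path", "0123"),
    ("symbol", "line-too-long"),
    ("type", "convention"),
]]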
@@ -1427,6 +1427,7 @@ int GetSpellStatValue(uint32 spell_id, const char* stat_identifier, uint8 slot) else if (id == "descnum") { return spells[spell_id].descnum; } else if (id == "effectdescnum") { return spells[spell_id].effectdescnum; } else if (id == "npc_no_los") { return spells[spell_id].npc_no_los; } + else if (id == "feedbackable") { return spells[spell_id].reflectable; } else if (id == "reflectable") { return spells[spell_id].reflectable; } else if (id == "bonushate") { return spells[spell_id].bonushate; } else if (id == "endurcost") { return spells[spell_id].EndurCost; }
1
/* EQEMu: Everquest Server Emulator Copyright (C) 2001-2002 EQEMu Development Team (http://eqemu.org) This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY except by those people which sell it, which are required to give you total support for your newly bought product; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* General outline of spell casting process 1. a) Client clicks a spell bar gem, ability, or item. client_process.cpp gets the op and calls CastSpell() with all the relevant info including cast time. b) NPC does CastSpell() from AI 2. a) CastSpell() determines there is a cast time and sets some state keeping flags to be used to check the progress of casting and finish it later. b) CastSpell() sees there's no cast time, and calls CastedSpellFinished() Go to step 4. 3. SpellProcess() notices that the spell casting timer which was set by CastSpell() is expired, and calls CastedSpellFinished() 4. CastedSpellFinished() checks some timed spell specific things, like wether to interrupt or not, due to movement or melee. If successful SpellFinished() is called. 5. SpellFinished() checks some things like LoS, reagents, target and figures out what's going to get hit by this spell based on its type. 6. a) Single target spell, SpellOnTarget() is called. b) AE spell, Entity::AESpell() is called. c) Group spell, Group::CastGroupSpell()/SpellOnTarget() is called as needed. 7. SpellOnTarget() may or may not call SpellEffect() to cause effects to the target 8. If this was timed, CastedSpellFinished() will restore the client's spell bar gems. Most user code should call CastSpell(), with a 0 casting time if needed, and not SpellFinished(). 
*/ #include "../common/eqemu_logsys.h" #include "classes.h" #include "spdat.h" #ifndef WIN32 #include <stdlib.h> #include "unix.h" #endif /////////////////////////////////////////////////////////////////////////////// // spell property testing functions bool IsTargetableAESpell(uint16 spell_id) { if (IsValidSpell(spell_id) && spells[spell_id].targettype == ST_AETarget) { return true; } return false; } bool IsSacrificeSpell(uint16 spell_id) { return IsEffectInSpell(spell_id, SE_Sacrifice); } bool IsLifetapSpell(uint16 spell_id) { // Ancient Lifebane: 2115 if (IsValidSpell(spell_id) && (spells[spell_id].targettype == ST_Tap || spells[spell_id].targettype == ST_TargetAETap || spell_id == 2115)) return true; return false; } bool IsMezSpell(uint16 spell_id) { return IsEffectInSpell(spell_id, SE_Mez); } bool IsStunSpell(uint16 spell_id) { return IsEffectInSpell(spell_id, SE_Stun); } bool IsSummonSpell(uint16 spellid) { for (int o = 0; o < EFFECT_COUNT; o++) { uint32 tid = spells[spellid].effectid[o]; if (tid == SE_SummonPet || tid == SE_SummonItem || tid == SE_SummonPC) return true; } return false; } bool IsEvacSpell(uint16 spellid) { return IsEffectInSpell(spellid, SE_Succor); } bool IsDamageSpell(uint16 spellid) { for (int o = 0; o < EFFECT_COUNT; o++) { uint32 tid = spells[spellid].effectid[o]; if ((tid == SE_CurrentHPOnce || tid == SE_CurrentHP) && spells[spellid].targettype != ST_Tap && spells[spellid].buffduration < 1 && spells[spellid].base[o] < 0) return true; } return false; } bool IsFearSpell(uint16 spell_id) { return IsEffectInSpell(spell_id, SE_Fear); } bool IsCureSpell(uint16 spell_id) { const SPDat_Spell_Struct &sp = spells[spell_id]; bool CureEffect = false; for(int i = 0; i < EFFECT_COUNT; i++){ if (sp.effectid[i] == SE_DiseaseCounter || sp.effectid[i] == SE_PoisonCounter || sp.effectid[i] == SE_CurseCounter || sp.effectid[i] == SE_CorruptionCounter) CureEffect = true; } if (CureEffect && IsBeneficialSpell(spell_id)) return true; return false; } bool IsSlowSpell(uint16 spell_id) { const SPDat_Spell_Struct &sp = spells[spell_id]; for(int i = 0; i < EFFECT_COUNT; i++) if ((sp.effectid[i] == SE_AttackSpeed && sp.base[i] < 100) || (sp.effectid[i] == SE_AttackSpeed4)) return true; return false; } bool IsHasteSpell(uint16 spell_id) { const SPDat_Spell_Struct &sp = spells[spell_id]; for(int i = 0; i < EFFECT_COUNT; i++) if(sp.effectid[i] == SE_AttackSpeed) return (sp.base[i] < 100); return false; } bool IsHarmonySpell(uint16 spell_id) { // IsEffectInSpell(spell_id, SE_Lull) - Lull is not calculated anywhere atm return (IsEffectInSpell(spell_id, SE_Harmony) || IsEffectInSpell(spell_id, SE_ChangeFrenzyRad)); } bool IsPercentalHealSpell(uint16 spell_id) { return IsEffectInSpell(spell_id, SE_PercentalHeal); } bool IsGroupOnlySpell(uint16 spell_id) { return IsValidSpell(spell_id) && spells[spell_id].goodEffect == 2; } bool IsBeneficialSpell(uint16 spell_id) { if (!IsValidSpell(spell_id)) return false; // You'd think just checking goodEffect flag would be enough? if (spells[spell_id].goodEffect == 1) { // If the target type is ST_Self or ST_Pet and is a SE_CancleMagic spell // it is not Beneficial SpellTargetType tt = spells[spell_id].targettype; if (tt != ST_Self && tt != ST_Pet && IsEffectInSpell(spell_id, SE_CancelMagic)) return false; // When our targettype is ST_Target, ST_AETarget, ST_Aniaml, ST_Undead, or ST_Pet // We need to check more things! 
if (tt == ST_Target || tt == ST_AETarget || tt == ST_Animal || tt == ST_Undead || tt == ST_Pet) { uint16 sai = spells[spell_id].SpellAffectIndex; // If the resisttype is magic and SpellAffectIndex is Calm/memblur/dispell sight // it's not beneficial if (spells[spell_id].resisttype == RESIST_MAGIC) { // checking these SAI cause issues with the rng defensive proc line // So I guess instead of fixing it for real, just a quick hack :P if (spells[spell_id].effectid[0] != SE_DefensiveProc && (sai == SAI_Calm || sai == SAI_Dispell_Sight || sai == SAI_Memory_Blur || sai == SAI_Calm_Song)) return false; } else { // If the resisttype is not magic and spell is Bind Sight or Cast Sight // It's not beneficial if ((sai == SAI_Calm && IsEffectInSpell(spell_id, SE_Harmony)) || (sai == SAI_Calm_Song && IsEffectInSpell(spell_id, SE_BindSight)) || (sai == SAI_Dispell_Sight && spells[spell_id].skill == 18 && !IsEffectInSpell(spell_id, SE_VoiceGraft))) return false; } } } // And finally, if goodEffect is not 0 or if it's a group spell it's beneficial return spells[spell_id].goodEffect != 0 || IsGroupSpell(spell_id); } bool IsDetrimentalSpell(uint16 spell_id) { return !IsBeneficialSpell(spell_id); } bool IsInvisSpell(uint16 spell_id) { if (IsEffectInSpell(spell_id, SE_Invisibility) || IsEffectInSpell(spell_id, SE_Invisibility2) || IsEffectInSpell(spell_id, SE_InvisVsUndead) || IsEffectInSpell(spell_id, SE_InvisVsUndead2) || IsEffectInSpell(spell_id, SE_InvisVsAnimals)) { return true; } return false; } bool IsInvulnerabilitySpell(uint16 spell_id) { return IsEffectInSpell(spell_id, SE_DivineAura); } bool IsCHDurationSpell(uint16 spell_id) { return IsEffectInSpell(spell_id, SE_CompleteHeal); } bool IsPoisonCounterSpell(uint16 spell_id) { return IsEffectInSpell(spell_id, SE_PoisonCounter); } bool IsDiseaseCounterSpell(uint16 spell_id) { return IsEffectInSpell(spell_id, SE_DiseaseCounter); } bool IsSummonItemSpell(uint16 spell_id) { return IsEffectInSpell(spell_id, SE_SummonItem); } bool IsSummonSkeletonSpell(uint16 spell_id) { return IsEffectInSpell(spell_id, SE_NecPet); } bool IsSummonPetSpell(uint16 spell_id) { if (IsEffectInSpell(spell_id, SE_SummonPet) || IsEffectInSpell(spell_id, SE_SummonBSTPet)) return true; return false; } bool IsSummonPCSpell(uint16 spell_id) { return IsEffectInSpell(spell_id, SE_SummonPC); } bool IsCharmSpell(uint16 spell_id) { return IsEffectInSpell(spell_id, SE_Charm); } bool IsBlindSpell(uint16 spell_id) { return IsEffectInSpell(spell_id, SE_Blind); } bool IsEffectHitpointsSpell(uint16 spell_id) { return IsEffectInSpell(spell_id, SE_CurrentHP); } bool IsReduceCastTimeSpell(uint16 spell_id) { return IsEffectInSpell(spell_id, SE_IncreaseSpellHaste); } bool IsIncreaseDurationSpell(uint16 spell_id) { return IsEffectInSpell(spell_id, SE_IncreaseSpellDuration); } bool IsReduceManaSpell(uint16 spell_id) { return IsEffectInSpell(spell_id, SE_ReduceManaCost); } bool IsExtRangeSpell(uint16 spell_id) { return IsEffectInSpell(spell_id, SE_IncreaseRange); } bool IsImprovedHealingSpell(uint16 spell_id) { return IsEffectInSpell(spell_id, SE_ImprovedHeal); } bool IsImprovedDamageSpell(uint16 spell_id) { return IsEffectInSpell(spell_id, SE_ImprovedDamage); } bool IsAEDurationSpell(uint16 spell_id) { if (IsValidSpell(spell_id) && (spells[spell_id].targettype == ST_AETarget || spells[spell_id].targettype == ST_UndeadAE) && spells[spell_id].AEDuration != 0) return true; return false; } bool IsPureNukeSpell(uint16 spell_id) { int i, effect_count = 0; if (!IsValidSpell(spell_id)) return false; for (i = 0; i 
< EFFECT_COUNT; i++) if (!IsBlankSpellEffect(spell_id, i)) effect_count++; if (effect_count == 1 && IsEffectInSpell(spell_id, SE_CurrentHP) && spells[spell_id].buffduration == 0 && IsDamageSpell(spell_id)) return true; return false; } bool IsAENukeSpell(uint16 spell_id) { if (IsValidSpell(spell_id) && IsPureNukeSpell(spell_id) && spells[spell_id].aoerange > 0) return true; return false; } bool IsPBAENukeSpell(uint16 spell_id) { if (IsValidSpell(spell_id) && IsPureNukeSpell(spell_id) && spells[spell_id].aoerange > 0 && spells[spell_id].targettype == ST_AECaster) return true; return false; } bool IsAERainNukeSpell(uint16 spell_id) { if (IsValidSpell(spell_id) && IsPureNukeSpell(spell_id) && spells[spell_id].aoerange > 0 && spells[spell_id].AEDuration > 1000) return true; return false; } bool IsPartialCapableSpell(uint16 spell_id) { if (spells[spell_id].no_partial_resist) return false; // spell uses 600 (partial) scale if first effect is damage, else it uses 200 scale. // this includes DoTs. no_partial_resist excludes spells like necro snares for (int o = 0; o < EFFECT_COUNT; o++) { auto tid = spells[spell_id].effectid[o]; if (IsBlankSpellEffect(spell_id, o)) continue; if ((tid == SE_CurrentHPOnce || tid == SE_CurrentHP) && spells[spell_id].base[o] < 0) return true; return false; } return false; } bool IsResistableSpell(uint16 spell_id) { // for now only detrimental spells are resistable. later on i will // add specific exceptions for the beneficial spells that are resistable if (IsDetrimentalSpell(spell_id)) return true; return false; } // checks if this spell affects your group bool IsGroupSpell(uint16 spell_id) { if (IsValidSpell(spell_id) && (spells[spell_id].targettype == ST_AEBard || spells[spell_id].targettype == ST_Group || spells[spell_id].targettype == ST_GroupTeleport)) return true; return false; } // checks if this spell can be targeted bool IsTGBCompatibleSpell(uint16 spell_id) { if (IsValidSpell(spell_id) && (!IsDetrimentalSpell(spell_id) && spells[spell_id].buffduration != 0 && !IsBardSong(spell_id) && !IsEffectInSpell(spell_id, SE_Illusion))) return true; return false; } bool IsBardSong(uint16 spell_id) { if (IsValidSpell(spell_id) && spells[spell_id].classes[BARD - 1] < 255 && !spells[spell_id].IsDisciplineBuff) return true; return false; } bool IsEffectInSpell(uint16 spellid, int effect) { int j; if (!IsValidSpell(spellid)) return false; for (j = 0; j < EFFECT_COUNT; j++) if (spells[spellid].effectid[j] == effect) return true; return false; } // arguments are spell id and the index of the effect to check. 
// this is used in loops that process effects inside a spell to skip // the blanks bool IsBlankSpellEffect(uint16 spellid, int effect_index) { int effect, base, formula; effect = spells[spellid].effectid[effect_index]; base = spells[spellid].base[effect_index]; formula = spells[spellid].formula[effect_index]; // SE_CHA is "spacer" // SE_Stacking* are also considered blank where this is used if (effect == SE_Blank || (effect == SE_CHA && base == 0 && formula == 100) || effect == SE_StackingCommand_Block || effect == SE_StackingCommand_Overwrite) return true; return false; } // checks some things about a spell id, to see if we can proceed bool IsValidSpell(uint32 spellid) { if (SPDAT_RECORDS > 0 && spellid != 0 && spellid != 1 && spellid != 0xFFFFFFFF && spellid < SPDAT_RECORDS && spells[spellid].player_1[0]) return true; return false; } // returns the lowest level of any caster which can use the spell int GetMinLevel(uint16 spell_id) { int r, min = 255; const SPDat_Spell_Struct &spell = spells[spell_id]; for (r = 0; r < PLAYER_CLASS_COUNT; r++) if (spell.classes[r] < min) min = spell.classes[r]; // if we can't cast the spell return 0 // just so it wont screw up calculations used in other areas of the code // seen 127, 254, 255 if (min >= 127) return 0; else return min; } int GetSpellLevel(uint16 spell_id, int classa) { if (classa >= PLAYER_CLASS_COUNT) return 255; const SPDat_Spell_Struct &spell = spells[spell_id]; return spell.classes[classa - 1]; } // this will find the first occurrence of effect. this is handy // for spells like mez and charm, but if the effect appears more than once // in a spell this will just give back the first one. int GetSpellEffectIndex(uint16 spell_id, int effect) { int i; if (!IsValidSpell(spell_id)) return -1; for (i = 0; i < EFFECT_COUNT; i++) if (spells[spell_id].effectid[i] == effect) return i; return -1; } // returns the level required to use the spell if that class/level // can use it, 0 otherwise // note: this isn't used by anything right now int CanUseSpell(uint16 spellid, int classa, int level) { int level_to_use; if (!IsValidSpell(spellid) || classa >= PLAYER_CLASS_COUNT) return 0; level_to_use = spells[spellid].classes[classa - 1]; if (level_to_use && level_to_use != 255 && level >= level_to_use) return level_to_use; return 0; } bool BeneficialSpell(uint16 spell_id) { if (spell_id <= 0 || spell_id >= SPDAT_RECORDS /*|| spells[spell_id].stacking == 27*/ ) return true; switch (spells[spell_id].goodEffect) { case 1: case 3: return true; } return false; } bool GroupOnlySpell(uint16 spell_id) { switch (spells[spell_id].goodEffect) { case 2: case 3: return true; } switch (spell_id) { case 1771: return true; } return false; } int32 CalculatePoisonCounters(uint16 spell_id) { if (!IsValidSpell(spell_id)) return 0; int32 Counters = 0; for (int i = 0; i < EFFECT_COUNT; i++) if (spells[spell_id].effectid[i] == SE_PoisonCounter && spells[spell_id].base[i] > 0) Counters += spells[spell_id].base[i]; return Counters; } int32 CalculateDiseaseCounters(uint16 spell_id) { if (!IsValidSpell(spell_id)) return 0; int32 Counters = 0; for (int i = 0; i < EFFECT_COUNT; i++) if(spells[spell_id].effectid[i] == SE_DiseaseCounter && spells[spell_id].base[i] > 0) Counters += spells[spell_id].base[i]; return Counters; } int32 CalculateCurseCounters(uint16 spell_id) { if (!IsValidSpell(spell_id)) return 0; int32 Counters = 0; for (int i = 0; i < EFFECT_COUNT; i++) if(spells[spell_id].effectid[i] == SE_CurseCounter && spells[spell_id].base[i] > 0) Counters += spells[spell_id].base[i]; 
return Counters; } int32 CalculateCorruptionCounters(uint16 spell_id) { if (!IsValidSpell(spell_id)) return 0; int32 Counters = 0; for (int i = 0; i < EFFECT_COUNT; i++) if (spells[spell_id].effectid[i] == SE_CorruptionCounter && spells[spell_id].base[i] > 0) Counters += spells[spell_id].base[i]; return Counters; } int32 CalculateCounters(uint16 spell_id) { int32 counter = CalculatePoisonCounters(spell_id); if (counter != 0) return counter; counter = CalculateDiseaseCounters(spell_id); if (counter != 0) return counter; counter = CalculateCurseCounters(spell_id); if (counter != 0) return counter; counter = CalculateCorruptionCounters(spell_id); return counter; } bool IsDisciplineBuff(uint16 spell_id) { if (!IsValidSpell(spell_id)) return false; if (spells[spell_id].IsDisciplineBuff && spells[spell_id].targettype == ST_Self) return true; return false; } bool IsDiscipline(uint16 spell_id) { if (!IsValidSpell(spell_id)) return false; if (spells[spell_id].mana == 0 && (spells[spell_id].EndurCost || spells[spell_id].EndurUpkeep)) return true; return false; } bool IsCombatSkill(uint16 spell_id) { if (!IsValidSpell(spell_id)) return false; //Check if Discipline if ((spells[spell_id].mana == 0 && (spells[spell_id].EndurCost || spells[spell_id].EndurUpkeep))) return true; return false; } bool IsResurrectionEffects(uint16 spell_id) { // spell id 756 is Resurrection Effects spell if(IsValidSpell(spell_id) && (spell_id == 756 || spell_id == 757)) return true; return false; } bool IsRuneSpell(uint16 spell_id) { if (IsValidSpell(spell_id)) for (int i = 0; i < EFFECT_COUNT; i++) if (spells[spell_id].effectid[i] == SE_Rune) return true; return false; } bool IsMagicRuneSpell(uint16 spell_id) { if(IsValidSpell(spell_id)) for(int i = 0; i < EFFECT_COUNT; i++) if(spells[spell_id].effectid[i] == SE_AbsorbMagicAtt) return true; return false; } bool IsManaTapSpell(uint16 spell_id) { if (IsValidSpell(spell_id)) for (int i = 0; i < EFFECT_COUNT; i++) if (spells[spell_id].effectid[i] == SE_CurrentMana && spells[spell_id].targettype == ST_Tap) return true; return false; } bool IsAllianceSpellLine(uint16 spell_id) { if (IsEffectInSpell(spell_id, SE_AddFaction)) return true; return false; } bool IsDeathSaveSpell(uint16 spell_id) { if (IsEffectInSpell(spell_id, SE_DeathSave)) return true; return false; } // Deathsave spells with base of 1 are partial bool IsPartialDeathSaveSpell(uint16 spell_id) { if (!IsValidSpell(spell_id)) return false; for (int i = 0; i < EFFECT_COUNT; i++) if (spells[spell_id].effectid[i] == SE_DeathSave && spells[spell_id].base[i] == 1) return true; return false; } // Deathsave spells with base 2 are "full" bool IsFullDeathSaveSpell(uint16 spell_id) { if (!IsValidSpell(spell_id)) return false; for (int i = 0; i < EFFECT_COUNT; i++) if (spells[spell_id].effectid[i] == SE_DeathSave && spells[spell_id].base[i] == 2) return true; return false; } bool IsShadowStepSpell(uint16 spell_id) { if (IsEffectInSpell(spell_id, SE_ShadowStep)) return true; return false; } bool IsSuccorSpell(uint16 spell_id) { if (IsEffectInSpell(spell_id, SE_Succor)) return true; return false; } bool IsTeleportSpell(uint16 spell_id) { if (IsEffectInSpell(spell_id, SE_Teleport)) return true; return false; } bool IsGateSpell(uint16 spell_id) { if (IsEffectInSpell(spell_id, SE_Gate)) return true; return false; } bool IsPlayerIllusionSpell(uint16 spell_id) { if (IsEffectInSpell(spell_id, SE_Illusion) && spells[spell_id].targettype == ST_Self) return true; return false; } int GetSpellEffectDescNum(uint16 spell_id) { if 
(IsValidSpell(spell_id)) return spells[spell_id].effectdescnum; return -1; } DmgShieldType GetDamageShieldType(uint16 spell_id, int32 DSType) { // If we have a DamageShieldType for this spell from the damageshieldtypes table, return that, // else, make a guess, based on the resist type. Default return value is DS_THORNS if (IsValidSpell(spell_id)) { LogSpells("DamageShieldType for spell [{}] ([{}]) is [{}]", spell_id, spells[spell_id].name, spells[spell_id].DamageShieldType); if (spells[spell_id].DamageShieldType) return (DmgShieldType) spells[spell_id].DamageShieldType; switch (spells[spell_id].resisttype) { case RESIST_COLD: return DS_TORMENT; case RESIST_FIRE: return DS_BURN; case RESIST_DISEASE: return DS_DECAY; default: return DS_THORNS; } } else if (DSType) return (DmgShieldType) DSType; return DS_THORNS; } bool IsLDoNObjectSpell(uint16 spell_id) { if (IsEffectInSpell(spell_id, SE_AppraiseLDonChest) || IsEffectInSpell(spell_id, SE_DisarmLDoNTrap) || IsEffectInSpell(spell_id, SE_UnlockLDoNChest)) return true; return false; } int32 GetSpellResistType(uint16 spell_id) { return spells[spell_id].resisttype; } int32 GetSpellTargetType(uint16 spell_id) { return (int32)spells[spell_id].targettype; } bool IsHealOverTimeSpell(uint16 spell_id) { if (IsEffectInSpell(spell_id, SE_HealOverTime) && !IsGroupSpell(spell_id)) return true; return false; } bool IsCompleteHealSpell(uint16 spell_id) { if (spell_id == 13 || IsEffectInSpell(spell_id, SE_CompleteHeal) || (IsPercentalHealSpell(spell_id) && !IsGroupSpell(spell_id))) return true; return false; } bool IsFastHealSpell(uint16 spell_id) { const int MaxFastHealCastingTime = 2000; if (spells[spell_id].cast_time <= MaxFastHealCastingTime && spells[spell_id].effectid[0] == 0 && spells[spell_id].base[0] > 0 && !IsGroupSpell(spell_id)) return true; return false; } bool IsVeryFastHealSpell(uint16 spell_id) { const int MaxFastHealCastingTime = 1000; if (spells[spell_id].cast_time <= MaxFastHealCastingTime && spells[spell_id].effectid[0] == 0 && spells[spell_id].base[0] > 0 && !IsGroupSpell(spell_id)) return true; return false; } bool IsRegularSingleTargetHealSpell(uint16 spell_id) { if(spells[spell_id].effectid[0] == 0 && spells[spell_id].base[0] > 0 && spells[spell_id].targettype == ST_Target && spells[spell_id].buffduration == 0 && !IsCompleteHealSpell(spell_id) && !IsHealOverTimeSpell(spell_id) && !IsGroupSpell(spell_id)) return true; return false; } bool IsRegularGroupHealSpell(uint16 spell_id) { if (IsGroupSpell(spell_id) && !IsCompleteHealSpell(spell_id) && !IsHealOverTimeSpell(spell_id)) return true; return false; } bool IsGroupCompleteHealSpell(uint16 spell_id) { if (IsGroupSpell(spell_id) && IsCompleteHealSpell(spell_id)) return true; return false; } bool IsGroupHealOverTimeSpell(uint16 spell_id) { if( IsGroupSpell(spell_id) && IsHealOverTimeSpell(spell_id) && spells[spell_id].buffduration < 10) return true; return false; } bool IsDebuffSpell(uint16 spell_id) { if (IsBeneficialSpell(spell_id) || IsEffectHitpointsSpell(spell_id) || IsStunSpell(spell_id) || IsMezSpell(spell_id) || IsCharmSpell(spell_id) || IsSlowSpell(spell_id) || IsEffectInSpell(spell_id, SE_Root) || IsEffectInSpell(spell_id, SE_CancelMagic) || IsEffectInSpell(spell_id, SE_MovementSpeed) || IsFearSpell(spell_id) || IsEffectInSpell(spell_id, SE_InstantHate)) return false; else return true; } bool IsResistDebuffSpell(uint16 spell_id) { if ((IsEffectInSpell(spell_id, SE_ResistFire) || IsEffectInSpell(spell_id, SE_ResistCold) || IsEffectInSpell(spell_id, SE_ResistPoison) || 
IsEffectInSpell(spell_id, SE_ResistDisease) || IsEffectInSpell(spell_id, SE_ResistMagic) || IsEffectInSpell(spell_id, SE_ResistAll) || IsEffectInSpell(spell_id, SE_ResistCorruption)) && !IsBeneficialSpell(spell_id)) return true; else return false; } bool IsSelfConversionSpell(uint16 spell_id) { if (GetSpellTargetType(spell_id) == ST_Self && IsEffectInSpell(spell_id, SE_CurrentMana) && IsEffectInSpell(spell_id, SE_CurrentHP) && spells[spell_id].base[GetSpellEffectIndex(spell_id, SE_CurrentMana)] > 0 && spells[spell_id].base[GetSpellEffectIndex(spell_id, SE_CurrentHP)] < 0) return true; else return false; } // returns true for both detrimental and beneficial buffs bool IsBuffSpell(uint16 spell_id) { if (IsValidSpell(spell_id) && (spells[spell_id].buffduration || spells[spell_id].buffdurationformula)) return true; return false; } bool IsPersistDeathSpell(uint16 spell_id) { if (IsValidSpell(spell_id) && spells[spell_id].persistdeath) return true; return false; } bool IsSuspendableSpell(uint16 spell_id) { if (IsValidSpell(spell_id) && spells[spell_id].suspendable) return true; return false; } uint32 GetMorphTrigger(uint32 spell_id) { for (int i = 0; i < EFFECT_COUNT; ++i) if (spells[spell_id].effectid[i] == SE_CastOnFadeEffect) return spells[spell_id].base[i]; return 0; } bool IsCastonFadeDurationSpell(uint16 spell_id) { for (int i = 0; i < EFFECT_COUNT; ++i) { if (spells[spell_id].effectid[i] == SE_CastOnFadeEffect || spells[spell_id].effectid[i] == SE_CastOnFadeEffectNPC || spells[spell_id].effectid[i] == SE_CastOnFadeEffectAlways){ return true; } } return false; } bool IsPowerDistModSpell(uint16 spell_id) { if (IsValidSpell(spell_id) && (spells[spell_id].max_dist_mod || spells[spell_id].min_dist_mod) && spells[spell_id].max_dist > spells[spell_id].min_dist) return true; return false; } uint32 GetPartialMeleeRuneReduction(uint32 spell_id) { for (int i = 0; i < EFFECT_COUNT; ++i) if (spells[spell_id].effectid[i] == SE_MitigateMeleeDamage) return spells[spell_id].base[i]; return 0; } uint32 GetPartialMagicRuneReduction(uint32 spell_id) { for (int i = 0; i < EFFECT_COUNT; ++i) if (spells[spell_id].effectid[i] == SE_MitigateSpellDamage) return spells[spell_id].base[i]; return 0; } uint32 GetPartialMeleeRuneAmount(uint32 spell_id) { for (int i = 0; i < EFFECT_COUNT; ++i) if (spells[spell_id].effectid[i] == SE_MitigateMeleeDamage) return spells[spell_id].max[i]; return 0; } uint32 GetPartialMagicRuneAmount(uint32 spell_id) { for (int i = 0; i < EFFECT_COUNT; ++i) if (spells[spell_id].effectid[i] == SE_MitigateSpellDamage) return spells[spell_id].max[i]; return 0; } bool DetrimentalSpellAllowsRest(uint16 spell_id) { if (IsValidSpell(spell_id)) return spells[spell_id].AllowRest; return false; } bool NoDetrimentalSpellAggro(uint16 spell_id) { if (IsValidSpell(spell_id)) return spells[spell_id].no_detrimental_spell_aggro; return false; } bool IsStackableDot(uint16 spell_id) { // rules according to client if (!IsValidSpell(spell_id)) return false; const auto &spell = spells[spell_id]; if (spell.dot_stacking_exempt || spell.goodEffect || !spell.buffdurationformula) return false; return IsEffectInSpell(spell_id, SE_CurrentHP) || IsEffectInSpell(spell_id, SE_GravityEffect); } bool IsBardOnlyStackEffect(int effect) { switch(effect) { /*case SE_CurrentMana: case SE_ManaRegen_v2: case SE_CurrentHP: case SE_HealOverTime:*/ case SE_BardAEDot: return true; default: return false; } } bool IsCastWhileInvis(uint16 spell_id) { if (!IsValidSpell(spell_id)) return false; const auto &spell = spells[spell_id]; if 
(spell.sneak || spell.cast_not_standing) return true; return false; } bool IsEffectIgnoredInStacking(int spa) { // this should match RoF2 switch (spa) { case SE_SeeInvis: case SE_DiseaseCounter: case SE_PoisonCounter: case SE_Levitate: case SE_InfraVision: case SE_UltraVision: case SE_CurrentHPOnce: case SE_CurseCounter: case SE_ImprovedDamage: case SE_ImprovedHeal: case SE_SpellResistReduction: case SE_IncreaseSpellHaste: case SE_IncreaseSpellDuration: case SE_IncreaseRange: case SE_SpellHateMod: case SE_ReduceReagentCost: case SE_ReduceManaCost: case SE_FcStunTimeMod: case SE_LimitMaxLevel: case SE_LimitResist: case SE_LimitTarget: case SE_LimitEffect: case SE_LimitSpellType: case SE_LimitSpell: case SE_LimitMinDur: case SE_LimitInstant: case SE_LimitMinLevel: case SE_LimitCastTimeMin: case SE_LimitCastTimeMax: case SE_StackingCommand_Block: case SE_StackingCommand_Overwrite: case SE_PetPowerIncrease: case SE_SkillDamageAmount: case SE_ChannelChanceSpells: case SE_Blank: case SE_FcDamageAmt: case SE_SpellDurationIncByTic: case SE_FcSpellVulnerability: case SE_FcDamageAmtIncoming: case SE_FcDamagePctCrit: case SE_FcDamageAmtCrit: case SE_ReduceReuseTimer: case SE_LimitCombatSkills: case SE_BlockNextSpellFocus: case SE_SpellTrigger: case SE_LimitManaMin: case SE_CorruptionCounter: case SE_ApplyEffect: case SE_NegateSpellEffect: case SE_LimitSpellGroup: case SE_LimitManaMax: case SE_FcHealAmt: case SE_FcHealPctIncoming: case SE_FcHealAmtIncoming: case SE_FcHealPctCritIncoming: case SE_FcHealAmtCrit: case SE_LimitClass: case SE_LimitRace: case SE_FcBaseEffects: case 415: case SE_SkillDamageAmount2: case SE_FcLimitUse: case SE_FcIncreaseNumHits: case SE_LimitUseMin: case SE_LimitUseType: case SE_GravityEffect: case 425: //Spell effects implemented after ROF2, following same pattern, lets assume these should go here. 
case SE_Fc_Spell_Damage_Pct_IncomingPC: case SE_Fc_Spell_Damage_Amt_IncomingPC: case SE_Ff_CasterClass: case SE_Ff_Same_Caster: case SE_Proc_Timer_Modifier: case SE_Weapon_Stance: case SE_TwinCastBlocker: case SE_Fc_CastTimeAmt: case SE_Fc_CastTimeMod2: case SE_Ff_DurationMax: case SE_Ff_Endurance_Max: case SE_Ff_Endurance_Min: case SE_Ff_ReuseTimeMin: case SE_Ff_ReuseTimeMax: case SE_Ff_Value_Min: case SE_Ff_Value_Max: return true; default: return false; } } bool IsFocusLimit(int spa) { switch (spa) { case SE_LimitMaxLevel: case SE_LimitResist: case SE_LimitTarget: case SE_LimitEffect: case SE_LimitSpellType: case SE_LimitSpell: case SE_LimitMinDur: case SE_LimitInstant: case SE_LimitMinLevel: case SE_LimitCastTimeMin: case SE_LimitCastTimeMax: case SE_LimitCombatSkills: case SE_LimitManaMin: case SE_LimitSpellGroup: case SE_LimitManaMax: case SE_LimitSpellClass: case SE_LimitSpellSubclass: case SE_LimitClass: case SE_LimitRace: case SE_LimitCastingSkill: case SE_LimitUseMin: case SE_LimitUseType: case SE_Ff_Override_NotFocusable: case SE_Ff_CasterClass: case SE_Ff_Same_Caster: case SE_Ff_DurationMax: case SE_Ff_Endurance_Max: case SE_Ff_Endurance_Min: case SE_Ff_ReuseTimeMin: case SE_Ff_ReuseTimeMax: case SE_Ff_Value_Min: case SE_Ff_Value_Max: return true; default: return false; } } uint32 GetNimbusEffect(uint16 spell_id) { if (IsValidSpell(spell_id)) return (int32)spells[spell_id].NimbusEffect; return 0; } int32 GetFuriousBash(uint16 spell_id) { if (!IsValidSpell(spell_id)) return 0; bool found_effect_limit = false; int32 mod = 0; for (int i = 0; i < EFFECT_COUNT; ++i) if (spells[spell_id].effectid[i] == SE_SpellHateMod) mod = spells[spell_id].base[i]; else if (spells[spell_id].effectid[i] == SE_LimitEffect && spells[spell_id].base[i] == 999) found_effect_limit = true; if (found_effect_limit) return mod; else return 0; } bool IsShortDurationBuff(uint16 spell_id) { if (IsValidSpell(spell_id) && spells[spell_id].short_buff_box != 0) return true; return false; } bool IsSpellUsableThisZoneType(uint16 spell_id, uint8 zone_type) { //check if spell can be cast in any zone (-1 or 255), then if spell zonetype matches zone's zonetype // || spells[spell_id].zonetype == 255 comparing signed 8 bit int to 255 is always false if (IsValidSpell(spell_id) && (spells[spell_id].zonetype == -1 || spells[spell_id].zonetype == zone_type)) return true; return false; } const char* GetSpellName(uint16 spell_id) { return spells[spell_id].name; } bool SpellRequiresTarget(int spell_id) { if (spells[spell_id].targettype == ST_AEClientV1 || spells[spell_id].targettype == ST_Self || spells[spell_id].targettype == ST_AECaster || spells[spell_id].targettype == ST_Ring || spells[spell_id].targettype == ST_Beam) { return false; } return true; } int GetSpellStatValue(uint32 spell_id, const char* stat_identifier, uint8 slot) { if (!IsValidSpell(spell_id)) return 0; if (!stat_identifier) return 0; if (slot > 0) slot -= 1; std::string id = stat_identifier; for(uint32 i = 0; i < id.length(); ++i) { id[i] = tolower(id[i]); } if (slot < 16) { if (id == "classes") { return spells[spell_id].classes[slot]; } else if (id == "deities") { return spells[spell_id].deities[slot]; } } if (slot < 12) { if (id == "base") { return spells[spell_id].base[slot]; } else if (id == "base2") { return spells[spell_id].base2[slot]; } else if (id == "max") { return spells[spell_id].max[slot]; } else if (id == "formula") { return spells[spell_id].formula[slot]; } else if (id == "effectid") { return spells[spell_id].effectid[slot]; } } if (slot < 4) { 
if (id == "components") { return spells[spell_id].components[slot]; } else if (id == "component_counts") { return spells[spell_id].component_counts[slot]; } else if (id == "noexpendreagent") { return spells[spell_id].NoexpendReagent[slot]; } } if (id == "range") { return static_cast<int32>(spells[spell_id].range); } else if (id == "aoerange") { return static_cast<int32>(spells[spell_id].aoerange); } else if (id == "pushback") { return static_cast<int32>(spells[spell_id].pushback); } else if (id == "pushup") { return static_cast<int32>(spells[spell_id].pushup); } else if (id == "cast_time") { return spells[spell_id].cast_time; } else if (id == "recovery_time") { return spells[spell_id].recovery_time; } else if (id == "recast_time") { return spells[spell_id].recast_time; } else if (id == "buffdurationformula") { return spells[spell_id].buffdurationformula; } else if (id == "buffduration") { return spells[spell_id].buffduration; } else if (id == "aeduration") { return spells[spell_id].AEDuration; } else if (id == "mana") { return spells[spell_id].mana; } //else if (id == "LightType") {stat = spells[spell_id].LightType; } - Not implemented else if (id == "goodeffect") { return spells[spell_id].goodEffect; } else if (id == "activated") { return spells[spell_id].Activated; } else if (id == "resisttype") { return spells[spell_id].resisttype; } else if (id == "targettype") { return spells[spell_id].targettype; } else if (id == "basediff") { return spells[spell_id].basediff; } else if (id == "skill") { return spells[spell_id].skill; } else if (id == "zonetype") { return spells[spell_id].zonetype; } else if (id == "environmenttype") { return spells[spell_id].EnvironmentType; } else if (id == "timeofday") { return spells[spell_id].TimeOfDay; } else if (id == "castinganim") { return spells[spell_id].CastingAnim; } else if (id == "spellaffectindex") { return spells[spell_id].SpellAffectIndex; } else if (id == "disallow_sit") { return spells[spell_id].disallow_sit; } //else if (id == "spellanim") {stat = spells[spell_id].spellanim; } - Not implemented else if (id == "uninterruptable") { return spells[spell_id].uninterruptable; } else if (id == "resistdiff") { return spells[spell_id].ResistDiff; } else if (id == "dot_stacking_exempt") { return spells[spell_id].dot_stacking_exempt; } else if (id == "recourselink") { return spells[spell_id].RecourseLink; } else if (id == "no_partial_resist") { return spells[spell_id].no_partial_resist; } else if (id == "short_buff_box") { return spells[spell_id].short_buff_box; } else if (id == "descnum") { return spells[spell_id].descnum; } else if (id == "effectdescnum") { return spells[spell_id].effectdescnum; } else if (id == "npc_no_los") { return spells[spell_id].npc_no_los; } else if (id == "reflectable") { return spells[spell_id].reflectable; } else if (id == "bonushate") { return spells[spell_id].bonushate; } else if (id == "endurcost") { return spells[spell_id].EndurCost; } else if (id == "endurtimerindex") { return spells[spell_id].EndurTimerIndex; } else if (id == "isdisciplinebuff") { return spells[spell_id].IsDisciplineBuff; } else if (id == "hateadded") { return spells[spell_id].HateAdded; } else if (id == "endurupkeep") { return spells[spell_id].EndurUpkeep; } else if (id == "numhitstype") { return spells[spell_id].numhitstype; } else if (id == "numhits") { return spells[spell_id].numhits; } else if (id == "pvpresistbase") { return spells[spell_id].pvpresistbase; } else if (id == "pvpresistcalc") { return spells[spell_id].pvpresistcalc; } else if (id == 
"pvpresistcap") { return spells[spell_id].pvpresistcap; } else if (id == "spell_category") { return spells[spell_id].spell_category; } else if (id == "can_mgb") { return spells[spell_id].can_mgb; } else if (id == "dispel_flag") { return spells[spell_id].dispel_flag; } else if (id == "minresist") { return spells[spell_id].MinResist; } else if (id == "maxresist") { return spells[spell_id].MaxResist; } else if (id == "viral_targets") { return spells[spell_id].viral_targets; } else if (id == "viral_timer") { return spells[spell_id].viral_timer; } else if (id == "nimbuseffect") { return spells[spell_id].NimbusEffect; } else if (id == "directional_start") { return static_cast<int32>(spells[spell_id].directional_start); } else if (id == "directional_end") { return static_cast<int32>(spells[spell_id].directional_end); } else if (id == "not_focusable") { return spells[spell_id].not_focusable; } else if (id == "suspendable") { return spells[spell_id].suspendable; } else if (id == "viral_range") { return spells[spell_id].viral_range; } else if (id == "spellgroup") { return spells[spell_id].spellgroup; } else if (id == "rank") { return spells[spell_id].rank; } else if (id == "no_resist") { return spells[spell_id].no_resist; } else if (id == "castrestriction") { return spells[spell_id].CastRestriction; } else if (id == "allowrest") { return spells[spell_id].AllowRest; } else if (id == "incombat") { return spells[spell_id].InCombat; } else if (id == "outofcombat") { return spells[spell_id].OutofCombat; } else if (id == "aemaxtargets") { return spells[spell_id].aemaxtargets; } else if (id == "no_heal_damage_item_mod") { return spells[spell_id].no_heal_damage_item_mod; } else if (id == "persistdeath") { return spells[spell_id].persistdeath; } else if (id == "min_dist") { return static_cast<int32>(spells[spell_id].min_dist); } else if (id == "min_dist_mod") { return static_cast<int32>(spells[spell_id].min_dist_mod); } else if (id == "max_dist") { return static_cast<int32>(spells[spell_id].max_dist); } else if (id == "min_range") { return static_cast<int32>(spells[spell_id].min_range); } else if (id == "damageshieldtype") { return spells[spell_id].DamageShieldType; } return 0; }
1
10,848
Is this intended to be `spells[spell_id].reflectable` here? Edit: Should be `spells[spell_id].feedbackable`, right?
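If the `feedbackable` reading is right, the fix is a one-line change in GetSpellStatValue. A sketch only — the patched line itself isn't shown here, and this assumes SPDat_Spell_Struct actually carries a `feedbackable` member mirroring the spells table column:

// Hypothetical correction per the comment above: map the "feedbackable"
// identifier to its own struct field instead of reusing reflectable.
else if (id == "feedbackable") { return spells[spell_id].feedbackable; }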
EQEmu-Server
cpp
@@ -0,0 +1,11 @@ +class AddUsersAllowedToTeams < ActiveRecord::Migration + def up + add_column :teams, :max_users, :integer + + change_column_null :teams, :max_users, false, 0 + end + + def down + remove_column :teams, :max_users + end +end
1
1
8,996
How should we set this for existing teams?
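For context, change_column_null's fourth argument (the trailing 0 in the patch) already rewrites existing NULLs to 0 before the constraint lands. If 0 isn't the right cap for existing teams, an explicit backfill may read more clearly — a sketch, with DEFAULT_MAX_USERS as a placeholder value:

class AddUsersAllowedToTeams < ActiveRecord::Migration
  DEFAULT_MAX_USERS = 0 # placeholder: whatever cap existing teams should get

  def up
    add_column :teams, :max_users, :integer

    # Give every existing team a value so the NOT NULL constraint can hold.
    execute "UPDATE teams SET max_users = #{DEFAULT_MAX_USERS}"

    change_column_null :teams, :max_users, false
  end

  def down
    remove_column :teams, :max_users
  end
end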
thoughtbot-upcase
rb
@@ -145,6 +145,11 @@ func TestInstallManager(t *testing.T) { expectPasswordSecret: true, expectProvisionMetadataUpdate: true, }, + { + name: "infraID already set on cluster provision", // fatal error + existing: []runtime.Object{testClusterDeployment(), testClusterProvisionWithInfraIDSet()}, + expectError: true, + }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) {
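The new case references testClusterProvisionWithInfraIDSet(), which is not defined in the old file below, so presumably it ships elsewhere in this PR. A sketch of the shape it likely has — the InfraID field name and the value are assumptions:

// Hypothetical helper backing the new test case: a provision that already
// carries an infraID, which the install manager should treat as fatal.
func testClusterProvisionWithInfraIDSet() *hivev1.ClusterProvision {
	provision := testClusterProvision()
	provision.Spec.InfraID = pointer.StringPtr("test-cluster-fe9531") // assumed field/value
	return provision
}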
1
package installmanager import ( "context" "fmt" "io/ioutil" "os" "path" "path/filepath" "testing" "github.com/stretchr/testify/require" log "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/kubernetes/scheme" "k8s.io/utils/pointer" installertypes "github.com/openshift/installer/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" "github.com/openshift/hive/apis" hivev1 "github.com/openshift/hive/apis/hive/v1" awsclient "github.com/openshift/hive/pkg/awsclient" "github.com/openshift/hive/pkg/constants" ) const ( testDeploymentName = "test-deployment" testProvisionName = "test-provision" testNamespace = "test-namespace" pullSecretSecretName = "pull-secret" installerBinary = "openshift-install" ocBinary = "oc" fakeInstallerBinary = `#!/bin/sh echo "Fake Installer" echo $@ WORKDIR=%s echo '{"clusterName":"test-cluster","infraID":"test-cluster-fe9531","clusterID":"fe953108-f64c-4166-bb8e-20da7665ba00", "aws":{"region":"us-east-1","identifier":[{"kubernetes.io/cluster/dgoodwin-dev":"owned"}]}}' > $WORKDIR/metadata.json mkdir -p $WORKDIR/auth/ echo "fakekubeconfig" > $WORKDIR/auth/kubeconfig echo "fakepassword" > $WORKDIR/auth/kubeadmin-password echo "some fake installer log output" > /tmp/openshift-install-console.log ` fakeSSHAddBinary = `#!/bin/bash KEY_FILE_PATH=${1} if [[ ${KEY_FILE_PATH} != "%s" ]]; then echo "Parameter not what expected" exit 1 fi exit 0 ` fakeSSHAgentSockPath = "/path/to/agent/sockfile" fakeSSHAgentPID = "12345" alwaysErrorBinary = `#!/bin/sh exit 1` ) var ( fakeSSHAgentBinary = `#!/bin/sh echo "SSH_AUTH_SOCK=%s; export SSH_AUTH_SOCK;" echo "SSH_AGENT_PID=%s; export SSH_AGENT_PID;" echo "echo Agent pid %s;"` ) func init() { log.SetLevel(log.DebugLevel) } func TestInstallManager(t *testing.T) { apis.AddToScheme(scheme.Scheme) tests := []struct { name string existing []runtime.Object failedMetadataRead bool failedKubeconfigSave bool failedAdminPasswordSave bool failedInstallerLogRead bool failedProvisionUpdate *int32 expectKubeconfigSecret bool expectPasswordSecret bool expectProvisionMetadataUpdate bool expectProvisionLogUpdate bool expectError bool }{ { name: "successful install", existing: []runtime.Object{testClusterDeployment(), testClusterProvision()}, expectKubeconfigSecret: true, expectPasswordSecret: true, expectProvisionMetadataUpdate: true, expectProvisionLogUpdate: true, }, { name: "failed metadata read", existing: []runtime.Object{testClusterDeployment(), testClusterProvision()}, failedMetadataRead: true, expectError: true, }, { name: "failed cluster provision metadata update", existing: []runtime.Object{testClusterDeployment(), testClusterProvision()}, failedProvisionUpdate: pointer.Int32Ptr(0), expectKubeconfigSecret: true, expectPasswordSecret: true, expectError: true, }, { name: "failed cluster provision log update", // a non-fatal error existing: []runtime.Object{testClusterDeployment(), testClusterProvision()}, failedProvisionUpdate: pointer.Int32Ptr(1), expectKubeconfigSecret: true, expectPasswordSecret: true, expectProvisionMetadataUpdate: true, }, { name: "failed admin kubeconfig save", // fatal error existing: []runtime.Object{testClusterDeployment(), testClusterProvision()}, failedKubeconfigSave: true, expectError: true, }, { name: "failed admin username/password save", // fatal error existing: 
[]runtime.Object{testClusterDeployment(), testClusterProvision()}, failedAdminPasswordSave: true, expectKubeconfigSecret: true, expectError: true, }, { name: "failed saving of installer log", // non-fatal existing: []runtime.Object{testClusterDeployment(), testClusterProvision()}, failedInstallerLogRead: true, expectKubeconfigSecret: true, expectPasswordSecret: true, expectProvisionMetadataUpdate: true, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { tempDir, err := ioutil.TempDir("", "installmanagertest") require.NoError(t, err) defer os.RemoveAll(tempDir) defer os.Remove(installerConsoleLogFilePath) binaryTempDir, err := ioutil.TempDir(tempDir, "bin") require.NoError(t, err) pullSecret := testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecretName, corev1.DockerConfigJsonKey, "{}") existing := test.existing existing = append(existing, pullSecret) mocks := setupDefaultMocks(t, existing...) // This is necessary for the mocks to report failures like methods not being called an expected number of times. defer mocks.mockCtrl.Finish() // create a fake install-config mountedInstallConfigFile := filepath.Join(tempDir, "mounted-install-config.yaml") if err := ioutil.WriteFile(mountedInstallConfigFile, []byte("INSTALL_CONFIG: FAKE"), 0600); err != nil { t.Fatalf("error creating temporary fake install-config file: %v", err) } // create a fake pull secret file mountedPullSecretFile := filepath.Join(tempDir, "mounted-pull-secret.json") if err := ioutil.WriteFile(mountedPullSecretFile, []byte("{}"), 0600); err != nil { t.Fatalf("error creating temporary fake pull secret file: %v", err) } im := InstallManager{ LogLevel: "debug", WorkDir: tempDir, ClusterProvisionName: testProvisionName, Namespace: testNamespace, DynamicClient: mocks.fakeKubeClient, InstallConfigMountPath: mountedInstallConfigFile, PullSecretMountPath: mountedPullSecretFile, binaryDir: binaryTempDir, } im.Complete([]string{}) im.waitForProvisioningStage = func(*hivev1.ClusterProvision, *InstallManager) error { return nil } if !assert.NoError(t, writeFakeBinary(filepath.Join(tempDir, installerBinary), fmt.Sprintf(fakeInstallerBinary, tempDir))) { t.Fail() } if !assert.NoError(t, writeFakeBinary(filepath.Join(tempDir, ocBinary), fmt.Sprintf(fakeInstallerBinary, tempDir))) { t.Fail() } if test.failedMetadataRead { im.readClusterMetadata = func(*hivev1.ClusterProvision, *InstallManager) ([]byte, *installertypes.ClusterMetadata, error) { return nil, nil, fmt.Errorf("failed to save metadata") } } if test.failedKubeconfigSave { im.uploadAdminKubeconfig = func(*hivev1.ClusterProvision, *InstallManager) (*corev1.Secret, error) { return nil, fmt.Errorf("failed to save admin kubeconfig") } } if test.failedAdminPasswordSave { im.uploadAdminPassword = func(*hivev1.ClusterProvision, *InstallManager) (*corev1.Secret, error) { return nil, fmt.Errorf("failed to save admin password") } } if test.failedInstallerLogRead { im.readInstallerLog = func(*hivev1.ClusterProvision, *InstallManager, bool) (string, error) { return "", fmt.Errorf("failed to save install log") } } if test.failedProvisionUpdate != nil { calls := int32(0) im.updateClusterProvision = func(provision *hivev1.ClusterProvision, im *InstallManager, mutation provisionMutation) error { callNumber := calls calls = calls + 1 if callNumber == *test.failedProvisionUpdate { return fmt.Errorf("failed to update provision") } return updateClusterProvisionWithRetries(provision, im, mutation) } } // We don't want to run the uninstaller, so stub it out 
im.cleanupFailedProvision = alwaysSucceedCleanupFailedProvision // Save the list of actuators so that it can be restored at the end of this test im.actuator = &s3LogUploaderActuator{awsClientFn: func(c client.Client, secretName, namespace, region string, logger log.FieldLogger) (awsclient.Client, error) { return mocks.mockAWSClient, nil }} err = im.Run() if test.expectError { assert.Error(t, err) } else { assert.NoError(t, err) } adminKubeconfig := &corev1.Secret{} err = mocks.fakeKubeClient.Get(context.Background(), types.NamespacedName{ Namespace: testNamespace, Name: fmt.Sprintf("%s-admin-kubeconfig", testProvisionName), }, adminKubeconfig) if test.expectKubeconfigSecret { if assert.NoError(t, err) { kubeconfig, ok := adminKubeconfig.Data["kubeconfig"] if assert.True(t, ok) { assert.Equal(t, []byte("fakekubeconfig\n"), kubeconfig, "unexpected kubeconfig") } assert.Equal(t, testClusterProvision().Name, adminKubeconfig.Labels[constants.ClusterProvisionNameLabel], "incorrect cluster provision name label") assert.Equal(t, constants.SecretTypeKubeConfig, adminKubeconfig.Labels[constants.SecretTypeLabel], "incorrect secret type label") } } else { assert.True(t, apierrors.IsNotFound(err), "unexpected response from getting kubeconfig secret: %v", err) } adminPassword := &corev1.Secret{} err = mocks.fakeKubeClient.Get(context.Background(), types.NamespacedName{ Namespace: testNamespace, Name: fmt.Sprintf("%s-admin-password", testProvisionName), }, adminPassword) if test.expectPasswordSecret { if assert.NoError(t, err) { username, ok := adminPassword.Data["username"] if assert.True(t, ok) { assert.Equal(t, []byte("kubeadmin"), username, "unexpected admin username") } password, ok := adminPassword.Data["password"] if assert.True(t, ok) { assert.Equal(t, []byte("fakepassword"), password, "unexpected admin password") } assert.Equal(t, testClusterProvision().Name, adminPassword.Labels[constants.ClusterProvisionNameLabel], "incorrect cluster provision name label") assert.Equal(t, constants.SecretTypeKubeAdminCreds, adminPassword.Labels[constants.SecretTypeLabel], "incorrect secret type label") } } else { assert.True(t, apierrors.IsNotFound(err), "unexpected response from getting password secret: %v", err) } provision := &hivev1.ClusterProvision{} if err := mocks.fakeKubeClient.Get(context.Background(), types.NamespacedName{ Namespace: testNamespace, Name: testProvisionName, }, provision, ); !assert.NoError(t, err) { t.Fail() } if test.expectProvisionMetadataUpdate { assert.NotNil(t, provision.Spec.Metadata, "expected metadata to be set") if assert.NotNil(t, provision.Spec.AdminKubeconfigSecretRef, "expected kubeconfig secret reference to be set") { assert.Equal(t, "test-provision-admin-kubeconfig", provision.Spec.AdminKubeconfigSecretRef.Name, "unexpected name for kubeconfig secret reference") } if assert.NotNil(t, provision.Spec.AdminPasswordSecretRef, "expected password secret reference to be set") { assert.Equal(t, "test-provision-admin-password", provision.Spec.AdminPasswordSecretRef.Name, "unexpected name for password secret reference") } } else { assert.Nil(t, provision.Spec.Metadata, "expected metadata to be empty") assert.Nil(t, provision.Spec.AdminKubeconfigSecretRef, "expected kubeconfig secret reference to be empty") assert.Nil(t, provision.Spec.AdminPasswordSecretRef, "expected password secret reference to be empty") } if test.expectProvisionLogUpdate { if assert.NotNil(t, provision.Spec.InstallLog, "expected install log to be set") { assert.Equal(t, "some fake installer log output\n", 
*provision.Spec.InstallLog, "did not find expected contents in saved installer log") } } else { assert.Nil(t, provision.Spec.InstallLog, "expected install log to be empty") } }) } } func writeFakeBinary(fileName string, contents string) error { data := []byte(contents) err := ioutil.WriteFile(fileName, data, 0755) return err } func testClusterDeployment() *hivev1.ClusterDeployment { return &hivev1.ClusterDeployment{ ObjectMeta: metav1.ObjectMeta{ Name: testDeploymentName, Namespace: testNamespace, }, Spec: hivev1.ClusterDeploymentSpec{ Provisioning: &hivev1.Provisioning{}, }, } } func alwaysSucceedCleanupFailedProvision(client.Client, *hivev1.ClusterDeployment, string, log.FieldLogger) error { log.Debugf("running always successful uninstall") return nil } func testSecret(secretType corev1.SecretType, name, key, value string) *corev1.Secret { s := &corev1.Secret{ Type: secretType, ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: testNamespace, }, Data: map[string][]byte{ key: []byte(value), }, } return s } func TestCleanupRegex(t *testing.T) { tests := []struct { name string sourceString string expectedString string }{ { name: "install log example", sourceString: `level=info msg="Consuming \"Worker Ignition Config\" from target directory" level=info msg="Consuming \"Bootstrap Ignition Config\" from target directory" level=info msg="Consuming \"Master Ignition Config\" from target directory" level=info msg="Creating infrastructure resources..." level=info msg="Waiting up to 30m0s for the Kubernetes API at https://api.test-cluster.example.com:6443..." level=info msg="API v1.13.4+af45cda up" level=info msg="Waiting up to 30m0s for the bootstrap-complete event..." level=info msg="Destroying the bootstrap resources..." level=info msg="Waiting up to 30m0s for the cluster at https://api.test-cluster.example.com:6443 to initialize..." level=info msg="Waiting up to 10m0s for the openshift-console route to be created..." level=info msg="Install complete!" level=info msg="To access the cluster as the system:admin user when using 'oc', run 'export KUBECONFIG=/output/auth/kubeconfig'" level=info msg="Access the OpenShift web-console here: https://console-openshift-console.apps.test-cluster.example.com" level=info msg="Login to the console with user: kubeadmin, password: SomeS-ecret-Passw-ord12-34567"`, expectedString: `level=info msg="Consuming \"Worker Ignition Config\" from target directory" level=info msg="Consuming \"Bootstrap Ignition Config\" from target directory" level=info msg="Consuming \"Master Ignition Config\" from target directory" level=info msg="Creating infrastructure resources..." level=info msg="Waiting up to 30m0s for the Kubernetes API at https://api.test-cluster.example.com:6443..." level=info msg="API v1.13.4+af45cda up" level=info msg="Waiting up to 30m0s for the bootstrap-complete event..." level=info msg="Destroying the bootstrap resources..." level=info msg="Waiting up to 30m0s for the cluster at https://api.test-cluster.example.com:6443 to initialize..." level=info msg="Waiting up to 10m0s for the openshift-console route to be created..." level=info msg="Install complete!" 
level=info msg="To access the cluster as the system:admin user when using 'oc', run 'export KUBECONFIG=/output/auth/kubeconfig'" level=info msg="Access the OpenShift web-console here: https://console-openshift-console.apps.test-cluster.example.com" REDACTED LINE OF OUTPUT`, }, { name: "password at start of line", sourceString: `some log line password at start of line more log`, expectedString: `some log line REDACTED LINE OF OUTPUT more log`, }, { name: "password in first line", sourceString: `first line password more text second line no magic string`, expectedString: `REDACTED LINE OF OUTPUT second line no magic string`, }, { name: "password in last line", sourceString: `first line last line with password in text`, expectedString: `first line REDACTED LINE OF OUTPUT`, }, { name: "case sensitivity test", sourceString: `abc PaSsWoRd def`, expectedString: `REDACTED LINE OF OUTPUT`, }, { name: "libvirt ssh connection error in console log", sourceString: "Internal error: could not connect to libvirt: virError(Code=38, Domain=7, Message='Cannot recv data: Permission denied, please try again.\\r\\nPermission denied (publickey,gssapi-keyex,gssapi-with-mic,password)", // In addition to redacting the line with "password" the // escaped carriage returns and newlines are unescaped. expectedString: "Internal error: could not connect to libvirt: virError(Code=38, Domain=7, Message='Cannot recv data: Permission denied, please try again.\r\nREDACTED LINE OF OUTPUT", }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { cleanedString := cleanupLogOutput(test.sourceString) assert.Equal(t, test.expectedString, cleanedString, "unexpected cleaned string") }) } } func TestInstallManagerSSH(t *testing.T) { apis.AddToScheme(scheme.Scheme) tests := []struct { name string existingSSHAgentRunning bool expectedEnvVars map[string]string badSSHAgent bool badSSHAdd bool expectedError bool }{ { name: "already running SSH agent", existingSSHAgentRunning: true, }, { name: "no running SSH agent", expectedEnvVars: map[string]string{ "SSH_AUTH_SOCK": fakeSSHAgentSockPath, "SSH_AGENT_PID": fakeSSHAgentPID, }, }, { name: "error on launching SSH agent", badSSHAgent: true, expectedError: true, }, { name: "error on running ssh-add", expectedEnvVars: map[string]string{ "SSH_AUTH_SOCK": fakeSSHAgentSockPath, "SSH_AGENT_PID": fakeSSHAgentPID, }, badSSHAdd: true, expectedError: true, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { // clear out env vars for each test loop if err := os.Unsetenv("SSH_AUTH_SOCK"); err != nil { t.Fatalf("error clearing out existing env var: %v", err) } if err := os.Unsetenv("SSH_AGENT_PID"); err != nil { t.Fatalf("error clearing out existing env var: %v", err) } // temp dir to hold fake ssh-add and ssh-agent and ssh keys testDir, err := ioutil.TempDir("", "installmanagersshfake") if err != nil { t.Fatalf("error creating directory hold temp ssh items: %v", err) } defer os.RemoveAll(testDir) // create a fake SSH private key file sshKeyFile := filepath.Join(testDir, "tempSSHKey") if err := ioutil.WriteFile(sshKeyFile, []byte("FAKE SSH KEY CONTENT"), 0600); err != nil { t.Fatalf("error creating temporary fake SSH key file: %v", err) } // create a fake 'ssh-add' binary sshAddBinFileContent := fmt.Sprintf(fakeSSHAddBinary, sshKeyFile) if test.badSSHAdd { sshAddBinFileContent = alwaysErrorBinary } sshAddBinFile := filepath.Join(testDir, "ssh-add") if err := ioutil.WriteFile(sshAddBinFile, []byte(sshAddBinFileContent), 0555); err != nil { t.Fatalf("error creating fake 
ssh-add binary: %v", err) } // create a fake 'ssh-agent' binary sshAgentBinFileContent := fmt.Sprintf(fakeSSHAgentBinary, fakeSSHAgentSockPath, fakeSSHAgentPID, fakeSSHAgentPID) if test.badSSHAgent { sshAgentBinFileContent = alwaysErrorBinary } sshAgentBinFile := filepath.Join(testDir, "ssh-agent") if err := ioutil.WriteFile(sshAgentBinFile, []byte(sshAgentBinFileContent), 0555); err != nil { t.Fatalf("error creating fake ssh-agent binary: %v", err) } // create a fake install-config mountedInstallConfigFile := filepath.Join(testDir, "mounted-install-config.yaml") if err := ioutil.WriteFile(mountedInstallConfigFile, []byte("INSTALL_CONFIG: FAKE"), 0600); err != nil { t.Fatalf("error creating temporary fake install-config file: %v", err) } // create a fake pull secret file mountedPullSecretFile := filepath.Join(testDir, "mounted-pull-secret.json") if err := ioutil.WriteFile(mountedPullSecretFile, []byte("{}"), 0600); err != nil { t.Fatalf("error creating temporary fake pull secret file: %v", err) } tempDir, err := ioutil.TempDir("", "installmanagersshtestresults") if err != nil { t.Fatalf("errored while setting up temp dir for test: %v", err) } defer os.RemoveAll(tempDir) im := InstallManager{ LogLevel: "debug", WorkDir: tempDir, InstallConfigMountPath: mountedInstallConfigFile, PullSecretMountPath: mountedPullSecretFile, } if test.existingSSHAgentRunning { if err := os.Setenv("SSH_AUTH_SOCK", fakeSSHAgentSockPath); err != nil { t.Fatalf("errored setting up fake ssh auth sock env: %v", err) } } im.Complete([]string{}) // place fake binaries early into path origPathEnv := os.Getenv("PATH") pathEnv := fmt.Sprintf("%s:%s", testDir, origPathEnv) if err := os.Setenv("PATH", pathEnv); err != nil { t.Fatalf("error setting PATH (for fake binaries): %v", err) } cleanup, err := im.initSSHAgent([]string{sshKeyFile}) // restore PATH if err := os.Setenv("PATH", origPathEnv); err != nil { t.Fatalf("error restoring PATH after test: %v", err) } if test.expectedError { assert.Error(t, err, "expected an error while initializing SSH") } else { assert.NoError(t, err, "unexpected error while testing SSH initialization") } // check env vars are properly set/cleaned if !test.existingSSHAgentRunning { for k, v := range test.expectedEnvVars { val := os.Getenv(k) assert.Equal(t, v, val, "env var %s not expected value", k) } // cleanup cleanup() // verify cleanup for _, envVar := range test.expectedEnvVars { assert.Empty(t, os.Getenv(envVar)) } } }) } } func TestInstallManagerSSHKnownHosts(t *testing.T) { apis.AddToScheme(scheme.Scheme) tests := []struct { name string knownHosts []string expectedFile string }{ { name: "single ssh known host", knownHosts: []string{ "192.168.86.100 ecdsa-sha2-nistp256 FOOBAR", }, expectedFile: `192.168.86.100 ecdsa-sha2-nistp256 FOOBAR`, }, { name: "multiple ssh known hosts", knownHosts: []string{ "192.168.86.100 ecdsa-sha2-nistp256 FOOBAR", "192.168.86.101 ecdsa-sha2-nistp256 FOOBAR2", "192.168.86.102 ecdsa-sha2-nistp256 FOOBAR3", }, expectedFile: `192.168.86.100 ecdsa-sha2-nistp256 FOOBAR 192.168.86.101 ecdsa-sha2-nistp256 FOOBAR2 192.168.86.102 ecdsa-sha2-nistp256 FOOBAR3`, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { tempDir, err := ioutil.TempDir("", "installmanagersshknownhosts") require.NoError(t, err, "error creating test tempdir") defer os.RemoveAll(tempDir) im := InstallManager{ log: log.WithField("test", test.name), } err = im.writeSSHKnownHosts(tempDir, test.knownHosts) require.NoError(t, err, "error writing ssh known hosts ") content, err := 
ioutil.ReadFile(filepath.Join(tempDir, ".ssh", "known_hosts")) require.NoError(t, err, "error reading expected ssh known_hosts file") assert.Equal(t, test.expectedFile, string(content), "unexpected known_hosts file contents") }) } } func TestIsBootstrapComplete(t *testing.T) { cases := []struct { name string errCode int expectedComplete bool }{ { name: "complete", errCode: 0, expectedComplete: true, }, { name: "not complete", errCode: 1, expectedComplete: false, }, } for _, tc := range cases { t.Run(tc.name, func(t *testing.T) { dir, err := ioutil.TempDir("", "TestIsBootstrapComplete") if err != nil { t.Fatalf("could not create temp dir: %v", err) } defer os.RemoveAll(dir) script := fmt.Sprintf("#!/bin/bash\nexit %d", tc.errCode) if err := ioutil.WriteFile(path.Join(dir, "openshift-install"), []byte(script), 0777); err != nil { t.Fatalf("could not write openshift-install file: %v", err) } im := &InstallManager{WorkDir: dir} actualComplete := im.isBootstrapComplete() assert.Equal(t, tc.expectedComplete, actualComplete, "unexpected bootstrap complete") }) } } func Test_pasteInPullSecret(t *testing.T) { for _, inputFile := range []string{ "install-config.yaml", "install-config-with-existing-pull-secret.yaml", } { t.Run(inputFile, func(t *testing.T) { icData, err := ioutil.ReadFile(filepath.Join("testdata", inputFile)) if !assert.NoError(t, err, "unexpected error reading install-config.yaml") { return } expected, err := ioutil.ReadFile(filepath.Join("testdata", "install-config-with-pull-secret.yaml")) if !assert.NoError(t, err, "unexpected error reading install-config-with-pull-secret.yaml") { return } actual, err := pasteInPullSecret(icData, filepath.Join("testdata", "pull-secret.json")) assert.NoError(t, err, "unexpected error pasting in pull secret") assert.Equal(t, string(expected), string(actual), "unexpected InstallConfig with pasted pull secret") }) } }
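TestCleanupRegex above pins down the redaction contract: unescape literal \r\n sequences, then replace any line containing "password" (case-insensitive) with a fixed marker. A minimal standalone sketch consistent with those expectations — illustrative only, the real cleanupLogOutput may differ:

package main

import (
	"fmt"
	"regexp"
	"strings"
)

// redactPasswords unescapes literal \r\n sequences, then blanks out any
// line mentioning "password", matching what TestCleanupRegex expects.
func redactPasswords(s string) string {
	s = strings.ReplaceAll(s, `\r\n`, "\r\n")
	re := regexp.MustCompile(`(?im)^.*password.*$`)
	return re.ReplaceAllString(s, "REDACTED LINE OF OUTPUT")
}

func main() {
	fmt.Println(redactPasswords("ok line\nsecret password here\nlast line"))
}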
1
20,899
It would be nice if we could verify that this is in fact the error we expected. But that's a latent issue, something for the backlog.
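Sketching what that backlog item could look like: give each table entry an expected substring (the field name here is hypothetical) and assert on it instead of just assert.Error:

// Hypothetical tightening of the error check in the table-driven loop.
if test.expectError {
	if assert.Error(t, err) && test.expectedErrSubstring != "" {
		assert.Contains(t, err.Error(), test.expectedErrSubstring,
			"got an error, but not the one this case expected")
	}
} else {
	assert.NoError(t, err)
}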
openshift-hive
go
@@ -23,11 +23,11 @@ import ( "testing" "time" + "github.com/mysteriumnetwork/go-openvpn/openvpn" + "github.com/mysteriumnetwork/go-openvpn/openvpn/middlewares/state" "github.com/mysteriumnetwork/node/client/stats" "github.com/mysteriumnetwork/node/communication" "github.com/mysteriumnetwork/node/identity" - "github.com/mysteriumnetwork/node/openvpn" - "github.com/mysteriumnetwork/node/openvpn/middlewares/state" "github.com/mysteriumnetwork/node/server" "github.com/mysteriumnetwork/node/service_discovery/dto" "github.com/mysteriumnetwork/node/session"
1
/* * Copyright (C) 2017 The "MysteriumNetwork/node" Authors. * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ package connection import ( "errors" "sync" "testing" "time" "github.com/mysteriumnetwork/node/client/stats" "github.com/mysteriumnetwork/node/communication" "github.com/mysteriumnetwork/node/identity" "github.com/mysteriumnetwork/node/openvpn" "github.com/mysteriumnetwork/node/openvpn/middlewares/state" "github.com/mysteriumnetwork/node/server" "github.com/mysteriumnetwork/node/service_discovery/dto" "github.com/mysteriumnetwork/node/session" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/suite" ) type testContext struct { suite.Suite connManager *connectionManager fakeDiscoveryClient *server.ClientFake fakeOpenVpn *fakeOpenvpnClient fakeStatsKeeper *fakeSessionStatsKeeper fakeDialog *fakeDialog openvpnCreationError error sync.RWMutex } var ( myID = identity.FromAddress("identity-1") activeProviderID = identity.FromAddress("vpn-node-1") activeProviderContact = dto.Contact{} activeProposal = dto.ServiceProposal{ ProviderID: activeProviderID.Address, ProviderContacts: []dto.Contact{activeProviderContact}, } ) func (tc *testContext) SetupTest() { tc.Lock() defer tc.Unlock() tc.fakeDiscoveryClient = server.NewClientFake() tc.fakeDiscoveryClient.RegisterProposal(activeProposal, nil) tc.fakeDialog = &fakeDialog{} dialogCreator := func(consumer, provider identity.Identity, contact dto.Contact) (communication.Dialog, error) { tc.RLock() defer tc.RUnlock() return tc.fakeDialog, nil } tc.fakeOpenVpn = &fakeOpenvpnClient{ nil, []openvpn.State{ openvpn.ProcessStarted, openvpn.ConnectingState, openvpn.WaitState, openvpn.AuthenticatingState, openvpn.GetConfigState, openvpn.AssignIpState, openvpn.ConnectedState, }, []openvpn.State{ openvpn.ExitingState, openvpn.ProcessExited, }, nil, sync.WaitGroup{}, sync.RWMutex{}, } tc.openvpnCreationError = nil fakeVpnClientFactory := func(vpnSession session.SessionDto, consumerID identity.Identity, providerID identity.Identity, callback state.Callback, options ConnectOptions) (openvpn.Process, error) { tc.RLock() defer tc.RUnlock() //each test can set this value to simulate openvpn creation error, this flag is reset BEFORE each test if tc.openvpnCreationError != nil { return nil, tc.openvpnCreationError } tc.fakeOpenVpn.StateCallback(callback) return tc.fakeOpenVpn, nil } tc.fakeStatsKeeper = &fakeSessionStatsKeeper{} tc.connManager = NewManager(tc.fakeDiscoveryClient, dialogCreator, fakeVpnClientFactory, tc.fakeStatsKeeper) } func (tc *testContext) TestWhenNoConnectionIsMadeStatusIsNotConnected() { assert.Exactly(tc.T(), statusNotConnected(), tc.connManager.Status()) } func (tc *testContext) TestWithUnknownProviderConnectionIsNotMade() { noProposalsError := errors.New("provider has no service proposals") assert.Equal(tc.T(), noProposalsError, tc.connManager.Connect(myID, identity.FromAddress("unknown-node"), ConnectOptions{})) assert.Equal(tc.T(), 
statusNotConnected(), tc.connManager.Status()) assert.False(tc.T(), tc.fakeStatsKeeper.sessionStartMarked) } func (tc *testContext) TestOnConnectErrorStatusIsNotConnectedAndSessionStartIsNotMarked() { fatalVpnError := errors.New("fatal connection error") tc.fakeOpenVpn.onStartReturnError = fatalVpnError assert.Error(tc.T(), tc.connManager.Connect(myID, activeProviderID, ConnectOptions{})) assert.Equal(tc.T(), statusNotConnected(), tc.connManager.Status()) assert.True(tc.T(), tc.fakeDialog.closed) assert.False(tc.T(), tc.fakeStatsKeeper.sessionStartMarked) } func (tc *testContext) TestWhenManagerMadeConnectionStatusReturnsConnectedStateAndSessionId() { err := tc.connManager.Connect(myID, activeProviderID, ConnectOptions{}) assert.NoError(tc.T(), err) assert.Equal(tc.T(), statusConnected("vpn-connection-id"), tc.connManager.Status()) } func (tc *testContext) TestWhenManagerMadeConnectionSessionStartIsMarked() { err := tc.connManager.Connect(myID, activeProviderID, ConnectOptions{}) assert.NoError(tc.T(), err) assert.True(tc.T(), tc.fakeStatsKeeper.sessionStartMarked) } func (tc *testContext) TestStatusReportsConnectingWhenConnectionIsInProgress() { tc.fakeOpenVpn.onStartReportStates = []openvpn.State{} go func() { tc.connManager.Connect(myID, activeProviderID, ConnectOptions{}) assert.Fail(tc.T(), "This should never return") }() waitABit() assert.Equal(tc.T(), statusConnecting(), tc.connManager.Status()) } func (tc *testContext) TestStatusReportsDisconnectingThenNotConnected() { tc.fakeOpenVpn.onStopReportStates = []openvpn.State{} err := tc.connManager.Connect(myID, activeProviderID, ConnectOptions{}) assert.NoError(tc.T(), err) assert.Equal(tc.T(), statusConnected("vpn-connection-id"), tc.connManager.Status()) assert.NoError(tc.T(), tc.connManager.Disconnect()) assert.Equal(tc.T(), statusDisconnecting(), tc.connManager.Status()) tc.fakeOpenVpn.reportState(openvpn.ExitingState) tc.fakeOpenVpn.reportState(openvpn.ProcessExited) waitABit() assert.Equal(tc.T(), statusNotConnected(), tc.connManager.Status()) assert.True(tc.T(), tc.fakeStatsKeeper.sessionEndMarked) } func (tc *testContext) TestConnectResultsInAlreadyConnectedErrorWhenConnectionExists() { assert.NoError(tc.T(), tc.connManager.Connect(myID, activeProviderID, ConnectOptions{})) assert.Equal(tc.T(), ErrAlreadyExists, tc.connManager.Connect(myID, activeProviderID, ConnectOptions{})) } func (tc *testContext) TestDisconnectReturnsErrorWhenNoConnectionExists() { assert.Equal(tc.T(), ErrNoConnection, tc.connManager.Disconnect()) } func (tc *testContext) TestReconnectingStatusIsReportedWhenOpenVpnGoesIntoReconnectingState() { assert.NoError(tc.T(), tc.connManager.Connect(myID, activeProviderID, ConnectOptions{})) tc.fakeOpenVpn.reportState(openvpn.ReconnectingState) waitABit() assert.Equal(tc.T(), statusReconnecting(), tc.connManager.Status()) } func (tc *testContext) TestDoubleDisconnectResultsInError() { assert.NoError(tc.T(), tc.connManager.Connect(myID, activeProviderID, ConnectOptions{})) assert.Equal(tc.T(), statusConnected("vpn-connection-id"), tc.connManager.Status()) assert.NoError(tc.T(), tc.connManager.Disconnect()) waitABit() assert.Equal(tc.T(), statusNotConnected(), tc.connManager.Status()) assert.Equal(tc.T(), ErrNoConnection, tc.connManager.Disconnect()) } func (tc *testContext) TestTwoConnectDisconnectCyclesReturnNoError() { assert.NoError(tc.T(), tc.connManager.Connect(myID, activeProviderID, ConnectOptions{})) assert.Equal(tc.T(), statusConnected("vpn-connection-id"), tc.connManager.Status()) assert.NoError(tc.T(), 
tc.connManager.Disconnect()) waitABit() assert.Equal(tc.T(), statusNotConnected(), tc.connManager.Status()) assert.NoError(tc.T(), tc.connManager.Connect(myID, activeProviderID, ConnectOptions{})) assert.Equal(tc.T(), statusConnected("vpn-connection-id"), tc.connManager.Status()) assert.NoError(tc.T(), tc.connManager.Disconnect()) waitABit() assert.Equal(tc.T(), statusNotConnected(), tc.connManager.Status()) } func (tc *testContext) TestConnectFailsIfOpenvpnFactoryReturnsError() { tc.openvpnCreationError = errors.New("failed to create vpn instance") assert.Error(tc.T(), tc.connManager.Connect(myID, activeProviderID, ConnectOptions{})) } func (tc *testContext) TestStatusIsConnectedWhenConnectCommandReturnsWithoutError() { tc.connManager.Connect(myID, activeProviderID, ConnectOptions{}) assert.Equal(tc.T(), statusConnected("vpn-connection-id"), tc.connManager.Status()) } func (tc *testContext) TestConnectingInProgressCanBeCanceled() { tc.fakeOpenVpn.onStartReportStates = []openvpn.State{} connectWaiter := &sync.WaitGroup{} connectWaiter.Add(1) var err error go func() { defer connectWaiter.Done() err = tc.connManager.Connect(myID, activeProviderID, ConnectOptions{}) }() waitABit() assert.Equal(tc.T(), statusConnecting(), tc.connManager.Status()) assert.NoError(tc.T(), tc.connManager.Disconnect()) connectWaiter.Wait() assert.Equal(tc.T(), ErrConnectionCancelled, err) } func (tc *testContext) TestConnectMethodReturnsErrorIfOpenvpnClientExitsDuringConnect() { tc.fakeOpenVpn.onStartReportStates = []openvpn.State{} tc.fakeOpenVpn.onStopReportStates = []openvpn.State{} connectWaiter := sync.WaitGroup{} connectWaiter.Add(1) var err error go func() { defer connectWaiter.Done() err = tc.connManager.Connect(myID, activeProviderID, ConnectOptions{}) }() waitABit() tc.fakeOpenVpn.reportState(openvpn.ProcessExited) connectWaiter.Wait() assert.Equal(tc.T(), ErrOpenvpnProcessDied, err) } func TestConnectionManagerSuite(t *testing.T) { suite.Run(t, new(testContext)) } type fakeOpenvpnClient struct { onStartReturnError error onStartReportStates []openvpn.State onStopReportStates []openvpn.State stateCallback state.Callback fakeProcess sync.WaitGroup sync.RWMutex } func (foc *fakeOpenvpnClient) Start() error { foc.RLock() defer foc.RUnlock() if foc.onStartReturnError != nil { return foc.onStartReturnError } foc.fakeProcess.Add(1) for _, openvpnState := range foc.onStartReportStates { foc.reportState(openvpnState) } return nil } func (foc *fakeOpenvpnClient) Wait() error { foc.fakeProcess.Wait() return nil } func (foc *fakeOpenvpnClient) Stop() { for _, openvpnState := range foc.onStopReportStates { foc.reportState(openvpnState) } foc.fakeProcess.Done() } func (foc *fakeOpenvpnClient) reportState(state openvpn.State) { foc.RLock() defer foc.RUnlock() foc.stateCallback(state) } func (foc *fakeOpenvpnClient) StateCallback(callback state.Callback) { foc.Lock() defer foc.Unlock() foc.stateCallback = callback } type fakeDialog struct { peerID identity.Identity closed bool sync.RWMutex } func (fd *fakeDialog) PeerID() identity.Identity { fd.RLock() defer fd.RUnlock() return fd.peerID } func (fd *fakeDialog) Close() error { fd.Lock() defer fd.Unlock() fd.closed = true return nil } func (fd *fakeDialog) Receive(consumer communication.MessageConsumer) error { return nil } func (fd *fakeDialog) Respond(consumer communication.RequestConsumer) error { return nil } func (fd *fakeDialog) Send(producer communication.MessageProducer) error { return nil } func (fd *fakeDialog) Request(producer communication.RequestProducer) 
(responsePtr interface{}, err error) { return &session.SessionCreateResponse{ Success: true, Message: "Everything is great!", Session: session.SessionDto{ ID: "vpn-connection-id", Config: []byte{}, }, }, nil } type fakeSessionStatsKeeper struct { sessionStartMarked, sessionEndMarked bool } func (fsk *fakeSessionStatsKeeper) Save(stats stats.SessionStats) { } func (fsk *fakeSessionStatsKeeper) Retrieve() stats.SessionStats { return stats.SessionStats{} } func (fsk *fakeSessionStatsKeeper) MarkSessionStart() { fsk.sessionStartMarked = true } func (fsk *fakeSessionStatsKeeper) GetSessionDuration() time.Duration { return time.Duration(0) } func (fsk *fakeSessionStatsKeeper) MarkSessionEnd() { fsk.sessionEndMarked = true } func waitABit() { //usually time.Sleep call gives a chance for other goroutines to kick in //important when testing async code time.Sleep(10 * time.Millisecond) }
1
11,897
I hate when some internal process-specific middleware leaks into the connection manager :( todo later.
mysteriumnetwork-node
go
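The review comment above is about the factory signature: after this patch, constructing a VPN client requires a `state.Callback` from the external go-openvpn middleware package, so a process-internal detail crosses into the connection manager. Below is a minimal Go sketch of that coupling and one hypothetical way to remove it; all names here (`StateCallback`, `EventFactory`, `ConnectionEvent`) are illustrative stand-ins, not the project's API.

```go
package main

import "fmt"

// State and StateCallback stand in for openvpn.State and state.Callback,
// the process-internal middleware types now imported from go-openvpn.
type State string
type StateCallback func(State)

// Process stands in for openvpn.Process.
type Process interface {
	Start() error
	Stop()
}

// Current shape (sketch): every caller of the factory must know about the
// middleware's StateCallback type.
type VpnClientFactory func(sessionID string, cb StateCallback) (Process, error)

// One hypothetical decoupling: the manager observes connection-level events
// and an openvpn adapter translates raw process states into them, keeping
// the middleware import out of the connection package.
type ConnectionEvent int

const (
	EventConnected ConnectionEvent = iota
	EventDisconnected
)

type EventFactory func(sessionID string, onEvent func(ConnectionEvent)) (Process, error)

func main() {
	fmt.Println("compile-only sketch; see the record above for context")
}
```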
@@ -102,7 +102,14 @@ def setupNupic(): name = "nupic", version = version, packages = findPackages(repositoryDir), + # A lot of this stuff may not be packaged properly, most of it was added in + # an effort to get a binary package prepared for nupic.regression testing + # on Travis-CI, but it wasn't done the right way. I'll be refactoring a lot + # of this for https://github.com/numenta/nupic/issues/408, so this will be + # changing soon. -- Matt package_data = { + "nupic.support.configuration_base": ["nupic-default.xml"], + "nupic.support.__init__": ["nupic-logging.conf"], "nupic": ["README.md", "LICENSE.txt", "CMakeLists.txt", "*.so", "*.dll", "*.dylib"], "nupic.bindings": ["_*.so", "_*.dll"],
1
import shutil import sys import os import subprocess from setuptools import setup """ This file only will call CMake process to generate scripts, build, and then install the NuPIC binaries. ANY EXTRA code related to build process MUST be put into CMake file. """ repositoryDir = os.getcwd() # Read command line options looking for extra options for CMake and Make # For example, an user could type: # python setup.py install make_options="-j3" # which will add "-j3" option to Make commandline cmakeOptions = "" makeOptions = "install" setupOptions = "" mustBuildExtensions = False for arg in sys.argv[:]: if ("cmake_options" in arg) or ("make_options" in arg): (option, _, rhs) = arg.partition("=") if option == "--cmake_options": cmakeOptions = rhs sys.argv.remove(arg) if option == "--make_options": makeOptions = makeOptions + " " + rhs sys.argv.remove(arg) elif not "setup.py" in arg: if ("build" in arg) or ("install" in arg): mustBuildExtensions = True setupOptions += arg + " " # Check if no option was passed, i.e. if "setup.py" is the only option # If True, "develop" is passed by default # This is useful when a developer wish build the project directly from an IDE if len(sys.argv) == 1: print "No command passed. Using 'develop' as default command. Use " \ "'python setup.py --help' for more information." sys.argv.append("develop") mustBuildExtensions = True # Get version from local file. version = None with open("VERSION", "r") as versionFile: version = versionFile.read().strip() def findPackages(repositoryDir): """ Traverse nupic directory and create packages for each subdir containing a __init__.py file """ packages = [] for root, _, files in os.walk(repositoryDir + "/nupic"): if "__init__.py" in files: subdir = root.replace(repositoryDir + "/", "") packages.append(subdir.replace("/", ".")) return packages def buildExtensionsNupic(): """ CMake-specific build operations """ # Prepare directories to the CMake process sourceDir = repositoryDir buildScriptsDir = repositoryDir + "/build/scripts" if os.path.exists(buildScriptsDir): shutil.rmtree(buildScriptsDir) os.makedirs(buildScriptsDir) os.chdir(buildScriptsDir) # Generate build files with CMake returnCode = subprocess.call( "cmake %s %s" % (sourceDir, cmakeOptions), shell=True ) if returnCode != 0: sys.exit("Unable to generate build scripts!") # Build library with Make returnCode = subprocess.call("make " + makeOptions, shell=True) if returnCode != 0: sys.exit("Unable to build the library!") def setupNupic(): """ Package setup operations """ # Setup library os.chdir(repositoryDir) setup( name = "nupic", version = version, packages = findPackages(repositoryDir), package_data = { "nupic": ["README.md", "LICENSE.txt", "CMakeLists.txt", "*.so", "*.dll", "*.dylib"], "nupic.bindings": ["_*.so", "_*.dll"], "nupic.data": ["*.json"], "nupic.frameworks.opf.exp_generator": ["*.json", "*.tpl"], "nupic.frameworks.opf.jsonschema": ["*.json"], "nupic.support.resources.images": ["*.png", "*.gif", "*.ico", "*.graffle"], "nupic.swarming.jsonschema": ["*.json"] }, data_files=[ ("", [ "CMakeLists.txt", "config/default/nupic-default.xml" ] ) ], include_package_data = True, description = "Numenta Platform for Intelligent Computing", author="Numenta", author_email="[email protected]", url="https://github.com/numenta/nupic", classifiers=[ "Programming Language :: Python", "Programming Language :: Python :: 2", "License :: OSI Approved :: GNU General Public License (GPL)", "Operating System :: OS Independent", "Development Status :: 5 - Production/Stable", "Environment 
:: Console", "Intended Audience :: Science/Research", "Topic :: Scientific/Engineering :: Artificial Intelligence" ], long_description = """\ NuPIC is a library that provides the building blocks for online prediction systems. The library contains the Cortical Learning Algorithm (CLA), but also the Online Prediction Framework (OPF) that allows clients to build prediction systems out of encoders, models, and metrics. For more information, see numenta.org or the NuPIC wiki (https://github.com/numenta/nupic/wiki). """ ) # Build and setup NuPIC if mustBuildExtensions: buildExtensionsNupic() setupNupic()
1
16,782
Reflects where these files were moved for `pkg_resources`.
numenta-nupic
py
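The message above refers to `nupic-default.xml` and `nupic-logging.conf` having moved into the `nupic.support` tree so that `pkg_resources` can locate them at runtime. A minimal sketch of that lookup follows; it assumes an installed `nupic` package, and the exact package key is an assumption based on the `package_data` entries in the patch, not the project's actual loading code.

```python
# Sketch only: assumes nupic is installed and that the data files ship
# inside the nupic.support package, as the package_data entries suggest.
import pkg_resources

# setuptools' package_data keys name the package a file ships with, and
# pkg_resources later resolves the file relative to that same package.
default_config = pkg_resources.resource_filename("nupic.support",
                                                 "nupic-default.xml")
logging_conf = pkg_resources.resource_filename("nupic.support",
                                               "nupic-logging.conf")
print(default_config)
print(logging_conf)
```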
@@ -1,8 +1,12 @@ import AbstractIndexRoute from 'hospitalrun/routes/abstract-index-route'; import { translationMacro as t } from 'ember-i18n'; +import Ember from 'ember'; +const { computed } = Ember; export default AbstractIndexRoute.extend({ - pageTitle: t('admin.textReplacements.pageTitle'), + pageTitle: computed('i18n', () => { + return t('admin.textReplacements.pageTitle'); + }), hideNewButton: true, model() {
1
import AbstractIndexRoute from 'hospitalrun/routes/abstract-index-route'; import { translationMacro as t } from 'ember-i18n'; export default AbstractIndexRoute.extend({ pageTitle: t('admin.textReplacements.pageTitle'), hideNewButton: true, model() { let store = this.get('store'); return store.findAll('text-expansion').then((result) => { return result.filter((model) => { let isNew = model.get('isNew'); return !isNew; }); }); }, setupController(controller, model) { this._super(controller, model); controller.createExpansion(); }, actions: { addExpansion(newExpansion) { newExpansion.save() .then(() => { this.refresh(); }) .catch(() => { this.refresh(); }); }, deleteExpansion(expansion) { expansion.deleteRecord(); expansion.save() .then(() => { this.refresh(); }) .catch(() => { this.refresh(); }); } } });
1
13,690
This should be `computed('i18n.locale', ...)`.
HospitalRun-hospitalrun-frontend
js
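A minimal sketch of the change the reviewer is asking for, using the standard ember-i18n pattern (the service injection shown here is illustrative, not necessarily how the fix was merged): keying the computed property on `i18n.locale` makes the title recompute when the locale changes, whereas the patch's `'i18n'` key never invalidates.

```js
import Ember from 'ember';
const { computed, inject } = Ember;

export default Ember.Route.extend({
  i18n: inject.service(),

  // 'i18n.locale' is the dependent key the reviewer asks for: switching
  // locales invalidates the property, so the title re-renders translated.
  pageTitle: computed('i18n.locale', function() {
    return this.get('i18n').t('admin.textReplacements.pageTitle');
  })
});
```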
@@ -1351,6 +1351,12 @@ func describeBPFTests(opts ...bpfTestOpt) bool { // Add a route to felix[1] to be able to reach the nodeport _, err = eth20.RunCmd("ip", "route", "add", felixes[1].IP+"/32", "via", "10.0.0.20") Expect(err).NotTo(HaveOccurred()) + // This multi-NIC scenario works only if the kernel's RPF check + // is not strict, so we need to override it for the test, and it must + // be set properly when the product is deployed. We rely on + // iptables to do the required check for us. + felixes[1].Exec("sysctl", "-w", "net.ipv4.conf.eth0.rp_filter=2") + felixes[1].Exec("sysctl", "-w", "net.ipv4.conf.eth20.rp_filter=2") }) By("setting up routes to .20 net on dest node to trigger RPF check", func() {
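For context on the comment in this diff: `rp_filter` is the kernel's reverse-path filter, where 0 disables source validation, 1 is strict mode (a packet must arrive on the interface the reply would leave from), and 2 is loose mode, which this multi-NIC test needs. A small standalone sketch of the override follows, as a hypothetical helper rather than the test suite's actual code.

```go
package main

import (
	"fmt"
	"os/exec"
)

// setLooseRPF sets loose reverse-path filtering (rp_filter=2) on one
// interface, equivalent to: sysctl -w net.ipv4.conf.<iface>.rp_filter=2
func setLooseRPF(iface string) error {
	key := fmt.Sprintf("net.ipv4.conf.%s.rp_filter=2", iface)
	return exec.Command("sysctl", "-w", key).Run()
}

func main() {
	// Mirrors the two interfaces overridden in the diff above.
	for _, iface := range []string{"eth0", "eth20"} {
		if err := setLooseRPF(iface); err != nil {
			fmt.Println("sysctl failed for", iface, ":", err)
		}
	}
}
```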
1
// Copyright (c) 2020 Tigera, Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // +build fvtests package fv_test import ( "context" "encoding/json" "fmt" "net" "os" "regexp" "sort" "strconv" "strings" "time" "github.com/davecgh/go-spew/spew" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" "github.com/pkg/errors" log "github.com/sirupsen/logrus" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/client-go/kubernetes" "github.com/projectcalico/libcalico-go/lib/apiconfig" api "github.com/projectcalico/libcalico-go/lib/apis/v3" client "github.com/projectcalico/libcalico-go/lib/clientv3" "github.com/projectcalico/libcalico-go/lib/ipam" cnet "github.com/projectcalico/libcalico-go/lib/net" "github.com/projectcalico/libcalico-go/lib/numorstring" options2 "github.com/projectcalico/libcalico-go/lib/options" "github.com/projectcalico/felix/bpf" "github.com/projectcalico/felix/bpf/conntrack" "github.com/projectcalico/felix/bpf/nat" . "github.com/projectcalico/felix/fv/connectivity" "github.com/projectcalico/felix/fv/containers" "github.com/projectcalico/felix/fv/infrastructure" "github.com/projectcalico/felix/fv/utils" "github.com/projectcalico/felix/fv/workload" ) // We run with and without connection-time load balancing for a couple of reasons: // - We can only test the non-connection time NAT logic (and node ports) with it disabled. // - Since the connection time program applies to the whole host, the different felix nodes actually share the // connection-time program. This is a bit of a broken test but it's better than nothing since all felix nodes // should be programming the same NAT mappings. var _ = describeBPFTests(withProto("tcp"), withConnTimeLoadBalancingEnabled(), withNonProtocolDependentTests()) var _ = describeBPFTests(withProto("udp"), withConnTimeLoadBalancingEnabled()) var _ = describeBPFTests(withProto("udp"), withConnTimeLoadBalancingEnabled(), withUDPUnConnected()) var _ = describeBPFTests(withProto("tcp")) var _ = describeBPFTests(withProto("udp")) var _ = describeBPFTests(withProto("udp"), withUDPUnConnected()) var _ = describeBPFTests(withProto("udp"), withUDPConnectedRecvMsg(), withConnTimeLoadBalancingEnabled()) var _ = describeBPFTests(withTunnel("ipip"), withProto("tcp"), withConnTimeLoadBalancingEnabled()) var _ = describeBPFTests(withTunnel("ipip"), withProto("udp"), withConnTimeLoadBalancingEnabled()) var _ = describeBPFTests(withTunnel("ipip"), withProto("tcp")) var _ = describeBPFTests(withTunnel("ipip"), withProto("udp")) var _ = describeBPFTests(withProto("tcp"), withDSR()) var _ = describeBPFTests(withProto("udp"), withDSR()) var _ = describeBPFTests(withTunnel("ipip"), withProto("tcp"), withDSR()) var _ = describeBPFTests(withTunnel("ipip"), withProto("udp"), withDSR()) // Run a stripe of tests with BPF logging disabled since the compiler tends to optimise the code differently // with debug disabled and that can lead to verifier issues. 
var _ = describeBPFTests(withProto("tcp"), withConnTimeLoadBalancingEnabled(), withBPFLogLevel("info")) type bpfTestOptions struct { connTimeEnabled bool protocol string udpUnConnected bool bpfLogLevel string tunnel string dsr bool udpConnRecvMsg bool nonProtoTests bool } type bpfTestOpt func(opts *bpfTestOptions) func withProto(proto string) bpfTestOpt { return func(opts *bpfTestOptions) { opts.protocol = proto } } func withConnTimeLoadBalancingEnabled() bpfTestOpt { return func(opts *bpfTestOptions) { opts.connTimeEnabled = true } } func withNonProtocolDependentTests() bpfTestOpt { return func(opts *bpfTestOptions) { opts.nonProtoTests = true } } func withBPFLogLevel(level string) bpfTestOpt { return func(opts *bpfTestOptions) { opts.bpfLogLevel = level } } func withTunnel(tunnel string) bpfTestOpt { return func(opts *bpfTestOptions) { opts.tunnel = tunnel } } func withUDPUnConnected() bpfTestOpt { return func(opts *bpfTestOptions) { opts.udpUnConnected = true } } func withDSR() bpfTestOpt { return func(opts *bpfTestOptions) { opts.dsr = true } } func withUDPConnectedRecvMsg() bpfTestOpt { return func(opts *bpfTestOptions) { opts.udpConnRecvMsg = true } } const expectedRouteDump = `10.65.0.0/16: remote in-pool nat-out 10.65.0.2/32: local workload in-pool nat-out idx - 10.65.0.3/32: local workload in-pool nat-out idx - 10.65.0.4/32: local workload in-pool nat-out idx - 10.65.1.0/26: remote workload in-pool nat-out nh FELIX_1 10.65.2.0/26: remote workload in-pool nat-out nh FELIX_2 FELIX_0/32: local host FELIX_1/32: remote host FELIX_2/32: remote host` const expectedRouteDumpIPIP = `10.65.0.0/16: remote in-pool nat-out 10.65.0.1/32: local host 10.65.0.2/32: local workload in-pool nat-out idx - 10.65.0.3/32: local workload in-pool nat-out idx - 10.65.0.4/32: local workload in-pool nat-out idx - 10.65.1.0/26: remote workload in-pool nat-out nh FELIX_1 10.65.2.0/26: remote workload in-pool nat-out nh FELIX_2 FELIX_0/32: local host FELIX_1/32: remote host FELIX_2/32: remote host` func describeBPFTests(opts ...bpfTestOpt) bool { testOpts := bpfTestOptions{ bpfLogLevel: "debug", tunnel: "none", } for _, o := range opts { o(&testOpts) } protoExt := "" if testOpts.udpUnConnected { protoExt = "-unconnected" } if testOpts.udpConnRecvMsg { protoExt = "-conn-recvmsg" } desc := fmt.Sprintf("_BPF_ _BPF-SAFE_ BPF tests (%s%s, ct=%v, log=%s, tunnel=%s, dsr=%v)", testOpts.protocol, protoExt, testOpts.connTimeEnabled, testOpts.bpfLogLevel, testOpts.tunnel, testOpts.dsr, ) return infrastructure.DatastoreDescribe(desc, []apiconfig.DatastoreType{apiconfig.Kubernetes}, func(getInfra infrastructure.InfraFactory) { var ( infra infrastructure.DatastoreInfra felixes []*infrastructure.Felix calicoClient client.Interface cc *Checker externalClient *containers.Container deadWorkload *workload.Workload bpfLog *containers.Container options infrastructure.TopologyOptions numericProto uint8 expectedRoutes string ) switch testOpts.protocol { case "tcp": numericProto = 6 case "udp": numericProto = 17 default: Fail("bad protocol option") } BeforeEach(func() { if os.Getenv("FELIX_FV_ENABLE_BPF") != "true" { Skip("Skipping BPF test in non-BPF run.") } bpfLog = containers.Run("bpf-log", containers.RunOpts{AutoRemove: true}, "--privileged", "calico/bpftool:v5.3-amd64", "/bpftool", "prog", "tracelog") infra = getInfra() cc = &Checker{ CheckSNAT: true, } cc.Protocol = testOpts.protocol if testOpts.protocol == "udp" && testOpts.udpUnConnected { cc.Protocol += "-noconn" } if testOpts.protocol == "udp" && testOpts.udpConnRecvMsg { 
cc.Protocol += "-recvmsg" } options = infrastructure.DefaultTopologyOptions() options.FelixLogSeverity = "debug" options.NATOutgoingEnabled = true switch testOpts.tunnel { case "none": options.IPIPEnabled = false options.IPIPRoutesEnabled = false expectedRoutes = expectedRouteDump case "ipip": options.IPIPEnabled = true options.IPIPRoutesEnabled = true expectedRoutes = expectedRouteDumpIPIP default: Fail("bad tunnel option") } options.ExtraEnvVars["FELIX_BPFConnectTimeLoadBalancingEnabled"] = fmt.Sprint(testOpts.connTimeEnabled) options.ExtraEnvVars["FELIX_BPFLogLevel"] = fmt.Sprint(testOpts.bpfLogLevel) if testOpts.dsr { options.ExtraEnvVars["FELIX_BPFExternalServiceMode"] = "dsr" } }) JustAfterEach(func() { if CurrentGinkgoTestDescription().Failed { currBpfsvcs, currBpfeps := dumpNATmaps(felixes) for i, felix := range felixes { felix.Exec("iptables-save", "-c") felix.Exec("ip", "r") felix.Exec("ip", "route", "show", "cached") felix.Exec("calico-bpf", "ipsets", "dump") felix.Exec("calico-bpf", "routes", "dump") felix.Exec("calico-bpf", "nat", "dump") felix.Exec("calico-bpf", "conntrack", "dump") log.Infof("[%d]FrontendMap: %+v", i, currBpfsvcs[i]) log.Infof("[%d]NATBackend: %+v", i, currBpfeps[i]) log.Infof("[%d]SendRecvMap: %+v", i, dumpSendRecvMap(felix)) } externalClient.Exec("ip", "route", "show", "cached") } }) AfterEach(func() { log.Info("AfterEach starting") for _, f := range felixes { f.Exec("calico-bpf", "connect-time", "clean") f.Stop() } infra.Stop() externalClient.Stop() bpfLog.Stop() log.Info("AfterEach done") }) createPolicy := func(policy *api.GlobalNetworkPolicy) *api.GlobalNetworkPolicy { log.WithField("policy", dumpResource(policy)).Info("Creating policy") policy, err := calicoClient.GlobalNetworkPolicies().Create(utils.Ctx, policy, utils.NoOptions) Expect(err).NotTo(HaveOccurred()) return policy } updatePolicy := func(policy *api.GlobalNetworkPolicy) *api.GlobalNetworkPolicy { log.WithField("policy", dumpResource(policy)).Info("Updating policy") policy, err := calicoClient.GlobalNetworkPolicies().Update(utils.Ctx, policy, utils.NoOptions) Expect(err).NotTo(HaveOccurred()) return policy } _ = updatePolicy Describe("with a single node and an allow-all policy", func() { var ( hostW *workload.Workload w [2]*workload.Workload ) if !testOpts.connTimeEnabled { // These tests don't depend on NAT. return } JustBeforeEach(func() { felixes, calicoClient = infrastructure.StartNNodeTopology(1, options, infra) hostW = workload.Run( felixes[0], "host", "default", felixes[0].IP, // Same IP as felix means "run in the host's namespace" "8055", testOpts.protocol) // Start a couple of workloads so we can check workload-to-workload and workload-to-host. 
for i := 0; i < 2; i++ { wIP := fmt.Sprintf("10.65.0.%d", i+2) w[i] = workload.Run(felixes[0], fmt.Sprintf("w%d", i), "default", wIP, "8055", testOpts.protocol) w[i].WorkloadEndpoint.Labels = map[string]string{"name": w[i].Name} w[i].ConfigureInDatastore(infra) } err := infra.AddDefaultDeny() Expect(err).NotTo(HaveOccurred()) pol := api.NewGlobalNetworkPolicy() pol.Namespace = "fv" pol.Name = "policy-1" pol.Spec.Ingress = []api.Rule{{Action: "Allow"}} pol.Spec.Egress = []api.Rule{{Action: "Allow"}} pol.Spec.Selector = "all()" pol = createPolicy(pol) }) Describe("with DefaultEndpointToHostAction=DROP", func() { BeforeEach(func() { options.ExtraEnvVars["FELIX_DefaultEndpointToHostAction"] = "DROP" }) It("should only allow traffic from workload to workload", func() { cc.ExpectSome(w[0], w[1]) cc.ExpectSome(w[1], w[0]) cc.ExpectNone(w[1], hostW) cc.ExpectSome(hostW, w[0]) cc.CheckConnectivity() }) }) getMapIDByPath := func(felix *infrastructure.Felix, filename string) (int, error) { out, err := felix.ExecOutput("bpftool", "map", "show", "pinned", filename, "-j") if err != nil { return 0, err } var mapMeta struct { ID int `json:"id"` Error string `json:"error"` } err = json.Unmarshal([]byte(out), &mapMeta) if err != nil { return 0, err } if mapMeta.Error != "" { return 0, errors.New(mapMeta.Error) } return mapMeta.ID, nil } mustGetMapIDByPath := func(felix *infrastructure.Felix, filename string) int { var mapID int Eventually(func() error { var err error mapID, err = getMapIDByPath(felix, filename) return err }, "5s").ShouldNot(HaveOccurred()) return mapID } Describe("with DefaultEndpointToHostAction=ACCEPT", func() { BeforeEach(func() { options.ExtraEnvVars["FELIX_DefaultEndpointToHostAction"] = "ACCEPT" }) It("should traffic from workload to workload and to/from host", func() { cc.ExpectSome(w[0], w[1]) cc.ExpectSome(w[1], w[0]) cc.ExpectSome(w[1], hostW) cc.ExpectSome(hostW, w[0]) cc.CheckConnectivity() }) }) if testOpts.protocol != "udp" { // No need to run these tests per-protocol. mapPath := conntrack.Map(&bpf.MapContext{}).Path() Describe("with map repinning enabled", func() { BeforeEach(func() { options.ExtraEnvVars["FELIX_DebugBPFMapRepinEnabled"] = "true" }) It("should repin maps", func() { // Wait for the first felix to create its maps. mapID := mustGetMapIDByPath(felixes[0], mapPath) // Now, start a completely independent felix, which will get its own bpffs. It should re-pin the // maps, picking up the ones from the first felix. extraFelix, _ := infrastructure.StartSingleNodeTopology(options, infra) defer extraFelix.Stop() secondMapID := mustGetMapIDByPath(extraFelix, mapPath) Expect(mapID).NotTo(BeNumerically("==", 0)) Expect(mapID).To(BeNumerically("==", secondMapID)) }) }) Describe("with map repinning disabled", func() { It("should repin maps", func() { // Wait for the first felix to create its maps. mapID := mustGetMapIDByPath(felixes[0], mapPath) // Now, start a completely independent felix, which will get its own bpffs. It should make its own // maps. extraFelix, _ := infrastructure.StartSingleNodeTopology(options, infra) defer extraFelix.Stop() secondMapID := mustGetMapIDByPath(extraFelix, mapPath) Expect(mapID).NotTo(BeNumerically("==", 0)) Expect(mapID).NotTo(BeNumerically("==", secondMapID)) }) }) } if testOpts.nonProtoTests { // We can only test that felix _sets_ this because the flag is one-way and cannot be unset. 
It("should enable the kernel.unprivileged_bpf_disabled sysctl", func() { Eventually(func() string { out, err := felixes[0].ExecOutput("sysctl", "kernel.unprivileged_bpf_disabled") if err != nil { log.WithError(err).Error("Failed to run sysctl") } return out }).Should(ContainSubstring("kernel.unprivileged_bpf_disabled = 1")) }) } }) const numNodes = 3 Describe(fmt.Sprintf("with a %d node cluster", numNodes), func() { var ( w [numNodes][2]*workload.Workload hostW [numNodes]*workload.Workload ) BeforeEach(func() { felixes, calicoClient = infrastructure.StartNNodeTopology(numNodes, options, infra) addWorkload := func(run bool, ii, wi, port int, labels map[string]string) *workload.Workload { if labels == nil { labels = make(map[string]string) } wIP := fmt.Sprintf("10.65.%d.%d", ii, wi+2) wName := fmt.Sprintf("w%d%d", ii, wi) w := workload.New(felixes[ii], wName, "default", wIP, strconv.Itoa(port), testOpts.protocol) if run { w.Start() } labels["name"] = w.Name w.WorkloadEndpoint.Labels = labels w.ConfigureInDatastore(infra) // Assign the workload's IP in IPAM, this will trigger calculation of routes. err := calicoClient.IPAM().AssignIP(context.Background(), ipam.AssignIPArgs{ IP: cnet.MustParseIP(wIP), HandleID: &w.Name, Attrs: map[string]string{ ipam.AttributeNode: felixes[ii].Hostname, }, Hostname: felixes[ii].Hostname, }) Expect(err).NotTo(HaveOccurred()) return w } // Start a host networked workload on each host for connectivity checks. for ii := range felixes { // We tell each host-networked workload to open: // TODO: Copied from another test // - its normal (uninteresting) port, 8055 // - port 2379, which is both an inbound and an outbound failsafe port // - port 22, which is an inbound failsafe port. // This allows us to test the interaction between do-not-track policy and failsafe // ports. hostW[ii] = workload.Run( felixes[ii], fmt.Sprintf("host%d", ii), "default", felixes[ii].IP, // Same IP as felix means "run in the host's namespace" "8055", testOpts.protocol) // Two workloads on each host so we can check the same host and other host cases. w[ii][0] = addWorkload(true, ii, 0, 8055, map[string]string{"port": "8055"}) w[ii][1] = addWorkload(true, ii, 1, 8056, nil) } // Create a workload on node 0 that does not run, but we can use it to set up paths deadWorkload = addWorkload(false, 0, 2, 8057, nil) // We will use this container to model an external client trying to connect into // workloads on a host. Create a route in the container for the workload CIDR. // TODO: Copied from another test externalClient = containers.Run("external-client", containers.RunOpts{AutoRemove: true}, "--privileged", // So that we can add routes inside the container. 
utils.Config.BusyboxImage, "/bin/sh", "-c", "sleep 1000") _ = externalClient err := infra.AddDefaultDeny() Expect(err).NotTo(HaveOccurred()) }) It("should have correct routes", func() { dumpRoutes := func() string { out, err := felixes[0].ExecOutput("calico-bpf", "routes", "dump") if err != nil { return fmt.Sprint(err) } lines := strings.Split(out, "\n") var filteredLines []string idxRE := regexp.MustCompile(`idx \d+`) for _, l := range lines { l = strings.TrimLeft(l, " ") if len(l) == 0 { continue } l = strings.ReplaceAll(l, felixes[0].IP, "FELIX_0") l = strings.ReplaceAll(l, felixes[1].IP, "FELIX_1") l = strings.ReplaceAll(l, felixes[2].IP, "FELIX_2") l = idxRE.ReplaceAllLiteralString(l, "idx -") filteredLines = append(filteredLines, l) } sort.Strings(filteredLines) return strings.Join(filteredLines, "\n") } Eventually(dumpRoutes).Should(Equal(expectedRoutes)) }) It("should only allow traffic from the local host by default", func() { // Same host, other workload. cc.ExpectNone(w[0][0], w[0][1]) cc.ExpectNone(w[0][1], w[0][0]) // Workloads on other host. cc.ExpectNone(w[0][0], w[1][0]) cc.ExpectNone(w[1][0], w[0][0]) // Hosts. cc.ExpectSome(felixes[0], w[0][0]) cc.ExpectNone(felixes[1], w[0][0]) cc.CheckConnectivity() }) Context("with a policy allowing ingress to w[0][0] from all workloads", func() { var ( pol *api.GlobalNetworkPolicy k8sClient *kubernetes.Clientset ) BeforeEach(func() { pol = api.NewGlobalNetworkPolicy() pol.Namespace = "fv" pol.Name = "policy-1" pol.Spec.Ingress = []api.Rule{ { Action: "Allow", Source: api.EntityRule{ Selector: "all()", }, }, } pol.Spec.Egress = []api.Rule{ { Action: "Allow", Source: api.EntityRule{ Selector: "all()", }, }, } pol.Spec.Selector = "all()" pol = createPolicy(pol) k8sClient = infra.(*infrastructure.K8sDatastoreInfra).K8sClient _ = k8sClient }) It("should handle NAT outgoing", func() { By("SNATting outgoing traffic with the flag set") cc.ExpectSNAT(w[0][0], felixes[0].IP, hostW[1]) cc.CheckConnectivity() if testOpts.tunnel == "none" { By("Leaving traffic alone with the flag clear") pool, err := calicoClient.IPPools().Get(context.TODO(), "test-pool", options2.GetOptions{}) Expect(err).NotTo(HaveOccurred()) pool.Spec.NATOutgoing = false pool, err = calicoClient.IPPools().Update(context.TODO(), pool, options2.SetOptions{}) Expect(err).NotTo(HaveOccurred()) cc.ResetExpectations() cc.ExpectSNAT(w[0][0], w[0][0].IP, hostW[1]) cc.CheckConnectivity() By("SNATting again with the flag set") pool.Spec.NATOutgoing = true pool, err = calicoClient.IPPools().Update(context.TODO(), pool, options2.SetOptions{}) Expect(err).NotTo(HaveOccurred()) cc.ResetExpectations() cc.ExpectSNAT(w[0][0], felixes[0].IP, hostW[1]) cc.CheckConnectivity() } }) It("connectivity from all workloads via workload 0's main IP", func() { cc.ExpectSome(w[0][1], w[0][0]) cc.ExpectSome(w[1][0], w[0][0]) cc.ExpectSome(w[1][1], w[0][0]) cc.CheckConnectivity() }) It("should not be able to spoof IP", func() { if testOpts.protocol != "udp" { return } By("allowing any traffic", func() { pol.Spec.Ingress = []api.Rule{ { Action: "Allow", Source: api.EntityRule{ Nets: []string{ "0.0.0.0/0", }, }, }, } pol = updatePolicy(pol) cc.ExpectSome(w[1][0], w[0][0]) cc.ExpectSome(w[1][1], w[0][0]) cc.CheckConnectivity() }) By("testing that packet sent by another workload is dropped", func() { tcpdump := w[0][0].AttachTCPDump() tcpdump.SetLogEnabled(true) matcher := fmt.Sprintf("IP %s\\.30444 > %s\\.30444: UDP", w[1][0].IP, w[0][0].IP) tcpdump.AddMatcher("UDP-30444", regexp.MustCompile(matcher)) 
tcpdump.Start(testOpts.protocol, "port", "30444", "or", "port", "30445") defer tcpdump.Stop() // send a packet from the correct workload to create a conntrack entry _, err := w[1][0].RunCmd("/pktgen", w[1][0].IP, w[0][0].IP, "udp", "--port-src", "30444", "--port-dst", "30444") Expect(err).NotTo(HaveOccurred()) // We must eventually see the packet at the target Eventually(func() int { return tcpdump.MatchCount("UDP-30444") }). Should(BeNumerically("==", 1), matcher) // Send a spoofed packet from a different pod. Since we hit the // conntrack we would not do the WEP only RPF check. _, err = w[1][1].RunCmd("/pktgen", w[1][0].IP, w[0][0].IP, "udp", "--port-src", "30444", "--port-dst", "30444") Expect(err).NotTo(HaveOccurred()) // Since the packet will get dropped, we would not see it at the dest. // So we send another good packet from the spoofing workload, that we // will see at the dest. matcher2 := fmt.Sprintf("IP %s\\.30445 > %s\\.30445: UDP", w[1][1].IP, w[0][0].IP) tcpdump.AddMatcher("UDP-30445", regexp.MustCompile(matcher2)) _, err = w[1][1].RunCmd("/pktgen", w[1][1].IP, w[0][0].IP, "udp", "--port-src", "30445", "--port-dst", "30445") Expect(err).NotTo(HaveOccurred()) // Wait for the good packet from the bad workload Eventually(func() int { return tcpdump.MatchCount("UDP-30445") }). Should(BeNumerically("==", 1), matcher2) // Check that we have not seen the spoofed packet. If there was not // packet reordering, which in out setup is guaranteed not to happen, // we know that the spoofed packet was dropped. Expect(tcpdump.MatchCount("UDP-30444")).To(BeNumerically("==", 1), matcher) }) var eth20, eth30 *workload.Workload defer func() { if eth20 != nil { eth20.Stop() } if eth30 != nil { eth30.Stop() } }() fakeWorkloadIP := "10.65.15.15" By("setting up node's fake external ifaces", func() { // We name the ifaces ethXY since such ifaces are // treated by felix as external to the node // // Using a test-workload creates the namespaces and the // interfaces to emulate the host NICs eth20 = &workload.Workload{ Name: "eth20", C: felixes[1].Container, IP: "192.168.20.1", Ports: "57005", // 0xdead Protocol: testOpts.protocol, InterfaceName: "eth20", } eth20.Start() // assign address to eth20 and add route to the .20 network felixes[1].Exec("ip", "route", "add", "192.168.20.0/24", "dev", "eth20") felixes[1].Exec("ip", "addr", "add", "10.0.0.20/32", "dev", "eth20") _, err := eth20.RunCmd("ip", "route", "add", "10.0.0.20/32", "dev", "eth0") Expect(err).NotTo(HaveOccurred()) // Add a route to the test workload to the fake external // client emulated by the test-workload _, err = eth20.RunCmd("ip", "route", "add", w[1][1].IP+"/32", "via", "10.0.0.20") Expect(err).NotTo(HaveOccurred()) eth30 = &workload.Workload{ Name: "eth30", C: felixes[1].Container, IP: "192.168.30.1", Ports: "57005", // 0xdead Protocol: testOpts.protocol, InterfaceName: "eth30", } eth30.Start() // assign address to eth30 and add route to the .30 network felixes[1].Exec("ip", "route", "add", "192.168.30.0/24", "dev", "eth30") felixes[1].Exec("ip", "addr", "add", "10.0.0.30/32", "dev", "eth30") _, err = eth30.RunCmd("ip", "route", "add", "10.0.0.30/32", "dev", "eth0") Expect(err).NotTo(HaveOccurred()) // Add a route to the test workload to the fake external // client emulated by the test-workload _, err = eth30.RunCmd("ip", "route", "add", w[1][1].IP+"/32", "via", "10.0.0.30") Expect(err).NotTo(HaveOccurred()) // Make sure that networking with the .20 and .30 networks works cc.ResetExpectations() cc.ExpectSome(w[1][1], 
TargetIP(eth20.IP), 0xdead) cc.ExpectSome(w[1][1], TargetIP(eth30.IP), 0xdead) cc.CheckConnectivity() }) By("testing that external traffic updates the RPF check if routing changes", func() { // set the route to the fake workload to .20 network felixes[1].Exec("ip", "route", "add", fakeWorkloadIP+"/32", "dev", "eth20") tcpdump := w[1][1].AttachTCPDump() tcpdump.SetLogEnabled(true) matcher := fmt.Sprintf("IP %s\\.30446 > %s\\.30446: UDP", fakeWorkloadIP, w[1][1].IP) tcpdump.AddMatcher("UDP-30446", regexp.MustCompile(matcher)) tcpdump.Start() defer tcpdump.Stop() _, err := eth20.RunCmd("/pktgen", fakeWorkloadIP, w[1][1].IP, "udp", "--port-src", "30446", "--port-dst", "30446") Expect(err).NotTo(HaveOccurred()) // Expect to receive the packet from the .20 as the routing is correct Eventually(func() int { return tcpdump.MatchCount("UDP-30446") }). Should(BeNumerically("==", 1), matcher) ctBefore := dumpCTMap(felixes[1]) k := conntrack.NewKey(17, net.ParseIP(w[1][1].IP).To4(), 30446, net.ParseIP(fakeWorkloadIP).To4(), 30446) Expect(ctBefore).To(HaveKey(k)) // XXX Since the same code is used to do the drop of spoofed // packet between pods, we do not repeat it here as it is not 100% // bulletproof. // // We should perhaps compare the iptables counter and see if the // packet was dropped by the RPF check. // Change the routing to be from the .30 felixes[1].Exec("ip", "route", "del", fakeWorkloadIP+"/32", "dev", "eth20") felixes[1].Exec("ip", "route", "add", fakeWorkloadIP+"/32", "dev", "eth30") _, err = eth30.RunCmd("/pktgen", fakeWorkloadIP, w[1][1].IP, "udp", "--port-src", "30446", "--port-dst", "30446") Expect(err).NotTo(HaveOccurred()) // Expect the packet from the .30 to make it through as RPF will // allow it and we will update the expected interface Eventually(func() int { return tcpdump.MatchCount("UDP-30446") }). Should(BeNumerically("==", 2), matcher) ctAfter := dumpCTMap(felixes[1]) Expect(ctAfter).To(HaveKey(k)) // Ifindex must have changed // B2A because of IPA > IPB - deterministic Expect(ctBefore[k].Data().B2A.Ifindex).NotTo(BeNumerically("==", 0)) Expect(ctAfter[k].Data().B2A.Ifindex).NotTo(BeNumerically("==", 0)) Expect(ctBefore[k].Data().B2A.Ifindex). NotTo(BeNumerically("==", ctAfter[k].Data().B2A.Ifindex)) }) }) Context("with test-service configured 10.101.0.10:80 -> w[0][0].IP:8055", func() { var ( testSvc *v1.Service testSvcNamespace string ) testSvcName := "test-service" tgtPort := 8055 BeforeEach(func() { testSvc = k8sService(testSvcName, "10.101.0.10", w[0][0], 80, tgtPort, 0, testOpts.protocol) testSvcNamespace = testSvc.ObjectMeta.Namespace _, err := k8sClient.CoreV1().Services(testSvcNamespace).Create(testSvc) Expect(err).NotTo(HaveOccurred()) Eventually(k8sGetEpsForServiceFunc(k8sClient, testSvc), "10s").Should(HaveLen(1), "Service endpoints didn't get created? 
Is controller-manager happy?") }) It("should have connectivity from all workloads via a service to workload 0", func() { ip := testSvc.Spec.ClusterIP port := uint16(testSvc.Spec.Ports[0].Port) cc.ExpectSome(w[0][1], TargetIP(ip), port) cc.ExpectSome(w[1][0], TargetIP(ip), port) cc.ExpectSome(w[1][1], TargetIP(ip), port) cc.CheckConnectivity() }) if testOpts.connTimeEnabled { It("workload should have connectivity to self via a service", func() { ip := testSvc.Spec.ClusterIP port := uint16(testSvc.Spec.Ports[0].Port) cc.ExpectSome(w[0][0], TargetIP(ip), port) cc.CheckConnectivity() }) It("should only have connectivity from from the local host via a service to workload 0", func() { // Local host is always white-listed (for kubelet health checks). ip := testSvc.Spec.ClusterIP port := uint16(testSvc.Spec.Ports[0].Port) cc.ExpectSome(felixes[0], TargetIP(ip), port) cc.ExpectNone(felixes[1], TargetIP(ip), port) cc.CheckConnectivity() }) } else { It("should not have connectivity from from the local host via a service to workload 0", func() { // Local host is always white-listed (for kubelet health checks). ip := testSvc.Spec.ClusterIP port := uint16(testSvc.Spec.Ports[0].Port) cc.ExpectNone(felixes[0], TargetIP(ip), port) cc.ExpectNone(felixes[1], TargetIP(ip), port) cc.CheckConnectivity() }) } if testOpts.connTimeEnabled { Describe("after updating the policy to allow traffic from hosts", func() { BeforeEach(func() { pol.Spec.Ingress = []api.Rule{ { Action: "Allow", Source: api.EntityRule{ Nets: []string{ felixes[0].IP + "/32", felixes[1].IP + "/32", }, }, }, } switch testOpts.tunnel { case "ipip": pol.Spec.Ingress[0].Source.Nets = append(pol.Spec.Ingress[0].Source.Nets, felixes[0].ExpectedIPIPTunnelAddr+"/32", felixes[1].ExpectedIPIPTunnelAddr+"/32", ) } pol = updatePolicy(pol) }) It("should have connectivity from the hosts via a service to workload 0", func() { ip := testSvc.Spec.ClusterIP port := uint16(testSvc.Spec.Ports[0].Port) cc.ExpectSome(felixes[0], TargetIP(ip), port) cc.ExpectSome(felixes[1], TargetIP(ip), port) cc.ExpectNone(w[0][1], TargetIP(ip), port) cc.ExpectNone(w[1][0], TargetIP(ip), port) cc.CheckConnectivity() }) }) } It("should create sane conntrack entries and clean them up", func() { By("Generating some traffic") ip := testSvc.Spec.ClusterIP port := uint16(testSvc.Spec.Ports[0].Port) cc.ExpectSome(w[0][1], TargetIP(ip), port) cc.ExpectSome(w[1][0], TargetIP(ip), port) cc.CheckConnectivity() By("Checking timestamps on conntrack entries are sane") // This test verifies that we correctly interpret conntrack entry timestamps by reading them back // and checking that they're (a) in the past and (b) sensibly recent. ctDump, err := felixes[0].ExecOutput("calico-bpf", "conntrack", "dump") Expect(err).NotTo(HaveOccurred()) re := regexp.MustCompile(`LastSeen:\s*(\d+)`) matches := re.FindAllStringSubmatch(ctDump, -1) Expect(matches).ToNot(BeEmpty(), "didn't find any conntrack entries") for _, match := range matches { lastSeenNanos, err := strconv.ParseInt(match[1], 10, 64) Expect(err).NotTo(HaveOccurred()) nowNanos := bpf.KTimeNanos() age := time.Duration(nowNanos - lastSeenNanos) Expect(age).To(BeNumerically(">", 0)) Expect(age).To(BeNumerically("<", 60*time.Second)) } By("Checking conntrack entries are cleaned up") // We have UTs that check that all kinds of entries eventually get cleaned up. This // test is mainly to check that the cleanup code actually runs and is able to actually delete // entries. 
numWl0ConntrackEntries := func() int { ctDump, err := felixes[0].ExecOutput("calico-bpf", "conntrack", "dump") Expect(err).NotTo(HaveOccurred()) return strings.Count(ctDump, w[0][0].IP) } startingCTEntries := numWl0ConntrackEntries() Expect(startingCTEntries).To(BeNumerically(">", 0)) // TODO reduce timeouts just for this test. Eventually(numWl0ConntrackEntries, "180s", "5s").Should(BeNumerically("<", startingCTEntries)) }) Context("with test-service port updated", func() { var ( testSvcUpdated *v1.Service natBackBeforeUpdate []nat.BackendMapMem natBeforeUpdate []nat.MapMem ) BeforeEach(func() { ip := testSvc.Spec.ClusterIP portOld := uint16(testSvc.Spec.Ports[0].Port) ipv4 := net.ParseIP(ip) oldK := nat.NewNATKey(ipv4, portOld, numericProto) // Wait for the NAT maps to converge... log.Info("Waiting for NAT maps to converge...") startTime := time.Now() for { if time.Since(startTime) > 5*time.Second { Fail("NAT maps failed to converge") } natBeforeUpdate, natBackBeforeUpdate = dumpNATmaps(felixes) for i, m := range natBeforeUpdate { if natV, ok := m[oldK]; !ok { goto retry } else { bckCnt := natV.Count() if bckCnt != 1 { log.Debugf("Expected single backend, not %d; retrying...", bckCnt) goto retry } bckID := natV.ID() bckK := nat.NewNATBackendKey(bckID, 0) if _, ok := natBackBeforeUpdate[i][bckK]; !ok { log.Debugf("Backend not found %v; retrying...", bckK) goto retry } } } break retry: time.Sleep(100 * time.Millisecond) } log.Info("NAT maps converged.") testSvcUpdated = k8sService(testSvcName, "10.101.0.10", w[0][0], 88, 8055, 0, testOpts.protocol) svc, err := k8sClient.CoreV1(). Services(testSvcNamespace). Get(testSvcName, metav1.GetOptions{}) testSvcUpdated.ObjectMeta.ResourceVersion = svc.ObjectMeta.ResourceVersion _, err = k8sClient.CoreV1().Services(testSvcNamespace).Update(testSvcUpdated) Expect(err).NotTo(HaveOccurred()) Eventually(k8sGetEpsForServiceFunc(k8sClient, testSvc), "10s").Should(HaveLen(1), "Service endpoints didn't get created? 
Is controller-manager happy?") }) It("should have connectivity from all workloads via the new port", func() { ip := testSvcUpdated.Spec.ClusterIP port := uint16(testSvcUpdated.Spec.Ports[0].Port) cc.ExpectSome(w[0][1], TargetIP(ip), port) cc.ExpectSome(w[1][0], TargetIP(ip), port) cc.ExpectSome(w[1][1], TargetIP(ip), port) cc.CheckConnectivity() }) It("should not have connectivity from all workloads via the old port", func() { ip := testSvc.Spec.ClusterIP port := uint16(testSvc.Spec.Ports[0].Port) cc.ExpectNone(w[0][1], TargetIP(ip), port) cc.ExpectNone(w[1][0], TargetIP(ip), port) cc.ExpectNone(w[1][1], TargetIP(ip), port) cc.CheckConnectivity() natmaps, natbacks := dumpNATmaps(felixes) ipv4 := net.ParseIP(ip) portOld := uint16(testSvc.Spec.Ports[0].Port) oldK := nat.NewNATKey(ipv4, portOld, numericProto) portNew := uint16(testSvcUpdated.Spec.Ports[0].Port) natK := nat.NewNATKey(ipv4, portNew, numericProto) for i := range felixes { Expect(natmaps[i]).To(HaveKey(natK)) Expect(natmaps[i]).NotTo(HaveKey(nat.NewNATKey(ipv4, portOld, numericProto))) Expect(natBeforeUpdate[i]).To(HaveKey(oldK)) oldV := natBeforeUpdate[i][oldK] natV := natmaps[i][natK] bckCnt := natV.Count() bckID := natV.ID() log.WithField("backCnt", bckCnt).Debug("Backend count.") for ord := uint32(0); ord < uint32(bckCnt); ord++ { bckK := nat.NewNATBackendKey(bckID, ord) oldBckK := nat.NewNATBackendKey(oldV.ID(), ord) Expect(natbacks[i]).To(HaveKey(bckK)) Expect(natBackBeforeUpdate[i]).To(HaveKey(oldBckK)) Expect(natBackBeforeUpdate[i][oldBckK]).To(Equal(natbacks[i][bckK])) } } }) It("after removing service, should not have connectivity from workloads via a service to workload 0", func() { ip := testSvcUpdated.Spec.ClusterIP port := uint16(testSvcUpdated.Spec.Ports[0].Port) natK := nat.NewNATKey(net.ParseIP(ip), port, numericProto) var prevBpfsvcs []nat.MapMem Eventually(func() bool { prevBpfsvcs, _ = dumpNATmaps(felixes) for _, m := range prevBpfsvcs { if _, ok := m[natK]; !ok { return false } } return true }, "5s").Should(BeTrue(), "service NAT key didn't show up") err := k8sClient.CoreV1(). Services(testSvcNamespace). 
Delete(testSvcName, &metav1.DeleteOptions{}) Expect(err).NotTo(HaveOccurred()) Eventually(k8sGetEpsForServiceFunc(k8sClient, testSvc), "10s").Should(HaveLen(0)) cc.ExpectNone(w[0][1], TargetIP(ip), port) cc.ExpectNone(w[1][0], TargetIP(ip), port) cc.ExpectNone(w[1][1], TargetIP(ip), port) cc.CheckConnectivity() for i, f := range felixes { natV := prevBpfsvcs[i][natK] bckCnt := natV.Count() bckID := natV.ID() Eventually(func() bool { svcs := dumpNATMap(f) eps := dumpEPMap(f) if _, ok := svcs[natK]; ok { return false } for ord := uint32(0); ord < uint32(bckCnt); ord++ { bckK := nat.NewNATBackendKey(bckID, ord) if _, ok := eps[bckK]; ok { return false } } return true }, "5s").Should(BeTrue(), "service NAT key wasn't removed correctly") } }) }) }) Context("with test-service configured 10.101.0.10:80 -> w[*][0].IP:8055 and affinity", func() { var ( testSvc *v1.Service testSvcNamespace string ) testSvcName := "test-service" BeforeEach(func() { testSvc = k8sService(testSvcName, "10.101.0.10", w[0][0], 80, 8055, 0, testOpts.protocol) testSvcNamespace = testSvc.ObjectMeta.Namespace // select all pods with port 8055 testSvc.Spec.Selector = map[string]string{"port": "8055"} testSvc.Spec.SessionAffinity = "ClientIP" _, err := k8sClient.CoreV1().Services(testSvcNamespace).Create(testSvc) Expect(err).NotTo(HaveOccurred()) Eventually(k8sGetEpsForServiceFunc(k8sClient, testSvc), "10s").Should(HaveLen(1), "Service endpoints didn't get created? Is controller-manager happy?") }) It("should have connectivity from a workload to a service with multiple backends", func() { ip := testSvc.Spec.ClusterIP port := uint16(testSvc.Spec.Ports[0].Port) cc.ExpectSome(w[1][1], TargetIP(ip), port) cc.ExpectSome(w[1][1], TargetIP(ip), port) cc.ExpectSome(w[1][1], TargetIP(ip), port) cc.CheckConnectivity() if !testOpts.connTimeEnabled { // FIXME we can only do the test with regular NAT as // cgroup shares one random affinity map aff := dumpAffMap(felixes[1]) Expect(aff).To(HaveLen(1)) } }) }) npPort := uint16(30333) nodePortsTest := func(localOnly bool) { var ( testSvc *v1.Service testSvcNamespace string ) testSvcName := "test-service" BeforeEach(func() { k8sClient := infra.(*infrastructure.K8sDatastoreInfra).K8sClient testSvc = k8sService(testSvcName, "10.101.0.10", w[0][0], 80, 8055, int32(npPort), testOpts.protocol) if localOnly { testSvc.Spec.ExternalTrafficPolicy = "Local" } testSvcNamespace = testSvc.ObjectMeta.Namespace _, err := k8sClient.CoreV1().Services(testSvcNamespace).Create(testSvc) Expect(err).NotTo(HaveOccurred()) Eventually(k8sGetEpsForServiceFunc(k8sClient, testSvc), "10s").Should(HaveLen(1), "Service endpoints didn't get created? Is controller-manager happy?") }) It("should have connectivity from all workloads via a service to workload 0", func() { clusterIP := testSvc.Spec.ClusterIP port := uint16(testSvc.Spec.Ports[0].Port) cc.ExpectSome(w[0][1], TargetIP(clusterIP), port) cc.ExpectSome(w[1][0], TargetIP(clusterIP), port) cc.ExpectSome(w[1][1], TargetIP(clusterIP), port) cc.CheckConnectivity() }) if localOnly { It("should not have connectivity from all workloads via a nodeport to non-local workload 0", func() { node0IP := felixes[0].IP node1IP := felixes[1].IP // Via remote nodeport, should fail. cc.ExpectNone(w[0][1], TargetIP(node1IP), npPort) cc.ExpectNone(w[1][0], TargetIP(node1IP), npPort) cc.ExpectNone(w[1][1], TargetIP(node1IP), npPort) // Include a check that goes via the local nodeport to make sure the dataplane has converged. 
cc.ExpectSome(w[0][1], TargetIP(node0IP), npPort) cc.CheckConnectivity() }) } else { It("should have connectivity from all workloads via a nodeport to workload 0", func() { node1IP := felixes[1].IP cc.ExpectSome(w[0][1], TargetIP(node1IP), npPort) cc.ExpectSome(w[1][0], TargetIP(node1IP), npPort) cc.ExpectSome(w[1][1], TargetIP(node1IP), npPort) cc.CheckConnectivity() }) } if !localOnly { It("should have connectivity from a workload via a nodeport on another node to workload 0", func() { ip := felixes[1].IP cc.ExpectSome(w[2][1], TargetIP(ip), npPort) cc.CheckConnectivity() }) } It("workload should have connectivity to self via local/remote node", func() { if !testOpts.connTimeEnabled { Skip("FIXME pod cannot connect to self without connect time lb") } cc.ExpectSome(w[0][0], TargetIP(felixes[1].IP), npPort) cc.ExpectSome(w[0][0], TargetIP(felixes[0].IP), npPort) cc.CheckConnectivity() }) It("should not have connectivity from external to w[0] via local/remote node", func() { cc.ExpectNone(externalClient, TargetIP(felixes[1].IP), npPort) cc.ExpectNone(externalClient, TargetIP(felixes[0].IP), npPort) // Include a check that goes via the local nodeport to make sure the dataplane has converged. cc.ExpectSome(w[0][1], TargetIP(felixes[0].IP), npPort) cc.CheckConnectivity() }) Describe("after updating the policy to allow traffic from externalClient", func() { BeforeEach(func() { pol.Spec.Ingress = []api.Rule{ { Action: "Allow", Source: api.EntityRule{ Nets: []string{ externalClient.IP + "/32", }, }, }, } pol = updatePolicy(pol) }) if localOnly { It("should not have connectivity from external to w[0] via node1->node0 fwd", func() { if testOpts.connTimeEnabled { Skip("FIXME externalClient also does conntime balancing") } cc.ExpectNone(externalClient, TargetIP(felixes[1].IP), npPort) // Include a check that goes via the nodeport with a local backing pod to make sure the dataplane has converged. cc.ExpectSome(externalClient, TargetIP(felixes[0].IP), npPort) cc.CheckConnectivity() }) } else { It("should have connectivity from external to w[0] via node1->node0 fwd", func() { if testOpts.connTimeEnabled { Skip("FIXME externalClient also does conntime balancing") } cc.ExpectSome(externalClient, TargetIP(felixes[1].IP), npPort) cc.CheckConnectivity() }) It("should have connectivity from external to w[0] via node1IP2 -> nodeIP1 -> node0 fwd", func() { // 192.168.20.1 +----------|---------+ // | | | | // v | | V // eth20 eth0 | eth0 // 10.0.0.20:30333 --> felixes[1].IP | felixes[0].IP // | | // | V // | caliXYZ // | w[0][0].IP:8055 // | // node1 | node0 if testOpts.dsr { return // When DSR is enabled, we need to have away how to pass the // original traffic back. 
// // felixes[0].Exec("ip", "route", "add", "192.168.20.0/24", "via", felixes[1].IP) // // This does not work since the other node would treat it as // DNAT due to the existing CT entries and NodePort traffic // otherwise :-/ } if testOpts.connTimeEnabled { Skip("FIXME externalClient also does conntime balancing") } var eth20 *workload.Workload defer func() { if eth20 != nil { eth20.Stop() } }() By("setting up node's fake external iface", func() { // We name the iface eth20 since such ifaces are // treated by felix as external to the node // // Using a test-workload creates the namespaces and the // interfaces to emulate the host NICs eth20 = &workload.Workload{ Name: "eth20", C: felixes[1].Container, IP: "192.168.20.1", Ports: "57005", // 0xdead Protocol: testOpts.protocol, InterfaceName: "eth20", } eth20.Start() // assign address to eth20 and add route to the .20 network felixes[1].Exec("ip", "route", "add", "192.168.20.0/24", "dev", "eth20") felixes[1].Exec("ip", "addr", "add", "10.0.0.20/32", "dev", "eth20") _, err := eth20.RunCmd("ip", "route", "add", "10.0.0.20/32", "dev", "eth0") Expect(err).NotTo(HaveOccurred()) // Add a route to felix[1] to be able to reach the nodeport _, err = eth20.RunCmd("ip", "route", "add", felixes[1].IP+"/32", "via", "10.0.0.20") Expect(err).NotTo(HaveOccurred()) }) By("setting up routes to .20 net on dest node to trigger RPF check", func() { // set up a dummy interface just for the routing purpose felixes[0].Exec("ip", "link", "add", "dummy1", "type", "dummy") felixes[0].Exec("ip", "link", "set", "dummy1", "up") // set up route to the .20 net through the dummy iface. This // makes the .20 a universally reachable external world from the // internal/private eth0 network felixes[0].Exec("ip", "route", "add", "192.168.20.0/24", "dev", "dummy1") // This multi-NIC scenario works only if the kernel's RPF check // is not strict, so we need to override it for the test, and it must // be set properly when the product is deployed. We rely on // iptables to do the required check for us.
felixes[0].Exec("sysctl", "-w", "net.ipv4.conf.eth0.rp_filter=2") felixes[0].Exec("sysctl", "-w", "net.ipv4.conf.dummy1.rp_filter=2") }) By("Allowing traffic from the eth20 network", func() { pol.Spec.Ingress = []api.Rule{ { Action: "Allow", Source: api.EntityRule{ Nets: []string{ eth20.IP + "/32", }, }, }, } pol = updatePolicy(pol) }) By("Checking that there is connectivity from eth20 network", func() { cc.ExpectSome(eth20, TargetIP(felixes[1].IP), npPort) cc.CheckConnectivity() }) }) if testOpts.protocol == "tcp" && !testOpts.connTimeEnabled { const ( npEncapOverhead = 50 hostIfaceMTU = 1500 podIfaceMTU = 1410 sendLen = hostIfaceMTU recvLen = podIfaceMTU - npEncapOverhead ) Context("with TCP, tx/rx close to MTU size on NP via node1->node0 ", func() { negative := "" adjusteMTU := podIfaceMTU - npEncapOverhead if testOpts.dsr { negative = "not " adjusteMTU = 0 } It("should "+negative+"adjust MTU on workload side", func() { // force non-GSO packets when workload replies _, err := w[0][0].RunCmd("ethtool", "-K", "eth0", "gso", "off") Expect(err).NotTo(HaveOccurred()) _, err = w[0][0].RunCmd("ethtool", "-K", "eth0", "tso", "off") Expect(err).NotTo(HaveOccurred()) pmtu, err := w[0][0].PathMTU(externalClient.IP) Expect(err).NotTo(HaveOccurred()) Expect(pmtu).To(Equal(0)) // nothing specific for this path yet port := []uint16{npPort} cc.ExpectConnectivity(externalClient, TargetIP(felixes[1].IP), port, ExpectWithSendLen(sendLen), ExpectWithRecvLen(recvLen), ExpectWithClientAdjustedMTU(hostIfaceMTU, hostIfaceMTU), ) cc.CheckConnectivity() pmtu, err = w[0][0].PathMTU(externalClient.IP) Expect(err).NotTo(HaveOccurred()) Expect(pmtu).To(Equal(adjusteMTU)) }) It("should not adjust MTU on client side if GRO off on nodes", func() { // force non-GSO packets on node ingress err := felixes[1].ExecMayFail("ethtool", "-K", "eth0", "gro", "off") Expect(err).NotTo(HaveOccurred()) port := []uint16{npPort} cc.ExpectConnectivity(externalClient, TargetIP(felixes[1].IP), port, ExpectWithSendLen(sendLen), ExpectWithRecvLen(recvLen), ExpectWithClientAdjustedMTU(hostIfaceMTU, hostIfaceMTU), ) cc.CheckConnectivity() }) }) } } It("should have connectivity from external to w[0] via node0", func() { if testOpts.connTimeEnabled { Skip("FIXME externalClient also does conntime balancing") } log.WithFields(log.Fields{ "externalClientIP": externalClient.IP, "nodePortIP": felixes[1].IP, }).Infof("external->nodeport connection") cc.ExpectSome(externalClient, TargetIP(felixes[0].IP), npPort) cc.CheckConnectivity() }) }) } Context("with test-service being a nodeport @ "+strconv.Itoa(int(npPort)), func() { nodePortsTest(false) }) // FIXME connect time shares the same NAT table and it is a lottery which one it gets if !testOpts.connTimeEnabled { Context("with test-service being a nodeport @ "+strconv.Itoa(int(npPort))+ " ExternalTrafficPolicy=local", func() { nodePortsTest(true) }) } Context("with icmp blocked from workloads, external client", func() { var ( testSvc *v1.Service testSvcNamespace string ) testSvcName := "test-service" BeforeEach(func() { icmpProto := numorstring.ProtocolFromString("icmp") pol.Spec.Ingress = []api.Rule{ { Action: "Allow", Source: api.EntityRule{ Nets: []string{"0.0.0.0/0"}, }, }, } pol.Spec.Egress = []api.Rule{ { Action: "Allow", Source: api.EntityRule{ Nets: []string{"0.0.0.0/0"}, }, }, { Action: "Deny", Protocol: &icmpProto, }, } pol = updatePolicy(pol) }) var tgtPort int var tgtWorkload *workload.Workload JustBeforeEach(func() { k8sClient := infra.(*infrastructure.K8sDatastoreInfra).K8sClient 
testSvc = k8sService(testSvcName, "10.101.0.10", tgtWorkload, 80, tgtPort, int32(npPort), testOpts.protocol) testSvcNamespace = testSvc.ObjectMeta.Namespace _, err := k8sClient.CoreV1().Services(testSvcNamespace).Create(testSvc) Expect(err).NotTo(HaveOccurred()) Eventually(k8sGetEpsForServiceFunc(k8sClient, testSvc), "10s").Should(HaveLen(1), "Service endpoints didn't get created? Is controller-manager happy?") // sync with NAT table being applied natFtKey := nat.NewNATKey(net.ParseIP(felixes[1].IP), npPort, numericProto) Eventually(func() bool { m := dumpNATMap(felixes[1]) v, ok := m[natFtKey] return ok && v.Count() > 0 }, 5*time.Second).Should(BeTrue()) // Sync with policy cc.ExpectSome(w[1][0], w[0][0]) cc.CheckConnectivity() }) Describe("with dead workload", func() { BeforeEach(func() { tgtPort = 8057 tgtWorkload = deadWorkload }) It("should get host unreachable from nodeport via node1->node0 fwd", func() { if testOpts.connTimeEnabled { Skip("FIXME externalClient also does conntime balancing") } err := felixes[0].ExecMayFail("ip", "route", "add", "unreachable", deadWorkload.IP) Expect(err).NotTo(HaveOccurred()) tcpdump := externalClient.AttachTCPDump("any") tcpdump.SetLogEnabled(true) matcher := fmt.Sprintf("IP %s > %s: ICMP host %s unreachable", felixes[1].IP, externalClient.IP, felixes[1].IP) tcpdump.AddMatcher("ICMP", regexp.MustCompile(matcher)) tcpdump.Start(testOpts.protocol, "port", strconv.Itoa(int(npPort)), "or", "icmp") defer tcpdump.Stop() cc.ExpectNone(externalClient, TargetIP(felixes[1].IP), npPort) cc.CheckConnectivity() Eventually(func() int { return tcpdump.MatchCount("ICMP") }). Should(BeNumerically(">", 0), matcher) }) }) Describe("with wrong target port", func() { // TCP would send RST instead of ICMP, it is enough to test one way of // triggering the ICMP message if testOpts.protocol != "udp" { return } BeforeEach(func() { tgtPort = 0xdead tgtWorkload = w[0][0] }) It("should get port unreachable via node1->node0 fwd", func() { if testOpts.connTimeEnabled { Skip("FIXME externalClient also does conntime balancing") } tcpdump := externalClient.AttachTCPDump("any") tcpdump.SetLogEnabled(true) matcher := fmt.Sprintf("IP %s > %s: ICMP %s udp port %d unreachable", felixes[1].IP, externalClient.IP, felixes[1].IP, npPort) tcpdump.AddMatcher("ICMP", regexp.MustCompile(matcher)) tcpdump.Start(testOpts.protocol, "port", strconv.Itoa(int(npPort)), "or", "icmp") defer tcpdump.Stop() cc.ExpectNone(externalClient, TargetIP(felixes[1].IP), npPort) cc.CheckConnectivity() Eventually(func() int { return tcpdump.MatchCount("ICMP") }). Should(BeNumerically(">", 0), matcher) }) It("should get port unreachable workload to workload", func() { tcpdump := w[1][1].AttachTCPDump() tcpdump.SetLogEnabled(true) matcher := fmt.Sprintf("IP %s > %s: ICMP %s udp port %d unreachable", tgtWorkload.IP, w[1][1].IP, tgtWorkload.IP, tgtPort) tcpdump.AddMatcher("ICMP", regexp.MustCompile(matcher)) tcpdump.Start(testOpts.protocol, "port", strconv.Itoa(tgtPort), "or", "icmp") defer tcpdump.Stop() cc.ExpectNone(w[1][1], TargetIP(tgtWorkload.IP), uint16(tgtPort)) cc.CheckConnectivity() Eventually(func() int { return tcpdump.MatchCount("ICMP") }). 
Should(BeNumerically(">", 0), matcher) }) It("should get port unreachable workload to workload through NP", func() { tcpdump := w[1][1].AttachTCPDump() tcpdump.SetLogEnabled(true) var matcher string if testOpts.connTimeEnabled { matcher = fmt.Sprintf("IP %s > %s: ICMP %s udp port %d unreachable", tgtWorkload.IP, w[1][1].IP, w[0][0].IP, tgtPort) tcpdump.AddMatcher("ICMP", regexp.MustCompile(matcher)) tcpdump.Start(testOpts.protocol, "port", strconv.Itoa(tgtPort), "or", "icmp") } else { matcher = fmt.Sprintf("IP %s > %s: ICMP %s udp port %d unreachable", tgtWorkload.IP, w[1][1].IP, felixes[1].IP, npPort) tcpdump.AddMatcher("ICMP", regexp.MustCompile(matcher)) tcpdump.Start(testOpts.protocol, "port", strconv.Itoa(int(npPort)), "or", "icmp") } defer tcpdump.Stop() cc.ExpectNone(w[1][1], TargetIP(felixes[1].IP), npPort) cc.CheckConnectivity() Eventually(func() int { return tcpdump.MatchCount("ICMP") }). Should(BeNumerically(">", 0), matcher) }) }) }) }) }) }) } func typeMetaV1(kind string) metav1.TypeMeta { return metav1.TypeMeta{ Kind: kind, APIVersion: "v1", } } func objectMetaV1(name string) metav1.ObjectMeta { return metav1.ObjectMeta{ Name: name, Namespace: "default", } } func dumpNATmaps(felixes []*infrastructure.Felix) ([]nat.MapMem, []nat.BackendMapMem) { bpfsvcs := make([]nat.MapMem, len(felixes)) bpfeps := make([]nat.BackendMapMem, len(felixes)) for i, felix := range felixes { bpfsvcs[i], bpfeps[i] = dumpNATMaps(felix) } return bpfsvcs, bpfeps } func dumpNATMaps(felix *infrastructure.Felix) (nat.MapMem, nat.BackendMapMem) { return dumpNATMap(felix), dumpEPMap(felix) } func dumpBPFMap(felix *infrastructure.Felix, m bpf.Map, iter bpf.MapIter) { // Wait for the map to exist before trying to access it. Otherwise, we // might fail a test that was retrying this dump anyway. Eventually(func() bool { return felix.FileExists(m.Path()) }).Should(BeTrue(), fmt.Sprintf("dumpBPFMap: map %s didn't show up inside container", m.Path())) cmd, err := bpf.DumpMapCmd(m) Expect(err).NotTo(HaveOccurred(), "Failed to get BPF map dump command: "+m.Path()) log.WithField("cmd", cmd).Debug("dumpBPFMap") out, err := felix.ExecOutput(cmd...) 
Expect(err).NotTo(HaveOccurred(), "Failed to get dump BPF map: "+m.Path()) err = bpf.IterMapCmdOutput([]byte(out), iter) Expect(err).NotTo(HaveOccurred(), "Failed to parse BPF map dump: "+m.Path()) } func dumpNATMap(felix *infrastructure.Felix) nat.MapMem { bm := nat.FrontendMap(&bpf.MapContext{}) m := make(nat.MapMem) dumpBPFMap(felix, bm, nat.MapMemIter(m)) return m } func dumpEPMap(felix *infrastructure.Felix) nat.BackendMapMem { bm := nat.BackendMap(&bpf.MapContext{}) m := make(nat.BackendMapMem) dumpBPFMap(felix, bm, nat.BackendMapMemIter(m)) return m } func dumpAffMap(felix *infrastructure.Felix) nat.AffinityMapMem { bm := nat.AffinityMap(&bpf.MapContext{}) m := make(nat.AffinityMapMem) dumpBPFMap(felix, bm, nat.AffinityMapMemIter(m)) return m } func dumpCTMap(felix *infrastructure.Felix) conntrack.MapMem { bm := conntrack.Map(&bpf.MapContext{}) m := make(conntrack.MapMem) dumpBPFMap(felix, bm, conntrack.MapMemIter(m)) return m } func dumpSendRecvMap(felix *infrastructure.Felix) nat.SendRecvMsgMapMem { bm := nat.SendRecvMsgMap(&bpf.MapContext{}) m := make(nat.SendRecvMsgMapMem) dumpBPFMap(felix, bm, nat.SendRecvMsgMapMemIter(m)) return m } func k8sService(name, clusterIP string, w *workload.Workload, port, tgtPort int, nodePort int32, protocol string) *v1.Service { k8sProto := v1.ProtocolTCP if protocol == "udp" { k8sProto = v1.ProtocolUDP } svcType := v1.ServiceTypeClusterIP if nodePort != 0 { svcType = v1.ServiceTypeNodePort } return &v1.Service{ TypeMeta: typeMetaV1("Service"), ObjectMeta: objectMetaV1(name), Spec: v1.ServiceSpec{ ClusterIP: clusterIP, Type: svcType, Selector: map[string]string{ "name": w.Name, }, Ports: []v1.ServicePort{ { Protocol: k8sProto, Port: int32(port), NodePort: nodePort, Name: fmt.Sprintf("port-%d", tgtPort), TargetPort: intstr.FromInt(tgtPort), }, }, }, } } func k8sGetEpsForService(k8s kubernetes.Interface, svc *v1.Service) []v1.EndpointSubset { ep, _ := k8s.CoreV1(). Endpoints(svc.ObjectMeta.Namespace). Get(svc.ObjectMeta.Name, metav1.GetOptions{}) log.WithField("endpoints", spew.Sprint(ep)).Infof("Got endpoints for %s", svc.ObjectMeta.Name) return ep.Subsets } func k8sGetEpsForServiceFunc(k8s kubernetes.Interface, svc *v1.Service) func() []v1.EndpointSubset { return func() []v1.EndpointSubset { return k8sGetEpsForService(k8s, svc) } }
1
17,879
Can you explain to me what goes wrong here? Can the test be adjusted to set up working routing instead?
projectcalico-felix
c
@@ -32,10 +32,10 @@ import org.openqa.selenium.remote.tracing.HttpTracing; import org.openqa.selenium.remote.tracing.Tracer; import java.net.URL; +import java.util.Objects; import java.util.UUID; import java.util.logging.Logger; -import static org.openqa.selenium.net.Urls.fromUri; import static org.openqa.selenium.remote.http.Contents.asJson; import static org.openqa.selenium.remote.http.HttpMethod.DELETE; import static org.openqa.selenium.remote.http.HttpMethod.GET;
1
// Licensed to the Software Freedom Conservancy (SFC) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The SFC licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. package org.openqa.selenium.grid.distributor.remote; import org.openqa.selenium.SessionNotCreatedException; import org.openqa.selenium.grid.data.CreateSessionResponse; import org.openqa.selenium.grid.data.DistributorStatus; import org.openqa.selenium.grid.distributor.Distributor; import org.openqa.selenium.grid.node.Node; import org.openqa.selenium.grid.web.Values; import org.openqa.selenium.internal.Require; import org.openqa.selenium.remote.http.HttpClient; import org.openqa.selenium.remote.http.HttpHandler; import org.openqa.selenium.remote.http.HttpRequest; import org.openqa.selenium.remote.http.HttpResponse; import org.openqa.selenium.remote.tracing.HttpTracing; import org.openqa.selenium.remote.tracing.Tracer; import java.net.URL; import java.util.UUID; import java.util.logging.Logger; import static org.openqa.selenium.net.Urls.fromUri; import static org.openqa.selenium.remote.http.Contents.asJson; import static org.openqa.selenium.remote.http.HttpMethod.DELETE; import static org.openqa.selenium.remote.http.HttpMethod.GET; import static org.openqa.selenium.remote.http.HttpMethod.POST; public class RemoteDistributor extends Distributor { private static final Logger LOG = Logger.getLogger("Selenium Distributor (Remote)"); private final HttpHandler client; public RemoteDistributor(Tracer tracer, HttpClient.Factory factory, URL url) { super(tracer, factory); this.client = factory.createClient(url); } @Override public boolean isReady() { try { return client.execute(new HttpRequest(GET, "/readyz")).isSuccessful(); } catch (Exception e) { return false; } } @Override public CreateSessionResponse newSession(HttpRequest request) throws SessionNotCreatedException { HttpRequest upstream = new HttpRequest(POST, "/se/grid/distributor/session"); HttpTracing.inject(tracer, tracer.getCurrentContext(), upstream); upstream.setContent(request.getContent()); HttpResponse response = client.execute(upstream); return Values.get(response, CreateSessionResponse.class); } @Override public RemoteDistributor add(Node node) { HttpRequest request = new HttpRequest(POST, "/se/grid/distributor/node"); HttpTracing.inject(tracer, tracer.getCurrentContext(), request); request.setContent(asJson(node.getStatus())); HttpResponse response = client.execute(request); Values.get(response, Void.class); LOG.info(String.format("Added node %s.", node.getId())); return this; } @Override public void remove(UUID nodeId) { Require.nonNull("Node ID", nodeId); HttpRequest request = new HttpRequest(DELETE, "/se/grid/distributor/node/" + nodeId); HttpTracing.inject(tracer, tracer.getCurrentContext(), request); HttpResponse response = client.execute(request); Values.get(response, Void.class); } @Override public DistributorStatus getStatus() { HttpRequest 
request = new HttpRequest(GET, "/se/grid/distributor/status"); HttpTracing.inject(tracer, tracer.getCurrentContext(), request); HttpResponse response = client.execute(request); return Values.get(response, DistributorStatus.class); } }
1
17,752
We can get rid of this import then.
SeleniumHQ-selenium
py
@@ -36,14 +36,14 @@ type morqaTransport struct { } func (transport *morqaTransport) SendEvent(event Event) error { - if metric := mapEventToMetric(event); metric != nil { - return transport.morqaClient.SendMetric(metric) + if id, metric := mapEventToMetric(event); metric != nil { + return transport.morqaClient.SendMetric(id, metric) } return fmt.Errorf("event not implemented") } -func mapEventToMetric(event Event) *metrics.Event { +func mapEventToMetric(event Event) (string, *metrics.Event) { switch event.EventName { case startupEventName: return nodeVersionToMetricsEvent(event.Application)
1
/* * Copyright (C) 2019 The "MysteriumNetwork/node" Authors. * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ package quality import ( "fmt" "github.com/mysteriumnetwork/metrics" "github.com/mysteriumnetwork/node/market" ) // NewMORQATransport creates transport allowing to send events to Mysterium Quality Oracle - MORQA func NewMORQATransport(morqaClient *MysteriumMORQA) *morqaTransport { return &morqaTransport{ morqaClient: morqaClient, } } type morqaTransport struct { morqaClient *MysteriumMORQA } func (transport *morqaTransport) SendEvent(event Event) error { if metric := mapEventToMetric(event); metric != nil { return transport.morqaClient.SendMetric(metric) } return fmt.Errorf("event not implemented") } func mapEventToMetric(event Event) *metrics.Event { switch event.EventName { case startupEventName: return nodeVersionToMetricsEvent(event.Application) case sessionEventName: return sessionEventToMetricsEvent(event.Context.(sessionEventContext)) case sessionDataName: return sessionDataToMetricsEvent(event.Context.(sessionDataContext)) case sessionTokensName: return sessionTokensToMetricsEvent(event.Context.(sessionTokensContext)) case proposalEventName: return proposalEventToMetricsEvent(event.Context.(market.ServiceProposal), event.Application) } return nil } func nodeVersionToMetricsEvent(info appInfo) *metrics.Event { return &metrics.Event{ Metric: &metrics.Event_VersionPayload{ VersionPayload: &metrics.VersionPayload{ Version: info.Version, Os: info.OS, Arch: info.Arch, }, }, } } func sessionEventToMetricsEvent(ctx sessionEventContext) *metrics.Event { return &metrics.Event{ Signature: ctx.Consumer, TargetId: ctx.Provider, IsProvider: false, Metric: &metrics.Event_SessionEventPayload{ SessionEventPayload: &metrics.SessionEventPayload{ Event: ctx.Event, Session: &metrics.SessionPayload{ Id: ctx.ID, ServiceType: ctx.ServiceType, ProviderContry: ctx.ProviderCountry, ConsumerContry: ctx.ConsumerCountry, AccountantId: ctx.AccountantID, }, }, }, } } func sessionDataToMetricsEvent(ctx sessionDataContext) *metrics.Event { return &metrics.Event{ Signature: ctx.Consumer, TargetId: ctx.Provider, IsProvider: false, Metric: &metrics.Event_SessionStatisticsPayload{ SessionStatisticsPayload: &metrics.SessionStatisticsPayload{ BytesSent: ctx.Tx, BytesReceived: ctx.Rx, Session: &metrics.SessionPayload{ Id: ctx.ID, ServiceType: ctx.ServiceType, ProviderContry: ctx.ProviderCountry, ConsumerContry: ctx.ConsumerCountry, AccountantId: ctx.AccountantID, }, }, }, } } func sessionTokensToMetricsEvent(ctx sessionTokensContext) *metrics.Event { return &metrics.Event{ Signature: ctx.Consumer, TargetId: ctx.Provider, IsProvider: false, Metric: &metrics.Event_SessionTokensPayload{ SessionTokensPayload: &metrics.SessionTokensPayload{ Tokens: ctx.Tokens, Session: &metrics.SessionPayload{ Id: ctx.ID, ServiceType: ctx.ServiceType, ProviderContry: ctx.ProviderCountry, ConsumerContry: ctx.ConsumerCountry, AccountantId: ctx.AccountantID, }, 
}, }, } } func proposalEventToMetricsEvent(ctx market.ServiceProposal, info appInfo) *metrics.Event { location := ctx.ServiceDefinition.GetLocation() return &metrics.Event{ Metric: &metrics.Event_ProposalPayload{ ProposalPayload: &metrics.ProposalPayload{ ProviderId: ctx.ProviderID, ServiceType: ctx.ServiceType, NodeType: location.NodeType, Country: location.Country, Version: &metrics.VersionPayload{ Version: info.Version, Os: info.OS, Arch: info.Arch, }, }, }, } }
1
16,364
Not sure how useful the unlock event is.
mysteriumnetwork-node
go
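The patch in the record above changes `mapEventToMetric` to return an event id alongside the payload, but the helper constructors shown in `oldf` still return only `*metrics.Event`, so each of them has to adopt the same pair-returning shape for the `switch` to keep compiling. A minimal sketch for the version helper, with a placeholder id — the record does not show what identifier `SendMetric` actually expects, so that part is an assumption:

// Sketch only: the new (string, *metrics.Event) signature propagated to one
// of the helper constructors from this record's oldf. "eventID" is a
// placeholder, not taken from the source.
func nodeVersionToMetricsEvent(info appInfo) (string, *metrics.Event) {
	eventID := "" // placeholder; the real id value is not visible in this record
	return eventID, &metrics.Event{
		Metric: &metrics.Event_VersionPayload{
			VersionPayload: &metrics.VersionPayload{
				Version: info.Version,
				Os:      info.OS,
				Arch:    info.Arch,
			},
		},
	}
}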
@@ -31,8 +31,8 @@ import ( // This code is copied from memdocstore/codec.go, with some changes: // - special treatment for primitive.Binary -func encodeDoc(doc driver.Document) (map[string]interface{}, error) { - var e encoder +func encodeDoc(doc driver.Document, lowercaseFields bool) (map[string]interface{}, error) { + e := encoder{lowercaseFields: lowercaseFields} if err := doc.Encode(&e); err != nil { return nil, err }
1
// Copyright 2019 The Go Cloud Development Kit Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package mongodocstore import ( "fmt" "reflect" "strings" "time" "go.mongodb.org/mongo-driver/bson/primitive" "gocloud.dev/internal/docstore/driver" ) // Encode and decode to map[string]interface{}. // This isn't ideal, because the mongo client encodes/decodes a second time. // TODO(jba): find a way do only one encode/decode. // This code is copied from memdocstore/codec.go, with some changes: // - special treatment for primitive.Binary func encodeDoc(doc driver.Document) (map[string]interface{}, error) { var e encoder if err := doc.Encode(&e); err != nil { return nil, err } return e.val.(map[string]interface{}), nil } func encodeValue(x interface{}) (interface{}, error) { var e encoder if err := driver.Encode(reflect.ValueOf(x), &e); err != nil { return nil, err } return e.val, nil } type encoder struct { val interface{} } func (e *encoder) EncodeNil() { e.val = nil } func (e *encoder) EncodeBool(x bool) { e.val = x } func (e *encoder) EncodeInt(x int64) { e.val = x } func (e *encoder) EncodeUint(x uint64) { e.val = int64(x) } func (e *encoder) EncodeBytes(x []byte) { e.val = x } func (e *encoder) EncodeFloat(x float64) { e.val = x } func (e *encoder) EncodeComplex(x complex128) { e.val = []float64{real(x), imag(x)} } func (e *encoder) EncodeString(x string) { e.val = x } func (e *encoder) ListIndex(int) { panic("impossible") } func (e *encoder) MapKey(string) { panic("impossible") } var typeOfGoTime = reflect.TypeOf(time.Time{}) func (e *encoder) EncodeSpecial(v reflect.Value) (bool, error) { // Treat time "specially" as itself (otherwise its BinaryMarshal method will be called). if v.Type() == typeOfGoTime { e.val = v.Interface() return true, nil } return false, nil } func (e *encoder) EncodeList(n int) driver.Encoder { // All slices and arrays are encoded as []interface{} s := make([]interface{}, n) e.val = s return &listEncoder{s: s} } type listEncoder struct { s []interface{} encoder } func (e *listEncoder) ListIndex(i int) { e.s[i] = e.val } type mapEncoder struct { m map[string]interface{} isStruct bool encoder } func (e *encoder) EncodeMap(n int, isStruct bool) driver.Encoder { m := make(map[string]interface{}, n) e.val = m return &mapEncoder{m: m, isStruct: isStruct} } func (e *mapEncoder) MapKey(k string) { // The BSON codec encodes structs by lower-casing field names. if e.isStruct { k = strings.ToLower(k) } e.m[k] = e.val } //////////////////////////////////////////////////////////////// // decodeDoc decodes m into ddoc. 
func decodeDoc(m map[string]interface{}, ddoc driver.Document, idField string) error { switch idField { case mongoIDField: // do nothing case "": // user uses idFunc delete(m, mongoIDField) default: // user documents have a different ID field m[idField] = m[mongoIDField] delete(m, mongoIDField) } return ddoc.Decode(decoder{m}) } type decoder struct { val interface{} } func (d decoder) String() string { return fmt.Sprint(d.val) } func (d decoder) AsNull() bool { return d.val == nil } func (d decoder) AsBool() (bool, bool) { b, ok := d.val.(bool) return b, ok } func (d decoder) AsString() (string, bool) { s, ok := d.val.(string) return s, ok } func (d decoder) AsInt() (int64, bool) { switch v := d.val.(type) { case int64: return v, true case int32: return int64(v), true default: return 0, false } } func (d decoder) AsUint() (uint64, bool) { i, ok := d.val.(int64) return uint64(i), ok } func (d decoder) AsFloat() (float64, bool) { f, ok := d.val.(float64) return f, ok } func (d decoder) AsComplex() (complex128, bool) { switch v := d.val.(type) { case []float64: if len(v) != 2 { return 0, false } return complex(v[0], v[1]), true case primitive.A: // []interface{} if len(v) != 2 { return 0, false } r, ok := v[0].(float64) if !ok { return 0, false } i, ok := v[1].(float64) if !ok { return 0, false } return complex(r, i), true default: return 0, false } } func (d decoder) AsBytes() ([]byte, bool) { switch v := d.val.(type) { case []byte: return v, true case primitive.Binary: return v.Data, true default: return nil, false } } func (d decoder) AsInterface() (interface{}, error) { return toGoValue(d.val) } func toGoValue(v interface{}) (interface{}, error) { switch v := v.(type) { case primitive.A: r := make([]interface{}, len(v)) for i, e := range v { d, err := toGoValue(e) if err != nil { return nil, err } r[i] = d } return r, nil case primitive.Binary: return v.Data, nil case primitive.DateTime: return bsonDateTimeToTime(v), nil case map[string]interface{}: r := map[string]interface{}{} for k, e := range v { d, err := toGoValue(e) if err != nil { return nil, err } r[k] = d } return r, nil default: return v, nil } } func (d decoder) ListLen() (int, bool) { if s, ok := d.val.(primitive.A); ok { return len(s), true } return 0, false } func (d decoder) DecodeList(f func(i int, d2 driver.Decoder) bool) { for i, e := range d.val.(primitive.A) { if !f(i, decoder{e}) { return } } } func (d decoder) MapLen() (int, bool) { if m, ok := d.val.(map[string]interface{}); ok { return len(m), true } return 0, false } func (d decoder) DecodeMap(f func(key string, d2 driver.Decoder) bool) { for k, v := range d.val.(map[string]interface{}) { if !f(k, decoder{v}) { return } } } func (d decoder) AsSpecial(v reflect.Value) (bool, interface{}, error) { switch v := d.val.(type) { case primitive.Binary: return true, v.Data, nil case primitive.DateTime: // A DateTime represents milliseconds since the Unix epoch. return true, bsonDateTimeToTime(v), nil default: return false, nil, nil } } func bsonDateTimeToTime(dt primitive.DateTime) time.Time { return time.Unix(int64(dt)/1000, int64(dt)%1000*1e6) }
1
17,156
Consider making the second argument an `encoderOptions` struct that includes `lowercaseFields`, just like when opening a collection.
google-go-cloud
go
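The review comment in the record above asks for an `encoderOptions` struct instead of a bare `lowercaseFields` bool. A minimal sketch of that shape, reusing the `lowercaseFields` field the patch adds to `encoder`; the `encoderOptions` name comes from the comment, not from the source, and this would slot into the existing mongodocstore file:

// Hypothetical options bag, per the review suggestion. Starting with a
// struct means future knobs can be added without touching every call site.
type encoderOptions struct {
	lowercaseFields bool // lower-case struct field names, matching the BSON codec
}

func encodeDoc(doc driver.Document, opts encoderOptions) (map[string]interface{}, error) {
	e := encoder{lowercaseFields: opts.lowercaseFields}
	if err := doc.Encode(&e); err != nil {
		return nil, err
	}
	return e.val.(map[string]interface{}), nil
}

Call sites would then read `encodeDoc(doc, encoderOptions{lowercaseFields: true})`, which is self-documenting compared to a positional bool.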
@@ -82,11 +82,13 @@ public class HiveIcebergOutputCommitter extends OutputCommitter { /** * Collects the generated data files and creates a commit file storing the data file list. - * @param context The job context + * @param ctx The task attempt context * @throws IOException Thrown if there is an error writing the commit file */ @Override - public void commitTask(TaskAttemptContext context) throws IOException { + public void commitTask(TaskAttemptContext ctx) throws IOException { + TaskAttemptContext context = TezUtil.enrichContextWithAttemptWrapper(ctx); + TaskAttemptID attemptID = context.getTaskAttemptID(); String fileForCommitLocation = generateFileForCommitLocation(context.getJobConf(), attemptID.getJobID(), attemptID.getTaskID().getId());
1
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.iceberg.mr.hive; import java.io.IOException; import java.io.ObjectInputStream; import java.io.ObjectOutputStream; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapred.JobContext; import org.apache.hadoop.mapred.OutputCommitter; import org.apache.hadoop.mapred.TaskAttemptContext; import org.apache.hadoop.mapred.TaskAttemptID; import org.apache.hadoop.mapreduce.JobID; import org.apache.hadoop.mapreduce.TaskType; import org.apache.iceberg.AppendFiles; import org.apache.iceberg.DataFile; import org.apache.iceberg.Table; import org.apache.iceberg.exceptions.NotFoundException; import org.apache.iceberg.hadoop.Util; import org.apache.iceberg.io.FileIO; import org.apache.iceberg.io.OutputFile; import org.apache.iceberg.mr.Catalogs; import org.apache.iceberg.mr.InputFormatConfig; import org.apache.iceberg.relocated.com.google.common.annotations.VisibleForTesting; import org.apache.iceberg.relocated.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.iceberg.util.Tasks; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * An Iceberg table committer for adding data files to the Iceberg tables. * Currently independent of the Hive ACID transactions. */ public class HiveIcebergOutputCommitter extends OutputCommitter { private static final String FOR_COMMIT_EXTENSION = ".forCommit"; private static final Logger LOG = LoggerFactory.getLogger(HiveIcebergOutputCommitter.class); @Override public void setupJob(JobContext jobContext) { // do nothing. } @Override public void setupTask(TaskAttemptContext taskAttemptContext) { // do nothing. } @Override public boolean needsTaskCommit(TaskAttemptContext context) { // We need to commit if this is the last phase of a MapReduce process return TaskType.REDUCE.equals(context.getTaskAttemptID().getTaskID().getTaskType()) || context.getJobConf().getNumReduceTasks() == 0; } /** * Collects the generated data files and creates a commit file storing the data file list. 
* @param context The job context * @throws IOException Thrown if there is an error writing the commit file */ @Override public void commitTask(TaskAttemptContext context) throws IOException { TaskAttemptID attemptID = context.getTaskAttemptID(); String fileForCommitLocation = generateFileForCommitLocation(context.getJobConf(), attemptID.getJobID(), attemptID.getTaskID().getId()); HiveIcebergRecordWriter writer = HiveIcebergRecordWriter.getWriter(attemptID); DataFile[] closedFiles; if (writer != null) { closedFiles = writer.dataFiles(); } else { closedFiles = new DataFile[0]; } // Creating the file containing the data files generated by this task createFileForCommit(closedFiles, fileForCommitLocation, HiveIcebergStorageHandler.io(context.getJobConf())); // remove the writer to release the object HiveIcebergRecordWriter.removeWriter(attemptID); } /** * Removes files generated by this task. * @param context The task context * @throws IOException Thrown if there is an error closing the writer */ @Override public void abortTask(TaskAttemptContext context) throws IOException { // Clean up writer data from the local store HiveIcebergRecordWriter writer = HiveIcebergRecordWriter.removeWriter(context.getTaskAttemptID()); // Remove files if it was not done already if (writer != null) { writer.close(true); } } /** * Reads the commit files stored in the temp directory and collects the generated committed data files. * Appends the data files to the table. At the end removes the temporary directory. * @param jobContext The job context * @throws IOException if there is a failure deleting the files */ @Override public void commitJob(JobContext jobContext) throws IOException { JobConf conf = jobContext.getJobConf(); Table table = Catalogs.loadTable(conf); long startTime = System.currentTimeMillis(); LOG.info("Committing job has started for table: {}, using location: {}", table, generateJobLocation(conf, jobContext.getJobID())); FileIO io = HiveIcebergStorageHandler.io(jobContext.getJobConf()); List<DataFile> dataFiles = dataFiles(jobContext, io, true); if (dataFiles.size() > 0) { // Appending data files to the table AppendFiles append = table.newAppend(); dataFiles.forEach(append::appendFile); append.commit(); LOG.info("Commit took {} ms for table: {} with {} file(s)", System.currentTimeMillis() - startTime, table, dataFiles.size()); LOG.debug("Added files {}", dataFiles); } else { LOG.info("Commit took {} ms for table: {} with no new files", System.currentTimeMillis() - startTime, table); } cleanup(jobContext); } /** * Removes the generated data files, if there is a commit file already generated for them. * The cleanup at the end removes the temporary directory as well. * @param jobContext The job context * @param status The status of the job * @throws IOException if there is a failure deleting the files */ @Override public void abortJob(JobContext jobContext, int status) throws IOException { String location = generateJobLocation(jobContext.getJobConf(), jobContext.getJobID()); LOG.info("Job {} is aborted. 
Cleaning job location {}", jobContext.getJobID(), location); FileIO io = HiveIcebergStorageHandler.io(jobContext.getJobConf()); List<DataFile> dataFiles = dataFiles(jobContext, io, false); // Check if we have files already committed and remove data files if there are any if (dataFiles.size() > 0) { Tasks.foreach(dataFiles) .retry(3) .suppressFailureWhenFinished() .onFailure((file, exc) -> LOG.debug("Failed on to remove data file {} on abort job", file.path(), exc)) .run(file -> io.deleteFile(file.path().toString())); } cleanup(jobContext); } /** * Cleans up the jobs temporary location. * @param jobContext The job context * @throws IOException if there is a failure deleting the files */ private void cleanup(JobContext jobContext) throws IOException { String location = generateJobLocation(jobContext.getJobConf(), jobContext.getJobID()); LOG.info("Cleaning for job: {} on location: {}", jobContext.getJobID(), location); // Remove the job's temp directory recursively. // Intentionally used foreach on a single item. Using the Tasks API here only for the retry capability. Tasks.foreach(location) .retry(3) .suppressFailureWhenFinished() .onFailure((file, exc) -> LOG.debug("Failed on to remove directory {} on cleanup job", file, exc)) .run(file -> { Path toDelete = new Path(file); FileSystem fs = Util.getFs(toDelete, jobContext.getJobConf()); fs.delete(toDelete, true); }, IOException.class); } /** * Get the data committed data files for this job. * @param jobContext The job context * @param io The FileIO used for reading a files generated for commit * @param throwOnFailure If <code>true</code> then it throws an exception on failure * @return The list of the committed data files */ private static List<DataFile> dataFiles(JobContext jobContext, FileIO io, boolean throwOnFailure) { JobConf conf = jobContext.getJobConf(); // If there are reducers, then every reducer will generate a result file. // If this is a map only task, then every mapper will generate a result file. int expectedFiles = conf.getNumReduceTasks() != 0 ? conf.getNumReduceTasks() : conf.getNumMapTasks(); ExecutorService executor = null; try { // Creating executor service for parallel handling of file reads executor = Executors.newFixedThreadPool( conf.getInt(InputFormatConfig.COMMIT_THREAD_POOL_SIZE, InputFormatConfig.COMMIT_THREAD_POOL_SIZE_DEFAULT), new ThreadFactoryBuilder() .setDaemon(true) .setPriority(Thread.NORM_PRIORITY) .setNameFormat("iceberg-commit-pool-%d") .build()); List<DataFile> dataFiles = Collections.synchronizedList(new ArrayList<>()); // Reading the committed files. The assumption here is that the taskIds are generated in sequential order // starting from 0. Tasks.range(expectedFiles) .throwFailureWhenFinished(throwOnFailure) .executeWith(executor) .retry(3) .run(taskId -> { String taskFileName = generateFileForCommitLocation(conf, jobContext.getJobID(), taskId); dataFiles.addAll(Arrays.asList(readFileForCommit(taskFileName, io))); }); return dataFiles; } finally { if (executor != null) { executor.shutdown(); } } } /** * Generates the job temp location based on the job configuration. * Currently it uses QUERY_LOCATION-jobId. 
* @param conf The job's configuration * @param jobId The JobID for the task * @return The file to store the results */ @VisibleForTesting static String generateJobLocation(Configuration conf, JobID jobId) { String tableLocation = conf.get(InputFormatConfig.TABLE_LOCATION); String queryId = conf.get(HiveConf.ConfVars.HIVEQUERYID.varname); return tableLocation + "/temp/" + queryId + "-" + jobId; } /** * Generates file location based on the task configuration and a specific task id. * This file will be used to store the data required to generate the Iceberg commit. * Currently it uses QUERY_LOCATION-jobId/task-[0..numTasks).forCommit. * @param conf The job's configuration * @param jobId The jobId for the task * @param taskId The taskId for the commit file * @return The file to store the results */ private static String generateFileForCommitLocation(Configuration conf, JobID jobId, int taskId) { return generateJobLocation(conf, jobId) + "/task-" + taskId + FOR_COMMIT_EXTENSION; } private static void createFileForCommit(DataFile[] closedFiles, String location, FileIO io) throws IOException { OutputFile fileForCommit = io.newOutputFile(location); try (ObjectOutputStream oos = new ObjectOutputStream(fileForCommit.createOrOverwrite())) { oos.writeObject(closedFiles); } LOG.debug("Iceberg committed file is created {}", fileForCommit); } private static DataFile[] readFileForCommit(String fileForCommitLocation, FileIO io) { try (ObjectInputStream ois = new ObjectInputStream(io.newInputFile(fileForCommitLocation).newStream())) { return (DataFile[]) ois.readObject(); } catch (ClassNotFoundException | IOException e) { throw new NotFoundException("Can not read or parse committed file: %s", fileForCommitLocation); } } }
1
32,776
What is the value of renaming this to `ctx`? We generally prefer longer names because they are easier to read and type.
apache-iceberg
java
@@ -352,8 +352,14 @@ bool TurnLaneHandler::isSimpleIntersection(const LaneDataVector &lane_data, if (lane_data.back().tag == TurnLaneType::uturn) return findBestMatchForReverse(lane_data[lane_data.size() - 2].tag, intersection); - BOOST_ASSERT(lane_data.front().tag == TurnLaneType::uturn); - return findBestMatchForReverse(lane_data[1].tag, intersection); + // TODO(mokob): #2730 have a look please + // BOOST_ASSERT(lane_data.front().tag == TurnLaneType::uturn); + // return findBestMatchForReverse(lane_data[1].tag, intersection); + // + if (lane_data.front().tag == TurnLaneType::uturn) + return findBestMatchForReverse(lane_data[1].tag, intersection); + + return findBestMatch(data.tag, intersection); }(); std::size_t match_index = std::distance(intersection.begin(), best_match); all_simple &= (matched_indices.count(match_index) == 0);
1
#include "extractor/guidance/turn_lane_handler.hpp" #include "extractor/guidance/constants.hpp" #include "extractor/guidance/turn_discovery.hpp" #include "extractor/guidance/turn_lane_augmentation.hpp" #include "extractor/guidance/turn_lane_matcher.hpp" #include "util/simple_logger.hpp" #include "util/typedefs.hpp" #include <cstdint> #include <boost/algorithm/string/predicate.hpp> #include <boost/numeric/conversion/cast.hpp> namespace osrm { namespace extractor { namespace guidance { namespace lanes { namespace { std::size_t getNumberOfTurns(const Intersection &intersection) { return std::count_if(intersection.begin(), intersection.end(), [](const ConnectedRoad &road) { return road.entry_allowed; }); } } // namespace TurnLaneHandler::TurnLaneHandler(const util::NodeBasedDynamicGraph &node_based_graph, const std::vector<std::uint32_t> &turn_lane_offsets, const std::vector<TurnLaneType::Mask> &turn_lane_masks, const TurnAnalysis &turn_analysis) : node_based_graph(node_based_graph), turn_lane_offsets(turn_lane_offsets), turn_lane_masks(turn_lane_masks), turn_analysis(turn_analysis) { } /* Turn lanes are given in the form of strings that closely correspond to the direction modifiers we use for our turn types. However, we still cannot simply perform a 1:1 assignment. This function parses the turn_lane_descriptions of a format that describes an intersection as: ---------- A -^ ---------- B -> -v ---------- C -v ---------- witch is the result of a string like looking |left|through;right|right| and performs an assignment onto the turns. For example: (130, turn slight right), (180, ramp straight), (320, turn sharp left). */ Intersection TurnLaneHandler::assignTurnLanes(const NodeID at, const EdgeID via_edge, Intersection intersection, LaneDataIdMap &id_map) const { // if only a uturn exists, there is nothing we can do if (intersection.size() == 1) return intersection; const auto &data = node_based_graph.GetEdgeData(via_edge); // Extract a lane description for the ID const auto turn_lane_description = data.lane_description_id != INVALID_LANE_DESCRIPTIONID ? TurnLaneDescription( turn_lane_masks.begin() + turn_lane_offsets[data.lane_description_id], turn_lane_masks.begin() + turn_lane_offsets[data.lane_description_id + 1]) : TurnLaneDescription(); BOOST_ASSERT(turn_lane_description.empty() || turn_lane_description.size() == (turn_lane_offsets[data.lane_description_id + 1] - turn_lane_offsets[data.lane_description_id])); // going straight, due to traffic signals, we can have uncompressed geometry if (intersection.size() == 2 && ((data.lane_description_id != INVALID_LANE_DESCRIPTIONID && data.lane_description_id == node_based_graph.GetEdgeData(intersection[1].turn.eid).lane_description_id) || angularDeviation(intersection[1].turn.angle, STRAIGHT_ANGLE) < FUZZY_ANGLE_DIFFERENCE)) return intersection; auto lane_data = laneDataFromDescription(turn_lane_description); // if we see an invalid conversion, we stop immediately if (!turn_lane_description.empty() && lane_data.empty()) return intersection; // might be reasonable to handle multiple turns, if we know of a sequence of lanes // e.g. 
one direction per lane, if three lanes and right, through, left available if (!turn_lane_description.empty() && lane_data.size() == 1 && lane_data[0].tag == TurnLaneType::none) return intersection; const std::size_t possible_entries = getNumberOfTurns(intersection); // merge does not justify an instruction const bool has_merge_lane = hasTag(TurnLaneType::merge_to_left | TurnLaneType::merge_to_right, lane_data); // Dead end streets that don't have any left-tag. This can happen due to the fallbacks for // broken data/barriers. const bool has_non_usable_u_turn = (intersection[0].entry_allowed && !hasTag(TurnLaneType::none | TurnLaneType::left | TurnLaneType::sharp_left | TurnLaneType::uturn, lane_data) && lane_data.size() + 1 == possible_entries); if (has_merge_lane || has_non_usable_u_turn) return intersection; if (!lane_data.empty() && canMatchTrivially(intersection, lane_data) && lane_data.size() != static_cast<std::size_t>( lane_data.back().tag != TurnLaneType::uturn && intersection[0].entry_allowed ? 1 : 0) + possible_entries && intersection[0].entry_allowed && !hasTag(TurnLaneType::none, lane_data)) lane_data.push_back({TurnLaneType::uturn, lane_data.back().to, lane_data.back().to}); bool is_simple = isSimpleIntersection(lane_data, intersection); // simple intersections can be assigned directly if (is_simple) { lane_data = handleNoneValueAtSimpleTurn(std::move(lane_data), intersection); return simpleMatchTuplesToTurns( std::move(intersection), lane_data, data.lane_description_id, id_map); } // if the intersection is not simple but we have lane data, we check for intersections with // middle islands. We have two cases. The first one is providing lane data on the current // segment and we only need to consider the part of the current segment. In this case we // partition the data and only consider the first part. else if (!lane_data.empty()) { if (lane_data.size() >= possible_entries) { lane_data = partitionLaneData(node_based_graph.GetTarget(via_edge), std::move(lane_data), intersection) .first; // check if we were successfull in trimming if (lane_data.size() == possible_entries && isSimpleIntersection(lane_data, intersection)) { lane_data = handleNoneValueAtSimpleTurn(std::move(lane_data), intersection); return simpleMatchTuplesToTurns( std::move(intersection), lane_data, data.lane_description_id, id_map); } } } // The second part does not provide lane data on the current segment, but on the segment prior // to the turn. We try to partition the data and only consider the second part. else if (turn_lane_description.empty()) { // acquire the lane data of a previous segment and, if possible, use it for the current // intersection. return handleTurnAtPreviousIntersection(at, via_edge, std::move(intersection), id_map); } return intersection; } // At segregated intersections, turn lanes will often only be specified up until the first turn. To // actually take the turn, we need to look back to the edge we drove onto the intersection with. Intersection TurnLaneHandler::handleTurnAtPreviousIntersection(const NodeID at, const EdgeID via_edge, Intersection intersection, LaneDataIdMap &id_map) const { NodeID previous_node = SPECIAL_NODEID; Intersection previous_intersection; EdgeID previous_id = SPECIAL_EDGEID; LaneDataVector lane_data; // Get the previous lane string. We only accept strings that stem from a not-simple intersection // and are not empty. 
const auto previous_lane_description = [&]() -> TurnLaneDescription { if (!findPreviousIntersection(at, via_edge, intersection, turn_analysis, node_based_graph, previous_node, previous_id, previous_intersection)) return {}; BOOST_ASSERT(previous_id != SPECIAL_EDGEID); const auto &previous_edge_data = node_based_graph.GetEdgeData(previous_id); // TODO access correct data const auto previous_description = previous_edge_data.lane_description_id != INVALID_LANE_DESCRIPTIONID ? TurnLaneDescription( turn_lane_masks.begin() + turn_lane_offsets[previous_edge_data.lane_description_id], turn_lane_masks.begin() + turn_lane_offsets[previous_edge_data.lane_description_id + 1]) : TurnLaneDescription(); if (previous_description.empty()) return previous_description; previous_intersection = turn_analysis.assignTurnTypes( previous_node, previous_id, std::move(previous_intersection)); lane_data = laneDataFromDescription(previous_description); if (isSimpleIntersection(lane_data, previous_intersection)) return {}; else return previous_description; }(); // no lane string, no problems if (previous_lane_description.empty()) return intersection; // stop on invalid lane data conversion if (lane_data.empty()) return intersection; const auto &previous_data = node_based_graph.GetEdgeData(previous_id); const auto is_simple = isSimpleIntersection(lane_data, intersection); if (is_simple) { lane_data = handleNoneValueAtSimpleTurn(std::move(lane_data), intersection); return simpleMatchTuplesToTurns( std::move(intersection), lane_data, previous_data.lane_description_id, id_map); } else { if (lane_data.size() >= getNumberOfTurns(previous_intersection) && previous_intersection.size() != 2) { lane_data = partitionLaneData(node_based_graph.GetTarget(previous_id), std::move(lane_data), previous_intersection) .second; std::sort(lane_data.begin(), lane_data.end()); // check if we were successfull in trimming if (lane_data.size() == getNumberOfTurns(intersection) && isSimpleIntersection(lane_data, intersection)) { lane_data = handleNoneValueAtSimpleTurn(std::move(lane_data), intersection); return simpleMatchTuplesToTurns( std::move(intersection), lane_data, previous_data.lane_description_id, id_map); } } } return intersection; } /* A simple intersection does not depend on the next intersection coming up. This is important * for turn lanes, since traffic signals and/or segregated a intersection can influence the * interpretation of turn-lanes at a given turn. * * Here we check for a simple intersection. A simple intersection has a long enough segment * followin the turn, offers no straight turn, or only non-trivial turn operations. */ bool TurnLaneHandler::isSimpleIntersection(const LaneDataVector &lane_data, const Intersection &intersection) const { if (lane_data.empty()) return false; // if we are on a straight road, turn lanes are only reasonable in connection to the next // intersection, or in case of a merge. If not all but one (straight) are merges, we don't // consider the intersection simple if (intersection.size() == 2) { return std::count_if( lane_data.begin(), lane_data.end(), [](const TurnLaneData &data) { return ((data.tag & TurnLaneType::merge_to_left) != TurnLaneType::empty) || ((data.tag & TurnLaneType::merge_to_right) != TurnLaneType::empty); }) + std::size_t{1} >= lane_data.size(); } // in case an intersection offers far more lane data items than actual turns, some of them // have // to be for another intersection. A single additional item can be for an invalid bus lane. 
const auto num_turns = [&]() { auto count = getNumberOfTurns(intersection); if (count < lane_data.size() && !intersection[0].entry_allowed && lane_data.back().tag == TurnLaneType::uturn) return count + 1; return count; }(); // more than two additional lane data entries -> lanes target a different intersection if (num_turns + std::size_t{2} <= lane_data.size()) { return false; } // single additional lane data entry is alright, if it is none at the side. This usually // refers to a bus-lane if (num_turns + std::size_t{1} == lane_data.size() && lane_data.front().tag != TurnLaneType::none && lane_data.back().tag != TurnLaneType::none) { return false; } // more turns than lane data if (num_turns > lane_data.size() && lane_data.end() == std::find_if(lane_data.begin(), lane_data.end(), [](const TurnLaneData &data) { return data.tag == TurnLaneType::none; })) { return false; } if (num_turns > lane_data.size() && intersection[0].entry_allowed && !(hasTag(TurnLaneType::uturn, lane_data) || (lane_data.back().tag != TurnLaneType::left && lane_data.back().tag != TurnLaneType::sharp_left))) { return false; } // check if we can find a valid 1:1 mapping in a straightforward manner bool all_simple = true; bool has_none = false; std::unordered_set<std::size_t> matched_indices; for (const auto &data : lane_data) { if (data.tag == TurnLaneType::none) { has_none = true; continue; } const auto best_match = [&]() { if (data.tag != TurnLaneType::uturn || lane_data.size() == 1) return findBestMatch(data.tag, intersection); // lane_data.size() > 1 if (lane_data.back().tag == TurnLaneType::uturn) return findBestMatchForReverse(lane_data[lane_data.size() - 2].tag, intersection); BOOST_ASSERT(lane_data.front().tag == TurnLaneType::uturn); return findBestMatchForReverse(lane_data[1].tag, intersection); }(); std::size_t match_index = std::distance(intersection.begin(), best_match); all_simple &= (matched_indices.count(match_index) == 0); matched_indices.insert(match_index); // in case of u-turns, we might need to activate them first all_simple &= (best_match->entry_allowed || match_index == 0); all_simple &= isValidMatch(data.tag, best_match->turn.instruction); } // either all indices are matched, or we have a single none-value if (all_simple && (matched_indices.size() == lane_data.size() || (matched_indices.size() + 1 == lane_data.size() && has_none))) return true; // better save than sorry return false; } std::pair<LaneDataVector, LaneDataVector> TurnLaneHandler::partitionLaneData( const NodeID at, LaneDataVector turn_lane_data, const Intersection &intersection) const { BOOST_ASSERT(turn_lane_data.size() >= getNumberOfTurns(intersection)); /* * A Segregated intersection can provide turn lanes for turns that are not yet possible. * The straightforward example would be coming up to the following situation: * (1) (2) * | A | | A | * | | | | ^ | * | v | | | | * ------- ----------- ------ * B ->-^ B * ------- ----------- ------ * B ->-v B * ------- ----------- ------ * | A | | A | * * Traveling on road B, we have to pass A at (1) to turn left onto A at (2). The turn * lane itself may only be specified prior to (1) and/or could be repeated between (1) * and (2). To make sure to announce the lane correctly, we need to treat the (in this * case left) turn lane as if it were to continue straight onto the intersection and * look back between (1) and (2) to make sure we find the correct lane for the left-turn. * * Intersections like these have two parts. 
Turns that can be made at the first intersection and * turns that have to be made at the second. The partitioning returns the lane data split into * two parts, one for the first and one for the second intersection. */ // Try and maitch lanes to available turns. For Turns that are not directly matchable, check // whether we can match them at the upcoming intersection. const auto straightmost = findClosestTurn(intersection, STRAIGHT_ANGLE); BOOST_ASSERT(straightmost < intersection.cend()); // we need to be able to enter the straightmost turn if (!straightmost->entry_allowed) return {turn_lane_data, {}}; std::vector<bool> matched_at_first(turn_lane_data.size(), false); std::vector<bool> matched_at_second(turn_lane_data.size(), false); // find out about the next intersection. To check for valid matches, we also need the turn types auto next_intersection = turn_analysis.getIntersection(at, straightmost->turn.eid); next_intersection = turn_analysis.assignTurnTypes(at, straightmost->turn.eid, std::move(next_intersection)); // check where we can match turn lanes std::size_t straightmost_tag_index = turn_lane_data.size(); for (std::size_t lane = 0; lane < turn_lane_data.size(); ++lane) { if ((turn_lane_data[lane].tag & (TurnLaneType::none | TurnLaneType::uturn)) != TurnLaneType::empty) continue; const auto best_match = findBestMatch(turn_lane_data[lane].tag, intersection); if (isValidMatch(turn_lane_data[lane].tag, best_match->turn.instruction)) { matched_at_first[lane] = true; if (straightmost == best_match) straightmost_tag_index = lane; } const auto best_match_at_next_intersection = findBestMatch(turn_lane_data[lane].tag, next_intersection); if (isValidMatch(turn_lane_data[lane].tag, best_match_at_next_intersection->turn.instruction)) matched_at_second[lane] = true; // we need to match all items to either the current or the next intersection if (!(matched_at_first[lane] || matched_at_second[lane])) return {turn_lane_data, {}}; } std::size_t none_index = std::distance(turn_lane_data.begin(), findTag(TurnLaneType::none, turn_lane_data)); // if the turn lanes are pull forward, we might have to add an additional straight tag // did we find something that matches against the straightmost road? 
if (straightmost_tag_index == turn_lane_data.size()) { if (none_index != turn_lane_data.size()) straightmost_tag_index = none_index; } // TODO handle reverse // handle none values if (none_index != turn_lane_data.size()) { if (static_cast<std::size_t>( std::count(matched_at_first.begin(), matched_at_first.end(), true)) <= getNumberOfTurns(intersection)) matched_at_first[none_index] = true; if (static_cast<std::size_t>( std::count(matched_at_second.begin(), matched_at_second.end(), true)) <= getNumberOfTurns(next_intersection)) matched_at_second[none_index] = true; } const auto augmentEntry = [&](TurnLaneData &data) { for (std::size_t lane = 0; lane < turn_lane_data.size(); ++lane) if (matched_at_second[lane]) { data.from = std::min(turn_lane_data[lane].from, data.from); data.to = std::max(turn_lane_data[lane].to, data.to); } }; LaneDataVector first, second; for (std::size_t lane = 0; lane < turn_lane_data.size(); ++lane) { if (matched_at_second[lane]) second.push_back(turn_lane_data[lane]); // augment straightmost at this intersection to match all turns that happen at the next if (lane == straightmost_tag_index) { augmentEntry(turn_lane_data[straightmost_tag_index]); } if (matched_at_first[lane]) first.push_back(turn_lane_data[lane]); } if (straightmost_tag_index == turn_lane_data.size() && static_cast<std::size_t>( std::count(matched_at_second.begin(), matched_at_second.end(), true)) == getNumberOfTurns(next_intersection)) { TurnLaneData data = {TurnLaneType::straight, 255, 0}; augmentEntry(data); first.push_back(data); std::sort(first.begin(), first.end()); } // TODO augment straightmost turn return {std::move(first), std::move(second)}; } Intersection TurnLaneHandler::simpleMatchTuplesToTurns(Intersection intersection, const LaneDataVector &lane_data, const LaneDescriptionID lane_description_id, LaneDataIdMap &id_map) const { if (lane_data.empty() || !canMatchTrivially(intersection, lane_data)) return intersection; BOOST_ASSERT( !hasTag(TurnLaneType::none | TurnLaneType::merge_to_left | TurnLaneType::merge_to_right, lane_data)); return triviallyMatchLanesToTurns( std::move(intersection), lane_data, node_based_graph, lane_description_id, id_map); } } // namespace lanes } // namespace guidance } // namespace extractor } // namespace osrm
1
17,232
Hm. This could be a case of left-sided driving, which would have u-turn lanes on the right side. Good catch, but this looks good to me.
Project-OSRM-osrm-backend
cpp
@@ -21,12 +21,12 @@ namespace Nethermind.Merge.Plugin.Data { public class Result { - public static readonly Result Success = new Result() {Value = true}; - public static readonly Result Fail = new Result() {Value = false}; + public static readonly Result OK = new() {success = true}; + public static readonly Result Fail = new() {success = false}; - public static implicit operator Result(bool value) => value ? Success : Fail; + public static implicit operator Result(bool value) => value ? OK : Fail; [JsonPropertyName("success")] - public bool Value { get; init; } + public bool success { get; init; } } }
1
// Copyright (c) 2021 Demerzel Solutions Limited // This file is part of the Nethermind library. // // The Nethermind library is free software: you can redistribute it and/or modify // it under the terms of the GNU Lesser General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // // The Nethermind library is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU Lesser General Public License for more details. // // You should have received a copy of the GNU Lesser General Public License // along with the Nethermind. If not, see <http://www.gnu.org/licenses/>. // using System.Text.Json.Serialization; namespace Nethermind.Merge.Plugin.Data { public class Result { public static readonly Result Success = new Result() {Value = true}; public static readonly Result Fail = new Result() {Value = false}; public static implicit operator Result(bool value) => value ? Success : Fail; [JsonPropertyName("success")] public bool Value { get; init; } } }
1
25,329
There was a [JsonPropertyName("success")] here and it did serialize fine for me, so I am not sure what the problem was. Also, maybe uppercase?
NethermindEth-nethermind
.cs
@@ -246,7 +246,8 @@ namespace Nethermind.Runner.Ethereum.Steps case SealEngineType.AuRa: AbiEncoder abiEncoder = new AbiEncoder(); _context.ValidatorStore = new ValidatorStore(_context.DbProvider.BlockInfosDb); - IAuRaValidatorProcessor validatorProcessor = new AuRaAdditionalBlockProcessorFactory(_context.StateProvider, abiEncoder, _context.TransactionProcessor, _context.BlockTree, _context.ReceiptStorage, _context.ValidatorStore, _context.LogManager) + ITransactionProcessorFactory readOnlyTransactionProcessorFactory = new ReadOnlyTransactionProcessorFactory(new ReadOnlyDbProvider(_context.DbProvider, false), _context.BlockTree, _context.SpecProvider, _context.LogManager); + IAuRaValidatorProcessor validatorProcessor = new AuRaAdditionalBlockProcessorFactory(_context.StateProvider, abiEncoder, _context.TransactionProcessor, readOnlyTransactionProcessorFactory, _context.BlockTree, _context.ReceiptStorage, _context.ValidatorStore, _context.LogManager) .CreateValidatorProcessor(_context.ChainSpec.AuRa.Validators); AuRaStepCalculator auRaStepCalculator = new AuRaStepCalculator(_context.ChainSpec.AuRa.StepDuration, _context.Timestamper);
1
// Copyright (c) 2018 Demerzel Solutions Limited // This file is part of the Nethermind library. // // The Nethermind library is free software: you can redistribute it and/or modify // it under the terms of the GNU Lesser General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // // The Nethermind library is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU Lesser General Public License for more details. // // You should have received a copy of the GNU Lesser General Public License // along with the Nethermind. If not, see <http://www.gnu.org/licenses/>. using System; using System.Collections.Generic; using System.Linq; using System.Threading.Tasks; using Nethermind.Abi; using Nethermind.AuRa; using Nethermind.AuRa.Rewards; using Nethermind.AuRa.Validators; using Nethermind.Blockchain; using Nethermind.Blockchain.Receipts; using Nethermind.Blockchain.Rewards; using Nethermind.Blockchain.Synchronization; using Nethermind.Blockchain.TxPools; using Nethermind.Blockchain.TxPools.Storages; using Nethermind.Blockchain.Validators; using Nethermind.Clique; using Nethermind.Core; using Nethermind.Core.Attributes; using Nethermind.Crypto; using Nethermind.Specs.ChainSpecStyle; using Nethermind.Evm; using Nethermind.Mining; using Nethermind.Mining.Difficulty; using Nethermind.PubSub; using Nethermind.Stats; using Nethermind.Store; using Nethermind.Store.Repositories; using Nethermind.Wallet; namespace Nethermind.Runner.Ethereum.Steps { [RunnerStepDependency(typeof(InitRlp), typeof(LoadChainspec), typeof(InitDatabase))] public class InitializeBlockchain : IStep { private readonly EthereumRunnerContext _context; public InitializeBlockchain(EthereumRunnerContext context) { _context = context; } public async Task Execute() { await InitBlockchain(); } [Todo(Improve.Refactor, "Use chain spec for all chain configuration")] private Task InitBlockchain() { Account.AccountStartNonce = _context.ChainSpec.Parameters.AccountStartNonce; _context.StateProvider = new StateProvider( _context.DbProvider.StateDb, _context.DbProvider.CodeDb, _context.LogManager); _context.EthereumEcdsa = new EthereumEcdsa(_context.SpecProvider, _context.LogManager); _context.TxPool = new TxPool( new PersistentTxStorage(_context.DbProvider.PendingTxsDb, _context.SpecProvider), Timestamper.Default, _context.EthereumEcdsa, _context.SpecProvider, _context.Config<ITxPoolConfig>(), _context.StateProvider, _context.LogManager); _context.ReceiptStorage = new PersistentReceiptStorage(_context.DbProvider.ReceiptsDb, _context.SpecProvider, _context.LogManager); _context.ChainLevelInfoRepository = new ChainLevelInfoRepository(_context.DbProvider.BlockInfosDb); _context.BlockTree = new BlockTree( _context.DbProvider.BlocksDb, _context.DbProvider.HeadersDb, _context.DbProvider.BlockInfosDb, _context.ChainLevelInfoRepository, _context.SpecProvider, _context.TxPool, _context.Config<ISyncConfig>(), _context.LogManager); // Init state if we need system calls before actual processing starts if (_context.BlockTree.Head != null) { _context.StateProvider.StateRoot = _context.BlockTree.Head.StateRoot; } _context.RecoveryStep = new TxSignaturesRecoveryStep(_context.EthereumEcdsa, _context.TxPool, _context.LogManager); _context.SnapshotManager = null; _context.StorageProvider = new StorageProvider( _context.DbProvider.StateDb, 
_context.StateProvider, _context.LogManager); // blockchain processing BlockhashProvider blockhashProvider = new BlockhashProvider( _context.BlockTree, _context.LogManager); VirtualMachine virtualMachine = new VirtualMachine( _context.StateProvider, _context.StorageProvider, blockhashProvider, _context.SpecProvider, _context.LogManager); _context.TransactionProcessor = new TransactionProcessor( _context.SpecProvider, _context.StateProvider, _context.StorageProvider, virtualMachine, _context.LogManager); _context.AdditionalBlockProcessors = InitSealEngine(); /* validation */ _context.HeaderValidator = new HeaderValidator( _context.BlockTree, _context.SealValidator, _context.SpecProvider, _context.LogManager); OmmersValidator ommersValidator = new OmmersValidator( _context.BlockTree, _context.HeaderValidator, _context.LogManager); TxValidator txValidator = new TxValidator(_context.SpecProvider.ChainId); _context.BlockValidator = new BlockValidator( txValidator, _context.HeaderValidator, ommersValidator, _context.SpecProvider, _context.LogManager); _context.TxPoolInfoProvider = new TxPoolInfoProvider(_context.StateProvider, _context.TxPool); _context.BlockProcessor = new BlockProcessor( _context.SpecProvider, _context.BlockValidator, _context.RewardCalculator, _context.TransactionProcessor, _context.DbProvider.StateDb, _context.DbProvider.CodeDb, _context.StateProvider, _context.StorageProvider, _context.TxPool, _context.ReceiptStorage, _context.LogManager, _context.AdditionalBlockProcessors); BlockchainProcessor processor = new BlockchainProcessor( _context.BlockTree, _context.BlockProcessor, _context.RecoveryStep, _context.LogManager, _context.Config<IInitConfig>().StoreReceipts); _context.BlockchainProcessor = processor; _context.BlockProcessingQueue = processor; // create shared objects between discovery and peer manager IStatsConfig statsConfig = _context.Config<IStatsConfig>(); _context.NodeStatsManager = new NodeStatsManager(statsConfig, _context.LogManager); _context.BlockchainProcessor.Start(); ISubscription subscription; if (_context.Producers.Any()) { subscription = new Subscription(_context.Producers, _context.BlockProcessor, _context.LogManager); } else { subscription = new EmptySubscription(); } _context.DisposeStack.Push(subscription); return Task.CompletedTask; } private IAdditionalBlockProcessor[] InitSealEngine() { IList<IAdditionalBlockProcessor> blockProcessors = new List<IAdditionalBlockProcessor>(); switch (_context.ChainSpec.SealEngineType) { case SealEngineType.None: _context.Sealer = NullSealEngine.Instance; _context.SealValidator = NullSealEngine.Instance; _context.RewardCalculator = NoBlockRewards.Instance; break; case SealEngineType.Clique: _context.RewardCalculator = NoBlockRewards.Instance; CliqueConfig cliqueConfig = new CliqueConfig(); cliqueConfig.BlockPeriod = _context.ChainSpec.Clique.Period; cliqueConfig.Epoch = _context.ChainSpec.Clique.Epoch; _context.SnapshotManager = new SnapshotManager(cliqueConfig, _context.DbProvider.BlocksDb, _context.BlockTree, _context.EthereumEcdsa, _context.LogManager); _context.SealValidator = new CliqueSealValidator(cliqueConfig, _context.SnapshotManager, _context.LogManager); _context.RecoveryStep = new CompositeDataRecoveryStep(_context.RecoveryStep, new AuthorRecoveryStep(_context.SnapshotManager)); if (_context.Config<IInitConfig>().IsMining) { _context.Sealer = new CliqueSealer(new BasicWallet(_context.NodeKey), cliqueConfig, _context.SnapshotManager, _context.NodeKey.Address, _context.LogManager); } else { 
_context.Sealer = NullSealEngine.Instance; } break; case SealEngineType.NethDev: _context.Sealer = NullSealEngine.Instance; _context.SealValidator = NullSealEngine.Instance; _context.RewardCalculator = NoBlockRewards.Instance; break; case SealEngineType.Ethash: _context.RewardCalculator = new RewardCalculator(_context.SpecProvider); DifficultyCalculator difficultyCalculator = new DifficultyCalculator(_context.SpecProvider); if (_context.Config<IInitConfig>().IsMining) { _context.Sealer = new EthashSealer(new Ethash(_context.LogManager), _context.LogManager); } else { _context.Sealer = NullSealEngine.Instance; } _context.SealValidator = new EthashSealValidator(_context.LogManager, difficultyCalculator, _context.CryptoRandom, new Ethash(_context.LogManager)); break; case SealEngineType.AuRa: AbiEncoder abiEncoder = new AbiEncoder(); _context.ValidatorStore = new ValidatorStore(_context.DbProvider.BlockInfosDb); IAuRaValidatorProcessor validatorProcessor = new AuRaAdditionalBlockProcessorFactory(_context.StateProvider, abiEncoder, _context.TransactionProcessor, _context.BlockTree, _context.ReceiptStorage, _context.ValidatorStore, _context.LogManager) .CreateValidatorProcessor(_context.ChainSpec.AuRa.Validators); AuRaStepCalculator auRaStepCalculator = new AuRaStepCalculator(_context.ChainSpec.AuRa.StepDuration, _context.Timestamper); _context.SealValidator = new AuRaSealValidator(_context.ChainSpec.AuRa, auRaStepCalculator, _context.ValidatorStore, _context.EthereumEcdsa, _context.LogManager); _context.RewardCalculator = new AuRaRewardCalculator(_context.ChainSpec.AuRa, abiEncoder, _context.TransactionProcessor); _context.Sealer = new AuRaSealer(_context.BlockTree, validatorProcessor, _context.ValidatorStore, auRaStepCalculator, _context.NodeKey.Address, new BasicWallet(_context.NodeKey), new ValidSealerStrategy(), _context.LogManager); blockProcessors.Add(validatorProcessor); break; default: throw new NotSupportedException($"Seal engine type {_context.ChainSpec.SealEngineType} is not supported in Nethermind"); } return blockProcessors.ToArray(); } } }
1
23,154
We can just use the read-only env here, pretty sure.
NethermindEth-nethermind
.cs
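For context, the change in the diff above hands the AuRa validator processor a factory that builds transaction processors over a read-only view of the databases, presumably so contract calls made during validation do not mutate live state. The review comment suggests an existing read-only environment could supply this wiring instead of assembling it by hand. The factory construction itself, copied from the diff and annotated (the comments are editorial, not from the source):

// A transaction-processor factory backed by a read-only DB view,
// passed into AuRaAdditionalBlockProcessorFactory alongside the
// regular _context.TransactionProcessor.
ITransactionProcessorFactory readOnlyTransactionProcessorFactory =
    new ReadOnlyTransactionProcessorFactory(
        new ReadOnlyDbProvider(_context.DbProvider, false), // flag value taken from the diff as-is
        _context.BlockTree,
        _context.SpecProvider,
        _context.LogManager);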
@@ -91,6 +91,7 @@ class MergeCells extends BasePlugin { this.autofillCalculations = new AutofillCalculations(this); this.selectionCalculations = new SelectionCalculations(); + this.hot.selection.transformation.addLocalHook('afterTransformStart', (...args) => this.onAfterLocalTransformStart(...args)); this.addHook('afterInit', (...args) => this.onAfterInit(...args)); this.addHook('beforeKeyDown', (...args) => this.onBeforeKeyDown(...args)); this.addHook('modifyTransformStart', (...args) => this.onModifyTransformStart(...args));
1
import BasePlugin from './../_base'; import Hooks from './../../pluginHooks'; import {registerPlugin} from './../../plugins'; import {stopImmediatePropagation} from './../../helpers/dom/event'; import {CellCoords, CellRange} from './../../3rdparty/walkontable/src'; import MergedCellsCollection from './cellsCollection'; import MergedCellCoords from './cellCoords'; import AutofillCalculations from './calculations/autofill'; import SelectionCalculations from './calculations/selection'; import toggleMergeItem from './contextMenuItem/toggleMerge'; import {arrayEach} from '../../helpers/array'; import {clone} from '../../helpers/object'; import {rangeEach} from '../../helpers/number'; import {applySpanProperties} from './utils'; import './mergeCells.css'; Hooks.getSingleton().register('beforeMergeCells'); Hooks.getSingleton().register('afterMergeCells'); Hooks.getSingleton().register('beforeUnmergeCells'); Hooks.getSingleton().register('afterUnmergeCells'); const privatePool = new WeakMap(); /** * @plugin MergeCells * * @description Plugin, which allows merging cells in the table (using the initial configuration, API or context menu). * * @example * * ```js * ... * let hot = new Handsontable(document.getElementById('example'), { * data: getData(), * mergeCells: [ * {row: 0, col: 3, rowspan: 3, colspan: 3}, * {row: 2, col: 6, rowspan: 2, colspan: 2}, * {row: 4, col: 8, rowspan: 3, colspan: 3} * ], * ... * ``` */ class MergeCells extends BasePlugin { constructor(hotInstance) { super(hotInstance); privatePool.set(this, { lastDesiredCoords: null }); /** * A container for all the merged cells. * * @type {MergedCellsCollection} */ this.mergedCellsCollection = null; /** * Instance of the class responsible for all the autofill-related calculations. * * @private * @type {AutofillCalculations} */ this.autofillCalculations = null; /** * Instance of the class responsible for the selection-related calculations. * * @private * @type {SelectionCalculations} */ this.selectionCalculations = null; } /** * Check if the plugin is enabled in the Handsontable settings. * * @returns {Boolean} */ isEnabled() { return !!this.hot.getSettings().mergeCells; } /** * Enable the plugin. 
*/ enablePlugin() { if (this.enabled) { return; } this.mergedCellsCollection = new MergedCellsCollection(this); this.autofillCalculations = new AutofillCalculations(this); this.selectionCalculations = new SelectionCalculations(); this.addHook('afterInit', (...args) => this.onAfterInit(...args)); this.addHook('beforeKeyDown', (...args) => this.onBeforeKeyDown(...args)); this.addHook('modifyTransformStart', (...args) => this.onModifyTransformStart(...args)); this.addHook('modifyTransformEnd', (...args) => this.onModifyTransformEnd(...args)); this.addHook('modifyGetCellCoords', (...args) => this.onModifyGetCellCoords(...args)); this.addHook('beforeSetRangeEnd', (...args) => this.onBeforeSetRangeEnd(...args)); this.addHook('afterIsMultipleSelection', (...args) => this.onAfterIsMultipleSelection(...args)); this.addHook('afterRenderer', (...args) => this.onAfterRenderer(...args)); this.addHook('afterContextMenuDefaultOptions', (...args) => this.addMergeActionsToContextMenu(...args)); this.addHook('afterGetCellMeta', (...args) => this.onAfterGetCellMeta(...args)); this.addHook('afterViewportRowCalculatorOverride', (...args) => this.onAfterViewportRowCalculatorOverride(...args)); this.addHook('afterViewportColumnCalculatorOverride', (...args) => this.onAfterViewportColumnCalculatorOverride(...args)); this.addHook('modifyAutofillRange', (...args) => this.onModifyAutofillRange(...args)); this.addHook('afterCreateCol', (...args) => this.onAfterCreateCol(...args)); this.addHook('afterRemoveCol', (...args) => this.onAfterRemoveCol(...args)); this.addHook('afterCreateRow', (...args) => this.onAfterCreateRow(...args)); this.addHook('afterRemoveRow', (...args) => this.onAfterRemoveRow(...args)); this.addHook('afterChange', (...args) => this.onAfterChange(...args)); this.addHook('beforeDrawBorders', (...args) => this.onBeforeDrawAreaBorders(...args)); super.enablePlugin(); } /** * Disable the plugin. */ disablePlugin() { this.clearCollections(); this.hot.render(); super.disablePlugin(); } /** * Update the plugin (after using the `updateSettings` method) */ updatePlugin() { const settings = this.hot.getSettings().mergeCells; this.clearCollections(); this.disablePlugin(); this.enablePlugin(); this.generateFromSettings(settings); super.updatePlugin(); } /** * Validate a single setting object, represented by a single merged cell information object. * * @private * @param {Object} setting An object with `row`, `col`, `rowspan` and `colspan` properties. * @return {Boolean} */ validateSetting(setting) { let valid = true; if (!setting) { return false; } if (MergedCellCoords.containsNegativeValues(setting)) { console.warn(MergedCellCoords.NEGATIVE_VALUES_WARNING(setting)); valid = false; } else if (MergedCellCoords.isOutOfBounds(setting, this.hot.countRows(), this.hot.countCols())) { console.warn(MergedCellCoords.IS_OUT_OF_BOUNDS_WARNING(setting)); valid = false; } else if (MergedCellCoords.isSingleCell(setting)) { console.warn(MergedCellCoords.IS_SINGLE_CELL(setting)); valid = false; } else if (MergedCellCoords.containsZeroSpan(setting)) { console.warn(MergedCellCoords.ZERO_SPAN_WARNING(setting)); valid = false; } return valid; } /** * Generate the merged cells from the settings provided to the plugin. * * @private * @param {Array|Boolean} settings The settings provided to the plugin. 
*/ generateFromSettings(settings) { if (Array.isArray(settings)) { let populationArgumentsList = []; arrayEach(settings, (setting) => { if (!this.validateSetting(setting)) { return; } const highlight = new CellCoords(setting.row, setting.col); const rangeEnd = new CellCoords(setting.row + setting.rowspan - 1, setting.col + setting.colspan - 1); const mergeRange = new CellRange(highlight, highlight, rangeEnd); populationArgumentsList.push(this.mergeRange(mergeRange, true, true)); }); // remove 'empty' setting objects, caused by improper merge range declarations populationArgumentsList = populationArgumentsList.filter((value) => value !== true); const bulkPopulationData = this.getBulkCollectionData(populationArgumentsList); this.hot.populateFromArray(...bulkPopulationData); } } /** * Generates a bulk set of all the data to be populated to fill the data "under" the added merged cells. * * @private * @param {Array} populationArgumentsList Array in a form of `[row, column, dataUnderCollection]`. * @return {Array} Array in a form of `[row, column, dataOfAllCollections]`. */ getBulkCollectionData(populationArgumentsList) { const populationDataRange = this.getBulkCollectionDataRange(populationArgumentsList); const dataAtRange = this.hot.getData(...populationDataRange); const newDataAtRange = dataAtRange.splice(0); arrayEach(populationArgumentsList, (mergedCellArguments) => { const [mergedCellRowIndex, mergedCellColumnIndex, mergedCellData] = mergedCellArguments; arrayEach(mergedCellData, (mergedCellRow, rowIndex) => { arrayEach(mergedCellRow, (mergedCellElement, columnIndex) => { newDataAtRange[mergedCellRowIndex - populationDataRange[0] + rowIndex][mergedCellColumnIndex - populationDataRange[1] + columnIndex] = mergedCellElement; }); }); }); return [populationDataRange[0], populationDataRange[1], newDataAtRange]; } /** * Get the range of combined data ranges provided in a form of an array of arrays ([row, column, dataUnderCollection]) * * @private * @param {Array} populationArgumentsList Array containing argument lists for the `populateFromArray` method - row, column and data for population. * @return {Array[]} Start and end coordinates of the merged cell range. (in a form of [rowIndex, columnIndex]) */ getBulkCollectionDataRange(populationArgumentsList) { let start = [0, 0]; let end = [0, 0]; let mergedCellRow = null; let mergedCellColumn = null; let mergedCellData = null; arrayEach(populationArgumentsList, (mergedCellArguments) => { mergedCellRow = mergedCellArguments[0]; mergedCellColumn = mergedCellArguments[1]; mergedCellData = mergedCellArguments[2]; start[0] = Math.min(mergedCellRow, start[0]); start[1] = Math.min(mergedCellColumn, start[1]); end[0] = Math.max(mergedCellRow + mergedCellData.length - 1, end[0]); end[1] = Math.max(mergedCellColumn + mergedCellData[0].length - 1, end[1]); }); return [...start, ...end]; } /** * Clear the merged cells from the merged cell container. */ clearCollections() { this.mergedCellsCollection.clear(); } /** * Returns `true` if a range is mergeable. * * @private * @param {Object} newMergedCellInfo Merged cell information object to test. * @param {Boolean} [auto=false] `true` if triggered at initialization. * @returns {Boolean} */ canMergeRange(newMergedCellInfo, auto = false) { return auto ? true : this.validateSetting(newMergedCellInfo); } /** * Merge or unmerge, based on last selected range. 
* * @private */ toggleMergeOnSelection() { const currentRange = this.hot.getSelectedRangeLast(); if (!currentRange) { return; } currentRange.setDirection('NW-SE'); const {from, to} = currentRange; this.toggleMerge(currentRange); this.hot.selectCell(from.row, from.col, to.row, to.col, false); } /** * Merge the selection provided as a cell range. * * @param {CellRange} [cellRange] Selection cell range. */ mergeSelection(cellRange = this.hot.getSelectedRangeLast()) { if (!cellRange) { return; } cellRange.setDirection('NW-SE'); const {from, to} = cellRange; this.unmergeRange(cellRange, true); this.mergeRange(cellRange); this.hot.selectCell(from.row, from.col, to.row, to.col, false); } /** * Unmerge the selection provided as a cell range. * * @param {CellRange} [cellRange] Selection cell range. */ unmergeSelection(cellRange = this.hot.getSelectedRangeLast()) { if (!cellRange) { return; } const {from, to} = cellRange; this.unmergeRange(cellRange, true); this.hot.selectCell(from.row, from.col, to.row, to.col, false); } /** * Merge cells in the provided cell range. * * @private * @param {CellRange} cellRange Cell range to merge. * @param {Boolean} [auto=false] `true` if is called automatically, e.g. at initialization. * @param {Boolean} [preventPopulation=false] `true`, if the method should not run `populateFromArray` at the end, but rather return its arguments. * @returns {Array|Boolean} Returns an array of [row, column, dataUnderCollection] if preventPopulation is set to true. If the the merging process went successful, it returns `true`, otherwise - `false`. * @fires Hooks#beforeMergeCells * @fires Hooks#afterMergeCells */ mergeRange(cellRange, auto = false, preventPopulation = false) { const topLeft = cellRange.getTopLeftCorner(); const bottomRight = cellRange.getBottomRightCorner(); const mergeParent = { row: topLeft.row, col: topLeft.col, rowspan: bottomRight.row - topLeft.row + 1, colspan: bottomRight.col - topLeft.col + 1 }; const clearedData = []; let populationInfo = null; if (!this.canMergeRange(mergeParent, auto)) { return false; } this.hot.runHooks('beforeMergeCells', cellRange, auto); rangeEach(0, mergeParent.rowspan - 1, (i) => { rangeEach(0, mergeParent.colspan - 1, (j) => { let clearedValue = null; if (!clearedData[i]) { clearedData[i] = []; } if (i === 0 && j === 0) { clearedValue = this.hot.getDataAtCell(mergeParent.row, mergeParent.col); } else { this.hot.setCellMeta(mergeParent.row + i, mergeParent.col + j, 'hidden', true); } clearedData[i][j] = clearedValue; }); }); this.hot.setCellMeta(mergeParent.row, mergeParent.col, 'spanned', true); let mergedCellAdded = this.mergedCellsCollection.add(mergeParent); if (mergedCellAdded) { if (preventPopulation) { populationInfo = [mergeParent.row, mergeParent.col, clearedData]; } else { this.hot.populateFromArray(mergeParent.row, mergeParent.col, clearedData, void 0, void 0, this.pluginName); } this.hot.runHooks('afterMergeCells', cellRange, mergeParent, auto); return populationInfo; } return true; } /** * Unmerge the selection provided as a cell range. If no cell range is provided, it uses the current selection. * * @private * @param {CellRange} cellRange Selection cell range. * @param {Boolean} [auto=false] `true` if called automatically by the plugin. 
*/ unmergeRange(cellRange, auto = false) { const mergedCells = this.mergedCellsCollection.getWithinRange(cellRange); if (!mergedCells) { return; } this.hot.runHooks('beforeUnmergeCells', cellRange, auto); arrayEach(mergedCells, (currentCollection) => { this.mergedCellsCollection.remove(currentCollection.row, currentCollection.col); rangeEach(0, currentCollection.rowspan - 1, (i) => { rangeEach(0, currentCollection.colspan - 1, (j) => { this.hot.removeCellMeta(currentCollection.row + i, currentCollection.col + j, 'hidden'); }); }); this.hot.removeCellMeta(currentCollection.row, currentCollection.col, 'spanned'); }); this.hot.render(); this.hot.runHooks('afterUnmergeCells', cellRange, auto); } /** * Merge or unmerge, based on the cell range provided as `cellRange`. * * @private * @param {CellRange} cellRange The cell range to merge or unmerged. */ toggleMerge(cellRange) { const mergedCell = this.mergedCellsCollection.get(cellRange.from.row, cellRange.from.col); const mergedCellCoversWholeRange = mergedCell.row === cellRange.from.row && mergedCell.col === cellRange.from.col && mergedCell.row + mergedCell.rowspan - 1 === cellRange.to.row && mergedCell.col + mergedCell.colspan - 1 === cellRange.to.col; if (mergedCellCoversWholeRange) { this.unmergeRange(cellRange); } else { this.mergeSelection(cellRange); } } /** * Merge the specified range. * * @param {Number} startRow Start row of the merged cell. * @param {Number} startColumn Start column of the merged cell. * @param {Number} endRow End row of the merged cell. * @param {Number} endColumn End column of the merged cell. */ merge(startRow, startColumn, endRow, endColumn) { const start = new CellCoords(startRow, startColumn); const end = new CellCoords(endRow, endColumn); this.mergeRange(new CellRange(start, start, end)); } /** * Unmerge the merged cell in the provided range. * * @param {Number} startRow Start row of the merged cell. * @param {Number} startColumn Start column of the merged cell. * @param {Number} endRow End row of the merged cell. * @param {Number} endColumn End column of the merged cell. */ unmerge(startRow, startColumn, endRow, endColumn) { const start = new CellCoords(startRow, startColumn); const end = new CellCoords(endRow, endColumn); this.unmergeRange(new CellRange(start, start, end)); } /** * `afterInit` hook callback. * * @private */ onAfterInit() { this.generateFromSettings(this.hot.getSettings().mergeCells); this.hot.render(); } /** * `beforeKeyDown` hook callback. * * @private * @param {KeyboardEvent} event The `keydown` event object. */ onBeforeKeyDown(event) { const ctrlDown = (event.ctrlKey || event.metaKey) && !event.altKey; if (ctrlDown && event.keyCode === 77) { // CTRL + M this.toggleMerge(this.hot.getSelectedRangeLast()); this.hot.render(); stopImmediatePropagation(event); } } /** * Modify the information on whether the current selection contains multiple cells. The `afterIsMultipleSelection` hook callback. 
* * @private * @param {Boolean} isMultiple * @returns {Boolean} */ onAfterIsMultipleSelection(isMultiple) { if (isMultiple) { let mergedCells = this.mergedCellsCollection.mergedCells; let selectionRange = this.hot.getSelectedRangeLast(); for (let group = 0; group < mergedCells.length; group += 1) { if (selectionRange.highlight.row === mergedCells[group].row && selectionRange.highlight.col === mergedCells[group].col && selectionRange.to.row === mergedCells[group].row + mergedCells[group].rowspan - 1 && selectionRange.to.col === mergedCells[group].col + mergedCells[group].colspan - 1) { return false; } } } return isMultiple; } /** * `modifyTransformStart` hook callback. * * @private * @param {Object} delta The transformation delta. */ onModifyTransformStart(delta) { const priv = privatePool.get(this); const currentlySelectedRange = this.hot.getSelectedRangeLast(); let newDelta = { row: delta.row, col: delta.col, }; let nextPosition = null; let currentPosition = new CellCoords(currentlySelectedRange.highlight.row, currentlySelectedRange.highlight.col); let mergedParent = this.mergedCellsCollection.get(currentPosition.row, currentPosition.col); if (!priv.lastDesiredCoords) { priv.lastDesiredCoords = new CellCoords(null, null); } if (mergedParent) { // only merge selected let mergeTopLeft = new CellCoords(mergedParent.row, mergedParent.col); let mergeBottomRight = new CellCoords(mergedParent.row + mergedParent.rowspan - 1, mergedParent.col + mergedParent.colspan - 1); let mergeRange = new CellRange(mergeTopLeft, mergeTopLeft, mergeBottomRight); if (!mergeRange.includes(priv.lastDesiredCoords)) { priv.lastDesiredCoords = new CellCoords(null, null); // reset outdated version of lastDesiredCoords } newDelta.row = priv.lastDesiredCoords.row ? priv.lastDesiredCoords.row - currentPosition.row : newDelta.row; newDelta.col = priv.lastDesiredCoords.col ? priv.lastDesiredCoords.col - currentPosition.col : newDelta.col; if (delta.row > 0) { // moving down newDelta.row = mergedParent.row + mergedParent.rowspan - 1 - currentPosition.row + delta.row; } else if (delta.row < 0) { // moving up newDelta.row = currentPosition.row - mergedParent.row + delta.row; } if (delta.col > 0) { // moving right newDelta.col = mergedParent.col + mergedParent.colspan - 1 - currentPosition.col + delta.col; } else if (delta.col < 0) { // moving left newDelta.col = currentPosition.col - mergedParent.col + delta.col; } } nextPosition = new CellCoords(currentlySelectedRange.highlight.row + newDelta.row, currentlySelectedRange.highlight.col + newDelta.col); let nextParentIsMerged = this.mergedCellsCollection.get(nextPosition.row, nextPosition.col); if (nextParentIsMerged) { // skipping the invisible cells in the merge range priv.lastDesiredCoords = nextPosition; newDelta = { row: nextParentIsMerged.row - currentPosition.row, col: nextParentIsMerged.col - currentPosition.col }; } if (newDelta.row !== 0) { delta.row = newDelta.row; } if (newDelta.col !== 0) { delta.col = newDelta.col; } } /** * `modifyTransformEnd` hook callback. Needed to handle "jumping over" merged merged cells, while selecting. * * @private * @param {Object} delta The transformation delta. 
*/ onModifyTransformEnd(delta) { let currentSelectionRange = this.hot.getSelectedRangeLast(); let newDelta = clone(delta); let newSelectionRange = this.selectionCalculations.getUpdatedSelectionRange(currentSelectionRange, delta); let tempDelta = clone(newDelta); const mergedCellsWithinRange = this.mergedCellsCollection.getWithinRange(newSelectionRange, true); do { tempDelta = clone(newDelta); this.selectionCalculations.getUpdatedSelectionRange(currentSelectionRange, newDelta); arrayEach(mergedCellsWithinRange, (mergedCell) => { this.selectionCalculations.snapDelta(newDelta, currentSelectionRange, mergedCell); }); } while (newDelta.row !== tempDelta.row || newDelta.col !== tempDelta.col); delta.row = newDelta.row; delta.col = newDelta.col; } /** * `modifyGetCellCoords` hook callback. Swaps the `getCell` coords with the merged parent coords. * * @private * @param {Number} row Row index. * @param {Number} column Column index. * @returns {Array} */ onModifyGetCellCoords(row, column) { const mergeParent = this.mergedCellsCollection.get(row, column); return mergeParent ? [ mergeParent.row, mergeParent.col, mergeParent.row + mergeParent.rowspan - 1, mergeParent.col + mergeParent.colspan - 1] : void 0; } /** * `afterContextMenuDefaultOptions` hook callback. * * @private * @param {Object} defaultOptions The default context menu options. */ addMergeActionsToContextMenu(defaultOptions) { defaultOptions.items.push( { name: '---------', }, toggleMergeItem(this) ); } /** * `afterRenderer` hook callback. * * @private * @param {HTMLElement} TD The cell to be modified. * @param {Number} row Row index. * @param {Number} col Column index. */ onAfterRenderer(TD, row, col) { let mergedCell = this.mergedCellsCollection.get(row, col); applySpanProperties(TD, mergedCell, row, col); } /** * `beforeSetRangeEnd` hook callback. * While selecting cells with keyboard or mouse, make sure that rectangular area is expanded to the extent of the merged cell * * @private * @param {Object} coords Cell coords. */ onBeforeSetRangeEnd(coords) { let selRange = this.hot.getSelectedRangeLast(); selRange.highlight = new CellCoords(selRange.highlight.row, selRange.highlight.col); // clone in case we will modify its reference selRange.to = coords; let rangeExpanded = false; if ((selRange.from.row === 0 && selRange.to.row === this.hot.countRows() - 1) || (selRange.from.col === 0 && selRange.to.col === this.hot.countCols() - 1)) { return; } do { rangeExpanded = false; for (let i = 0; i < this.mergedCellsCollection.mergedCells.length; i++) { let cellInfo = this.mergedCellsCollection.mergedCells[i]; let mergedCellRange = cellInfo.getRange(); if (selRange.expandByRange(mergedCellRange)) { coords.row = selRange.to.row; coords.col = selRange.to.col; rangeExpanded = true; } } } while (rangeExpanded); } /** * The `afterGetCellMeta` hook callback. * * @private * @param {Number} row Row index. * @param {Number} col Column index. * @param {Object} cellProperties The cell properties object. */ onAfterGetCellMeta(row, col, cellProperties) { let mergeParent = this.mergedCellsCollection.get(row, col); if (mergeParent && (mergeParent.row !== row || mergeParent.col !== col)) { cellProperties.copyable = false; } } /** * `afterViewportRowCalculatorOverride` hook callback. * * @private * @param {Object} calc The row calculator object. 
*/ onAfterViewportRowCalculatorOverride(calc) { let colCount = this.hot.countCols(); let mergeParent; rangeEach(0, colCount - 1, (c) => { mergeParent = this.mergedCellsCollection.get(calc.startRow, c); if (mergeParent) { if (mergeParent.row < calc.startRow) { calc.startRow = mergeParent.row; return this.onAfterViewportRowCalculatorOverride.call(this, calc); // recursively search upwards } } mergeParent = this.mergedCellsCollection.get(calc.endRow, c); if (mergeParent) { let mergeEnd = mergeParent.row + mergeParent.rowspan - 1; if (mergeEnd > calc.endRow) { calc.endRow = mergeEnd; return this.onAfterViewportRowCalculatorOverride.call(this, calc); // recursively search upwards } } return true; }); } /** * `afterViewportColumnCalculatorOverride` hook callback. * * @private * @param {Object} calc The column calculator object. */ onAfterViewportColumnCalculatorOverride(calc) { let rowCount = this.hot.countRows(); let mergeParent; rangeEach(0, rowCount - 1, (r) => { mergeParent = this.mergedCellsCollection.get(r, calc.startColumn); if (mergeParent && mergeParent.col < calc.startColumn) { calc.startColumn = mergeParent.col; return this.onAfterViewportColumnCalculatorOverride.call(this, calc); // recursively search upwards } mergeParent = this.mergedCellsCollection.get(r, calc.endColumn); if (mergeParent) { let mergeEnd = mergeParent.col + mergeParent.colspan - 1; if (mergeEnd > calc.endColumn) { calc.endColumn = mergeEnd; return this.onAfterViewportColumnCalculatorOverride.call(this, calc); // recursively search upwards } } return true; }); } /** * The `modifyAutofillRange` hook callback. * * @private * @param {Array} drag The drag area coordinates. * @param {Array} select The selection information. * @return {Array} The new drag area. */ onModifyAutofillRange(drag, select) { this.autofillCalculations.correctSelectionAreaSize(select); const dragDirection = this.autofillCalculations.getDirection(select, drag); if (this.autofillCalculations.dragAreaOverlapsCollections(select, drag, dragDirection)) { drag = select; return drag; } const mergedCellsWithinSelectionArea = this.mergedCellsCollection.getWithinRange({ from: {row: select[0], col: select[1]}, to: {row: select[2], col: select[3]} }); if (!mergedCellsWithinSelectionArea) { return drag; } drag = this.autofillCalculations.snapDragArea(select, drag, dragDirection, mergedCellsWithinSelectionArea); return drag; } /** * `afterCreateCol` hook callback. * * @private * @param {Number} column Column index. * @param {Number} count Number of created columns. */ onAfterCreateCol(column, count) { this.mergedCellsCollection.shiftCollections('right', column, count); } /** * `afterRemoveCol` hook callback. * * @private * @param {Number} column Column index. * @param {Number} count Number of removed columns. */ onAfterRemoveCol(column, count) { this.mergedCellsCollection.shiftCollections('left', column, count); } /** * `afterCreateRow` hook callback. * * @private * @param {Number} row Row index. * @param {Number} count Number of created rows. * @param {String} source Source of change. */ onAfterCreateRow(row, count, source) { if (source === 'auto') { return; } this.mergedCellsCollection.shiftCollections('down', row, count); } /** * `afterRemoveRow` hook callback. * * @private * @param {Number} row Row index. * @param {Number} count Number of removed rows. */ onAfterRemoveRow(row, count) { this.mergedCellsCollection.shiftCollections('up', row, count); } /** * `afterChange` hook callback. Used to propagate merged cells after using Autofill. 
* * @private * @param {Array} changes The changes array. * @param {String} source Determines the source of the change. */ onAfterChange(changes, source) { if (source !== 'Autofill.fill') { return; } this.autofillCalculations.recreateAfterDataPopulation(changes); } /** * `beforeDrawAreaBorders` hook callback. * * @private * @param {Array} corners Coordinates of the area corners. * @param {String} className Class name for the area. */ onBeforeDrawAreaBorders(corners, className) { if (className && className === 'area') { const selectedRange = this.hot.getSelectedRangeLast(); const mergedCellsWithinRange = this.mergedCellsCollection.getWithinRange(selectedRange); arrayEach(mergedCellsWithinRange, (mergedCell) => { if (selectedRange.getBottomRightCorner().row === mergedCell.getLastRow() && selectedRange.getBottomRightCorner().col === mergedCell.getLastColumn()) { corners[2] = mergedCell.row; corners[3] = mergedCell.col; } }); } } } registerPlugin('mergeCells', MergeCells); export default MergeCells;
1
14,685
Could you replace the local hook with the global hook `afterModifyTransformStart`?
handsontable-handsontable
js
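The suggestion in that review comment, sketched against the plugin's existing conventions: instead of reaching into `this.hot.selection.transformation` and attaching a local hook, register through the plugin's own `addHook` mechanism, assuming a global `afterModifyTransformStart` hook exists as the comment implies. The handler name below is a hypothetical rename of `onAfterLocalTransformStart`:

// Hedged sketch, following the addHook pattern used throughout enablePlugin():
this.addHook('afterModifyTransformStart', (...args) => this.onAfterModifyTransformStart(...args));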
@@ -854,3 +854,10 @@ class FileArchive(Archive): def listing(self): "Return a list of filename entries currently in the archive" return ['.'.join([f,ext]) if ext else f for (f,ext) in self._files.keys()] + + def clear(self): + "Clears the file archive" + self._files.clear() + + +
1
""" Module defining input/output interfaces to HoloViews. There are two components for input/output: Exporters: Process (composite) HoloViews objects one at a time. For instance, an exporter may render a HoloViews object as a svg or perhaps pickle it. Archives: A collection of HoloViews objects that are first collected then processed together. For instance, collecting HoloViews objects for a report then generating a PDF or collecting HoloViews objects to dump to HDF5. """ from __future__ import absolute_import import re, os, time, string, zipfile, tarfile, shutil, itertools, pickle from collections import defaultdict from io import BytesIO from hashlib import sha256 import param from param.parameterized import bothmethod from .dimension import LabelledData from .element import Collator, Element from .overlay import Overlay, Layout from .ndmapping import OrderedDict, NdMapping, UniformNdMapping from .options import Store from .util import unique_iterator, group_sanitizer, label_sanitizer def sanitizer(name, replacements=[(':','_'), ('/','_'), ('\\','_')]): """ String sanitizer to avoid problematic characters in filenames. """ for old,new in replacements: name = name.replace(old,new) return name class Reference(param.Parameterized): """ A Reference allows access to an object to be deferred until it is needed in the appropriate context. References are used by Collector to capture the state of an object at collection time. One particularly important property of references is that they should be pickleable. This means that you can pickle Collectors so that you can unpickle them in different environments and still collect from the required object. A Reference only needs to have a resolved_type property and a resolve method. The constructor will take some specification of where to find the target object (may be the object itself). """ @property def resolved_type(self): """ Returns the type of the object resolved by this references. If multiple types are possible, the return is a tuple of types. """ raise NotImplementedError def resolve(self, container=None): """ Return the referenced object. Optionally, a container may be passed in from which the object is to be resolved. """ raise NotImplementedError class Exporter(param.ParameterizedFunction): """ An Exporter is a parameterized function that accepts a HoloViews object and converts it to a new some new format. This mechanism is designed to be very general so here are a few examples: Pickling: Native Python, supported by HoloViews. Rendering: Any plotting backend may be used (default uses matplotlib) Storage: Saving to a database (e.g SQL), HDF5 etc. """ # Mime-types that need encoding as utf-8 upon export utf8_mime_types = ['image/svg+xml', 'text/html', 'text/json'] key_fn = param.Callable(doc=""" Function that generates the metadata key from the HoloViews object being saved. The metadata key is a single high-dimensional key of values associated with dimension labels. The returned dictionary must have string keys and simple literals that may be conviently used for dictionary-style indexing. Returns an empty dictionary by default.""") info_fn = param.Callable(lambda x: {'repr':repr(x)}, doc=""" Function that generates additional metadata information from the HoloViews object being saved. Unlike metadata keys, the information returned may be unsuitable for use as a key index and may include entries such as the object's repr. Regardless, the info metadata should still only contain items that will be quick to load and inspect. 
""") @classmethod def encode(cls, entry): """ Classmethod that applies conditional encoding based on mime-type. Given an entry as returned by __call__ return the data in the appropriate encoding. """ (data, info) = entry if info['mime_type'] in cls.utf8_mime_types: return data.encode('utf-8') else: return data @bothmethod def _filename(self_or_cls, filename): "Add the file extension if not already present" if not filename.endswith(self_or_cls.file_ext): return '%s.%s' % (filename, self_or_cls.file_ext) else: return filename @bothmethod def _merge_metadata(self_or_cls, obj, fn, *dicts): """ Returns a merged metadata info dictionary from the supplied function and additional dictionaries """ merged = dict([(k,v) for d in dicts for (k,v) in d.items()]) return dict(merged, **fn(obj)) if fn else merged def __call__(self, obj, fmt=None): """ Given a HoloViews object return the raw exported data and corresponding metadata as the tuple (data, metadata). The metadata should include: 'file-ext' : The file extension if applicable (else empty string) 'mime_type': The mime-type of the data. The fmt argument may be used with exporters that support multiple output formats. If not supplied, the exporter is to pick an appropriate format automatically. """ raise NotImplementedError("Exporter not implemented.") @bothmethod def save(self_or_cls, obj, basename, fmt=None, key={}, info={}, **kwargs): """ Similar to the call method except saves exporter data to disk into a file with specified basename. For exporters that support multiple formats, the fmt argument may also be supplied (which typically corresponds to the file-extension). The supplied metadata key and info dictionaries will be used to update the output of the relevant key and info functions which is then saved (if supported). """ raise NotImplementedError("Exporter save method not implemented.") class Importer(param.ParameterizedFunction): """ An Importer is a parameterized function that accepts some data in some format and returns a HoloViews object. This mechanism is designed to be very general so here are a few examples: Unpickling: Native Python, supported by HoloViews. Servers: Loading data over a network connection. Storage: Loading from a database (e.g SQL), HDF5 etc. """ def __call__(self, data): """ Given raw data in the appropriate format return the corresponding HoloViews object. Acts as the inverse of Exporter when supplied the data portion of an Exporter's output. """ raise NotImplementedError("Importer not implemented.") @bothmethod def load(self_or_cls, src, entries=None): """ Given some source (e.g. a filename, a network connection etc), return the loaded HoloViews object. """ raise NotImplementedError("Importer load method not implemented.") @bothmethod def loader(self_or_cls, kwargs): return self_or_cls.load(**kwargs) @bothmethod def info(self_or_cls, src): """ Returns the 'info' portion of the metadata (if available). """ raise NotImplementedError("Importer info method not implemented.") @bothmethod def key(self_or_cls, src): """ Returns the metadata key (if available). """ raise NotImplementedError("Importer keys method not implemented.") class Serializer(Exporter): "A generic exporter that supports any arbitrary serializer" serializer=param.Callable(Store.dumps, doc=""" The serializer function, set to Store.dumps by default. The serializer should take an object and output a serialization as a string or byte stream. Any suitable serializer may be used. 
For instance, pickle.dumps may be used although this will not save customized options.""") mime_type=param.String('application/python-pickle', allow_None=True, doc=""" The mime-type associated with the serializer (if applicable).""") file_ext = param.String('pkl', doc=""" The file extension associated with the corresponding file format (if applicable).""") def __call__(self, obj, **kwargs): data = self.serializer(obj) return data, {'file-ext': self.file_ext, 'mime_type':self.mime_type} @bothmethod def save(self_or_cls, obj, filename, info={}, key={}, **kwargs): data, base_info = self_or_cls(obj, **kwargs) key = self_or_cls._merge_metadata(obj, self_or_cls.key_fn, key) info = self_or_cls._merge_metadata(obj, self_or_cls.info_fn, info, base_info) metadata, _ = self_or_cls({'info':info, 'key':key}, **kwargs) filename = self_or_cls._filename(filename) with open(filename, 'ab') as f: f.write(metadata) f.write(data) class Deserializer(Importer): "A generic importer that supports any arbitrary de-serializer." deserializer=param.Callable(Store.load, doc=""" The deserializer function, set to Store.load by default. The deserializer should take a file-like object that can be read from until the first object has been deserialized. If the file has not been exhausted, the deserializer should be able to continue parsing and loading objects. Any suitable deserializer may be used. For instance, pickle.load may be used although this will not load customized options.""") def __call__(self, data): return self.deserializer(BytesIO(data)) @bothmethod def load(self_or_cls, filename): with open(filename, 'rb') as f: data = self_or_cls.deserializer(f) try: data = self_or_cls.deserializer(f) except: pass return data @bothmethod def key(self_or_cls, filename): with open(filename, "rb") as f: metadata = self_or_cls.deserializer(f) metadata = metadata if isinstance(metadata, dict) else {} return metadata.get('key', {}) @bothmethod def info(self_or_cls, filename): with open(filename, "rb") as f: metadata = self_or_cls.deserializer(f) metadata = metadata if isinstance(metadata, dict) else {} return metadata.get('info', {}) class Pickler(Exporter): """ The recommended pickler for serializing HoloViews object to a .hvz file (a simple zip archive of pickle files). In addition to the functionality offered by Store.dump and Store.load, this file format offers three additional features: 1. Optional (zip) compression. 2. Ability to save and load components of a Layout independently. 3. Support for metadata per saved component. The output file with the .hvz file extension is simply a zip archive containing pickled HoloViews objects. 
""" protocol = param.Integer(default=2, doc=""" The pickling protocol where 0 is ASCII, 1 supports old Python versions and 2 is efficient for new style classes.""") compress = param.Boolean(default=True, doc=""" Whether compression is enabled or not""") mime_type = 'application/zip' file_ext = 'hvz' def __call__(self, obj, key={}, info={}, **kwargs): buff = BytesIO() self.save(obj, buff, key=key, info=info, **kwargs) buff.seek(0) return buff.read(), {'file-ext': 'hvz', 'mime_type':self.mime_type} @bothmethod def save(self_or_cls, obj, filename, key={}, info={}, **kwargs): base_info = {'file-ext': 'hvz', 'mime_type':self_or_cls.mime_type} key = self_or_cls._merge_metadata(obj, self_or_cls.key_fn, key) info = self_or_cls._merge_metadata(obj, self_or_cls.info_fn, info, base_info) compression = zipfile.ZIP_STORED if self_or_cls.compress else zipfile.ZIP_DEFLATED filename = self_or_cls._filename(filename) if isinstance(filename, str) else filename with zipfile.ZipFile(filename, 'w', compression=compression) as f: if isinstance(obj, Layout) and not isinstance(obj, Overlay): entries = ['.'.join(k) for k in obj.data.keys()] components = list(obj.data.values()) entries = entries if len(entries) > 1 else [entries[0]+'(L)'] else: entries = ['%s.%s' % (group_sanitizer(obj.group, False), label_sanitizer(obj.label, False))] components = [obj] for component, entry in zip(components, entries): f.writestr(entry, Store.dumps(component, protocol=self_or_cls.protocol)) f.writestr('metadata', pickle.dumps({'info':info, 'key':key})) class Unpickler(Importer): """ The inverse of Pickler used to load the .hvz file format which is simply a zip archive of pickle objects. Unlike a regular pickle file, info and key metadata as well as individual components of a Layout may be loaded without needing to load the entire file into memory. The components that may be individually loaded may be found using the entries method. """ def __call__(self, data, entries=None): buff = BytesIO(data) return self.load(buff, entries=entries) @bothmethod def load(self_or_cls, filename, entries=None): components, single_layout = [], False entries = entries if entries else self_or_cls.entries(filename) with zipfile.ZipFile(filename, 'r') as f: for entry in entries: if entry not in f.namelist(): raise Exception("Entry %s not available" % entry) components.append(Store.loads(f.read(entry))) single_layout = entry.endswith('(L)') if len(components) == 1 and not single_layout: return components[0] else: return Layout(components) @bothmethod def _load_metadata(self_or_cls, filename, name): with zipfile.ZipFile(filename, 'r') as f: if 'metadata' not in f.namelist(): raise Exception("No metadata available") metadata = pickle.loads(f.read('metadata')) if name not in metadata: raise KeyError("Entry %s is missing from the metadata" % name) return metadata[name] @bothmethod def key(self_or_cls, filename): return self_or_cls._load_metadata(filename, 'key') @bothmethod def info(self_or_cls, filename): return self_or_cls._load_metadata(filename, 'info') @bothmethod def entries(self_or_cls, filename): with zipfile.ZipFile(filename, 'r') as f: return [el for el in f.namelist() if el != 'metadata'] @bothmethod def collect(self_or_cls, files, drop=[], metadata=True): """ Given a list or NdMapping type containing file paths return a Layout of Collators, which can be called to load a given set of files using the current Importer. If supplied as a list each file is expected to disambiguate itself with contained metadata. 
If an NdMapping type is supplied additional key dimensions may be supplied as long as they do not clash with the file metadata. Any key dimension may be dropped by name by supplying a drop argument. """ aslist = not isinstance(files, (NdMapping, Element)) if isinstance(files, Element): files = Collator(files) file_kdims = files.kdims else: file_kdims = files.kdims drop_extra = files.drop if isinstance(files, Collator) else [] mdata_dims = [] if metadata: fnames = [fname[0] if isinstance(fname, tuple) else fname for fname in files.values()] mdata_dims = {kdim for fname in fnames for kdim in self_or_cls.key(fname).keys()} file_dims = set(files.dimensions('key', label=True)) added_dims = set(mdata_dims) - file_dims overlap_dims = file_dims & set(mdata_dims) kwargs = dict(kdims=file_kdims + sorted(added_dims), vdims=['filename', 'entries'], value_transform=self_or_cls.loader, drop=drop_extra + drop) layout_data = defaultdict(lambda: Collator(None, **kwargs)) for key, fname in files.data.items(): fname = fname[0] if isinstance(fname, tuple) else fname mdata = self_or_cls.key(fname) if metadata else {} for odim in overlap_dims: kval = key[files.get_dimension_index(odim)] if kval != mdata[odim]: raise KeyError("Metadata supplies inconsistent " "value for dimension %s" % odim) mkey = tuple(mdata.get(d, None) for d in added_dims) key = mkey if aslist else key + mkey if isinstance(fname, tuple) and len(fname) == 1: (fname,) = fname for entry in self_or_cls.entries(fname): layout_data[entry][key] = (fname, [entry]) return Layout(layout_data.items()) class Archive(param.Parameterized): """ An Archive is a means to collect and store a collection of HoloViews objects in any number of different ways. Examples of possible archives: * Generating tar or zip files (compressed or uncompressed). * Collating a report or document (e.g. PDF, HTML, LaTex). * Storing a collection of HoloViews objects to a database or HDF5. """ exporters= param.List(default=[], doc=""" The exporter functions used to convert HoloViews objects into the appropriate format(s).""" ) def add(self, obj, *args, **kwargs): """ Add a HoloViews object to the archive. """ raise NotImplementedError def export(self,*args, **kwargs): """ Finalize and close the archive. """ raise NotImplementedError def simple_name_generator(obj): """ Simple name_generator designed for HoloViews objects. Objects are labeled with {group}-{label} for each nested object, based on a depth-first search. Adjacent objects with identical representations yield only a single copy of the representation, to avoid long names for the common case of a container whose element(s) share the same group and label. """ if isinstance(obj, LabelledData): labels = obj.traverse(lambda x: (x.group + ('-' +x.label if x.label else ''))) labels=[l[0] for l in itertools.groupby(labels)] obj_str = ','.join(labels) else: obj_str = repr(obj) return obj_str class FileArchive(Archive): """ A file archive stores files on disk, either unpacked in a directory or in an archive format (e.g. a zip file). """ exporters= param.List(default=[Pickler], doc=""" The exporter functions used to convert HoloViews objects into the appropriate format(s).""") dimension_formatter = param.String("{name}_{range}", doc=""" A string formatter for the output file based on the supplied HoloViews objects dimension names and values. 
Valid fields are the {name}, {range} and {unit} of the dimensions.""") object_formatter = param.Callable(default=simple_name_generator, doc=""" Callable that given an object returns a string suitable for inclusion in file and directory names. This is what generates the value used in the {obj} field of the filename formatter.""") filename_formatter = param.String('{dimensions},{obj}', doc=""" A string formatter for output filename based on the HoloViews object that is being rendered to disk. The available fields are the {type}, {group}, {label}, {obj} of the holoviews object added to the archive as well as {timestamp}, {obj} and {SHA}. The {timestamp} is the export timestamp using timestamp_format, {obj} is the object representation as returned by object_formatter and {SHA} is the SHA of the {obj} value used to compress it into a shorter string.""") timestamp_format = param.String("%Y_%m_%d-%H_%M_%S", doc=""" The timestamp format that will be substituted for the {timestamp} field in the export name.""") root = param.String('.', doc=""" The root directory in which the output directory is located. May be an absolute or relative path.""") archive_format = param.ObjectSelector('zip', objects=['zip', 'tar'], doc=""" The archive format to use if there are multiple files and pack is set to True. Supported formats include 'zip' and 'tar'.""") pack = param.Boolean(default=False, doc=""" Whether or not to pack to contents into the specified archive format. If pack is False, the contents will be output to a directory. Note that if there is only a single file in the archive, no packing will occur and no directory is created. Instead, the file is treated as a single-file archive.""") export_name = param.String(default='{timestamp}', doc=""" The name assigned to the overall export. If an archive file is used, this is the correspond filename (e.g of the exporter zip file). Alternatively, if unpack=False, this is the name of the output directory. Lastly, for archives of a single file, this is the basename of the output file. The {timestamp} field is available to include the timestamp at the time of export in the chosen timestamp format.""") unique_name = param.Boolean(default=False, doc=""" Whether the export name should be made unique with a numeric suffix. If set to False, any existing export of the same name will be removed and replaced.""") max_filename = param.Integer(default=100, bounds=(0,None), doc=""" Maximum length to enforce on generated filenames. 100 is the practical maximum for zip and tar file generation, but you may wish to use a lower value to avoid long filenames.""") flush_archive = param.Boolean(default=True, doc=""" Flushed the contents of the archive after export. 
""") ffields = {'type', 'group', 'label', 'obj', 'SHA', 'timestamp', 'dimensions'} efields = {'timestamp'} @classmethod def parse_fields(cls, formatter): "Returns the format fields otherwise raise exception" if formatter is None: return [] try: parse = list(string.Formatter().parse(formatter)) return set(f for f in list(zip(*parse))[1] if f is not None) except: raise SyntaxError("Could not parse formatter %r" % formatter) def __init__(self, **params): super(FileArchive, self).__init__(**params) # Items with key: (basename,ext) and value: (data, info) self._files = OrderedDict() self._validate_formatters() def _dim_formatter(self, obj): if not obj: return '' key_dims = obj.traverse(lambda x: x.kdims, [UniformNdMapping]) constant_dims = obj.traverse(lambda x: x.cdims) dims = [] map(dims.extend, key_dims + constant_dims) dims = unique_iterator(dims) dim_strings = [] for dim in dims: lower, upper = obj.range(dim.name) lower, upper = (dim.pprint_value(lower), dim.pprint_value(upper)) if lower == upper: range = dim.pprint_value(lower) else: range = "%s-%s" % (lower, upper) formatters = {'name': dim.name, 'range': range, 'unit': dim.unit} dim_strings.append(self.dimension_formatter.format(**formatters)) return '_'.join(dim_strings) def _validate_formatters(self): if not self.parse_fields(self.filename_formatter).issubset(self.ffields): raise Exception("Valid filename fields are: %s" % ','.join(sorted(self.ffields))) elif not self.parse_fields(self.export_name).issubset(self.efields): raise Exception("Valid export fields are: %s" % ','.join(sorted(self.efields))) try: time.strftime(self.timestamp_format, tuple(time.localtime())) except: raise Exception("Timestamp format invalid") def add(self, obj=None, filename=None, data=None, info={}, **kwargs): """ If a filename is supplied, it will be used. Otherwise, a filename will be generated from the supplied object. Note that if the explicit filename uses the {timestamp} field, it will be formatted upon export. The data to be archived is either supplied explicitly as 'data' or automatically rendered from the object. 
""" if [filename, obj] == [None, None]: raise Exception("Either filename or a HoloViews object is " "needed to create an entry in the archive.") elif obj is None and not self.parse_fields(filename).issubset({'timestamp'}): raise Exception("Only the {timestamp} formatter may be used unless an object is supplied.") elif [obj, data] == [None, None]: raise Exception("Either an object or explicit data must be " "supplied to create an entry in the archive.") elif data and 'mime_type' not in info: raise Exception("The mime-type must be supplied in the info dictionary " "when supplying data directly") self._validate_formatters() entries = [] if data is None: for exporter in self.exporters: rendered = exporter(obj) if rendered is None: continue (data, new_info) = rendered info = dict(info, **new_info) entries.append((data, info)) else: entries.append((data, info)) for (data, info) in entries: self._add_content(obj, data, info, filename=filename) def _add_content(self, obj, data, info, filename=None): (unique_key, ext) = self._compute_filename(obj, info, filename=filename) self._files[(unique_key, ext)] = (data, info) def _compute_filename(self, obj, info, filename=None): if filename is None: hashfn = sha256() obj_str = 'None' if obj is None else self.object_formatter(obj) dimensions = self._dim_formatter(obj) dimensions = dimensions if dimensions else '' hashfn.update(obj_str.encode('utf-8')) label = sanitizer(getattr(obj, 'label', 'no-label')) group = sanitizer(getattr(obj, 'group', 'no-group')) format_values = {'timestamp': '{timestamp}', 'dimensions': dimensions, 'group': group, 'label': label, 'type': obj.__class__.__name__, 'obj': sanitizer(obj_str), 'SHA': hashfn.hexdigest()} filename = self._format(self.filename_formatter, dict(info, **format_values)) filename = self._normalize_name(filename) ext = info.get('file-ext', '') (unique_key, ext) = self._unique_name(filename, ext, self._files.keys(), force=True) return (unique_key, ext) def _zip_archive(self, export_name, files, root): archname = '.'.join(self._unique_name(export_name, 'zip', root)) with zipfile.ZipFile(os.path.join(root, archname), 'w') as zipf: for (basename, ext), entry in files: filename = self._truncate_name(basename, ext) zipf.writestr(('%s/%s' % (export_name, filename)),Exporter.encode(entry)) def _tar_archive(self, export_name, files, root): archname = '.'.join(self._unique_name(export_name, 'tar', root)) with tarfile.TarFile(os.path.join(root, archname), 'w') as tarf: for (basename, ext), entry in files: filename = self._truncate_name(basename, ext) tarinfo = tarfile.TarInfo('%s/%s' % (export_name, filename)) filedata = Exporter.encode(entry) tarinfo.size = len(filedata) tarf.addfile(tarinfo, BytesIO(filedata)) def _single_file_archive(self, export_name, files, root): ((basename, ext), entry) = files[0] full_fname = '%s_%s' % (export_name, basename) (unique_name, ext) = self._unique_name(full_fname, ext, root) filename = self._truncate_name(self._normalize_name(unique_name), ext=ext) fpath = os.path.join(root, filename) with open(fpath, 'wb') as f: f.write(Exporter.encode(entry)) def _directory_archive(self, export_name, files, root): output_dir = os.path.join(root, self._unique_name(export_name,'', root)[0]) if os.path.isdir(output_dir): shutil.rmtree(output_dir) os.makedirs(output_dir) for (basename, ext), entry in files: filename = self._truncate_name(basename, ext) fpath = os.path.join(output_dir, filename) with open(fpath, 'wb') as f: f.write(Exporter.encode(entry)) def _unique_name(self, basename, ext, existing, 
force=False): """ Find a unique basename for a new file/key where existing is either a list of (basename, ext) pairs or an absolute path to a directory. By default, uniqueness is enforced depending on the state of the unique_name parameter (for export names). If force is True, this parameter is ignored and uniqueness is guaranteed. """ skip = False if force else (not self.unique_name) if skip: return (basename, ext) ext = '' if ext is None else ext if isinstance(existing, str): split = [os.path.splitext(el) for el in os.listdir(os.path.abspath(existing))] existing = [(n, ex if not ex else ex[1:]) for (n, ex) in split] new_name, counter = basename, 1 while (new_name, ext) in existing: new_name = basename+'-'+str(counter) counter += 1 return (sanitizer(new_name), ext) def _truncate_name(self, basename, ext='', tail=10, join='...', maxlen=None): maxlen = self.max_filename if maxlen is None else maxlen max_len = maxlen-len(ext) if len(basename) > max_len: start = basename[:max_len-(tail + len(join))] end = basename[-tail:] basename = start + join + end filename = '%s.%s' % (basename, ext) if ext else basename return filename def _normalize_name(self, basename): basename=re.sub('-+','-',basename) basename=re.sub('^[-,_]','',basename) return basename.replace(' ', '_') def export(self, timestamp=None, info={}): """ Export the archive, directory or file. """ tval = tuple(time.localtime()) if timestamp is None else timestamp tstamp = time.strftime(self.timestamp_format, tval) info = dict(info, timestamp=tstamp) export_name = self._format(self.export_name, info) files = [((self._format(base, info), ext), val) for ((base, ext), val) in self._files.items()] root = os.path.abspath(self.root) # Make directory and populate if multiple files and not packed if len(self) > 1 and not self.pack: self._directory_archive(export_name, files, root) elif len(files) == 1: self._single_file_archive(export_name, files, root) elif self.archive_format == 'zip': self._zip_archive(export_name, files, root) elif self.archive_format == 'tar': self._tar_archive(export_name, files, root) if self.flush_archive: self._files = OrderedDict() def _format(self, formatter, info): filtered = {k:v for k,v in info.items() if k in self.parse_fields(formatter)} return formatter.format(**filtered) def __len__(self): "The number of files currently specified in the archive" return len(self._files) def __repr__(self): return self.pprint() def contents(self, maxlen=70): "Print the current (unexported) contents of the archive" lines = [] if len(self._files) == 0: print("Empty %s" % self.__class__.__name__) return fnames = [self._truncate_name(maxlen=maxlen, *k) for k in self._files] max_len = max([len(f) for f in fnames]) for name,v in zip(fnames, self._files.values()): mime_type = v[1].get('mime_type', 'no mime type') lines.append('%s : %s' % (name.ljust(max_len), mime_type)) print('\n'.join(lines)) def listing(self): "Return a list of filename entries currently in the archive" return ['.'.join([f,ext]) if ext else f for (f,ext) in self._files.keys()]
1
23,635
You seem to be basing your PRs off an old commit, which keeps reintroducing these changes and makes it harder to review your PRs.
holoviz-holoviews
py
@@ -52,6 +52,8 @@ var Server = function(requestHandler) { * with the server host when it has fully started. */ this.start = function(opt_port) { + assert(typeof opt_port !== 'function', + "start invoked with function, not port (mocha callback)?"); var port = opt_port || portprober.findFreePort('localhost'); return promise.when(port, function(port) { return promise.checkedNodeCall(
1
// Copyright 2013 Selenium committers // Copyright 2013 Software Freedom Conservancy // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 'use strict'; var assert = require('assert'), http = require('http'), url = require('url'); var net = require('../../net'), portprober = require('../../net/portprober'), promise = require('../..').promise; /** * Encapsulates a simple HTTP server for testing. The {@code onrequest} * function should be overridden to define request handling behavior. * @param {function(!http.ServerRequest, !http.ServerResponse)} requestHandler * The request handler for the server. * @constructor */ var Server = function(requestHandler) { var server = http.createServer(function(req, res) { requestHandler(req, res); }); server.on('connection', function(stream) { stream.setTimeout(4000); }); /** @typedef {{port: number, address: string, family: string}} */ var Host; /** * Starts the server on the given port. If no port, or 0, is provided, * the server will be started on a random port. * @param {number=} opt_port The port to start on. * @return {!webdriver.promise.Promise.<Host>} A promise that will resolve * with the server host when it has fully started. */ this.start = function(opt_port) { var port = opt_port || portprober.findFreePort('localhost'); return promise.when(port, function(port) { return promise.checkedNodeCall( server.listen.bind(server, port, 'localhost')); }).then(function() { return server.address(); }); }; /** * Stops the server. * @return {!webdriver.promise.Promise} A promise that will resolve when the * server has closed all connections. */ this.stop = function() { var d = promise.defer(); server.close(d.fulfill); return d.promise; }; /** * @return {Host} This server's host info. * @throws {Error} If the server is not running. */ this.address = function() { var addr = server.address(); if (!addr) { throw Error('There server is not running!'); } return addr; }; /** * return {string} The host:port of this server. * @throws {Error} If the server is not running. */ this.host = function() { return net.getLoopbackAddress() + ':' + this.address().port; }; /** * Formats a URL for this server. * @param {string=} opt_pathname The desired pathname on the server. * @return {string} The formatted URL. * @throws {Error} If the server is not running. */ this.url = function(opt_pathname) { var addr = this.address(); var pathname = opt_pathname || ''; return url.format({ protocol: 'http', hostname: net.getLoopbackAddress(), port: addr.port, pathname: pathname }); }; }; // PUBLIC API exports.Server = Server;
1
11,552
Maybe it would be simpler to ignore opt_port when its type !== 'number'? (See the sketch after this entry.)
SeleniumHQ-selenium
py
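A minimal sketch of the reviewer's alternative, reusing the surrounding file's portprober/promise helpers; the guard below is illustrative, not the committed fix:

this.start = function(opt_port) {
  // Ignore anything that is not a positive number (e.g. a mocha `done`
  // callback passed by mistake) and fall back to probing for a free port.
  var port = (typeof opt_port === 'number' && opt_port > 0)
      ? opt_port
      : portprober.findFreePort('localhost');
  return promise.when(port, function(port) {
    return promise.checkedNodeCall(
        server.listen.bind(server, port, 'localhost'));
  }).then(function() {
    return server.address();
  });
};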
@@ -38,7 +38,7 @@ namespace OpenTelemetry.Trace /// <summary> /// Gets or sets attributes known prior to span creation. /// </summary> - public IDictionary<string, object> Attributes { get; set; } + public IEnumerable<KeyValuePair<string, object>> Attributes { get; set; } /// <summary> /// Gets or sets Links factory. Use it to deserialize list of <see cref="Link"/> lazily
1
// <copyright file="SpanCreationOptions.cs" company="OpenTelemetry Authors"> // Copyright 2018, OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // </copyright> using System; using System.Collections.Generic; namespace OpenTelemetry.Trace { /// <summary> /// Span creation options for advanced scenarios. /// </summary> public class SpanCreationOptions { /// <summary> /// Gets or sets explicit span start timestamp. /// Use it when span has started in the past and created later. /// </summary> public DateTimeOffset StartTimestamp { get; set; } /// <summary> /// Gets or sets list of <see cref="Link"/>. /// </summary> public IEnumerable<Link> Links { get; set; } /// <summary> /// Gets or sets attributes known prior to span creation. /// </summary> public IDictionary<string, object> Attributes { get; set; } /// <summary> /// Gets or sets Links factory. Use it to deserialize list of <see cref="Link"/> lazily /// when application configures OpenTelemetry implementation that supports links. /// </summary> public Func<IEnumerable<Link>> LinksFactory { get; set; } } }
1
13,618
As far as I can tell, `IDictionary<string, object>` implements `IEnumerable<KeyValuePair<string, object>>`, so we are just making the signature more generic. From the issue, I understood that we want to maintain sequence/order, and I believe `IEnumerable<>` alone won't fix that (see the sketch after this entry).
open-telemetry-opentelemetry-dotnet
.cs
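A small self-contained sketch of the point being made: a Dictionary already satisfies an IEnumerable<KeyValuePair<string, object>> parameter, but neither type guarantees insertion order, so widening the property alone does not preserve attribute ordering (class and method names below are illustrative):

using System;
using System.Collections.Generic;

class AttributeOrderDemo
{
    // Accepts both IDictionary<string, object> and ordered sequences.
    static void PrintAttributes(IEnumerable<KeyValuePair<string, object>> attributes)
    {
        foreach (var kvp in attributes)
        {
            Console.WriteLine($"{kvp.Key} = {kvp.Value}");
        }
    }

    static void Main()
    {
        // A Dictionary compiles fine against the widened signature,
        // but its enumeration order is unspecified...
        PrintAttributes(new Dictionary<string, object> { ["b"] = 2, ["a"] = 1 });

        // ...whereas only an ordered collection actually guarantees sequence.
        PrintAttributes(new List<KeyValuePair<string, object>>
        {
            new KeyValuePair<string, object>("b", 2),
            new KeyValuePair<string, object>("a", 1),
        });
    }
}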
@@ -124,7 +124,7 @@ public class DockerOptions { for (int i = 0; i < maxContainerCount; i++) { node.add(caps, new DockerSessionFactory(clientFactory, docker, image, caps)); } - LOG.info(String.format( + LOG.finest(String.format( "Mapping %s to docker image %s %d times", caps, name,
1
// Licensed to the Software Freedom Conservancy (SFC) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The SFC licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. package org.openqa.selenium.grid.docker; import static java.util.logging.Level.WARNING; import static org.openqa.selenium.remote.http.HttpMethod.GET; import com.google.common.collect.HashMultimap; import com.google.common.collect.Multimap; import org.openqa.selenium.Capabilities; import org.openqa.selenium.docker.Docker; import org.openqa.selenium.docker.DockerException; import org.openqa.selenium.docker.Image; import org.openqa.selenium.docker.ImageNamePredicate; import org.openqa.selenium.grid.config.Config; import org.openqa.selenium.grid.config.ConfigException; import org.openqa.selenium.grid.node.local.LocalNode; import org.openqa.selenium.json.Json; import org.openqa.selenium.remote.http.HttpClient; import org.openqa.selenium.remote.http.HttpRequest; import org.openqa.selenium.remote.http.HttpResponse; import java.io.IOException; import java.io.UncheckedIOException; import java.net.MalformedURLException; import java.net.URL; import java.util.Arrays; import java.util.List; import java.util.Objects; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; import java.util.logging.Logger; public class DockerOptions { private static final Logger LOG = Logger.getLogger(DockerOptions.class.getName()); private static final Json JSON = new Json(); private final Config config; public DockerOptions(Config config) { this.config = Objects.requireNonNull(config); } private URL getDockerUrl() { try { String raw = config.get("docker", "url") .orElseThrow(() -> new ConfigException("No docker url configured")); return new URL(raw); } catch (MalformedURLException e) { throw new UncheckedIOException(e); } } private boolean isEnabled(HttpClient.Factory clientFactory) { if (!config.getAll("docker", "configs").isPresent()) { return false; } // Is the daemon up and running? URL url = getDockerUrl(); HttpClient client = clientFactory.createClient(url); try { HttpResponse response = client.execute(new HttpRequest(GET, "/_ping")); if (response.getStatus() != 200) { LOG.warning(String.format("Docker config enabled, but daemon unreachable: %s", url)); return false; } return true; } catch (IOException e) { LOG.log(WARNING, "Unable to ping docker daemon. 
Docker disabled: " + e.getMessage()); return false; } } public void configure(HttpClient.Factory clientFactory, LocalNode.Builder node) throws IOException { if (!isEnabled(clientFactory)) { return; } List<String> allConfigs = config.getAll("docker", "configs") .orElseThrow(() -> new DockerException("Unable to find docker configs")); Multimap<String, Capabilities> kinds = HashMultimap.create(); for (int i = 0; i < allConfigs.size(); i++) { String imageName = allConfigs.get(i); i++; if (i == allConfigs.size()) { throw new DockerException("Unable to find JSON config"); } Capabilities stereotype = JSON.toType(allConfigs.get(i), Capabilities.class); kinds.put(imageName, stereotype); } HttpClient client = clientFactory.createClient(new URL("http://localhost:2375")); Docker docker = new Docker(client); loadImages(docker, kinds.keySet().toArray(new String[0])); int maxContainerCount = Runtime.getRuntime().availableProcessors(); kinds.forEach((name, caps) -> { Image image = docker.findImage(new ImageNamePredicate(name)) .orElseThrow(() -> new DockerException( String.format("Cannot find image matching: %s", name))); for (int i = 0; i < maxContainerCount; i++) { node.add(caps, new DockerSessionFactory(clientFactory, docker, image, caps)); } LOG.info(String.format( "Mapping %s to docker image %s %d times", caps, name, maxContainerCount)); }); } private void loadImages(Docker docker, String... imageNames) { CompletableFuture<Void> cd = CompletableFuture.allOf( Arrays.stream(imageNames) .map(entry -> { int index = entry.lastIndexOf(':'); if (index == -1) { throw new RuntimeException("Unable to determine tag from " + entry); } String name = entry.substring(0, index); String version = entry.substring(index + 1); return CompletableFuture.supplyAsync(() -> docker.pull(name, version)); }).toArray(CompletableFuture[]::new)); try { cd.get(); } catch (InterruptedException e) { Thread.currentThread().interrupt(); throw new RuntimeException(e); } catch (ExecutionException e) { Throwable cause = e.getCause() != null ? e.getCause() : e; if (cause instanceof RuntimeException) { throw (RuntimeException) cause; } throw new RuntimeException(cause); } } }
1
16,469
This change prevents a user from understanding how their server is configured. Best to leave it at the `info` level.
SeleniumHQ-selenium
java
@@ -32,6 +32,15 @@ import ( "google.golang.org/grpc/status" ) +var ( + ephemeralDenyList = []string{ + api.SpecPriorityAlias, + api.SpecPriority, + api.SpecSticky, + api.SpecScale, + } +) + func (s *OsdCsiServer) NodeGetInfo( ctx context.Context, req *csi.NodeGetInfoRequest,
1
/* Package csi is CSI driver interface for OSD Copyright 2017 Portworx Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package csi import ( "fmt" "os" "strings" "github.com/libopenstorage/openstorage/api" "github.com/portworx/kvdb" csi "github.com/container-storage-interface/spec/lib/go/csi" "github.com/sirupsen/logrus" "golang.org/x/net/context" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" ) func (s *OsdCsiServer) NodeGetInfo( ctx context.Context, req *csi.NodeGetInfoRequest, ) (*csi.NodeGetInfoResponse, error) { clus, err := s.cluster.Enumerate() if err != nil { return nil, status.Errorf(codes.Internal, "Unable to Enumerate cluster: %s", err) } result := &csi.NodeGetInfoResponse{ NodeId: clus.NodeId, } return result, nil } // NodePublishVolume is a CSI API call which mounts the volume on the specified // target path on the node. // // TODO: Support READ ONLY Mounts // func (s *OsdCsiServer) NodePublishVolume( ctx context.Context, req *csi.NodePublishVolumeRequest, ) (*csi.NodePublishVolumeResponse, error) { logrus.Debugf("csi.NodePublishVolume request received. VolumeID: %s, TargetPath: %s", req.GetVolumeId(), req.GetTargetPath()) // Check arguments if len(req.GetVolumeId()) == 0 { return nil, status.Error(codes.InvalidArgument, "Volume id must be provided") } if len(req.GetTargetPath()) == 0 { return nil, status.Error(codes.InvalidArgument, "Target path must be provided") } if req.GetVolumeCapability() == nil || req.GetVolumeCapability().GetAccessMode() == nil { return nil, status.Error(codes.InvalidArgument, "Volume access mode must be provided") } if req.GetVolumeCapability().GetBlock() != nil { return nil, status.Errorf(codes.Unimplemented, "CSI raw block is not supported") } // Ensure target location is created correctly if err := ensureMountPathCreated(req.GetTargetPath()); err != nil { return nil, status.Errorf( codes.Aborted, "Failed to use target location %s: %s", req.GetTargetPath(), err.Error()) } // Get grpc connection conn, err := s.getConn() if err != nil { return nil, status.Errorf( codes.Internal, "Unable to connect to SDK server: %v", err) } // Get secret if any was passed ctx = s.setupContextWithToken(ctx, req.GetSecrets()) // Check if block device driverType := s.driver.Type() if driverType != api.DriverType_DRIVER_TYPE_BLOCK && req.GetVolumeCapability().GetBlock() != nil { return nil, status.Errorf(codes.InvalidArgument, "Trying to attach as block a non block device") } // Gather volume attributes spec, locator, _, err := s.specHandler.SpecFromOpts(req.GetVolumeContext()) if err != nil { return nil, status.Errorf( codes.InvalidArgument, "Invalid volume attributes: %#v", req.GetVolumeContext()) } // Get volume encryption info from req.Secrets driverOpts := s.addEncryptionInfoToLabels(make(map[string]string), req.GetSecrets()) // Parse storage class 'mountOptions' flags from CSI req // flags from 'mountOptions' will be used as the only source of truth for Pure volumes upon mounting if req.GetVolumeCapability() != nil && req.GetVolumeCapability().GetMount() != nil 
{ mountFlags := strings.Join(req.GetVolumeCapability().GetMount().GetMountFlags(), ",") if mountFlags != "" { driverOpts[api.SpecCSIMountOptions] = mountFlags } } // prepare for mount/attaching mounts := api.NewOpenStorageMountAttachClient(conn) opts := &api.SdkVolumeAttachOptions{ SecretName: spec.GetPassphrase(), } // can use either spec.Ephemeral or VolumeContext label volumeId := req.GetVolumeId() if req.GetVolumeContext()["csi.storage.k8s.io/ephemeral"] == "true" || spec.Ephemeral { spec.Ephemeral = true volumes := api.NewOpenStorageVolumeClient(conn) resp, err := volumes.Create(ctx, &api.SdkVolumeCreateRequest{ Name: req.GetVolumeId(), Spec: spec, Labels: locator.GetVolumeLabels(), }) if err != nil { return nil, err } volumeId = resp.VolumeId } if driverType == api.DriverType_DRIVER_TYPE_BLOCK { if _, err = mounts.Attach(ctx, &api.SdkVolumeAttachRequest{ VolumeId: volumeId, Options: opts, DriverOptions: driverOpts, }); err != nil { if spec.Ephemeral { logrus.Errorf("Failed to attach ephemeral volume %s: %v", volumeId, err.Error()) s.cleanupEphemeral(ctx, conn, volumeId, false) } return nil, err } } // Mount volume onto the path if _, err := mounts.Mount(ctx, &api.SdkVolumeMountRequest{ VolumeId: volumeId, MountPath: req.GetTargetPath(), Options: opts, DriverOptions: driverOpts, }); err != nil { if spec.Ephemeral { logrus.Errorf("Failed to mount ephemeral volume %s: %v", volumeId, err.Error()) s.cleanupEphemeral(ctx, conn, volumeId, true) } return nil, err } logrus.Infof("CSI Volume %s mounted on %s", volumeId, req.GetTargetPath()) return &csi.NodePublishVolumeResponse{}, nil } // NodeUnpublishVolume is a CSI API call which unmounts the volume. func (s *OsdCsiServer) NodeUnpublishVolume( ctx context.Context, req *csi.NodeUnpublishVolumeRequest, ) (*csi.NodeUnpublishVolumeResponse, error) { logrus.Debugf("csi.NodeUnpublishVolume request received. 
VolumeID: %s, TargetPath: %s", req.GetVolumeId(), req.GetTargetPath()) // Check arguments if len(req.GetVolumeId()) == 0 { return nil, status.Error(codes.InvalidArgument, "Volume id must be provided") } if len(req.GetTargetPath()) == 0 { return nil, status.Error(codes.InvalidArgument, "Target path must be provided") } // Get volume information vols, err := s.driver.Inspect([]string{req.GetVolumeId()}) if err != nil || len(vols) < 1 { if err == kvdb.ErrNotFound { logrus.Infof("Volume %s was deleted or cannot be found: %s", req.GetVolumeId(), err.Error()) return &csi.NodeUnpublishVolumeResponse{}, nil } else if err != nil { return nil, status.Errorf(codes.NotFound, "Volume id %s not found: %s", req.GetVolumeId(), err.Error()) } else { logrus.Infof("Volume %s was deleted or cannot be found", req.GetVolumeId()) return &csi.NodeUnpublishVolumeResponse{}, nil } } // Mount volume onto the path if err = s.driver.Unmount(req.GetVolumeId(), req.GetTargetPath(), nil); err != nil { logrus.Infof("Unable to unmount volume %s onto %s: %s", req.GetVolumeId(), req.GetTargetPath(), err.Error()) } if s.driver.Type() == api.DriverType_DRIVER_TYPE_BLOCK { if err = s.driver.Detach(req.GetVolumeId(), nil); err != nil { return nil, status.Errorf( codes.Internal, "Unable to detach volume: %s", err.Error()) } } // Attempt to remove volume path // Kubernetes handles this after NodeUnpublishVolume finishes, but this allows for cross-CO compatibility if err := os.Remove(req.GetTargetPath()); err != nil && !os.IsNotExist(err) { logrus.Warnf("Failed to delete mount path %s: %s", req.GetTargetPath(), err.Error()) } // Return error to Kubelet if mount path still exists to force a retry if _, err := os.Stat(req.GetTargetPath()); !os.IsNotExist(err) { return nil, status.Errorf( codes.Internal, "Mount path still exists: %s", req.GetTargetPath()) } logrus.Infof("CSI Volume %s unmounted from path %s", req.GetVolumeId(), req.GetTargetPath()) return &csi.NodeUnpublishVolumeResponse{}, nil } // NodeGetCapabilities is a CSI API function which seems to be setup for // future patches func (s *OsdCsiServer) NodeGetCapabilities( ctx context.Context, req *csi.NodeGetCapabilitiesRequest, ) (*csi.NodeGetCapabilitiesResponse, error) { logrus.Debugf("csi.NodeGetCapabilities request received") caps := []csi.NodeServiceCapability_RPC_Type{ // Getting volume stats for volume health monitoring csi.NodeServiceCapability_RPC_GET_VOLUME_STATS, // Indicates that the Node service can report volume conditions. 
csi.NodeServiceCapability_RPC_VOLUME_CONDITION, } var serviceCapabilities []*csi.NodeServiceCapability for _, cap := range caps { serviceCapabilities = append(serviceCapabilities, &csi.NodeServiceCapability{ Type: &csi.NodeServiceCapability_Rpc{ Rpc: &csi.NodeServiceCapability_RPC{ Type: cap, }, }, }) } return &csi.NodeGetCapabilitiesResponse{ Capabilities: serviceCapabilities, }, nil } func getVolumeCondition(vol *api.Volume) *csi.VolumeCondition { condition := &csi.VolumeCondition{} if vol.Status != api.VolumeStatus_VOLUME_STATUS_UP { condition.Abnormal = true } switch vol.Status { case api.VolumeStatus_VOLUME_STATUS_UP: condition.Message = "Volume status is up" case api.VolumeStatus_VOLUME_STATUS_NOT_PRESENT: condition.Message = "Volume status is not present" case api.VolumeStatus_VOLUME_STATUS_DOWN: condition.Message = "Volume status is down" case api.VolumeStatus_VOLUME_STATUS_DEGRADED: condition.Message = "Volume status is degraded" default: condition.Message = "Volume status is unknown" } return condition } // NodeGetVolumeStats get volume stats for a given node. // This function skips auth and directly hits the driver as it is read-only // and only exposed via the CSI unix domain socket. If a secrets field is added // in csi.NodeGetVolumeStatsRequest, we can update this to hit the SDK and use auth. func (s *OsdCsiServer) NodeGetVolumeStats(ctx context.Context, req *csi.NodeGetVolumeStatsRequest) (*csi.NodeGetVolumeStatsResponse, error) { logrus.Debugf("NodeGetVolumeStats request received. VolumeID: %s, VolumePath: %s", req.GetVolumeId(), req.GetVolumePath()) // Check arguments id := req.GetVolumeId() if len(id) == 0 { return nil, status.Error(codes.InvalidArgument, "Volume id must be provided") } path := req.GetVolumePath() if len(path) == 0 { return nil, status.Error(codes.InvalidArgument, "Volume path must be provided") } // Driver inspect as NodeGetVolumeStatsRequest does not support secrets vol, err := s.driverGetVolume(req.GetVolumeId()) if err != nil { return nil, err } var attachPathMatch bool for _, attachPath := range vol.AttachPath { if attachPath == path { attachPathMatch = true } } if !attachPathMatch { return nil, status.Errorf(codes.NotFound, "Volume %s not mounted on path %s", id, path) } // Define volume usage total := int64(vol.Spec.Size) used := int64(vol.Usage) usage := &csi.VolumeUsage{ Available: total - used, Total: total, Used: used, Unit: csi.VolumeUsage_BYTES, } // Define volume condition return &csi.NodeGetVolumeStatsResponse{ Usage: []*csi.VolumeUsage{ usage, }, VolumeCondition: getVolumeCondition(vol), }, nil } // cleanupEphemeral detaches and deletes an ephemeral volume if either attach or mount fails func (s *OsdCsiServer) cleanupEphemeral(ctx context.Context, conn *grpc.ClientConn, volumeId string, detach bool) { if detach { mounts := api.NewOpenStorageMountAttachClient(conn) if _, err := mounts.Detach(ctx, &api.SdkVolumeDetachRequest{ VolumeId: volumeId, }); err != nil { logrus.Errorf("Failed to detach ephemeral volume %s during cleanup: %v", volumeId, err.Error()) return } } volumes := api.NewOpenStorageVolumeClient(conn) if _, err := volumes.Delete(ctx, &api.SdkVolumeDeleteRequest{ VolumeId: volumeId, }); err != nil { logrus.Errorf("Failed to delete ephemeral volume %s during cleanup: %v", volumeId, err.Error()) } } func ensureMountPathCreated(targetPath string) error { fileInfo, err := os.Lstat(targetPath) if err != nil && os.IsNotExist(err) { err = os.MkdirAll(targetPath, 0750) if err != nil { return fmt.Errorf( "Failed to create target path %s: 
%s", targetPath, err.Error()) } } else if err != nil { return fmt.Errorf( "Unknown error while verifying target location %s: %s", targetPath, err.Error()) } else { if !fileInfo.IsDir() { return fmt.Errorf("Target location %s is not a directory", targetPath) } } return nil }
1
8,812
I will add the following to the list (see the sketch after this entry):
- api.SpecScale
- api.SpecSticky
libopenstorage-openstorage
go
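A hedged sketch of how such a deny list might be consulted; the helper name and call site below are assumptions, since the patch only shows the list itself:

// stripEphemeralDeniedOpts is a hypothetical helper: it drops options that
// appear in ephemeralDenyList before an ephemeral volume spec is parsed.
func stripEphemeralDeniedOpts(opts map[string]string) map[string]string {
	filtered := make(map[string]string, len(opts))
	for k, v := range opts {
		denied := false
		for _, d := range ephemeralDenyList {
			if k == d {
				denied = true
				break
			}
		}
		if !denied {
			filtered[k] = v
		}
	}
	return filtered
}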
@@ -58,7 +58,7 @@ bool EprosimaClient::init() //CREATE RTPSParticipant ParticipantAttributes PParam; - PParam.rtps.defaultSendPort = 10042; + //PParam.rtps.defaultSendPort = 10042; // TODO Create transport? PParam.rtps.builtin.domainId = 80; PParam.rtps.builtin.use_SIMPLE_EndpointDiscoveryProtocol = true; PParam.rtps.builtin.use_SIMPLE_RTPSParticipantDiscoveryProtocol = true;
1
// Copyright 2016 Proyectos y Sistemas de Mantenimiento SL (eProsima). // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. /** * @file EprosimaClient.cpp * */ #include "EprosimaClient.h" #include "fastrtps/fastrtps_all.h" using namespace eprosima::fastrtps; using namespace eprosima::fastrtps::rtps; using namespace clientserver; EprosimaClient::EprosimaClient(): mp_operation_pub(nullptr), mp_result_sub(nullptr), mp_participant(nullptr), mp_resultdatatype(nullptr), mp_operationdatatype(nullptr), m_operationsListener(nullptr), m_resultsListener(nullptr), m_isReady(false), m_operationMatched(0), m_resultMatched(0) { m_operationsListener.mp_up = this; m_resultsListener.mp_up = this; } EprosimaClient::~EprosimaClient() { Domain::removeParticipant(mp_participant); if(mp_resultdatatype!=nullptr) delete(mp_resultdatatype); if(mp_operationdatatype!=nullptr) delete(mp_operationdatatype); } bool EprosimaClient::init() { //CREATE RTPSParticipant ParticipantAttributes PParam; PParam.rtps.defaultSendPort = 10042; PParam.rtps.builtin.domainId = 80; PParam.rtps.builtin.use_SIMPLE_EndpointDiscoveryProtocol = true; PParam.rtps.builtin.use_SIMPLE_RTPSParticipantDiscoveryProtocol = true; PParam.rtps.builtin.m_simpleEDP.use_PublicationReaderANDSubscriptionWriter = true; PParam.rtps.builtin.m_simpleEDP.use_PublicationWriterANDSubscriptionReader = true; PParam.rtps.builtin.leaseDuration = c_TimeInfinite; PParam.rtps.setName( "client_RTPSParticipant"); mp_participant = Domain::createParticipant(PParam); if(mp_participant == nullptr) return false; //REGISTER TYPES mp_resultdatatype = new ResultDataType(); mp_operationdatatype = new OperationDataType(); Domain::registerType(mp_participant,mp_resultdatatype); Domain::registerType(mp_participant,mp_operationdatatype); // DATA PUBLISHER PublisherAttributes PubDataparam; PubDataparam.topic.topicDataType = "Operation"; PubDataparam.topic.topicKind = NO_KEY; PubDataparam.topic.topicName = "Operations"; PubDataparam.topic.historyQos.kind = KEEP_LAST_HISTORY_QOS; PubDataparam.topic.historyQos.depth = 2; PubDataparam.topic.resourceLimitsQos.max_samples = 50; PubDataparam.topic.resourceLimitsQos.allocated_samples = 50; PubDataparam.qos.m_reliability.kind = RELIABLE_RELIABILITY_QOS; mp_operation_pub = Domain::createPublisher(mp_participant,PubDataparam,(PublisherListener*)&this->m_operationsListener); if(mp_operation_pub == nullptr) return false; //DATA SUBSCRIBER SubscriberAttributes SubDataparam; Locator_t loc; loc.set_port(7555); PubDataparam.unicastLocatorList.push_back(loc); SubDataparam.topic.topicDataType = "Result"; SubDataparam.topic.topicKind = NO_KEY; SubDataparam.topic.topicName = "Results"; SubDataparam.topic.historyQos.kind = KEEP_LAST_HISTORY_QOS; SubDataparam.topic.historyQos.depth = 100; SubDataparam.topic.resourceLimitsQos.max_samples = 100; SubDataparam.topic.resourceLimitsQos.allocated_samples = 100; mp_result_sub = Domain::createSubscriber(mp_participant,SubDataparam,(SubscriberListener*)&this->m_resultsListener); if(mp_result_sub == nullptr) return false; return 
true; } Result::RESULTTYPE EprosimaClient::calculate(Operation::OPERATIONTYPE type, int32_t num1,int32_t num2,int32_t* result) { if(!m_isReady) return Result::SERVER_NOT_READY; m_operation.m_operationId++; m_operation.m_operationType = type; m_operation.m_num1 = num1; m_operation.m_num2 = num2; mp_operation_pub->write((void*)&m_operation); do{ resetResult(); mp_result_sub->waitForUnreadMessage(); mp_result_sub->takeNextData((void*)&m_result,&m_sampleInfo); }while(m_sampleInfo.sampleKind !=ALIVE || m_result.m_guid != m_operation.m_guid || m_result.m_operationId != m_operation.m_operationId); if(m_result.m_resultType == Result::GOOD_RESULT) { *result = m_result.m_result; } return m_result.m_resultType; } void EprosimaClient::resetResult() { m_result.m_guid = c_Guid_Unknown; m_result.m_operationId = 0; m_result.m_result = 0; } void EprosimaClient::OperationListener::onPublicationMatched(Publisher*, MatchingInfo& info) { if(info.status == MATCHED_MATCHING) { mp_up->m_operationMatched++; } else mp_up->m_operationMatched--; mp_up->isReady(); } void EprosimaClient::ResultListener::onSubscriptionMatched(Subscriber*, MatchingInfo& info) { if(info.status == MATCHED_MATCHING) { mp_up->m_resultMatched++; } else mp_up->m_resultMatched--; mp_up->isReady(); } void EprosimaClient::ResultListener::onNewDataMessage(Subscriber*) { } bool EprosimaClient::isReady() { if(m_operationMatched == 1 && m_resultMatched == 1) m_isReady = true; else m_isReady = false; return m_isReady; }
1
12,892
As defaultSendPort is being removed, and I don't like TODOs in examples, please remove the whole line.
eProsima-Fast-DDS
cpp
@@ -158,7 +158,6 @@ namespace OpenTelemetry.Resources.Tests { "dynamic", new { } }, { "array", new int[1] }, { "complex", this }, - { "float", 0.1f }, }; var resource = new Resource(attributes);
1
// <copyright file="ResourceTest.cs" company="OpenTelemetry Authors"> // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // </copyright> using System; using System.Collections.Generic; using System.Linq; using Xunit; namespace OpenTelemetry.Resources.Tests { public class ResourceTest : IDisposable { private const string KeyName = "key"; private const string ValueName = "value"; private const string OtelEnvVarKey = "OTEL_RESOURCE_ATTRIBUTES"; public ResourceTest() { Environment.SetEnvironmentVariable(OtelEnvVarKey, null); } [Fact] public static void CreateResource_NullAttributeCollection() { // Act and Assert var resource = new Resource(null); Assert.Empty(resource.Attributes); } [Fact] public void CreateResource_NullAttributeValue() { // Arrange var attributes = new Dictionary<string, object> { { "NullValue", null } }; // Act var resource = new Resource(attributes); // Assert Assert.Single(resource.Attributes); var attribute = resource.Attributes.Single(); Assert.Equal("NullValue", attribute.Key); Assert.Empty((string)attribute.Value); } [Fact] public void CreateResource_EmptyAttributeKey() { // Arrange var attributes = new Dictionary<string, object> { { string.Empty, "value" } }; // Act var resource = new Resource(attributes); // Assert Assert.Single(resource.Attributes); var attribute = resource.Attributes.Single(); Assert.Empty(attribute.Key); Assert.Equal("value", attribute.Value); } [Fact] public void CreateResource_EmptyAttributeValue() { // Arrange var attributes = new Dictionary<string, object> { { "EmptyValue", string.Empty } }; // does not throw var resource = new Resource(attributes); // Assert Assert.Single(resource.Attributes); Assert.Contains(new KeyValuePair<string, object>("EmptyValue", string.Empty), resource.Attributes); } [Fact] public void CreateResource_EmptyAttribute() { // Arrange var attributeCount = 0; var attributes = this.CreateAttributes(attributeCount); // Act var resource = new Resource(attributes); // Assert ValidateResource(resource, attributeCount); } [Fact] public void CreateResource_SingleAttribute() { // Arrange var attributeCount = 1; var attributes = this.CreateAttributes(attributeCount); // Act var resource = new Resource(attributes); // Assert ValidateResource(resource, attributeCount); } [Fact] public void CreateResource_MultipleAttribute() { // Arrange var attributeCount = 5; var attributes = this.CreateAttributes(attributeCount); // Act var resource = new Resource(attributes); // Assert ValidateResource(resource, attributeCount); } [Fact] public void CreateResource_SupportedAttributeTypes() { var attributes = new Dictionary<string, object> { { "string", "stringValue" }, { "long", 1L }, { "bool", true }, { "double", 0.1d }, }; var resource = new Resource(attributes); Assert.Equal(4, resource.Attributes.Count()); Assert.Contains(new KeyValuePair<string, object>("string", "stringValue"), resource.Attributes); Assert.Contains(new KeyValuePair<string, object>("long", 1L), resource.Attributes); Assert.Contains(new 
KeyValuePair<string, object>("bool", true), resource.Attributes); Assert.Contains(new KeyValuePair<string, object>("double", 0.1d), resource.Attributes); } [Fact] public void CreateResource_NotSupportedAttributeTypes() { var attributes = new Dictionary<string, object> { { "dynamic", new { } }, { "array", new int[1] }, { "complex", this }, { "float", 0.1f }, }; var resource = new Resource(attributes); Assert.Equal(4, resource.Attributes.Count()); Assert.Contains(new KeyValuePair<string, object>("dynamic", string.Empty), resource.Attributes); Assert.Contains(new KeyValuePair<string, object>("array", string.Empty), resource.Attributes); Assert.Contains(new KeyValuePair<string, object>("complex", string.Empty), resource.Attributes); Assert.Contains(new KeyValuePair<string, object>("float", string.Empty), resource.Attributes); } [Fact] public void MergeResource_EmptyAttributeSource_MultiAttributeTarget() { // Arrange var sourceAttributeCount = 0; var sourceAttributes = this.CreateAttributes(sourceAttributeCount); var sourceResource = new Resource(sourceAttributes); var otherAttributeCount = 3; var otherAttributes = this.CreateAttributes(otherAttributeCount); var otherResource = new Resource(otherAttributes); // Act var newResource = sourceResource.Merge(otherResource); // Assert Assert.NotSame(otherResource, newResource); Assert.NotSame(sourceResource, newResource); ValidateResource(newResource, sourceAttributeCount + otherAttributeCount); } [Fact] public void MergeResource_MultiAttributeSource_EmptyAttributeTarget() { // Arrange var sourceAttributeCount = 3; var sourceAttributes = this.CreateAttributes(sourceAttributeCount); var sourceResource = new Resource(sourceAttributes); var otherAttributeCount = 0; var otherAttributes = this.CreateAttributes(otherAttributeCount); var otherResource = new Resource(otherAttributes); // Act var newResource = sourceResource.Merge(otherResource); // Assert Assert.NotSame(otherResource, newResource); Assert.NotSame(sourceResource, newResource); ValidateResource(newResource, sourceAttributeCount + otherAttributeCount); } [Fact] public void MergeResource_MultiAttributeSource_MultiAttributeTarget_NoOverlap() { // Arrange var sourceAttributeCount = 3; var sourceAttributes = this.CreateAttributes(sourceAttributeCount); var sourceResource = new Resource(sourceAttributes); var otherAttributeCount = 3; var otherAttributes = this.CreateAttributes(otherAttributeCount, sourceAttributeCount); var otherResource = new Resource(otherAttributes); // Act var newResource = sourceResource.Merge(otherResource); // Assert Assert.NotSame(otherResource, newResource); Assert.NotSame(sourceResource, newResource); ValidateResource(newResource, sourceAttributeCount + otherAttributeCount); } [Fact] public void MergeResource_MultiAttributeSource_MultiAttributeTarget_SingleOverlap() { // Arrange var sourceAttributeCount = 3; var sourceAttributes = this.CreateAttributes(sourceAttributeCount); var sourceResource = new Resource(sourceAttributes); var otherAttributeCount = 3; var otherAttributes = this.CreateAttributes(otherAttributeCount, sourceAttributeCount - 1); var otherResource = new Resource(otherAttributes); // Act var newResource = sourceResource.Merge(otherResource); // Assert Assert.NotSame(otherResource, newResource); Assert.NotSame(sourceResource, newResource); ValidateResource(newResource, sourceAttributeCount + otherAttributeCount - 1); // Also verify target attributes were not overwritten foreach (var otherAttribute in otherAttributes) { Assert.Contains(otherAttribute, 
otherResource.Attributes); } } [Fact] public void MergeResource_MultiAttributeSource_MultiAttributeTarget_FullOverlap() { // Arrange var sourceAttributeCount = 3; var sourceAttributes = this.CreateAttributes(sourceAttributeCount); var sourceResource = new Resource(sourceAttributes); var otherAttributeCount = 3; var otherAttributes = this.CreateAttributes(otherAttributeCount); var otherResource = new Resource(otherAttributes); // Act var newResource = sourceResource.Merge(otherResource); // Assert Assert.NotSame(otherResource, newResource); Assert.NotSame(sourceResource, newResource); ValidateResource(newResource, otherAttributeCount); // Also verify target attributes were not overwritten foreach (var otherAttribute in otherAttributes) { Assert.Contains(otherAttribute, otherResource.Attributes); } } [Fact] public void MergeResource_MultiAttributeSource_DuplicatedKeysInPrimary() { // Arrange var sourceAttributes = new List<KeyValuePair<string, object>> { new KeyValuePair<string, object>("key1", "value1"), new KeyValuePair<string, object>("key1", "value1.1"), }; var sourceResource = new Resource(sourceAttributes); var otherAttributes = new List<KeyValuePair<string, object>> { new KeyValuePair<string, object>("key2", "value2"), }; var otherResource = new Resource(otherAttributes); // Act var newResource = sourceResource.Merge(otherResource); // Assert Assert.NotSame(otherResource, newResource); Assert.NotSame(sourceResource, newResource); Assert.Equal(2, newResource.Attributes.Count()); Assert.Contains(new KeyValuePair<string, object>("key1", "value1"), newResource.Attributes); Assert.Contains(new KeyValuePair<string, object>("key2", "value2"), newResource.Attributes); } [Fact] public void MergeResource_SecondaryCanOverridePrimaryEmptyAttributeValue() { // Arrange var primaryAttributes = new Dictionary<string, object> { { "value", string.Empty } }; var secondaryAttributes = new Dictionary<string, object> { { "value", "not empty" } }; var primaryResource = new Resource(primaryAttributes); var secondaryResource = new Resource(secondaryAttributes); var newResource = primaryResource.Merge(secondaryResource); // Assert Assert.Single(newResource.Attributes); Assert.Contains(new KeyValuePair<string, object>("value", "not empty"), newResource.Attributes); } [Fact] public void GetResourceWithDefaultAttributes_EmptyResource() { // Arrange var resource = ResourceBuilder.CreateDefault().AddEnvironmentVariableDetector().Build(); // Assert var attributes = resource.Attributes; Assert.Equal(3, attributes.Count()); ValidateTelemetrySdkAttributes(attributes); } [Fact] public void GetResourceWithDefaultAttributes_ResourceWithAttrs() { // Arrange var resource = ResourceBuilder.CreateDefault().AddEnvironmentVariableDetector().AddAttributes(this.CreateAttributes(2)).Build(); // Assert var attributes = resource.Attributes; Assert.Equal(5, attributes.Count()); ValidateAttributes(attributes, 0, 1); ValidateTelemetrySdkAttributes(attributes); } [Fact] public void GetResourceWithDefaultAttributes_WithEnvVar() { // Arrange Environment.SetEnvironmentVariable(OtelEnvVarKey, "EVKey1=EVVal1,EVKey2=EVVal2"); var resource = ResourceBuilder.CreateDefault().AddEnvironmentVariableDetector().AddAttributes(this.CreateAttributes(2)).Build(); // Assert var attributes = resource.Attributes; Assert.Equal(7, attributes.Count()); ValidateAttributes(attributes, 0, 1); ValidateTelemetrySdkAttributes(attributes); Assert.Contains(new KeyValuePair<string, object>("EVKey1", "EVVal1"), attributes); Assert.Contains(new KeyValuePair<string, 
object>("EVKey2", "EVVal2"), attributes); } public void Dispose() { Environment.SetEnvironmentVariable(OtelEnvVarKey, null); } private static void AddAttributes(Dictionary<string, object> attributes, int attributeCount, int startIndex = 0) { for (var i = startIndex; i < attributeCount + startIndex; ++i) { attributes.Add($"{KeyName}{i}", $"{ValueName}{i}"); } } private static void ValidateAttributes(IEnumerable<KeyValuePair<string, object>> attributes, int startIndex = 0, int endIndex = 0) { var keyValuePairs = attributes as KeyValuePair<string, object>[] ?? attributes.ToArray(); var endInd = endIndex == 0 ? keyValuePairs.Length - 1 : endIndex; for (var i = startIndex; i <= endInd; ++i) { Assert.Contains( new KeyValuePair<string, object>( $"{KeyName}{i}", $"{ValueName}{i}"), keyValuePairs); } } private static void ValidateResource(Resource resource, int attributeCount) { Assert.NotNull(resource); Assert.NotNull(resource.Attributes); Assert.Equal(attributeCount, resource.Attributes.Count()); ValidateAttributes(resource.Attributes); } private static void ValidateTelemetrySdkAttributes(IEnumerable<KeyValuePair<string, object>> attributes) { Assert.Contains(new KeyValuePair<string, object>("telemetry.sdk.name", "opentelemetry"), attributes); Assert.Contains(new KeyValuePair<string, object>("telemetry.sdk.language", "dotnet"), attributes); var versionAttribute = attributes.Where(pair => pair.Key.Equals("telemetry.sdk.version")); Assert.Single(versionAttribute); } private Dictionary<string, object> CreateAttributes(int attributeCount, int startIndex = 0) { var attributes = new Dictionary<string, object>(); AddAttributes(attributes, attributeCount, startIndex); return attributes; } } }
1
18,583
Since we no longer expect a float input to return an empty string, I have removed this test case (see the sketch after this entry).
open-telemetry-opentelemetry-dotnet
.cs
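Assuming float is now treated as a supported attribute type (which is why the empty-string expectation was dropped), a matching assertion would presumably move into the supported-types test, roughly along these lines — illustrative only, slotting into the existing test class:

var attributes = new Dictionary<string, object>
{
    { "float", 0.1f },
};

var resource = new Resource(attributes);

// Hypothetical expectation: the float value survives instead of being
// replaced by string.Empty. The exact stored type (float vs. double)
// depends on the SDK's conversion rules.
Assert.Single(resource.Attributes);
Assert.Equal("float", resource.Attributes.Single().Key);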
@@ -36,6 +36,18 @@ func MakeCounter(metric MetricName) *Counter { return c } +func NewCounter(name, desc string) *Counter { + c := &Counter{ + name: name, + description: desc, + values: make([]*counterValues, 0), + labels: make(map[string]int), + valuesIndices: make(map[int]int), + } + c.Register(nil) + return c +} + // Register registers the counter with the default/specific registry func (counter *Counter) Register(reg *Registry) { if reg == nil {
1
// Copyright (C) 2019-2020 Algorand, Inc. // This file is part of go-algorand // // go-algorand is free software: you can redistribute it and/or modify // it under the terms of the GNU Affero General Public License as // published by the Free Software Foundation, either version 3 of the // License, or (at your option) any later version. // // go-algorand is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU Affero General Public License for more details. // // You should have received a copy of the GNU Affero General Public License // along with go-algorand. If not, see <https://www.gnu.org/licenses/>. package metrics import ( "math" "strconv" "strings" "sync/atomic" ) // MakeCounter create a new counter with the provided name and description. func MakeCounter(metric MetricName) *Counter { c := &Counter{ values: make([]*counterValues, 0), description: metric.Description, name: metric.Name, labels: make(map[string]int), valuesIndices: make(map[int]int), } c.Register(nil) return c } // Register registers the counter with the default/specific registry func (counter *Counter) Register(reg *Registry) { if reg == nil { DefaultRegistry().Register(counter) } else { reg.Register(counter) } } // Deregister deregisters the counter with the default/specific registry func (counter *Counter) Deregister(reg *Registry) { if reg == nil { DefaultRegistry().Deregister(counter) } else { reg.Deregister(counter) } } // Inc increases counter by 1 // Much faster if labels is nil or empty. func (counter *Counter) Inc(labels map[string]string) { if labels == nil || len(labels) == 0 { counter.fastAddUint64(1) } else { counter.Add(1.0, labels) } } // Add increases counter by x // For adding an integer, see AddUint64(x) func (counter *Counter) Add(x float64, labels map[string]string) { counter.Lock() defer counter.Unlock() labelIndex := counter.findLabelIndex(labels) // find where we have the same labels. if counterIdx, has := counter.valuesIndices[labelIndex]; !has { // we need to add a new counter. val := &counterValues{ counter: x, labels: labels, } val.createFormattedLabel() counter.values = append(counter.values, val) counter.valuesIndices[labelIndex] = len(counter.values) - 1 } else { // update existing value. counter.values[counterIdx].counter += x } } // AddUint64 increases counter by x // If labels is nil this is much faster than Add() // Calls through to Add() if labels is not nil. func (counter *Counter) AddUint64(x uint64, labels map[string]string) { if labels == nil || len(labels) == 0 { counter.fastAddUint64(x) } else { counter.Add(float64(x), labels) } } func (counter *Counter) fastAddUint64(x uint64) { if atomic.AddUint64(&counter.intValue, x) == x { // What we just added is the whole value, this // is the first Add. Create a dummy // counterValue for the no-labels value. // Dummy counterValue simplifies display in WriteMetric. counter.Add(0, nil) } } func (counter *Counter) findLabelIndex(labels map[string]string) int { accumulatedIndex := 0 for k, v := range labels { t := k + ":" + v // do we already have this key ( label ) in our map ? if i, has := counter.labels[t]; has { // yes, we do. use this index. accumulatedIndex += i } else { // no, we don't have it. 
counter.labels[t] = int(math.Exp2(float64(len(counter.labels)))) accumulatedIndex += counter.labels[t] } } return accumulatedIndex } func (cv *counterValues) createFormattedLabel() { var buf strings.Builder if len(cv.labels) < 1 { return } for k, v := range cv.labels { buf.WriteString("," + k + "=\"" + v + "\"") } cv.formattedLabels = buf.String()[1:] } // WriteMetric writes the metric into the output stream func (counter *Counter) WriteMetric(buf *strings.Builder, parentLabels string) { counter.Lock() defer counter.Unlock() if len(counter.values) < 1 { return } buf.WriteString("# HELP ") buf.WriteString(counter.name) buf.WriteString(" ") buf.WriteString(counter.description) buf.WriteString("\n# TYPE ") buf.WriteString(counter.name) buf.WriteString(" counter\n") for _, l := range counter.values { buf.WriteString(counter.name) buf.WriteString("{") if len(parentLabels) > 0 { buf.WriteString(parentLabels) if len(l.formattedLabels) > 0 { buf.WriteString(",") } } buf.WriteString(l.formattedLabels) buf.WriteString("} ") value := l.counter if len(l.labels) == 0 { value += float64(atomic.LoadUint64(&counter.intValue)) } buf.WriteString(strconv.FormatFloat(value, 'f', -1, 32)) buf.WriteString("\n") } } // AddMetric adds the metric into the map func (counter *Counter) AddMetric(values map[string]string) { counter.Lock() defer counter.Unlock() if len(counter.values) < 1 { return } for _, l := range counter.values { sum := l.counter if len(l.labels) == 0 { sum += float64(atomic.LoadUint64(&counter.intValue)) } values[counter.name] = strconv.FormatFloat(sum, 'f', -1, 32) } }
1
40,194
It would be cleaner to pack the name and description into a `MetricName` and pass it to `MakeCounter` (see the sketch after this entry).
algorand-go-algorand
go
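A minimal sketch of the reviewer's suggestion: build a MetricName from the two strings and delegate to MakeCounter, instead of duplicating the constructor body (field names follow the MakeCounter usage above):

// NewCounter creates and registers a counter from a bare name and
// description by delegating to MakeCounter.
func NewCounter(name, desc string) *Counter {
	return MakeCounter(MetricName{Name: name, Description: desc})
}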
@@ -24,5 +24,5 @@ using System.Runtime.CompilerServices; #else [assembly: InternalsVisibleTo("OpenTelemetry.Exporter.Jaeger.Tests")] [assembly: InternalsVisibleTo("Benchmarks")] -[assembly: InternalsVisibleTo("DynamicProxyGenAssembly2] // Used by Moq. +[assembly: InternalsVisibleTo("DynamicProxyGenAssembly2")] // Used by Moq. #endif
1
// <copyright file="AssemblyInfo.cs" company="OpenTelemetry Authors"> // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // </copyright> using System.Runtime.CompilerServices; #if SIGNED [assembly: InternalsVisibleTo("OpenTelemetry.Exporter.Jaeger.Tests, PublicKey=002400000480000094000000060200000024000052534131000400000100010051c1562a090fb0c9f391012a32198b5e5d9a60e9b80fa2d7b434c9e5ccb7259bd606e66f9660676afc6692b8cdc6793d190904551d2103b7b22fa636dcbb8208839785ba402ea08fc00c8f1500ccef28bbf599aa64ffb1e1d5dc1bf3420a3777badfe697856e9d52070a50c3ea5821c80bef17ca3acffa28f89dd413f096f898")] [assembly: InternalsVisibleTo("Benchmarks, PublicKey=002400000480000094000000060200000024000052534131000400000100010051c1562a090fb0c9f391012a32198b5e5d9a60e9b80fa2d7b434c9e5ccb7259bd606e66f9660676afc6692b8cdc6793d190904551d2103b7b22fa636dcbb8208839785ba402ea08fc00c8f1500ccef28bbf599aa64ffb1e1d5dc1bf3420a3777badfe697856e9d52070a50c3ea5821c80bef17ca3acffa28f89dd413f096f898")] // Used by Moq. [assembly: InternalsVisibleTo("DynamicProxyGenAssembly2, PublicKey=0024000004800000940000000602000000240000525341310004000001000100c547cac37abd99c8db225ef2f6c8a3602f3b3606cc9891605d02baa56104f4cfc0734aa39b93bf7852f7d9266654753cc297e7d2edfe0bac1cdcf9f717241550e0a7b191195b7667bb4f64bcb8e2121380fd1d9d46ad2d92d2d15605093924cceaf74c4861eff62abf69b9291ed0a340e113be11e6a7d3113e92484cf7045cc7")] #else [assembly: InternalsVisibleTo("OpenTelemetry.Exporter.Jaeger.Tests")] [assembly: InternalsVisibleTo("Benchmarks")] [assembly: InternalsVisibleTo("DynamicProxyGenAssembly2] // Used by Moq. #endif
1
14,813
Do you know why we need this?
open-telemetry-opentelemetry-dotnet
.cs
@@ -7,6 +7,7 @@ using System.Collections.Generic; using System.Linq; using MvvmCross.Platform.Ios.Presenters; using MvvmCross.Platform.Ios.Presenters.Attributes; +using MvvmCross.iOS.Views; using MvvmCross.ViewModels; using UIKit;
1
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MS-PL license. // See the LICENSE file in the project root for more information. using System; using System.Collections.Generic; using System.Linq; using MvvmCross.Platform.Ios.Presenters; using MvvmCross.Platform.Ios.Presenters.Attributes; using MvvmCross.ViewModels; using UIKit; namespace MvvmCross.Platform.Ios.Views { public class MvxTabBarViewController : MvxBaseTabBarViewController, IMvxTabBarViewController { private int _tabsCount = 0; public MvxTabBarViewController() : base() { // WORKAROUND: UIKit makes a first ViewDidLoad call, because a TabViewController expects it's view (tabs) to be drawn // on construction. Therefore we need to call ViewDidLoad "manually", otherwise ViewModel will be null ViewDidLoad(); } public MvxTabBarViewController(IntPtr handle) : base(handle) { } public override void ViewWillDisappear(bool animated) { base.ViewWillDisappear(animated); if (IsMovingFromParentViewController) { if (Mvx.TryResolve(out IMvxIosViewPresenter iPresenter) && iPresenter is MvxIosViewPresenter mvxIosViewPresenter) { mvxIosViewPresenter.CloseTabBarViewController(); }; } } public virtual void ShowTabView(UIViewController viewController, MvxTabPresentationAttribute attribute) { if (!string.IsNullOrEmpty(attribute.TabAccessibilityIdentifier)) viewController.View.AccessibilityIdentifier = attribute.TabAccessibilityIdentifier; // setup Tab SetTitleAndTabBarItem(viewController, attribute); // add Tab var currentTabs = new List<UIViewController>(); if (ViewControllers != null) { currentTabs = ViewControllers.ToList(); } currentTabs.Add(viewController); // update current Tabs ViewControllers = currentTabs.ToArray(); } protected virtual void SetTitleAndTabBarItem(UIViewController viewController, MvxTabPresentationAttribute attribute) { _tabsCount++; viewController.Title = attribute.TabName; if (!string.IsNullOrEmpty(attribute.TabIconName)) viewController.TabBarItem = new UITabBarItem(attribute.TabName, UIImage.FromBundle(attribute.TabIconName), _tabsCount); if (!string.IsNullOrEmpty(attribute.TabSelectedIconName)) viewController.TabBarItem.SelectedImage = UIImage.FromBundle(attribute.TabSelectedIconName); } public virtual bool ShowChildView(UIViewController viewController) { var navigationController = SelectedViewController as UINavigationController; // if the current selected ViewController is not a NavigationController, then a child cannot be shown if (navigationController == null) { return false; } navigationController.PushViewController(viewController, true); return true; } public virtual bool CanShowChildView() { return SelectedViewController is UINavigationController; } public virtual bool CloseChildViewModel(IMvxViewModel viewModel) { if (SelectedViewController is UINavigationController navController && navController.ViewControllers != null && navController.ViewControllers.Any()) { // if the ViewModel to close if the last in the stack, close it animated if (navController.TopViewController.GetIMvxIosView().ViewModel == viewModel) { navController.PopViewController(true); return true; } var controllers = navController.ViewControllers.ToList(); var controllerToClose = controllers.FirstOrDefault(vc => vc.GetIMvxIosView().ViewModel == viewModel); if (controllerToClose != null) { controllers.Remove(controllerToClose); navController.ViewControllers = controllers.ToArray(); return true; } } return false; } public virtual bool CloseTabViewModel(IMvxViewModel viewModel) { if 
(ViewControllers == null || !ViewControllers.Any()) return false; // loop through plain Tabs var plainToClose = ViewControllers.Where(v => !(v is UINavigationController)) .Select(v => v.GetIMvxIosView()) .FirstOrDefault(mvxView => mvxView.ViewModel == viewModel); if (plainToClose != null) { RemoveTabController((UIViewController)plainToClose); return true; } // loop through nav stack Tabs UIViewController toClose = null; foreach (var vc in ViewControllers.Where(v => v is UINavigationController)) { var root = ((UINavigationController)vc).ViewControllers.FirstOrDefault(); if (root != null && root.GetIMvxIosView().ViewModel == viewModel) { toClose = vc; break; } } if (toClose != null) { RemoveTabController((UIViewController)toClose); return true; } return false; } public void PresentViewControllerWithNavigation(UIViewController controller, bool animated = true, Action completionHandler = null) { PresentViewController(new UINavigationController(controller), animated, completionHandler); } protected virtual void RemoveTabController(UIViewController toClose) { var newTabs = ViewControllers.Where(v => v != toClose); ViewControllers = newTabs.ToArray(); } } public class MvxTabBarViewController<TViewModel> : MvxTabBarViewController where TViewModel : IMvxViewModel { public new TViewModel ViewModel { get { return (TViewModel)base.ViewModel; } set { base.ViewModel = value; } } public virtual UIViewController VisibleUIViewController { get { var topViewController = (SelectedViewController as UINavigationController)?.TopViewController ?? SelectedViewController; if (topViewController.PresentedViewController != null) { var presentedNavigationController = topViewController.PresentedViewController as UINavigationController; if (presentedNavigationController != null) { return presentedNavigationController.TopViewController; } else { return topViewController.PresentedViewController; } } else { return topViewController; } } } public MvxTabBarViewController() : base() { } public MvxTabBarViewController(IntPtr handle) : base(handle) { } } }
1
13,766
I guess this `using` is what causes the build to fail.
MvvmCross-MvvmCross
.cs
@@ -239,6 +239,7 @@ DECLARE_THREAD_FN(ponyint_asio_backend_dispatch) close(b->wakeup); ponyint_messageq_destroy(&b->q); POOL_FREE(asio_backend_t, b); + pony_unregister_thread(); return NULL; }
1
#define PONY_WANT_ATOMIC_DEFS #include "asio.h" #include "event.h" #ifdef ASIO_USE_EPOLL #include "../actor/messageq.h" #include "../mem/pool.h" #include <sys/epoll.h> #include <sys/eventfd.h> #include <sys/timerfd.h> #include <unistd.h> #include <string.h> #include <signal.h> #include <stdbool.h> #ifdef USE_VALGRIND #include <valgrind/helgrind.h> #endif #define MAX_SIGNAL 128 struct asio_backend_t { int epfd; int wakeup; /* eventfd to break epoll loop */ struct epoll_event events[MAX_EVENTS]; PONY_ATOMIC(asio_event_t*) sighandlers[MAX_SIGNAL]; PONY_ATOMIC(bool) terminate; messageq_t q; }; static void send_request(asio_event_t* ev, int req) { asio_backend_t* b = ponyint_asio_get_backend(); asio_msg_t* msg = (asio_msg_t*)pony_alloc_msg( POOL_INDEX(sizeof(asio_msg_t)), 0); msg->event = ev; msg->flags = req; ponyint_messageq_push(&b->q, (pony_msg_t*)msg); eventfd_write(b->wakeup, 1); } static void signal_handler(int sig) { if(sig >= MAX_SIGNAL) return; // Reset the signal handler. signal(sig, signal_handler); asio_backend_t* b = ponyint_asio_get_backend(); asio_event_t* ev = atomic_load_explicit(&b->sighandlers[sig], memory_order_acquire); #ifdef USE_VALGRIND ANNOTATE_HAPPENS_AFTER(&b->sighandlers[sig]); #endif if(ev == NULL) return; eventfd_write(ev->fd, 1); } static void handle_queue(asio_backend_t* b) { asio_msg_t* msg; while((msg = (asio_msg_t*)ponyint_messageq_pop(&b->q)) != NULL) { asio_event_t* ev = msg->event; switch(msg->flags) { case ASIO_DISPOSABLE: pony_asio_event_send(ev, ASIO_DISPOSABLE, 0); break; default: {} } } } asio_backend_t* ponyint_asio_backend_init() { asio_backend_t* b = POOL_ALLOC(asio_backend_t); memset(b, 0, sizeof(asio_backend_t)); ponyint_messageq_init(&b->q); b->epfd = epoll_create1(EPOLL_CLOEXEC); b->wakeup = eventfd(0, EFD_NONBLOCK); if(b->epfd == 0 || b->wakeup == 0) { POOL_FREE(asio_backend_t, b); return NULL; } struct epoll_event ep; ep.data.ptr = b; ep.events = EPOLLIN | EPOLLRDHUP | EPOLLET; epoll_ctl(b->epfd, EPOLL_CTL_ADD, b->wakeup, &ep); return b; } void ponyint_asio_backend_final(asio_backend_t* b) { atomic_store_explicit(&b->terminate, true, memory_order_relaxed); eventfd_write(b->wakeup, 1); } PONY_API void pony_asio_event_resubscribe_write(asio_event_t* ev) { if((ev == NULL) || (ev->flags == ASIO_DISPOSABLE) || (ev->flags == ASIO_DESTROYED)) return; asio_backend_t* b = ponyint_asio_get_backend(); struct epoll_event ep; ep.data.ptr = ev; ep.events = 0; if(ev->flags & ASIO_ONESHOT) ep.events |= EPOLLONESHOT; if((ev->flags & ASIO_WRITE) && !ev->writeable) ep.events |= EPOLLOUT; else return; epoll_ctl(b->epfd, EPOLL_CTL_MOD, ev->fd, &ep); } PONY_API void pony_asio_event_resubscribe_read(asio_event_t* ev) { if((ev == NULL) || (ev->flags == ASIO_DISPOSABLE) || (ev->flags == ASIO_DESTROYED)) return; asio_backend_t* b = ponyint_asio_get_backend(); struct epoll_event ep; ep.data.ptr = ev; ep.events = EPOLLRDHUP | EPOLLET; if(ev->flags & ASIO_ONESHOT) ep.events |= EPOLLONESHOT; if((ev->flags & ASIO_READ) && !ev->readable) ep.events |= EPOLLIN; else return; epoll_ctl(b->epfd, EPOLL_CTL_MOD, ev->fd, &ep); } DECLARE_THREAD_FN(ponyint_asio_backend_dispatch) { pony_register_thread(); asio_backend_t* b = arg; while(!atomic_load_explicit(&b->terminate, memory_order_relaxed)) { int event_cnt = epoll_wait(b->epfd, b->events, MAX_EVENTS, -1); for(int i = 0; i < event_cnt; i++) { struct epoll_event* ep = &(b->events[i]); if(ep->data.ptr == b) continue; asio_event_t* ev = ep->data.ptr; uint32_t flags = 0; uint32_t count = 0; if(ev->flags & ASIO_READ) { if(ep->events & 
(EPOLLIN | EPOLLRDHUP | EPOLLHUP | EPOLLERR)) { flags |= ASIO_READ; ev->readable = true; } } if(ev->flags & ASIO_WRITE) { if(ep->events & EPOLLOUT) { flags |= ASIO_WRITE; ev->writeable = true; } } if(ev->flags & ASIO_TIMER) { if(ep->events & (EPOLLIN | EPOLLRDHUP | EPOLLHUP | EPOLLERR)) { uint64_t missed; ssize_t rc = read(ev->fd, &missed, sizeof(uint64_t)); (void)rc; flags |= ASIO_TIMER; } } if(ev->flags & ASIO_SIGNAL) { if(ep->events & (EPOLLIN | EPOLLRDHUP | EPOLLHUP | EPOLLERR)) { uint64_t missed; ssize_t rc = read(ev->fd, &missed, sizeof(uint64_t)); (void)rc; flags |= ASIO_SIGNAL; count = (uint32_t)missed; } } if(flags != 0) { if(ev->auto_resub && !(flags & ASIO_WRITE)) pony_asio_event_resubscribe_write(ev); if(ev->auto_resub && !(flags & ASIO_READ)) pony_asio_event_resubscribe_read(ev); pony_asio_event_send(ev, flags, count); } } handle_queue(b); } close(b->epfd); close(b->wakeup); ponyint_messageq_destroy(&b->q); POOL_FREE(asio_backend_t, b); return NULL; } static void timer_set_nsec(int fd, uint64_t nsec) { struct itimerspec ts; ts.it_interval.tv_sec = 0; ts.it_interval.tv_nsec = 0; ts.it_value.tv_sec = (time_t)(nsec / 1000000000); ts.it_value.tv_nsec = (long)(nsec - (ts.it_value.tv_sec * 1000000000)); timerfd_settime(fd, 0, &ts, NULL); } PONY_API void pony_asio_event_subscribe(asio_event_t* ev) { if((ev == NULL) || (ev->flags == ASIO_DISPOSABLE) || (ev->flags == ASIO_DESTROYED)) return; asio_backend_t* b = ponyint_asio_get_backend(); if(ev->noisy) ponyint_asio_noisy_add(); struct epoll_event ep; ep.data.ptr = ev; ep.events = EPOLLRDHUP | EPOLLET; if(ev->flags & ASIO_READ) ep.events |= EPOLLIN; if(ev->flags & ASIO_WRITE) ep.events |= EPOLLOUT; if(ev->flags & ASIO_TIMER) { ev->fd = timerfd_create(CLOCK_MONOTONIC, TFD_NONBLOCK); timer_set_nsec(ev->fd, ev->nsec); ep.events |= EPOLLIN; } if(ev->flags & ASIO_SIGNAL) { int sig = (int)ev->nsec; asio_event_t* prev = NULL; #ifdef USE_VALGRIND ANNOTATE_HAPPENS_BEFORE(&b->sighandlers[sig]); #endif if((sig < MAX_SIGNAL) && atomic_compare_exchange_strong_explicit(&b->sighandlers[sig], &prev, ev, memory_order_release, memory_order_relaxed)) { signal(sig, signal_handler); ev->fd = eventfd(0, EFD_NONBLOCK); ep.events |= EPOLLIN; } else { return; } } if(ev->flags & ASIO_ONESHOT) { ep.events |= EPOLLONESHOT; ev->auto_resub = true; } epoll_ctl(b->epfd, EPOLL_CTL_ADD, ev->fd, &ep); } PONY_API void pony_asio_event_setnsec(asio_event_t* ev, uint64_t nsec) { if((ev == NULL) || (ev->flags == ASIO_DISPOSABLE) || (ev->flags == ASIO_DESTROYED)) return; if(ev->flags & ASIO_TIMER) { ev->nsec = nsec; timer_set_nsec(ev->fd, nsec); } } PONY_API void pony_asio_event_unsubscribe(asio_event_t* ev) { if((ev == NULL) || (ev->flags == ASIO_DISPOSABLE) || (ev->flags == ASIO_DESTROYED)) return; asio_backend_t* b = ponyint_asio_get_backend(); if(ev->noisy) { ponyint_asio_noisy_remove(); ev->noisy = false; } epoll_ctl(b->epfd, EPOLL_CTL_DEL, ev->fd, NULL); if(ev->flags & ASIO_TIMER) { if(ev->fd != -1) { close(ev->fd); ev->fd = -1; } } if(ev->flags & ASIO_SIGNAL) { int sig = (int)ev->nsec; asio_event_t* prev = ev; #ifdef USE_VALGRIND ANNOTATE_HAPPENS_BEFORE(&b->sighandlers[sig]); #endif if((sig < MAX_SIGNAL) && atomic_compare_exchange_strong_explicit(&b->sighandlers[sig], &prev, NULL, memory_order_release, memory_order_relaxed)) { signal(sig, SIG_DFL); close(ev->fd); ev->fd = -1; } } ev->flags = ASIO_DISPOSABLE; send_request(ev, ASIO_DISPOSABLE); } #endif
1
9,948
I like the explicit unregister approach (see the register/unregister sketch after this row).
ponylang-ponyc
c
@@ -25,8 +25,11 @@ namespace OpenTelemetry.Metrics Type: 0x10: Sum 0x20: Gauge - 0x30: Histogram - 0x40: Summary (reserved) + 0x40: Histogram + 0x50: HistogramWithMinMax (reserved) + 0x60: ExponentialHistogram (reserved) + 0x70: ExponentialHistogramWithMinMax (reserved) + 0x80: Summary (reserved) Point kind: 0x04: I1 (signed 1-byte integer)
1
// <copyright file="MetricType.cs" company="OpenTelemetry Authors"> // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // </copyright> using System; namespace OpenTelemetry.Metrics { [Flags] public enum MetricType : byte { /* Type: 0x10: Sum 0x20: Gauge 0x30: Histogram 0x40: Summary (reserved) Point kind: 0x04: I1 (signed 1-byte integer) 0x05: U1 (unsigned 1-byte integer) 0x06: I2 (signed 2-byte integer) 0x07: U2 (unsigned 2-byte integer) 0x08: I4 (signed 4-byte integer) 0x09: U4 (unsigned 4-byte integer) 0x0a: I8 (signed 8-byte integer) 0x0b: U8 (unsigned 8-byte integer) 0x0c: R4 (4-byte floating point) 0x0d: R8 (8-byte floating point) */ /// <summary> /// Sum of Long type. /// </summary> LongSum = 0x1a, /// <summary> /// Sum of Double type. /// </summary> DoubleSum = 0x1d, /// <summary> /// Gauge of Long type. /// </summary> LongGauge = 0x2a, /// <summary> /// Gauge of Double type. /// </summary> DoubleGauge = 0x2d, /// <summary> /// Histogram. /// </summary> Histogram = 0x30, } }
1
22,922
We should be able to use 0x30 as Summary, and make 0x80 reserved for the future (see the encoding sketch after this row).
open-telemetry-opentelemetry-dotnet
.cs
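To make the flag layout above concrete: the enum packs the metric type into the high nibble and the point kind into the low nibble, so `LongSum = 0x1a` is `Sum (0x10) | I8 (0x0a)`. A small Go sketch of the same composition (constant names are illustrative; the real type is the C# enum shown above):

```go
package main

import "fmt"

// High-nibble metric types, mirroring the comment block in MetricType.cs.
// The patch moves Histogram to 0x40; the reviewer would rather keep 0x30 in
// use (for Summary) and park the reserved slot at 0x80.
const (
	typeSum       = 0x10
	typeGauge     = 0x20
	typeHistogram = 0x30
)

// Low-nibble point kinds.
const (
	kindI8 = 0x0a // signed 8-byte integer
	kindR8 = 0x0d // 8-byte floating point
)

func main() {
	longSum := typeSum | kindI8       // 0x1a — matches LongSum in the enum
	doubleGauge := typeGauge | kindR8 // 0x2d — matches DoubleGauge
	fmt.Printf("LongSum=0x%02x DoubleGauge=0x%02x\n", longSum, doubleGauge)
	// The nibble masks recover each half of the encoding:
	fmt.Printf("type=0x%02x kind=0x%02x\n", longSum&0xf0, longSum&0x0f)
}
```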
@@ -104,7 +104,7 @@ namespace Datadog.Trace.ClrProfiler { tags = null; - if (!tracer.Settings.IsIntegrationEnabled(integrationId) || HttpBypassHelper.UriContainsAnyOf(requestUri, tracer.Settings.HttpClientExcludedUrlSubstrings)) + if (!tracer.Settings.IsIntegrationEnabled(integrationId) || PlatformHelpers.PlatformStrategy.ShouldSkipClientSpan(tracer.ActiveScope) || HttpBypassHelper.UriContainsAnyOf(requestUri, tracer.Settings.HttpClientExcludedUrlSubstrings)) { // integration disabled, don't create a scope, skip this trace return null;
1
// <copyright file="ScopeFactory.cs" company="Datadog"> // Unless explicitly stated otherwise all files in this repository are licensed under the Apache 2 License. // This product includes software developed at Datadog (https://www.datadoghq.com/). Copyright 2017 Datadog, Inc. // </copyright> using System; using System.Data; using System.Linq; using Datadog.Trace.ClrProfiler.Helpers; using Datadog.Trace.ClrProfiler.Integrations.AdoNet; using Datadog.Trace.Configuration; using Datadog.Trace.ExtensionMethods; using Datadog.Trace.Logging; using Datadog.Trace.Tagging; using Datadog.Trace.Util; namespace Datadog.Trace.ClrProfiler { /// <summary> /// Convenience class that creates scopes and populates them with some standard details. /// </summary> internal static class ScopeFactory { public const string OperationName = "http.request"; public const string ServiceName = "http-client"; private static readonly IDatadogLogger Log = DatadogLogging.GetLoggerFor(typeof(ScopeFactory)); public static Scope GetActiveHttpScope(Tracer tracer) { var scope = tracer.ActiveScope; var parent = scope?.Span; if (parent != null && parent.Type == SpanTypes.Http && parent.GetTag(Tags.InstrumentationName) != null) { return scope; } return null; } /// <summary> /// Creates a span context for outbound http requests, or get the active one. /// Used to propagate headers without changing the active span. /// </summary> /// <param name="tracer">The tracer instance to use to create the span.</param> /// <param name="integrationId">The id of the integration creating this scope.</param> /// <returns>A span context to use to populate headers</returns> public static SpanContext CreateHttpSpanContext(Tracer tracer, IntegrationInfo integrationId) { if (!tracer.Settings.IsIntegrationEnabled(integrationId)) { // integration disabled, skip this trace return null; } try { var activeScope = GetActiveHttpScope(tracer); if (activeScope != null) { return activeScope.Span.Context; } return tracer.CreateSpanContext(serviceName: $"{tracer.DefaultServiceName}-{ServiceName}"); } catch (Exception ex) { Log.Error(ex, "Error creating or populating span context."); } return null; } /// <summary> /// Creates a scope for outbound http requests and populates some common details. /// </summary> /// <param name="tracer">The tracer instance to use to create the new scope.</param> /// <param name="httpMethod">The HTTP method used by the request.</param> /// <param name="requestUri">The URI requested by the request.</param> /// <param name="integrationId">The id of the integration creating this scope.</param> /// <param name="tags">The tags associated to the scope</param> /// <param name="spanId">The span ID</param> /// <returns>A new pre-populated scope.</returns> public static Scope CreateOutboundHttpScope(Tracer tracer, string httpMethod, Uri requestUri, IntegrationInfo integrationId, out HttpTags tags, ulong? spanId = null) => CreateOutboundHttpScope(tracer, httpMethod, requestUri, integrationId, out tags, spanId, startTime: null); /// <summary> /// Creates a scope for outbound http requests and populates some common details. 
/// </summary> /// <param name="tracer">The tracer instance to use to create the new scope.</param> /// <param name="httpMethod">The HTTP method used by the request.</param> /// <param name="requestUri">The URI requested by the request.</param> /// <param name="integrationId">The id of the integration creating this scope.</param> /// <param name="tags">The tags associated to the scope</param> /// <param name="spanId">The span ID</param> /// <param name="startTime">The start time that should be applied to the span</param> /// <returns>A new pre-populated scope.</returns> internal static Scope CreateOutboundHttpScope(Tracer tracer, string httpMethod, Uri requestUri, IntegrationInfo integrationId, out HttpTags tags, ulong? spanId, DateTimeOffset? startTime) { tags = null; if (!tracer.Settings.IsIntegrationEnabled(integrationId) || HttpBypassHelper.UriContainsAnyOf(requestUri, tracer.Settings.HttpClientExcludedUrlSubstrings)) { // integration disabled, don't create a scope, skip this trace return null; } Scope scope = null; try { if (GetActiveHttpScope(tracer) != null) { // we are already instrumenting this, // don't instrument nested methods that belong to the same stacktrace // e.g. HttpClientHandler.SendAsync() -> SocketsHttpHandler.SendAsync() return null; } string resourceUrl = requestUri != null ? UriHelpers.CleanUri(requestUri, removeScheme: true, tryRemoveIds: true) : null; string httpUrl = requestUri != null ? UriHelpers.CleanUri(requestUri, removeScheme: false, tryRemoveIds: false) : null; tags = new HttpTags(); string serviceName = tracer.Settings.GetServiceName(tracer, ServiceName); scope = tracer.StartActiveWithTags(OperationName, tags: tags, serviceName: serviceName, spanId: spanId, startTime: startTime); var span = scope.Span; span.Type = SpanTypes.Http; span.ResourceName = $"{httpMethod} {resourceUrl}"; tags.HttpMethod = httpMethod?.ToUpperInvariant(); tags.HttpUrl = httpUrl; tags.InstrumentationName = IntegrationRegistry.GetName(integrationId); tags.SetAnalyticsSampleRate(integrationId, tracer.Settings, enabledWithGlobalSetting: false); } catch (Exception ex) { Log.Error(ex, "Error creating or populating scope."); } // always returns the scope, even if it's null because we couldn't create it, // or we couldn't populate it completely (some tags is better than no tags) return scope; } public static Scope CreateDbCommandScope(Tracer tracer, IDbCommand command) { if (!tracer.Settings.IsIntegrationEnabled(AdoNetConstants.IntegrationId)) { // integration disabled, don't create a scope, skip this trace return null; } var commandType = command.GetType(); if (tracer.Settings.AdoNetExcludedTypes.Count > 0 && tracer.Settings.AdoNetExcludedTypes.Contains(commandType.FullName)) { // AdoNet type disabled, don't create a scope, skip this trace return null; } Scope scope = null; try { string dbType = GetDbType(commandType.Namespace, commandType.Name); if (dbType == null) { // don't create a scope, skip this trace return null; } Span parent = tracer.ActiveScope?.Span; if (parent != null && parent.Type == SpanTypes.Sql && parent.GetTag(Tags.DbType) == dbType && parent.ResourceName == command.CommandText) { // we are already instrumenting this, // don't instrument nested methods that belong to the same stacktrace // e.g. 
ExecuteReader() -> ExecuteReader(commandBehavior) return null; } string serviceName = tracer.Settings.GetServiceName(tracer, dbType); string operationName = $"{dbType}.query"; var tags = new SqlTags(); scope = tracer.StartActiveWithTags(operationName, tags: tags, serviceName: serviceName); var span = scope.Span; tags.DbType = dbType; span.AddTagsFromDbCommand(command); tags.SetAnalyticsSampleRate(AdoNetConstants.IntegrationId, tracer.Settings, enabledWithGlobalSetting: false); } catch (Exception ex) { Log.Error(ex, "Error creating or populating scope."); } return scope; } public static string GetDbType(string namespaceName, string commandTypeName) { // First we try with the most commons ones. Avoiding the ComputeStringHash var result = commandTypeName switch { "SqlCommand" => "sql-server", "NpgsqlCommand" => "postgres", "MySqlCommand" => "mysql", "SqliteCommand" => "sqlite", "SQLiteCommand" => "sqlite", _ => null, }; // If we add these cases to the previous switch the JIT will apply the ComputeStringHash codegen if (result != null || commandTypeName == "InterceptableDbCommand" || commandTypeName == "ProfiledDbCommand") { return result; } const string commandSuffix = "Command"; // Now the uncommon cases return commandTypeName switch { _ when namespaceName.Length == 0 && commandTypeName == commandSuffix => "command", _ when namespaceName.Contains('.') && commandTypeName == commandSuffix => // the + 1 could be dangerous and cause IndexOutOfRangeException, but this shouldn't happen // a period should never be the last character in a namespace namespaceName.Substring(namespaceName.LastIndexOf('.') + 1).ToLowerInvariant(), _ when commandTypeName == commandSuffix => namespaceName.ToLowerInvariant(), _ when commandTypeName.EndsWith(commandSuffix) => commandTypeName.Substring(0, commandTypeName.Length - commandSuffix.Length).ToLowerInvariant(), _ => commandTypeName.ToLowerInvariant() }; } } }
1
21,424
This allows strategic exclusion of http spans (see the sketch after this row).
DataDog-dd-trace-dotnet
.cs
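A hedged Go sketch of the gate pattern the patch above adds — a platform strategy consulted before an outbound HTTP scope is created. All type and method names below are hypothetical analogues of the C# `PlatformHelpers.PlatformStrategy.ShouldSkipClientSpan` hook:

```go
package tracing

// Scope is a minimal stand-in for the tracer's active scope.
type Scope struct{ Name string }

// PlatformStrategy is a hypothetical hook mirroring ShouldSkipClientSpan:
// a platform integration (e.g. Azure App Services) decides whether the
// outbound client span should be suppressed for the active scope.
type PlatformStrategy interface {
	ShouldSkipClientSpan(active *Scope) bool
}

// CreateOutboundHTTPScope returns nil (no span) when the integration is
// disabled or the platform strategy vetoes the client span — the same
// short-circuit the patched C# guard performs.
func CreateOutboundHTTPScope(enabled bool, strat PlatformStrategy, active *Scope) *Scope {
	if !enabled || (strat != nil && strat.ShouldSkipClientSpan(active)) {
		return nil // integration disabled or vetoed; skip this trace
	}
	return &Scope{Name: "http.request"}
}
```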
@@ -19,8 +19,10 @@ final class SearchDto private $searchableProperties; /** @var string[]|null */ private $appliedFilters; + /** @var string[]|null */ + private $strictTextSearchFields; - public function __construct(Request $request, ?array $searchableProperties, ?string $query, array $defaultSort, array $customSort, ?array $appliedFilters) + public function __construct(Request $request, ?array $searchableProperties, ?string $query, array $defaultSort, array $customSort, ?array $appliedFilters, ?array $strictTextSearchFields) { $this->request = $request; $this->searchableProperties = $searchableProperties;
1
<?php namespace EasyCorp\Bundle\EasyAdminBundle\Dto; use Symfony\Component\HttpFoundation\Request; /** * @author Javier Eguiluz <[email protected]> */ final class SearchDto { private $request; private $defaultSort; private $customSort; /** @internal */ private $mergedSort; private $query; /** @var string[]|null */ private $searchableProperties; /** @var string[]|null */ private $appliedFilters; public function __construct(Request $request, ?array $searchableProperties, ?string $query, array $defaultSort, array $customSort, ?array $appliedFilters) { $this->request = $request; $this->searchableProperties = $searchableProperties; $this->query = trim($query); $this->defaultSort = $defaultSort; $this->customSort = $customSort; $this->appliedFilters = $appliedFilters; } public function getRequest(): Request { return $this->request; } public function getSort(): array { if (null !== $this->mergedSort) { return $this->mergedSort; } // we can't use an array_merge() call because $customSort has more priority // than $defaultSort, so the default sort must only be applied if there's // not already a custom sort config for the same field $mergedSort = $this->customSort; foreach ($this->defaultSort as $fieldName => $order) { if (!\array_key_exists($fieldName, $mergedSort)) { $mergedSort[$fieldName] = $order; } } return $this->mergedSort = $mergedSort; } public function isSortingField(string $fieldProperty): bool { $firstSortField = \count($this->getSort()) > 0 ? array_keys($this->getSort())[0] : null; if (null === $firstSortField) { return false; } // TODO: check for association properties when they support search (e.g. 'user.name') return $fieldProperty === $firstSortField; } public function getSortDirection(string $fieldProperty): string { return \array_key_exists($fieldProperty, $this->getSort()) ? $this->getSort()[$fieldProperty] : 'DESC'; } public function getQuery(): ?string { return $this->query; } /** * @return string[]|null */ public function getSearchableProperties(): ?array { return $this->searchableProperties; } public function getAppliedFilters(): ?array { return $this->appliedFilters; } }
1
13,567
Why not just `string[]` instead of nullable?
EasyCorp-EasyAdminBundle
php
@@ -150,7 +150,7 @@ func (e *Extractor) listPackages(query ...string) ([]*jsonPackage, error) { // TODO(schroederc): support GOPACKAGESDRIVER args := append([]string{"list", "-compiler=" + e.BuildContext.Compiler, - "-tags=" + strings.Join(e.BuildContext.BuildTags, ","), + "-tags=" + strings.Join(e.BuildContext.BuildTags, " "), "-installsuffix=" + e.BuildContext.InstallSuffix, "-test", "-deps",
1
/* * Copyright 2018 The Kythe Authors. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package golang import ( "bytes" "encoding/json" "fmt" "go/build" "os" "os/exec" "path/filepath" "strings" ) // Fields must match go list; // see $GOROOT/src/cmd/go/internal/load/pkg.go. type jsonPackage struct { Dir string ImportPath string Name string Doc string Root string Export string Goroot bool GoFiles []string CFiles []string CgoFiles []string CXXFiles []string MFiles []string HFiles []string FFiles []string SFiles []string SwigFiles []string SwigCXXFiles []string SysoFiles []string CgoCFLAGS []string CgoCPPFLAGS []string CgoCXXFLAGS []string CgoFFLAGS []string CgoLDFLAGS []string CgoPkgConfig []string Imports []string TestGoFiles []string TestImports []string XTestGoFiles []string XTestImports []string ForTest string // q in a "p [q.test]" package, else "" DepOnly bool Error *jsonPackageError } func (pkg *jsonPackage) buildPackage() *build.Package { bp := &build.Package{ Dir: pkg.Dir, ImportPath: pkg.ImportPath, Name: pkg.Name, Doc: pkg.Doc, Root: pkg.Root, PkgObj: pkg.Export, Goroot: pkg.Goroot, GoFiles: pkg.GoFiles, CgoFiles: pkg.CgoFiles, CFiles: pkg.CFiles, CXXFiles: pkg.CXXFiles, MFiles: pkg.MFiles, HFiles: pkg.HFiles, FFiles: pkg.FFiles, SFiles: pkg.SFiles, SwigFiles: pkg.SwigFiles, SwigCXXFiles: pkg.SwigCXXFiles, SysoFiles: pkg.SysoFiles, CgoCFLAGS: pkg.CgoCFLAGS, CgoCPPFLAGS: pkg.CgoCPPFLAGS, CgoCXXFLAGS: pkg.CgoCXXFLAGS, CgoFFLAGS: pkg.CgoFFLAGS, CgoLDFLAGS: pkg.CgoLDFLAGS, CgoPkgConfig: pkg.CgoPkgConfig, Imports: pkg.Imports, TestGoFiles: pkg.TestGoFiles, TestImports: pkg.TestImports, XTestGoFiles: pkg.XTestGoFiles, XTestImports: pkg.XTestImports, } if bp.Root != "" { bp.SrcRoot = filepath.Join(bp.Root, "src") bp.PkgRoot = filepath.Join(bp.Root, "pkg") bp.BinDir = filepath.Join(bp.Root, "bin") } return bp } type jsonPackageError struct { ImportStack []string Pos string Err string } func (e jsonPackageError) Error() string { return fmt.Sprintf("%s: %s", e.Pos, e.Err) } func buildContextEnv(bc build.Context) ([]string, error) { cgo := "0" if bc.CgoEnabled { cgo = "1" } vars := []string{ "CGO_ENABLED=" + cgo, "GOARCH=" + bc.GOARCH, "GOOS=" + bc.GOOS, } envPaths := map[string]string{ "GOROOT": bc.GOROOT, "GOPATH": bc.GOPATH, } for name, path := range envPaths { abs, err := filepath.Abs(path) if err != nil { return nil, fmt.Errorf("error finding absolute path for %q: %v", path, err) } vars = append(vars, fmt.Sprintf("%s=%s", name, abs)) } return vars, nil } func (e *Extractor) listPackages(query ...string) ([]*jsonPackage, error) { // TODO(schroederc): support GOPACKAGESDRIVER args := append([]string{"list", "-compiler=" + e.BuildContext.Compiler, "-tags=" + strings.Join(e.BuildContext.BuildTags, ","), "-installsuffix=" + e.BuildContext.InstallSuffix, "-test", "-deps", "-e", "-export", "-compiled", "-json", "--"}, query...) goTool := "go" if e.BuildContext.GOROOT != "" { goTool = filepath.Join(e.BuildContext.GOROOT, "bin/go") } cmd := exec.Command(goTool, args...) 
env, err := buildContextEnv(e.BuildContext) if err != nil { return nil, err } cmd.Env = append(os.Environ(), env...) var out bytes.Buffer cmd.Stdout = &out cmd.Stderr = os.Stderr listErr := cmd.Run() var pkgs []*jsonPackage for de := json.NewDecoder(&out); de.More(); { var pkg jsonPackage if err := de.Decode(&pkg); err != nil { return nil, err } pkgs = append(pkgs, &pkg) } return pkgs, listErr }
1
10,816
How does this work? Why isn't each tag after the first picked up as a new arg/flag? (See the sketch after this row.)
kythe-kythe
go
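Why the space-joined value isn't split into extra flags: the extractor builds argv directly via `exec.Command`, so the entire `-tags=...` string reaches `go list` as a single argument — only a shell would re-split it on spaces, and `exec.Command` never invokes one. (At the time of this change the go tool documented `-tags` as a space-separated list; comma-separated lists became the preferred form in later Go releases.) A minimal demonstration:

```go
package main

import (
	"fmt"
	"os/exec"
	"strings"
)

func main() {
	tags := []string{"foo", "bar"}
	// The joined value, spaces and all, becomes ONE element of argv.
	cmd := exec.Command("go", "list", "-tags="+strings.Join(tags, " "), "./...")
	fmt.Println(len(cmd.Args), cmd.Args) // 4 [go list -tags=foo bar ./...]
}
```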
@@ -37,5 +37,10 @@ namespace Datadog.Trace /// The id of the container where the traced application is running. /// </summary> public const string ContainerId = "Datadog-Container-ID"; + + /// <summary> + /// The resource id of the site instance in azure app services where the traced application is running. + /// </summary> + public const string AzureAppServicesResourceId = "Datadog-AzureAppServices-Resource-ID"; } }
1
using System.Runtime.InteropServices; namespace Datadog.Trace { /// <summary> /// Names of HTTP headers that can be used when sending traces to the Trace Agent. /// </summary> internal static class AgentHttpHeaderNames { /// <summary> /// The language-specific tracer that generated this span. /// Always ".NET" for the .NET Tracer. /// </summary> public const string Language = "Datadog-Meta-Lang"; /// <summary> /// The interpreter for the given language, e.g. ".NET Framework" or ".NET Core". /// </summary> public const string LanguageInterpreter = "Datadog-Meta-Lang-Interpreter"; /// <summary> /// The interpreter version for the given language, e.g. "4.7.2" for .NET Framework or "2.1" for .NET Core. /// </summary> public const string LanguageVersion = "Datadog-Meta-Lang-Version"; /// <summary> /// The version of the tracer that generated this span. /// </summary> public const string TracerVersion = "Datadog-Meta-Tracer-Version"; /// <summary> /// The number of unique traces per request. /// </summary> public const string TraceCount = "X-Datadog-Trace-Count"; /// <summary> /// The id of the container where the traced application is running. /// </summary> public const string ContainerId = "Datadog-Container-ID"; } }
1
16,586
This key is actually yet to be determined. Meeting with the backend team and Garner to discuss.
DataDog-dd-trace-dotnet
.cs
@@ -1298,12 +1298,8 @@ bool CoreChecks::ValidateImageUpdate(VkImageView image_view, VkImageLayout image const char *func_name, std::string *error_code, std::string *error_msg) { *error_code = "VUID-VkWriteDescriptorSet-descriptorType-00326"; auto iv_state = GetImageViewState(image_view); - if (!iv_state) { - std::stringstream error_str; - error_str << "Invalid VkImageView: " << report_data->FormatHandle(image_view).c_str(); - *error_msg = error_str.str(); - return false; - } + assert(iv_state); + // Note that when an imageview is created, we validated that memory is bound so no need to re-check here // Validate that imageLayout is compatible with aspect_mask and image format // and validate that image usage bits are correct for given usage
1
/* Copyright (c) 2015-2019 The Khronos Group Inc. * Copyright (c) 2015-2019 Valve Corporation * Copyright (c) 2015-2019 LunarG, Inc. * Copyright (C) 2015-2019 Google Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * Author: Tobin Ehlis <[email protected]> * John Zulauf <[email protected]> */ // Allow use of STL min and max functions in Windows #define NOMINMAX #include "chassis.h" #include "core_validation_error_enums.h" #include "core_validation.h" #include "descriptor_sets.h" #include "hash_vk_types.h" #include "vk_enum_string_helper.h" #include "vk_safe_struct.h" #include "vk_typemap_helper.h" #include "buffer_validation.h" #include <sstream> #include <algorithm> #include <array> #include <memory> // ExtendedBinding collects a VkDescriptorSetLayoutBinding and any extended // state that comes from a different array/structure so they can stay together // while being sorted by binding number. struct ExtendedBinding { ExtendedBinding(const VkDescriptorSetLayoutBinding *l, VkDescriptorBindingFlagsEXT f) : layout_binding(l), binding_flags(f) {} const VkDescriptorSetLayoutBinding *layout_binding; VkDescriptorBindingFlagsEXT binding_flags; }; struct BindingNumCmp { bool operator()(const ExtendedBinding &a, const ExtendedBinding &b) const { return a.layout_binding->binding < b.layout_binding->binding; } }; using DescriptorSet = cvdescriptorset::DescriptorSet; using DescriptorSetLayout = cvdescriptorset::DescriptorSetLayout; using DescriptorSetLayoutDef = cvdescriptorset::DescriptorSetLayoutDef; using DescriptorSetLayoutId = cvdescriptorset::DescriptorSetLayoutId; // Canonical dictionary of DescriptorSetLayoutDef (without any handle/device specific information) cvdescriptorset::DescriptorSetLayoutDict descriptor_set_layout_dict; DescriptorSetLayoutId GetCanonicalId(const VkDescriptorSetLayoutCreateInfo *p_create_info) { return descriptor_set_layout_dict.look_up(DescriptorSetLayoutDef(p_create_info)); } // Construct DescriptorSetLayout instance from given create info // Proactively reserve and resize as possible, as the reallocation was visible in profiling cvdescriptorset::DescriptorSetLayoutDef::DescriptorSetLayoutDef(const VkDescriptorSetLayoutCreateInfo *p_create_info) : flags_(p_create_info->flags), binding_count_(0), descriptor_count_(0), dynamic_descriptor_count_(0) { const auto *flags_create_info = lvl_find_in_chain<VkDescriptorSetLayoutBindingFlagsCreateInfoEXT>(p_create_info->pNext); binding_type_stats_ = {0, 0, 0}; std::set<ExtendedBinding, BindingNumCmp> sorted_bindings; const uint32_t input_bindings_count = p_create_info->bindingCount; // Sort the input bindings in binding number order, eliminating duplicates for (uint32_t i = 0; i < input_bindings_count; i++) { VkDescriptorBindingFlagsEXT flags = 0; if (flags_create_info && flags_create_info->bindingCount == p_create_info->bindingCount) { flags = flags_create_info->pBindingFlags[i]; } sorted_bindings.insert(ExtendedBinding(p_create_info->pBindings + i, flags)); } // Store the create info in the sorted order from above std::map<uint32_t, 
uint32_t> binding_to_dyn_count; uint32_t index = 0; binding_count_ = static_cast<uint32_t>(sorted_bindings.size()); bindings_.reserve(binding_count_); binding_flags_.reserve(binding_count_); binding_to_index_map_.reserve(binding_count_); for (auto input_binding : sorted_bindings) { // Add to binding and map, s.t. it is robust to invalid duplication of binding_num const auto binding_num = input_binding.layout_binding->binding; binding_to_index_map_[binding_num] = index++; bindings_.emplace_back(input_binding.layout_binding); auto &binding_info = bindings_.back(); binding_flags_.emplace_back(input_binding.binding_flags); descriptor_count_ += binding_info.descriptorCount; if (binding_info.descriptorCount > 0) { non_empty_bindings_.insert(binding_num); } if (binding_info.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC || binding_info.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) { binding_to_dyn_count[binding_num] = binding_info.descriptorCount; dynamic_descriptor_count_ += binding_info.descriptorCount; binding_type_stats_.dynamic_buffer_count++; } else if ((binding_info.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER) || (binding_info.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER)) { binding_type_stats_.non_dynamic_buffer_count++; } else { binding_type_stats_.image_sampler_count++; } } assert(bindings_.size() == binding_count_); assert(binding_flags_.size() == binding_count_); uint32_t global_index = 0; global_index_range_.reserve(binding_count_); // Vector order is finalized so build vectors of descriptors and dynamic offsets by binding index for (uint32_t i = 0; i < binding_count_; ++i) { auto final_index = global_index + bindings_[i].descriptorCount; global_index_range_.emplace_back(global_index, final_index); global_index = final_index; } // Now create dyn offset array mapping for any dynamic descriptors uint32_t dyn_array_idx = 0; binding_to_dynamic_array_idx_map_.reserve(binding_to_dyn_count.size()); for (const auto &bc_pair : binding_to_dyn_count) { binding_to_dynamic_array_idx_map_[bc_pair.first] = dyn_array_idx; dyn_array_idx += bc_pair.second; } } size_t cvdescriptorset::DescriptorSetLayoutDef::hash() const { hash_util::HashCombiner hc; hc << flags_; hc.Combine(bindings_); hc.Combine(binding_flags_); return hc.Value(); } // // Return valid index or "end" i.e. binding_count_; // The asserts in "Get" are reduced to the set where no valid answer(like null or 0) could be given // Common code for all binding lookups. 
uint32_t cvdescriptorset::DescriptorSetLayoutDef::GetIndexFromBinding(uint32_t binding) const { const auto &bi_itr = binding_to_index_map_.find(binding); if (bi_itr != binding_to_index_map_.cend()) return bi_itr->second; return GetBindingCount(); } VkDescriptorSetLayoutBinding const *cvdescriptorset::DescriptorSetLayoutDef::GetDescriptorSetLayoutBindingPtrFromIndex( const uint32_t index) const { if (index >= bindings_.size()) return nullptr; return bindings_[index].ptr(); } // Return descriptorCount for given index, 0 if index is unavailable uint32_t cvdescriptorset::DescriptorSetLayoutDef::GetDescriptorCountFromIndex(const uint32_t index) const { if (index >= bindings_.size()) return 0; return bindings_[index].descriptorCount; } // For the given index, return descriptorType VkDescriptorType cvdescriptorset::DescriptorSetLayoutDef::GetTypeFromIndex(const uint32_t index) const { assert(index < bindings_.size()); if (index < bindings_.size()) return bindings_[index].descriptorType; return VK_DESCRIPTOR_TYPE_MAX_ENUM; } // For the given index, return stageFlags VkShaderStageFlags cvdescriptorset::DescriptorSetLayoutDef::GetStageFlagsFromIndex(const uint32_t index) const { assert(index < bindings_.size()); if (index < bindings_.size()) return bindings_[index].stageFlags; return VkShaderStageFlags(0); } // Return binding flags for given index, 0 if index is unavailable VkDescriptorBindingFlagsEXT cvdescriptorset::DescriptorSetLayoutDef::GetDescriptorBindingFlagsFromIndex( const uint32_t index) const { if (index >= binding_flags_.size()) return 0; return binding_flags_[index]; } const cvdescriptorset::IndexRange &cvdescriptorset::DescriptorSetLayoutDef::GetGlobalIndexRangeFromIndex(uint32_t index) const { const static IndexRange kInvalidRange = {0xFFFFFFFF, 0xFFFFFFFF}; if (index >= binding_flags_.size()) return kInvalidRange; return global_index_range_[index]; } // For the given binding, return the global index range (half open) // As start and end are often needed in pairs, get both with a single lookup. const cvdescriptorset::IndexRange &cvdescriptorset::DescriptorSetLayoutDef::GetGlobalIndexRangeFromBinding( const uint32_t binding) const { uint32_t index = GetIndexFromBinding(binding); return GetGlobalIndexRangeFromIndex(index); } // For given binding, return ptr to ImmutableSampler array VkSampler const *cvdescriptorset::DescriptorSetLayoutDef::GetImmutableSamplerPtrFromBinding(const uint32_t binding) const { const auto &bi_itr = binding_to_index_map_.find(binding); if (bi_itr != binding_to_index_map_.end()) { return bindings_[bi_itr->second].pImmutableSamplers; } return nullptr; } // Move to next valid binding having a non-zero binding count uint32_t cvdescriptorset::DescriptorSetLayoutDef::GetNextValidBinding(const uint32_t binding) const { auto it = non_empty_bindings_.upper_bound(binding); assert(it != non_empty_bindings_.cend()); if (it != non_empty_bindings_.cend()) return *it; return GetMaxBinding() + 1; } // For given index, return ptr to ImmutableSampler array VkSampler const *cvdescriptorset::DescriptorSetLayoutDef::GetImmutableSamplerPtrFromIndex(const uint32_t index) const { if (index < bindings_.size()) { return bindings_[index].pImmutableSamplers; } return nullptr; } // If our layout is compatible with rh_ds_layout, return true. 
bool cvdescriptorset::DescriptorSetLayout::IsCompatible(DescriptorSetLayout const *rh_ds_layout) const { bool compatible = (this == rh_ds_layout) || (GetLayoutDef() == rh_ds_layout->GetLayoutDef()); return compatible; } // If our layout is compatible with rh_ds_layout, return true, // else return false and fill in error_msg will description of what causes incompatibility bool cvdescriptorset::VerifySetLayoutCompatibility(DescriptorSetLayout const *lh_ds_layout, DescriptorSetLayout const *rh_ds_layout, std::string *error_msg) { // Short circuit the detailed check. if (lh_ds_layout->IsCompatible(rh_ds_layout)) return true; // Do a detailed compatibility check of this lhs def (referenced by lh_ds_layout), vs. the rhs (layout and def) // Should only be run if trivial accept has failed, and in that context should return false. VkDescriptorSetLayout lh_dsl_handle = lh_ds_layout->GetDescriptorSetLayout(); VkDescriptorSetLayout rh_dsl_handle = rh_ds_layout->GetDescriptorSetLayout(); DescriptorSetLayoutDef const *lh_ds_layout_def = lh_ds_layout->GetLayoutDef(); DescriptorSetLayoutDef const *rh_ds_layout_def = rh_ds_layout->GetLayoutDef(); // Check descriptor counts if (lh_ds_layout_def->GetTotalDescriptorCount() != rh_ds_layout_def->GetTotalDescriptorCount()) { std::stringstream error_str; error_str << "DescriptorSetLayout " << lh_dsl_handle << " has " << lh_ds_layout_def->GetTotalDescriptorCount() << " descriptors, but DescriptorSetLayout " << rh_dsl_handle << ", which comes from pipelineLayout, has " << rh_ds_layout_def->GetTotalDescriptorCount() << " descriptors."; *error_msg = error_str.str(); return false; // trivial fail case } // Descriptor counts match so need to go through bindings one-by-one // and verify that type and stageFlags match for (const auto &binding : lh_ds_layout_def->GetBindings()) { // TODO : Do we also need to check immutable samplers? 
// VkDescriptorSetLayoutBinding *rh_binding; if (binding.descriptorCount != rh_ds_layout_def->GetDescriptorCountFromBinding(binding.binding)) { std::stringstream error_str; error_str << "Binding " << binding.binding << " for DescriptorSetLayout " << lh_dsl_handle << " has a descriptorCount of " << binding.descriptorCount << " but binding " << binding.binding << " for DescriptorSetLayout " << rh_dsl_handle << ", which comes from pipelineLayout, has a descriptorCount of " << rh_ds_layout_def->GetDescriptorCountFromBinding(binding.binding); *error_msg = error_str.str(); return false; } else if (binding.descriptorType != rh_ds_layout_def->GetTypeFromBinding(binding.binding)) { std::stringstream error_str; error_str << "Binding " << binding.binding << " for DescriptorSetLayout " << lh_dsl_handle << " is type '" << string_VkDescriptorType(binding.descriptorType) << "' but binding " << binding.binding << " for DescriptorSetLayout " << rh_dsl_handle << ", which comes from pipelineLayout, is type '" << string_VkDescriptorType(rh_ds_layout_def->GetTypeFromBinding(binding.binding)) << "'"; *error_msg = error_str.str(); return false; } else if (binding.stageFlags != rh_ds_layout_def->GetStageFlagsFromBinding(binding.binding)) { std::stringstream error_str; error_str << "Binding " << binding.binding << " for DescriptorSetLayout " << lh_dsl_handle << " has stageFlags " << binding.stageFlags << " but binding " << binding.binding << " for DescriptorSetLayout " << rh_dsl_handle << ", which comes from pipelineLayout, has stageFlags " << rh_ds_layout_def->GetStageFlagsFromBinding(binding.binding); *error_msg = error_str.str(); return false; } } // No detailed check should succeed if the trivial check failed -- or the dictionary has failed somehow. bool compatible = true; assert(!compatible); return compatible; } bool cvdescriptorset::DescriptorSetLayoutDef::IsNextBindingConsistent(const uint32_t binding) const { if (!binding_to_index_map_.count(binding + 1)) return false; auto const &bi_itr = binding_to_index_map_.find(binding); if (bi_itr != binding_to_index_map_.end()) { const auto &next_bi_itr = binding_to_index_map_.find(binding + 1); if (next_bi_itr != binding_to_index_map_.end()) { auto type = bindings_[bi_itr->second].descriptorType; auto stage_flags = bindings_[bi_itr->second].stageFlags; auto immut_samp = bindings_[bi_itr->second].pImmutableSamplers ? true : false; auto flags = binding_flags_[bi_itr->second]; if ((type != bindings_[next_bi_itr->second].descriptorType) || (stage_flags != bindings_[next_bi_itr->second].stageFlags) || (immut_samp != (bindings_[next_bi_itr->second].pImmutableSamplers ? 
true : false)) || (flags != binding_flags_[next_bi_itr->second])) { return false; } return true; } } return false; } // The DescriptorSetLayout stores the per handle data for a descriptor set layout, and references the common defintion for the // handle invariant portion cvdescriptorset::DescriptorSetLayout::DescriptorSetLayout(const VkDescriptorSetLayoutCreateInfo *p_create_info, const VkDescriptorSetLayout layout) : layout_(layout), layout_destroyed_(false), layout_id_(GetCanonicalId(p_create_info)) {} // Validate descriptor set layout create info bool cvdescriptorset::ValidateDescriptorSetLayoutCreateInfo( const debug_report_data *report_data, const VkDescriptorSetLayoutCreateInfo *create_info, const bool push_descriptor_ext, const uint32_t max_push_descriptors, const bool descriptor_indexing_ext, const VkPhysicalDeviceDescriptorIndexingFeaturesEXT *descriptor_indexing_features, const VkPhysicalDeviceInlineUniformBlockFeaturesEXT *inline_uniform_block_features, const VkPhysicalDeviceInlineUniformBlockPropertiesEXT *inline_uniform_block_props) { bool skip = false; std::unordered_set<uint32_t> bindings; uint64_t total_descriptors = 0; const auto *flags_create_info = lvl_find_in_chain<VkDescriptorSetLayoutBindingFlagsCreateInfoEXT>(create_info->pNext); const bool push_descriptor_set = !!(create_info->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR); if (push_descriptor_set && !push_descriptor_ext) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, kVUID_Core_DrawState_ExtensionNotEnabled, "Attempted to use %s in %s but its required extension %s has not been enabled.\n", "VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR", "VkDescriptorSetLayoutCreateInfo::flags", VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME); } const bool update_after_bind_set = !!(create_info->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT); if (update_after_bind_set && !descriptor_indexing_ext) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, kVUID_Core_DrawState_ExtensionNotEnabled, "Attemped to use %s in %s but its required extension %s has not been enabled.\n", "VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT", "VkDescriptorSetLayoutCreateInfo::flags", VK_EXT_DESCRIPTOR_INDEXING_EXTENSION_NAME); } auto valid_type = [push_descriptor_set](const VkDescriptorType type) { return !push_descriptor_set || ((type != VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) && (type != VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) && (type != VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT)); }; uint32_t max_binding = 0; for (uint32_t i = 0; i < create_info->bindingCount; ++i) { const auto &binding_info = create_info->pBindings[i]; max_binding = std::max(max_binding, binding_info.binding); if (!bindings.insert(binding_info.binding).second) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkDescriptorSetLayoutCreateInfo-binding-00279", "duplicated binding number in VkDescriptorSetLayoutBinding."); } if (!valid_type(binding_info.descriptorType)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, (binding_info.descriptorType == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT) ? 
"VUID-VkDescriptorSetLayoutCreateInfo-flags-02208" : "VUID-VkDescriptorSetLayoutCreateInfo-flags-00280", "invalid type %s ,for push descriptors in VkDescriptorSetLayoutBinding entry %" PRIu32 ".", string_VkDescriptorType(binding_info.descriptorType), i); } if (binding_info.descriptorType == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT) { if ((binding_info.descriptorCount % 4) != 0) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkDescriptorSetLayoutBinding-descriptorType-02209", "descriptorCount =(%" PRIu32 ") must be a multiple of 4", binding_info.descriptorCount); } if (binding_info.descriptorCount > inline_uniform_block_props->maxInlineUniformBlockSize) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkDescriptorSetLayoutBinding-descriptorType-02210", "descriptorCount =(%" PRIu32 ") must be less than or equal to maxInlineUniformBlockSize", binding_info.descriptorCount); } } total_descriptors += binding_info.descriptorCount; } if (flags_create_info) { if (flags_create_info->bindingCount != 0 && flags_create_info->bindingCount != create_info->bindingCount) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkDescriptorSetLayoutBindingFlagsCreateInfoEXT-bindingCount-03002", "VkDescriptorSetLayoutCreateInfo::bindingCount (%d) != " "VkDescriptorSetLayoutBindingFlagsCreateInfoEXT::bindingCount (%d)", create_info->bindingCount, flags_create_info->bindingCount); } if (flags_create_info->bindingCount == create_info->bindingCount) { for (uint32_t i = 0; i < create_info->bindingCount; ++i) { const auto &binding_info = create_info->pBindings[i]; if (flags_create_info->pBindingFlags[i] & VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT_EXT) { if (!update_after_bind_set) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkDescriptorSetLayoutCreateInfo-flags-03000", "Invalid flags for VkDescriptorSetLayoutBinding entry %" PRIu32, i); } if (binding_info.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER && !descriptor_indexing_features->descriptorBindingUniformBufferUpdateAfterBind) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkDescriptorSetLayoutBindingFlagsCreateInfoEXT-" "descriptorBindingUniformBufferUpdateAfterBind-03005", "Invalid flags for VkDescriptorSetLayoutBinding entry %" PRIu32, i); } if ((binding_info.descriptorType == VK_DESCRIPTOR_TYPE_SAMPLER || binding_info.descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER || binding_info.descriptorType == VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE) && !descriptor_indexing_features->descriptorBindingSampledImageUpdateAfterBind) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkDescriptorSetLayoutBindingFlagsCreateInfoEXT-" "descriptorBindingSampledImageUpdateAfterBind-03006", "Invalid flags for VkDescriptorSetLayoutBinding entry %" PRIu32, i); } if (binding_info.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE && !descriptor_indexing_features->descriptorBindingStorageImageUpdateAfterBind) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkDescriptorSetLayoutBindingFlagsCreateInfoEXT-" "descriptorBindingStorageImageUpdateAfterBind-03007", "Invalid flags for VkDescriptorSetLayoutBinding entry %" PRIu32, i); } if 
(binding_info.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER && !descriptor_indexing_features->descriptorBindingStorageBufferUpdateAfterBind) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkDescriptorSetLayoutBindingFlagsCreateInfoEXT-" "descriptorBindingStorageBufferUpdateAfterBind-03008", "Invalid flags for VkDescriptorSetLayoutBinding entry %" PRIu32, i); } if (binding_info.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER && !descriptor_indexing_features->descriptorBindingUniformTexelBufferUpdateAfterBind) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkDescriptorSetLayoutBindingFlagsCreateInfoEXT-" "descriptorBindingUniformTexelBufferUpdateAfterBind-03009", "Invalid flags for VkDescriptorSetLayoutBinding entry %" PRIu32, i); } if (binding_info.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER && !descriptor_indexing_features->descriptorBindingStorageTexelBufferUpdateAfterBind) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkDescriptorSetLayoutBindingFlagsCreateInfoEXT-" "descriptorBindingStorageTexelBufferUpdateAfterBind-03010", "Invalid flags for VkDescriptorSetLayoutBinding entry %" PRIu32, i); } if ((binding_info.descriptorType == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT || binding_info.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC || binding_info.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkDescriptorSetLayoutBindingFlagsCreateInfoEXT-None-03011", "Invalid flags for VkDescriptorSetLayoutBinding entry %" PRIu32, i); } if (binding_info.descriptorType == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT && !inline_uniform_block_features->descriptorBindingInlineUniformBlockUpdateAfterBind) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkDescriptorSetLayoutBindingFlagsCreateInfoEXT-" "descriptorBindingInlineUniformBlockUpdateAfterBind-02211", "Invalid flags (VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT_EXT) for " "VkDescriptorSetLayoutBinding entry %" PRIu32 " with descriptorBindingInlineUniformBlockUpdateAfterBind not enabled", i); } } if (flags_create_info->pBindingFlags[i] & VK_DESCRIPTOR_BINDING_UPDATE_UNUSED_WHILE_PENDING_BIT_EXT) { if (!descriptor_indexing_features->descriptorBindingUpdateUnusedWhilePending) { skip |= log_msg( report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkDescriptorSetLayoutBindingFlagsCreateInfoEXT-descriptorBindingUpdateUnusedWhilePending-03012", "Invalid flags for VkDescriptorSetLayoutBinding entry %" PRIu32, i); } } if (flags_create_info->pBindingFlags[i] & VK_DESCRIPTOR_BINDING_PARTIALLY_BOUND_BIT_EXT) { if (!descriptor_indexing_features->descriptorBindingPartiallyBound) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkDescriptorSetLayoutBindingFlagsCreateInfoEXT-descriptorBindingPartiallyBound-03013", "Invalid flags for VkDescriptorSetLayoutBinding entry %" PRIu32, i); } } if (flags_create_info->pBindingFlags[i] & VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT_EXT) { if (binding_info.binding != max_binding) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, 
"VUID-VkDescriptorSetLayoutBindingFlagsCreateInfoEXT-pBindingFlags-03004", "Invalid flags for VkDescriptorSetLayoutBinding entry %" PRIu32, i); } if (!descriptor_indexing_features->descriptorBindingVariableDescriptorCount) { skip |= log_msg( report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkDescriptorSetLayoutBindingFlagsCreateInfoEXT-descriptorBindingVariableDescriptorCount-03014", "Invalid flags for VkDescriptorSetLayoutBinding entry %" PRIu32, i); } if ((binding_info.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC || binding_info.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkDescriptorSetLayoutBindingFlagsCreateInfoEXT-pBindingFlags-03015", "Invalid flags for VkDescriptorSetLayoutBinding entry %" PRIu32, i); } } if (push_descriptor_set && (flags_create_info->pBindingFlags[i] & (VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT_EXT | VK_DESCRIPTOR_BINDING_UPDATE_UNUSED_WHILE_PENDING_BIT_EXT | VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT_EXT))) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkDescriptorSetLayoutBindingFlagsCreateInfoEXT-flags-03003", "Invalid flags for VkDescriptorSetLayoutBinding entry %" PRIu32, i); } } } } if ((push_descriptor_set) && (total_descriptors > max_push_descriptors)) { const char *undefined = push_descriptor_ext ? "" : " -- undefined"; skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkDescriptorSetLayoutCreateInfo-flags-00281", "for push descriptor, total descriptor count in layout (%" PRIu64 ") must not be greater than VkPhysicalDevicePushDescriptorPropertiesKHR::maxPushDescriptors (%" PRIu32 "%s).", total_descriptors, max_push_descriptors, undefined); } return skip; } cvdescriptorset::AllocateDescriptorSetsData::AllocateDescriptorSetsData(uint32_t count) : required_descriptors_by_type{}, layout_nodes(count, nullptr) {} cvdescriptorset::DescriptorSet::DescriptorSet(const VkDescriptorSet set, const VkDescriptorPool pool, const std::shared_ptr<DescriptorSetLayout const> &layout, uint32_t variable_count, cvdescriptorset::DescriptorSet::StateTracker *state_data) : some_update_(false), set_(set), pool_state_(nullptr), p_layout_(layout), state_data_(state_data), variable_count_(variable_count) { pool_state_ = state_data->GetDescriptorPoolState(pool); // Foreach binding, create default descriptors of given type descriptors_.reserve(p_layout_->GetTotalDescriptorCount()); for (uint32_t i = 0; i < p_layout_->GetBindingCount(); ++i) { auto type = p_layout_->GetTypeFromIndex(i); switch (type) { case VK_DESCRIPTOR_TYPE_SAMPLER: { auto immut_sampler = p_layout_->GetImmutableSamplerPtrFromIndex(i); for (uint32_t di = 0; di < p_layout_->GetDescriptorCountFromIndex(i); ++di) { if (immut_sampler) { descriptors_.emplace_back(new SamplerDescriptor(immut_sampler + di)); some_update_ = true; // Immutable samplers are updated at creation } else descriptors_.emplace_back(new SamplerDescriptor(nullptr)); } break; } case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER: { auto immut = p_layout_->GetImmutableSamplerPtrFromIndex(i); for (uint32_t di = 0; di < p_layout_->GetDescriptorCountFromIndex(i); ++di) { if (immut) { descriptors_.emplace_back(new ImageSamplerDescriptor(immut + di)); some_update_ = true; // Immutable samplers are updated at creation } else 
descriptors_.emplace_back(new ImageSamplerDescriptor(nullptr)); } break; } // ImageDescriptors case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE: case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT: case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: for (uint32_t di = 0; di < p_layout_->GetDescriptorCountFromIndex(i); ++di) descriptors_.emplace_back(new ImageDescriptor(type)); break; case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER: case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: for (uint32_t di = 0; di < p_layout_->GetDescriptorCountFromIndex(i); ++di) descriptors_.emplace_back(new TexelDescriptor(type)); break; case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER: case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC: case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER: case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: for (uint32_t di = 0; di < p_layout_->GetDescriptorCountFromIndex(i); ++di) descriptors_.emplace_back(new BufferDescriptor(type)); break; case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT: for (uint32_t di = 0; di < p_layout_->GetDescriptorCountFromIndex(i); ++di) descriptors_.emplace_back(new InlineUniformDescriptor(type)); break; case VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_NV: for (uint32_t di = 0; di < p_layout_->GetDescriptorCountFromIndex(i); ++di) descriptors_.emplace_back(new AccelerationStructureDescriptor(type)); break; default: assert(0); // Bad descriptor type specified break; } } } cvdescriptorset::DescriptorSet::~DescriptorSet() { InvalidateBoundCmdBuffers(); } static std::string StringDescriptorReqViewType(descriptor_req req) { std::string result(""); for (unsigned i = 0; i <= VK_IMAGE_VIEW_TYPE_END_RANGE; i++) { if (req & (1 << i)) { if (result.size()) result += ", "; result += string_VkImageViewType(VkImageViewType(i)); } } if (!result.size()) result = "(none)"; return result; } static char const *StringDescriptorReqComponentType(descriptor_req req) { if (req & DESCRIPTOR_REQ_COMPONENT_TYPE_SINT) return "SINT"; if (req & DESCRIPTOR_REQ_COMPONENT_TYPE_UINT) return "UINT"; if (req & DESCRIPTOR_REQ_COMPONENT_TYPE_FLOAT) return "FLOAT"; return "(none)"; } static unsigned DescriptorRequirementsBitsFromFormat(VkFormat fmt) { if (FormatIsSInt(fmt)) return DESCRIPTOR_REQ_COMPONENT_TYPE_SINT; if (FormatIsUInt(fmt)) return DESCRIPTOR_REQ_COMPONENT_TYPE_UINT; if (FormatIsDepthAndStencil(fmt)) return DESCRIPTOR_REQ_COMPONENT_TYPE_FLOAT | DESCRIPTOR_REQ_COMPONENT_TYPE_UINT; if (fmt == VK_FORMAT_UNDEFINED) return 0; // everything else -- UNORM/SNORM/FLOAT/USCALED/SSCALED is all float in the shader. return DESCRIPTOR_REQ_COMPONENT_TYPE_FLOAT; } // Validate that the state of this set is appropriate for the given bindings and dynamic_offsets at Draw time // This includes validating that all descriptors in the given bindings are updated, // that any update buffers are valid, and that any dynamic offsets are within the bounds of their buffers. 
// Return true if state is acceptable, or false and write an error message into error string bool CoreChecks::ValidateDrawState(const DescriptorSet *descriptor_set, const std::map<uint32_t, descriptor_req> &bindings, const std::vector<uint32_t> &dynamic_offsets, CMD_BUFFER_STATE *cb_node, const char *caller, std::string *error) { using DescriptorClass = cvdescriptorset::DescriptorClass; using BufferDescriptor = cvdescriptorset::BufferDescriptor; using ImageDescriptor = cvdescriptorset::ImageDescriptor; using ImageSamplerDescriptor = cvdescriptorset::ImageSamplerDescriptor; using SamplerDescriptor = cvdescriptorset::SamplerDescriptor; using TexelDescriptor = cvdescriptorset::TexelDescriptor; for (auto binding_pair : bindings) { auto binding = binding_pair.first; DescriptorSetLayout::ConstBindingIterator binding_it(descriptor_set->GetLayout().get(), binding); if (binding_it.AtEnd()) { // End at construction is the condition for an invalid binding. std::stringstream error_str; error_str << "Attempting to validate DrawState for binding #" << binding << " which is an invalid binding for this descriptor set."; *error = error_str.str(); return false; } // Copy the range, the end range is subject to update based on variable length descriptor arrays. cvdescriptorset::IndexRange index_range = binding_it.GetGlobalIndexRange(); auto array_idx = 0; // Track array idx if we're dealing with array descriptors if (binding_it.IsVariableDescriptorCount()) { // Only validate the first N descriptors if it uses variable_count index_range.end = index_range.start + descriptor_set->GetVariableDescriptorCount(); } for (uint32_t i = index_range.start; i < index_range.end; ++i, ++array_idx) { uint32_t index = i - index_range.start; const auto *descriptor = descriptor_set->GetDescriptorFromGlobalIndex(i); if ((binding_it.GetDescriptorBindingFlags() & (VK_DESCRIPTOR_BINDING_PARTIALLY_BOUND_BIT_EXT | VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT_EXT)) || descriptor->GetClass() == DescriptorClass::InlineUniform) { // Can't validate the descriptor because it may not have been updated, // or the view could have been destroyed continue; } else if (!descriptor->updated) { std::stringstream error_str; error_str << "Descriptor in binding #" << binding << " index " << index << " is being used in draw but has never been updated via vkUpdateDescriptorSets() or a similar call."; *error = error_str.str(); return false; } else { auto descriptor_class = descriptor->GetClass(); if (descriptor_class == DescriptorClass::GeneralBuffer) { // Verify that buffers are valid auto buffer = static_cast<const BufferDescriptor *>(descriptor)->GetBuffer(); auto buffer_node = GetBufferState(buffer); if (!buffer_node) { std::stringstream error_str; error_str << "Descriptor in binding #" << binding << " index " << index << " references invalid buffer " << buffer << "."; *error = error_str.str(); return false; } else if (!buffer_node->sparse) { for (auto mem_binding : buffer_node->GetBoundMemory()) { if (!GetDevMemState(mem_binding)) { std::stringstream error_str; error_str << "Descriptor in binding #" << binding << " index " << index << " uses buffer " << buffer << " that references invalid memory " << mem_binding << "."; *error = error_str.str(); return false; } } } if (descriptor->IsDynamic()) { // Validate that dynamic offsets are within the buffer auto buffer_size = buffer_node->createInfo.size; auto range = static_cast<const BufferDescriptor *>(descriptor)->GetRange(); auto desc_offset = static_cast<const BufferDescriptor 
*>(descriptor)->GetOffset(); auto dyn_offset = dynamic_offsets[binding_it.GetDynamicOffsetIndex() + array_idx]; if (VK_WHOLE_SIZE == range) { if ((dyn_offset + desc_offset) > buffer_size) { std::stringstream error_str; error_str << "Dynamic descriptor in binding #" << binding << " index " << index << " uses buffer " << buffer << " with update range of VK_WHOLE_SIZE has dynamic offset " << dyn_offset << " combined with offset " << desc_offset << " that oversteps the buffer size of " << buffer_size << "."; *error = error_str.str(); return false; } } else { if ((dyn_offset + desc_offset + range) > buffer_size) { std::stringstream error_str; error_str << "Dynamic descriptor in binding #" << binding << " index " << index << " uses buffer " << buffer << " with dynamic offset " << dyn_offset << " combined with offset " << desc_offset << " and range " << range << " that oversteps the buffer size of " << buffer_size << "."; *error = error_str.str(); return false; } } } } else if (descriptor_class == DescriptorClass::ImageSampler || descriptor_class == DescriptorClass::Image) { VkImageView image_view; VkImageLayout image_layout; if (descriptor_class == DescriptorClass::ImageSampler) { image_view = static_cast<const ImageSamplerDescriptor *>(descriptor)->GetImageView(); image_layout = static_cast<const ImageSamplerDescriptor *>(descriptor)->GetImageLayout(); } else { image_view = static_cast<const ImageDescriptor *>(descriptor)->GetImageView(); image_layout = static_cast<const ImageDescriptor *>(descriptor)->GetImageLayout(); } auto reqs = binding_pair.second; auto image_view_state = GetImageViewState(image_view); if (nullptr == image_view_state) { // Image view must have been destroyed since initial update. Could potentially flag the descriptor // as "invalid" (updated = false) at DestroyImageView() time and detect this error at bind time std::stringstream error_str; error_str << "Descriptor in binding #" << binding << " index " << index << " is using imageView " << report_data->FormatHandle(image_view).c_str() << " that has been destroyed."; *error = error_str.str(); return false; } auto image_view_ci = image_view_state->create_info; if ((reqs & DESCRIPTOR_REQ_ALL_VIEW_TYPE_BITS) && (~reqs & (1 << image_view_ci.viewType))) { // bad view type std::stringstream error_str; error_str << "Descriptor in binding #" << binding << " index " << index << " requires an image view of type " << StringDescriptorReqViewType(reqs) << " but got " << string_VkImageViewType(image_view_ci.viewType) << "."; *error = error_str.str(); return false; } auto format_bits = DescriptorRequirementsBitsFromFormat(image_view_ci.format); if (!(reqs & format_bits)) { // bad component type std::stringstream error_str; error_str << "Descriptor in binding #" << binding << " index " << index << " requires " << StringDescriptorReqComponentType(reqs) << " component type, but bound descriptor format is " << string_VkFormat(image_view_ci.format) << "."; *error = error_str.str(); return false; } auto image_node = GetImageState(image_view_ci.image); assert(image_node); // Verify Image Layout // No "invalid layout" VUID required for this call, since the optimal_layout parameter is UNDEFINED. 
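// Note (reader aid, added): VerifyImageLayout reports the layout mismatch through the debug
// callback itself; hit_error only tells this draw-time check whether to also fold the mismatch
// into the error string returned to the caller below.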
bool hit_error = false;
VerifyImageLayout(cb_node, image_node, image_view_state->normalized_subresource_range,
                  image_view_ci.subresourceRange.aspectMask, image_layout, VK_IMAGE_LAYOUT_UNDEFINED, caller,
                  kVUIDUndefined, "VUID-VkDescriptorImageInfo-imageLayout-00344", &hit_error);
if (hit_error) {
    *error =
        "Image layout specified at vkUpdateDescriptorSet* or vkCmdPushDescriptorSet* time "
        "doesn't match actual image layout at time descriptor is used. See previous error callback for "
        "specific details.";
    return false;
}
// Verify sample counts
if ((reqs & DESCRIPTOR_REQ_SINGLE_SAMPLE) && image_node->createInfo.samples != VK_SAMPLE_COUNT_1_BIT) {
    std::stringstream error_str;
    error_str << "Descriptor in binding #" << binding << " index " << index
              << " requires bound image to have VK_SAMPLE_COUNT_1_BIT but got "
              << string_VkSampleCountFlagBits(image_node->createInfo.samples) << ".";
    *error = error_str.str();
    return false;
}
if ((reqs & DESCRIPTOR_REQ_MULTI_SAMPLE) && image_node->createInfo.samples == VK_SAMPLE_COUNT_1_BIT) {
    std::stringstream error_str;
    error_str << "Descriptor in binding #" << binding << " index " << index
              << " requires bound image to have multiple samples, but got VK_SAMPLE_COUNT_1_BIT.";
    *error = error_str.str();
    return false;
}
} else if (descriptor_class == DescriptorClass::TexelBuffer) {
auto texel_buffer = static_cast<const TexelDescriptor *>(descriptor);
auto buffer_view = GetBufferViewState(texel_buffer->GetBufferView());
if (nullptr == buffer_view) {
    std::stringstream error_str;
    // Report the destroyed handle itself; the state pointer is null here and would print as 0.
    error_str << "Descriptor in binding #" << binding << " index " << index << " is using bufferView "
              << texel_buffer->GetBufferView() << " that has been destroyed.";
    *error = error_str.str();
    return false;
}
auto buffer = buffer_view->create_info.buffer;
auto buffer_state = GetBufferState(buffer);
if (!buffer_state) {
    std::stringstream error_str;
    // As above, print the buffer handle rather than the (null) state pointer.
    error_str << "Descriptor in binding #" << binding << " index " << index << " is using buffer " << buffer
              << " that has been destroyed.";
    *error = error_str.str();
    return false;
}
auto reqs = binding_pair.second;
auto format_bits = DescriptorRequirementsBitsFromFormat(buffer_view->create_info.format);
if (!(reqs & format_bits)) {
    // bad component type
    std::stringstream error_str;
    error_str << "Descriptor in binding #" << binding << " index " << index << " requires "
              << StringDescriptorReqComponentType(reqs) << " component type, but bound descriptor format is "
              << string_VkFormat(buffer_view->create_info.format) << ".";
    *error = error_str.str();
    return false;
}
}
if (descriptor_class == DescriptorClass::ImageSampler || descriptor_class == DescriptorClass::PlainSampler) {
// Verify sampler still valid
VkSampler sampler;
if (descriptor_class == DescriptorClass::ImageSampler) {
    sampler = static_cast<const ImageSamplerDescriptor *>(descriptor)->GetSampler();
} else {
    sampler = static_cast<const SamplerDescriptor *>(descriptor)->GetSampler();
}
if (!ValidateSampler(sampler)) {
    std::stringstream error_str;
    error_str << "Descriptor in binding #" << binding << " index " << index << " is using sampler " << sampler
              << " that has been destroyed.";
    *error = error_str.str();
    return false;
} else {
    SAMPLER_STATE *sampler_state = GetSamplerState(sampler);
    if (sampler_state->samplerConversion && !descriptor->IsImmutableSampler()) {
        std::stringstream error_str;
        error_str << "sampler (" << sampler << ") in the descriptor set (" << descriptor_set->GetSet()
                  << ") contains a YCBCR conversion (" << sampler_state->samplerConversion
                  << "), so the sampler must also exist as an immutable sampler.";
        *error = error_str.str();
        // Per this function's contract, report the failure to the caller as well.
        return false;
    }
}
}  // end sampler checks
}  // end else (descriptor was updated)
}  // end per-descriptor loop
}  // end per-binding loop
return true;
}
// For given bindings, place any update buffers or images into the passed-in unordered_sets
uint32_t cvdescriptorset::DescriptorSet::GetStorageUpdates(const std::map<uint32_t, descriptor_req> &bindings,
                                                           std::unordered_set<VkBuffer> *buffer_set,
                                                           std::unordered_set<VkImageView> *image_set) const {
    auto num_updates = 0;
    for (auto binding_pair : bindings) {
        auto binding = binding_pair.first;
        // If a binding doesn't exist, skip it
        if (!p_layout_->HasBinding(binding)) {
            continue;
        }
        uint32_t start_idx = p_layout_->GetGlobalIndexRangeFromBinding(binding).start;
        if (descriptors_[start_idx]->IsStorage()) {
            if (Image == descriptors_[start_idx]->descriptor_class) {
                for (uint32_t i = 0; i < p_layout_->GetDescriptorCountFromBinding(binding); ++i) {
                    if (descriptors_[start_idx + i]->updated) {
                        image_set->insert(static_cast<ImageDescriptor *>(descriptors_[start_idx + i].get())->GetImageView());
                        num_updates++;
                    }
                }
            } else if (TexelBuffer == descriptors_[start_idx]->descriptor_class) {
                for (uint32_t i = 0; i < p_layout_->GetDescriptorCountFromBinding(binding); ++i) {
                    if (descriptors_[start_idx + i]->updated) {
                        auto bufferview = static_cast<TexelDescriptor *>(descriptors_[start_idx + i].get())->GetBufferView();
                        auto bv_state = state_data_->GetBufferViewState(bufferview);
                        if (bv_state) {
                            buffer_set->insert(bv_state->create_info.buffer);
                            num_updates++;
                        }
                    }
                }
            } else if (GeneralBuffer == descriptors_[start_idx]->descriptor_class) {
                for (uint32_t i = 0; i < p_layout_->GetDescriptorCountFromBinding(binding); ++i) {
                    if (descriptors_[start_idx + i]->updated) {
                        buffer_set->insert(static_cast<BufferDescriptor *>(descriptors_[start_idx + i].get())->GetBuffer());
                        num_updates++;
                    }
                }
            }
        }
    }
    return num_updates;
}
// Set is being deleted or updated, so invalidate all bound cmd buffers
void cvdescriptorset::DescriptorSet::InvalidateBoundCmdBuffers() {
    state_data_->InvalidateCommandBuffers(cb_bindings, VulkanTypedHandle(set_, kVulkanObjectTypeDescriptorSet));
}
// Loop through the write updates to do for a push descriptor set, ignoring dstSet
void cvdescriptorset::DescriptorSet::PerformPushDescriptorsUpdate(uint32_t write_count, const VkWriteDescriptorSet *p_wds) {
    assert(IsPushDescriptor());
    for (uint32_t i = 0; i < write_count; i++) {
        PerformWriteUpdate(&p_wds[i]);
    }
}
// Perform write update in given update struct
void cvdescriptorset::DescriptorSet::PerformWriteUpdate(const VkWriteDescriptorSet *update) {
    // Perform update on a per-binding basis as consecutive updates roll over to next binding
    auto descriptors_remaining = update->descriptorCount;
    auto binding_being_updated = update->dstBinding;
    auto offset = update->dstArrayElement;
    uint32_t update_index = 0;
    while (descriptors_remaining) {
        // Account for the starting dstArrayElement offset so the first binding is not overrun;
        // subsequent bindings start at offset 0.
        uint32_t update_count =
            std::min(descriptors_remaining, GetDescriptorCountFromBinding(binding_being_updated) - offset);
        auto global_idx = p_layout_->GetGlobalIndexRangeFromBinding(binding_being_updated).start + offset;
        // Loop over the updates for a single binding at a time
        for (uint32_t di = 0; di < update_count; ++di, ++update_index) {
            descriptors_[global_idx + di]->WriteUpdate(update, update_index);
        }
        // Roll over to next binding in case of consecutive update
        descriptors_remaining -= update_count;
        offset = 0;
        binding_being_updated++;
    }
    if (update->descriptorCount) some_update_ = true;
    if (!(p_layout_->GetDescriptorBindingFlagsFromBinding(update->dstBinding) &
          (VK_DESCRIPTOR_BINDING_UPDATE_UNUSED_WHILE_PENDING_BIT_EXT | VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT_EXT))) {
InvalidateBoundCmdBuffers();
}
}
// Validate Copy update
bool CoreChecks::ValidateCopyUpdate(const VkCopyDescriptorSet *update, const DescriptorSet *dst_set,
                                    const DescriptorSet *src_set, const char *func_name, std::string *error_code,
                                    std::string *error_msg) {
    auto dst_layout = dst_set->GetLayout();
    auto src_layout = src_set->GetLayout();
    // Verify dst layout still valid
    if (dst_layout->IsDestroyed()) {
        *error_code = "VUID-VkCopyDescriptorSet-dstSet-parameter";
        string_sprintf(error_msg,
                       "Cannot call %s to perform copy update on dstSet %s"
                       " created with destroyed %s.",
                       func_name, report_data->FormatHandle(dst_set->GetSet()).c_str(),
                       report_data->FormatHandle(dst_layout->GetDescriptorSetLayout()).c_str());
        return false;
    }
    // Verify src layout still valid
    if (src_layout->IsDestroyed()) {
        *error_code = "VUID-VkCopyDescriptorSet-srcSet-parameter";
        string_sprintf(error_msg,
                       "Cannot call %s to perform copy update of dstSet %s"
                       " from srcSet %s"
                       " created with destroyed %s.",
                       func_name, report_data->FormatHandle(dst_set->GetSet()).c_str(),
                       report_data->FormatHandle(src_set->GetSet()).c_str(),
                       report_data->FormatHandle(src_layout->GetDescriptorSetLayout()).c_str());
        return false;
    }
    if (!dst_layout->HasBinding(update->dstBinding)) {
        *error_code = "VUID-VkCopyDescriptorSet-dstBinding-00347";
        std::stringstream error_str;
        error_str << "DescriptorSet " << dst_set->GetSet() << " does not have copy update dest binding of "
                  << update->dstBinding;
        *error_msg = error_str.str();
        return false;
    }
    if (!src_set->HasBinding(update->srcBinding)) {
        *error_code = "VUID-VkCopyDescriptorSet-srcBinding-00345";
        std::stringstream error_str;
        // Report the source set here; the missing binding belongs to srcSet, not dstSet.
        error_str << "DescriptorSet " << src_set->GetSet() << " does not have copy update src binding of "
                  << update->srcBinding;
        *error_msg = error_str.str();
        return false;
    }
    // Verify idle descriptor set
    if (dst_set->in_use.load() &&
        !(dst_layout->GetDescriptorBindingFlagsFromBinding(update->dstBinding) &
          (VK_DESCRIPTOR_BINDING_UPDATE_UNUSED_WHILE_PENDING_BIT_EXT | VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT_EXT))) {
        // TODO : Re-using Free Idle error code, need copy update idle error code
        *error_code = "VUID-vkFreeDescriptorSets-pDescriptorSets-00309";
        std::stringstream error_str;
        error_str << "Cannot call " << func_name << " to perform copy update on descriptor set " << dst_set->GetSet()
                  << " that is in use by a command buffer";
        *error_msg = error_str.str();
        return false;
    }
    // src & dst set bindings are valid
    // Check bounds of src & dst
    auto src_start_idx = src_set->GetGlobalIndexRangeFromBinding(update->srcBinding).start + update->srcArrayElement;
    if ((src_start_idx + update->descriptorCount) > src_set->GetTotalDescriptorCount()) {
        // SRC update out of bounds
        *error_code = "VUID-VkCopyDescriptorSet-srcArrayElement-00346";
        std::stringstream error_str;
        error_str << "Attempting copy update from descriptorSet " << update->srcSet << " binding#" << update->srcBinding
                  << " with offset index of " << src_set->GetGlobalIndexRangeFromBinding(update->srcBinding).start
                  << " plus update array offset of " << update->srcArrayElement << " and update of "
                  << update->descriptorCount
                  << " descriptors oversteps total number of descriptors in set: " << src_set->GetTotalDescriptorCount();
        *error_msg = error_str.str();
        return false;
    }
    auto dst_start_idx = dst_layout->GetGlobalIndexRangeFromBinding(update->dstBinding).start + update->dstArrayElement;
    if ((dst_start_idx + update->descriptorCount) > dst_layout->GetTotalDescriptorCount()) {
        // DST update out of bounds
        *error_code = "VUID-VkCopyDescriptorSet-dstArrayElement-00348";
        std::stringstream error_str;
        error_str << "Attempting copy update to descriptorSet " << dst_set->GetSet() << " binding#" << update->dstBinding
                  << " with offset index of " << dst_layout->GetGlobalIndexRangeFromBinding(update->dstBinding).start
                  << " plus update array offset of " << update->dstArrayElement << " and update of "
                  << update->descriptorCount
                  << " descriptors oversteps total number of descriptors in set: " << dst_layout->GetTotalDescriptorCount();
        *error_msg = error_str.str();
        return false;
    }
    // Check that types match
    // TODO : Base default error case going from here covers all consistency issues; need more fine-grained error codes
    *error_code = "VUID-VkCopyDescriptorSet-srcSet-00349";
    auto src_type = src_set->GetTypeFromBinding(update->srcBinding);
    auto dst_type = dst_layout->GetTypeFromBinding(update->dstBinding);
    if (src_type != dst_type) {
        std::stringstream error_str;
        error_str << "Attempting copy update to descriptorSet " << dst_set->GetSet() << " binding #" << update->dstBinding
                  << " with type " << string_VkDescriptorType(dst_type) << " from descriptorSet " << src_set->GetSet()
                  << " binding #" << update->srcBinding << " with type " << string_VkDescriptorType(src_type)
                  << ". Types do not match";
        *error_msg = error_str.str();
        return false;
    }
    // Verify consistency of src & dst bindings if update crosses binding boundaries
    if ((!VerifyUpdateConsistency(DescriptorSetLayout::ConstBindingIterator(src_layout.get(), update->srcBinding),
                                  update->srcArrayElement, update->descriptorCount, "copy update from", src_set->GetSet(),
                                  error_msg)) ||
        (!VerifyUpdateConsistency(DescriptorSetLayout::ConstBindingIterator(dst_layout.get(), update->dstBinding),
                                  update->dstArrayElement, update->descriptorCount, "copy update to", dst_set->GetSet(),
                                  error_msg))) {
        return false;
    }
    if ((src_layout->GetCreateFlags() & VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT) &&
        !(dst_layout->GetCreateFlags() & VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT)) {
        *error_code = "VUID-VkCopyDescriptorSet-srcSet-01918";
        std::stringstream error_str;
        error_str << "If pname:srcSet's (" << update->srcSet
                  << ") layout was created with the "
                     "ename:VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT flag "
                     "set, then pname:dstSet's ("
                  << update->dstSet
                  << ") layout must: also have been created with the "
                     "ename:VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT flag set";
        *error_msg = error_str.str();
        return false;
    }
    if (!(src_layout->GetCreateFlags() & VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT) &&
        (dst_layout->GetCreateFlags() & VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT)) {
        *error_code = "VUID-VkCopyDescriptorSet-srcSet-01919";
        std::stringstream error_str;
        error_str << "If pname:srcSet's (" << update->srcSet
                  << ") layout was created without the "
                     "ename:VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT flag "
                     "set, then pname:dstSet's ("
                  << update->dstSet
                  << ") layout must: also have been created without the "
                     "ename:VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT flag set";
        *error_msg = error_str.str();
        return false;
    }
    if ((src_set->GetPoolState()->createInfo.flags & VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT_EXT) &&
        !(dst_set->GetPoolState()->createInfo.flags & VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT_EXT)) {
        *error_code = "VUID-VkCopyDescriptorSet-srcSet-01920";
std::stringstream error_str; error_str << "If the descriptor pool from which pname:srcSet (" << update->srcSet << ") was allocated was created " "with the ename:VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT_EXT flag " "set, then the descriptor pool from which pname:dstSet (" << update->dstSet << ") was allocated must: " "also have been created with the ename:VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT_EXT flag set"; *error_msg = error_str.str(); return false; } if (!(src_set->GetPoolState()->createInfo.flags & VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT_EXT) && (dst_set->GetPoolState()->createInfo.flags & VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT_EXT)) { *error_code = "VUID-VkCopyDescriptorSet-srcSet-01921"; std::stringstream error_str; error_str << "If the descriptor pool from which pname:srcSet (" << update->srcSet << ") was allocated was created " "without the ename:VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT_EXT flag " "set, then the descriptor pool from which pname:dstSet (" << update->dstSet << ") was allocated must: " "also have been created without the ename:VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT_EXT flag set"; *error_msg = error_str.str(); return false; } if (src_type == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT) { if ((update->srcArrayElement % 4) != 0) { *error_code = "VUID-VkCopyDescriptorSet-srcBinding-02223"; std::stringstream error_str; error_str << "Attempting copy update to VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT binding with " << "srcArrayElement " << update->srcArrayElement << " not a multiple of 4"; *error_msg = error_str.str(); return false; } if ((update->dstArrayElement % 4) != 0) { *error_code = "VUID-VkCopyDescriptorSet-dstBinding-02224"; std::stringstream error_str; error_str << "Attempting copy update to VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT binding with " << "dstArrayElement " << update->dstArrayElement << " not a multiple of 4"; *error_msg = error_str.str(); return false; } if ((update->descriptorCount % 4) != 0) { *error_code = "VUID-VkCopyDescriptorSet-srcBinding-02225"; std::stringstream error_str; error_str << "Attempting copy update to VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT binding with " << "descriptorCount " << update->descriptorCount << " not a multiple of 4"; *error_msg = error_str.str(); return false; } } // Update parameters all look good and descriptor updated so verify update contents if (!VerifyCopyUpdateContents(update, src_set, src_type, src_start_idx, func_name, error_code, error_msg)) return false; // All checks passed so update is good return true; } // Perform Copy update void cvdescriptorset::DescriptorSet::PerformCopyUpdate(const VkCopyDescriptorSet *update, const DescriptorSet *src_set) { auto src_start_idx = src_set->GetGlobalIndexRangeFromBinding(update->srcBinding).start + update->srcArrayElement; auto dst_start_idx = p_layout_->GetGlobalIndexRangeFromBinding(update->dstBinding).start + update->dstArrayElement; // Update parameters all look good so perform update for (uint32_t di = 0; di < update->descriptorCount; ++di) { auto src = src_set->descriptors_[src_start_idx + di].get(); auto dst = descriptors_[dst_start_idx + di].get(); if (src->updated) { dst->CopyUpdate(src); some_update_ = true; } else { dst->updated = false; } } if (!(p_layout_->GetDescriptorBindingFlagsFromBinding(update->dstBinding) & (VK_DESCRIPTOR_BINDING_UPDATE_UNUSED_WHILE_PENDING_BIT_EXT | VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT_EXT))) { InvalidateBoundCmdBuffers(); } } // Update the drawing state for the affected 
// descriptors.
// Set cb_node to this set and this set to cb_node.
// Add the bindings of the descriptor set
// Set the layout based on the current descriptor layout (will mask subsequent layout mismatch errors)
// TODO: Modify the UpdateDrawState virtual functions to *only* set initial layout and not change layouts
// Prereq: This should be called for a set that has been confirmed to be active for the given cb_node, meaning it's going
// to be used in a draw by the given cb_node
void cvdescriptorset::DescriptorSet::UpdateDrawState(CoreChecks *device_data, CMD_BUFFER_STATE *cb_node,
                                                     const std::map<uint32_t, descriptor_req> &binding_req_map) {
    // bind cb to this descriptor set
    cb_bindings.insert(cb_node);
    // Add bindings for descriptor set, the set's pool, and individual objects in the set
    cb_node->object_bindings.emplace(set_, kVulkanObjectTypeDescriptorSet);
    pool_state_->cb_bindings.insert(cb_node);
    cb_node->object_bindings.emplace(pool_state_->pool, kVulkanObjectTypeDescriptorPool);
    // For the active slots, use set# to look up descriptorSet from boundDescriptorSets, and bind all of that descriptor set's
    // resources
    for (auto binding_req_pair : binding_req_map) {
        auto binding = binding_req_pair.first;
        // We aren't validating descriptors created with PARTIALLY_BOUND or UPDATE_AFTER_BIND, so don't record state
        if (p_layout_->GetDescriptorBindingFlagsFromBinding(binding) &
            (VK_DESCRIPTOR_BINDING_PARTIALLY_BOUND_BIT_EXT | VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT_EXT)) {
            continue;
        }
        auto range = p_layout_->GetGlobalIndexRangeFromBinding(binding);
        for (uint32_t i = range.start; i < range.end; ++i) {
            descriptors_[i]->UpdateDrawState(device_data, cb_node);
        }
    }
}
void cvdescriptorset::DescriptorSet::FilterAndTrackOneBindingReq(const BindingReqMap::value_type &binding_req_pair,
                                                                 const BindingReqMap &in_req, BindingReqMap *out_req,
                                                                 TrackedBindings *bindings) {
    assert(out_req);
    assert(bindings);
    const auto binding = binding_req_pair.first;
    // Use insert and look at the boolean ("was inserted") in the returned pair to see if this is a new set member.
    // Saves one hash lookup vs. find ... compare w/ end ... insert.
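// Illustrative sketch (added, not part of the original file): set-style containers' insert()
// returns a std::pair<iterator, bool>, so the membership test and the insertion share one lookup:
//   std::unordered_set<uint32_t> seen;
//   auto p = seen.insert(42);  // p.second == true  -> 42 was newly inserted
//   auto q = seen.insert(42);  // q.second == false -> 42 was already present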
const auto it_bool_pair = bindings->insert(binding);
if (it_bool_pair.second) {
    out_req->emplace(binding_req_pair);
}
}
void cvdescriptorset::DescriptorSet::FilterAndTrackOneBindingReq(const BindingReqMap::value_type &binding_req_pair,
                                                                 const BindingReqMap &in_req, BindingReqMap *out_req,
                                                                 TrackedBindings *bindings, uint32_t limit) {
    if (bindings->size() < limit) FilterAndTrackOneBindingReq(binding_req_pair, in_req, out_req, bindings);
}
void cvdescriptorset::DescriptorSet::FilterAndTrackBindingReqs(CMD_BUFFER_STATE *cb_state, const BindingReqMap &in_req,
                                                               BindingReqMap *out_req) {
    TrackedBindings &bound = cached_validation_[cb_state].command_binding_and_usage;
    if (bound.size() == GetBindingCount()) {
        return;  // All bindings are bound, out req is empty
    }
    for (const auto &binding_req_pair : in_req) {
        const auto binding = binding_req_pair.first;
        // If a binding doesn't exist, or has already been bound, skip it
        if (p_layout_->HasBinding(binding)) {
            FilterAndTrackOneBindingReq(binding_req_pair, in_req, out_req, &bound);
        }
    }
}
void cvdescriptorset::DescriptorSet::FilterAndTrackBindingReqs(CMD_BUFFER_STATE *cb_state, PIPELINE_STATE *pipeline,
                                                               const BindingReqMap &in_req, BindingReqMap *out_req) {
    auto &validated = cached_validation_[cb_state];
    auto &image_sample_val = validated.image_samplers[pipeline];
    auto *const dynamic_buffers = &validated.dynamic_buffers;
    auto *const non_dynamic_buffers = &validated.non_dynamic_buffers;
    const auto &stats = p_layout_->GetBindingTypeStats();
    for (const auto &binding_req_pair : in_req) {
        auto binding = binding_req_pair.first;
        VkDescriptorSetLayoutBinding const *layout_binding = p_layout_->GetDescriptorSetLayoutBindingPtrFromBinding(binding);
        if (!layout_binding) {
            continue;
        }
        // Caching criteria differ per type.
        // If image layouts have changed, the image descriptors need to be validated against them.
        if ((layout_binding->descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) ||
            (layout_binding->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)) {
            FilterAndTrackOneBindingReq(binding_req_pair, in_req, out_req, dynamic_buffers, stats.dynamic_buffer_count);
        } else if ((layout_binding->descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER) ||
                   (layout_binding->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER)) {
            FilterAndTrackOneBindingReq(binding_req_pair, in_req, out_req, non_dynamic_buffers,
                                        stats.non_dynamic_buffer_count);
        } else {
            // This is rather crude, as the changed layouts may not impact the bound descriptors,
            // but the simple "versioning" is a cheap "dirty" test.
            auto &version = image_sample_val[binding];  // Take advantage of the default constructor zero-initializing new entries
            if (version != cb_state->image_layout_change_count) {
                version = cb_state->image_layout_change_count;
                out_req->emplace(binding_req_pair);
            }
        }
    }
}
cvdescriptorset::SamplerDescriptor::SamplerDescriptor(const VkSampler *immut) : sampler_(VK_NULL_HANDLE), immutable_(false) {
    updated = false;
    descriptor_class = PlainSampler;
    if (immut) {
        sampler_ = *immut;
        immutable_ = true;
        updated = true;
    }
}
// Validate given sampler.
// Currently this only checks to make sure it exists in the samplerMap
bool CoreChecks::ValidateSampler(const VkSampler sampler) const { return (GetSamplerState(sampler) != nullptr); }
bool CoreChecks::ValidateImageUpdate(VkImageView image_view, VkImageLayout image_layout, VkDescriptorType type,
                                     const char *func_name, std::string *error_code, std::string *error_msg) {
    *error_code = "VUID-VkWriteDescriptorSet-descriptorType-00326";
    auto iv_state = GetImageViewState(image_view);
    if (!iv_state) {
        std::stringstream error_str;
        error_str << "Invalid VkImageView: " << report_data->FormatHandle(image_view).c_str();
        *error_msg = error_str.str();
        return false;
    }
    // Note that when an imageview is created, we validated that memory is bound so no need to re-check here
    // Validate that imageLayout is compatible with aspect_mask and image format
    // and validate that image usage bits are correct for given usage
    VkImageAspectFlags aspect_mask = iv_state->create_info.subresourceRange.aspectMask;
    VkImage image = iv_state->create_info.image;
    VkFormat format = VK_FORMAT_MAX_ENUM;
    VkImageUsageFlags usage = 0;
    auto image_node = GetImageState(image);
    if (image_node) {
        format = image_node->createInfo.format;
        usage = image_node->createInfo.usage;
        // Validate that memory is bound to image
        // TODO: This should have its own valid usage id apart from 2524 which is from the CreateImageView case. The only
        // error that can occur here is if memory bound to a created imageView has been freed.
        if (ValidateMemoryIsBoundToImage(image_node, func_name, "VUID-VkImageViewCreateInfo-image-01020")) {
            *error_code = "VUID-VkImageViewCreateInfo-image-01020";
            *error_msg = "No memory bound to image.";
            return false;
        }
        // KHR_maintenance1 allows rendering into 2D or 2DArray views which slice a 3D image,
        // but not binding them to descriptor sets.
        if (image_node->createInfo.imageType == VK_IMAGE_TYPE_3D &&
            (iv_state->create_info.viewType == VK_IMAGE_VIEW_TYPE_2D ||
             iv_state->create_info.viewType == VK_IMAGE_VIEW_TYPE_2D_ARRAY)) {
            *error_code = "VUID-VkDescriptorImageInfo-imageView-00343";
            *error_msg = "ImageView must not be a 2D or 2DArray view of a 3D image";
            return false;
        }
    }
    // First validate that format and layout are compatible
    if (format == VK_FORMAT_MAX_ENUM) {
        std::stringstream error_str;
        error_str << "Invalid image (" << report_data->FormatHandle(image).c_str() << ") in imageView ("
                  << report_data->FormatHandle(image_view).c_str() << ").";
        *error_msg = error_str.str();
        return false;
    }
    // TODO : The various image aspect and format checks here are based on general spec language in 11.5 Image Views section under
    // vkCreateImageView(). What's the best way to create unique id for these cases?
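// Summary of the layout/aspect pairings enforced by the switch below (reader aid, added here):
//   COLOR_ATTACHMENT_OPTIMAL                          -> aspectMask must include COLOR; format must not be depth/stencil
//   DEPTH_STENCIL_{ATTACHMENT,READ_ONLY}_OPTIMAL      -> exactly one of DEPTH or STENCIL; format must be depth/stencil
//   any other layout with a depth/stencil format      -> DEPTH and STENCIL aspects must not both be set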
bool ds = FormatIsDepthOrStencil(format); switch (image_layout) { case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL: // Only Color bit must be set if ((aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT) != VK_IMAGE_ASPECT_COLOR_BIT) { std::stringstream error_str; error_str << "ImageView (" << report_data->FormatHandle(image_view).c_str() << ") uses layout VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL but does not have VK_IMAGE_ASPECT_COLOR_BIT set."; *error_msg = error_str.str(); return false; } // format must NOT be DS if (ds) { std::stringstream error_str; error_str << "ImageView (" << report_data->FormatHandle(image_view).c_str() << ") uses layout VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL but the image format is " << string_VkFormat(format) << " which is not a color format."; *error_msg = error_str.str(); return false; } break; case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL: case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL: // Depth or stencil bit must be set, but both must NOT be set if (aspect_mask & VK_IMAGE_ASPECT_DEPTH_BIT) { if (aspect_mask & VK_IMAGE_ASPECT_STENCIL_BIT) { // both must NOT be set std::stringstream error_str; error_str << "ImageView (" << report_data->FormatHandle(image_view).c_str() << ") has both STENCIL and DEPTH aspects set"; *error_msg = error_str.str(); return false; } } else if (!(aspect_mask & VK_IMAGE_ASPECT_STENCIL_BIT)) { // Neither were set std::stringstream error_str; error_str << "ImageView (" << report_data->FormatHandle(image_view).c_str() << ") has layout " << string_VkImageLayout(image_layout) << " but does not have STENCIL or DEPTH aspects set"; *error_msg = error_str.str(); return false; } // format must be DS if (!ds) { std::stringstream error_str; error_str << "ImageView (" << report_data->FormatHandle(image_view).c_str() << ") has layout " << string_VkImageLayout(image_layout) << " but the image format is " << string_VkFormat(format) << " which is not a depth/stencil format."; *error_msg = error_str.str(); return false; } break; default: // For other layouts if the source is depth/stencil image, both aspect bits must not be set if (ds) { if (aspect_mask & VK_IMAGE_ASPECT_DEPTH_BIT) { if (aspect_mask & VK_IMAGE_ASPECT_STENCIL_BIT) { // both must NOT be set std::stringstream error_str; error_str << "ImageView (" << report_data->FormatHandle(image_view).c_str() << ") has layout " << string_VkImageLayout(image_layout) << " and is using depth/stencil image of format " << string_VkFormat(format) << " but it has both STENCIL and DEPTH aspects set, which is illegal. 
When using a depth/stencil "
             "image in a descriptor set, please only set either VK_IMAGE_ASPECT_DEPTH_BIT or "
             "VK_IMAGE_ASPECT_STENCIL_BIT depending on whether it will be used for depth reads or stencil "
             "reads respectively.";
*error_code = "VUID-VkDescriptorImageInfo-imageView-01976";
*error_msg = error_str.str();
return false;
}
}
}
break;
}
// Now validate that usage flags are correctly set for given type of update
// As we're switching per-type, if any type has specific layout requirements, check those here as well
// TODO : The various image usage bit requirements are in general spec language for VkImageUsageFlags bit block in 11.3 Images
// under vkCreateImage()
// TODO : Need to also validate case "VUID-VkWriteDescriptorSet-descriptorType-00336" where STORAGE_IMAGE & INPUT_ATTACH types
// must have been created with identity swizzle
const char *error_usage_bit = nullptr;
switch (type) {
    case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
    case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER: {
        if (!(usage & VK_IMAGE_USAGE_SAMPLED_BIT)) {
            error_usage_bit = "VK_IMAGE_USAGE_SAMPLED_BIT";
        }
        break;
    }
    case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: {
        if (!(usage & VK_IMAGE_USAGE_STORAGE_BIT)) {
            error_usage_bit = "VK_IMAGE_USAGE_STORAGE_BIT";
        } else if (VK_IMAGE_LAYOUT_GENERAL != image_layout) {
            std::stringstream error_str;
            // TODO : Need to create custom enum error codes for these cases
            if (image_node->shared_presentable) {
                if (VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR != image_layout) {
                    error_str << "ImageView (" << report_data->FormatHandle(image_view).c_str()
                              << ") of VK_DESCRIPTOR_TYPE_STORAGE_IMAGE type with a front-buffered image is being updated with "
                                 "layout "
                              << string_VkImageLayout(image_layout)
                              << " but according to spec section 13.1 Descriptor Types, 'Front-buffered images that report "
                                 "support for VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT must be in the "
                                 "VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR layout.'";
                    *error_msg = error_str.str();
                    return false;
                }
            } else {  // the enclosing else-if already guarantees image_layout != VK_IMAGE_LAYOUT_GENERAL here
                error_str << "ImageView (" << report_data->FormatHandle(image_view).c_str()
                          << ") of VK_DESCRIPTOR_TYPE_STORAGE_IMAGE type is being updated with layout "
                          << string_VkImageLayout(image_layout)
                          << " but according to spec section 13.1 Descriptor Types, 'Load and store operations on storage "
                             "images can only be done on images in VK_IMAGE_LAYOUT_GENERAL layout.'";
                *error_msg = error_str.str();
                return false;
            }
        }
        break;
    }
    case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT: {
        if (!(usage & VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT)) {
            error_usage_bit = "VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT";
        }
        break;
    }
    default:
        break;
}
if (error_usage_bit) {
    std::stringstream error_str;
    error_str << "ImageView (" << report_data->FormatHandle(image_view).c_str() << ") with usage mask " << std::hex
              << std::showbase << usage << " being used for a descriptor update of type " << string_VkDescriptorType(type)
              << " does not have " << error_usage_bit << " set.";
    *error_msg = error_str.str();
    return false;
}
if ((type == VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE) || (type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)) {
    // Test that the layout is compatible with the descriptorType for the two sampled image types
    const static std::array<VkImageLayout, 3> valid_layouts = {
        {VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, VK_IMAGE_LAYOUT_GENERAL}};
    struct ExtensionLayout {
        VkImageLayout layout;
        bool DeviceExtensions::*extension;
    };
    const static std::array<ExtensionLayout, 3> extended_layouts{
        {// Note double brace req'd for aggregate
initialization {VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR, &DeviceExtensions::vk_khr_shared_presentable_image}, {VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL, &DeviceExtensions::vk_khr_maintenance2}, {VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL, &DeviceExtensions::vk_khr_maintenance2}}}; auto is_layout = [image_layout, this](const ExtensionLayout &ext_layout) { return device_extensions.*(ext_layout.extension) && (ext_layout.layout == image_layout); }; bool valid_layout = (std::find(valid_layouts.cbegin(), valid_layouts.cend(), image_layout) != valid_layouts.cend()) || std::any_of(extended_layouts.cbegin(), extended_layouts.cend(), is_layout); if (!valid_layout) { *error_code = "VUID-VkWriteDescriptorSet-descriptorType-01403"; std::stringstream error_str; error_str << "Descriptor update with descriptorType " << string_VkDescriptorType(type) << " is being updated with invalid imageLayout " << string_VkImageLayout(image_layout) << " for image " << report_data->FormatHandle(image).c_str() << " in imageView " << report_data->FormatHandle(image_view).c_str() << ". Allowed layouts are: VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL, " << "VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, VK_IMAGE_LAYOUT_GENERAL"; for (auto &ext_layout : extended_layouts) { if (device_extensions.*(ext_layout.extension)) { error_str << ", " << string_VkImageLayout(ext_layout.layout); } } *error_msg = error_str.str(); return false; } } return true; } void cvdescriptorset::SamplerDescriptor::WriteUpdate(const VkWriteDescriptorSet *update, const uint32_t index) { if (!immutable_) { sampler_ = update->pImageInfo[index].sampler; } updated = true; } void cvdescriptorset::SamplerDescriptor::CopyUpdate(const Descriptor *src) { if (!immutable_) { auto update_sampler = static_cast<const SamplerDescriptor *>(src)->sampler_; sampler_ = update_sampler; } updated = true; } void cvdescriptorset::SamplerDescriptor::UpdateDrawState(CoreChecks *dev_data, CMD_BUFFER_STATE *cb_node) { if (!immutable_) { auto sampler_state = dev_data->GetSamplerState(sampler_); if (sampler_state) dev_data->AddCommandBufferBindingSampler(cb_node, sampler_state); } } cvdescriptorset::ImageSamplerDescriptor::ImageSamplerDescriptor(const VkSampler *immut) : sampler_(VK_NULL_HANDLE), immutable_(false), image_view_(VK_NULL_HANDLE), image_layout_(VK_IMAGE_LAYOUT_UNDEFINED) { updated = false; descriptor_class = ImageSampler; if (immut) { sampler_ = *immut; immutable_ = true; } } void cvdescriptorset::ImageSamplerDescriptor::WriteUpdate(const VkWriteDescriptorSet *update, const uint32_t index) { updated = true; const auto &image_info = update->pImageInfo[index]; if (!immutable_) { sampler_ = image_info.sampler; } image_view_ = image_info.imageView; image_layout_ = image_info.imageLayout; } void cvdescriptorset::ImageSamplerDescriptor::CopyUpdate(const Descriptor *src) { if (!immutable_) { auto update_sampler = static_cast<const ImageSamplerDescriptor *>(src)->sampler_; sampler_ = update_sampler; } auto image_view = static_cast<const ImageSamplerDescriptor *>(src)->image_view_; auto image_layout = static_cast<const ImageSamplerDescriptor *>(src)->image_layout_; updated = true; image_view_ = image_view; image_layout_ = image_layout; } void cvdescriptorset::ImageSamplerDescriptor::UpdateDrawState(CoreChecks *dev_data, CMD_BUFFER_STATE *cb_node) { // First add binding for any non-immutable sampler if (!immutable_) { auto sampler_state = dev_data->GetSamplerState(sampler_); if (sampler_state) dev_data->AddCommandBufferBindingSampler(cb_node, 
sampler_state); } // Add binding for image auto iv_state = dev_data->GetImageViewState(image_view_); if (iv_state) { dev_data->AddCommandBufferBindingImageView(cb_node, iv_state); dev_data->SetImageViewInitialLayout(cb_node, *iv_state, image_layout_); } } cvdescriptorset::ImageDescriptor::ImageDescriptor(const VkDescriptorType type) : storage_(false), image_view_(VK_NULL_HANDLE), image_layout_(VK_IMAGE_LAYOUT_UNDEFINED) { updated = false; descriptor_class = Image; if (VK_DESCRIPTOR_TYPE_STORAGE_IMAGE == type) storage_ = true; } void cvdescriptorset::ImageDescriptor::WriteUpdate(const VkWriteDescriptorSet *update, const uint32_t index) { updated = true; const auto &image_info = update->pImageInfo[index]; image_view_ = image_info.imageView; image_layout_ = image_info.imageLayout; } void cvdescriptorset::ImageDescriptor::CopyUpdate(const Descriptor *src) { auto image_view = static_cast<const ImageDescriptor *>(src)->image_view_; auto image_layout = static_cast<const ImageDescriptor *>(src)->image_layout_; updated = true; image_view_ = image_view; image_layout_ = image_layout; } void cvdescriptorset::ImageDescriptor::UpdateDrawState(CoreChecks *dev_data, CMD_BUFFER_STATE *cb_node) { // Add binding for image auto iv_state = dev_data->GetImageViewState(image_view_); if (iv_state) { dev_data->AddCommandBufferBindingImageView(cb_node, iv_state); dev_data->SetImageViewInitialLayout(cb_node, *iv_state, image_layout_); } } cvdescriptorset::BufferDescriptor::BufferDescriptor(const VkDescriptorType type) : storage_(false), dynamic_(false), buffer_(VK_NULL_HANDLE), offset_(0), range_(0) { updated = false; descriptor_class = GeneralBuffer; if (VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC == type) { dynamic_ = true; } else if (VK_DESCRIPTOR_TYPE_STORAGE_BUFFER == type) { storage_ = true; } else if (VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC == type) { dynamic_ = true; storage_ = true; } } void cvdescriptorset::BufferDescriptor::WriteUpdate(const VkWriteDescriptorSet *update, const uint32_t index) { updated = true; const auto &buffer_info = update->pBufferInfo[index]; buffer_ = buffer_info.buffer; offset_ = buffer_info.offset; range_ = buffer_info.range; } void cvdescriptorset::BufferDescriptor::CopyUpdate(const Descriptor *src) { auto buff_desc = static_cast<const BufferDescriptor *>(src); updated = true; buffer_ = buff_desc->buffer_; offset_ = buff_desc->offset_; range_ = buff_desc->range_; } void cvdescriptorset::BufferDescriptor::UpdateDrawState(CoreChecks *dev_data, CMD_BUFFER_STATE *cb_node) { auto buffer_node = dev_data->GetBufferState(buffer_); if (buffer_node) dev_data->AddCommandBufferBindingBuffer(cb_node, buffer_node); } cvdescriptorset::TexelDescriptor::TexelDescriptor(const VkDescriptorType type) : buffer_view_(VK_NULL_HANDLE), storage_(false) { updated = false; descriptor_class = TexelBuffer; if (VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER == type) storage_ = true; } void cvdescriptorset::TexelDescriptor::WriteUpdate(const VkWriteDescriptorSet *update, const uint32_t index) { updated = true; buffer_view_ = update->pTexelBufferView[index]; } void cvdescriptorset::TexelDescriptor::CopyUpdate(const Descriptor *src) { updated = true; buffer_view_ = static_cast<const TexelDescriptor *>(src)->buffer_view_; } void cvdescriptorset::TexelDescriptor::UpdateDrawState(CoreChecks *dev_data, CMD_BUFFER_STATE *cb_node) { auto bv_state = dev_data->GetBufferViewState(buffer_view_); if (bv_state) { dev_data->AddCommandBufferBindingBufferView(cb_node, bv_state); } } // This is a helper function that iterates over a 
set of Write and Copy updates, pulls the DescriptorSet* for updated // sets, and then calls their respective Validate[Write|Copy]Update functions. // If the update hits an issue for which the callback returns "true", meaning that the call down the chain should // be skipped, then true is returned. // If there is no issue with the update, then false is returned. bool CoreChecks::ValidateUpdateDescriptorSets(uint32_t write_count, const VkWriteDescriptorSet *p_wds, uint32_t copy_count, const VkCopyDescriptorSet *p_cds, const char *func_name) { bool skip = false; // Validate Write updates for (uint32_t i = 0; i < write_count; i++) { auto dest_set = p_wds[i].dstSet; auto set_node = GetSetNode(dest_set); if (!set_node) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, HandleToUint64(dest_set), kVUID_Core_DrawState_InvalidDescriptorSet, "Cannot call %s on %s that has not been allocated.", func_name, report_data->FormatHandle(dest_set).c_str()); } else { std::string error_code; std::string error_str; if (!ValidateWriteUpdate(set_node, &p_wds[i], func_name, &error_code, &error_str)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, HandleToUint64(dest_set), error_code, "%s failed write update validation for %s with error: %s.", func_name, report_data->FormatHandle(dest_set).c_str(), error_str.c_str()); } } } // Now validate copy updates for (uint32_t i = 0; i < copy_count; ++i) { auto dst_set = p_cds[i].dstSet; auto src_set = p_cds[i].srcSet; auto src_node = GetSetNode(src_set); auto dst_node = GetSetNode(dst_set); // Object_tracker verifies that src & dest descriptor set are valid assert(src_node); assert(dst_node); std::string error_code; std::string error_str; if (!ValidateCopyUpdate(&p_cds[i], dst_node, src_node, func_name, &error_code, &error_str)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, HandleToUint64(dst_set), error_code, "%s failed copy update from %s to %s with error: %s.", func_name, report_data->FormatHandle(src_set).c_str(), report_data->FormatHandle(dst_set).c_str(), error_str.c_str()); } } return skip; } // This is a helper function that iterates over a set of Write and Copy updates, pulls the DescriptorSet* for updated // sets, and then calls their respective Perform[Write|Copy]Update functions. // Prerequisite : ValidateUpdateDescriptorSets() should be called and return "false" prior to calling PerformUpdateDescriptorSets() // with the same set of updates. // This is split from the validate code to allow validation prior to calling down the chain, and then update after // calling down the chain. 
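// Minimal usage sketch (hypothetical caller, added for illustration; the real intercept lives
// elsewhere in the layer and variable names here are illustrative):
//   if (!ValidateUpdateDescriptorSets(wc, p_wds, cc, p_cds, "vkUpdateDescriptorSets()")) {
//       // ... call down the chain first, then record the new state ...
//       cvdescriptorset::PerformUpdateDescriptorSets(dev_data, wc, p_wds, cc, p_cds);
//   }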
void cvdescriptorset::PerformUpdateDescriptorSets(CoreChecks *dev_data, uint32_t write_count, const VkWriteDescriptorSet *p_wds, uint32_t copy_count, const VkCopyDescriptorSet *p_cds) { // Write updates first uint32_t i = 0; for (i = 0; i < write_count; ++i) { auto dest_set = p_wds[i].dstSet; auto set_node = dev_data->GetSetNode(dest_set); if (set_node) { set_node->PerformWriteUpdate(&p_wds[i]); } } // Now copy updates for (i = 0; i < copy_count; ++i) { auto dst_set = p_cds[i].dstSet; auto src_set = p_cds[i].srcSet; auto src_node = dev_data->GetSetNode(src_set); auto dst_node = dev_data->GetSetNode(dst_set); if (src_node && dst_node) { dst_node->PerformCopyUpdate(&p_cds[i], src_node); } } } cvdescriptorset::DecodedTemplateUpdate::DecodedTemplateUpdate(CoreChecks *device_data, VkDescriptorSet descriptorSet, const TEMPLATE_STATE *template_state, const void *pData, VkDescriptorSetLayout push_layout) { auto const &create_info = template_state->create_info; inline_infos.resize(create_info.descriptorUpdateEntryCount); // Make sure we have one if we need it desc_writes.reserve(create_info.descriptorUpdateEntryCount); // emplaced, so reserved without initialization VkDescriptorSetLayout effective_dsl = create_info.templateType == VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET ? create_info.descriptorSetLayout : push_layout; auto layout_obj = GetDescriptorSetLayout(device_data, effective_dsl); // Create a WriteDescriptorSet struct for each template update entry for (uint32_t i = 0; i < create_info.descriptorUpdateEntryCount; i++) { auto binding_count = layout_obj->GetDescriptorCountFromBinding(create_info.pDescriptorUpdateEntries[i].dstBinding); auto binding_being_updated = create_info.pDescriptorUpdateEntries[i].dstBinding; auto dst_array_element = create_info.pDescriptorUpdateEntries[i].dstArrayElement; desc_writes.reserve(desc_writes.size() + create_info.pDescriptorUpdateEntries[i].descriptorCount); for (uint32_t j = 0; j < create_info.pDescriptorUpdateEntries[i].descriptorCount; j++) { desc_writes.emplace_back(); auto &write_entry = desc_writes.back(); size_t offset = create_info.pDescriptorUpdateEntries[i].offset + j * create_info.pDescriptorUpdateEntries[i].stride; char *update_entry = (char *)(pData) + offset; if (dst_array_element >= binding_count) { dst_array_element = 0; binding_being_updated = layout_obj->GetNextValidBinding(binding_being_updated); } write_entry.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; write_entry.pNext = NULL; write_entry.dstSet = descriptorSet; write_entry.dstBinding = binding_being_updated; write_entry.dstArrayElement = dst_array_element; write_entry.descriptorCount = 1; write_entry.descriptorType = create_info.pDescriptorUpdateEntries[i].descriptorType; switch (create_info.pDescriptorUpdateEntries[i].descriptorType) { case VK_DESCRIPTOR_TYPE_SAMPLER: case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER: case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE: case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT: write_entry.pImageInfo = reinterpret_cast<VkDescriptorImageInfo *>(update_entry); break; case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER: case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER: case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC: case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: write_entry.pBufferInfo = reinterpret_cast<VkDescriptorBufferInfo *>(update_entry); break; case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER: case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: write_entry.pTexelBufferView = reinterpret_cast<VkBufferView *>(update_entry); break; case 
VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT: { VkWriteDescriptorSetInlineUniformBlockEXT *inline_info = &inline_infos[i]; inline_info->sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_INLINE_UNIFORM_BLOCK_EXT; inline_info->pNext = nullptr; inline_info->dataSize = create_info.pDescriptorUpdateEntries[i].descriptorCount; inline_info->pData = update_entry; write_entry.pNext = inline_info; // descriptorCount must match the dataSize member of the VkWriteDescriptorSetInlineUniformBlockEXT structure write_entry.descriptorCount = inline_info->dataSize; // skip the rest of the array, they just represent bytes in the update j = create_info.pDescriptorUpdateEntries[i].descriptorCount; break; } default: assert(0); break; } dst_array_element++; } } } // These helper functions carry out the validate and record descriptor updates peformed via update templates. They decode // the templatized data and leverage the non-template UpdateDescriptor helper functions. bool CoreChecks::ValidateUpdateDescriptorSetsWithTemplateKHR(VkDescriptorSet descriptorSet, const TEMPLATE_STATE *template_state, const void *pData) { // Translate the templated update into a normal update for validation... cvdescriptorset::DecodedTemplateUpdate decoded_update(this, descriptorSet, template_state, pData); return ValidateUpdateDescriptorSets(static_cast<uint32_t>(decoded_update.desc_writes.size()), decoded_update.desc_writes.data(), 0, NULL, "vkUpdateDescriptorSetWithTemplate()"); } void CoreChecks::PerformUpdateDescriptorSetsWithTemplateKHR(VkDescriptorSet descriptorSet, const TEMPLATE_STATE *template_state, const void *pData) { // Translate the templated update into a normal update for validation... cvdescriptorset::DecodedTemplateUpdate decoded_update(this, descriptorSet, template_state, pData); cvdescriptorset::PerformUpdateDescriptorSets(this, static_cast<uint32_t>(decoded_update.desc_writes.size()), decoded_update.desc_writes.data(), 0, NULL); } std::string cvdescriptorset::DescriptorSet::StringifySetAndLayout() const { std::string out; auto layout_handle = p_layout_->GetDescriptorSetLayout(); if (IsPushDescriptor()) { string_sprintf(&out, "Push Descriptors defined with VkDescriptorSetLayout %s", state_data_->report_data->FormatHandle(layout_handle).c_str()); } else { string_sprintf(&out, "VkDescriptorSet %s allocated with VkDescriptorSetLayout %s", state_data_->report_data->FormatHandle(set_).c_str(), state_data_->report_data->FormatHandle(layout_handle).c_str()); } return out; }; // Loop through the write updates to validate for a push descriptor set, ignoring dstSet bool CoreChecks::ValidatePushDescriptorsUpdate(const DescriptorSet *push_set, uint32_t write_count, const VkWriteDescriptorSet *p_wds, const char *func_name) { assert(push_set->IsPushDescriptor()); bool skip = false; for (uint32_t i = 0; i < write_count; i++) { std::string error_code; std::string error_str; if (!ValidateWriteUpdate(push_set, &p_wds[i], func_name, &error_code, &error_str)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT, HandleToUint64(push_set->GetDescriptorSetLayout()), error_code, "%s failed update validation: %s.", func_name, error_str.c_str()); } } return skip; } // For the given buffer, verify that its creation parameters are appropriate for the given type // If there's an error, update the error_msg string with details and return false, else return true bool cvdescriptorset::ValidateBufferUsage(BUFFER_STATE const *buffer_node, VkDescriptorType type, std::string *error_code, 
std::string *error_msg) { // Verify that usage bits set correctly for given type auto usage = buffer_node->createInfo.usage; const char *error_usage_bit = nullptr; switch (type) { case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER: if (!(usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT)) { *error_code = "VUID-VkWriteDescriptorSet-descriptorType-00334"; error_usage_bit = "VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT"; } break; case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: if (!(usage & VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT)) { *error_code = "VUID-VkWriteDescriptorSet-descriptorType-00335"; error_usage_bit = "VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT"; } break; case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER: case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC: if (!(usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT)) { *error_code = "VUID-VkWriteDescriptorSet-descriptorType-00330"; error_usage_bit = "VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT"; } break; case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER: case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: if (!(usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT)) { *error_code = "VUID-VkWriteDescriptorSet-descriptorType-00331"; error_usage_bit = "VK_BUFFER_USAGE_STORAGE_BUFFER_BIT"; } break; default: break; } if (error_usage_bit) { std::stringstream error_str; error_str << "Buffer (" << buffer_node->buffer << ") with usage mask " << std::hex << std::showbase << usage << " being used for a descriptor update of type " << string_VkDescriptorType(type) << " does not have " << error_usage_bit << " set."; *error_msg = error_str.str(); return false; } return true; } // For buffer descriptor updates, verify the buffer usage and VkDescriptorBufferInfo struct which includes: // 1. buffer is valid // 2. buffer was created with correct usage flags // 3. offset is less than buffer size // 4. range is either VK_WHOLE_SIZE or falls in (0, (buffer size - offset)] // 5. 
range and offset are within the device's limits // If there's an error, update the error_msg string with details and return false, else return true bool CoreChecks::ValidateBufferUpdate(VkDescriptorBufferInfo const *buffer_info, VkDescriptorType type, const char *func_name, std::string *error_code, std::string *error_msg) { // First make sure that buffer is valid auto buffer_node = GetBufferState(buffer_info->buffer); // Any invalid buffer should already be caught by object_tracker assert(buffer_node); if (ValidateMemoryIsBoundToBuffer(buffer_node, func_name, "VUID-VkWriteDescriptorSet-descriptorType-00329")) { *error_code = "VUID-VkWriteDescriptorSet-descriptorType-00329"; *error_msg = "No memory bound to buffer."; return false; } // Verify usage bits if (!cvdescriptorset::ValidateBufferUsage(buffer_node, type, error_code, error_msg)) { // error_msg will have been updated by ValidateBufferUsage() return false; } // offset must be less than buffer size if (buffer_info->offset >= buffer_node->createInfo.size) { *error_code = "VUID-VkDescriptorBufferInfo-offset-00340"; std::stringstream error_str; error_str << "VkDescriptorBufferInfo offset of " << buffer_info->offset << " is greater than or equal to buffer " << buffer_node->buffer << " size of " << buffer_node->createInfo.size; *error_msg = error_str.str(); return false; } if (buffer_info->range != VK_WHOLE_SIZE) { // Range must be VK_WHOLE_SIZE or > 0 if (!buffer_info->range) { *error_code = "VUID-VkDescriptorBufferInfo-range-00341"; std::stringstream error_str; error_str << "VkDescriptorBufferInfo range is not VK_WHOLE_SIZE and is zero, which is not allowed."; *error_msg = error_str.str(); return false; } // Range must be VK_WHOLE_SIZE or <= (buffer size - offset) if (buffer_info->range > (buffer_node->createInfo.size - buffer_info->offset)) { *error_code = "VUID-VkDescriptorBufferInfo-range-00342"; std::stringstream error_str; error_str << "VkDescriptorBufferInfo range is " << buffer_info->range << " which is greater than buffer size (" << buffer_node->createInfo.size << ") minus requested offset of " << buffer_info->offset; *error_msg = error_str.str(); return false; } } // Check buffer update sizes against device limits const auto &limits = phys_dev_props.limits; if (VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER == type || VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC == type) { auto max_ub_range = limits.maxUniformBufferRange; if (buffer_info->range != VK_WHOLE_SIZE && buffer_info->range > max_ub_range) { *error_code = "VUID-VkWriteDescriptorSet-descriptorType-00332"; std::stringstream error_str; error_str << "VkDescriptorBufferInfo range is " << buffer_info->range << " which is greater than this device's maxUniformBufferRange (" << max_ub_range << ")"; *error_msg = error_str.str(); return false; } else if (buffer_info->range == VK_WHOLE_SIZE && (buffer_node->createInfo.size - buffer_info->offset) > max_ub_range) { *error_code = "VUID-VkWriteDescriptorSet-descriptorType-00332"; std::stringstream error_str; error_str << "VkDescriptorBufferInfo range is VK_WHOLE_SIZE but effective range " << "(" << (buffer_node->createInfo.size - buffer_info->offset) << ") is greater than this device's " << "maxUniformBufferRange (" << max_ub_range << ")"; *error_msg = error_str.str(); return false; } } else if (VK_DESCRIPTOR_TYPE_STORAGE_BUFFER == type || VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC == type) { auto max_sb_range = limits.maxStorageBufferRange; if (buffer_info->range != VK_WHOLE_SIZE && buffer_info->range > max_sb_range) { *error_code = 
"VUID-VkWriteDescriptorSet-descriptorType-00333"; std::stringstream error_str; error_str << "VkDescriptorBufferInfo range is " << buffer_info->range << " which is greater than this device's maxStorageBufferRange (" << max_sb_range << ")"; *error_msg = error_str.str(); return false; } else if (buffer_info->range == VK_WHOLE_SIZE && (buffer_node->createInfo.size - buffer_info->offset) > max_sb_range) { *error_code = "VUID-VkWriteDescriptorSet-descriptorType-00333"; std::stringstream error_str; error_str << "VkDescriptorBufferInfo range is VK_WHOLE_SIZE but effective range " << "(" << (buffer_node->createInfo.size - buffer_info->offset) << ") is greater than this device's " << "maxStorageBufferRange (" << max_sb_range << ")"; *error_msg = error_str.str(); return false; } } return true; } // Verify that the contents of the update are ok, but don't perform actual update bool CoreChecks::VerifyCopyUpdateContents(const VkCopyDescriptorSet *update, const DescriptorSet *src_set, VkDescriptorType type, uint32_t index, const char *func_name, std::string *error_code, std::string *error_msg) { // Note : Repurposing some Write update error codes here as specific details aren't called out for copy updates like they are // for write updates using DescriptorClass = cvdescriptorset::DescriptorClass; using BufferDescriptor = cvdescriptorset::BufferDescriptor; using ImageDescriptor = cvdescriptorset::ImageDescriptor; using ImageSamplerDescriptor = cvdescriptorset::ImageSamplerDescriptor; using SamplerDescriptor = cvdescriptorset::SamplerDescriptor; using TexelDescriptor = cvdescriptorset::TexelDescriptor; auto device_data = this; switch (src_set->GetDescriptorFromGlobalIndex(index)->descriptor_class) { case DescriptorClass::PlainSampler: { for (uint32_t di = 0; di < update->descriptorCount; ++di) { const auto src_desc = src_set->GetDescriptorFromGlobalIndex(index + di); if (!src_desc->updated) continue; if (!src_desc->IsImmutableSampler()) { auto update_sampler = static_cast<const SamplerDescriptor *>(src_desc)->GetSampler(); if (!ValidateSampler(update_sampler)) { *error_code = "VUID-VkWriteDescriptorSet-descriptorType-00325"; std::stringstream error_str; error_str << "Attempted copy update to sampler descriptor with invalid sampler: " << update_sampler << "."; *error_msg = error_str.str(); return false; } } else { // TODO : Warn here } } break; } case DescriptorClass::ImageSampler: { for (uint32_t di = 0; di < update->descriptorCount; ++di) { const auto src_desc = src_set->GetDescriptorFromGlobalIndex(index + di); if (!src_desc->updated) continue; auto img_samp_desc = static_cast<const ImageSamplerDescriptor *>(src_desc); // First validate sampler if (!img_samp_desc->IsImmutableSampler()) { auto update_sampler = img_samp_desc->GetSampler(); if (!ValidateSampler(update_sampler)) { *error_code = "VUID-VkWriteDescriptorSet-descriptorType-00325"; std::stringstream error_str; error_str << "Attempted copy update to sampler descriptor with invalid sampler: " << update_sampler << "."; *error_msg = error_str.str(); return false; } } else { // TODO : Warn here } // Validate image auto image_view = img_samp_desc->GetImageView(); auto image_layout = img_samp_desc->GetImageLayout(); if (!ValidateImageUpdate(image_view, image_layout, type, func_name, error_code, error_msg)) { std::stringstream error_str; error_str << "Attempted copy update to combined image sampler descriptor failed due to: " << error_msg->c_str(); *error_msg = error_str.str(); return false; } } break; } case DescriptorClass::Image: { for (uint32_t di = 
0; di < update->descriptorCount; ++di) { const auto src_desc = src_set->GetDescriptorFromGlobalIndex(index + di); if (!src_desc->updated) continue; auto img_desc = static_cast<const ImageDescriptor *>(src_desc); auto image_view = img_desc->GetImageView(); auto image_layout = img_desc->GetImageLayout(); if (!ValidateImageUpdate(image_view, image_layout, type, func_name, error_code, error_msg)) { std::stringstream error_str; error_str << "Attempted copy update to image descriptor failed due to: " << error_msg->c_str(); *error_msg = error_str.str(); return false; } } break; } case DescriptorClass::TexelBuffer: { for (uint32_t di = 0; di < update->descriptorCount; ++di) { const auto src_desc = src_set->GetDescriptorFromGlobalIndex(index + di); if (!src_desc->updated) continue; auto buffer_view = static_cast<const TexelDescriptor *>(src_desc)->GetBufferView(); auto bv_state = device_data->GetBufferViewState(buffer_view); if (!bv_state) { *error_code = "VUID-VkWriteDescriptorSet-descriptorType-00323"; std::stringstream error_str; error_str << "Attempted copy update to texel buffer descriptor with invalid buffer view: " << buffer_view; *error_msg = error_str.str(); return false; } auto buffer = bv_state->create_info.buffer; if (!cvdescriptorset::ValidateBufferUsage(GetBufferState(buffer), type, error_code, error_msg)) { std::stringstream error_str; error_str << "Attempted copy update to texel buffer descriptor failed due to: " << error_msg->c_str(); *error_msg = error_str.str(); return false; } } break; } case DescriptorClass::GeneralBuffer: { for (uint32_t di = 0; di < update->descriptorCount; ++di) { const auto src_desc = src_set->GetDescriptorFromGlobalIndex(index + di); if (!src_desc->updated) continue; auto buffer = static_cast<const BufferDescriptor *>(src_desc)->GetBuffer(); if (!cvdescriptorset::ValidateBufferUsage(GetBufferState(buffer), type, error_code, error_msg)) { std::stringstream error_str; error_str << "Attempted copy update to buffer descriptor failed due to: " << error_msg->c_str(); *error_msg = error_str.str(); return false; } } break; } case DescriptorClass::InlineUniform: case DescriptorClass::AccelerationStructure: break; default: assert(0); // We've already verified update type so should never get here break; } // All checks passed so update contents are good return true; } // Update the common AllocateDescriptorSetsData void CoreChecks::UpdateAllocateDescriptorSetsData(const VkDescriptorSetAllocateInfo *p_alloc_info, cvdescriptorset::AllocateDescriptorSetsData *ds_data) { for (uint32_t i = 0; i < p_alloc_info->descriptorSetCount; i++) { auto layout = GetDescriptorSetLayout(this, p_alloc_info->pSetLayouts[i]); if (layout) { ds_data->layout_nodes[i] = layout; // Count total descriptors required per type for (uint32_t j = 0; j < layout->GetBindingCount(); ++j) { const auto &binding_layout = layout->GetDescriptorSetLayoutBindingPtrFromIndex(j); uint32_t typeIndex = static_cast<uint32_t>(binding_layout->descriptorType); ds_data->required_descriptors_by_type[typeIndex] += binding_layout->descriptorCount; } } // Any unknown layouts will be flagged as errors during ValidateAllocateDescriptorSets() call } } // Verify that the state at allocate time is correct, but don't actually allocate the sets yet bool CoreChecks::ValidateAllocateDescriptorSets(const VkDescriptorSetAllocateInfo *p_alloc_info, const cvdescriptorset::AllocateDescriptorSetsData *ds_data) { bool skip = false; auto pool_state = GetDescriptorPoolState(p_alloc_info->descriptorPool); for (uint32_t i = 0; i < 
p_alloc_info->descriptorSetCount; i++) { auto layout = GetDescriptorSetLayout(this, p_alloc_info->pSetLayouts[i]); if (layout) { // nullptr layout indicates no valid layout handle for this device, validated/logged in object_tracker if (layout->IsPushDescriptor()) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT, HandleToUint64(p_alloc_info->pSetLayouts[i]), "VUID-VkDescriptorSetAllocateInfo-pSetLayouts-00308", "%s specified at pSetLayouts[%" PRIu32 "] in vkAllocateDescriptorSets() was created with invalid flag %s set.", report_data->FormatHandle(p_alloc_info->pSetLayouts[i]).c_str(), i, "VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR"); } if (layout->GetCreateFlags() & VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT && !(pool_state->createInfo.flags & VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT_EXT)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT, 0, "VUID-VkDescriptorSetAllocateInfo-pSetLayouts-03044", "Descriptor set layout create flags and pool create flags mismatch for index (%d)", i); } } } if (!device_extensions.vk_khr_maintenance1) { // Track number of descriptorSets allowable in this pool if (pool_state->availableSets < p_alloc_info->descriptorSetCount) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT, HandleToUint64(pool_state->pool), "VUID-VkDescriptorSetAllocateInfo-descriptorSetCount-00306", "Unable to allocate %u descriptorSets from %s" ". This pool only has %d descriptorSets remaining.", p_alloc_info->descriptorSetCount, report_data->FormatHandle(pool_state->pool).c_str(), pool_state->availableSets); } // Determine whether descriptor counts are satisfiable for (auto it = ds_data->required_descriptors_by_type.begin(); it != ds_data->required_descriptors_by_type.end(); ++it) { if (ds_data->required_descriptors_by_type.at(it->first) > pool_state->availableDescriptorTypeCount[it->first]) { skip |= log_msg( report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT, HandleToUint64(pool_state->pool), "VUID-VkDescriptorSetAllocateInfo-descriptorPool-00307", "Unable to allocate %u descriptors of type %s from %s" ". 
This pool only has %d descriptors of this type remaining.", ds_data->required_descriptors_by_type.at(it->first), string_VkDescriptorType(VkDescriptorType(it->first)), report_data->FormatHandle(pool_state->pool).c_str(), pool_state->availableDescriptorTypeCount[it->first]); } } } const auto *count_allocate_info = lvl_find_in_chain<VkDescriptorSetVariableDescriptorCountAllocateInfoEXT>(p_alloc_info->pNext); if (count_allocate_info) { if (count_allocate_info->descriptorSetCount != 0 && count_allocate_info->descriptorSetCount != p_alloc_info->descriptorSetCount) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT, 0, "VUID-VkDescriptorSetVariableDescriptorCountAllocateInfoEXT-descriptorSetCount-03045", "VkDescriptorSetAllocateInfo::descriptorSetCount (%d) != " "VkDescriptorSetVariableDescriptorCountAllocateInfoEXT::descriptorSetCount (%d)", p_alloc_info->descriptorSetCount, count_allocate_info->descriptorSetCount); } if (count_allocate_info->descriptorSetCount == p_alloc_info->descriptorSetCount) { for (uint32_t i = 0; i < p_alloc_info->descriptorSetCount; i++) { auto layout = GetDescriptorSetLayout(this, p_alloc_info->pSetLayouts[i]); if (count_allocate_info->pDescriptorCounts[i] > layout->GetDescriptorCountFromBinding(layout->GetMaxBinding())) { skip |= log_msg( report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT, 0, "VUID-VkDescriptorSetVariableDescriptorCountAllocateInfoEXT-pSetLayouts-03046", "pDescriptorCounts[%d] = (%d), binding's descriptorCount = (%d)", i, count_allocate_info->pDescriptorCounts[i], layout->GetDescriptorCountFromBinding(layout->GetMaxBinding())); } } } } return skip; } // Decrement allocated sets from the pool and insert new sets into set_map void CoreChecks::PerformAllocateDescriptorSets(const VkDescriptorSetAllocateInfo *p_alloc_info, const VkDescriptorSet *descriptor_sets, const cvdescriptorset::AllocateDescriptorSetsData *ds_data) { auto pool_state = descriptorPoolMap[p_alloc_info->descriptorPool].get(); // Account for sets and individual descriptors allocated from pool pool_state->availableSets -= p_alloc_info->descriptorSetCount; for (auto it = ds_data->required_descriptors_by_type.begin(); it != ds_data->required_descriptors_by_type.end(); ++it) { pool_state->availableDescriptorTypeCount[it->first] -= ds_data->required_descriptors_by_type.at(it->first); } const auto *variable_count_info = lvl_find_in_chain<VkDescriptorSetVariableDescriptorCountAllocateInfoEXT>(p_alloc_info->pNext); bool variable_count_valid = variable_count_info && variable_count_info->descriptorSetCount == p_alloc_info->descriptorSetCount; // Create tracking object for each descriptor set; insert into global map and the pool's set. for (uint32_t i = 0; i < p_alloc_info->descriptorSetCount; i++) { uint32_t variable_count = variable_count_valid ? 
variable_count_info->pDescriptorCounts[i] : 0; std::unique_ptr<cvdescriptorset::DescriptorSet> new_ds(new cvdescriptorset::DescriptorSet( descriptor_sets[i], p_alloc_info->descriptorPool, ds_data->layout_nodes[i], variable_count, this)); pool_state->sets.insert(new_ds.get()); new_ds->in_use.store(0); setMap[descriptor_sets[i]] = std::move(new_ds); } } cvdescriptorset::PrefilterBindRequestMap::PrefilterBindRequestMap(cvdescriptorset::DescriptorSet &ds, const BindingReqMap &in_map, CMD_BUFFER_STATE *cb_state) : filtered_map_(), orig_map_(in_map) { if (ds.GetTotalDescriptorCount() > kManyDescriptors_) { filtered_map_.reset(new std::map<uint32_t, descriptor_req>()); ds.FilterAndTrackBindingReqs(cb_state, orig_map_, filtered_map_.get()); } } cvdescriptorset::PrefilterBindRequestMap::PrefilterBindRequestMap(cvdescriptorset::DescriptorSet &ds, const BindingReqMap &in_map, CMD_BUFFER_STATE *cb_state, PIPELINE_STATE *pipeline) : filtered_map_(), orig_map_(in_map) { if (ds.GetTotalDescriptorCount() > kManyDescriptors_) { filtered_map_.reset(new std::map<uint32_t, descriptor_req>()); ds.FilterAndTrackBindingReqs(cb_state, pipeline, orig_map_, filtered_map_.get()); } } // Starting at offset descriptor of given binding, parse over update_count // descriptor updates and verify that for any binding boundaries that are crossed, the next binding(s) are all consistent // Consistency means that their type, stage flags, and whether or not they use immutable samplers matches // If so, return true. If not, fill in error_msg and return false bool cvdescriptorset::VerifyUpdateConsistency(DescriptorSetLayout::ConstBindingIterator current_binding, uint32_t offset, uint32_t update_count, const char *type, const VkDescriptorSet set, std::string *error_msg) { // Verify consecutive bindings match (if needed) auto orig_binding = current_binding; // Track count of descriptors in the current_bindings that are remaining to be updated auto binding_remaining = current_binding.GetDescriptorCount(); // First, it's legal to offset beyond your own binding so handle that case // Really this is just searching for the binding in which the update begins and adjusting offset accordingly while (offset >= binding_remaining && !current_binding.AtEnd()) { // Advance to next binding, decrement offset by binding size offset -= binding_remaining; ++current_binding; binding_remaining = current_binding.GetDescriptorCount(); // Accessors are safe if AtEnd } assert(!current_binding.AtEnd()); // As written assumes range check has been made before calling binding_remaining -= offset; while (update_count > binding_remaining) { // While our updates overstep current binding // Verify next consecutive binding matches type, stage flags & immutable sampler use auto next_binding = current_binding.Next(); if (!current_binding.IsConsistent(next_binding)) { std::stringstream error_str; error_str << "Attempting " << type; if (current_binding.Layout()->IsPushDescriptor()) { error_str << " push descriptors"; } else { error_str << " descriptor set " << set; } error_str << " binding #" << orig_binding.Binding() << " with #" << update_count << " descriptors being updated but this update oversteps the bounds of this binding and the next binding is " "not consistent with current binding so this update is invalid."; *error_msg = error_str.str(); return false; } current_binding = next_binding; // For sake of this check consider the bindings updated and grab count for next binding update_count -= binding_remaining; binding_remaining = 
current_binding.GetDescriptorCount(); } return true; } // Validate the state for a given write update but don't actually perform the update // If an error would occur for this update, return false and fill in details in error_msg string bool CoreChecks::ValidateWriteUpdate(const DescriptorSet *dest_set, const VkWriteDescriptorSet *update, const char *func_name, std::string *error_code, std::string *error_msg) { const auto dest_layout = dest_set->GetLayout(); // Verify dst layout still valid if (dest_layout->IsDestroyed()) { *error_code = "VUID-VkWriteDescriptorSet-dstSet-00320"; string_sprintf(error_msg, "Cannot call %s to perform write update on %s which has been destroyed", func_name, dest_set->StringifySetAndLayout().c_str()); return false; } // Verify dst binding exists if (!dest_layout->HasBinding(update->dstBinding)) { *error_code = "VUID-VkWriteDescriptorSet-dstBinding-00315"; std::stringstream error_str; error_str << dest_set->StringifySetAndLayout() << " does not have binding " << update->dstBinding; *error_msg = error_str.str(); return false; } DescriptorSetLayout::ConstBindingIterator dest(dest_layout.get(), update->dstBinding); // Make sure binding isn't empty if (0 == dest.GetDescriptorCount()) { *error_code = "VUID-VkWriteDescriptorSet-dstBinding-00316"; std::stringstream error_str; error_str << dest_set->StringifySetAndLayout() << " cannot updated binding " << update->dstBinding << " that has 0 descriptors"; *error_msg = error_str.str(); return false; } // Verify idle ds if (dest_set->in_use.load() && !(dest.GetDescriptorBindingFlags() & (VK_DESCRIPTOR_BINDING_UPDATE_UNUSED_WHILE_PENDING_BIT_EXT | VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT_EXT))) { // TODO : Re-using Free Idle error code, need write update idle error code *error_code = "VUID-vkFreeDescriptorSets-pDescriptorSets-00309"; std::stringstream error_str; error_str << "Cannot call " << func_name << " to perform write update on " << dest_set->StringifySetAndLayout() << " that is in use by a command buffer"; *error_msg = error_str.str(); return false; } // We know that binding is valid, verify update and do update on each descriptor auto start_idx = dest.GetGlobalIndexRange().start + update->dstArrayElement; auto type = dest.GetType(); if (type != update->descriptorType) { *error_code = "VUID-VkWriteDescriptorSet-descriptorType-00319"; std::stringstream error_str; error_str << "Attempting write update to " << dest_set->StringifySetAndLayout() << " binding #" << update->dstBinding << " with type " << string_VkDescriptorType(type) << " but update type is " << string_VkDescriptorType(update->descriptorType); *error_msg = error_str.str(); return false; } auto total_descriptors = dest_layout->GetTotalDescriptorCount(); if (update->descriptorCount > (total_descriptors - start_idx)) { *error_code = "VUID-VkWriteDescriptorSet-dstArrayElement-00321"; std::stringstream error_str; error_str << "Attempting write update to " << dest_set->StringifySetAndLayout() << " binding #" << update->dstBinding << " with " << total_descriptors - start_idx << " descriptors in that binding and all successive bindings of the set, but update of " << update->descriptorCount << " descriptors combined with update array element offset of " << update->dstArrayElement << " oversteps the available number of consecutive descriptors"; *error_msg = error_str.str(); return false; } if (type == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT) { if ((update->dstArrayElement % 4) != 0) { *error_code = "VUID-VkWriteDescriptorSet-descriptorType-02219"; 
std::stringstream error_str; error_str << "Attempting write update to " << dest_set->StringifySetAndLayout() << " binding #" << update->dstBinding << " with " << "dstArrayElement " << update->dstArrayElement << " not a multiple of 4"; *error_msg = error_str.str(); return false; } if ((update->descriptorCount % 4) != 0) { *error_code = "VUID-VkWriteDescriptorSet-descriptorType-02220"; std::stringstream error_str; error_str << "Attempting write update to " << dest_set->StringifySetAndLayout() << " binding #" << update->dstBinding << " with " << "descriptorCount " << update->descriptorCount << " not a multiple of 4"; *error_msg = error_str.str(); return false; } const auto *write_inline_info = lvl_find_in_chain<VkWriteDescriptorSetInlineUniformBlockEXT>(update->pNext); if (!write_inline_info || write_inline_info->dataSize != update->descriptorCount) { *error_code = "VUID-VkWriteDescriptorSet-descriptorType-02221"; std::stringstream error_str; if (!write_inline_info) { error_str << "Attempting write update to " << dest_set->StringifySetAndLayout() << " binding #" << update->dstBinding << " with " << "VkWriteDescriptorSetInlineUniformBlockEXT missing"; } else { error_str << "Attempting write update to " << dest_set->StringifySetAndLayout() << " binding #" << update->dstBinding << " with " << "VkWriteDescriptorSetInlineUniformBlockEXT dataSize " << write_inline_info->dataSize << " not equal to " << "VkWriteDescriptorSet descriptorCount " << update->descriptorCount; } *error_msg = error_str.str(); return false; } // This error is probably unreachable due to the previous two errors if (write_inline_info && (write_inline_info->dataSize % 4) != 0) { *error_code = "VUID-VkWriteDescriptorSetInlineUniformBlockEXT-dataSize-02222"; std::stringstream error_str; error_str << "Attempting write update to " << dest_set->StringifySetAndLayout() << " binding #" << update->dstBinding << " with " << "VkWriteDescriptorSetInlineUniformBlockEXT dataSize " << write_inline_info->dataSize << " not a multiple of 4"; *error_msg = error_str.str(); return false; } } // Verify consecutive bindings match (if needed) if (!VerifyUpdateConsistency(DescriptorSetLayout::ConstBindingIterator(dest_layout.get(), update->dstBinding), update->dstArrayElement, update->descriptorCount, "write update to", dest_set->GetSet(), error_msg)) { // TODO : Should break out "consecutive binding updates" language into valid usage statements *error_code = "VUID-VkWriteDescriptorSet-dstArrayElement-00321"; return false; } // Update is within bounds and consistent so last step is to validate update contents if (!VerifyWriteUpdateContents(dest_set, update, start_idx, func_name, error_code, error_msg)) { std::stringstream error_str; error_str << "Write update to " << dest_set->StringifySetAndLayout() << " binding #" << update->dstBinding << " failed with error message: " << error_msg->c_str(); *error_msg = error_str.str(); return false; } // All checks passed, update is clean return true; } // Verify that the contents of the update are ok, but don't perform actual update bool CoreChecks::VerifyWriteUpdateContents(const DescriptorSet *dest_set, const VkWriteDescriptorSet *update, const uint32_t index, const char *func_name, std::string *error_code, std::string *error_msg) { using ImageSamplerDescriptor = cvdescriptorset::ImageSamplerDescriptor; using SamplerDescriptor = cvdescriptorset::SamplerDescriptor; switch (update->descriptorType) { case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER: { for (uint32_t di = 0; di < update->descriptorCount; ++di) { // 
Validate image auto image_view = update->pImageInfo[di].imageView; auto image_layout = update->pImageInfo[di].imageLayout; if (!ValidateImageUpdate(image_view, image_layout, update->descriptorType, func_name, error_code, error_msg)) { std::stringstream error_str; error_str << "Attempted write update to combined image sampler descriptor failed due to: " << error_msg->c_str(); *error_msg = error_str.str(); return false; } if (device_extensions.vk_khr_sampler_ycbcr_conversion) { ImageSamplerDescriptor *desc = (ImageSamplerDescriptor *)dest_set->GetDescriptorFromGlobalIndex(index + di); if (desc->IsImmutableSampler()) { auto sampler_state = GetSamplerState(desc->GetSampler()); auto iv_state = GetImageViewState(image_view); if (iv_state && sampler_state) { if (iv_state->samplerConversion != sampler_state->samplerConversion) { *error_code = "VUID-VkWriteDescriptorSet-descriptorType-01948"; std::stringstream error_str; error_str << "Attempted write update to combined image sampler and image view and sampler ycbcr " "conversions are not identical, sampler: " << desc->GetSampler() << " image view: " << iv_state->image_view << "."; *error_msg = error_str.str(); return false; } } } else { auto iv_state = GetImageViewState(image_view); if (iv_state && (iv_state->samplerConversion != VK_NULL_HANDLE)) { *error_code = "VUID-VkWriteDescriptorSet-descriptorType-01947"; std::stringstream error_str; error_str << "Because dstSet (" << update->dstSet << ") is bound to image view (" << iv_state->image_view << ") that includes a YCBCR conversion, it must have been allocated with a layout that " "includes an immutable sampler."; *error_msg = error_str.str(); return false; } } } } } // fall through case VK_DESCRIPTOR_TYPE_SAMPLER: { for (uint32_t di = 0; di < update->descriptorCount; ++di) { SamplerDescriptor *desc = (SamplerDescriptor *)dest_set->GetDescriptorFromGlobalIndex(index + di); if (!desc->IsImmutableSampler()) { if (!ValidateSampler(update->pImageInfo[di].sampler)) { *error_code = "VUID-VkWriteDescriptorSet-descriptorType-00325"; std::stringstream error_str; error_str << "Attempted write update to sampler descriptor with invalid sampler: " << update->pImageInfo[di].sampler << "."; *error_msg = error_str.str(); return false; } } else { // TODO : Warn here } } break; } case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE: case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT: case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: { for (uint32_t di = 0; di < update->descriptorCount; ++di) { auto image_view = update->pImageInfo[di].imageView; auto image_layout = update->pImageInfo[di].imageLayout; if (!ValidateImageUpdate(image_view, image_layout, update->descriptorType, func_name, error_code, error_msg)) { std::stringstream error_str; error_str << "Attempted write update to image descriptor failed due to: " << error_msg->c_str(); *error_msg = error_str.str(); return false; } } break; } case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER: case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: { for (uint32_t di = 0; di < update->descriptorCount; ++di) { auto buffer_view = update->pTexelBufferView[di]; auto bv_state = GetBufferViewState(buffer_view); if (!bv_state) { *error_code = "VUID-VkWriteDescriptorSet-descriptorType-00323"; std::stringstream error_str; error_str << "Attempted write update to texel buffer descriptor with invalid buffer view: " << buffer_view; *error_msg = error_str.str(); return false; } auto buffer = bv_state->create_info.buffer; auto buffer_state = GetBufferState(buffer); // Verify that buffer underlying the view hasn't been destroyed 
prematurely if (!buffer_state) { *error_code = "VUID-VkWriteDescriptorSet-descriptorType-00323"; std::stringstream error_str; error_str << "Attempted write update to texel buffer descriptor failed because underlying buffer (" << buffer << ") has been destroyed: " << error_msg->c_str(); *error_msg = error_str.str(); return false; } else if (!cvdescriptorset::ValidateBufferUsage(buffer_state, update->descriptorType, error_code, error_msg)) { std::stringstream error_str; error_str << "Attempted write update to texel buffer descriptor failed due to: " << error_msg->c_str(); *error_msg = error_str.str(); return false; } } break; } case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER: case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC: case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER: case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: { for (uint32_t di = 0; di < update->descriptorCount; ++di) { if (!ValidateBufferUpdate(update->pBufferInfo + di, update->descriptorType, func_name, error_code, error_msg)) { std::stringstream error_str; error_str << "Attempted write update to buffer descriptor failed due to: " << error_msg->c_str(); *error_msg = error_str.str(); return false; } } break; } case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT: break; case VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_NV: // XXX TODO break; default: assert(0); // We've already verified update type so should never get here break; } // All checks passed so update contents are good return true; }
1
11,094
Why remove the crash protection? We're just going to get a bug filed on it.
KhronosGroup-Vulkan-ValidationLayers
cpp
@@ -24,6 +24,10 @@ export const HEADERS = {
   TEXT_PLAIN: 'text/plain',
   TEXT_HTML: 'text/html',
   FORWARDED_PROTO: 'X-Forwarded-Proto',
+  XFRAMES_OPTIONS: 'X-Frame-Options',
+  CSP: 'Content-Security-Policy',
+  CTO: 'X-Content-Type-Options',
+  XSS: 'X-XSS-Protection',
   ETAG: 'ETag',
   JSON_CHARSET: 'application/json; charset=utf-8',
   OCTET_STREAM: 'application/octet-stream; charset=utf-8',
1
/**
 * @prettier
 */

// @flow

export const DEFAULT_PORT: string = '4873';
export const DEFAULT_PROTOCOL: string = 'http';
export const DEFAULT_DOMAIN: string = 'localhost';

export const TIME_EXPIRATION_24H: string = '24h';
export const TIME_EXPIRATION_7D: string = '7d';

export const DIST_TAGS = 'dist-tags';
export const USERS = 'users';

export const DEFAULT_MIN_LIMIT_PASSWORD: number = 3;

export const DEFAULT_USER = 'Anonymous';

export const keyPem = 'verdaccio-key.pem';
export const certPem = 'verdaccio-cert.pem';
export const csrPem = 'verdaccio-csr.pem';

export const HEADERS = {
  JSON: 'application/json',
  CONTENT_TYPE: 'Content-type',
  TEXT_PLAIN: 'text/plain',
  TEXT_HTML: 'text/html',
  FORWARDED_PROTO: 'X-Forwarded-Proto',
  ETAG: 'ETag',
  JSON_CHARSET: 'application/json; charset=utf-8',
  OCTET_STREAM: 'application/octet-stream; charset=utf-8',
  TEXT_CHARSET: 'text/plain; charset=utf-8',
  WWW_AUTH: 'WWW-Authenticate',
  GZIP: 'gzip',
};

export const CHARACTER_ENCODING = {
  UTF8: 'utf8',
};

export const HEADER_TYPE = {
  CONTENT_ENCODING: 'content-encoding',
  CONTENT_TYPE: 'content-type',
  CONTENT_LENGTH: 'content-length',
  ACCEPT_ENCODING: 'accept-encoding',
};

export const ERROR_CODE = {
  token_required: 'token is required',
};

export const TOKEN_BASIC = 'Basic';
export const TOKEN_BEARER = 'Bearer';

export const DEFAULT_REGISTRY = 'https://registry.npmjs.org';
export const DEFAULT_UPLINK = 'npmjs';

export const ROLES = {
  $ALL: '$all',
  ALL: 'all',
  $AUTH: '$authenticated',
  $ANONYMOUS: '$anonymous',
  DEPRECATED_ALL: '@all',
  DEPRECATED_AUTH: '@authenticated',
  DEPRECATED_ANONYMOUS: '@anonymous',
};

export const HTTP_STATUS = {
  OK: 200,
  CREATED: 201,
  MULTIPLE_CHOICES: 300,
  NOT_MODIFIED: 304,
  BAD_REQUEST: 400,
  UNAUTHORIZED: 401,
  FORBIDDEN: 403,
  NOT_FOUND: 404,
  CONFLICT: 409,
  UNSUPPORTED_MEDIA: 415,
  BAD_DATA: 422,
  INTERNAL_ERROR: 500,
  SERVICE_UNAVAILABLE: 503,
  LOOP_DETECTED: 508,
};

export const API_MESSAGE = {
  PKG_CREATED: 'created new package',
  PKG_CHANGED: 'package changed',
  PKG_REMOVED: 'package removed',
  PKG_PUBLISHED: 'package published',
  TARBALL_UPLOADED: 'tarball uploaded successfully',
  TARBALL_REMOVED: 'tarball removed',
  TAG_UPDATED: 'tags updated',
  TAG_REMOVED: 'tag removed',
  TAG_ADDED: 'package tagged',
  LOGGED_OUT: 'Logged out',
};

export const SUPPORT_ERRORS = {
  PLUGIN_MISSING_INTERFACE: 'the plugin does not provide implementation of the requested feature',
  TFA_DISABLED: 'the two-factor authentication is not yet supported',
};

export const API_ERROR = {
  PASSWORD_SHORT: (passLength: number = DEFAULT_MIN_LIMIT_PASSWORD) =>
    `The provided password is too short. Please pick a password longer than ${passLength} characters.`,
  MUST_BE_LOGGED: 'You must be logged in to publish packages.',
  PLUGIN_ERROR: 'bug in the auth plugin system',
  CONFIG_BAD_FORMAT: 'config file must be an object',
  BAD_USERNAME_PASSWORD: 'bad username/password, access denied',
  NO_PACKAGE: 'no such package available',
  PACKAGE_CANNOT_BE_ADDED: 'this package cannot be added',
  BAD_DATA: 'bad data',
  NOT_ALLOWED: 'not allowed to access package',
  NOT_ALLOWED_PUBLISH: 'not allowed to publish package',
  INTERNAL_SERVER_ERROR: 'internal server error',
  UNKNOWN_ERROR: 'unknown error',
  NOT_PACKAGE_UPLINK: 'package does not exist on uplink',
  UPLINK_OFFLINE_PUBLISH: 'one of the uplinks is down, refuse to publish',
  UPLINK_OFFLINE: 'uplink is offline',
  CONTENT_MISMATCH: 'content length mismatch',
  NOT_FILE_UPLINK: "file doesn't exist on uplink",
  MAX_USERS_REACHED: 'maximum amount of users reached',
  VERSION_NOT_EXIST: "this version doesn't exist",
  FILE_NOT_FOUND: 'File not found',
  BAD_STATUS_CODE: 'bad status code',
  PACKAGE_EXIST: 'this package is already present',
  BAD_AUTH_HEADER: 'bad authorization header',
  WEB_DISABLED: 'Web interface is disabled in the config file',
  DEPRECATED_BASIC_HEADER: 'basic authentication is deprecated, please use JWT instead',
  BAD_FORMAT_USER_GROUP: 'user groups is different than an array',
  RESOURCE_UNAVAILABLE: 'resource unavailable',
  BAD_PACKAGE_DATA: 'bad incoming package data',
  USERNAME_PASSWORD_REQUIRED: 'username and password is required',
  USERNAME_ALREADY_REGISTERED: 'username is already registered',
};

export const APP_ERROR = {
  CONFIG_NOT_VALID: 'CONFIG: it does not look like a valid config file',
  PROFILE_ERROR: 'profile unexpected error',
  PASSWORD_VALIDATION: 'not valid password',
};

export const DEFAULT_NO_README = 'ERROR: No README data found!';
export const MODULE_NOT_FOUND = 'MODULE_NOT_FOUND';

export const WEB_TITLE = 'Verdaccio';

export const PACKAGE_ACCESS = {
  SCOPE: '@*/*',
  ALL: '**',
};

export const UPDATE_BANNER = {
  CHANGELOG_URL: 'https://github.com/verdaccio/verdaccio/releases/tag/',
};

export const STORAGE = {
  PACKAGE_FILE_NAME: 'package.json',
  FILE_EXIST_ERROR: 'EEXISTS',
  NO_SUCH_FILE_ERROR: 'ENOENT',
  DEFAULT_REVISION: '0-0000000000000000',
};
1
19,982
Maybe be consistent and name it `FRAME_OPTIONS`.
verdaccio-verdaccio
js
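To make the review note for this row concrete, here is a minimal sketch of the rename the reviewer suggests, applied to the four headers added by the patch above. The FRAME_OPTIONS key is the reviewer's proposed name, not code that exists in the repository, and only the new entries are shown:

// Hypothetical revision following the review comment: rename XFRAMES_OPTIONS
// to FRAME_OPTIONS so the key tracks the 'X-Frame-Options' header it holds.
export const HEADERS = {
  FRAME_OPTIONS: 'X-Frame-Options',
  CSP: 'Content-Security-Policy',
  CTO: 'X-Content-Type-Options',
  XSS: 'X-XSS-Protection',
};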
@@ -170,15 +170,6 @@ Blockly.Connection.prototype.connect_ = function(childConnection) {
       if (!orphanBlock.outputConnection) {
         throw 'Orphan block does not have an output connection.';
       }
-      // Attempt to reattach the orphan at the end of the newly inserted
-      // block. Since this block may be a row, walk down to the end
-      // or to the first (and only) shadow block.
-      var connection = Blockly.Connection.lastConnectionInRow_(
-          childBlock, orphanBlock);
-      if (connection) {
-        orphanBlock.outputConnection.connect(connection);
-        orphanBlock = null;
-      }
     } else if (parentConnection.type == Blockly.NEXT_STATEMENT) {
       // Statement connections.
       // Statement blocks may be inserted into the middle of a stack.
1
/** * @license * Visual Blocks Editor * * Copyright 2011 Google Inc. * https://developers.google.com/blockly/ * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * @fileoverview Components for creating connections between blocks. * @author [email protected] (Neil Fraser) */ 'use strict'; goog.provide('Blockly.Connection'); goog.require('goog.asserts'); goog.require('goog.dom'); /** * Class for a connection between blocks. * @param {!Blockly.Block} source The block establishing this connection. * @param {number} type The type of the connection. * @constructor */ Blockly.Connection = function(source, type) { /** * @type {!Blockly.Block} * @private */ this.sourceBlock_ = source; /** @type {number} */ this.type = type; // Shortcut for the databases for this connection's workspace. if (source.workspace.connectionDBList) { this.db_ = source.workspace.connectionDBList[type]; this.dbOpposite_ = source.workspace.connectionDBList[Blockly.OPPOSITE_TYPE[type]]; this.hidden_ = !this.db_; } }; /** * Constants for checking whether two connections are compatible. */ Blockly.Connection.CAN_CONNECT = 0; Blockly.Connection.REASON_SELF_CONNECTION = 1; Blockly.Connection.REASON_WRONG_TYPE = 2; Blockly.Connection.REASON_TARGET_NULL = 3; Blockly.Connection.REASON_CHECKS_FAILED = 4; Blockly.Connection.REASON_DIFFERENT_WORKSPACES = 5; Blockly.Connection.REASON_SHADOW_PARENT = 6; /** * Connection this connection connects to. Null if not connected. * @type {Blockly.Connection} */ Blockly.Connection.prototype.targetConnection = null; /** * List of compatible value types. Null if all types are compatible. * @type {Array} * @private */ Blockly.Connection.prototype.check_ = null; /** * DOM representation of a shadow block, or null if none. * @type {Element} * @private */ Blockly.Connection.prototype.shadowDom_ = null; /** * Horizontal location of this connection. * @type {number} * @private */ Blockly.Connection.prototype.x_ = 0; /** * Vertical location of this connection. * @type {number} * @private */ Blockly.Connection.prototype.y_ = 0; /** * Has this connection been added to the connection database? * @type {boolean} * @private */ Blockly.Connection.prototype.inDB_ = false; /** * Connection database for connections of this type on the current workspace. * @type {Blockly.ConnectionDB} * @private */ Blockly.Connection.prototype.db_ = null; /** * Connection database for connections compatible with this type on the * current workspace. * @type {Blockly.ConnectionDB} * @private */ Blockly.Connection.prototype.dbOpposite_ = null; /** * Whether this connections is hidden (not tracked in a database) or not. * @type {boolean} * @private */ Blockly.Connection.prototype.hidden_ = null; /** * Connect two connections together. This is the connection on the superior * block. * @param {!Blockly.Connection} childConnection Connection on inferior block. 
* @private */ Blockly.Connection.prototype.connect_ = function(childConnection) { var parentConnection = this; var parentBlock = parentConnection.getSourceBlock(); var childBlock = childConnection.getSourceBlock(); var isSurroundingC = false; if (parentConnection == parentBlock.getFirstStatementConnection()) { isSurroundingC = true; } // Disconnect any existing parent on the child connection. if (childConnection.isConnected()) { // Scratch-specific behaviour: // If we're using a c-shaped block to surround a stack, remember where the // stack used to be connected. if (isSurroundingC) { var previousParentConnection = childConnection.targetConnection; } childConnection.disconnect(); } if (parentConnection.isConnected()) { // Other connection is already connected to something. // Disconnect it and reattach it or bump it as needed. var orphanBlock = parentConnection.targetBlock(); var shadowDom = parentConnection.getShadowDom(); // Temporarily set the shadow DOM to null so it does not respawn. parentConnection.setShadowDom(null); // Displaced shadow blocks dissolve rather than reattaching or bumping. if (orphanBlock.isShadow()) { // Save the shadow block so that field values are preserved. shadowDom = Blockly.Xml.blockToDom(orphanBlock); orphanBlock.dispose(); orphanBlock = null; } else if (parentConnection.type == Blockly.INPUT_VALUE) { // Value connections. // If female block is already connected, disconnect and bump the male. if (!orphanBlock.outputConnection) { throw 'Orphan block does not have an output connection.'; } // Attempt to reattach the orphan at the end of the newly inserted // block. Since this block may be a row, walk down to the end // or to the first (and only) shadow block. var connection = Blockly.Connection.lastConnectionInRow_( childBlock, orphanBlock); if (connection) { orphanBlock.outputConnection.connect(connection); orphanBlock = null; } } else if (parentConnection.type == Blockly.NEXT_STATEMENT) { // Statement connections. // Statement blocks may be inserted into the middle of a stack. // Split the stack. if (!orphanBlock.previousConnection) { throw 'Orphan block does not have a previous connection.'; } // Attempt to reattach the orphan at the bottom of the newly inserted // block. Since this block may be a stack, walk down to the end. var newBlock = childBlock; while (newBlock.nextConnection) { var nextBlock = newBlock.getNextBlock(); if (nextBlock && !nextBlock.isShadow()) { newBlock = nextBlock; } else { if (orphanBlock.previousConnection.checkType_( newBlock.nextConnection)) { newBlock.nextConnection.connect(orphanBlock.previousConnection); orphanBlock = null; } break; } } } if (orphanBlock) { // Unable to reattach orphan. parentConnection.disconnect(); if (Blockly.Events.recordUndo) { // Bump it off to the side after a moment. var group = Blockly.Events.getGroup(); setTimeout(function() { // Verify orphan hasn't been deleted or reconnected (user on meth). if (orphanBlock.workspace && !orphanBlock.getParent()) { Blockly.Events.setGroup(group); if (orphanBlock.outputConnection) { orphanBlock.outputConnection.bumpAwayFrom_(parentConnection); } else if (orphanBlock.previousConnection) { orphanBlock.previousConnection.bumpAwayFrom_(parentConnection); } Blockly.Events.setGroup(false); } }, Blockly.BUMP_DELAY); } } // Restore the shadow DOM. 
parentConnection.setShadowDom(shadowDom); } if (isSurroundingC && previousParentConnection) { previousParentConnection.connect(parentBlock.previousConnection); } var event; if (Blockly.Events.isEnabled()) { event = new Blockly.Events.Move(childBlock); } // Establish the connections. Blockly.Connection.connectReciprocally_(parentConnection, childConnection); // Demote the inferior block so that one is a child of the superior one. childBlock.setParent(parentBlock); if (event) { event.recordNew(); Blockly.Events.fire(event); } }; /** * Sever all links to this connection (not including from the source object). */ Blockly.Connection.prototype.dispose = function() { if (this.isConnected()) { throw 'Disconnect connection before disposing of it.'; } if (this.inDB_) { this.db_.removeConnection_(this); } if (Blockly.highlightedConnection_ == this) { Blockly.highlightedConnection_ = null; } if (Blockly.localConnection_ == this) { Blockly.localConnection_ = null; } this.db_ = null; this.dbOpposite_ = null; }; /** * @return {boolean} true if the connection is not connected or is connected to * an insertion marker, false otherwise. */ Blockly.Connection.prototype.isConnectedToNonInsertionMarker = function() { return this.targetConnection && !this.targetBlock().isInsertionMarker(); }; /** * Get the source block for this connection. * @return {Blockly.Block} The source block, or null if there is none. */ Blockly.Connection.prototype.getSourceBlock = function() { return this.sourceBlock_; }; /** * Does the connection belong to a superior block (higher in the source stack)? * @return {boolean} True if connection faces down or right. */ Blockly.Connection.prototype.isSuperior = function() { return this.type == Blockly.INPUT_VALUE || this.type == Blockly.NEXT_STATEMENT; }; /** * Is the connection connected? * @return {boolean} True if connection is connected to another connection. */ Blockly.Connection.prototype.isConnected = function() { return !!this.targetConnection; }; /** * Checks whether the current connection can connect with the target * connection. * @param {Blockly.Connection} target Connection to check compatibility with. * @return {number} Blockly.Connection.CAN_CONNECT if the connection is legal, * an error code otherwise. * @private */ Blockly.Connection.prototype.canConnectWithReason_ = function(target) { if (!target) { return Blockly.Connection.REASON_TARGET_NULL; } if (this.isSuperior()) { var blockA = this.sourceBlock_; var blockB = target.getSourceBlock(); } else { var blockB = this.sourceBlock_; var blockA = target.getSourceBlock(); } if (blockA && blockA == blockB) { return Blockly.Connection.REASON_SELF_CONNECTION; } else if (target.type != Blockly.OPPOSITE_TYPE[this.type]) { return Blockly.Connection.REASON_WRONG_TYPE; } else if (blockA && blockB && blockA.workspace !== blockB.workspace) { return Blockly.Connection.REASON_DIFFERENT_WORKSPACES; } else if (!this.checkType_(target)) { return Blockly.Connection.REASON_CHECKS_FAILED; } else if (blockA.isShadow() && !blockB.isShadow()) { return Blockly.Connection.REASON_SHADOW_PARENT; } return Blockly.Connection.CAN_CONNECT; }; /** * Checks whether the current connection and target connection are compatible * and throws an exception if they are not. * @param {Blockly.Connection} target The connection to check compatibility * with. 
* @private */ Blockly.Connection.prototype.checkConnection_ = function(target) { switch (this.canConnectWithReason_(target)) { case Blockly.Connection.CAN_CONNECT: break; case Blockly.Connection.REASON_SELF_CONNECTION: throw 'Attempted to connect a block to itself.'; case Blockly.Connection.REASON_DIFFERENT_WORKSPACES: // Usually this means one block has been deleted. throw 'Blocks not on same workspace.'; case Blockly.Connection.REASON_WRONG_TYPE: throw 'Attempt to connect incompatible types.'; case Blockly.Connection.REASON_TARGET_NULL: throw 'Target connection is null.'; case Blockly.Connection.REASON_CHECKS_FAILED: throw 'Connection checks failed.'; case Blockly.Connection.REASON_SHADOW_PARENT: throw 'Connecting non-shadow to shadow block.'; default: throw 'Unknown connection failure: this should never happen!'; } }; /** * Check if the two connections can be dragged to connect to each other. * This is used by the connection database when searching for the closest * connection. * @param {!Blockly.Connection} candidate A nearby connection to check. * @return {boolean} True if the connection is allowed, false otherwise. */ Blockly.Connection.prototype.isConnectionAllowed = function(candidate) { // Don't consider insertion markers. if (candidate.sourceBlock_.isInsertionMarker()) { return false; } // Type checking. var canConnect = this.canConnectWithReason_(candidate); if (canConnect != Blockly.Connection.CAN_CONNECT) { return false; } var firstStatementConnection = this.sourceBlock_.getFirstStatementConnection(); switch (candidate.type) { case Blockly.PREVIOUS_STATEMENT: { if (!firstStatementConnection || this != firstStatementConnection) { if (this.targetConnection) { return false; } if (candidate.targetConnection) { // If the other side of this connection is the active insertion marker // connection, we've obviously already decided that this is a good // connection. if (candidate.targetConnection == Blockly.insertionMarkerConnection_) { return true; } else { return false; } } } // Scratch-specific behaviour: // If this is a c-shaped block, statement blocks cannot be connected // anywhere other than inside the first statement input. if (firstStatementConnection) { // Can't connect if there is already a block inside the first statement // input. if (this == firstStatementConnection) { if (this.targetConnection) { return false; } } // Can't connect this block's next connection unless we're connecting // in front of the first block on a stack. else if (this == this.sourceBlock_.nextConnection && candidate.isConnectedToNonInsertionMarker()) { return false; } } break; } case Blockly.OUTPUT_VALUE: { // Can't drag an input to an output--you have to move the inferior block. return false; } case Blockly.INPUT_VALUE: { // Offering to connect the left (male) of a value block to an already // connected value pair is ok, we'll splice it in. // However, don't offer to splice into an unmovable block. if (candidate.targetConnection && !candidate.targetBlock().isMovable() && !candidate.targetBlock().isShadow()) { return false; } break; } case Blockly.NEXT_STATEMENT: { // Scratch-specific behaviour: // If this is a c-block, we can't connect this block's // previous connection unless we're connecting to the end of the last // block on a stack or there's already a block connected inside the c. 
if (firstStatementConnection && this == this.sourceBlock_.previousConnection && candidate.isConnectedToNonInsertionMarker() && !firstStatementConnection.targetConnection) { return false; } // Don't let a block with no next connection bump other blocks out of the // stack. But covering up a shadow block or stack of shadow blocks is // fine. Similarly, replacing a terminal statement with another terminal // statement is allowed. if (candidate.isConnectedToNonInsertionMarker() && !this.sourceBlock_.nextConnection && !candidate.targetBlock().isShadow() && candidate.targetBlock().nextConnection) { return false; } break; } default: throw 'Unknown connection type in isConnectionAllowed'; } // Don't let blocks try to connect to themselves or ones they nest. if (Blockly.draggingConnections_.indexOf(candidate) != -1) { return false; } return true; }; /** * Connect this connection to another connection. * @param {!Blockly.Connection} otherConnection Connection to connect to. */ Blockly.Connection.prototype.connect = function(otherConnection) { if (this.targetConnection == otherConnection) { // Already connected together. NOP. return; } this.checkConnection_(otherConnection); // Determine which block is superior (higher in the source stack). if (this.isSuperior()) { // Superior block. this.connect_(otherConnection); } else { // Inferior block. otherConnection.connect_(this); } }; /** * Update two connections to target each other. * @param {Blockly.Connection} first The first connection to update. * @param {Blockly.Connection} second The second conneciton to update. * @private */ Blockly.Connection.connectReciprocally_ = function(first, second) { goog.asserts.assert(first && second, 'Cannot connect null connections.'); first.targetConnection = second; second.targetConnection = first; }; /** * Does the given block have one and only one connection point that will accept * an orphaned block? * @param {!Blockly.Block} block The superior block. * @param {!Blockly.Block} orphanBlock The inferior block. * @return {Blockly.Connection} The suitable connection point on 'block', * or null. * @private */ Blockly.Connection.singleConnection_ = function(block, orphanBlock) { var connection = false; for (var i = 0; i < block.inputList.length; i++) { var thisConnection = block.inputList[i].connection; if (thisConnection && thisConnection.type == Blockly.INPUT_VALUE && orphanBlock.outputConnection.checkType_(thisConnection)) { if (connection) { return null; // More than one connection. } connection = thisConnection; } } return connection; }; /** * Walks down a row a blocks, at each stage checking if there are any * connections that will accept the orphaned block. If at any point there * are zero or multiple eligible connections, returns null. Otherwise * returns the only input on the last block in the chain. * Terminates early for shadow blocks. * @param {!Blockly.Block} startBlock The block on which to start the search. * @param {!Blockly.Block} orphanBlock The block that is looking for a home. * @return {Blockly.Connection} The suitable connection point on the chain * of blocks, or null. * @private */ Blockly.Connection.lastConnectionInRow_ = function(startBlock, orphanBlock) { var newBlock = startBlock; var connection; while (connection = Blockly.Connection.singleConnection_( /** @type {!Blockly.Block} */ (newBlock), orphanBlock)) { // '=' is intentional in line above. newBlock = connection.targetBlock(); if (!newBlock || newBlock.isShadow()) { return connection; } } return null; }; /** * Disconnect this connection. 
*/ Blockly.Connection.prototype.disconnect = function() { var otherConnection = this.targetConnection; goog.asserts.assert(otherConnection, 'Source connection not connected.'); goog.asserts.assert(otherConnection.targetConnection == this, 'Target connection not connected to source connection.'); var parentBlock, childBlock, parentConnection; if (this.isSuperior()) { // Superior block. parentBlock = this.sourceBlock_; childBlock = otherConnection.getSourceBlock(); parentConnection = this; } else { // Inferior block. parentBlock = otherConnection.getSourceBlock(); childBlock = this.sourceBlock_; parentConnection = otherConnection; } this.disconnectInternal_(parentBlock, childBlock); parentConnection.respawnShadow_(); }; /** * Disconnect two blocks that are connected by this connection. * @param {!Blockly.Block} parentBlock The superior block. * @param {!Blockly.Block} childBlock The inferior block. * @private */ Blockly.Connection.prototype.disconnectInternal_ = function(parentBlock, childBlock) { var event; if (Blockly.Events.isEnabled()) { event = new Blockly.Events.Move(childBlock); } var otherConnection = this.targetConnection; otherConnection.targetConnection = null; this.targetConnection = null; childBlock.setParent(null); if (event) { event.recordNew(); Blockly.Events.fire(event); } }; /** * Respawn the shadow block if there was one connected to the this connection. * @private */ Blockly.Connection.prototype.respawnShadow_ = function() { var parentBlock = this.getSourceBlock(); var shadow = this.getShadowDom(); if (parentBlock.workspace && shadow && Blockly.Events.recordUndo) { var blockShadow = Blockly.Xml.domToBlock(shadow, parentBlock.workspace); if (blockShadow.outputConnection) { this.connect(blockShadow.outputConnection); } else if (blockShadow.previousConnection) { this.connect(blockShadow.previousConnection); } else { throw 'Child block does not have output or previous statement.'; } } }; /** * Returns the block that this connection connects to. * @return {Blockly.Block} The connected block or null if none is connected. */ Blockly.Connection.prototype.targetBlock = function() { if (this.isConnected()) { return this.targetConnection.getSourceBlock(); } return null; }; /** * Is this connection compatible with another connection with respect to the * value type system. E.g. square_root("Hello") is not compatible. * @param {!Blockly.Connection} otherConnection Connection to compare against. * @return {boolean} True if the connections share a type. * @private */ Blockly.Connection.prototype.checkType_ = function(otherConnection) { if (!this.check_ || !otherConnection.check_) { // One or both sides are promiscuous enough that anything will fit. return true; } // Find any intersection in the check lists. for (var i = 0; i < this.check_.length; i++) { if (otherConnection.check_.indexOf(this.check_[i]) != -1) { return true; } } // No intersection. return false; }; /** * Change a connection's compatibility. * @param {*} check Compatible value type or list of value types. * Null if all types are compatible. * @return {!Blockly.Connection} The connection being modified * (to allow chaining). */ Blockly.Connection.prototype.setCheck = function(check) { if (check) { // Ensure that check is in an array. if (!goog.isArray(check)) { check = [check]; } this.check_ = check; // The new value type may not be compatible with the existing connection. if (this.isConnected() && !this.checkType_(this.targetConnection)) { var child = this.isSuperior() ? 
this.targetBlock() : this.sourceBlock_; child.unplug(); // Bump away. this.sourceBlock_.bumpNeighbours_(); } } else { this.check_ = null; } return this; }; /** * Returns a shape enum for this connection. * Used in scratch-blocks to draw unoccupied inputs. * @return {number} Enum representing shape. */ Blockly.Connection.prototype.getOutputShape = function() { if (!this.check_) return Blockly.OUTPUT_SHAPE_SQUARE; if (this.check_.indexOf('Boolean') !== -1) { return Blockly.OUTPUT_SHAPE_HEXAGONAL; } if (this.check_.indexOf('Number') !== -1) { return Blockly.OUTPUT_SHAPE_ROUND; } if (this.check_.indexOf('String') !== -1) { return Blockly.OUTPUT_SHAPE_SQUARE; } return Blockly.OUTPUT_SHAPE_SQUARE; }; /** * Change a connection's shadow block. * @param {Element} shadow DOM representation of a block or null. */ Blockly.Connection.prototype.setShadowDom = function(shadow) { this.shadowDom_ = shadow; }; /** * Return a connection's shadow block. * @return {Element} shadow DOM representation of a block or null. */ Blockly.Connection.prototype.getShadowDom = function() { return this.shadowDom_; };
1
7,939
This check/exception is a guard for the next few lines of code. It shouldn't be necessary now. In fact, I don't think you need the if (parentConnection.type == Blockly.INPUT_VALUE) branch at all.
LLK-scratch-blocks
js
@@ -72,8 +72,10 @@ class TestStringMethods(testtools.TestCase): def test_parse_provisioning_output_failure_00(self): res = self.molecule._parse_provisioning_output(TestStringMethods.OUTPUT_MIXED_FAILED) + self.assertFalse(res) def test_parse_provisioning_output_success_00(self): res = self.molecule._parse_provisioning_output(TestStringMethods.OUTPUT_MIXED_SUCCESS) + self.assertTrue(res)
1
# Copyright (c) 2015 Cisco Systems # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. import testtools from molecule.core import Molecule from mock import patch class TestStringMethods(testtools.TestCase): OUTPUT_MIXED_FAILED = """ TASK: [cisco.zuul | Adjust setup script] ************************************** ok: [aio-01-ubuntu] TASK: [cisco.zuul | file ] **************************************************** changed: [aio-01-ubuntu] TASK: [cisco.zuul | Fetch Zuul portal dependencies] *************************** ok: [aio-01-ubuntu] TASK: [cisco.zuul | start zuul service] *************************************** changed: [aio-01-ubuntu] TASK: [cisco.zuul | start zuul merger service] ******************************** changed: [aio-01-ubuntu] NOTIFIED: [cisco.zuul | restart apache2] ************************************** changed: [aio-01-ubuntu] PLAY RECAP ******************************************************************** aio-01-ubuntu : ok=36 changed=29 unreachable=0 failed=0 """ OUTPUT_MIXED_SUCCESS = """ TASK: [cisco.zuul | Adjust setup script] ************************************** ok: [aio-01-ubuntu] TASK: [cisco.zuul | Fetch Zuul portal dependencies] *************************** ok: [aio-01-ubuntu] NOTIFIED: [cisco.zuul | restart apache2] ************************************** changed: [aio-01-ubuntu] PLAY RECAP ******************************************************************** aio-01-ubuntu : ok=36 changed=0 unreachable=0 failed=0 """ def setUp(self): super(TestStringMethods, self).setUp() with patch('molecule.core.Molecule._main') as mocked: mocked.return_value = None self.molecule = Molecule(None) def test_parse_provisioning_output_failure_00(self): res = self.molecule._parse_provisioning_output(TestStringMethods.OUTPUT_MIXED_FAILED) self.assertFalse(res) def test_parse_provisioning_output_success_00(self): res = self.molecule._parse_provisioning_output(TestStringMethods.OUTPUT_MIXED_SUCCESS) self.assertTrue(res)
1
5,669
Could probably move these constants too?
ansible-community-molecule
py
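A minimal sketch of the follow-up the comment above suggests — hoisting the two fixture strings out of TestStringMethods to module level. The string bodies are elided here for brevity; they would be the same PLAY RECAP transcripts as in the file:

# molecule test fixtures as module-level constants (sketch)
OUTPUT_MIXED_FAILED = """..."""   # same failed-run ansible output as above
OUTPUT_MIXED_SUCCESS = """..."""  # same successful-run ansible output as above

class TestStringMethods(testtools.TestCase):
    def test_parse_provisioning_output_failure_00(self):
        # no class qualifier needed once the constants are module-level
        self.assertFalse(self.molecule._parse_provisioning_output(OUTPUT_MIXED_FAILED))

    def test_parse_provisioning_output_success_00(self):
        self.assertTrue(self.molecule._parse_provisioning_output(OUTPUT_MIXED_SUCCESS))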
@@ -100,9 +100,8 @@ func (cmd *Command) Start() (err error) { case <-time.After(1 * time.Minute): err := cmd.mysteriumClient.PingProposal(proposal, signer) if err != nil { - //TODO failed to refresh proposal. Stop everything? log.Error("Failed to ping proposal", err) - cmd.Kill() + // do not stop server on missing ping to discovery. More on this in MYST-362 and MYST-370 } case <-stopPinger: log.Info("Stopping proposal pinger")
1
package server import ( "errors" log "github.com/cihub/seelog" "github.com/mysterium/node/communication" "github.com/mysterium/node/identity" "github.com/mysterium/node/ip" "github.com/mysterium/node/location" "github.com/mysterium/node/nat" "github.com/mysterium/node/openvpn" "github.com/mysterium/node/openvpn/discovery" "github.com/mysterium/node/openvpn/middlewares/state" "github.com/mysterium/node/server" dto_discovery "github.com/mysterium/node/service_discovery/dto" "github.com/mysterium/node/session" "time" ) // Command represent entrypoint for Mysterium server with top level components type Command struct { identityLoader func() (identity.Identity, error) createSigner identity.SignerFactory ipResolver ip.Resolver mysteriumClient server.Client natService nat.NATService locationDetector location.Detector dialogWaiterFactory func(identity identity.Identity) communication.DialogWaiter dialogWaiter communication.DialogWaiter sessionManagerFactory func(serverIP string) session.Manager vpnServerFactory func(sessionManager session.Manager, serviceLocation dto_discovery.Location, providerID identity.Identity, callback state.Callback) *openvpn.Server vpnServer *openvpn.Server } // Start starts server - does not block func (cmd *Command) Start() (err error) { providerID, err := cmd.identityLoader() if err != nil { return err } cmd.dialogWaiter = cmd.dialogWaiterFactory(providerID) providerContact, err := cmd.dialogWaiter.Start() // if for some reason we will need truly external IP, use GetPublicIP() vpnServerIP, err := cmd.ipResolver.GetOutboundIP() if err != nil { return err } cmd.natService.Add(nat.RuleForwarding{ SourceAddress: "10.8.0.0/24", TargetIP: vpnServerIP, }) if err = cmd.natService.Start(); err != nil { return err } serviceLocation, err := detectCountry(cmd.ipResolver, cmd.locationDetector) if err != nil { return err } proposal := discovery.NewServiceProposalWithLocation(providerID, providerContact, serviceLocation) sessionManager := cmd.sessionManagerFactory(vpnServerIP) dialogHandler := session.NewDialogHandler(proposal.ID, sessionManager) if err := cmd.dialogWaiter.ServeDialogs(dialogHandler); err != nil { return err } stopPinger := make(chan int) vpnStateCallback := func(state openvpn.State) { switch state { case openvpn.ConnectedState: log.Info("Open vpn service started") case openvpn.ExitingState: log.Info("Open vpn service exiting") close(stopPinger) } } cmd.vpnServer = cmd.vpnServerFactory(sessionManager, serviceLocation, providerID, vpnStateCallback) if err := cmd.vpnServer.Start(); err != nil { return err } signer := cmd.createSigner(providerID) if err := cmd.mysteriumClient.RegisterProposal(proposal, signer); err != nil { return err } go func() { for { select { case <-time.After(1 * time.Minute): err := cmd.mysteriumClient.PingProposal(proposal, signer) if err != nil { //TODO failed to refresh proposal. Stop everything? 
log.Error("Failed to ping proposal", err) cmd.Kill() } case <-stopPinger: log.Info("Stopping proposal pinger") return } } }() return nil } func detectCountry(ipResolver ip.Resolver, locationDetector location.Detector) (dto_discovery.Location, error) { myIP, err := ipResolver.GetPublicIP() if err != nil { return dto_discovery.Location{}, errors.New("IP detection failed: " + err.Error()) } myCountry, err := locationDetector.DetectCountry(myIP) if err != nil { return dto_discovery.Location{}, errors.New("Country detection failed: " + err.Error()) } log.Info("Country detected: ", myCountry) return dto_discovery.Location{Country: myCountry}, nil } // Wait blocks until server is stopped func (cmd *Command) Wait() error { return cmd.vpnServer.Wait() } // Kill stops server func (cmd *Command) Kill() error { cmd.vpnServer.Stop() err := cmd.dialogWaiter.Stop() if err != nil { return err } err = cmd.natService.Stop() return err }
1
10,621
Maybe 'failed' instead of 'missing'. Also do we really need to write ticket numbers here?
mysteriumnetwork-node
go
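Taken together, the two nits above amount to a one-word change in the new comment plus dropping the ticket references; the hunk would then read roughly as follows (a sketch, not the merged code):

err := cmd.mysteriumClient.PingProposal(proposal, signer)
if err != nil {
	log.Error("Failed to ping proposal", err)
	// do not stop the server on a failed ping to discovery
}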
@@ -26,7 +26,6 @@ import org.apache.solr.common.params.SolrParams; import org.apache.solr.request.SolrQueryRequest; import org.apache.solr.response.transform.DocTransformer; import org.apache.solr.response.transform.TransformerFactory; -import org.bouncycastle.util.Strings; import org.junit.After; import org.junit.BeforeClass; import org.junit.Test;
1
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.solr.response; import java.io.IOException; import org.apache.lucene.index.IndexableField; import org.apache.solr.SolrTestCaseJ4; import org.apache.solr.common.SolrDocument; import org.apache.solr.common.SolrInputDocument; import org.apache.solr.common.params.SolrParams; import org.apache.solr.request.SolrQueryRequest; import org.apache.solr.response.transform.DocTransformer; import org.apache.solr.response.transform.TransformerFactory; import org.bouncycastle.util.Strings; import org.junit.After; import org.junit.BeforeClass; import org.junit.Test; public class TestCustomDocTransformer extends SolrTestCaseJ4 { @BeforeClass public static void beforeClass() throws Exception { initCore("solrconfig-doctransformers.xml","schema.xml"); } @After public void cleanup() throws Exception { assertU(delQ("*:*")); assertU(commit()); } @Test public void testCustomTransformer() throws Exception { // Build a simple index int max = 10; for(int i=0; i<max; i++) { SolrInputDocument sdoc = new SolrInputDocument(); sdoc.addField("id", i); sdoc.addField("subject", "xx"); sdoc.addField("title", "title_"+i); updateJ(jsonAdd(sdoc), null); } assertU(commit()); assertQ(req("q", "*:*"), "//*[@numFound='" + max + "']"); assertQ( req( "q", "*:*", "fl", "id,out:[custom extra=subject,title]"), // Check that the concatenated fields make it in the results "//*[@numFound='" + max + "']", "//str[.='xx#title_0#']", "//str[.='xx#title_1#']", "//str[.='xx#title_2#']", "//str[.='xx#title_3#']"); } public static class CustomTransformerFactory extends TransformerFactory { @Override public DocTransformer create(String field, SolrParams params, SolrQueryRequest req) { String[] extra = null; String ext = params.get("extra"); if(ext!=null) { extra = Strings.split(ext, ','); } return new CustomTransformer(field, extra); } } public static class CustomTransformer extends DocTransformer { final String name; final String[] extra; final StringBuilder str = new StringBuilder(); public CustomTransformer(String name, String[] extra) { this.name = name; this.extra = extra; } @Override public String getName() { return "custom"; } @Override public String[] getExtraRequestFields() { return extra; } /** * This transformer simply concatenates the values of multiple fields */ @Override public void transform(SolrDocument doc, int docid) throws IOException { str.setLength(0); for(String s : extra) { String v = getAsString(s, doc); str.append(v).append('#'); } System.out.println( "HELLO: "+str ); doc.setField(name, str.toString()); } } public static String getAsString(String field, SolrDocument doc) { Object v = doc.getFirstValue(field); if(v != null) { if(v instanceof IndexableField) { return ((IndexableField)v).stringValue(); } return v.toString(); } return null; } }
1
28,379
The test used the old bouncycastle dependency, which is no longer pulled in via Hadoop. Switched to the built-in Java split.
apache-lucene-solr
java
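The swap the comment describes is confined to CustomTransformerFactory.create; a sketch using the built-in java.lang.String#split in place of org.bouncycastle.util.Strings.split (String#split takes a regex, but a literal comma is safe here):

String ext = params.get("extra");
String[] extra = null;
if (ext != null) {
    // previously: extra = Strings.split(ext, ',');
    extra = ext.split(",");
}
return new CustomTransformer(field, extra);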
@@ -193,9 +193,11 @@ class SynthDriver(SynthDriver): voices=OrderedDict() for v in _espeak.getVoiceList(): l=v.languages[1:] + # #7167: Some languages names require unicode characters EG: Norwegian Bokmål + name=v.name.decode("UTF-8") # #5783: For backwards compatibility, voice identifies should always be lowercase identifier=os.path.basename(v.identifier).lower() - voices[identifier]=VoiceInfo(identifier,v.name,l) + voices[identifier]=VoiceInfo(identifier,name,l) return voices def _get_voice(self):
1
# -*- coding: UTF-8 -*- #synthDrivers/espeak.py #A part of NonVisual Desktop Access (NVDA) #Copyright (C) 2007-2015 NV Access Limited, Peter Vágner, Aleksey Sadovoy #This file is covered by the GNU General Public License. #See the file COPYING for more details. import os from collections import OrderedDict import _espeak import Queue import threading import languageHandler from synthDriverHandler import SynthDriver,VoiceInfo,BooleanSynthSetting import speech from logHandler import log class SynthDriver(SynthDriver): name = "espeak" description = "eSpeak NG" supportedSettings=( SynthDriver.VoiceSetting(), SynthDriver.VariantSetting(), SynthDriver.RateSetting(), # Translators: This is the name of the rate boost voice toggle # which further increases the speaking rate when enabled. BooleanSynthSetting("rateBoost",_("Rate boos&t")), SynthDriver.PitchSetting(), SynthDriver.InflectionSetting(), SynthDriver.VolumeSetting(), ) @classmethod def check(cls): return True def __init__(self): _espeak.initialize() log.info("Using eSpeak NG version %s" % _espeak.info()) lang=languageHandler.getLanguage() _espeak.setVoiceByLanguage(lang) self._language=lang self._variantDict=_espeak.getVariantDict() self.variant="max" self.rate=30 self.pitch=40 self.inflection=75 def _get_language(self): return self._language PROSODY_ATTRS = { speech.PitchCommand: "pitch", speech.VolumeCommand: "volume", speech.RateCommand: "rate", } IPA_TO_ESPEAK = { u"θ": u"T", u"s": u"s", u"ˈ": u"'", } def _processText(self, text): text = unicode(text) # We need to make several replacements. return text.translate({ 0x1: None, # used for embedded commands 0x3C: u"&lt;", # <: because of XML 0x3E: u"&gt;", # >: because of XML 0x5B: u" [", # [: [[ indicates phonemes }) def speak(self,speechSequence): defaultLanguage=self._language textList=[] langChanged=False prosody={} # We output malformed XML, as we might close an outer tag after opening an inner one; e.g. # <voice><prosody></voice></prosody>. # However, eSpeak doesn't seem to mind. for item in speechSequence: if isinstance(item,basestring): textList.append(self._processText(item)) elif isinstance(item,speech.IndexCommand): textList.append("<mark name=\"%d\" />"%item.index) elif isinstance(item,speech.CharacterModeCommand): textList.append("<say-as interpret-as=\"characters\">" if item.state else "</say-as>") elif isinstance(item,speech.LangChangeCommand): if langChanged: textList.append("</voice>") textList.append("<voice xml:lang=\"%s\">"%(item.lang if item.lang else defaultLanguage).replace('_','-')) langChanged=True elif isinstance(item,speech.BreakCommand): textList.append('<break time="%dms" />' % item.time) elif type(item) in self.PROSODY_ATTRS: if prosody: # Close previous prosody tag. textList.append("</prosody>") attr=self.PROSODY_ATTRS[type(item)] if item.multiplier==1: # Returning to normal. try: del prosody[attr] except KeyError: pass else: prosody[attr]=int(item.multiplier* 100) if not prosody: continue textList.append("<prosody") for attr,val in prosody.iteritems(): textList.append(' %s="%d%%"'%(attr,val)) textList.append(">") elif isinstance(item,speech.PhonemeCommand): # We can't use unicode.translate because we want to reject unknown characters. try: phonemes="".join([self.IPA_TO_ESPEAK[char] for char in item.ipa]) # There needs to be a space after the phoneme command. # Otherwise, eSpeak will announce a subsequent SSML tag instead of processing it. 
textList.append(u"[[%s]] "%phonemes) except KeyError: log.debugWarning("Unknown character in IPA string: %s"%item.ipa) if item.text: textList.append(self._processText(item.text)) elif isinstance(item,speech.SpeechCommand): log.debugWarning("Unsupported speech command: %s"%item) else: log.error("Unknown speech: %s"%item) # Close any open tags. if langChanged: textList.append("</voice>") if prosody: textList.append("</prosody>") text=u"".join(textList) _espeak.speak(text) def cancel(self): _espeak.stop() def pause(self,switch): _espeak.pause(switch) _rateBoost = False RATE_BOOST_MULTIPLIER = 3 def _get_rateBoost(self): return self._rateBoost def _set_rateBoost(self, enable): if enable == self._rateBoost: return rate = self.rate self._rateBoost = enable self.rate = rate def _get_rate(self): val=_espeak.getParameter(_espeak.espeakRATE,1) if self._rateBoost: val=int(val/self.RATE_BOOST_MULTIPLIER) return self._paramToPercent(val,_espeak.minRate,_espeak.maxRate) def _set_rate(self,rate): val=self._percentToParam(rate, _espeak.minRate, _espeak.maxRate) if self._rateBoost: val=int(val*self.RATE_BOOST_MULTIPLIER) _espeak.setParameter(_espeak.espeakRATE,val,0) def _get_pitch(self): val=_espeak.getParameter(_espeak.espeakPITCH,1) return self._paramToPercent(val,_espeak.minPitch,_espeak.maxPitch) def _set_pitch(self,pitch): val=self._percentToParam(pitch, _espeak.minPitch, _espeak.maxPitch) _espeak.setParameter(_espeak.espeakPITCH,val,0) def _get_inflection(self): val=_espeak.getParameter(_espeak.espeakRANGE,1) return self._paramToPercent(val,_espeak.minPitch,_espeak.maxPitch) def _set_inflection(self,val): val=self._percentToParam(val, _espeak.minPitch, _espeak.maxPitch) _espeak.setParameter(_espeak.espeakRANGE,val,0) def _get_volume(self): return _espeak.getParameter(_espeak.espeakVOLUME,1) def _set_volume(self,volume): _espeak.setParameter(_espeak.espeakVOLUME,volume,0) def _getAvailableVoices(self): voices=OrderedDict() for v in _espeak.getVoiceList(): l=v.languages[1:] # #5783: For backwards compatibility, voice identifies should always be lowercase identifier=os.path.basename(v.identifier).lower() voices[identifier]=VoiceInfo(identifier,v.name,l) return voices def _get_voice(self): curVoice=getattr(self,'_voice',None) if curVoice: return curVoice curVoice = _espeak.getCurrentVoice() if not curVoice: return "" # #5783: For backwards compatibility, voice identifies should always be lowercase return curVoice.identifier.split('+')[0].lower() def _set_voice(self, identifier): if not identifier: return # #5783: For backwards compatibility, voice identifies should always be lowercase identifier=identifier.lower() if "\\" in identifier: identifier=os.path.basename(identifier) self._voice=identifier try: _espeak.setVoiceAndVariant(voice=identifier,variant=self._variant) except: self._voice=None raise self._language=super(SynthDriver,self).language def _get_lastIndex(self): return _espeak.lastIndex def terminate(self): _espeak.terminate() def _get_variant(self): return self._variant def _set_variant(self,val): self._variant = val if val in self._variantDict else "max" _espeak.setVoiceAndVariant(variant=self._variant) def _getAvailableVariants(self): return OrderedDict((ID,VoiceInfo(ID, name)) for ID, name in self._variantDict.iteritems())
1
19,573
nit: I think this would be more readable as "Some language names contain Unicode characters".
nvaccess-nvda
py
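Applying the nit would change only the new comment line in the patch, e.g.:

# #7167: Some language names contain Unicode characters, e.g. Norwegian Bokmål
name=v.name.decode("UTF-8")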
@@ -1,3 +1,9 @@ +#NVDAHelper.py +#A part of NonVisual Desktop Access (NVDA) +#Copyright (C) 2017-2018 NV Access Limited, Peter Vagner, Davy Kager +#This file is covered by the GNU General Public License. +#See the file COPYING for more details. + import os import sys import _winreg
1
import os import sys import _winreg import msvcrt import versionInfo import winKernel import config from ctypes import * from ctypes.wintypes import * from comtypes import BSTR import winUser import eventHandler import queueHandler import api import globalVars from logHandler import log import time import globalVars versionedLibPath='lib' versionedLib64Path='lib64' if getattr(sys,'frozen',None): # Not running from source. Libraries are in a version-specific directory versionedLibPath=os.path.join(versionedLibPath,versionInfo.version) versionedLib64Path=os.path.join(versionedLib64Path,versionInfo.version) _remoteLib=None _remoteLoader64=None localLib=None generateBeep=None VBuf_getTextInRange=None lastInputLanguageName=None lastInputMethodName=None #utility function to point an exported function pointer in a dll to a ctypes wrapped python function def _setDllFuncPointer(dll,name,cfunc): cast(getattr(dll,name),POINTER(c_void_p)).contents.value=cast(cfunc,c_void_p).value #Implementation of nvdaController methods @WINFUNCTYPE(c_long,c_wchar_p) def nvdaController_speakText(text): focus=api.getFocusObject() if focus.sleepMode==focus.SLEEP_FULL: return -1 import queueHandler import speech queueHandler.queueFunction(queueHandler.eventQueue,speech.speakText,text) return 0 @WINFUNCTYPE(c_long) def nvdaController_cancelSpeech(): focus=api.getFocusObject() if focus.sleepMode==focus.SLEEP_FULL: return -1 import queueHandler import speech queueHandler.queueFunction(queueHandler.eventQueue,speech.cancelSpeech) return 0 @WINFUNCTYPE(c_long,c_wchar_p) def nvdaController_brailleMessage(text): focus=api.getFocusObject() if focus.sleepMode==focus.SLEEP_FULL: return -1 import queueHandler import braille queueHandler.queueFunction(queueHandler.eventQueue,braille.handler.message,text) return 0 def _lookupKeyboardLayoutNameWithHexString(layoutString): buf=create_unicode_buffer(1024) bufSize=c_int(2048) key=HKEY() if windll.advapi32.RegOpenKeyExW(_winreg.HKEY_LOCAL_MACHINE,u"SYSTEM\\CurrentControlSet\\Control\\Keyboard Layouts\\"+ layoutString,0,_winreg.KEY_QUERY_VALUE,byref(key))==0: try: if windll.advapi32.RegQueryValueExW(key,u"Layout Display Name",0,None,buf,byref(bufSize))==0: windll.shlwapi.SHLoadIndirectString(buf.value,buf,1023,None) return buf.value if windll.advapi32.RegQueryValueExW(key,u"Layout Text",0,None,buf,byref(bufSize))==0: return buf.value finally: windll.advapi32.RegCloseKey(key) @WINFUNCTYPE(c_long,c_wchar_p) def nvdaControllerInternal_requestRegistration(uuidString): pid=c_long() windll.rpcrt4.I_RpcBindingInqLocalClientPID(None,byref(pid)) pid=pid.value if not pid: log.error("Could not get process ID for RPC call") return -1; bindingHandle=c_long() bindingHandle.value=localLib.createRemoteBindingHandle(uuidString) if not bindingHandle: log.error("Could not bind to inproc rpc server for pid %d"%pid) return -1 registrationHandle=c_long() res=localLib.nvdaInProcUtils_registerNVDAProcess(bindingHandle,byref(registrationHandle)) if res!=0 or not registrationHandle: log.error("Could not register NVDA with inproc rpc server for pid %d, res %d, registrationHandle %s"%(pid,res,registrationHandle)) windll.rpcrt4.RpcBindingFree(byref(bindingHandle)) return -1 import appModuleHandler queueHandler.queueFunction(queueHandler.eventQueue,appModuleHandler.update,pid,helperLocalBindingHandle=bindingHandle,inprocRegistrationHandle=registrationHandle) return 0 @WINFUNCTYPE(c_long,c_long,c_long,c_long,c_long,c_long) def nvdaControllerInternal_displayModelTextChangeNotify(hwnd, left, top, right, bottom): import 
displayModel displayModel.textChangeNotify(hwnd, left, top, right, bottom) return 0 @WINFUNCTYPE(c_long,c_long,c_long,c_long,c_long,c_long) def nvdaControllerInternal_drawFocusRectNotify(hwnd, left, top, right, bottom): import eventHandler from NVDAObjects.window import Window focus=api.getFocusObject() if isinstance(focus,Window) and hwnd==focus.windowHandle: eventHandler.queueEvent("displayModel_drawFocusRectNotify",focus,rect=(left,top,right,bottom)) return 0; @WINFUNCTYPE(c_long,c_long,c_long,c_wchar_p) def nvdaControllerInternal_logMessage(level,pid,message): if not log.isEnabledFor(level): return 0 if pid: from appModuleHandler import getAppNameFromProcessID codepath="RPC process %s (%s)"%(pid,getAppNameFromProcessID(pid,includeExt=True)) else: codepath="NVDAHelperLocal" log._log(level,message,[],codepath=codepath) return 0 def handleInputCompositionEnd(result): import speech import characterProcessing from NVDAObjects.inputComposition import InputComposition from NVDAObjects.behaviors import CandidateItem focus=api.getFocusObject() result=result.lstrip(u'\u3000 ') curInputComposition=None if isinstance(focus,InputComposition): curInputComposition=focus oldSpeechMode=speech.speechMode speech.speechMode=speech.speechMode_off eventHandler.executeEvent("gainFocus",focus.parent) speech.speechMode=oldSpeechMode elif isinstance(focus.parent,InputComposition): #Candidate list is still up curInputComposition=focus.parent focus.parent=focus.parent.parent if curInputComposition and not result: result=curInputComposition.compositionString.lstrip(u'\u3000 ') if result: speech.speakText(result,symbolLevel=characterProcessing.SYMLVL_ALL) def handleInputCompositionStart(compositionString,selectionStart,selectionEnd,isReading): import speech from NVDAObjects.inputComposition import InputComposition from NVDAObjects.behaviors import CandidateItem focus=api.getFocusObject() if focus.parent and isinstance(focus.parent,InputComposition): #Candidates infront of existing composition string announce=not config.conf["inputComposition"]["announceSelectedCandidate"] focus.parent.compositionUpdate(compositionString,selectionStart,selectionEnd,isReading,announce=announce) return 0 #IME keeps updating input composition while the candidate list is open #Therefore ignore new composition updates if candidate selections are configured for speaking. if config.conf["inputComposition"]["announceSelectedCandidate"] and isinstance(focus,CandidateItem): return 0 if not isinstance(focus,InputComposition): parent=api.getDesktopObject().objectWithFocus() # #5640: Although we want to use the most correct focus (I.e. OS, not NVDA), if they are the same, we definitely want to use the original instance, so that state such as auto selection is maintained. 
if parent==focus: parent=focus curInputComposition=InputComposition(parent=parent) oldSpeechMode=speech.speechMode speech.speechMode=speech.speechMode_off eventHandler.executeEvent("gainFocus",curInputComposition) focus=curInputComposition speech.speechMode=oldSpeechMode focus.compositionUpdate(compositionString,selectionStart,selectionEnd,isReading) @WINFUNCTYPE(c_long,c_wchar_p,c_int,c_int,c_int) def nvdaControllerInternal_inputCompositionUpdate(compositionString,selectionStart,selectionEnd,isReading): from NVDAObjects.inputComposition import InputComposition if selectionStart==-1: queueHandler.queueFunction(queueHandler.eventQueue,handleInputCompositionEnd,compositionString) return 0 focus=api.getFocusObject() if isinstance(focus,InputComposition): focus.compositionUpdate(compositionString,selectionStart,selectionEnd,isReading) else: queueHandler.queueFunction(queueHandler.eventQueue,handleInputCompositionStart,compositionString,selectionStart,selectionEnd,isReading) return 0 def handleInputCandidateListUpdate(candidatesString,selectionIndex,inputMethod): candidateStrings=candidatesString.split('\n') import speech from NVDAObjects.inputComposition import InputComposition, CandidateList, CandidateItem focus=api.getFocusObject() if not (0<=selectionIndex<len(candidateStrings)): if isinstance(focus,CandidateItem): oldSpeechMode=speech.speechMode speech.speechMode=speech.speechMode_off eventHandler.executeEvent("gainFocus",focus.parent) speech.speechMode=oldSpeechMode return oldCandidateItemsText=None if isinstance(focus,CandidateItem): oldCandidateItemsText=focus.visibleCandidateItemsText parent=focus.parent wasCandidate=True else: parent=focus wasCandidate=False item=CandidateItem(parent=parent,candidateStrings=candidateStrings,candidateIndex=selectionIndex,inputMethod=inputMethod) if wasCandidate and focus.windowHandle==item.windowHandle and focus.candidateIndex==item.candidateIndex and focus.name==item.name: return if config.conf["inputComposition"]["autoReportAllCandidates"] and item.visibleCandidateItemsText!=oldCandidateItemsText: import ui ui.message(item.visibleCandidateItemsText) eventHandler.executeEvent("gainFocus",item) @WINFUNCTYPE(c_long,c_wchar_p,c_long,c_wchar_p) def nvdaControllerInternal_inputCandidateListUpdate(candidatesString,selectionIndex,inputMethod): queueHandler.queueFunction(queueHandler.eventQueue,handleInputCandidateListUpdate,candidatesString,selectionIndex,inputMethod) return 0 inputConversionModeMessages={ 1:( # Translators: A mode that allows typing in the actual 'native' characters for an east-Asian input method language currently selected, rather than alpha numeric (Roman/English) characters. _("Native input"), # Translators: a mode that lets you type in alpha numeric (roman/english) characters, rather than 'native' characters for the east-Asian input method language currently selected. _("Alpha numeric input") ), 8:( # Translators: for East-Asian input methods, a mode that allows typing in full-shaped (full double-byte) characters, rather than the smaller half-shaped ones. _("Full shaped mode"), # Translators: for East-Asian input methods, a mode that allows typing in half-shaped (single-byte) characters, rather than the larger full-shaped (double-byte) ones. _("Half shaped mode") ), } JapaneseInputConversionModeMessages= { # Translators: For Japanese character input: half-shaped (single-byte) alpha numeric (roman/english) mode. 0: _("half alphanumeric"), # Translators: For Japanese character input: half-shaped (single-byte) Katacana input mode. 
3: _("half katakana"), # Translators: For Japanese character input: alpha numeric (roman/english) mode. 8: _("alphanumeric"), # Translators: For Japanese character input: Hiragana input mode. 9: _("hiragana"), # Translators: For Japanese character input: Katacana input mode. 11: _("katakana"), # Translators: For Japanese character input: half-shaped (single-byte) alpha numeric (roman/english) mode. 16: _("half alphanumeric"), # Translators: For Japanese character input: half katakana roman input mode. 19: _("half katakana roman"), # Translators: For Japanese character input: alpha numeric (roman/english) mode. 24: _("alphanumeric"), # Translators: For Japanese character input: Hiragana Roman input mode. 25: _("hiragana roman"), # Translators: For Japanese character input: Katacana Roman input mode. 27: _("katakana roman"), } def handleInputConversionModeUpdate(oldFlags,newFlags,lcid): import ui textList=[] if newFlags!=oldFlags and lcid&0xff==0x11: #Japanese msg=JapaneseInputConversionModeMessages.get(newFlags) if msg: textList.append(msg) else: for x in xrange(32): x=2**x msgs=inputConversionModeMessages.get(x) if not msgs: continue newOn=bool(newFlags&x) oldOn=bool(oldFlags&x) if newOn!=oldOn: textList.append(msgs[0] if newOn else msgs[1]) if len(textList)>0: queueHandler.queueFunction(queueHandler.eventQueue,ui.message," ".join(textList)) @WINFUNCTYPE(c_long,c_long,c_long,c_ulong) def nvdaControllerInternal_inputConversionModeUpdate(oldFlags,newFlags,lcid): queueHandler.queueFunction(queueHandler.eventQueue,handleInputConversionModeUpdate,oldFlags,newFlags,lcid) return 0 @WINFUNCTYPE(c_long,c_long) def nvdaControllerInternal_IMEOpenStatusUpdate(opened): if opened: # Translators: a message when the IME open status changes to opened message=_("IME opened") else: # Translators: a message when the IME open status changes to closed message=_("IME closed") import ui queueHandler.queueFunction(queueHandler.eventQueue,ui.message,message) return 0 @WINFUNCTYPE(c_long,c_long,c_ulong,c_wchar_p) def nvdaControllerInternal_inputLangChangeNotify(threadID,hkl,layoutString): global lastInputMethodName, lastInputLanguageName focus=api.getFocusObject() #This callback can be called before NVDa is fully initialized #So also handle focus object being None as well as checking for sleepMode if not focus or focus.sleepMode: return 0 import NVDAObjects.window #Generally we should not allow input lang changes from threads that are not focused. #But threadIDs for console windows are always wrong so don't ignore for those. if not isinstance(focus,NVDAObjects.window.Window) or (threadID!=focus.windowThreadID and focus.windowClassName!="ConsoleWindowClass"): return 0 import sayAllHandler #Never announce changes while in sayAll (#1676) if sayAllHandler.isRunning(): return 0 import queueHandler import ui languageID=hkl&0xffff buf=create_unicode_buffer(1024) res=windll.kernel32.GetLocaleInfoW(languageID,2,buf,1024) # Translators: the label for an unknown language when switching input methods. inputLanguageName=buf.value if res else _("unknown language") layoutStringCodes=[] inputMethodName=None #layoutString can either be a real input method name, a hex string for an input method name in the registry, or an empty string. #If its a real input method name its used as is. #If its a hex string or its empty, then the method name is looked up by trying: #The full hex string, the hkl as a hex string, the low word of the hex string or hkl, the high word of the hex string or hkl. 
if layoutString: try: int(layoutString,16) layoutStringCodes.append(layoutString) except ValueError: inputMethodName=layoutString if not inputMethodName: layoutStringCodes.insert(0,hex(hkl)[2:].rstrip('L').upper().rjust(8,'0')) for stringCode in list(layoutStringCodes): layoutStringCodes.append(stringCode[4:].rjust(8,'0')) if stringCode[0]<'D': layoutStringCodes.append(stringCode[0:4].rjust(8,'0')) for stringCode in layoutStringCodes: inputMethodName=_lookupKeyboardLayoutNameWithHexString(stringCode) if inputMethodName: break if not inputMethodName: log.debugWarning("Could not find layout name for keyboard layout, reporting as unknown") # Translators: The label for an unknown input method when switching input methods. inputMethodName=_("unknown input method") if ' - ' in inputMethodName: inputMethodName="".join(inputMethodName.split(' - ')[1:]) if inputLanguageName!=lastInputLanguageName: lastInputLanguageName=inputLanguageName # Translators: the message announcing the language and keyboard layout when it changes inputMethodName=_("{language} - {layout}").format(language=inputLanguageName,layout=inputMethodName) if inputMethodName!=lastInputMethodName: lastInputMethodName=inputMethodName queueHandler.queueFunction(queueHandler.eventQueue,ui.message,inputMethodName) return 0 @WINFUNCTYPE(c_long,c_long,c_wchar) def nvdaControllerInternal_typedCharacterNotify(threadID,ch): focus=api.getFocusObject() if focus.windowClassName!="ConsoleWindowClass": eventHandler.queueEvent("typedCharacter",focus,ch=ch) return 0 @WINFUNCTYPE(c_long, c_int, c_int) def nvdaControllerInternal_vbufChangeNotify(rootDocHandle, rootID): import virtualBuffers virtualBuffers.VirtualBuffer.changeNotify(rootDocHandle, rootID) return 0 @WINFUNCTYPE(c_long, c_wchar_p) def nvdaControllerInternal_installAddonPackageFromPath(addonPath): import wx from gui import addonGui log.debug("Requesting installation of add-on from %s", addonPath) wx.CallAfter(addonGui.AddonsDialog.handleRemoteAddonInstall, addonPath) return 0 class RemoteLoader64(object): def __init__(self): # Create a pipe so we can write to stdin of the loader process. pipeReadOrig, self._pipeWrite = winKernel.CreatePipe(None, 0) # Make the read end of the pipe inheritable. pipeRead = self._duplicateAsInheritable(pipeReadOrig) winKernel.closeHandle(pipeReadOrig) # stdout/stderr of the loader process should go to nul. with file("nul", "w") as nul: nulHandle = self._duplicateAsInheritable(msvcrt.get_osfhandle(nul.fileno())) # Set the process to start with the appropriate std* handles. si = winKernel.STARTUPINFO(dwFlags=winKernel.STARTF_USESTDHANDLES, hSTDInput=pipeRead, hSTDOutput=nulHandle, hSTDError=nulHandle) pi = winKernel.PROCESS_INFORMATION() # Even if we have uiAccess privileges, they will not be inherited by default. # Therefore, explicitly specify our own process token, which causes them to be inherited. token = winKernel.OpenProcessToken(winKernel.GetCurrentProcess(), winKernel.MAXIMUM_ALLOWED) try: winKernel.CreateProcessAsUser(token, None, os.path.join(versionedLib64Path,u"nvdaHelperRemoteLoader.exe"), None, None, True, None, None, None, si, pi) # We don't need the thread handle. 
winKernel.closeHandle(pi.hThread) self._process = pi.hProcess except: winKernel.closeHandle(self._pipeWrite) raise finally: winKernel.closeHandle(pipeRead) winKernel.closeHandle(token) def _duplicateAsInheritable(self, handle): curProc = winKernel.GetCurrentProcess() return winKernel.DuplicateHandle(curProc, handle, curProc, 0, True, winKernel.DUPLICATE_SAME_ACCESS) def terminate(self): # Closing the write end of the pipe will cause EOF for the waiting loader process, which will then exit gracefully. winKernel.closeHandle(self._pipeWrite) # Wait until it's dead. winKernel.waitForSingleObject(self._process, winKernel.INFINITE) winKernel.closeHandle(self._process) def initialize(): global _remoteLib, _remoteLoader64, localLib, generateBeep,VBuf_getTextInRange localLib=cdll.LoadLibrary(os.path.join(versionedLibPath,'nvdaHelperLocal.dll')) for name,func in [ ("nvdaController_speakText",nvdaController_speakText), ("nvdaController_cancelSpeech",nvdaController_cancelSpeech), ("nvdaController_brailleMessage",nvdaController_brailleMessage), ("nvdaControllerInternal_requestRegistration",nvdaControllerInternal_requestRegistration), ("nvdaControllerInternal_inputLangChangeNotify",nvdaControllerInternal_inputLangChangeNotify), ("nvdaControllerInternal_typedCharacterNotify",nvdaControllerInternal_typedCharacterNotify), ("nvdaControllerInternal_displayModelTextChangeNotify",nvdaControllerInternal_displayModelTextChangeNotify), ("nvdaControllerInternal_logMessage",nvdaControllerInternal_logMessage), ("nvdaControllerInternal_inputCompositionUpdate",nvdaControllerInternal_inputCompositionUpdate), ("nvdaControllerInternal_inputCandidateListUpdate",nvdaControllerInternal_inputCandidateListUpdate), ("nvdaControllerInternal_IMEOpenStatusUpdate",nvdaControllerInternal_IMEOpenStatusUpdate), ("nvdaControllerInternal_inputConversionModeUpdate",nvdaControllerInternal_inputConversionModeUpdate), ("nvdaControllerInternal_vbufChangeNotify",nvdaControllerInternal_vbufChangeNotify), ("nvdaControllerInternal_installAddonPackageFromPath",nvdaControllerInternal_installAddonPackageFromPath), ("nvdaControllerInternal_drawFocusRectNotify",nvdaControllerInternal_drawFocusRectNotify), ]: try: _setDllFuncPointer(localLib,"_%s"%name,func) except AttributeError as e: log.error("nvdaHelperLocal function pointer for %s could not be found, possibly old nvdaHelperLocal dll"%name,exc_info=True) raise e localLib.nvdaHelperLocal_initialize() generateBeep=localLib.generateBeep generateBeep.argtypes=[c_char_p,c_float,c_int,c_int,c_int] generateBeep.restype=c_int # The rest of this function (to do with injection only applies if NVDA is not running as a Windows store application) # Handle VBuf_getTextInRange's BSTR out parameter so that the BSTR will be freed automatically. 
VBuf_getTextInRange = CFUNCTYPE(c_int, c_int, c_int, c_int, POINTER(BSTR), c_int)( ("VBuf_getTextInRange", localLib), ((1,), (1,), (1,), (2,), (1,))) if config.isAppX: log.info("Remote injection disabled due to running as a Windows Store Application") return #Load nvdaHelperRemote.dll but with an altered search path so it can pick up other dlls in lib h=windll.kernel32.LoadLibraryExW(os.path.abspath(os.path.join(versionedLibPath,u"nvdaHelperRemote.dll")),0,0x8) if not h: log.critical("Error loading nvdaHelperRemote.dll: %s" % WinError()) return _remoteLib=CDLL("nvdaHelperRemote",handle=h) if _remoteLib.injection_initialize(globalVars.appArgs.secure) == 0: raise RuntimeError("Error initializing NVDAHelperRemote") if not _remoteLib.installIA2Support(): log.error("Error installing IA2 support") #Manually start the in-process manager thread for this NVDA main thread now, as a slow system can cause this action to confuse WX _remoteLib.initInprocManagerThreadIfNeeded() if os.environ.get('PROCESSOR_ARCHITEW6432')=='AMD64': _remoteLoader64=RemoteLoader64() def terminate(): global _remoteLib, _remoteLoader64, localLib, generateBeep, VBuf_getTextInRange if not config.isAppX: if not _remoteLib.uninstallIA2Support(): log.debugWarning("Error uninstalling IA2 support") if _remoteLib.injection_terminate() == 0: raise RuntimeError("Error terminating NVDAHelperRemote") _remoteLib=None if _remoteLoader64: _remoteLoader64.terminate() _remoteLoader64=None generateBeep=None VBuf_getTextInRange=None localLib.nvdaHelperLocal_terminate() localLib=None LOCAL_WIN10_DLL_PATH = os.path.join(versionedLibPath,"nvdaHelperLocalWin10.dll") def getHelperLocalWin10Dll(): """Get a ctypes WinDLL instance for the nvdaHelperLocalWin10 dll. This is a C++/CX dll used to provide access to certain UWP functionality. """ return windll[LOCAL_WIN10_DLL_PATH] def bstrReturn(address): """Handle a BSTR returned from a ctypes function call. This includes freeing the memory. This is needed for nvdaHelperLocalWin10 functions which return a BSTR. """ # comtypes.BSTR.from_address seems to cause a crash for some reason. Not sure why. # Just access the string ourselves. # This will terminate at a null character, even though BSTR allows nulls. # We're only using this for normal, null-terminated strings anyway. val = wstring_at(address) windll.oleaut32.SysFreeString(address) return val
1
22,467
Thanks for adding the header, but I don't think 2017 is a very accurate guess here. Could you do a quick search with git blame and change this accordingly?
nvaccess-nvda
py
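For the check the reviewer asks for, one quick way to find the year the file was introduced is to list the commit that added it (the path is a guess; adjust to the file's actual location in the NVDA repo):

# author date, as a four-digit year, of the commit(s) that added the file
git log --diff-filter=A --format=%ad --date=format:%Y -- source/NVDAHelper.py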
@@ -1,7 +1,6 @@ -import { createElement, createContext } from '../../'; +import { createElement, createContext } from '../../src'; import { expect } from 'chai'; -/** @jsx createElement */ /* eslint-env browser, mocha */ describe('createContext', () => {
1
import { createElement, createContext } from '../../'; import { expect } from 'chai'; /** @jsx createElement */ /* eslint-env browser, mocha */ describe('createContext', () => { it('should return a Provider and a Consumer', () => { const context = createContext(); expect(context).to.have.property('Provider'); expect(context).to.have.property('Consumer'); }); it('should return a valid Provider Component', () => { const { Provider } = createContext(); const contextValue = { value: 'test' }; const children = [<div>child1</div>, <div>child2</div>]; const providerComponent = <Provider {...contextValue}>{children}</Provider>; //expect(providerComponent).to.have.property('tag', 'Provider'); expect(providerComponent.props.value).to.equal(contextValue.value); expect(providerComponent.props.children).to.equal(children); }); });
1
16,139
Oh interesting - does web-test-runner not resolve package.json files?
preactjs-preact
js
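For context on the question: @web/test-runner serves source files as-is and only rewrites module specifiers when node resolution is enabled, along these lines (whether this repo's config sets it is an assumption):

// web-test-runner.config.mjs (illustrative)
export default {
  nodeResolve: true, // resolve bare module imports via node resolution
};

If that resolution only covers bare specifiers and not relative directory imports like '../../', that would explain why the test has to point at '../../src' explicitly.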
@@ -5,6 +5,7 @@ selected={startDate} onChange={date => setStartDate(date)} locale={fi} + ariaDayPrefix="Päivä" /> ); };
1
() => { const [startDate, setStartDate] = useState(new Date()); return ( <DatePicker selected={startDate} onChange={date => setStartDate(date)} locale={fi} /> ); };
1
7,231
Is there a way to derive this value from the locale itself? I feel hardcoding the prefix in the props isn't the right approach, but I'm not sure what the locale file contains exactly.
Hacker0x01-react-datepicker
js
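date-fns v2 locale objects do expose a code field (e.g. fi.code === 'fi'), but as far as I can tell no generic word for "day", so if the locale file turns out not to contain one, a fallback is a small map keyed by that code. dayPrefixByLocale and the English default below are hypothetical, not a react-datepicker API:

// hypothetical lookup table, not part of react-datepicker
const dayPrefixByLocale = { fi: "Päivä", de: "Tag" };

<DatePicker
  selected={startDate}
  onChange={date => setStartDate(date)}
  locale={fi}
  ariaDayPrefix={dayPrefixByLocale[fi.code] || "Day"}
/>;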
@@ -21,11 +21,18 @@ export function setComponentProps(component, props, opts, context, mountAll) { if ((component.__key = props.key)) delete props.key; if (!component.base || mountAll) { - if (component.componentWillMount) component.componentWillMount(); + if (component.componentWillMount) { + options.warn("'componentWillMount' is deprecated"); + component.componentWillMount(); + } } else if (component.componentWillReceiveProps) { + options.warn("'componentWillReceiveProps' is deprecated"); component.componentWillReceiveProps(props, context); } + if (component.getDerivedStateFromProps) { + component.setState(component.getDerivedStateFromProps(props, component.state)); + } if (context && context!==component.context) { if (!component.prevContext) component.prevContext = component.context;
1
import { SYNC_RENDER, NO_RENDER, FORCE_RENDER, ASYNC_RENDER, ATTR_KEY } from '../constants'; import options from '../options'; import { extend } from '../util'; import { enqueueRender } from '../render-queue'; import { getNodeProps } from './index'; import { diff, mounts, diffLevel, flushMounts, recollectNodeTree, removeChildren } from './diff'; import { createComponent, collectComponent } from './component-recycler'; import { removeNode } from '../dom/index'; /** Set a component's `props` (generally derived from JSX attributes). * @param {Object} props * @param {Object} [opts] * @param {boolean} [opts.renderSync=false] If `true` and {@link options.syncComponentUpdates} is `true`, triggers synchronous rendering. * @param {boolean} [opts.render=true] If `false`, no render will be triggered. */ export function setComponentProps(component, props, opts, context, mountAll) { if (component._disable) return; component._disable = true; if ((component.__ref = props.ref)) delete props.ref; if ((component.__key = props.key)) delete props.key; if (!component.base || mountAll) { if (component.componentWillMount) component.componentWillMount(); } else if (component.componentWillReceiveProps) { component.componentWillReceiveProps(props, context); } if (context && context!==component.context) { if (!component.prevContext) component.prevContext = component.context; component.context = context; } if (!component.prevProps) component.prevProps = component.props; component.props = props; component._disable = false; if (opts!==NO_RENDER) { if (opts===SYNC_RENDER || options.syncComponentUpdates!==false || !component.base) { renderComponent(component, SYNC_RENDER, mountAll); } else { enqueueRender(component); } } if (component.__ref) component.__ref(component); } /** Render a Component, triggering necessary lifecycle events and taking High-Order Components into account. * @param {Component} component * @param {Object} [opts] * @param {boolean} [opts.build=false] If `true`, component will build and store a DOM node if not already associated with one. 
* @private */ export function renderComponent(component, opts, mountAll, isChild) { if (component._disable) return; let props = component.props, state = component.state, context = component.context, previousProps = component.prevProps || props, previousState = component.prevState || state, previousContext = component.prevContext || context, isUpdate = component.base, nextBase = component.nextBase, initialBase = isUpdate || nextBase, initialChildComponent = component._component, skip = false, rendered, inst, cbase; // if updating if (isUpdate) { component.props = previousProps; component.state = previousState; component.context = previousContext; if (opts!==FORCE_RENDER && component.shouldComponentUpdate && component.shouldComponentUpdate(props, state, context) === false) { skip = true; } else if (component.componentWillUpdate) { component.componentWillUpdate(props, state, context); } component.props = props; component.state = state; component.context = context; } component.prevProps = component.prevState = component.prevContext = component.nextBase = null; component._dirty = false; if (!skip) { rendered = component.render(props, state, context); // context to pass to the child, can be updated via (grand-)parent component if (component.getChildContext) { context = extend(extend({}, context), component.getChildContext()); } let childComponent = rendered && rendered.nodeName, toUnmount, base; if (typeof childComponent==='function') { // set up high order component link let childProps = getNodeProps(rendered); inst = initialChildComponent; if (inst && inst.constructor===childComponent && childProps.key==inst.__key) { setComponentProps(inst, childProps, SYNC_RENDER, context, false); } else { toUnmount = inst; component._component = inst = createComponent(childComponent, childProps, context); inst.nextBase = inst.nextBase || nextBase; inst._parentComponent = component; setComponentProps(inst, childProps, NO_RENDER, context, false); renderComponent(inst, SYNC_RENDER, mountAll, true); } base = inst.base; } else { cbase = initialBase; // destroy high order component link toUnmount = initialChildComponent; if (toUnmount) { cbase = component._component = null; } if (initialBase || opts===SYNC_RENDER) { if (cbase) cbase._component = null; base = diff(cbase, rendered, context, mountAll || !isUpdate, initialBase && initialBase.parentNode, true); } } if (initialBase && base!==initialBase && inst!==initialChildComponent) { let baseParent = initialBase.parentNode; if (baseParent && base!==baseParent) { baseParent.replaceChild(base, initialBase); if (!toUnmount) { initialBase._component = null; recollectNodeTree(initialBase, false); } } } if (toUnmount) { unmountComponent(toUnmount); } component.base = base; if (base && !isChild) { let componentRef = component, t = component; while ((t=t._parentComponent)) { (componentRef = t).base = base; } base._component = componentRef; base._componentConstructor = componentRef.constructor; } } if (!isUpdate || mountAll) { mounts.unshift(component); } else if (!skip) { // Ensure that pending componentDidMount() hooks of child components // are called before the componentDidUpdate() hook in the parent. 
// Note: disabled as it causes duplicate hooks, see https://github.com/developit/preact/issues/750 // flushMounts(); if (component.componentDidUpdate) { component.componentDidUpdate(previousProps, previousState, previousContext); } if (options.afterUpdate) options.afterUpdate(component); } if (component._renderCallbacks!=null) { while (component._renderCallbacks.length) component._renderCallbacks.pop().call(component); } if (!diffLevel && !isChild) flushMounts(); } /** Apply the Component referenced by a VNode to the DOM. * @param {Element} dom The DOM node to mutate * @param {VNode} vnode A Component-referencing VNode * @returns {Element} dom The created/mutated element * @private */ export function buildComponentFromVNode(dom, vnode, context, mountAll) { let c = dom && dom._component, originalComponent = c, oldDom = dom, isDirectOwner = c && dom._componentConstructor===vnode.nodeName, isOwner = isDirectOwner, props = getNodeProps(vnode); while (c && !isOwner && (c=c._parentComponent)) { isOwner = c.constructor===vnode.nodeName; } if (c && isOwner && (!mountAll || c._component)) { setComponentProps(c, props, ASYNC_RENDER, context, mountAll); dom = c.base; } else { if (originalComponent && !isDirectOwner) { unmountComponent(originalComponent); dom = oldDom = null; } c = createComponent(vnode.nodeName, props, context); if (dom && !c.nextBase) { c.nextBase = dom; // passing dom/oldDom as nextBase will recycle it if unused, so bypass recycling on L229: oldDom = null; } setComponentProps(c, props, SYNC_RENDER, context, mountAll); dom = c.base; if (oldDom && dom!==oldDom) { oldDom._component = null; recollectNodeTree(oldDom, false); } } return dom; } /** Remove a component from the DOM and recycle it. * @param {Component} component The Component instance to unmount * @private */ export function unmountComponent(component) { if (options.beforeUnmount) options.beforeUnmount(component); let base = component.base; component._disable = true; if (component.componentWillUnmount) component.componentWillUnmount(); component.base = null; // recursively tear down & recollect high-order component children: let inner = component._component; if (inner) { unmountComponent(inner); } else if (base) { if (base[ATTR_KEY] && base[ATTR_KEY].ref) base[ATTR_KEY].ref(null); component.nextBase = base; removeNode(base); collectComponent(component); removeChildren(base); } if (component.__ref) component.__ref(null); }
1
11,968
I'd much rather see these warnings in our devtools (`debug/index.js`). Strings contribute quite a bit to our file size and moving them there would prevent bloating core.
preactjs-preact
js
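A rough sketch of the alternative the comment proposes — leave core silent and emit the warnings from debug/index.js instead, via the options.vnode hook this version of Preact already invokes from h(); the import path and message wording are illustrative:

import options from '../src/options'; // path is an assumption

const warned = new Set();
const oldVnode = options.vnode;
options.vnode = vnode => {
	const type = vnode && vnode.nodeName;
	// warn once per component class that defines a deprecated lifecycle
	if (typeof type === 'function' && type.prototype && !warned.has(type)) {
		warned.add(type);
		if (type.prototype.componentWillMount) {
			console.warn(`'componentWillMount' is deprecated in ${type.name}`);
		}
		if (type.prototype.componentWillReceiveProps) {
			console.warn(`'componentWillReceiveProps' is deprecated in ${type.name}`);
		}
	}
	if (oldVnode) oldVnode(vnode);
};

This keeps the strings out of the shipped bundle, which is the size concern the comment raises.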
@@ -96,6 +96,10 @@ public class SmartStoreInspectorActivity extends Activity implements AdapterView private String lastAlertMessage; private JSONArray lastResults; + // Default queries + private String SOUPS_QUERY = String.format("select %s from %s", SmartStore.SOUP_NAME_COL, SmartStore.SOUP_ATTRS_TABLE); + private String INDICES_QUERY = String.format("select %s, %s, %s from %s", SmartStore.SOUP_NAME_COL, SmartStore.PATH_COL, SmartStore.COLUMN_TYPE_COL, SmartStore.SOUP_INDEX_MAP_TABLE); + /** * Create intent to bring up inspector * @param parentActivity
1
/* * Copyright (c) 2014-present, salesforce.com, inc. * All rights reserved. * Redistribution and use of this software in source and binary forms, with or * without modification, are permitted provided that the following conditions * are met: * - Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * - Neither the name of salesforce.com, inc. nor the names of its contributors * may be used to endorse or promote products derived from this software without * specific prior written permission of salesforce.com, inc. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ package com.salesforce.androidsdk.smartstore.ui; import android.app.Activity; import android.app.AlertDialog; import android.content.Intent; import android.os.Bundle; import android.text.SpannableString; import android.text.Spanned; import android.text.TextUtils; import android.util.Pair; import android.view.View; import android.view.animation.Animation; import android.view.animation.AnimationUtils; import android.view.animation.GridLayoutAnimationController; import android.widget.AdapterView; import android.widget.ArrayAdapter; import android.widget.EditText; import android.widget.GridView; import android.widget.MultiAutoCompleteTextView; import android.widget.MultiAutoCompleteTextView.Tokenizer; import android.widget.Spinner; import com.salesforce.androidsdk.accounts.UserAccount; import com.salesforce.androidsdk.smartstore.R; import com.salesforce.androidsdk.smartstore.app.SmartStoreSDKManager; import com.salesforce.androidsdk.smartstore.store.DBOpenHelper; import com.salesforce.androidsdk.smartstore.store.QuerySpec; import com.salesforce.androidsdk.smartstore.store.SmartSqlHelper; import com.salesforce.androidsdk.smartstore.store.SmartStore; import com.salesforce.androidsdk.smartstore.util.SmartStoreLogger; import com.salesforce.androidsdk.util.JSONObjectHelper; import org.json.JSONArray; import org.json.JSONException; import org.json.JSONObject; import java.util.ArrayList; import java.util.LinkedList; import java.util.List; public class SmartStoreInspectorActivity extends Activity implements AdapterView.OnItemSelectedListener { // Keys for extras bundle private static final String IS_GLOBAL_STORE = "isGlobalStore"; private static final String DB_NAME = "dbName"; private static final String TAG = "SmartStoreInspectorActivity"; // Default page size / index private static final int DEFAULT_PAGE_SIZE = 10; private static final int DEFAULT_PAGE_INDEX = 0; public static final String USER_STORE = " (user store)"; public static final String 
GLOBAL_STORE = " (global store)"; public static final String DEFAULT_STORE = "default"; // Store private String dbName; private boolean isGlobal; private SmartStore smartStore; private List<String> allStores; // View elements private Spinner spinner; private MultiAutoCompleteTextView queryText; private EditText pageSizeText; private EditText pageIndexText; private GridView resultGrid; // Test support private String lastAlertTitle; private String lastAlertMessage; private JSONArray lastResults; /** * Create intent to bring up inspector * @param parentActivity * @param isGlobal pass true to get an inspector for the default global smartstore * pass false to get an inspector for the default user smartstore * @param dbName * @return */ public static Intent getIntent(Activity parentActivity, boolean isGlobal, String dbName) { final Bundle bundle = new Bundle(); bundle.putBoolean(IS_GLOBAL_STORE, isGlobal); bundle.putString(DB_NAME, dbName); final Intent intent = new Intent(parentActivity, SmartStoreInspectorActivity.class); intent.putExtras(bundle); return intent; } @Override protected void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); readExtras(); setContentView(R.layout.sf__inspector); getActionBar().setTitle(R.string.sf__inspector_title); spinner = findViewById(R.id.sf__inspector_stores_spinner); queryText = findViewById(R.id.sf__inspector_query_text); pageSizeText = findViewById(R.id.sf__inspector_pagesize_text); pageIndexText = findViewById(R.id.sf__inspector_pageindex_text); resultGrid = findViewById(R.id.sf__inspector_result_grid); setupSpinner(); } @Override protected void onResume() { super.onResume(); setupStore(isGlobal, dbName); } private void readExtras() { Bundle bundle = getIntent().getExtras(); boolean hasUser = SmartStoreSDKManager.getInstance().getUserAccountManager().getCurrentUser() != null; // isGlobal is set to true // if no bundle, or no value for isGlobalStore in bundle, or true specified for isGlobalStore in bundle, or there is no current user isGlobal = bundle == null || !bundle.containsKey(IS_GLOBAL_STORE) || bundle.getBoolean(IS_GLOBAL_STORE) || !hasUser; // dbName is set to DBOpenHelper.DEFAULT_DB_NAME // if no bundle, or no value for dbName in bundle dbName = bundle == null || !bundle.containsKey(DB_NAME) ? DBOpenHelper.DEFAULT_DB_NAME : bundle.getString(DB_NAME); } private void setupSpinner() { SmartStoreSDKManager mgr = SmartStoreSDKManager.getInstance(); allStores = new ArrayList<>(); for (String dbName : mgr.getUserStoresPrefixList()) allStores.add(getDisplayNameForStore(false, dbName)); for (String dbName : mgr.getGlobalStoresPrefixList()) allStores.add(getDisplayNameForStore(true, dbName)); int selectedStoreIndex = allStores.indexOf(getDisplayNameForStore(this.isGlobal, this.dbName)); spinner.setAdapter(new ArrayAdapter<>(this, android.R.layout.simple_spinner_item, allStores)); spinner.setSelection(selectedStoreIndex); spinner.setOnItemSelectedListener(this); } private String getDisplayNameForStore(boolean isGlobal, String dbName) { return (dbName.equals(DBOpenHelper.DEFAULT_DB_NAME) ? DEFAULT_STORE : dbName) + (isGlobal ? 
GLOBAL_STORE : USER_STORE); } private Pair<Boolean, String> getStoreFromDisplayName(String storeDisplayName) { boolean isGlobal; String dbName; if (storeDisplayName.endsWith(GLOBAL_STORE)) { isGlobal = true; dbName = storeDisplayName.substring(0, storeDisplayName.length() - GLOBAL_STORE.length()); } else { isGlobal = false; dbName = storeDisplayName.substring(0, storeDisplayName.length() - USER_STORE.length()); } dbName = dbName.equals(DEFAULT_STORE) ? DBOpenHelper.DEFAULT_DB_NAME : dbName; return new Pair<>(isGlobal, dbName); } private void setupStore(boolean isGlobal, String dbName) { SmartStoreSDKManager mgr = SmartStoreSDKManager.getInstance(); UserAccount currentUser = mgr.getUserAccountManager().getCurrentUser(); if (this.isGlobal != isGlobal || !this.dbName.equals(dbName) || smartStore == null) { this.isGlobal = isGlobal; this.dbName = dbName; smartStore = isGlobal ? mgr.getGlobalSmartStore(dbName) : mgr.getSmartStore(dbName, currentUser, null); setupAutocomplete(queryText); } } /** * Called when item selected in stores drop down * @param adapterView * @param view * @param i * @param l */ @Override public void onItemSelected(AdapterView<?> adapterView, View view, int i, long l) { Pair<Boolean, String> selectedStore = getStoreFromDisplayName(allStores.get(i)); setupStore(selectedStore.first, selectedStore.second); } /** * Called when no item is selected in stores drop down * @param adapterView */ @Override public void onNothingSelected(AdapterView<?> adapterView) { } /** * Called when "Clear" button is clicked * * @param v */ public void onClearClick(View v) { reset(); } /** * Reset activity to its original state */ public void reset() { queryText.setText(""); pageSizeText.setText(""); pageIndexText.setText(""); resultGrid.setAdapter(null); lastAlertTitle = null; lastAlertMessage = null; lastResults = null; } /** * @return title of last alert shown (used by tests) */ public String getLastAlertTitle() { return lastAlertTitle; } /** * @return message of last alert shown (used by tests) */ public String getLastAlertMessage() { return lastAlertMessage; } /** * @return last results shown (used by tests) */ public JSONArray getLastResults() { return lastResults; } /** * Called when "Run" button is clicked * * @param v */ public void onRunClick(View v) { runQuery(); } /** * Called when "Soups" button is clicked * * @param v */ public void onSoupsClick(View v) { List<String> names = smartStore.getAllSoupNames(); if (names.size() == 0) { showAlert(null, getString(R.string.sf__inspector_no_soups_found)); return; } if (names.size() > 10) { queryText.setText(getString(R.string.sf__inspector_soups_query)); } else { StringBuilder sb = new StringBuilder(); boolean first = true; for (String name : names) { if (!first) sb.append(" union "); sb.append("select '"); sb.append(name); sb.append("', count(*) from {"); sb.append(name); sb.append("}"); first = false; } queryText.setText(sb.toString()); } runQuery(); } /** * Called when "Indices" button is clicked * * @param v */ public void onIndicesClick(View v) { queryText .setText(getString(R.string.sf__inspector_indices_query)); runQuery(); } /** * Helper method that builds query spec from typed query, runs it and * updates result grid */ private void runQuery() { try { String query = queryText.getText().toString(); if (query.length() == 0) { showAlert(null, getString(R.string.sf__inspector_no_query_specified)); return; } int pageSize = getInt(pageSizeText, DEFAULT_PAGE_SIZE); int pageIndex = getInt(pageIndexText, DEFAULT_PAGE_INDEX); QuerySpec querySpec 
= QuerySpec .buildSmartQuerySpec(query, pageSize); showResult(smartStore.query(querySpec, pageIndex)); } catch (Exception e) { showAlert(e.getClass().getSimpleName(), e.getMessage()); } } /** * Helper function to get integer typed in a text field Returns defaultValue * if no integer were typed * * @param textField * @param defaultValue * @return */ private int getInt(EditText textField, int defaultValue) { String s = textField.getText().toString(); if (s.length() == 0) { return defaultValue; } else { return Integer.parseInt(s); } } private void showAlert(String title, String message) { lastAlertTitle = title; lastAlertMessage = message; new AlertDialog.Builder(this).setTitle(title) .setMessage(message).show(); } /** * Helper method to populate result grid with query result set (expected to * be a JSONArray of JSONArray's) * * @param result * @throws JSONException */ private void showResult(JSONArray result) throws JSONException { lastResults = result; ArrayAdapter<String> adapter = new ArrayAdapter<String>(this, R.layout.sf__inspector_result_cell); if (result.length() == 0) { showAlert(null, getString(R.string.sf__inspector_no_rows_returned)); } for (int j = 0; j < result.length(); j++) { JSONArray row = result.getJSONArray(j); for (int i = 0; i < row.length(); i++) { Object val = JSONObjectHelper.opt(row, i); adapter.add(val instanceof JSONObject ? ((JSONObject) val).toString(2) : (val == null ? "null" : val.toString())); } } int numColumns = (result.length() > 0 ? result.getJSONArray(0).length() : 0); resultGrid.setNumColumns(numColumns); resultGrid.setAdapter(adapter); animateGridView(resultGrid); } /** * Helper method to attach animation to grid view * * @param gridView */ private void animateGridView(GridView gridView) { Animation animation = AnimationUtils.loadAnimation(this, android.R.anim.fade_in); GridLayoutAnimationController animationController = new GridLayoutAnimationController( animation, 0f, 0.1f); gridView.setLayoutAnimation(animationController); animationController.start(); } /** * Helper method to setup auto-complete for query input field * * @param textView */ private void setupAutocomplete(MultiAutoCompleteTextView textView) { ArrayAdapter<String> adapter = new ArrayAdapter<String>(this, android.R.layout.simple_dropdown_item_1line); // Adding {soupName} and {soupName:specialField} List<String> names = new LinkedList<String>(); names.addAll(smartStore.getAllSoupNames()); for (String name : names) { adapter.add("{" + name + "}"); adapter.add("{" + name + ":" + SmartSqlHelper.SOUP + "}"); adapter.add("{" + name + ":" + SmartStore.SOUP_ENTRY_ID + "}"); adapter.add("{" + name + ":" + SmartStore.SOUP_LAST_MODIFIED_DATE + "}"); } // Adding {soupName:indexedPath} try { JSONArray result = smartStore.query(QuerySpec.buildSmartQuerySpec( "SELECT soupName, path FROM soup_index_map", 1000), 0); for (int j = 0; j < result.length(); j++) { JSONArray row = result.getJSONArray(j); adapter.add("{" + row.getString(0) + ":" + row.getString(1) + "}"); } } catch (JSONException e) { SmartStoreLogger.e(TAG, "Error occurred while parsing JSON", e); } // Adding some SQL keywords adapter.add("select"); adapter.add("from"); adapter.add("where"); adapter.add("order by"); adapter.add("asc"); adapter.add("desc"); adapter.add("group by"); textView.setAdapter(adapter); textView.setTokenizer(new QueryTokenizer()); } } /** * Tokenized used by query auto-complete field * * @author wmathurin * */ class QueryTokenizer implements Tokenizer { public int findTokenStart(CharSequence text, int cursor) { int i = 
cursor; while (i > 0 && text.charAt(i - 1) != ' ') { i--; } return i; } public int findTokenEnd(CharSequence text, int cursor) { int i = cursor; int len = text.length(); while (i < len) { if (text.charAt(i) == ' ') { return i; } else { i++; } } return len; } public CharSequence terminateToken(CharSequence text) { int i = text.length(); while (i > 0 && text.charAt(i - 1) == ' ') { i--; } if (i > 0 && text.charAt(i - 1) == ' ') { return text; } else { if (text instanceof Spanned) { SpannableString sp = new SpannableString(text + " "); TextUtils.copySpansFrom((Spanned) text, 0, text.length(), Object.class, sp, 0); return sp; } else { return text; } } } }
1
17,057
Use `String.format(Locale.US, ...)` to avoid the `Lint` warning.
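A hedged sketch of the suggested fix: give `String.format` an explicit `Locale`, which silences Android Lint's `DefaultLocale` warning. `Locale.US` is safe here because the strings are SQL, not user-facing text. `SmartStoreQueries` is a hypothetical wrapper class, and the literal column and table names stand in for the `SmartStore` constants used in the patch:

import java.util.Locale;

class SmartStoreQueries {
    // Explicit Locale avoids locale-sensitive formatting of the query strings.
    static final String SOUPS_QUERY = String.format(Locale.US,
            "select %s from %s", "soupName", "soup_attrs");
    static final String INDICES_QUERY = String.format(Locale.US,
            "select %s, %s, %s from %s", "soupName", "path", "columnType",
            "soup_index_map");
}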
forcedotcom-SalesforceMobileSDK-Android
java
@@ -34,9 +34,17 @@ type AppliedToGroup struct { // PodReference represents a Pod Reference. type PodReference struct { - // The name of this pod. + // The name of this Pod. Name string - // The namespace of this pod. + // The Namespace of this Pod. + Namespace string +} + +// ServiceReference represents reference to a v1.Service. +type ServiceReference struct { + // The name of this Service. + Name string + // The Namespace of this Service. Namespace string }
1
// Copyright 2019 Antrea Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package controlplane import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" secv1alpha1 "github.com/vmware-tanzu/antrea/pkg/apis/security/v1alpha1" statsv1alpha1 "github.com/vmware-tanzu/antrea/pkg/apis/stats/v1alpha1" ) // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // AppliedToGroup is the message format of antrea/pkg/controller/types.AppliedToGroup in an API response. type AppliedToGroup struct { metav1.TypeMeta metav1.ObjectMeta // GroupMembers is a list of resources selected by this group. GroupMembers []GroupMember } // PodReference represents a Pod Reference. type PodReference struct { // The name of this pod. Name string // The namespace of this pod. Namespace string } // NamedPort represents a Port with a name on Pod. type NamedPort struct { // Port represents the Port number. Port int32 // Name represents the associated name with this Port number. Name string // Protocol for port. Must be UDP, TCP, or SCTP. Protocol Protocol } // ExternalEntityReference represents a ExternalEntity Reference. type ExternalEntityReference struct { // The name of this ExternalEntity. Name string // The namespace of this ExternalEntity. Namespace string } // GroupMember represents an resource member to be populated in Groups. type GroupMember struct { // Pod maintains the reference to the Pod. Pod *PodReference // ExternalEntity maintains the reference to the ExternalEntity. ExternalEntity *ExternalEntityReference // IP is the IP address of the Endpoints associated with the GroupMember. IPs []IPAddress // Ports is the list NamedPort of the GroupMember. Ports []NamedPort } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // ClusterGroupMembers is a list of GroupMember objects that are currently selected by a ClusterGroup. type ClusterGroupMembers struct { metav1.TypeMeta metav1.ObjectMeta EffectiveMembers []GroupMember } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // AppliedToGroupPatch describes the incremental update of an AppliedToGroup. type AppliedToGroupPatch struct { metav1.TypeMeta metav1.ObjectMeta AddedGroupMembers []GroupMember RemovedGroupMembers []GroupMember } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // AppliedToGroupList is a list of AppliedToGroup objects. type AppliedToGroupList struct { metav1.TypeMeta metav1.ListMeta Items []AppliedToGroup } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // AddressGroup is the message format of antrea/pkg/controller/types.AddressGroup in an API response. type AddressGroup struct { metav1.TypeMeta metav1.ObjectMeta // GroupMembers is a list of GroupMember selected by this group. GroupMembers []GroupMember } // IPAddress describes a single IP address. Either an IPv4 or IPv6 address must be set. type IPAddress []byte // IPNet describes an IP network. 
type IPNet struct { IP IPAddress PrefixLength int32 } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // AddressGroupPatch describes the incremental update of an AddressGroup. type AddressGroupPatch struct { metav1.TypeMeta metav1.ObjectMeta AddedGroupMembers []GroupMember RemovedGroupMembers []GroupMember } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // AddressGroupList is a list of AddressGroup objects. type AddressGroupList struct { metav1.TypeMeta metav1.ListMeta Items []AddressGroup } type NetworkPolicyType string const ( K8sNetworkPolicy NetworkPolicyType = "K8sNetworkPolicy" AntreaClusterNetworkPolicy NetworkPolicyType = "AntreaClusterNetworkPolicy" AntreaNetworkPolicy NetworkPolicyType = "AntreaNetworkPolicy" ) type NetworkPolicyReference struct { // Type of the NetworkPolicy. Type NetworkPolicyType // Namespace of the NetworkPolicy. It's empty for Antrea ClusterNetworkPolicy. Namespace string // Name of the NetworkPolicy. Name string // UID of the NetworkPolicy. UID types.UID } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // NetworkPolicy is the message format of antrea/pkg/controller/types.NetworkPolicy in an API response. type NetworkPolicy struct { metav1.TypeMeta metav1.ObjectMeta // Rules is a list of rules to be applied to the selected GroupMembers. Rules []NetworkPolicyRule // AppliedToGroups is a list of names of AppliedToGroups to which this policy applies. // Cannot be set in conjunction with any NetworkPolicyRule.AppliedToGroups in Rules. AppliedToGroups []string // Priority represents the relative priority of this NetworkPolicy as compared to // other NetworkPolicies. Priority will be unset (nil) for K8s NetworkPolicy. Priority *float64 // TierPriority represents the priority of the Tier associated with this NetworkPolicy. // The TierPriority will remain nil for K8s NetworkPolicy. TierPriority *int32 // Reference to the original NetworkPolicy that the internal NetworkPolicy is created for. SourceRef *NetworkPolicyReference } // Direction defines traffic direction of NetworkPolicyRule. type Direction string const ( DirectionIn Direction = "In" DirectionOut Direction = "Out" ) // NetworkPolicyRule describes a particular set of traffic that is allowed. type NetworkPolicyRule struct { // The direction of this rule. // If it's set to In, From must be set and To must not be set. // If it's set to Out, To must be set and From must not be set. Direction Direction // From represents sources which should be able to access the GroupMembers selected by the policy. From NetworkPolicyPeer // To represents destinations which should be able to be accessed by the GroupMembers selected by the policy. To NetworkPolicyPeer // Services is a list of services which should be matched. Services []Service // Priority defines the priority of the Rule as compared to other rules in the // NetworkPolicy. Priority int32 // Action specifies the action to be applied on the rule. i.e. Allow/Drop. An empty // action “nil” defaults to Allow action, which would be the case for rules created for // K8s NetworkPolicy. Action *secv1alpha1.RuleAction // EnableLogging is used to indicate if agent should generate logs // when rules are matched. Should be default to false. EnableLogging bool // AppliedToGroups is a list of names of AppliedToGroups to which this rule applies. // Cannot be set in conjunction with NetworkPolicy.AppliedToGroups of the NetworkPolicy // that this Rule is referred to. 
AppliedToGroups []string } // Protocol defines network protocols supported for things like container ports. type Protocol string const ( // ProtocolTCP is the TCP protocol. ProtocolTCP Protocol = "TCP" // ProtocolUDP is the UDP protocol. ProtocolUDP Protocol = "UDP" // ProtocolSCTP is the SCTP protocol. ProtocolSCTP Protocol = "SCTP" ) // Service describes a port to allow traffic on. type Service struct { // The protocol (TCP, UDP, or SCTP) which traffic must match. If not specified, this // field defaults to TCP. // +optional Protocol *Protocol // The port name or number on the given protocol. If not specified, this matches all port numbers. // +optional Port *intstr.IntOrString // EndPort defines the end of the port range, being the end included within the range. // It can only be specified when a numerical `port` is specified. // +optional EndPort *int32 } // NetworkPolicyPeer describes a peer of NetworkPolicyRules. // It could be a list of names of AddressGroups and/or a list of IPBlock. type NetworkPolicyPeer struct { // A list of names of AddressGroups. AddressGroups []string // A list of IPBlock. IPBlocks []IPBlock } // IPBlock describes a particular CIDR (Ex. "192.168.1.1/24"). The except entry describes CIDRs that should // not be included within this rule. type IPBlock struct { // CIDR is an IPNet represents the IP Block. CIDR IPNet // Except is a slice of IPNets that should not be included within an IP Block. // Except values will be rejected if they are outside the CIDR range. // +optional Except []IPNet } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // NetworkPolicyList is a list of NetworkPolicy objects. type NetworkPolicyList struct { metav1.TypeMeta metav1.ListMeta Items []NetworkPolicy } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // NodeStatsSummary contains stats produced on a Node. It's used by the antrea-agents to report stats to the antrea-controller. type NodeStatsSummary struct { metav1.TypeMeta metav1.ObjectMeta // The TrafficStats of K8s NetworkPolicies collected from the Node. NetworkPolicies []NetworkPolicyStats // The TrafficStats of Antrea ClusterNetworkPolicies collected from the Node. AntreaClusterNetworkPolicies []NetworkPolicyStats // The TrafficStats of Antrea NetworkPolicies collected from the Node. AntreaNetworkPolicies []NetworkPolicyStats } // NetworkPolicyStats contains the information and traffic stats of a NetworkPolicy. type NetworkPolicyStats struct { // The reference of the NetworkPolicy. NetworkPolicy NetworkPolicyReference // The stats of the NetworkPolicy. TrafficStats statsv1alpha1.TrafficStats } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // NetworkPolicyStatus is the status of a NetworkPolicy. type NetworkPolicyStatus struct { metav1.TypeMeta metav1.ObjectMeta // Nodes contains statuses produced on a list of Nodes. Nodes []NetworkPolicyNodeStatus } // NetworkPolicyNodeStatus is the status of a NetworkPolicy on a Node. type NetworkPolicyNodeStatus struct { // The name of the Node that produces the status. NodeName string // The generation realized by the Node. Generation int64 } type GroupReference struct { // Namespace of the Group. Empty for ClusterGroup. Namespace string // Name of the Group. Name string // UID of the Group. UID types.UID } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // GroupAssociation is a list of GroupReferences for responses to groupassociation queries. 
type GroupAssociation struct { metav1.TypeMeta metav1.ObjectMeta // AssociatedGroups is a list of GroupReferences that is associated with the // Pod/ExternalEntity being queried. AssociatedGroups []GroupReference }
1
32,217
nit: ServiceReference represents a reference to a v1.Service.
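With the nit applied, the doc comment in the patch would read as follows; the struct body is unchanged from the proposed diff:

package controlplane

// ServiceReference represents a reference to a v1.Service.
type ServiceReference struct {
	// The name of this Service.
	Name string
	// The Namespace of this Service.
	Namespace string
}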
antrea-io-antrea
go
@@ -73,7 +73,7 @@ type GcpChaosSpec struct { // The device name of the disk to detach. // Needed in disk-loss. // +optional - DeviceName *string `json:"deviceName,omitempty"` + DeviceName *[]string `json:"deviceName,omitempty"` } // GcpChaosStatus represents the status of a GcpChaos
1
// Copyright 2021 Chaos Mesh Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package v1alpha1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // +kubebuilder:object:root=true // +chaos-mesh:base // GcpChaos is the Schema for the gcpchaos API type GcpChaos struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` Spec GcpChaosSpec `json:"spec"` Status GcpChaosStatus `json:"status,omitempty"` } // GcpChaosAction represents the chaos action about gcp. type GcpChaosAction string const ( // NodeStop represents the chaos action of stopping the node. NodeStop GcpChaosAction = "node-stop" // NodeReset represents the chaos action of resetting the node. NodeReset GcpChaosAction = "node-reset" // DiskLoss represents the chaos action of detaching the disk. DiskLoss GcpChaosAction = "disk-loss" ) // GcpChaosSpec is the content of the specification for a GcpChaos type GcpChaosSpec struct { // Action defines the specific gcp chaos action. // Supported action: node-stop / node-reset / disk-loss // Default action: node-stop // +kubebuilder:validation:Enum=node-stop;node-reset;disk-loss Action GcpChaosAction `json:"action"` // Duration represents the duration of the chaos action. // +optional Duration *string `json:"duration,omitempty"` // Scheduler defines some schedule rules to control the running time of the chaos experiment about time. // +optional Scheduler *SchedulerSpec `json:"scheduler,omitempty"` // SecretName defines the name of kubernetes secret. It is used for GCP credentials. // +optional SecretName *string `json:"secretName,omitempty"` // Project defines the name of gcp project. Project string `json:"project"` // Zone defines the zone of gcp project. Zone string `json:"zone"` // Instance defines the name of the instance Instance string `json:"instance"` // The device name of the disk to detach. // Needed in disk-loss. // +optional DeviceName *string `json:"deviceName,omitempty"` } // GcpChaosStatus represents the status of a GcpChaos type GcpChaosStatus struct { ChaosStatus `json:",inline"` // The attached disk info string. // Needed in disk-loss. AttachedDiskString string `json:"attachedDiskString,omitempty"` }
1
20,663
Since it changes to an array, how about changing the name to `DeviceNames`?
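A sketch of the rename being asked for; pluralizing the JSON tag to match is my assumption, not something the comment specifies:

type GcpChaosSpec struct {
	// ... other fields unchanged ...

	// The device names of the disks to detach.
	// Needed in disk-loss.
	// +optional
	DeviceNames *[]string `json:"deviceNames,omitempty"`
}

As a design note, a plain `[]string` would be more idiomatic than a pointer to a slice, since a nil slice already distinguishes "unset".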
chaos-mesh-chaos-mesh
go
@@ -41,6 +41,12 @@ const ( // top level property ConfigTLP TopLevelProperty = "Config" + // RuntimeTLP is a top level property supported by CAS template engine + // + // The policy specific properties are placed with RuntimeTLP as the + // top level property + RuntimeTLP TopLevelProperty = "Runtime" + // VolumeTLP is a top level property supported by CAS template engine // // The properties provided by the caller are placed with VolumeTLP
1
/* Copyright 2017 The OpenEBS Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package v1alpha1 // TopLevelProperty represents the top level property that // is a starting point to represent a hierarchical chain of // properties. // // e.g. // Config.prop1.subprop1 = val1 // Config.prop1.subprop2 = val2 // In above example Config is a top level object // // NOTE: // The value of any hierarchical chain of properties // can be parsed via dot notation type TopLevelProperty string const ( // CASTOptionsTLP is a top level property supported by CAS template engine. // CAS template specific options are placed here CASTOptionsTLP TopLevelProperty = "CAST" // ConfigTLP is a top level property supported by CAS template engine // // The policy specific properties are placed with ConfigTLP as the // top level property ConfigTLP TopLevelProperty = "Config" // VolumeTLP is a top level property supported by CAS template engine // // The properties provided by the caller are placed with VolumeTLP // as the top level property // // NOTE: // CAS template engine cannot modify these properties. These are the // runtime properties that are provided as inputs to CAS template // engine. VolumeTLP TopLevelProperty = "Volume" // SnapshotTLP is a top level property supported by CAS template engine // // The properties provided by the caller are placed with SnapshotTLP // as the top level property // // NOTE: // CAS template engine cannot modify these properties. These are the // runtime properties that are provided as inputs to CAS template // engine. SnapshotTLP TopLevelProperty = "Snapshot" // StoragePoolTLP is a top level property supported by CAS template engine // // The properties provided by the caller are placed with StoragePoolTLP // as the top level property // // NOTE: // CAS template engine cannot modify these properties. These are the // runtime properties that are provided as inputs to CAS template // engine. StoragePoolTLP TopLevelProperty = "Storagepool" // TaskResultTLP is a top level property supported by CAS template engine // // The specific results after the execution of a task are placed with // TaskResultTLP as the top level property // // NOTE: // This is typically used to feed inputs of a task's execution // result to **next task** before the later's execution TaskResultTLP TopLevelProperty = "TaskResult" // CurrentJSONResultTLP is a top level property supported by CAS template engine // The result of the current task's execution is stored in this top // level property. CurrentJSONResultTLP TopLevelProperty = "JsonResult" // ListItemsTLP is a top level property supported by CAS template engine // // Results of one or more tasks' execution can be saved in this property. // // Example: // Below shows how specific properties of a list of items can be retrieved in // a go template. Below dot notation is for illustration purposes and only // reflects the way the specific property value was set. 
// // {{- .ListItems.volumes.default.mypv2.ip -}} // {{- .ListItems.volumes.default.mypv2.status -}} // {{- .ListItems.volumes.openebs.mypv.ip -}} // {{- .ListItems.volumes.openebs.mypv.status -}} ListItemsTLP TopLevelProperty = "ListItems" ) // StoragePoolTLPProperty is used to define properties that comes // after StoragePoolTLP type StoragePoolTLPProperty string const ( // OwnerCTP indicates the owner of this pool; the one who // is executing this policy // // NOTE: // The corresponding value will be accessed as // {{ .Storagepool.owner }} OwnerCTP StoragePoolTLPProperty = "owner" // DiskListCTP indicates the list of disks DiskListCTP StoragePoolTLPProperty = "diskList" // DiskDeviceListCTP indicates the list of device id of disks // If device is is not present for a disk, it contians the device path. DiskDeviceListCTP StoragePoolTLPProperty = "diskDeviceIdList" // NOodeNameCTP is the name of node where cstorpool will be ceated NodeNameCTP StoragePoolTLPProperty = "nodeName" // PoolTypeCTP is the type of cstorpool that will be ceated PoolTypeCTP StoragePoolTLPProperty = "poolType" // InitPhaseCTP is the init phase for the cstorpool CR that will be ceated InitPhaseCTP StoragePoolTLPProperty = "phase" ) // VolumeTLPProperty is used to define properties that comes // after VolumeTLP type VolumeTLPProperty string const ( // OwnerVTP indicates the owner of this volume; the one who // is executing this policy // // NOTE: // The corresponding value will be accessed as // {{ .Volume.owner }} OwnerVTP VolumeTLPProperty = "owner" // RunNamespaceVTP is the namespace where this policy is // supposed to run // NOTE: // The corresponding value will be accessed as // {{ .Volume.runNamespace }} RunNamespaceVTP VolumeTLPProperty = "runNamespace" // CapacityVTP is the capacity of the volume // NOTE: // The corresponding value will be accessed as // {{ .Volume.capacity }} CapacityVTP VolumeTLPProperty = "capacity" // PersistentVolumeClaimVTP is the PVC of the volume // NOTE: // The corresponding value will be accessed as // {{ .Volume.pvc }} PersistentVolumeClaimVTP VolumeTLPProperty = "pvc" // StorageClassVTP is the StorageClass of the volume // // NOTE: // The corresponding value will be accessed as // {{ .Volume.storageclass }} StorageClassVTP VolumeTLPProperty = "storageclass" ) // CloneTLPProperty is used to define properties for clone operations type CloneTLPProperty string const ( // SnapshotNameVTP is the snapshot name SnapshotNameVTP CloneTLPProperty = "snapshotName" // SourceVolumeTargetIPVTP is source volume target IP SourceVolumeTargetIPVTP CloneTLPProperty = "sourceVolumeTargetIP" // IsCloneEnableVTP is a bool value for clone operations // for a volume IsCloneEnableVTP CloneTLPProperty = "isCloneEnable" // SourceVolumeVTP is the name of the source volume SourceVolumeVTP CloneTLPProperty = "sourceVolume" ) // SnapshotTLPProperty is used to define properties for clone operations type SnapshotTLPProperty string const ( // VolumeNameSTP is the snapshot name VolumeSTP SnapshotTLPProperty = "volumeName" ) // PolicyTLPProperty is the name of the property that is found // under PolicyTLP type PolicyTLPProperty string const ( // EnabledPTP is the enabled property of the policy // NOTE: // The corresponding value will be accessed as // {{ .Policy.<PolicyName>.enabled }} EnabledPTP PolicyTLPProperty = "enabled" // ValuePTP is the value property of the policy // NOTE: // The corresponding value will be accessed as // {{ .Policy.<PolicyName>.value }} ValuePTP PolicyTLPProperty = "value" // DataPTP is the 
data property of the policy // NOTE: // The corresponding value will be accessed as // {{ .Policy.<PolicyName>.data }} DataPTP PolicyTLPProperty = "data" ) const ( // TaskIdentityPrefix is the prefix used for all TaskIdentity TaskIdentityPrefix string = "key" ) // TaskTLPProperty is the name of the property that is found // under TaskTLP type TaskTLPProperty string const ( // APIVersionTTP is the apiVersion property of the task // NOTE: // The corresponding value will be accessed as // {{ .Task.<TaskIdentity>.apiVersion }} APIVersionTTP TaskTLPProperty = "apiVersion" // KindTTP is the kind property of the task // NOTE: // The corresponding value will be accessed as // {{ .Task.<TaskIdentity>.kind }} KindTTP TaskTLPProperty = "kind" ) // TaskResultTLPProperty is the name of the property that is found // under TaskResultTLP type TaskResultTLPProperty string const ( // ObjectNameTRTP is the objectName property of the // TaskResultTLP // // NOTE: // The corresponding value will be accessed as // {{ .TaskResult.<TaskIdentity>.objectName }} ObjectNameTRTP TaskResultTLPProperty = "objectName" // AnnotationsTRTP is the annotations property of the // TaskResultTLP // // NOTE: // The corresponding value will be accessed as // {{ .TaskResult.<TaskIdentity>.annotations }} AnnotationsTRTP TaskResultTLPProperty = "annotations" // TaskResultVerifyErrTRTP is a property of TaskResultTLP // // First error found after **verification** checks done against the result of // the task's execution is stored in this property. // // NOTE: // The corresponding value will be accessed as // {{ .TaskResult.<TaskIdentity>.verifyErr }} TaskResultVerifyErrTRTP TaskResultTLPProperty = "verifyErr" // TaskResultNotFoundErrTRTP is a property of TaskResultTLP // // First error found after **not found** checks done against the result of // the task's execution is stored in this property. // // NOTE: // The corresponding value will be accessed as // {{ .TaskResult.<TaskIdentity>.notFoundErr }} TaskResultNotFoundErrTRTP TaskResultTLPProperty = "notFoundErr" // TaskResultVersionMismatchErrTRTP is a property of TaskResultTLP // // First error found after **version mismatch** checks done against the // result of the task's execution is stored in this property. // // NOTE: // The corresponding value will be accessed as // {{ .TaskResult.<TaskIdentity>.versionMismatchErr }} TaskResultVersionMismatchErrTRTP TaskResultTLPProperty = "versionMismatchErr" ) // ListItemsTLPProperty is the name of the property that is found // under ListItemsTLP type ListItemsTLPProperty string const ( // CurrentRepeatResourceLITP is a property of ListItemsTLP // // It is the current repeat resource due to which a task is getting // executed is set here // // Example: // {{- .ListItems.currentRepeatResource -}} // // Above templating will give the current repeat resource name CurrentRepeatResourceLITP ListItemsTLPProperty = "currentRepeatResource" )
1
12,887
Can we avoid this? The upgrade engine code should take care of this.
openebs-maya
go
@@ -36,3 +36,4 @@ ACTIVATE_SALT = 'activate' PASSWORD_RESET_SALT = 'reset' MAX_LINK_AGE = 60 * 60 * 24 # 24 hours CODE_EXP_MINUTES = 10 +TOKEN_EXP_DEFAULT = {'days': 90}
1
# Copyright (c) 2017 Quilt Data, Inc. All rights reserved. """ Constants """ from enum import Enum import re PUBLIC = 'public' # This username is blocked by Quilt signup TEAM = 'team' VALID_NAME_RE = re.compile(r'^[a-zA-Z]\w*$') VALID_USERNAME_RE = re.compile(r'^[a-z][a-z0-9_]*$') VALID_EMAIL_RE = re.compile(r'^([^\s@]+)@([^\s@]+)$') class PaymentPlan(Enum): FREE = 'free' INDIVIDUAL = 'individual_monthly_7' TEAM = 'team_monthly_490' TEAM_UNPAID = 'team_unpaid' FTS_LANGUAGE = 'english' BAD_NAMES = set([ TEAM, PUBLIC, 'anonymous', 'quilt' ]) def blacklisted_name(username): return username in BAD_NAMES ACTIVATE_SALT = 'activate' PASSWORD_RESET_SALT = 'reset' MAX_LINK_AGE = 60 * 60 * 24 # 24 hours CODE_EXP_MINUTES = 10
1
16,862
These should also be alphabetized, or at least grouped and alphabetized within each group.
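One way to read this request, grouping the salts and the expiry settings and alphabetizing within each group (the grouping itself is my choice, not prescribed by the comment):

# Salts
ACTIVATE_SALT = 'activate'
PASSWORD_RESET_SALT = 'reset'

# Expiry settings
CODE_EXP_MINUTES = 10
MAX_LINK_AGE = 60 * 60 * 24  # 24 hours
TOKEN_EXP_DEFAULT = {'days': 90}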
quiltdata-quilt
py
@@ -287,6 +287,7 @@ def _try_import_backends(): assert results.webengine_error is not None return results + # pylint: enable=unused-variable def _handle_ssl_support(fatal=False):
1
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et: # Copyright 2017 Florian Bruhin (The Compiler) <[email protected]> # # This file is part of qutebrowser. # # qutebrowser is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # qutebrowser is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with qutebrowser. If not, see <http://www.gnu.org/licenses/>. """Dialogs shown when there was a problem with a backend choice.""" import os import sys import functools import html import ctypes import ctypes.util import attr from PyQt5.QtCore import Qt from PyQt5.QtWidgets import (QApplication, QDialog, QPushButton, QHBoxLayout, QVBoxLayout, QLabel, QMessageBox) from PyQt5.QtNetwork import QSslSocket from qutebrowser.config import config from qutebrowser.utils import usertypes, objreg, version, qtutils, log, utils from qutebrowser.misc import objects, msgbox _Result = usertypes.enum( '_Result', ['quit', 'restart', 'restart_webkit', 'restart_webengine'], is_int=True, start=QDialog.Accepted + 1) @attr.s class _Button: """A button passed to BackendProblemDialog.""" text = attr.ib() setting = attr.ib() value = attr.ib() default = attr.ib(default=False) def _other_backend(backend): """Get the other backend enum/setting for a given backend.""" other_backend = { usertypes.Backend.QtWebKit: usertypes.Backend.QtWebEngine, usertypes.Backend.QtWebEngine: usertypes.Backend.QtWebKit, }[backend] other_setting = other_backend.name.lower()[2:] return (other_backend, other_setting) def _error_text(because, text, backend): """Get an error text for the given information.""" other_backend, other_setting = _other_backend(backend) return ("<b>Failed to start with the {backend} backend!</b>" "<p>qutebrowser tried to start with the {backend} backend but " "failed because {because}.</p>{text}" "<p><b>Forcing the {other_backend.name} backend</b></p>" "<p>This forces usage of the {other_backend.name} backend by " "setting the <i>backend = '{other_setting}'</i> option " "(if you have a <i>config.py</i> file, you'll need to set " "this manually).</p>".format( backend=backend.name, because=because, text=text, other_backend=other_backend, other_setting=other_setting)) class _Dialog(QDialog): """A dialog which gets shown if there are issues with the backend.""" def __init__(self, because, text, backend, buttons=None, parent=None): super().__init__(parent) vbox = QVBoxLayout(self) other_backend, other_setting = _other_backend(backend) text = _error_text(because, text, backend) label = QLabel(text, wordWrap=True) label.setTextFormat(Qt.RichText) vbox.addWidget(label) hbox = QHBoxLayout() buttons = [] if buttons is None else buttons quit_button = QPushButton("Quit") quit_button.clicked.connect(lambda: self.done(_Result.quit)) hbox.addWidget(quit_button) backend_button = QPushButton("Force {} backend".format( other_backend.name)) backend_button.clicked.connect(functools.partial( self._change_setting, 'backend', other_setting)) hbox.addWidget(backend_button) for button in buttons: btn = QPushButton(button.text, default=button.default) btn.clicked.connect(functools.partial( self._change_setting, button.setting, 
button.value)) hbox.addWidget(btn) vbox.addLayout(hbox) def _change_setting(self, setting, value): """Change the given setting and restart.""" config.instance.set_obj(setting, value, save_yaml=True) save_manager = objreg.get('save-manager') save_manager.save_all(is_exit=True) if setting == 'backend' and value == 'webkit': self.done(_Result.restart_webkit) elif setting == 'backend' and value == 'webengine': self.done(_Result.restart_webengine) else: self.done(_Result.restart) def _show_dialog(*args, **kwargs): """Show a dialog for a backend problem.""" cmd_args = objreg.get('args') if cmd_args.no_err_windows: text = _error_text(*args, **kwargs) print(text, file=sys.stderr) sys.exit(usertypes.Exit.err_init) dialog = _Dialog(*args, **kwargs) status = dialog.exec_() quitter = objreg.get('quitter') if status in [_Result.quit, QDialog.Rejected]: pass elif status == _Result.restart_webkit: quitter.restart(override_args={'backend': 'webkit'}) elif status == _Result.restart_webengine: quitter.restart(override_args={'backend': 'webengine'}) elif status == _Result.restart: quitter.restart() else: assert False, status sys.exit(usertypes.Exit.err_init) def _nvidia_shader_workaround(): """Work around QOpenGLShaderProgram issues. NOTE: This needs to be called before _handle_nouveau_graphics, or some setups will segfault in version.opengl_vendor(). See https://bugs.launchpad.net/ubuntu/+source/python-qt4/+bug/941826 """ assert objects.backend == usertypes.Backend.QtWebEngine, objects.backend if utils.is_linux: ctypes.CDLL(ctypes.util.find_library("GL"), mode=ctypes.RTLD_GLOBAL) def _handle_nouveau_graphics(): assert objects.backend == usertypes.Backend.QtWebEngine, objects.backend if os.environ.get('QUTE_SKIP_NOUVEAU_CHECK'): return if version.opengl_vendor() != 'nouveau': return if (os.environ.get('LIBGL_ALWAYS_SOFTWARE') == '1' or 'QT_XCB_FORCE_SOFTWARE_OPENGL' in os.environ): return button = _Button("Force software rendering", 'qt.force_software_rendering', True) _show_dialog( backend=usertypes.Backend.QtWebEngine, because="you're using Nouveau graphics", text="<p>There are two ways to fix this:</p>" "<p><b>Forcing software rendering</b></p>" "<p>This allows you to use the newer QtWebEngine backend (based " "on Chromium) but could have noticable performance impact " "(depending on your hardware). " "This sets the <i>qt.force_software_rendering = True</i> option " "(if you have a <i>config.py</i> file, you'll need to set this " "manually).</p>", buttons=[button], ) # Should never be reached assert False def _handle_wayland(): assert objects.backend == usertypes.Backend.QtWebEngine, objects.backend if os.environ.get('QUTE_SKIP_WAYLAND_CHECK'): return platform = QApplication.instance().platformName() if platform not in ['wayland', 'wayland-egl']: return if 'DISPLAY' in os.environ: # XWayland is available, but QT_QPA_PLATFORM=wayland is set button = _Button("Force XWayland", 'qt.force_platform', 'xcb') _show_dialog( backend=usertypes.Backend.QtWebEngine, because="you're using Wayland", text="<p>There are two ways to fix this:</p>" "<p><b>Force Qt to use XWayland</b></p>" "<p>This allows you to use the newer QtWebEngine backend " "(based on Chromium). 
" "This sets the <i>qt.force_platform = 'xcb'</i> option " "(if you have a <i>config.py</i> file, you'll need to set " "this manually).</p>", buttons=[button], ) else: # XWayland is unavailable _show_dialog( backend=usertypes.Backend.QtWebEngine, because="you're using Wayland without XWayland", text="<p>There are two ways to fix this:</p>" "<p><b>Set up XWayland</b></p>" "<p>This allows you to use the newer QtWebEngine backend " "(based on Chromium). " ) # Should never be reached assert False @attr.s class BackendImports: """Whether backend modules could be imported.""" webkit_available = attr.ib(default=None) webengine_available = attr.ib(default=None) webkit_error = attr.ib(default=None) webengine_error = attr.ib(default=None) def _try_import_backends(): """Check whether backends can be imported and return BackendImports.""" # pylint: disable=unused-variable results = BackendImports() try: from PyQt5 import QtWebKit from PyQt5 import QtWebKitWidgets except ImportError as e: results.webkit_available = False results.webkit_error = str(e) else: if qtutils.is_new_qtwebkit(): results.webkit_available = True else: results.webkit_available = False results.webkit_error = "Unsupported legacy QtWebKit found" try: from PyQt5 import QtWebEngineWidgets except ImportError as e: results.webengine_available = False results.webengine_error = str(e) else: results.webengine_available = True assert results.webkit_available is not None assert results.webengine_available is not None if not results.webkit_available: assert results.webkit_error is not None if not results.webengine_available: assert results.webengine_error is not None return results def _handle_ssl_support(fatal=False): """Check for full SSL availability. If "fatal" is given, show an error and exit. """ text = ("Could not initialize QtNetwork SSL support. If you use " "OpenSSL 1.1 with a PyQt package from PyPI (e.g. on Archlinux " "or Debian Stretch), you need to set LD_LIBRARY_PATH to the path " "of OpenSSL 1.0. 
This only affects downloads.") if QSslSocket.supportsSsl(): return if fatal: errbox = msgbox.msgbox(parent=None, title="SSL error", text="Could not initialize SSL support.", icon=QMessageBox.Critical, plain_text=False) errbox.exec_() sys.exit(usertypes.Exit.err_init) assert not fatal log.init.warning(text) def _check_backend_modules(): """Check for the modules needed for QtWebKit/QtWebEngine.""" imports = _try_import_backends() if imports.webkit_available and imports.webengine_available: return elif not imports.webkit_available and not imports.webengine_available: text = ("<p>qutebrowser needs QtWebKit or QtWebEngine, but neither " "could be imported!</p>" "<p>The errors encountered were:<ul>" "<li><b>QtWebKit:</b> {webkit_error}" "<li><b>QtWebEngine:</b> {webengine_error}" "</ul></p>".format( webkit_error=html.escape(imports.webkit_error), webengine_error=html.escape(imports.webengine_error))) errbox = msgbox.msgbox(parent=None, title="No backend library found!", text=text, icon=QMessageBox.Critical, plain_text=False) errbox.exec_() sys.exit(usertypes.Exit.err_init) elif objects.backend == usertypes.Backend.QtWebKit: if imports.webkit_available: return assert imports.webengine_available _show_dialog( backend=usertypes.Backend.QtWebKit, because="QtWebKit could not be imported", text="<p><b>The error encountered was:</b><br/>{}</p>".format( html.escape(imports.webkit_error)) ) elif objects.backend == usertypes.Backend.QtWebEngine: if imports.webengine_available: return assert imports.webkit_available _show_dialog( backend=usertypes.Backend.QtWebEngine, because="QtWebEngine could not be imported", text="<p><b>The error encountered was:</b><br/>{}</p>".format( html.escape(imports.webengine_error)) ) # Should never be reached assert False def init(): _check_backend_modules() if objects.backend == usertypes.Backend.QtWebEngine: _handle_ssl_support() _handle_wayland() _nvidia_shader_workaround() _handle_nouveau_graphics() else: assert objects.backend == usertypes.Backend.QtWebKit, objects.backend _handle_ssl_support(fatal=True)
1
19,415
No need for this: pylint already scopes the disable to this function, and the disable is needed for the entire function anyway.
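The underlying pylint behavior: a `# pylint: disable=` comment placed inside a function body is scoped to that block, so it expires at the end of the function without a matching `# pylint: enable=`. A small self-contained illustration:

def probe():
    # pylint: disable=unused-variable
    # The disable above applies only within this function body.
    unused = 42

# From here on the checker is active again; no explicit
# "# pylint: enable=unused-variable" line is required.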
qutebrowser-qutebrowser
py
@@ -29,6 +29,7 @@ const ( var ( errUnmarshalBuildOpts = errors.New("can't unmarshal build field into string or compose-style map") + errUnmarshalCountOpts = errors.New("can't unmarshal count field into task count or auto scaling config") ) var dockerfileDefaultName = "Dockerfile"
1
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 // Package manifest provides functionality to create Manifest files. package manifest import ( "errors" "fmt" "path/filepath" "strconv" "strings" "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/copilot-cli/internal/pkg/template" "gopkg.in/yaml.v3" ) const ( // LoadBalancedWebServiceType is a web service with a load balancer and Fargate as compute. LoadBalancedWebServiceType = "Load Balanced Web Service" // BackendServiceType is a service that cannot be accessed from the internet but can be reached from other services. BackendServiceType = "Backend Service" defaultSidecarPort = "80" defaultFluentbitImage = "amazon/aws-for-fluent-bit:latest" ) var ( errUnmarshalBuildOpts = errors.New("can't unmarshal build field into string or compose-style map") ) var dockerfileDefaultName = "Dockerfile" // ServiceTypes are the supported service manifest types. var ServiceTypes = []string{ LoadBalancedWebServiceType, BackendServiceType, } // Service holds the basic data that every service manifest file needs to have. type Service struct { Name *string `yaml:"name"` Type *string `yaml:"type"` // must be one of the supported manifest types. } // ServiceImage represents the service's container image. type ServiceImage struct { Build BuildArgsOrString `yaml:"build"` // Path to the Dockerfile. } // BuildConfig populates a docker.BuildArguments struct from the fields available in the manifest. // Prefer the following hierarchy: // 1. Specific dockerfile, specific context // 2. Specific dockerfile, context = dockerfile dir // 3. "Dockerfile" located in context dir // 4. "Dockerfile" located in ws root. func (s *ServiceImage) BuildConfig(rootDirectory string) *DockerBuildArgs { df := s.dockerfile() ctx := s.context() if df != "" && ctx != "" { return &DockerBuildArgs{ Dockerfile: aws.String(filepath.Join(rootDirectory, df)), Context: aws.String(filepath.Join(rootDirectory, ctx)), Args: s.args(), } } if df != "" && ctx == "" { return &DockerBuildArgs{ Dockerfile: aws.String(filepath.Join(rootDirectory, df)), Context: aws.String(filepath.Join(rootDirectory, filepath.Dir(df))), Args: s.args(), } } if df == "" && ctx != "" { return &DockerBuildArgs{ Dockerfile: aws.String(filepath.Join(rootDirectory, ctx, dockerfileDefaultName)), Context: aws.String(filepath.Join(rootDirectory, ctx)), Args: s.args(), } } return &DockerBuildArgs{ Dockerfile: aws.String(filepath.Join(rootDirectory, dockerfileDefaultName)), Context: aws.String(rootDirectory), Args: s.args(), } } // dockerfile returns the path to the service's Dockerfile. If no dockerfile is specified, // returns "". func (s *ServiceImage) dockerfile() string { // Prefer to use the "Dockerfile" string in BuildArgs. Otherwise, // "BuildString". If no dockerfile specified, return "". if s.Build.BuildArgs.Dockerfile != nil { return aws.StringValue(s.Build.BuildArgs.Dockerfile) } var dfPath string if s.Build.BuildString != nil { dfPath = aws.StringValue(s.Build.BuildString) } return dfPath } // context returns the build context directory if it exists, otherwise an empty string. func (s *ServiceImage) context() string { return aws.StringValue(s.Build.BuildArgs.Context) } // args returns the args section, if it exists, to override args in the dockerfile. // Otherwise it returns an empty map. 
func (s *ServiceImage) args() map[string]string { return s.Build.BuildArgs.Args } // BuildArgsOrString is a custom type which supports unmarshaling yaml which // can either be of type string or type DockerBuildArgs. type BuildArgsOrString struct { BuildString *string BuildArgs DockerBuildArgs } // UnmarshalYAML overrides the default YAML unmarshaling logic for the BuildArgsOrString // struct, allowing it to perform more complex unmarshaling behavior. // This method implements the yaml.Unmarshaler (v2) interface. func (b *BuildArgsOrString) UnmarshalYAML(unmarshal func(interface{}) error) error { if err := unmarshal(&b.BuildArgs); err != nil { switch err.(type) { case *yaml.TypeError: break default: return err } } if !b.BuildArgs.isEmpty() { // Unmarshaled successfully to b.BuildArgs, return. return nil } if err := unmarshal(&b.BuildString); err != nil { return errUnmarshalBuildOpts } return nil } // DockerBuildArgs represents the options specifiable under the "build" field // of Docker Compose services. For more information, see: // https://docs.docker.com/compose/compose-file/#build type DockerBuildArgs struct { Context *string `yaml:"context,omitempty"` Dockerfile *string `yaml:"dockerfile,omitempty"` Args map[string]string `yaml:"args,omitempty"` } func (b *DockerBuildArgs) isEmpty() bool { if b.Context == nil && b.Dockerfile == nil && b.Args == nil { return true } return false } // ServiceImageWithPort represents a container image with an exposed port. type ServiceImageWithPort struct { ServiceImage `yaml:",inline"` Port *uint16 `yaml:"port"` } // LogConfig holds configuration for Firelens to route your logs. type LogConfig struct { Image *string `yaml:"image"` Destination map[string]string `yaml:"destination,flow"` EnableMetadata *bool `yaml:"enableMetadata"` SecretOptions map[string]string `yaml:"secretOptions"` ConfigFile *string `yaml:"configFilePath"` } func (lc *LogConfig) logConfigOpts() *template.LogConfigOpts { return &template.LogConfigOpts{ Image: lc.image(), ConfigFile: lc.ConfigFile, EnableMetadata: lc.enableMetadata(), Destination: lc.Destination, SecretOptions: lc.SecretOptions, } } func (lc *LogConfig) image() *string { if lc.Image == nil { return aws.String(defaultFluentbitImage) } return lc.Image } func (lc *LogConfig) enableMetadata() *string { if lc.EnableMetadata == nil { // Enable ecs log metadata by default. return aws.String("true") } return aws.String(strconv.FormatBool(*lc.EnableMetadata)) } // Sidecar holds configuration for all sidecar containers in a service. type Sidecar struct { Sidecars map[string]*SidecarConfig `yaml:"sidecars"` } // SidecarsOpts converts the service's sidecar configuration into a format parsable by the templates pkg. func (s *Sidecar) SidecarsOpts() ([]*template.SidecarOpts, error) { if s.Sidecars == nil { return nil, nil } var sidecars []*template.SidecarOpts for name, config := range s.Sidecars { port, protocol, err := parsePortMapping(config.Port) if err != nil { return nil, err } sidecars = append(sidecars, &template.SidecarOpts{ Name: aws.String(name), Image: config.Image, Port: port, Protocol: protocol, CredsParam: config.CredsParam, }) } return sidecars, nil } // SidecarConfig represents the configurable options for setting up a sidecar container. type SidecarConfig struct { Port *string `yaml:"port"` Image *string `yaml:"image"` CredsParam *string `yaml:"credentialsParameter"` } // TaskConfig represents the resource boundaries and environment variables for the containers in the task. 
type TaskConfig struct { CPU *int `yaml:"cpu"` Memory *int `yaml:"memory"` Count *int `yaml:"count"` // 0 is a valid value, so we want the default value to be nil. Variables map[string]string `yaml:"variables"` Secrets map[string]string `yaml:"secrets"` } // ServiceProps contains properties for creating a new service manifest. type ServiceProps struct { Name string Dockerfile string } // UnmarshalService deserializes the YAML input stream into a service manifest object. // If an error occurs during deserialization, then returns the error. // If the service type in the manifest is invalid, then returns an ErrInvalidManifestType. func UnmarshalService(in []byte) (interface{}, error) { am := Service{} if err := yaml.Unmarshal(in, &am); err != nil { return nil, fmt.Errorf("unmarshal to service manifest: %w", err) } typeVal := aws.StringValue(am.Type) switch typeVal { case LoadBalancedWebServiceType: m := newDefaultLoadBalancedWebService() if err := yaml.Unmarshal(in, m); err != nil { return nil, fmt.Errorf("unmarshal to load balanced web service: %w", err) } return m, nil case BackendServiceType: m := newDefaultBackendService() if err := yaml.Unmarshal(in, m); err != nil { return nil, fmt.Errorf("unmarshal to backend service: %w", err) } if m.BackendServiceConfig.Image.HealthCheck != nil { // Make sure that unset fields in the healthcheck gets a default value. m.BackendServiceConfig.Image.HealthCheck.applyIfNotSet(newDefaultContainerHealthCheck()) } return m, nil default: return nil, &ErrInvalidSvcManifestType{Type: typeVal} } } func durationp(v time.Duration) *time.Duration { return &v } // Valid sidecar portMapping example: 2000/udp, or 2000 (default to be tcp). func parsePortMapping(s *string) (port *string, protocol *string, err error) { if s == nil { // default port for sidecar container to be 80. return aws.String(defaultSidecarPort), nil, nil } portProtocol := strings.Split(*s, "/") switch len(portProtocol) { case 1: return aws.String(portProtocol[0]), nil, nil case 2: return aws.String(portProtocol[0]), aws.String(portProtocol[1]), nil default: return nil, nil, fmt.Errorf("cannot parse port mapping from %s", *s) } }
1
14,897
nit: can we remove "can't" from the error message?
aws-copilot-cli
go
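A minimal sketch of what the reviewer's nit could look like once applied, assuming the variable name stays the same and only the message wording changes; the replacement wording "unable to" is an assumption, since the reviewer only asked to drop "can't":

package manifest

import "errors"

// Reworded per the review nit: the message no longer uses "can't".
var errUnmarshalBuildOpts = errors.New("unable to unmarshal build field into string or compose-style map")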
@@ -114,7 +114,14 @@ func NewCStorVolumeReplicaController( q.Operation = common.QOpAdd glog.Infof("cStorVolumeReplica Added event : %v, %v", cVR.ObjectMeta.Name, string(cVR.ObjectMeta.UID)) controller.recorder.Event(cVR, corev1.EventTypeNormal, string(common.SuccessSynced), string(common.MessageCreateSynced)) - cVR.Status.Phase = apis.CVRStatusPending + + // For New request phase of cVR will be empty + if IsEmptyStatus(cVR) { + cVR.Status.Phase = apis.CVRStatusPending + } else { + cVR.Status.Phase = apis.CVRStatusRecreate + } + cVR, _ = controller.clientset.OpenebsV1alpha1().CStorVolumeReplicas(cVR.Namespace).Update(cVR) controller.enqueueCStorReplica(cVR, q) },
1
/* Copyright 2018 The OpenEBS Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package replicacontroller import ( "github.com/golang/glog" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/runtime" kubeinformers "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/scheme" typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1" "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" "k8s.io/client-go/util/workqueue" "github.com/openebs/maya/cmd/cstor-pool-mgmt/controller/common" apis "github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1" //clientset "github.com/openebs/maya/pkg/client/clientset/versioned" clientset "github.com/openebs/maya/pkg/client/generated/clientset/internalclientset" //openebsScheme "github.com/openebs/maya/pkg/client/clientset/versioned/scheme" openebsScheme "github.com/openebs/maya/pkg/client/generated/clientset/internalclientset/scheme" //informers "github.com/openebs/maya/pkg/client/informers/externalversions" informers "github.com/openebs/maya/pkg/client/generated/informer/externalversions" ) const replicaControllerName = "CStorVolumeReplica" // CStorVolumeReplicaController is the controller implementation for cStorVolumeReplica resources. type CStorVolumeReplicaController struct { // kubeclientset is a standard kubernetes clientset. kubeclientset kubernetes.Interface // clientset is a openebs custom resource package generated for custom API group. clientset clientset.Interface // cStorReplicaSynced is used for caches sync to get populated cStorReplicaSynced cache.InformerSynced // workqueue is a rate limited work queue. This is used to queue work to be // processed instead of performing it as soon as a change happens. This // means we can ensure we only process a fixed amount of resources at a // time, and makes it easy to ensure we are never processing the same item // simultaneously in two different workers. workqueue workqueue.RateLimitingInterface // recorder is an event recorder for recording Event resources to the // Kubernetes API. recorder record.EventRecorder } // NewCStorVolumeReplicaController returns a new cStor Replica controller instance func NewCStorVolumeReplicaController( kubeclientset kubernetes.Interface, clientset clientset.Interface, kubeInformerFactory kubeinformers.SharedInformerFactory, cStorInformerFactory informers.SharedInformerFactory) *CStorVolumeReplicaController { // obtain references to shared index informers for the cStorReplica resources. cStorReplicaInformer := cStorInformerFactory.Openebs().V1alpha1().CStorVolumeReplicas() openebsScheme.AddToScheme(scheme.Scheme) // Create event broadcaster // Add cStor-Replica-controller types to the default Kubernetes Scheme so Events can be // logged for cStor-Replica-controller types. glog.V(4).Info("Creating event broadcaster") eventBroadcaster := record.NewBroadcaster() eventBroadcaster.StartLogging(glog.Infof) // StartEventWatcher starts sending events received from this EventBroadcaster to the given // event handler function. 
The return value can be ignored or used to stop recording, if // desired. Events("") denotes empty namespace eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: kubeclientset.CoreV1().Events("")}) recorder := eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: replicaControllerName}) controller := &CStorVolumeReplicaController{ kubeclientset: kubeclientset, clientset: clientset, cStorReplicaSynced: cStorReplicaInformer.Informer().HasSynced, workqueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "CStorVolumeReplica"), recorder: recorder, } glog.Info("Setting up event handlers") // Instantiating QueueLoad before entering workqueue. q := common.QueueLoad{} // Set up an event handler for when cStorReplica resources change. cStorReplicaInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { cVR := obj.(*apis.CStorVolumeReplica) if !IsRightCStorVolumeReplica(cVR) { return } if IsDeletionFailedBefore(cVR) || IsErrorDuplicate(cVR) { return } q.Operation = common.QOpAdd glog.Infof("cStorVolumeReplica Added event : %v, %v", cVR.ObjectMeta.Name, string(cVR.ObjectMeta.UID)) controller.recorder.Event(cVR, corev1.EventTypeNormal, string(common.SuccessSynced), string(common.MessageCreateSynced)) cVR.Status.Phase = apis.CVRStatusPending cVR, _ = controller.clientset.OpenebsV1alpha1().CStorVolumeReplicas(cVR.Namespace).Update(cVR) controller.enqueueCStorReplica(cVR, q) }, UpdateFunc: func(old, new interface{}) { newCVR := new.(*apis.CStorVolumeReplica) oldCVR := old.(*apis.CStorVolumeReplica) if !IsRightCStorVolumeReplica(newCVR) { return } if IsOnlyStatusChange(oldCVR, newCVR) { glog.Infof("Only cVR status change: %v, %v", newCVR.ObjectMeta.Name, string(newCVR.ObjectMeta.UID)) return } // ToDo: Need to have statuses in more organised manner // ToDo: IsErrorDuplicate(newCVR) is ignored as of now. if IsDeletionFailedBefore(newCVR) { return } // Periodic resync will send update events for all known cStorReplica. // Two different versions of the same cStorReplica will always have different RVs. if newCVR.ResourceVersion == oldCVR.ResourceVersion { q.Operation = common.QOpSync glog.Infof("CstorVolumeReplica status sync event for %s", newCVR.ObjectMeta.Name) controller.recorder.Event(newCVR, corev1.EventTypeNormal, string(common.SuccessSynced), string(common.StatusSynced)) } else if IsDestroyEvent(newCVR) { q.Operation = common.QOpDestroy glog.Infof("cStorVolumeReplica Destroy event : %v, %v", newCVR.ObjectMeta.Name, string(newCVR.ObjectMeta.UID)) controller.recorder.Event(newCVR, corev1.EventTypeNormal, string(common.SuccessSynced), string(common.MessageDestroySynced)) } else { q.Operation = common.QOpModify glog.Infof("cStorVolumeReplica Modify event : %v, %v", newCVR.ObjectMeta.Name, string(newCVR.ObjectMeta.UID)) controller.recorder.Event(newCVR, corev1.EventTypeNormal, string(common.SuccessSynced), string(common.MessageModifySynced)) return // will be removed once modify is implemented } controller.enqueueCStorReplica(newCVR, q) }, DeleteFunc: func(obj interface{}) { cVR := obj.(*apis.CStorVolumeReplica) if !IsRightCStorVolumeReplica(cVR) { return } glog.Infof("cVR Resource deleted event: %v, %v", cVR.ObjectMeta.Name, string(cVR.ObjectMeta.UID)) }, }) return controller } // enqueueCStorReplica takes a CStorReplica resource and converts it into a namespace/name // string which is then put onto the work queue. 
This method should *not* be // passed resources of any type other than CStorReplica. func (c *CStorVolumeReplicaController) enqueueCStorReplica(obj *apis.CStorVolumeReplica, q common.QueueLoad) { var key string var err error if key, err = cache.MetaNamespaceKeyFunc(obj); err != nil { runtime.HandleError(err) return } q.Key = key c.workqueue.AddRateLimited(q) }
1
11,189
Instead of modifying 'Status', which impacts the state diagram, how about using annotations on the CVR? That could probably help with our upgrade path as well. Is this possible? cc: @AmitKumarDas
openebs-maya
go
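As a hedged sketch of the reviewer's idea, here is one way the AddFunc change could use an annotation instead of repurposing Status.Phase. The annotation key is hypothetical (not an actual OpenEBS constant), and IsEmptyStatus is the helper the patch relies on:

package replicacontroller

import (
	apis "github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1"
)

// Hypothetical annotation key; not part of the OpenEBS API.
const cvrRecreateAnnotation = "openebs.io/cvr-recreate"

// markCVRState sketches the reviewer's suggestion: flag a recreate via an
// annotation rather than a new Status.Phase value, leaving the phase state
// diagram untouched. Annotations is promoted from the embedded ObjectMeta.
func markCVRState(cVR *apis.CStorVolumeReplica) {
	if IsEmptyStatus(cVR) {
		cVR.Status.Phase = apis.CVRStatusPending
		return
	}
	if cVR.Annotations == nil {
		cVR.Annotations = map[string]string{}
	}
	cVR.Annotations[cvrRecreateAnnotation] = "true"
}

The caller would then Update the CVR exactly as the patch does today, so only the signal's location changes, not the control flow.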
@@ -38,9 +38,10 @@ const ( DefaultDownloadURL = "https://download.docker.com" DockerPreqReqList = "apt-transport-https ca-certificates curl gnupg-agent software-properties-common" - KubernetesDownloadURL = "https://apt.kubernetes.io/" - KubernetesGPGURL = "https://packages.cloud.google.com/apt/doc/apt-key.gpg" - + KubernetesDownloadURL = "https://apt.kubernetes.io/" + KubernetesGPGURL = "https://packages.cloud.google.com/apt/doc/apt-key.gpg" + KubernetesBaseurl = "https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64" + KubernetesGpgkey = "https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg" KubeEdgeDownloadURL = "https://github.com/kubeedge/kubeedge/releases/download" KubeEdgePath = "/etc/kubeedge/" KubeEdgeConfPath = KubeEdgePath + "kubeedge/edge/conf"
1
/* Copyright 2019 The Kubeedge Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package util import ( "bytes" "fmt" "io" "os" "os/exec" "strings" "sync" "github.com/spf13/pflag" types "github.com/kubeedge/kubeedge/keadm/app/cmd/common" ) //Constants used by installers const ( UbuntuOSType = "ubuntu" CentOSType = "centos" DefaultDownloadURL = "https://download.docker.com" DockerPreqReqList = "apt-transport-https ca-certificates curl gnupg-agent software-properties-common" KubernetesDownloadURL = "https://apt.kubernetes.io/" KubernetesGPGURL = "https://packages.cloud.google.com/apt/doc/apt-key.gpg" KubeEdgeDownloadURL = "https://github.com/kubeedge/kubeedge/releases/download" KubeEdgePath = "/etc/kubeedge/" KubeEdgeConfPath = KubeEdgePath + "kubeedge/edge/conf" KubeEdgeBinaryName = "edge_core" KubeEdgeDefaultCertPath = KubeEdgePath + "certs/" KubeEdgeConfigEdgeYaml = KubeEdgeConfPath + "/edge.yaml" KubeEdgeConfigNodeJSON = KubeEdgeConfPath + "/node.json" KubeEdgeConfigLoggingYaml = KubeEdgeConfPath + "/logging.yaml" KubeEdgeConfigModulesYaml = KubeEdgeConfPath + "/modules.yaml" KubeEdgeCloudCertGenPath = KubeEdgePath + "certgen.sh" KubeEdgeEdgeCertsTarFileName = "certs.tgz" KubeEdgeEdgeCertsTarFilePath = KubeEdgePath + "certs.tgz" KubeEdgeCloudConfPath = KubeEdgePath + "kubeedge/cloud/conf" KubeEdgeControllerYaml = KubeEdgeCloudConfPath + "/controller.yaml" KubeEdgeControllerLoggingYaml = KubeEdgeCloudConfPath + "/logging.yaml" KubeEdgeControllerModulesYaml = KubeEdgeCloudConfPath + "/modules.yaml" KubeCloudBinaryName = "edgecontroller" KubeCloudApiserverYamlPath = "/etc/kubernetes/manifests/kube-apiserver.yaml" KubeCloudReplaceIndex = 25 KubeCloudReplaceString = " - --insecure-bind-address=0.0.0.0\n" KubeAPIServerName = "kube-apiserver" KubeEdgeHTTPProto = "http" KubeEdgeHTTPSProto = "https" KubeEdgeHTTPPort = "8080" KubeEdgeHTTPSPort = "6443" KubeEdgeHTTPRequestTimeout = 30 ) //AddToolVals gets the value and default values of each flags and collects them in temporary cache func AddToolVals(f *pflag.Flag, flagData map[string]types.FlagData) { flagData[f.Name] = types.FlagData{Val: f.Value.String(), DefVal: f.DefValue} } //CheckIfAvailable checks is val of a flag is empty then return the default value func CheckIfAvailable(val, defval string) string { if val == "" { return defval } return val } //Common struct contains OS and Tool version properties and also embeds OS interface type Common struct { types.OSTypeInstaller OSVersion string ToolVersion string KubeConfig string } //SetOSInterface defines a method to set the implemtation of the OS interface func (co *Common) SetOSInterface(intf types.OSTypeInstaller) { co.OSTypeInstaller = intf } //Command defines commands to be executed and captures std out and std error type Command struct { Cmd *exec.Cmd StdOut []byte StdErr []byte } //ExecuteCommand executes the command and captures the output in stdOut func (cm *Command) ExecuteCommand() { var err error cm.StdOut, err = cm.Cmd.Output() if err != nil { fmt.Println("Output failed: ", err) cm.StdErr = []byte(err.Error()) } 
} //GetStdOutput gets StdOut field func (cm Command) GetStdOutput() string { if len(cm.StdOut) != 0 { return strings.TrimRight(string(cm.StdOut), "\n") } return "" } //GetStdErr gets StdErr field func (cm Command) GetStdErr() string { if len(cm.StdErr) != 0 { return strings.TrimRight(string(cm.StdErr), "\n") } return "" } //ExecuteCmdShowOutput captures both StdOut and StdErr after exec.cmd(). //It helps in the commands where it takes some time for execution. func (cm Command) ExecuteCmdShowOutput() error { var stdoutBuf, stderrBuf bytes.Buffer stdoutIn, _ := cm.Cmd.StdoutPipe() stderrIn, _ := cm.Cmd.StderrPipe() var errStdout, errStderr error stdout := io.MultiWriter(os.Stdout, &stdoutBuf) stderr := io.MultiWriter(os.Stderr, &stderrBuf) err := cm.Cmd.Start() if err != nil { return fmt.Errorf("failed to start because of error : %s", err.Error()) } var wg sync.WaitGroup wg.Add(1) go func() { _, errStdout = io.Copy(stdout, stdoutIn) wg.Done() }() _, errStderr = io.Copy(stderr, stderrIn) wg.Wait() err = cm.Cmd.Wait() if err != nil { return fmt.Errorf("failed to run because of error : %s", err.Error()) } if errStdout != nil || errStderr != nil { return fmt.Errorf("failed to capture stdout or stderr") } cm.StdOut, cm.StdErr = stdoutBuf.Bytes(), stderrBuf.Bytes() return nil } //GetOSVersion gets the OS name func GetOSVersion() string { c := &Command{Cmd: exec.Command("sh", "-c", ". /etc/os-release && echo $ID")} c.ExecuteCommand() return c.GetStdOutput() } //GetOSInterface helps in returning OS specific object which implements OSTypeInstaller interface. func GetOSInterface() types.OSTypeInstaller { switch GetOSVersion() { case UbuntuOSType: return &UbuntuOS{} case CentOSType: return &CentOS{} default: } return nil } //IsKubeEdgeController identifies if the node is having edge controller and k8s api-server already running. //If so, then return true, else it can used as edge node and initialise it. func IsKubeEdgeController() (types.ModuleRunning, error) { osType := GetOSInterface() edgeControllerRunning, err := osType.IsKubeEdgeProcessRunning(KubeCloudBinaryName) if err != nil { return types.NoneRunning, err } apiServerRunning, err := osType.IsKubeEdgeProcessRunning(KubeAPIServerName) if err != nil { return types.NoneRunning, err } //If any of edgecontroller or K8S API server is running, then we believe the node is cloud node if edgeControllerRunning || apiServerRunning { return types.KubeEdgeCloudRunning, nil } edgeCoreRunning, err := osType.IsKubeEdgeProcessRunning(KubeEdgeBinaryName) if err != nil { return types.NoneRunning, err } if false != edgeCoreRunning { return types.KubeEdgeEdgeRunning, nil } return types.NoneRunning, nil }
1
11,939
Rename KubernetesGPGURL -> KubernetesUbuntuGPGURL, and modify it wherever it is used.
kubeedge-kubeedge
go
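A sketch of the requested rename applied to the constant block from the patch: only KubernetesGPGURL changes, the new CentOS yum constants are kept as-is, and every existing call site of the old name would need the same update:

package util

const (
	KubernetesDownloadURL = "https://apt.kubernetes.io/"
	// Renamed from KubernetesGPGURL per the review, since this key is Ubuntu/apt specific.
	KubernetesUbuntuGPGURL = "https://packages.cloud.google.com/apt/doc/apt-key.gpg"
	KubernetesBaseurl      = "https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64"
	KubernetesGpgkey       = "https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg"
)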